From c35ad9ee4f21c03baaea65e2479e9d08c4b4acd2 Mon Sep 17 00:00:00 2001
From: Mircea Trofin
Date: Wed, 27 Apr 2022 14:19:14 -0700
Subject: [PATCH] [mlgo] Support exposing more features than those supported
 by models

This allows the compiler to support more features than those supported by
a model. The only requirement (development mode only) is that the new
features must be appended at the end of the list of features requested
from the model.

The support is transparent to compiler code: for unsupported features, we
provide a valid buffer to copy their values; it's just that this buffer is
disconnected from the model, so insofar as the model is concerned (AOT or
development mode), these features don't exist. The buffers are allocated
at setup - meaning, at steady state, there is no extra allocation
(maintaining the current invariant). These buffers have two roles: first,
they keep the compiler code simple; second, they allow logging their
values in development mode. The latter allows retraining a model that
supports the larger feature set from traces produced with the old model.

For release mode (AOT-ed models), this decouples compiler evolution from
model evolution, which we want in scenarios where the toolchain is
frequently rebuilt and redeployed: we can first deploy the new features,
and continue working with the older model, until a new model is made
available, which can then be picked up the next time the compiler is
built.

Differential Revision: https://reviews.llvm.org/D124565
---
 .../llvm/Analysis/InlineModelFeatureMaps.h    |  4 +-
 llvm/include/llvm/Analysis/MLModelRunner.h    | 19 +++-
 .../llvm/Analysis/ModelUnderTrainingRunner.h  |  7 +-
 .../llvm/Analysis/NoInferenceModelRunner.h    | 12 +--
 .../llvm/Analysis/ReleaseModeModelRunner.h    | 24 ++----
 llvm/include/llvm/Analysis/TensorSpec.h       |  2 +
 .../Analysis/DevelopmentModeInlineAdvisor.cpp |  7 +-
 llvm/lib/Analysis/MLInlineAdvisor.cpp         | 10 +--
 .../lib/Analysis/ModelUnderTrainingRunner.cpp | 30 +++++--
 llvm/lib/Analysis/NoInferenceModelRunner.cpp  | 16 +---
 llvm/lib/Analysis/TFUtils.cpp                 | 17 +++-
 llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp   | 22 ++---
 llvm/unittests/Analysis/CMakeLists.txt        |  3 +-
 llvm/unittests/Analysis/MLModelRunnerTest.cpp | 86 +++++++++++++++++++
 llvm/unittests/Analysis/TFUtilsTest.cpp       | 32 +++++++
 15 files changed, 216 insertions(+), 75 deletions(-)
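Note: to make the mechanism concrete before the diffs, here is a minimal standalone sketch of the idea (not part of the patch; ToyModel and ToyRunner are hypothetical stand-ins for the AOT-generated model and for ReleaseModeModelRunner). The compiler exposes "a", "b", and a newly added "c"; the model only knows "a" and "b"; writes to "c" land in a runner-owned scratch buffer that evaluation never reads.

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// Hypothetical stand-in for an AOT-compiled model that only knows inputs
// "a" and "b"; evaluation adds them.
struct ToyModel {
  int64_t A = 0, B = 0;
  int LookupArgIndex(const std::string &Name) {
    if (Name == "a")
      return 0;
    if (Name == "b")
      return 1;
    return -1; // unknown feature
  }
  void *arg_data(int Index) { return Index == 0 ? (void *)&A : (void *)&B; }
  int64_t run() const { return A + B; }
};

// Hypothetical runner following the patch's scheme: every exposed feature
// gets a valid buffer; only model-known features alias model memory.
class ToyRunner {
  ToyModel Model;
  std::vector<void *> InputBuffers;            // one per exposed feature
  std::vector<std::vector<char>> OwnedBuffers; // scratch for unknown features

public:
  explicit ToyRunner(const std::vector<std::string> &Features) {
    for (const std::string &Name : Features) {
      const int Index = Model.LookupArgIndex(Name);
      if (Index >= 0) {
        InputBuffers.push_back(Model.arg_data(Index)); // model-backed
      } else {
        // Disconnected buffer: writable, loggable, invisible to the model.
        OwnedBuffers.emplace_back(sizeof(int64_t));
        InputBuffers.push_back(OwnedBuffers.back().data());
      }
    }
  }
  int64_t *getTensor(size_t I) {
    return static_cast<int64_t *>(InputBuffers[I]);
  }
  int64_t evaluate() const { return Model.run(); }
};

int main() {
  ToyRunner Runner({"a", "b", "c"}); // "c" is newer than the model
  *Runner.getTensor(0) = 1;
  *Runner.getTensor(1) = 2;
  *Runner.getTensor(2) = -3; // goes to scratch; evaluation ignores it
  assert(Runner.evaluate() == 3);
  return 0;
}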
diff --git a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
index 1afa8a825f15..fb8236c28b25 100644
--- a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
+++ b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
@@ -10,6 +10,8 @@
 #ifndef LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
 #define LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
 
+#include "llvm/Analysis/TensorSpec.h"
+
 #include <array>
 #include <string>
 #include <vector>
@@ -127,7 +129,7 @@ inlineCostFeatureToMlFeature(InlineCostFeatureIndex Feature) {
 constexpr size_t NumberOfFeatures =
     static_cast<size_t>(FeatureIndex::NumberOfFeatures);
 
-extern const std::array<std::string, NumberOfFeatures> FeatureNameMap;
+extern const std::array<TensorSpec, NumberOfFeatures> FeatureMap;
 
 extern const char *const DecisionName;
 extern const char *const DefaultDecisionName;
diff --git a/llvm/include/llvm/Analysis/MLModelRunner.h b/llvm/include/llvm/Analysis/MLModelRunner.h
index a923af5f06d2..872c0e37f00e 100644
--- a/llvm/include/llvm/Analysis/MLModelRunner.h
+++ b/llvm/include/llvm/Analysis/MLModelRunner.h
@@ -10,6 +10,7 @@
 #ifndef LLVM_ANALYSIS_MLMODELRUNNER_H
 #define LLVM_ANALYSIS_MLMODELRUNNER_H
 
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
@@ -41,7 +42,7 @@ public:
         getTensorUntyped(static_cast<size_t>(FeatureID)));
   }
 
-  virtual void *getTensorUntyped(size_t Index) = 0;
+  void *getTensorUntyped(size_t Index) { return InputBuffers[Index]; }
   const void *getTensorUntyped(size_t Index) const {
     return (const_cast<MLModelRunner *>(this))->getTensorUntyped(Index);
   }
@@ -50,13 +51,27 @@ public:
   Kind getKind() const { return Type; }
 
 protected:
-  MLModelRunner(LLVMContext &Ctx, Kind Type) : Ctx(Ctx), Type(Type) {
+  MLModelRunner(LLVMContext &Ctx, Kind Type, size_t NrInputs)
+      : Ctx(Ctx), Type(Type), InputBuffers(NrInputs) {
     assert(Type != Kind::Unknown);
   }
   virtual void *evaluateUntyped() = 0;
 
+  void setUpBufferForTensor(size_t Index, const TensorSpec &Spec,
+                            void *Buffer) {
+    if (!Buffer) {
+      OwnedBuffers.emplace_back(Spec.getTotalTensorBufferSize());
+      Buffer = OwnedBuffers.back().data();
+    }
+    InputBuffers[Index] = Buffer;
+  }
+
   LLVMContext &Ctx;
   const Kind Type;
+
+private:
+  std::vector<void *> InputBuffers;
+  std::vector<std::vector<char>> OwnedBuffers;
 };
 } // namespace llvm
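For orientation, a sketch of how a subclass is expected to use the new protected API (SketchRunner is hypothetical; the real subclasses in this patch follow the same pattern, e.g. NoInferenceModelRunner passes nullptr for every tensor):

// Hypothetical subclass sketch: declare the input count up front, then bind
// each tensor either to model-owned memory (pass its pointer) or to a
// runner-owned buffer (pass nullptr).
class SketchRunner : public MLModelRunner {
public:
  SketchRunner(LLVMContext &Ctx, const std::vector<TensorSpec> &Inputs)
      : MLModelRunner(Ctx, MLModelRunner::Kind::NoOp, Inputs.size()) {
    for (size_t I = 0; I < Inputs.size(); ++I)
      setUpBufferForTensor(I, Inputs[I], /*Buffer=*/nullptr); // runner-owned
  }

private:
  void *evaluateUntyped() override {
    llvm_unreachable("sketch runner has no model to evaluate");
  }
};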
We use it to store feature values when collecting
 /// logs for the default policy, in 'development' mode, but never ask it to
@@ -34,10 +30,6 @@ private:
   void *evaluateUntyped() override {
     llvm_unreachable("We shouldn't call run on this model runner.");
   }
-  void *getTensorUntyped(size_t Index) override;
-
-  std::vector<std::unique_ptr<char[]>> ValuesBuffer;
 };
 } // namespace llvm
 
-#endif // defined(LLVM_HAVE_TF_API)
 #endif // LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
diff --git a/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h b/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
index 6594b26ee6d9..bf1aaca2adbb 100644
--- a/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
+++ b/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
@@ -15,6 +15,7 @@
 #define LLVM_ANALYSIS_RELEASEMODEMODELRUNNER_H
 
 #include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Support/ErrorHandling.h"
 
 #include <memory>
@@ -30,21 +31,20 @@ public:
   /// FeatureNames' type should be an indexed collection of std::string, like
   /// std::array or std::vector, that has a size() method.
   template <class FType>
-  ReleaseModeModelRunner(LLVMContext &Ctx, const FType &FeatureNames,
+  ReleaseModeModelRunner(LLVMContext &Ctx, const FType &InputSpec,
                          StringRef DecisionName, StringRef FeedPrefix = "feed_",
                          StringRef FetchPrefix = "fetch_")
-      : MLModelRunner(Ctx, MLModelRunner::Kind::Release),
+      : MLModelRunner(Ctx, MLModelRunner::Kind::Release, InputSpec.size()),
         CompiledModel(std::make_unique<TGen>()) {
     assert(CompiledModel && "The CompiledModel should be valid");
 
-    const size_t FeatureCount = FeatureNames.size();
-    FeatureIndices.resize(FeatureCount);
-
-    for (size_t I = 0; I < FeatureCount; ++I) {
+    for (size_t I = 0; I < InputSpec.size(); ++I) {
       const int Index =
-          CompiledModel->LookupArgIndex(FeedPrefix.str() + FeatureNames[I]);
-      assert(Index >= 0 && "Cannot find Feature in inlining model");
-      FeatureIndices[I] = Index;
+          CompiledModel->LookupArgIndex(FeedPrefix.str() + InputSpec[I].name());
+      void *Buffer = nullptr;
+      if (Index >= 0)
+        Buffer = CompiledModel->arg_data(Index);
+      setUpBufferForTensor(I, InputSpec[I], Buffer);
     }
 
     ResultIndex = CompiledModel->LookupResultIndex(FetchPrefix.str() +
@@ -64,12 +64,6 @@ private:
     return CompiledModel->result_data(ResultIndex);
   }
 
-  void *getTensorUntyped(size_t Index) override {
-    return reinterpret_cast<char *>(
-        CompiledModel->arg_data(FeatureIndices[Index]));
-  }
-
-  std::vector<int32_t> FeatureIndices;
   int32_t ResultIndex = -1;
   std::unique_ptr<TGen> CompiledModel;
 };
diff --git a/llvm/include/llvm/Analysis/TensorSpec.h b/llvm/include/llvm/Analysis/TensorSpec.h
index e4afcf90a0de..382ab3f10445 100644
--- a/llvm/include/llvm/Analysis/TensorSpec.h
+++ b/llvm/include/llvm/Analysis/TensorSpec.h
@@ -74,6 +74,8 @@ public:
   size_t getElementCount() const { return ElementCount; }
   /// Get the size, in bytes, of one element.
   size_t getElementByteSize() const { return ElementSize; }
+  /// Get the total size of a memory buffer needed to store the whole tensor.
+  size_t getTotalTensorBufferSize() const { return ElementCount * ElementSize; }
 
   template <typename T> bool isElementType() const {
     return getDataType<T>() == Type;
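A worked example of the new accessor, assuming a hypothetical feature name (the arithmetic follows directly from the element count and element size above):

// A float tensor of shape {2, 5}: 10 elements of 4 bytes each.
TensorSpec Spec = TensorSpec::createSpec<float>("some_feature", {2, 5});
assert(Spec.getElementCount() == 10);          // 2 * 5
assert(Spec.getElementByteSize() == 4);        // sizeof(float)
assert(Spec.getTotalTensorBufferSize() == 40); // 10 * 4 bytes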
diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
index 71c74a139a61..79ea160afc22 100644
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -272,8 +272,8 @@ static const std::vector<TensorSpec> TrainingOnlyFeatures{
 static const std::vector<TensorSpec> getInputFeatures() {
   std::vector<TensorSpec> InputSpecs;
   for (size_t I = 0; I < NumberOfFeatures; ++I)
-    InputSpecs.push_back(
-        TensorSpec::createSpec<int64_t>(TFFeedPrefix + FeatureNameMap[I], {1}));
+    InputSpecs.push_back(TensorSpec::createSpec<int64_t>(
+        TFFeedPrefix + FeatureMap[I].name(), FeatureMap[I].shape()));
   append_range(InputSpecs, TrainingOnlyFeatures);
   return InputSpecs;
 }
@@ -289,8 +289,7 @@ TrainingLogger::TrainingLogger(StringRef LogFileName,
 
   std::vector<LoggedFeatureSpec> FT;
   for (size_t I = 0; I < NumberOfFeatures; ++I)
-    FT.push_back(
-        {TensorSpec::createSpec<int64_t>(FeatureNameMap.at(I), {1}), None});
+    FT.push_back({FeatureMap.at(I), None});
   if (MUTR && MUTR->outputLoggedFeatureSpecs().size() > 1)
     append_range(FT, drop_begin(MUTR->outputLoggedFeatureSpecs()));
diff --git a/llvm/lib/Analysis/MLInlineAdvisor.cpp b/llvm/lib/Analysis/MLInlineAdvisor.cpp
index 2459db705da4..cc454dd3687d 100644
--- a/llvm/lib/Analysis/MLInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/MLInlineAdvisor.cpp
@@ -37,7 +37,7 @@ std::unique_ptr<InlineAdvisor>
 llvm::getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM) {
   auto AOTRunner =
       std::make_unique<ReleaseModeModelRunner<llvm::InlinerSizeModel>>(
-          M.getContext(), FeatureNameMap, DecisionName);
+          M.getContext(), FeatureMap, DecisionName);
   return std::make_unique<MLInlineAdvisor>(M, MAM, std::move(AOTRunner));
 }
 #endif
@@ -51,14 +51,14 @@ static cl::opt<float> SizeIncreaseThreshold(
     cl::init(2.0));
 
 // clang-format off
-const std::array<std::string, NumberOfFeatures> llvm::FeatureNameMap{
+const std::array<TensorSpec, NumberOfFeatures> llvm::FeatureMap{
+#define POPULATE_NAMES(_, NAME) TensorSpec::createSpec<int64_t>(NAME, {1} ),
 // InlineCost features - these must come first
-#define POPULATE_NAMES(INDEX_NAME, NAME) NAME,
   INLINE_COST_FEATURE_ITERATOR(POPULATE_NAMES)
 #undef POPULATE_NAMES
 
 // Non-cost features
-#define POPULATE_NAMES(INDEX_NAME, NAME, COMMENT) NAME,
+#define POPULATE_NAMES(_, NAME, __) TensorSpec::createSpec<int64_t>(NAME, {1} ),
   INLINE_FEATURE_ITERATOR(POPULATE_NAMES)
 #undef POPULATE_NAMES
 };
@@ -364,7 +364,7 @@ void MLInlineAdvice::reportContextForRemark(
   using namespace ore;
   OR << NV("Callee", Callee->getName());
   for (size_t I = 0; I < NumberOfFeatures; ++I)
-    OR << NV(FeatureNameMap[I],
+    OR << NV(FeatureMap[I].name(),
              *getAdvisor()->getModelRunner().getTensor<int64_t>(I));
   OR << NV("ShouldInline", isInliningRecommended());
 }
diff --git a/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
index fab51d6a7aaf..d3cbfeda3ca1 100644
--- a/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
+++ b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
@@ -11,6 +11,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Config/config.h"
 #if defined(LLVM_HAVE_TF_API)
@@ -22,7 +23,7 @@ ModelUnderTrainingRunner::ModelUnderTrainingRunner(
     LLVMContext &Ctx, const std::string &ModelPath,
     const std::vector<TensorSpec> &InputSpecs,
     const std::vector<LoggedFeatureSpec> &OutputSpecs)
-    : MLModelRunner(Ctx, MLModelRunner::Kind::Development),
+    : MLModelRunner(Ctx, MLModelRunner::Kind::Development, InputSpecs.size()),
       OutputSpecs(OutputSpecs) {
   Evaluator = std::make_unique<TFModelEvaluator>(
       ModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I].Spec; },
@@ -32,6 +33,10 @@ ModelUnderTrainingRunner::ModelUnderTrainingRunner(
     Evaluator.reset();
     return;
   }
+
+  for (size_t I = 0, E = InputSpecs.size(); I < E; ++I) {
+    setUpBufferForTensor(I, InputSpecs[I], Evaluator->getUntypedInput(I));
+  }
 }
 
 void *ModelUnderTrainingRunner::evaluateUntyped() {
@@ -43,24 +48,31 @@ void *ModelUnderTrainingRunner::evaluateUntyped() {
   return LastEvaluationResult->getUntypedTensorValue(0);
 }
 
-void *ModelUnderTrainingRunner::getTensorUntyped(size_t Index) {
-  return Evaluator->getUntypedInput(Index);
+std::unique_ptr<ModelUnderTrainingRunner>
+ModelUnderTrainingRunner::createAndEnsureValid(
+    LLVMContext &Ctx, const std::string &ModelPath, StringRef DecisionName,
+    const std::vector<TensorSpec> &InputSpecs,
+    StringRef OutputSpecsPathOverride) {
+  if (auto MaybeOutputSpecs = loadOutputSpecs(Ctx, DecisionName, ModelPath,
+                                              OutputSpecsPathOverride))
+    return createAndEnsureValid(Ctx, ModelPath, DecisionName, InputSpecs,
+                                *MaybeOutputSpecs);
+  Ctx.emitError("Could not load the policy model from the provided path");
+  return nullptr;
 }
 
 std::unique_ptr<ModelUnderTrainingRunner>
 ModelUnderTrainingRunner::createAndEnsureValid(
     LLVMContext &Ctx, const std::string &ModelPath, StringRef DecisionName,
     const std::vector<TensorSpec> &InputSpecs,
-    StringRef OutputSpecsPathOverride) {
+    const std::vector<LoggedFeatureSpec> &OutputSpecs) {
   std::unique_ptr<ModelUnderTrainingRunner> MUTR;
-  if (auto MaybeOutputSpecs = loadOutputSpecs(Ctx, DecisionName, ModelPath,
-                                              OutputSpecsPathOverride))
-    MUTR.reset(new ModelUnderTrainingRunner(Ctx, ModelPath, InputSpecs,
-                                            *MaybeOutputSpecs));
+  MUTR.reset(
+      new ModelUnderTrainingRunner(Ctx, ModelPath, InputSpecs, OutputSpecs));
   if (MUTR && MUTR->isValid())
     return MUTR;
 
-  Ctx.emitError("Could not load the policy model from the provided path");
+  Ctx.emitError("Could not load or create model evaluator.");
   return nullptr;
 }
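A call-site sketch contrasting the two factories; the model path and the in-scope Ctx, DecisionName, and InputSpecs names are assumed for illustration. The StringRef overload still resolves output specs from disk; the new overload lets callers (e.g. the test in TFUtilsTest.cpp below) inject them directly:

// Production-style: output specs resolved from the model directory.
auto FromDisk = ModelUnderTrainingRunner::createAndEnsureValid(
    Ctx, "path/to/model", DecisionName, InputSpecs,
    /*OutputSpecsPathOverride=*/"");

// Test-style: output specs supplied by the caller.
auto Injected = ModelUnderTrainingRunner::createAndEnsureValid(
    Ctx, "path/to/model", DecisionName, InputSpecs,
    {LoggedFeatureSpec{TensorSpec::createSpec<float>(DecisionName, {1}),
                       None}});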
//===----------------------------------------------------------------------===//
-#include "llvm/Config/config.h"
-#if defined(LLVM_HAVE_TF_API)
-
 #include "llvm/Analysis/NoInferenceModelRunner.h"
-#include "llvm/Analysis/Utils/TFUtils.h"
 
 using namespace llvm;
 
 NoInferenceModelRunner::NoInferenceModelRunner(
     LLVMContext &Ctx, const std::vector<TensorSpec> &Inputs)
-    : MLModelRunner(Ctx, MLModelRunner::Kind::NoOp) {
-  ValuesBuffer.reserve(Inputs.size());
+    : MLModelRunner(Ctx, MLModelRunner::Kind::NoOp, Inputs.size()) {
+  size_t Index = 0;
   for (const auto &TS : Inputs)
-    ValuesBuffer.push_back(std::make_unique<char[]>(TS.getElementCount() *
-                                                    TS.getElementByteSize()));
+    setUpBufferForTensor(Index++, TS, nullptr);
 }
-
-void *NoInferenceModelRunner::getTensorUntyped(size_t Index) {
-  return ValuesBuffer[Index].get();
-}
-#endif // defined(LLVM_HAVE_TF_API)
diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp
index 3d4ef160824a..203858c1cf06 100644
--- a/llvm/lib/Analysis/TFUtils.cpp
+++ b/llvm/lib/Analysis/TFUtils.cpp
@@ -300,16 +300,29 @@ TFModelEvaluatorImpl::TFModelEvaluatorImpl(
     errs() << TF_Message(Status.get());
     invalidate();
   }
+  size_t NrSupported = 0;
   for (size_t I = 0; I < InputSpecs.size(); ++I) {
     auto &InputSpec = InputSpecs[I];
     InputFeed[I] = {
         TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
         InputSpec.port()};
+    if (!InputFeed[I].oper) {
+      continue;
+    }
+    if (NrSupported++ != I) {
+      errs()
+          << "Unsupported features must be placed at the end of the InputSpecs";
+      invalidate();
+      return;
+    }
     if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
       return;
     initInput(I, static_cast<TF_DataType>(getTFTypeIndex(InputSpec.type())),
               InputSpec.shape());
   }
+  InputFeed.resize(NrSupported);
+  Input.resize(NrSupported);
+
   for (size_t I = 0; I < OutputSpecsSize; ++I) {
     auto OutputSpec = GetOutputSpecs(I);
     OutputFeed[I] = {
@@ -387,7 +400,9 @@ void TFModelEvaluatorImpl::initInput(size_t Index, TF_DataType Type,
 }
 
 void *TFModelEvaluator::getUntypedInput(size_t Index) {
-  return TF_TensorData(Impl->getInput()[Index]);
+  if (Index < Impl->getInput().size())
+    return TF_TensorData(Impl->getInput()[Index]);
+  return nullptr;
 }
 
 TFModelEvaluator::EvaluationResult::EvaluationResult(
diff --git a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
index e01838e5fe11..7daf9025d303 100644
--- a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
+++ b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
@@ -15,6 +15,7 @@
 #include "RegAllocGreedy.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Analysis/TensorSpec.h"
 #if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL) || defined(LLVM_HAVE_TF_API)
 #include "llvm/Analysis/ModelUnderTrainingRunner.h"
 #include "llvm/Analysis/NoInferenceModelRunner.h"
@@ -320,14 +321,16 @@ private:
   mutable DenseMap<RegID, LIFeatureComponents> CachedFeatures;
 };
 
+#define _DECL_FEATURES(type, name, shape, _)                                  \
+  TensorSpec::createSpec<type>(#name, shape),
+
+static const std::vector<TensorSpec> InputFeatures{
+    {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)},
+};
+#undef _DECL_FEATURES
 // ===================================
 // Release (AOT) - specifics
 // ===================================
-const std::array<std::string, FeatureIDs::FeatureCount> FeatureNames{
-#define _GETNAME(_, NAME, __, ___) #NAME,
-    RA_EVICT_FEATURES_LIST(_GETNAME)
-#undef _GETNAME
-};
 class ReleaseModeEvictionAdvisorAnalysis final
     : public RegAllocEvictionAdvisorAnalysis {
 public:
@@ -349,7 +352,7 @@ private:
   getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
     if (!Runner)
       Runner = std::make_unique<ReleaseModeModelRunner<CompiledModelType>>(
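The ordering rule enforced in TFModelEvaluatorImpl above, distilled into a standalone sketch (hasValidFeatureOrder is hypothetical; Supported[I] stands for TF_GraphOperationByName finding the I-th feature):

#include <vector>

// Supported features must form a prefix of the spec list; once one feature
// is missing from the graph, everything after it must be missing too.
bool hasValidFeatureOrder(const std::vector<bool> &Supported) {
  size_t NrSupported = 0;
  for (size_t I = 0; I < Supported.size(); ++I) {
    if (!Supported[I])
      continue; // an unsupported feature; only legal in the trailing suffix
    if (NrSupported++ != I)
      return false; // a supported feature after an unsupported one
  }
  return true;
}

// hasValidFeatureOrder({true, true, false}) -> true  (accepted)
// hasValidFeatureOrder({true, false, true}) -> false (evaluator invalidated)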
-          MF.getFunction().getContext(), FeatureNames, DecisionName);
+          MF.getFunction().getContext(), InputFeatures, DecisionName);
     return std::make_unique<MLEvictAdvisor>(
         MF, RA, Runner.get(), getAnalysis<MachineBlockFrequencyInfo>(),
         getAnalysis<MachineLoopInfo>());
@@ -363,13 +366,6 @@ private:
 //
 // Features we log
 #ifdef LLVM_HAVE_TF_API
-#define _DECL_FEATURES(type, name, shape, _)                                  \
-  TensorSpec::createSpec<type>(#name, shape),
-
-static const std::vector<TensorSpec> InputFeatures{
-    {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)},
-};
-#undef _DECL_FEATURES
 static const TensorSpec Output =
     TensorSpec::createSpec<int64_t>(DecisionName, {1});
 static const TensorSpec Reward = TensorSpec::createSpec<float>("reward", {1});
diff --git a/llvm/unittests/Analysis/CMakeLists.txt b/llvm/unittests/Analysis/CMakeLists.txt
index 0656b82af4d5..ed2cb81055b3 100644
--- a/llvm/unittests/Analysis/CMakeLists.txt
+++ b/llvm/unittests/Analysis/CMakeLists.txt
@@ -6,7 +6,7 @@ set(LLVM_LINK_COMPONENTS
   TransformUtils
   )
 
-set(MLGO_TESTS TFUtilsTest.cpp MLModelRunnerTest.cpp)
+set(MLGO_TESTS TFUtilsTest.cpp)
 if (DEFINED LLVM_HAVE_TF_API)
   LIST(APPEND EXTRA_TESTS ${MLGO_TESTS})
 else()
@@ -39,6 +39,7 @@ add_llvm_unittest_with_input_files(AnalysisTests
   LoopNestTest.cpp
   MemoryBuiltinsTest.cpp
   MemorySSATest.cpp
+  MLModelRunnerTest.cpp
   PhiValuesTest.cpp
   ProfileSummaryInfoTest.cpp
   ScalarEvolutionTest.cpp
diff --git a/llvm/unittests/Analysis/MLModelRunnerTest.cpp b/llvm/unittests/Analysis/MLModelRunnerTest.cpp
index 9794365ca51c..05dadccbe1e7 100644
--- a/llvm/unittests/Analysis/MLModelRunnerTest.cpp
+++ b/llvm/unittests/Analysis/MLModelRunnerTest.cpp
@@ -8,10 +8,49 @@
 
 #include "llvm/Analysis/MLModelRunner.h"
 #include "llvm/Analysis/NoInferenceModelRunner.h"
+#include "llvm/Analysis/ReleaseModeModelRunner.h"
 
 #include "gtest/gtest.h"
 
 using namespace llvm;
 
+namespace llvm {
+// This is a mock of the kind of AOT-generated model evaluator. It has 2
+// tensors of shape {1}, and 'evaluation' adds them.
+// The interface is the one expected by ReleaseModeModelRunner.
+class MockAOTModel final {
+  int64_t A = 0;
+  int64_t B = 0;
+  int64_t R = 0;
+
+public:
+  MockAOTModel() = default;
+  int LookupArgIndex(const std::string &Name) {
+    if (Name == "prefix_a")
+      return 0;
+    if (Name == "prefix_b")
+      return 1;
+    return -1;
+  }
+  int LookupResultIndex(const std::string &) { return 0; }
+  void Run() { R = A + B; }
+  void *result_data(int RIndex) {
+    if (RIndex == 0)
+      return &R;
+    return nullptr;
+  }
+  void *arg_data(int Index) {
+    switch (Index) {
+    case 0:
+      return &A;
+    case 1:
+      return &B;
+    default:
+      return nullptr;
+    }
+  }
+};
+} // namespace llvm
+
 TEST(NoInferenceModelRunner, AccessTensors) {
   const std::vector<TensorSpec> Inputs{
       TensorSpec::createSpec<int64_t>("F1", {1}),
@@ -30,4 +69,51 @@ TEST(NoInferenceModelRunner, AccessTensors) {
   ASSERT_EQ(NIMR.getTensor<int64_t>(0)[0], 1);
   ASSERT_EQ(NIMR.getTensor<int64_t>(1)[8], 9);
   ASSERT_EQ(NIMR.getTensor<float>(2)[1], 0.2f);
-}
\ No newline at end of file
+}
+
+TEST(ReleaseModeRunner, NormalUse) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
+                                 TensorSpec::createSpec<int64_t>("b", {1})};
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
+      Ctx, Inputs, "", "prefix_");
+  *Evaluator->getTensor<int64_t>(0) = 1;
+  *Evaluator->getTensor<int64_t>(1) = 2;
+  EXPECT_EQ(Evaluator->evaluate<int64_t>(), 3);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
+}
+
+TEST(ReleaseModeRunner, ExtraFeatures) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
+                                 TensorSpec::createSpec<int64_t>("b", {1}),
+                                 TensorSpec::createSpec<int64_t>("c", {1})};
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
+      Ctx, Inputs, "", "prefix_");
+  *Evaluator->getTensor<int64_t>(0) = 1;
+  *Evaluator->getTensor<int64_t>(1) = 2;
+  *Evaluator->getTensor<int64_t>(2) = -3;
+  EXPECT_EQ(Evaluator->evaluate<int64_t>(), 3);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(2), -3);
+}
+
+TEST(ReleaseModeRunner, ExtraFeaturesOutOfOrder) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{
+      TensorSpec::createSpec<int64_t>("a", {1}),
+      TensorSpec::createSpec<int64_t>("c", {1}),
+      TensorSpec::createSpec<int64_t>("b", {1}),
+  };
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
+      Ctx, Inputs, "", "prefix_");
+  *Evaluator->getTensor<int64_t>(0) = 1;  // a
+  *Evaluator->getTensor<int64_t>(1) = 2;  // c
+  *Evaluator->getTensor<int64_t>(2) = -3; // b
+  EXPECT_EQ(Evaluator->evaluate<int64_t>(), -2); // a + b
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(2), -3);
+}
\ No newline at end of file
diff --git a/llvm/unittests/Analysis/TFUtilsTest.cpp b/llvm/unittests/Analysis/TFUtilsTest.cpp
index a1495e9b6bbf..6ec129cf413d 100644
--- a/llvm/unittests/Analysis/TFUtilsTest.cpp
+++ b/llvm/unittests/Analysis/TFUtilsTest.cpp
@@ -10,6 +10,8 @@
 #include "google/protobuf/struct.pb.h"
 #include "tensorflow/core/example/example.pb.h"
 #include "tensorflow/core/example/feature.pb.h"
+#include "llvm/Analysis/ModelUnderTrainingRunner.h"
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/AsmParser/Parser.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Instructions.h"
@@ -102,6 +104,36 @@ TEST(TFUtilsTest, EvalError) {
   EXPECT_FALSE(Evaluator.isValid());
 }
 
+TEST(TFUtilsTest, UnsupportedFeature) {
+  const static int64_t KnownSize = 214;
+  std::vector<TensorSpec> InputSpecs{
+      TensorSpec::createSpec<int32_t>("serving_default_input_1",
+                                      {1, KnownSize}),
+      TensorSpec::createSpec<float>("this_feature_does_not_exist", {2, 5})};
+
+  LLVMContext Ctx;
+  auto Evaluator = ModelUnderTrainingRunner::createAndEnsureValid(
+      Ctx, getModelPath(), "StatefulPartitionedCall", InputSpecs,
+      {LoggedFeatureSpec{
+          TensorSpec::createSpec<float>("StatefulPartitionedCall", {1}),
+          None}});
+  int32_t *V = Evaluator->getTensor<int32_t>(0);
+  // Fill it up with 1s, we know the output.
+  for (auto I = 0; I < KnownSize; ++I)
+    V[I] = 1;
+
+  float *F = Evaluator->getTensor<float>(1);
+  for (auto I = 0; I < 2 * 5; ++I)
+    F[I] = 3.14 + I;
+  float Ret = Evaluator->evaluate<float>();
+  EXPECT_EQ(static_cast<int64_t>(Ret), 80);
+  // The input vector should be unchanged.
+  for (auto I = 0; I < KnownSize; ++I)
+    EXPECT_EQ(V[I], 1);
+  for (auto I = 0; I < 2 * 5; ++I)
+    EXPECT_FLOAT_EQ(F[I], 3.14 + I);
+}
+
 #define PROTO_CHECKER(FNAME, TYPE, INDEX, EXP)                                \
   do {                                                                        \
     const auto &V = Expected.feature_lists()                                  \