[mlgo] Support exposing more features than those supported by models
This allows the compiler to expose more features than a given model supports. The only requirement (development mode only) is that the new features be appended at the end of the list of features requested from the model. The support is transparent to compiler code: for unsupported features, we provide a valid buffer into which to copy their values; that buffer is simply disconnected from the model, so as far as the model is concerned (AOT or development mode), these features don't exist. The buffers are allocated at setup, meaning that at steady state there is no extra allocation (maintaining the current invariant). These buffers have two roles: first, they keep the compiler code simple; second, they allow logging their values in development mode. The latter makes it possible to retrain a model supporting the larger feature set starting from traces produced with the old model.

For release mode (AOT-ed models), this decouples compiler evolution from model evolution, which we want in scenarios where the toolchain is frequently rebuilt and redeployed: we can deploy the new features first and continue working with the older model until a new model is made available, which is then picked up the next time the compiler is built.

Differential Revision: https://reviews.llvm.org/D124565
parent: eef76f9821
commit: c35ad9ee4f
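The core of the change is easiest to see in isolation. Below is a minimal, self-contained sketch of the buffer-routing idea, using simplified stand-in types rather than the exact LLVM classes (ToyRunner and its simplified signatures are illustrative names, not part of the patch):

#include <cstddef>
#include <vector>

// Sketch of the mechanism: every requested feature gets a valid buffer.
// Features the underlying model knows about alias the model's own input
// storage; unknown features get a compiler-owned buffer that the model
// never reads, so writes to it are harmless and still loggable.
struct ToyRunner {
  explicit ToyRunner(size_t NrInputs) : InputBuffers(NrInputs) {}

  // ModelBuffer is null when the model does not support the feature.
  void setUpBufferForTensor(size_t Index, size_t ByteSize, void *ModelBuffer) {
    if (!ModelBuffer) { // allocated once, at setup - no steady-state cost
      OwnedBuffers.emplace_back(ByteSize);
      ModelBuffer = OwnedBuffers.back().data();
    }
    InputBuffers[Index] = ModelBuffer;
  }

  void *getTensorUntyped(size_t Index) { return InputBuffers[Index]; }

private:
  std::vector<void *> InputBuffers;        // one entry per requested feature
  std::vector<std::vector<char>> OwnedBuffers; // backing storage for extras
};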
diff --git a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
@@ -10,6 +10,8 @@
 #ifndef LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
 #define LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
 
+#include "llvm/Analysis/TensorSpec.h"
+
 #include <array>
 #include <string>
 #include <vector>
@@ -127,7 +129,7 @@ inlineCostFeatureToMlFeature(InlineCostFeatureIndex Feature) {
 constexpr size_t NumberOfFeatures =
     static_cast<size_t>(FeatureIndex::NumberOfFeatures);
 
-extern const std::array<std::string, NumberOfFeatures> FeatureNameMap;
+extern const std::array<TensorSpec, NumberOfFeatures> FeatureMap;
 
 extern const char *const DecisionName;
 extern const char *const DefaultDecisionName;
diff --git a/llvm/include/llvm/Analysis/MLModelRunner.h b/llvm/include/llvm/Analysis/MLModelRunner.h
@@ -10,6 +10,7 @@
 #ifndef LLVM_ANALYSIS_MLMODELRUNNER_H
 #define LLVM_ANALYSIS_MLMODELRUNNER_H
 
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
@@ -41,7 +42,7 @@ public:
         getTensorUntyped(static_cast<size_t>(FeatureID)));
   }
 
-  virtual void *getTensorUntyped(size_t Index) = 0;
+  void *getTensorUntyped(size_t Index) { return InputBuffers[Index]; }
   const void *getTensorUntyped(size_t Index) const {
     return (const_cast<MLModelRunner *>(this))->getTensorUntyped(Index);
   }
@@ -50,13 +51,27 @@ public:
   Kind getKind() const { return Type; }
 
 protected:
-  MLModelRunner(LLVMContext &Ctx, Kind Type) : Ctx(Ctx), Type(Type) {
+  MLModelRunner(LLVMContext &Ctx, Kind Type, size_t NrInputs)
+      : Ctx(Ctx), Type(Type), InputBuffers(NrInputs) {
     assert(Type != Kind::Unknown);
   }
   virtual void *evaluateUntyped() = 0;
 
+  void setUpBufferForTensor(size_t Index, const TensorSpec &Spec,
+                            void *Buffer) {
+    if (!Buffer) {
+      OwnedBuffers.emplace_back(Spec.getTotalTensorBufferSize());
+      Buffer = OwnedBuffers.back().data();
+    }
+    InputBuffers[Index] = Buffer;
+  }
+
   LLVMContext &Ctx;
   const Kind Type;
+
+private:
+  std::vector<void *> InputBuffers;
+  std::vector<std::vector<char *>> OwnedBuffers;
 };
 } // namespace llvm
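With this base-class change, getTensorUntyped is no longer virtual: a subclass only reports its input count to the constructor and hands setUpBufferForTensor each tensor's backing storage, or nullptr to request an owned buffer. A hypothetical minimal subclass, for illustration only (the patch's real subclasses appear in the files below):

// Illustrative only: the smallest runner the new protocol allows.
class ToyModelRunner : public MLModelRunner {
public:
  ToyModelRunner(LLVMContext &Ctx, const std::vector<TensorSpec> &Inputs)
      : MLModelRunner(Ctx, Kind::NoOp, Inputs.size()) {
    for (size_t I = 0; I < Inputs.size(); ++I)
      setUpBufferForTensor(I, Inputs[I], /*Buffer=*/nullptr); // all owned
  }

private:
  void *evaluateUntyped() override {
    llvm_unreachable("Toy runner never evaluates");
  }
};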
diff --git a/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
@@ -10,6 +10,7 @@
 #ifndef LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
 #define LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
 
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Config/llvm-config.h"
 
 #ifdef LLVM_HAVE_TF_API
@@ -48,6 +49,11 @@ public:
                        StringRef DecisionName,
                        const std::vector<TensorSpec> &InputSpecs,
                        StringRef OutputSpecsPathOverride = "");
+  static std::unique_ptr<ModelUnderTrainingRunner>
+  createAndEnsureValid(LLVMContext &Ctx, const std::string &ModelPath,
+                       StringRef DecisionName,
+                       const std::vector<TensorSpec> &InputSpecs,
+                       const std::vector<LoggedFeatureSpec> &OutputSpecs);
 
 private:
   ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath,
@@ -58,7 +64,6 @@ private:
   const std::vector<LoggedFeatureSpec> OutputSpecs;
   Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
   void *evaluateUntyped() override;
-  void *getTensorUntyped(size_t Index) override;
   bool isValid() const { return !!Evaluator; }
 };
 
diff --git a/llvm/include/llvm/Analysis/NoInferenceModelRunner.h b/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
@@ -10,13 +10,9 @@
 #ifndef LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
 #define LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
 
-#include "llvm/Config/llvm-config.h"
-
-/// While not strictly necessary to conditionally compile this, it really
-/// has no usecase outside the 'development' mode.
-#ifdef LLVM_HAVE_TF_API
 #include "llvm/Analysis/MLModelRunner.h"
-#include "llvm/Analysis/Utils/TFUtils.h"
+#include "llvm/Analysis/TensorSpec.h"
+#include "llvm/Config/llvm-config.h"
 namespace llvm {
 /// A pseudo model runner. We use it to store feature values when collecting
 /// logs for the default policy, in 'development' mode, but never ask it to
@@ -34,10 +30,6 @@ private:
   void *evaluateUntyped() override {
     llvm_unreachable("We shouldn't call run on this model runner.");
   }
-  void *getTensorUntyped(size_t Index) override;
-
-  std::vector<std::unique_ptr<char[]>> ValuesBuffer;
 };
 } // namespace llvm
-#endif // defined(LLVM_HAVE_TF_API)
 #endif // LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
diff --git a/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h b/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
@@ -15,6 +15,7 @@
 #define LLVM_ANALYSIS_RELEASEMODEMODELRUNNER_H
 
 #include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Support/ErrorHandling.h"
 
 #include <memory>
@@ -30,21 +31,20 @@ public:
   /// FeatureNames' type should be an indexed collection of std::string, like
   /// std::array or std::vector, that has a size() method.
   template <class FType>
-  ReleaseModeModelRunner(LLVMContext &Ctx, const FType &FeatureNames,
+  ReleaseModeModelRunner(LLVMContext &Ctx, const FType &InputSpec,
                          StringRef DecisionName, StringRef FeedPrefix = "feed_",
                          StringRef FetchPrefix = "fetch_")
-      : MLModelRunner(Ctx, MLModelRunner::Kind::Release),
+      : MLModelRunner(Ctx, MLModelRunner::Kind::Release, InputSpec.size()),
         CompiledModel(std::make_unique<TGen>()) {
     assert(CompiledModel && "The CompiledModel should be valid");
 
-    const size_t FeatureCount = FeatureNames.size();
-    FeatureIndices.resize(FeatureCount);
-
-    for (size_t I = 0; I < FeatureCount; ++I) {
+    for (size_t I = 0; I < InputSpec.size(); ++I) {
       const int Index =
-          CompiledModel->LookupArgIndex(FeedPrefix.str() + FeatureNames[I]);
-      assert(Index >= 0 && "Cannot find Feature in inlining model");
-      FeatureIndices[I] = Index;
+          CompiledModel->LookupArgIndex(FeedPrefix.str() + InputSpec[I].name());
+      void *Buffer = nullptr;
+      if (Index >= 0)
+        Buffer = CompiledModel->arg_data(Index);
+      setUpBufferForTensor(I, InputSpec[I], Buffer);
     }
 
     ResultIndex = CompiledModel->LookupResultIndex(FetchPrefix.str() +
@@ -64,12 +64,6 @@ private:
     return CompiledModel->result_data(ResultIndex);
   }
 
-  void *getTensorUntyped(size_t Index) override {
-    return reinterpret_cast<char *>(
-        CompiledModel->arg_data(FeatureIndices[Index]));
-  }
-
-  std::vector<int32_t> FeatureIndices;
   int32_t ResultIndex = -1;
   std::unique_ptr<TGen> CompiledModel;
 };
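In caller terms, the constructor change means a feature the compiled model has no argument for no longer trips an assert; it is silently routed to an owned buffer. A short sketch, using the MockAOTModel test stub introduced at the end of this patch (which only understands features "a" and "b"):

LLVMContext Ctx;
std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
                               TensorSpec::createSpec<int64_t>("b", {1}),
                               TensorSpec::createSpec<int64_t>("c", {1})};
ReleaseModeModelRunner<MockAOTModel> Runner(Ctx, Inputs, "", "prefix_");
*Runner.getTensor<int64_t>(2) = 42; // "c": owned buffer; model output unaffected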
diff --git a/llvm/include/llvm/Analysis/TensorSpec.h b/llvm/include/llvm/Analysis/TensorSpec.h
@@ -74,6 +74,8 @@ public:
   size_t getElementCount() const { return ElementCount; }
   /// Get the size, in bytes, of one element.
   size_t getElementByteSize() const { return ElementSize; }
+  /// Get the total size of a memory buffer needed to store the whole tensor.
+  size_t getTotalTensorBufferSize() const { return ElementCount * ElementSize; }
 
   template <typename T> bool isElementType() const {
     return getDataType<T>() == Type;
diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -272,8 +272,8 @@ static const std::vector<TensorSpec> TrainingOnlyFeatures{
 static const std::vector<TensorSpec> getInputFeatures() {
   std::vector<TensorSpec> InputSpecs;
   for (size_t I = 0; I < NumberOfFeatures; ++I)
-    InputSpecs.push_back(
-        TensorSpec::createSpec<int64_t>(TFFeedPrefix + FeatureNameMap[I], {1}));
+    InputSpecs.push_back(TensorSpec::createSpec<int64_t>(
+        TFFeedPrefix + FeatureMap[I].name(), FeatureMap[I].shape()));
   append_range(InputSpecs, TrainingOnlyFeatures);
   return InputSpecs;
 }
@@ -289,8 +289,7 @@ TrainingLogger::TrainingLogger(StringRef LogFileName,
   std::vector<LoggedFeatureSpec> FT;
 
   for (size_t I = 0; I < NumberOfFeatures; ++I)
-    FT.push_back(
-        {TensorSpec::createSpec<int64_t>(FeatureNameMap.at(I), {1}), None});
+    FT.push_back({FeatureMap.at(I), None});
   if (MUTR && MUTR->outputLoggedFeatureSpecs().size() > 1)
     append_range(FT, drop_begin(MUTR->outputLoggedFeatureSpecs()));
 
diff --git a/llvm/lib/Analysis/MLInlineAdvisor.cpp b/llvm/lib/Analysis/MLInlineAdvisor.cpp
@@ -37,7 +37,7 @@ std::unique_ptr<InlineAdvisor>
 llvm::getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM) {
   auto AOTRunner =
       std::make_unique<ReleaseModeModelRunner<llvm::InlinerSizeModel>>(
-          M.getContext(), FeatureNameMap, DecisionName);
+          M.getContext(), FeatureMap, DecisionName);
   return std::make_unique<MLInlineAdvisor>(M, MAM, std::move(AOTRunner));
 }
 #endif
@@ -51,14 +51,14 @@ static cl::opt<float> SizeIncreaseThreshold(
     cl::init(2.0));
 
 // clang-format off
-const std::array<std::string, NumberOfFeatures> llvm::FeatureNameMap{
+const std::array<TensorSpec, NumberOfFeatures> llvm::FeatureMap{
+#define POPULATE_NAMES(_, NAME) TensorSpec::createSpec<int64_t>(NAME, {1} ),
 // InlineCost features - these must come first
-#define POPULATE_NAMES(INDEX_NAME, NAME) NAME,
   INLINE_COST_FEATURE_ITERATOR(POPULATE_NAMES)
 #undef POPULATE_NAMES
 
 // Non-cost features
-#define POPULATE_NAMES(INDEX_NAME, NAME, COMMENT) NAME,
+#define POPULATE_NAMES(_, NAME, __) TensorSpec::createSpec<int64_t>(NAME, {1} ),
   INLINE_FEATURE_ITERATOR(POPULATE_NAMES)
 #undef POPULATE_NAMES
 };
@@ -364,7 +364,7 @@ void MLInlineAdvice::reportContextForRemark(
   using namespace ore;
   OR << NV("Callee", Callee->getName());
   for (size_t I = 0; I < NumberOfFeatures; ++I)
-    OR << NV(FeatureNameMap[I],
+    OR << NV(FeatureMap[I].name(),
              *getAdvisor()->getModelRunner().getTensor<int64_t>(I));
   OR << NV("ShouldInline", isInliningRecommended());
 }
diff --git a/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
@@ -11,6 +11,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Config/config.h"
 #if defined(LLVM_HAVE_TF_API)
 
@@ -22,7 +23,7 @@ ModelUnderTrainingRunner::ModelUnderTrainingRunner(
     LLVMContext &Ctx, const std::string &ModelPath,
     const std::vector<TensorSpec> &InputSpecs,
     const std::vector<LoggedFeatureSpec> &OutputSpecs)
-    : MLModelRunner(Ctx, MLModelRunner::Kind::Development),
+    : MLModelRunner(Ctx, MLModelRunner::Kind::Development, InputSpecs.size()),
       OutputSpecs(OutputSpecs) {
   Evaluator = std::make_unique<TFModelEvaluator>(
       ModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I].Spec; },
@@ -32,6 +33,10 @@ ModelUnderTrainingRunner::ModelUnderTrainingRunner(
     Evaluator.reset();
     return;
   }
+
+  for (size_t I = 0, E = InputSpecs.size(); I < E; ++I) {
+    setUpBufferForTensor(I, InputSpecs[I], Evaluator->getUntypedInput(I));
+  }
 }
 
 void *ModelUnderTrainingRunner::evaluateUntyped() {
@@ -43,24 +48,31 @@ void *ModelUnderTrainingRunner::evaluateUntyped() {
   return LastEvaluationResult->getUntypedTensorValue(0);
 }
 
-void *ModelUnderTrainingRunner::getTensorUntyped(size_t Index) {
-  return Evaluator->getUntypedInput(Index);
+std::unique_ptr<ModelUnderTrainingRunner>
+ModelUnderTrainingRunner::createAndEnsureValid(
+    LLVMContext &Ctx, const std::string &ModelPath, StringRef DecisionName,
+    const std::vector<TensorSpec> &InputSpecs,
+    StringRef OutputSpecsPathOverride) {
+  if (auto MaybeOutputSpecs = loadOutputSpecs(Ctx, DecisionName, ModelPath,
+                                              OutputSpecsPathOverride))
+    return createAndEnsureValid(Ctx, ModelPath, DecisionName, InputSpecs,
+                                *MaybeOutputSpecs);
+  Ctx.emitError("Could not load the policy model from the provided path");
+  return nullptr;
 }
 
 std::unique_ptr<ModelUnderTrainingRunner>
 ModelUnderTrainingRunner::createAndEnsureValid(
     LLVMContext &Ctx, const std::string &ModelPath, StringRef DecisionName,
     const std::vector<TensorSpec> &InputSpecs,
-    StringRef OutputSpecsPathOverride) {
+    const std::vector<LoggedFeatureSpec> &OutputSpecs) {
   std::unique_ptr<ModelUnderTrainingRunner> MUTR;
-  if (auto MaybeOutputSpecs = loadOutputSpecs(Ctx, DecisionName, ModelPath,
-                                              OutputSpecsPathOverride))
-    MUTR.reset(new ModelUnderTrainingRunner(Ctx, ModelPath, InputSpecs,
-                                            *MaybeOutputSpecs));
+  MUTR.reset(
+      new ModelUnderTrainingRunner(Ctx, ModelPath, InputSpecs, OutputSpecs));
   if (MUTR && MUTR->isValid())
     return MUTR;
 
-  Ctx.emitError("Could not load the policy model from the provided path");
+  Ctx.emitError("Could not load or create model evaluator.");
   return nullptr;
 }
 
diff --git a/llvm/lib/Analysis/NoInferenceModelRunner.cpp b/llvm/lib/Analysis/NoInferenceModelRunner.cpp
@@ -10,24 +10,14 @@
 // logs for the default policy, in 'development' mode, but never ask it to
 // 'run'.
 //===----------------------------------------------------------------------===//
-#include "llvm/Config/config.h"
-#if defined(LLVM_HAVE_TF_API)
-
 #include "llvm/Analysis/NoInferenceModelRunner.h"
-#include "llvm/Analysis/Utils/TFUtils.h"
 
 using namespace llvm;
 
 NoInferenceModelRunner::NoInferenceModelRunner(
     LLVMContext &Ctx, const std::vector<TensorSpec> &Inputs)
-    : MLModelRunner(Ctx, MLModelRunner::Kind::NoOp) {
-  ValuesBuffer.reserve(Inputs.size());
+    : MLModelRunner(Ctx, MLModelRunner::Kind::NoOp, Inputs.size()) {
+  size_t Index = 0;
   for (const auto &TS : Inputs)
-    ValuesBuffer.push_back(std::make_unique<char[]>(TS.getElementCount() *
-                                                    TS.getElementByteSize()));
+    setUpBufferForTensor(Index++, TS, nullptr);
 }
-
-void *NoInferenceModelRunner::getTensorUntyped(size_t Index) {
-  return ValuesBuffer[Index].get();
-}
-#endif // defined(LLVM_HAVE_TF_API)
diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp
@@ -300,16 +300,29 @@ TFModelEvaluatorImpl::TFModelEvaluatorImpl(
     errs() << TF_Message(Status.get());
     invalidate();
   }
+  size_t NrSupported = 0;
   for (size_t I = 0; I < InputSpecs.size(); ++I) {
     auto &InputSpec = InputSpecs[I];
     InputFeed[I] = {
         TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
         InputSpec.port()};
+    if (!InputFeed[I].oper) {
+      continue;
+    }
+    if (NrSupported++ != I) {
+      errs()
+          << "Unsupported features must be placed at the end of the InputSpecs";
+      invalidate();
+      return;
+    }
     if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
       return;
     initInput(I, static_cast<TF_DataType>(getTFTypeIndex(InputSpec.type())),
               InputSpec.shape());
   }
+  InputFeed.resize(NrSupported);
+  Input.resize(NrSupported);
 
   for (size_t I = 0; I < OutputSpecsSize; ++I) {
     auto OutputSpec = GetOutputSpecs(I);
     OutputFeed[I] = {
@@ -387,7 +400,9 @@ void TFModelEvaluatorImpl::initInput(size_t Index, TF_DataType Type,
 }
 
 void *TFModelEvaluator::getUntypedInput(size_t Index) {
-  return TF_TensorData(Impl->getInput()[Index]);
+  if (Index < Impl->getInput().size())
+    return TF_TensorData(Impl->getInput()[Index]);
+  return nullptr;
 }
 
 TFModelEvaluator::EvaluationResult::EvaluationResult(
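This enforces, at evaluator construction, the ordering requirement from the commit message: once one unsupported feature appears, every later feature must be unsupported too. A sketch of what passes and what gets rejected (the feature names here are illustrative):

// Accepted: the unknown feature is appended after all supported ones.
std::vector<TensorSpec> Good{
    TensorSpec::createSpec<int64_t>("known_feature", {1}),
    TensorSpec::createSpec<int64_t>("future_feature", {1})};
// Rejected at setup: a supported feature follows an unsupported one, so the
// evaluator prints "Unsupported features must be placed at the end of the
// InputSpecs" and invalidates itself.
std::vector<TensorSpec> Bad{
    TensorSpec::createSpec<int64_t>("future_feature", {1}),
    TensorSpec::createSpec<int64_t>("known_feature", {1})};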
diff --git a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
@@ -15,6 +15,7 @@
 #include "RegAllocGreedy.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Analysis/TensorSpec.h"
 #if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL) || defined(LLVM_HAVE_TF_API)
 #include "llvm/Analysis/ModelUnderTrainingRunner.h"
 #include "llvm/Analysis/NoInferenceModelRunner.h"
@@ -320,14 +321,16 @@ private:
   mutable DenseMap<RegID, LIFeatureComponents> CachedFeatures;
 };
 
+#define _DECL_FEATURES(type, name, shape, _)                                   \
+  TensorSpec::createSpec<type>(#name, shape),
+
+static const std::vector<TensorSpec> InputFeatures{
+    {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)},
+};
+#undef _DECL_FEATURES
 // ===================================
 // Release (AOT) - specifics
 // ===================================
-const std::array<std::string, FeatureIDs::FeatureCount> FeatureNames{
-#define _GETNAME(_, NAME, __, ___) #NAME,
-    RA_EVICT_FEATURES_LIST(_GETNAME)
-#undef _GETNAME
-};
 class ReleaseModeEvictionAdvisorAnalysis final
     : public RegAllocEvictionAdvisorAnalysis {
 public:
@@ -349,7 +352,7 @@ private:
   getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
     if (!Runner)
       Runner = std::make_unique<ReleaseModeModelRunner<CompiledModelType>>(
-          MF.getFunction().getContext(), FeatureNames, DecisionName);
+          MF.getFunction().getContext(), InputFeatures, DecisionName);
     return std::make_unique<MLEvictAdvisor>(
         MF, RA, Runner.get(), getAnalysis<MachineBlockFrequencyInfo>(),
         getAnalysis<MachineLoopInfo>());
@@ -363,13 +366,6 @@ private:
 //
 // Features we log
 #ifdef LLVM_HAVE_TF_API
-#define _DECL_FEATURES(type, name, shape, _)                                   \
-  TensorSpec::createSpec<type>(#name, shape),
-
-static const std::vector<TensorSpec> InputFeatures{
-    {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)},
-};
-#undef _DECL_FEATURES
 static const TensorSpec Output =
     TensorSpec::createSpec<int64_t>(DecisionName, {1});
 static const TensorSpec Reward = TensorSpec::createSpec<float>("reward", {1});
diff --git a/llvm/unittests/Analysis/CMakeLists.txt b/llvm/unittests/Analysis/CMakeLists.txt
@@ -6,7 +6,7 @@ set(LLVM_LINK_COMPONENTS
   TransformUtils
   )
 
-set(MLGO_TESTS TFUtilsTest.cpp MLModelRunnerTest.cpp)
+set(MLGO_TESTS TFUtilsTest.cpp)
 if (DEFINED LLVM_HAVE_TF_API)
   LIST(APPEND EXTRA_TESTS ${MLGO_TESTS})
 else()
@@ -39,6 +39,7 @@ add_llvm_unittest_with_input_files(AnalysisTests
   LoopNestTest.cpp
   MemoryBuiltinsTest.cpp
   MemorySSATest.cpp
+  MLModelRunnerTest.cpp
   PhiValuesTest.cpp
   ProfileSummaryInfoTest.cpp
   ScalarEvolutionTest.cpp
diff --git a/llvm/unittests/Analysis/MLModelRunnerTest.cpp b/llvm/unittests/Analysis/MLModelRunnerTest.cpp
@@ -8,10 +8,49 @@
 
 #include "llvm/Analysis/MLModelRunner.h"
 #include "llvm/Analysis/NoInferenceModelRunner.h"
+#include "llvm/Analysis/ReleaseModeModelRunner.h"
 #include "gtest/gtest.h"
 
 using namespace llvm;
 
+namespace llvm {
+// This is a mock of the kind of AOT-generated model evaluator. It has 2 tensors
+// of shape {1}, and 'evaluation' adds them.
+// The interface is the one expected by ReleaseModelRunner.
+class MockAOTModel final {
+  int64_t A = 0;
+  int64_t B = 0;
+  int64_t R = 0;
+
+public:
+  MockAOTModel() = default;
+  int LookupArgIndex(const std::string &Name) {
+    if (Name == "prefix_a")
+      return 0;
+    if (Name == "prefix_b")
+      return 1;
+    return -1;
+  }
+  int LookupResultIndex(const std::string &) { return 0; }
+  void Run() { R = A + B; }
+  void *result_data(int RIndex) {
+    if (RIndex == 0)
+      return &R;
+    return nullptr;
+  }
+  void *arg_data(int Index) {
+    switch (Index) {
+    case 0:
+      return &A;
+    case 1:
+      return &B;
+    default:
+      return nullptr;
+    }
+  }
+};
+} // namespace llvm
+
 TEST(NoInferenceModelRunner, AccessTensors) {
   const std::vector<TensorSpec> Inputs{
       TensorSpec::createSpec<int64_t>("F1", {1}),
@@ -30,4 +69,51 @@ TEST(NoInferenceModelRunner, AccessTensors) {
   ASSERT_EQ(NIMR.getTensor<int64_t>(0)[0], 1);
   ASSERT_EQ(NIMR.getTensor<int64_t>(1)[8], 9);
   ASSERT_EQ(NIMR.getTensor<float>(2)[1], 0.2f);
 }
+
+TEST(ReleaseModeRunner, NormalUse) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
+                                 TensorSpec::createSpec<int64_t>("b", {1})};
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
+      Ctx, Inputs, "", "prefix_");
+  *Evaluator->getTensor<int64_t>(0) = 1;
+  *Evaluator->getTensor<int64_t>(1) = 2;
+  EXPECT_EQ(Evaluator->evaluate<int64_t>(), 3);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
+}
+
+TEST(ReleaseModeRunner, ExtraFeatures) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
+                                 TensorSpec::createSpec<int64_t>("b", {1}),
+                                 TensorSpec::createSpec<int64_t>("c", {1})};
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
+      Ctx, Inputs, "", "prefix_");
+  *Evaluator->getTensor<int64_t>(0) = 1;
+  *Evaluator->getTensor<int64_t>(1) = 2;
+  *Evaluator->getTensor<int64_t>(2) = -3;
+  EXPECT_EQ(Evaluator->evaluate<int64_t>(), 3);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(2), -3);
+}
+
+TEST(ReleaseModeRunner, ExtraFeaturesOutOfOrder) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{
+      TensorSpec::createSpec<int64_t>("a", {1}),
+      TensorSpec::createSpec<int64_t>("c", {1}),
+      TensorSpec::createSpec<int64_t>("b", {1}),
+  };
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
+      Ctx, Inputs, "", "prefix_");
+  *Evaluator->getTensor<int64_t>(0) = 1;  // a
+  *Evaluator->getTensor<int64_t>(1) = 2;  // c
+  *Evaluator->getTensor<int64_t>(2) = -3; // b
+  EXPECT_EQ(Evaluator->evaluate<int64_t>(), -2); // a + b
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(2), -3);
+}
diff --git a/llvm/unittests/Analysis/TFUtilsTest.cpp b/llvm/unittests/Analysis/TFUtilsTest.cpp
@@ -10,6 +10,8 @@
 #include "google/protobuf/struct.pb.h"
 #include "tensorflow/core/example/example.pb.h"
 #include "tensorflow/core/example/feature.pb.h"
+#include "llvm/Analysis/ModelUnderTrainingRunner.h"
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/AsmParser/Parser.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Instructions.h"
@@ -102,6 +104,36 @@ TEST(TFUtilsTest, EvalError) {
   EXPECT_FALSE(Evaluator.isValid());
 }
 
+TEST(TFUtilsTest, UnsupportedFeature) {
+  const static int64_t KnownSize = 214;
+  std::vector<TensorSpec> InputSpecs{
+      TensorSpec::createSpec<int32_t>("serving_default_input_1",
+                                      {1, KnownSize}),
+      TensorSpec::createSpec<float>("this_feature_does_not_exist", {2, 5})};
+
+  LLVMContext Ctx;
+  auto Evaluator = ModelUnderTrainingRunner::createAndEnsureValid(
+      Ctx, getModelPath(), "StatefulPartitionedCall", InputSpecs,
+      {LoggedFeatureSpec{
+          TensorSpec::createSpec<float>("StatefulPartitionedCall", {1}),
+          None}});
+  int32_t *V = Evaluator->getTensor<int32_t>(0);
+  // Fill it up with 1s, we know the output.
+  for (auto I = 0; I < KnownSize; ++I)
+    V[I] = 1;
+
+  float *F = Evaluator->getTensor<float>(1);
+  for (auto I = 0; I < 2 * 5; ++I)
+    F[I] = 3.14 + I;
+  float Ret = Evaluator->evaluate<float>();
+  EXPECT_EQ(static_cast<int64_t>(Ret), 80);
+  // The input vector should be unchanged
+  for (auto I = 0; I < KnownSize; ++I)
+    EXPECT_EQ(V[I], 1);
+  for (auto I = 0; I < 2 * 5; ++I)
+    EXPECT_FLOAT_EQ(F[I], 3.14 + I);
+}
+
 #define PROTO_CHECKER(FNAME, TYPE, INDEX, EXP)                                 \
   do {                                                                         \
     const auto &V = Expected.feature_lists()                                   \