[NFC][MLGO] Factor ModelUnderTrainingRunner for reuse

This factors ModelUnderTrainingRunner out so it can be reused; the implementation was already almost entirely inliner-agnostic.

Differential Revision: https://reviews.llvm.org/D115465
Mircea Trofin 2021-12-09 12:56:16 -08:00
parent 28309c5436
commit 04f2712ef4
4 changed files with 127 additions and 84 deletions

llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h (new file)

@@ -0,0 +1,59 @@
//===- ModelUnderTrainingRunner.h -- 'development' mode runner --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
#ifndef LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
#define LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
#include "llvm/Config/llvm-config.h"
#ifdef LLVM_HAVE_TF_API
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
/// ModelUnderTrainingRunner - training mode implementation. It uses TF C APIs
/// to dynamically load and evaluate a TF SavedModel
/// (https://www.tensorflow.org/guide/saved_model). Runtime performance is
/// sacrificed for ease of use while training.
class ModelUnderTrainingRunner final : public MLModelRunner {
public:
ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath,
const std::vector<TensorSpec> &InputSpecs,
const std::vector<LoggedFeatureSpec> &OutputSpecs);
// Disallows copy and assign.
ModelUnderTrainingRunner(const ModelUnderTrainingRunner &) = delete;
ModelUnderTrainingRunner &
operator=(const ModelUnderTrainingRunner &) = delete;
bool isValid() const { return !!Evaluator; }
const std::vector<LoggedFeatureSpec> &outputLoggedFeatureSpecs() const {
return OutputSpecs;
}
const Optional<TFModelEvaluator::EvaluationResult> &
lastEvaluationResult() const {
return LastEvaluationResult;
}
private:
std::unique_ptr<TFModelEvaluator> Evaluator;
const std::vector<LoggedFeatureSpec> OutputSpecs;
Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
void *evaluateUntyped() override;
void *getTensorUntyped(size_t Index) override;
};
} // namespace llvm
#endif // defined(LLVM_HAVE_TF_API)
#endif // LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
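
For orientation, a minimal usage sketch of the factored-out interface (not part of this change). It assumes MLModelRunner's templated evaluate<T>() / getTensor<T>() helpers and a build with LLVM_HAVE_TF_API defined; the model path, decision name, and feature name below are hypothetical.

#include "llvm/Analysis/ModelUnderTrainingRunner.h"
#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

int64_t evaluateOnce(LLVMContext &Ctx) {
  const std::string ModelPath = "path/to/saved_model"; // hypothetical
  std::vector<TensorSpec> Inputs{
      TensorSpec::createSpec<int64_t>("some_feature", {1})}; // hypothetical
  // loadOutputSpecs (from TFUtils) reads the output spec JSON shipped with
  // the model; the first output is checked against the decision name.
  auto MaybeOutputs = loadOutputSpecs(Ctx, "some_decision", ModelPath);
  if (!MaybeOutputs)
    return 0;
  ModelUnderTrainingRunner Runner(Ctx, ModelPath, Inputs, *MaybeOutputs);
  if (!Runner.isValid()) // evaluator creation failed; an error was emitted
    return 0;
  *Runner.getTensor<int64_t>(0) = 42; // fill the lone input feature
  return Runner.evaluate<int64_t>();  // runs the SavedModel, returns output 0
}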

llvm/lib/Analysis/CMakeLists.txt

@@ -103,6 +103,7 @@ add_llvm_component_library(LLVMAnalysis
   MemoryLocation.cpp
   MemorySSA.cpp
   MemorySSAUpdater.cpp
+  ModelUnderTrainingRunner.cpp
   ModuleDebugInfoPrinter.cpp
   ModuleSummaryAnalysis.cpp
   MustExecute.cpp

llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp

@@ -16,6 +16,7 @@
 #include "llvm/Analysis/CallGraph.h"
 #include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"
 #include "llvm/Analysis/MLInlineAdvisor.h"
+#include "llvm/Analysis/ModelUnderTrainingRunner.h"
 #include "llvm/Analysis/NoInferenceModelRunner.h"
 #include "llvm/Analysis/Utils/TFUtils.h"
 #include "llvm/IR/LLVMContext.h"
@@ -95,7 +96,6 @@ struct InlineEvent {
 /// Because this is a protobuf, we cannot just stream the events as they come.
 /// Internally, TrainingLogger stores data in column-major format, because that
 /// lines up with how TF SequenceExample represents it.
-class ModelUnderTrainingRunner;
 class TrainingLogger final {
 public:
   TrainingLogger(StringRef LogFileName, const ModelUnderTrainingRunner *MUTR);
@@ -262,55 +262,21 @@ private:
   const int64_t Mandatory;
 };
-
-/// ModelUnderTrainingRunner - training mode implementation. It uses TF C APIs
-/// to dynamically load and evaluate a TF SavedModel
-/// (https://www.tensorflow.org/guide/saved_model). Runtime performance is
-/// sacrificed for ease of use while training.
-class ModelUnderTrainingRunner final : public MLModelRunner {
-public:
-  ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath);
-
-  // Disallows copy and assign.
-  ModelUnderTrainingRunner(const ModelUnderTrainingRunner &) = delete;
-  ModelUnderTrainingRunner &
-  operator=(const ModelUnderTrainingRunner &) = delete;
-
-  bool isValid() const { return !!Evaluator; }
-
-  const std::vector<LoggedFeatureSpec> &outputLoggedFeatureSpecs() const {
-    return OutputSpecs;
-  }
-
-  const Optional<TFModelEvaluator::EvaluationResult> &
-  lastEvaluationResult() const {
-    return LastEvaluationResult;
-  }
-
-  static const std::vector<TensorSpec> getInputFeatures() {
-    std::vector<TensorSpec> InputSpecs;
-    for (size_t I = 0; I < NumberOfFeatures; ++I)
-      InputSpecs.push_back(TensorSpec::createSpec<int64_t>(
-          TFFeedPrefix + FeatureNameMap[I], {1}));
-    append_range(InputSpecs, TrainingOnlyFeatures);
-    return InputSpecs;
-  }
-
-private:
-  std::unique_ptr<TFModelEvaluator> Evaluator;
-  std::vector<LoggedFeatureSpec> OutputSpecs;
-  Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
-  void *evaluateUntyped() override;
-  void *getTensorUntyped(size_t Index) override;
-
-  // The training framework needs some additional features.
-  const static std::vector<TensorSpec> TrainingOnlyFeatures;
-};
-
-const std::vector<TensorSpec> ModelUnderTrainingRunner::TrainingOnlyFeatures{
+static const std::vector<TensorSpec> TrainingOnlyFeatures{
     TensorSpec::createSpec<int64_t>(TFFeedPrefix + "inlining_default", {1}),
    TensorSpec::createSpec<float>(TFFeedPrefix + "discount", {1}),
    TensorSpec::createSpec<float>(TFFeedPrefix + "reward", {1}),
    TensorSpec::createSpec<int32_t>(TFFeedPrefix + "step_type", {1})};
+
+static const std::vector<TensorSpec> getInputFeatures() {
+  std::vector<TensorSpec> InputSpecs;
+  for (size_t I = 0; I < NumberOfFeatures; ++I)
+    InputSpecs.push_back(
+        TensorSpec::createSpec<int64_t>(TFFeedPrefix + FeatureNameMap[I], {1}));
+  append_range(InputSpecs, TrainingOnlyFeatures);
+  return InputSpecs;
+}
 } // namespace

 TrainingLogger::TrainingLogger(StringRef LogFileName,
@@ -451,40 +417,6 @@ size_t DevelopmentModeMLInlineAdvisor::getTotalSizeEstimate() {
   return Ret;
 }
-
-ModelUnderTrainingRunner::ModelUnderTrainingRunner(LLVMContext &Ctx,
-                                                   const std::string &ModelPath)
-    : MLModelRunner(Ctx) {
-  std::vector<TensorSpec> InputSpecs =
-      ModelUnderTrainingRunner::getInputFeatures();
-  if (auto MaybeOutSpecs =
-          loadOutputSpecs(Ctx, DecisionName, ModelPath, TFOutputSpecOverride))
-    OutputSpecs = std::move(*MaybeOutSpecs);
-  else
-    return;
-
-  Evaluator = std::make_unique<TFModelEvaluator>(
-      ModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I].Spec; },
-      OutputSpecs.size());
-  if (!Evaluator || !Evaluator->isValid()) {
-    Ctx.emitError("Failed to create inliner saved model evaluator");
-    Evaluator.reset();
-    return;
-  }
-}
-
-void *ModelUnderTrainingRunner::evaluateUntyped() {
-  LastEvaluationResult = Evaluator->evaluate();
-  if (!LastEvaluationResult.hasValue()) {
-    Ctx.emitError("Error evaluating model.");
-    return nullptr;
-  }
-  return LastEvaluationResult->getTensorValue<int64_t>(0);
-}
-
-void *ModelUnderTrainingRunner::getTensorUntyped(size_t Index) {
-  return Evaluator->getUntypedInput(Index);
-}

 std::unique_ptr<InlineAdvisor> llvm::getDevelopmentModeAdvisor(
     Module &M, ModuleAnalysisManager &MAM,
     std::function<bool(CallBase &)> GetDefaultAdvice) {
@@ -493,11 +425,13 @@ std::unique_ptr<InlineAdvisor> llvm::getDevelopmentModeAdvisor(
   ModelUnderTrainingRunner *MUTRPtr = nullptr;
   bool IsDoingInference = false;
   if (TFModelUnderTrainingPath.empty())
-    Runner.reset(new NoInferenceModelRunner(
-        Ctx, ModelUnderTrainingRunner::getInputFeatures()));
+    Runner.reset(new NoInferenceModelRunner(Ctx, getInputFeatures()));
   else {
-    auto MUTR = std::make_unique<ModelUnderTrainingRunner>(
-        Ctx, TFModelUnderTrainingPath);
+    std::unique_ptr<ModelUnderTrainingRunner> MUTR;
+    if (auto MaybeOutputSpecs = loadOutputSpecs(
+            Ctx, DecisionName, TFModelUnderTrainingPath, TFOutputSpecOverride))
+      MUTR = std::make_unique<ModelUnderTrainingRunner>(
+          Ctx, TFModelUnderTrainingPath, getInputFeatures(), *MaybeOutputSpecs);
     if (!MUTR || !MUTR->isValid()) {
       Ctx.emitError("Could not load the policy model from the provided path");
       return nullptr;

llvm/lib/Analysis/ModelUnderTrainingRunner.cpp (new file)

@@ -0,0 +1,49 @@
//===- ModelUnderTrainingRunner.cpp - 'development' mode runner -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of an MLModelRunner for 'development' mode, i.e. evaluation
// happens off a model that's provided from the command line and is interpreted.
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
#if defined(LLVM_HAVE_TF_API)
#include "llvm/Analysis/ModelUnderTrainingRunner.h"
using namespace llvm;
ModelUnderTrainingRunner::ModelUnderTrainingRunner(
LLVMContext &Ctx, const std::string &ModelPath,
const std::vector<TensorSpec> &InputSpecs,
const std::vector<LoggedFeatureSpec> &OutputSpecs)
: MLModelRunner(Ctx), OutputSpecs(OutputSpecs) {
Evaluator = std::make_unique<TFModelEvaluator>(
ModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I].Spec; },
OutputSpecs.size());
if (!Evaluator || !Evaluator->isValid()) {
Ctx.emitError("Failed to create inliner saved model evaluator");
Evaluator.reset();
return;
}
}
void *ModelUnderTrainingRunner::evaluateUntyped() {
LastEvaluationResult = Evaluator->evaluate();
if (!LastEvaluationResult.hasValue()) {
Ctx.emitError("Error evaluating model.");
return nullptr;
}
return LastEvaluationResult->getUntypedTensorValue(0);
}
void *ModelUnderTrainingRunner::getTensorUntyped(size_t Index) {
return Evaluator->getUntypedInput(Index);
}
#endif // defined(LLVM_HAVE_TF_API)
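
A consumer that logs training data can likewise stay entirely behind this interface (under the same LLVM_HAVE_TF_API guard). A hedged sketch: it assumes a const overload of EvaluationResult::getUntypedTensorValue, and the loop body is illustrative, not from this commit.

// After a successful evaluation, every output declared in the spec file is
// available through lastEvaluationResult(); index 0 is the decision itself.
void logExtraOutputs(const ModelUnderTrainingRunner &Runner) {
  const auto &LastResult = Runner.lastEvaluationResult();
  if (!LastResult.hasValue())
    return; // nothing evaluated yet, or the evaluation failed
  const auto &Specs = Runner.outputLoggedFeatureSpecs();
  for (size_t I = 0, E = Specs.size(); I != E; ++I) {
    const void *Raw = LastResult->getUntypedTensorValue(I);
    // A real logger would copy Raw out, sized and typed per Specs[I].Spec.
    (void)Raw;
  }
}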