14#include "llvm/Config/config.h"
15#if defined(LLVM_HAVE_TFLITE)
// NOTE(review): the extraction dropped the cl::opt declarations that these
// cl::desc(...) fragments belong to (in upstream LLVM these are the
// -training-log, -ml-inliner-model-under-training,
// -ml-inliner-output-spec-override and -ml-inliner-trained-model-feature-prefix
// flags). Verify against the original file before editing.
// Description of the flag naming the file the development-mode inlining
// log is written to.
37    cl::desc(
        "Path where the development - mode inlining log is saved."));
// Raw-string help text for the model-under-training path flag. Do not
// insert anything between here and the closing )" — it is one string literal.
// NOTE(review): the `"(` on the next line is the opening of an R"(...)" raw
// string split across lines by the extraction; several interior lines
// (47, 49, 52-55, 57, 59-65 by the embedded numbering) are missing.
41    cl::desc(R
"(Path to SavedModel from the previous training iteration.
42The directory is also expected to contain a JSON specification of the
43outputs expected to be logged, where the first entry must be the
44inlining decision. The file containing the specification should be
45called output_spec.json. The expected JSON value is an array of
46dictionaries. Each dictionary should have 2 keys:
48- "tensor_spec, followed by the TensorSpec description of the
50- "logging_name", a string indicating the name to use when
51logging the output values.
56      "logging_name" : "some_name",
58      "name" : "model_name",
66The first value must always correspond to the decision.)"));
// Description of the flag that overrides the output_spec.json location.
70    cl::desc(
        "Override the path to the output spec json file. See "
71        "-ml-inliner-model-under-training documentation for the "
72        "specification of that file."));
// Description of the feature-name prefix flag (see TFFeedPrefix usage in
// convertInputFeatures below).
76    cl::desc(
        "Prefix for feature names."));
// Fields of an inline-event record (the enclosing struct's header is not
// visible in this extraction — presumably upstream's InlineEvent; verify).
// DefaultDecision: what the default heuristic policy would have decided for
// this call site (logged via TrainingLogger::logInlineEvent below).
82  int64_t DefaultDecision = 0;
// AdvisedDecision: the decision actually advised; set from
// isInliningRecommended() in LoggingMLInlineAdvice::log below.
86  int64_t AdvisedDecision = 0;
// Collects features/decisions/rewards per inlining event and writes them
// through the underlying Logger. NOTE(review): access specifiers and several
// member lines were dropped by the extraction — verify against upstream.
98class TrainingLogger final {
100  TrainingLogger(StringRef LogFileName,
const ModelUnderTrainingRunner *MUTR,
101                 const std::vector<TensorSpec> &FeatureMap);
// Record one inlining event: feature values from ModelRunner plus the
// decision/reward carried in Event (see definition below).
104  void logInlineEvent(
const InlineEvent &Event,
105                      const MLModelRunner &ModelRunner);
// Destination path for the log.
108  StringRef LogFileName;
// Optional model-under-training runner; when non-null its extra outputs are
// also logged (see logInlineEvent).
109  const ModelUnderTrainingRunner *
const MUTR;
110  const std::vector<TensorSpec> &FeatureMap;
// The underlying serializer; created in the constructor.
112  std::unique_ptr<Logger>
L;
// Indices of the default-decision and advised-decision tensors within the
// logged feature list; set in the constructor, sentinel max() until then.
115  size_t DefaultDecisionPos = std::numeric_limits<size_t>::max();
116  size_t DecisionPos = std::numeric_limits<size_t>::max();
// Development-mode advisor: can run inference from a model under training
// and/or log training data. NOTE(review): the class header, several parameter
// lines of this constructor, and closing braces of the inline methods below
// were dropped by the extraction — verify against upstream before editing.
146  DevelopmentModeMLInlineAdvisor(
          std::unique_ptr<MLModelRunner>(
const std::vector<TensorSpec> &)>
151      std::function<
bool(CallBase &)> GetDefaultAdvice);
// Sum of native-size estimates over all module functions (defined below).
153  size_t getTotalSizeEstimate();
// Accumulate a size delta into the running native-size estimate.
155  void updateNativeSizeEstimate(int64_t Change) {
156    *CurrentNativeSize += Change;
// Invalidate F's cached InlineSizeEstimatorAnalysis result so the next
// size query recomputes it (everything else is preserved).
158  void resetNativeSize(Function *
F) {
159    PreservedAnalyses PA = PreservedAnalyses::all();
160    PA.
abandon<InlineSizeEstimatorAnalysis>();
161    FAM.invalidate(*
F, PA);
164  std::unique_ptr<MLInlineAdvice>
165  getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE)
override;
167  std::optional<size_t> getNativeSizeEstimate(
const Function &
F)
const;
// Logging is active iff a TrainingLogger was created (i.e. -training-log
// was given; see the constructor below).
170  bool isLogging()
const {
return !!Logger; }
171  std::unique_ptr<MLInlineAdvice> getMandatoryAdviceImpl(CallBase &CB)
override;
// True when the runner is a ModelUnderTrainingRunner (set in the ctor).
173  const bool IsDoingInference;
174  std::unique_ptr<TrainingLogger> Logger;
// Native-size bookkeeping used for reward computation; InitialNativeSize is
// 0 when not logging (see the constructor's init list below).
176  const std::optional<int32_t> InitialNativeSize;
177  std::optional<int32_t> CurrentNativeSize;
// Advice wrapper that, in addition to MLInlineAdvice's behavior, logs each
// outcome (with a size-delta reward when the size estimator is available)
// to the TrainingLogger. NOTE(review): the class header, access specifiers
// and the closing braces of several methods were dropped by the extraction.
184  LoggingMLInlineAdvice(DevelopmentModeMLInlineAdvisor *Advisor, CallBase &CB,
185                        OptimizationRemarkEmitter &ORE,
bool Recommendation,
186                        TrainingLogger &Logger,
187                        std::optional<size_t> CallerSizeEstimateBefore,
188                        std::optional<size_t> CalleeSizeEstimateBefore,
189                        bool DefaultDecision,
bool Mandatory =
false)
190      : MLInlineAdvice(Advisor, CB, ORE, Recommendation), Logger(Logger),
191        CallerSizeEstimateBefore(CallerSizeEstimateBefore),
192        CalleeSizeEstimateBefore(CalleeSizeEstimateBefore),
193        DefaultDecision(DefaultDecision), Mandatory(Mandatory) {}
195  virtual ~LoggingMLInlineAdvice() =
default;
// Downcast the base Advisor pointer; safe because this advice is only
// created by DevelopmentModeMLInlineAdvisor (see getAdviceFromModel /
// getMandatoryAdviceImpl below).
198  DevelopmentModeMLInlineAdvisor *getAdvisor()
const {
199    return static_cast<DevelopmentModeMLInlineAdvisor *
>(Advisor);
// Successful inlining, callee kept: reward is the caller's size growth
// minus the pre-inline caller+callee sizes (callee still counted after).
201  void recordInliningImpl()
override {
202    MLInlineAdvice::recordInliningImpl();
203    getAdvisor()->resetNativeSize(Caller);
// Sentinel reward when the evaluator is unavailable or advising stopped.
204    int Reward = std::numeric_limits<int>::max();
205    if (InlineSizeEstimatorAnalysis::isEvaluatorRequested() &&
206        !getAdvisor()->isForcedToStop()) {
207      int NativeSizeAfter = *getAdvisor()->getNativeSizeEstimate(*Caller) +
208                            *CalleeSizeEstimateBefore;
209      Reward = NativeSizeAfter -
210               (*CallerSizeEstimateBefore + *CalleeSizeEstimateBefore);
211      getAdvisor()->updateNativeSizeEstimate(Reward);
// Successful inlining, callee deleted: same reward shape, but the callee no
// longer contributes to the post-inline size.
216  void recordInliningWithCalleeDeletedImpl()
override {
217    MLInlineAdvice::recordInliningWithCalleeDeletedImpl();
218    getAdvisor()->resetNativeSize(Caller);
219    if (InlineSizeEstimatorAnalysis::isEvaluatorRequested() &&
220        !getAdvisor()->isForcedToStop()) {
221      int NativeSizeAfter = *getAdvisor()->getNativeSizeEstimate(*Caller);
222      int Reward = NativeSizeAfter -
223                   (*CallerSizeEstimateBefore + *CalleeSizeEstimateBefore);
224      getAdvisor()->updateNativeSizeEstimate(Reward);
// Inlining attempted but failed: log with no reward.
231  void recordUnsuccessfulInliningImpl(
const InlineResult &Result)
override {
232    MLInlineAdvice::recordUnsuccessfulInliningImpl(Result);
233    log(NoReward,
false);
// Inlining not attempted: log with no reward.
236  void recordUnattemptedInliningImpl()
override {
237    MLInlineAdvice::recordUnattemptedInliningImpl();
238    log(NoReward,
false);
// Assemble an InlineEvent and hand it to the TrainingLogger.
// NOTE(review): the Event declaration line and part of the body were
// dropped by the extraction.
241  void log(int64_t Reward,
bool Success) {
245    Event.AdvisedDecision = isInliningRecommended();
246    Event.DefaultDecision = DefaultDecision;
248    Event.Reward = Reward;
249    Logger.logInlineEvent(Event, getAdvisor()->getModelRunner());
252  static const int64_t NoReward = 0;
253  TrainingLogger &Logger;
// Pre-inline size estimates captured at advice-creation time; empty when
// the size estimator is not available.
254  const std::optional<size_t> CallerSizeEstimateBefore;
255  const std::optional<size_t> CalleeSizeEstimateBefore;
// Stored as int64_t to match the logged tensor types.
256  const int64_t DefaultDecision;
257  const int64_t Mandatory;
// Features that exist only for training; the initializer list was dropped
// by the extraction — verify against upstream.
260static const std::vector<TensorSpec> TrainingOnlyFeatures{
// Rebuild the feature list with TFFeedPrefix prepended to each name, so the
// specs match the SavedModel's input feed names. NOTE(review): the trailing
// lines of this function (presumably appending TrainingOnlyFeatures and the
// return) were dropped by the extraction.
268static const std::vector<TensorSpec>
269convertInputFeatures(
const std::vector<TensorSpec> &OriginalFeatures) {
270  std::vector<TensorSpec> InputSpecs;
271  for (
const auto &Feature : OriginalFeatures)
272    InputSpecs.push_back(
TensorSpec(TFFeedPrefix + Feature.name(), Feature));
// Build the full tensor list to log (features, then extra model outputs when
// MUTR is set, then default decision, then the advised decision), open the
// -training-log file and construct the Logger. NOTE(review): several interior
// lines (appends between the position assignments, the error_code
// declaration, the Logger constructor arguments) were dropped by the
// extraction — verify against upstream.
279TrainingLogger::TrainingLogger(
StringRef LogFileName,
280                               const ModelUnderTrainingRunner *MUTR,
281                               const std::vector<TensorSpec> &FeatureMap)
282    : LogFileName(LogFileName), MUTR(MUTR), FeatureMap(FeatureMap) {
// Start from the model features; the decision tensors are appended after.
284  std::vector<TensorSpec> FT(FeatureMap.begin(), FeatureMap.end());
// Record where the default / advised decision tensors will sit, so
// logInlineEvent can address them directly.
289  DefaultDecisionPos = FT.size();
292  DecisionPos = FT.size();
295  auto OS = std::make_unique<raw_fd_ostream>(TrainingLog, EC);
// On open failure, report the error; EC is checked on the (dropped) line
// above this debug print.
297    dbgs() << (
EC.message() +
":" + TrainingLog);
299  L = std::make_unique<Logger>(
// A single, unnamed logging context is used.
302  L->switchContext(
"");
// Serialize one observation: every feature tensor from the runner, then (when
// a model under training is present) its extra outputs, then the default and
// advised decisions, then the reward. NOTE(review): the second parameter
// line, the getTensorUntyped argument and the end-of-observation lines were
// dropped by the extraction — verify against upstream.
306void TrainingLogger::logInlineEvent(
const InlineEvent &Event,
308  L->startObservation();
// Log the runner's feature tensors in FeatureMap order; CurrentFeature keeps
// counting across the extra-output loop below so positions stay aligned with
// the spec list built in the constructor.
309  size_t CurrentFeature = 0;
310  for (; CurrentFeature < FeatureMap.size(); ++CurrentFeature)
311    L->logTensorValue(CurrentFeature,
312                      reinterpret_cast<const char *
>(
// Extra outputs requested for logging by the model under training.
316  for (
size_t I = 0;
I < MUTR->extraOutputsForLoggingSpecs().
size(); ++
I) {
317    const char *RawData =
318        reinterpret_cast<const char *
>(MUTR->getUntypedExtraOutputValue(
I));
319    L->logTensorValue(CurrentFeature, RawData);
// After features + extra outputs we must be exactly at the default-decision
// slot computed in the constructor.
323  assert(CurrentFeature == DefaultDecisionPos);
324  L->logTensorValue(DefaultDecisionPos,
325                    reinterpret_cast<const char *
>(&Event.DefaultDecision));
326  L->logTensorValue(DecisionPos,
327                    reinterpret_cast<const char *
>(&Event.AdvisedDecision));
330  L->logReward(Event.Reward);
// NOTE(review): Effects is declared outside this view — presumably
// per-event bookkeeping guarded by a dropped condition; verify upstream.
333  Effects.push_back(Event.Effect);
// Construct the advisor: inference mode iff the runner is a model under
// training; create the TrainingLogger when -training-log is set. At least one
// of the two modes must be active. NOTE(review): the M/MAM/factory parameter
// lines, the base-class initializer and the TrainingLogger constructor
// arguments were dropped by the extraction — verify against upstream.
336DevelopmentModeMLInlineAdvisor::DevelopmentModeMLInlineAdvisor(
339    std::unique_ptr<MLModelRunner>(
const std::vector<TensorSpec> &)>
341    std::function<
bool(
CallBase &)> GetDefaultAdvice)
343      IsDoingInference(
isa<ModelUnderTrainingRunner>(getModelRunner())),
// Size baseline is only needed (and computed) when logging.
344      InitialNativeSize(isLogging() ? getTotalSizeEstimate() : 0),
345      CurrentNativeSize(InitialNativeSize) {
347  if (!TrainingLog.empty())
348    Logger = std::make_unique<TrainingLogger>(
// An advisor doing neither inference nor logging would be useless.
351  assert(IsDoingInference || isLogging());
// Query the native-size estimator for F. NOTE(review): the return type line,
// the evaluator-availability guard, and the success/return paths were dropped
// by the extraction; only the error path is visible here.
355DevelopmentModeMLInlineAdvisor::getNativeSizeEstimate(
const Function &
F)
const {
// Emitted when the size analysis produced no value for F.
361    F.getParent()->getContext().emitError(
362        "Native size estimator is not present.");
// Mandatory call sites always get "inline" advice (the literal `true` below),
// wrapped in a LoggingMLInlineAdvice so the event is still logged.
// NOTE(review): the remaining constructor arguments were dropped by the
// extraction — verify against upstream.
368std::unique_ptr<MLInlineAdvice>
369DevelopmentModeMLInlineAdvisor::getMandatoryAdviceImpl(
CallBase &CB) {
370  return std::make_unique<LoggingMLInlineAdvice>(
372      CB, getCallerORE(CB),
true,
// Produce advice for CB: the model's evaluation when doing inference,
// otherwise (the dropped ternary alternative) the default-policy advice;
// always wrapped in a LoggingMLInlineAdvice. NOTE(review): the parameter
// list, the pure-inference early return, and the trailing constructor
// arguments were dropped by the extraction — verify against upstream.
380std::unique_ptr<MLInlineAdvice>
381DevelopmentModeMLInlineAdvisor::getAdviceFromModel(
// When only inferring (no logging), defer to the (dropped) base behavior.
383  if (IsDoingInference && !isLogging())
386  bool DefaultAdvice = GetDefaultAdvice(CB);
387  auto Recommendation =
388      IsDoingInference ?
static_cast<bool>(ModelRunner->
evaluate<int64_t>())
390  return std::make_unique<LoggingMLInlineAdvice>(
392      CB, ORE, Recommendation,
// Sum native-size estimates over the module's defined functions.
// NOTE(review): the accumulator declaration, the loop header over the module,
// the `continue` for declarations, and the return were dropped by the
// extraction — verify against upstream.
400size_t DevelopmentModeMLInlineAdvisor::getTotalSizeEstimate() {
// Declarations have no body, hence no size contribution.
405    if (
F.isDeclaration())
407    Ret += *getNativeSizeEstimate(
F);
// Factory entry point: builds a runner factory (interactive/no-op runner when
// no model path is given — that branch's body was dropped by the extraction —
// otherwise a ModelUnderTrainingRunner over the converted feature specs) and
// constructs the DevelopmentModeMLInlineAdvisor. NOTE(review): the function
// signature's leading lines, the ConvertedFeatures initializer, the lambda's
// return, and the trailing constructor arguments are missing from this view.
414    std::function<
bool(
CallBase &)> GetDefaultAdvice) {
415  auto &Ctx = M.getContext();
// Deferred runner construction: the advisor supplies the feature list later.
416  auto RunnerFactory = [&](
const std::vector<TensorSpec> &
InputFeatures)
417      -> std::unique_ptr<MLModelRunner> {
418    std::unique_ptr<MLModelRunner> Runner;
// Feature names get the -ml-inliner-trained-model-feature-prefix applied
// (see convertInputFeatures above) so they match the SavedModel feeds.
419    const std::vector<TensorSpec> ConvertedFeatures =
421    if (TFModelUnderTrainingPath.empty())
424      Runner = ModelUnderTrainingRunner::createAndEnsureValid(
425          Ctx, TFModelUnderTrainingPath,
DecisionName, ConvertedFeatures,
426          TFOutputSpecOverride);
431  return std::make_unique<DevelopmentModeMLInlineAdvisor>(M,
MAM, RunnerFactory,
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements the BitVector class.
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool for interprocedural optimization.
Module.h This file contains the declarations for the Module class.
Machine Check Debug Module
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to calling a function.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signature does not match the call signature.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
static bool isEvaluatorRequested()
Logging utility - given an ordered specification of features, and assuming a scalar reward,...
InlineAdvice that tracks changes post inlining.
virtual std::unique_ptr< MLInlineAdvice > getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE)
MLModelRunner interface: abstraction of a mechanism for evaluating a ML model.
void * getTensorUntyped(size_t Index)
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
StringRef - Represent a constant reference to a string, i.e.
static TensorSpec createSpec(const std::string &Name, const std::vector< int64_t > &Shape, int Port=0)
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::unique_ptr< InlineAdvisor > getDevelopmentModeAdvisor(Module &M, ModuleAnalysisManager &MAM, std::function< bool(CallBase &)> GetDefaultAdvice)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI const TensorSpec DefaultDecisionSpec
static const std::vector< TensorSpec > InputFeatures
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
LLVM_ABI const TensorSpec InlineDecisionSpec
LLVM_ABI const char *const RewardName
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.