//===- MLInlineAdvisor.cpp - machine learned InlineAdvisor ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interface between the inliner and a learned model.
// It delegates model evaluation to either the AOT compiled model (the
// 'release' mode) or a runtime-loaded model (the 'development' case).
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MLInlineAdvisor.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/FunctionPropertiesAnalysis.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/InlineModelFeatureMaps.h"
#include "llvm/Analysis/InteractiveModelRunner.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ReleaseModeModelRunner.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

static cl::opt<std::string> InteractiveChannelBaseName(
    "inliner-interactive-channel-base", cl::Hidden,
    cl::desc(
        "Base file path for the interactive mode. The incoming filename should "
        "have the name <inliner-interactive-channel-base>.in, while the "
        "outgoing name should be <inliner-interactive-channel-base>.out"));

#if defined(LLVM_HAVE_TF_AOT_INLINERSIZEMODEL)
// codegen-ed file
#include "InlinerSizeModel.h" // NOLINT
using CompiledModelType = llvm::InlinerSizeModel;
#else
using CompiledModelType = NoopSavedModelImpl;
#endif

std::unique_ptr<InlineAdvisor>
llvm::getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM) {
  if (!llvm::isEmbeddedModelEvaluatorValid<CompiledModelType>() &&
      InteractiveChannelBaseName.empty())
    return nullptr;
  std::unique_ptr<MLModelRunner> AOTRunner;
  if (InteractiveChannelBaseName.empty())
    AOTRunner = std::make_unique<ReleaseModeModelRunner<CompiledModelType>>(
        M.getContext(), FeatureMap, DecisionName);
  else
    AOTRunner = std::make_unique<InteractiveModelRunner>(
        M.getContext(), FeatureMap, InlineDecisionSpec,
        InteractiveChannelBaseName + ".out",
        InteractiveChannelBaseName + ".in");
  return std::make_unique<MLInlineAdvisor>(M, MAM, std::move(AOTRunner));
}
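// Illustrative use of the interactive mode (hypothetical paths): passing
// -inliner-interactive-channel-base=/tmp/chan would make the advisor write
// feature tensors to /tmp/chan.out and read the model's decision from
// /tmp/chan.in, matching the description of the flag above. How the flag
// reaches the optimizer (e.g. through -mllvm from a driver) depends on the
// embedding tool.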
#define DEBUG_TYPE "inline-ml"

static cl::opt<float> SizeIncreaseThreshold(
    "ml-advisor-size-increase-threshold", cl::Hidden,
    cl::desc("Maximum factor by which expected native size may increase before "
             "blocking any further inlining."),
    cl::init(2.0));
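// For example, with the default threshold of 2.0, once CurrentIRSize exceeds
// twice InitialIRSize the advisor sets ForceStop and stops recommending any
// further inlining (see onSuccessfulInlining below).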
static cl::opt<bool> KeepFPICache(
    "ml-advisor-keep-fpi-cache", cl::Hidden,
    cl::desc(
        "For test - keep the ML Inline advisor's FunctionPropertiesInfo cache"),
    cl::init(false));

// clang-format off
const std::vector<TensorSpec> llvm::FeatureMap{
#define POPULATE_NAMES(_, NAME) TensorSpec::createSpec<int64_t>(NAME, {1} ),
// InlineCost features - these must come first
  INLINE_COST_FEATURE_ITERATOR(POPULATE_NAMES)
#undef POPULATE_NAMES

// Non-cost features
#define POPULATE_NAMES(_, NAME, __) TensorSpec::createSpec<int64_t>(NAME, {1} ),
  INLINE_FEATURE_ITERATOR(POPULATE_NAMES)
#undef POPULATE_NAMES
};
// clang-format on
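// The InlineCost features are listed first, presumably so that
// inlineCostFeatureToMlFeature (see InlineModelFeatureMaps.h) can map an
// InlineCostFeatureIndex to the corresponding FeatureIndex by a direct index
// conversion; the remaining features then follow after that block.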
const char *const llvm::DecisionName = "inlining_decision";
const TensorSpec llvm::InlineDecisionSpec =
    TensorSpec::createSpec<int64_t>(DecisionName, {1});
const char *const llvm::DefaultDecisionName = "inlining_default";
const char *const llvm::RewardName = "delta_size";

CallBase *getInlinableCS(Instruction &I) {
  if (auto *CS = dyn_cast<CallBase>(&I))
    if (Function *Callee = CS->getCalledFunction()) {
      if (!Callee->isDeclaration()) {
        return CS;
      }
    }
  return nullptr;
}

MLInlineAdvisor::MLInlineAdvisor(Module &M, ModuleAnalysisManager &MAM,
                                 std::unique_ptr<MLModelRunner> Runner)
    : InlineAdvisor(
          M, MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager()),
      ModelRunner(std::move(Runner)),
      CG(MAM.getResult<LazyCallGraphAnalysis>(M)),
      InitialIRSize(getModuleIRSize()), CurrentIRSize(InitialIRSize) {
  assert(ModelRunner);
  ModelRunner->switchContext("");
  // Extract the 'call site height' feature - the position of a call site
  // relative to the farthest statically reachable SCC node. We don't mutate
  // this value while inlining happens. Empirically, this feature proved
  // critical in behavioral cloning - i.e. training a model to mimic the manual
  // heuristic's decisions - and, thus, equally important for training for
  // improvement.
  CallGraph CGraph(M);
  for (auto I = scc_begin(&CGraph); !I.isAtEnd(); ++I) {
    const std::vector<CallGraphNode *> &CGNodes = *I;
    unsigned Level = 0;
    for (auto *CGNode : CGNodes) {
      Function *F = CGNode->getFunction();
      if (!F || F->isDeclaration())
        continue;
      for (auto &I : instructions(F)) {
        if (auto *CS = getInlinableCS(I)) {
          auto *Called = CS->getCalledFunction();
          auto Pos = FunctionLevels.find(&CG.get(*Called));
          // In a bottom-up traversal, a call to an inlinable callee either
          // stays within the same SCC or goes to a function in an already
          // visited SCC. So not finding its level means we haven't visited it
          // yet, meaning it's in this SCC.
          if (Pos == FunctionLevels.end())
            continue;
          Level = std::max(Level, Pos->second + 1);
        }
      }
    }
    for (auto *CGNode : CGNodes) {
      Function *F = CGNode->getFunction();
      if (F && !F->isDeclaration())
        FunctionLevels[&CG.get(*F)] = Level;
    }
  }
  for (auto KVP : FunctionLevels) {
    AllNodes.insert(KVP.first);
    EdgeCount += getLocalCalls(KVP.first->getFunction());
  }
  NodeCount = AllNodes.size();
}
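// Illustrative example (hypothetical module): if f calls g, g calls h, and
// none of them is recursive, the bottom-up SCC walk above assigns h level 0,
// g level 1, and f level 2; functions whose defined callees all live in their
// own SCC stay at level 0. getInitialFunctionLevel below returns these values.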
unsigned MLInlineAdvisor::getInitialFunctionLevel(const Function &F) const {
  return CG.lookup(F) ? FunctionLevels.at(CG.lookup(F)) : 0;
}

void MLInlineAdvisor::onPassEntry(LazyCallGraph::SCC *LastSCC) {
  if (!LastSCC || ForceStop)
    return;
  FPICache.clear();
  // Function passes executed between InlinerPass runs may have changed the
  // module-wide features.
  // The cgscc pass manager rules are such that:
  // - if a pass leads to merging SCCs, then the pipeline is restarted on the
  //   merged SCC
  // - if a pass leads to splitting the SCC, then we continue with one of the
  //   splits
  // This means that NodesInLastSCC is a (not necessarily strict) superset of
  // the nodes that subsequent passes would have processed
  // - in addition, if new Nodes were created by a pass (e.g. CoroSplit),
  //   they'd be adjacent to Nodes in the last SCC. So we just need to check the
  //   boundary of Nodes in NodesInLastSCC for Nodes we haven't seen. We don't
  //   care about the nature of the Edge (call or ref).
  NodeCount -= static_cast<int64_t>(NodesInLastSCC.size());
  while (!NodesInLastSCC.empty()) {
    const auto *N = *NodesInLastSCC.begin();
    NodesInLastSCC.erase(N);
    // The Function wrapped by N could have been deleted since we last saw it.
    if (N->isDead()) {
      assert(!N->getFunction().isDeclaration());
      continue;
    }
    ++NodeCount;
    EdgeCount += getLocalCalls(N->getFunction());
    for (const auto &E : *(*N)) {
      const auto *AdjNode = &E.getNode();
      assert(!AdjNode->isDead() && !AdjNode->getFunction().isDeclaration());
      auto I = AllNodes.insert(AdjNode);
      if (I.second)
        NodesInLastSCC.insert(AdjNode);
    }
  }

  EdgeCount -= EdgesOfLastSeenNodes;
  EdgesOfLastSeenNodes = 0;

  // (Re)use NodesInLastSCC to remember the nodes in the SCC right now,
  // in case the SCC is split before onPassExit and some nodes are split out.
  assert(NodesInLastSCC.empty());
  for (const auto &N : *LastSCC)
    NodesInLastSCC.insert(&N);
}

void MLInlineAdvisor::onPassExit(LazyCallGraph::SCC *LastSCC) {
  // No need to keep this around - function passes will invalidate it.
  if (!KeepFPICache)
    FPICache.clear();
  if (!LastSCC || ForceStop)
    return;
  // Keep track of the nodes and edges we last saw. Then, in onPassEntry,
  // we update the node count and edge count from the subset of these nodes
  // that survived.
  EdgesOfLastSeenNodes = 0;

  // Check on nodes that were in the SCC at onPassEntry.
  for (auto I = NodesInLastSCC.begin(); I != NodesInLastSCC.end();) {
    if ((*I)->isDead())
      NodesInLastSCC.erase(*I++);
    else
      EdgesOfLastSeenNodes += getLocalCalls((*I++)->getFunction());
  }

  // Check on nodes that may have been added to the SCC since then.
  for (const auto &N : *LastSCC) {
    assert(!N.isDead());
    auto I = NodesInLastSCC.insert(&N);
    if (I.second)
      EdgesOfLastSeenNodes += getLocalCalls(N.getFunction());
  }
  assert(NodeCount >= NodesInLastSCC.size());
  assert(EdgeCount >= EdgesOfLastSeenNodes);
}
int64_t MLInlineAdvisor::getLocalCalls(Function &F) {
  return getCachedFPI(F).DirectCallsToDefinedFunctions;
}

// Update the internal state of the advisor, and force invalidate feature
// analysis. Currently, we maintain minimal (and very simple) global state - the
// number of functions and the number of static calls. We also keep track of the
// total IR size in this module, to stop misbehaving policies at a certain bloat
// factor (SizeIncreaseThreshold).
void MLInlineAdvisor::onSuccessfulInlining(const MLInlineAdvice &Advice,
                                           bool CalleeWasDeleted) {
  assert(!ForceStop);
  Function *Caller = Advice.getCaller();
  Function *Callee = Advice.getCallee();
  // The caller features aren't valid anymore.
  {
    PreservedAnalyses PA = PreservedAnalyses::all();
    PA.abandon<FunctionPropertiesAnalysis>();
    PA.abandon<DominatorTreeAnalysis>();
    PA.abandon<LoopAnalysis>();
    FAM.invalidate(*Caller, PA);
  }
  Advice.updateCachedCallerFPI(FAM);
  int64_t IRSizeAfter =
      getIRSize(*Caller) + (CalleeWasDeleted ? 0 : Advice.CalleeIRSize);
  CurrentIRSize += IRSizeAfter - (Advice.CallerIRSize + Advice.CalleeIRSize);
  if (CurrentIRSize > SizeIncreaseThreshold * InitialIRSize)
    ForceStop = true;

  // We can delta-update module-wide features. We know the inlining only changed
  // the caller, and maybe the callee (by deleting the latter).
  // Nodes are simple to update.
  // For edges, we 'forget' the edges that the caller and callee used to have
  // before inlining, and add back what they currently have together.
  int64_t NewCallerAndCalleeEdges =
      getCachedFPI(*Caller).DirectCallsToDefinedFunctions;

  if (CalleeWasDeleted)
    --NodeCount;
  else
    NewCallerAndCalleeEdges +=
        getCachedFPI(*Callee).DirectCallsToDefinedFunctions;
  EdgeCount += (NewCallerAndCalleeEdges - Advice.CallerAndCalleeEdges);
  assert(CurrentIRSize >= 0 && EdgeCount >= 0 && NodeCount >= 0);
}
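// Worked example of the edge bookkeeping above (hypothetical numbers): if the
// caller had 3 direct calls to defined functions before inlining and the
// callee had 2 (so Advice.CallerAndCalleeEdges == 5), the callee survives, and
// after inlining the caller has 4 such calls while the callee still has 2,
// then NewCallerAndCalleeEdges == 6 and EdgeCount grows by 6 - 5 == 1.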
int64_t MLInlineAdvisor::getModuleIRSize() const {
  int64_t Ret = 0;
  for (auto &F : M)
    if (!F.isDeclaration())
      Ret += getIRSize(F);
  return Ret;
}

FunctionPropertiesInfo &MLInlineAdvisor::getCachedFPI(Function &F) const {
  auto InsertPair =
      FPICache.insert(std::make_pair(&F, FunctionPropertiesInfo()));
  if (!InsertPair.second)
    return InsertPair.first->second;
  InsertPair.first->second = FAM.getResult<FunctionPropertiesAnalysis>(F);
  return InsertPair.first->second;
}
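// getAdviceImpl, below, proceeds roughly as follows: call sites in unreachable
// blocks are skipped; "never inline" and recursive cases take the mandatory
// advice path; if ForceStop is set or the cost analysis deems the site not
// inlinable, a no-op InlineAdvice is returned; otherwise the feature tensors
// are populated and the model is queried via getAdviceFromModel.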
std::unique_ptr<InlineAdvice> MLInlineAdvisor::getAdviceImpl(CallBase &CB) {
  if (auto Skip = getSkipAdviceIfUnreachableCallsite(CB))
    return Skip;

  auto &Caller = *CB.getCaller();
  auto &Callee = *CB.getCalledFunction();

  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };
  auto &TIR = FAM.getResult<TargetIRAnalysis>(Callee);
  auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(Caller);

  auto MandatoryKind = InlineAdvisor::getMandatoryKind(CB, FAM, ORE);
  // If this is a "never inline" case, there won't be any changes to internal
  // state we need to track, so we can just return the base InlineAdvice, which
  // will do nothing interesting.
  // Same thing if this is a recursive case.
  if (MandatoryKind == InlineAdvisor::MandatoryInliningKind::Never ||
      &Caller == &Callee)
    return getMandatoryAdvice(CB, false);

  bool Mandatory =
      MandatoryKind == InlineAdvisor::MandatoryInliningKind::Always;

  // If we need to stop, we don't want to track any more state changes, so
  // we just return the base InlineAdvice, which acts as a noop.
  if (ForceStop) {
    ORE.emit([&] {
      return OptimizationRemarkMissed(DEBUG_TYPE, "ForceStop", &CB)
             << "Won't attempt inlining because module size grew too much.";
    });
    return std::make_unique<InlineAdvice>(this, CB, ORE, Mandatory);
  }

  int CostEstimate = 0;
  if (!Mandatory) {
    auto IsCallSiteInlinable =
        llvm::getInliningCostEstimate(CB, TIR, GetAssumptionCache);
    if (!IsCallSiteInlinable) {
      // We can't inline this for correctness reasons, so return the base
      // InlineAdvice, as we don't care about tracking any state changes (which
      // won't happen).
      return std::make_unique<InlineAdvice>(this, CB, ORE, false);
    }
    CostEstimate = *IsCallSiteInlinable;
  }

  const auto CostFeatures =
      llvm::getInliningCostFeatures(CB, TIR, GetAssumptionCache);
  if (!CostFeatures) {
    return std::make_unique<InlineAdvice>(this, CB, ORE, false);
  }

  if (Mandatory)
    return getMandatoryAdvice(CB, true);

  auto NrCtantParams = 0;
  for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
    NrCtantParams += (isa<Constant>(*I));
  }

  auto &CallerBefore = getCachedFPI(Caller);
  auto &CalleeBefore = getCachedFPI(Callee);

  *ModelRunner->getTensor<int64_t>(FeatureIndex::CalleeBasicBlockCount) =
      CalleeBefore.BasicBlockCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallSiteHeight) =
      getInitialFunctionLevel(Caller);
  *ModelRunner->getTensor<int64_t>(FeatureIndex::NodeCount) = NodeCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::NrCtantParams) = NrCtantParams;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::EdgeCount) = EdgeCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallerUsers) =
      CallerBefore.Uses;
  *ModelRunner->getTensor<int64_t>(
      FeatureIndex::CallerConditionallyExecutedBlocks) =
      CallerBefore.BlocksReachedFromConditionalInstruction;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallerBasicBlockCount) =
      CallerBefore.BasicBlockCount;
  *ModelRunner->getTensor<int64_t>(
      FeatureIndex::CalleeConditionallyExecutedBlocks) =
      CalleeBefore.BlocksReachedFromConditionalInstruction;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CalleeUsers) =
      CalleeBefore.Uses;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CostEstimate) = CostEstimate;

  // Add the cost features
  for (size_t I = 0;
       I < static_cast<size_t>(InlineCostFeatureIndex::NumberOfFeatures); ++I) {
    *ModelRunner->getTensor<int64_t>(inlineCostFeatureToMlFeature(
        static_cast<InlineCostFeatureIndex>(I))) = CostFeatures->at(I);
  }

  return getAdviceFromModel(CB, ORE);
}
std::unique_ptr<MLInlineAdvice>
MLInlineAdvisor::getAdviceFromModel(CallBase &CB,
                                    OptimizationRemarkEmitter &ORE) {
  return std::make_unique<MLInlineAdvice>(
      this, CB, ORE, static_cast<bool>(ModelRunner->evaluate<int64_t>()));
}

std::unique_ptr<InlineAdvice>
MLInlineAdvisor::getSkipAdviceIfUnreachableCallsite(CallBase &CB) {
  if (!FAM.getResult<DominatorTreeAnalysis>(*CB.getCaller())
           .isReachableFromEntry(CB.getParent()))
    return std::make_unique<InlineAdvice>(this, CB, getCallerORE(CB), false);
  return nullptr;
}

std::unique_ptr<InlineAdvice> MLInlineAdvisor::getMandatoryAdvice(CallBase &CB,
                                                                  bool Advice) {
  // Make sure we track inlinings in all cases - mandatory or not.
  if (auto Skip = getSkipAdviceIfUnreachableCallsite(CB))
    return Skip;
  if (Advice && !ForceStop)
    return getMandatoryAdviceImpl(CB);

  // If this is a "never inline" case, there won't be any changes to internal
  // state we need to track, so we can just return the base InlineAdvice, which
  // will do nothing interesting.
  // Same if we are forced to stop - we don't track anymore.
  return std::make_unique<InlineAdvice>(this, CB, getCallerORE(CB), Advice);
}

std::unique_ptr<MLInlineAdvice>
MLInlineAdvisor::getMandatoryAdviceImpl(CallBase &CB) {
  return std::make_unique<MLInlineAdvice>(this, CB, getCallerORE(CB), true);
}
void MLInlineAdvisor::print(raw_ostream &OS) const {
  OS << "[MLInlineAdvisor] Nodes: " << NodeCount << " Edges: " << EdgeCount
     << " EdgesOfLastSeenNodes: " << EdgesOfLastSeenNodes << "\n";
  OS << "[MLInlineAdvisor] FPI:\n";
  for (auto I : FPICache) {
    OS << I.first->getName() << ":\n";
    I.second.print(OS);
    OS << "\n";
  }
  OS << "\n";
}

MLInlineAdvice::MLInlineAdvice(MLInlineAdvisor *Advisor, CallBase &CB,
                               OptimizationRemarkEmitter &ORE,
                               bool Recommendation)
    : InlineAdvice(Advisor, CB, ORE, Recommendation),
      CallerIRSize(Advisor->isForcedToStop() ? 0 : Advisor->getIRSize(*Caller)),
      CalleeIRSize(Advisor->isForcedToStop() ? 0 : Advisor->getIRSize(*Callee)),
      CallerAndCalleeEdges(Advisor->isForcedToStop()
                               ? 0
                               : (Advisor->getLocalCalls(*Caller) +
                                  Advisor->getLocalCalls(*Callee))),
      PreInlineCallerFPI(Advisor->getCachedFPI(*Caller)) {
  if (Recommendation)
    FPU.emplace(Advisor->getCachedFPI(*getCaller()), CB);
}
void MLInlineAdvice::reportContextForRemark(
    DiagnosticInfoOptimizationBase &OR) {
  using namespace ore;
  OR << NV("Callee", Callee->getName());
  for (size_t I = 0; I < NumberOfFeatures; ++I)
    OR << NV(FeatureMap[I].name(),
             *getAdvisor()->getModelRunner().getTensor<int64_t>(I));
  OR << NV("ShouldInline", isInliningRecommended());
}

void MLInlineAdvice::updateCachedCallerFPI(FunctionAnalysisManager &FAM) const {
  FPU->finish(FAM);
}

void MLInlineAdvice::recordInliningImpl() {
  ORE.emit([&]() {
    OptimizationRemark R(DEBUG_TYPE, "InliningSuccess", DLoc, Block);
    reportContextForRemark(R);
    return R;
  });
  getAdvisor()->onSuccessfulInlining(*this, /*CalleeWasDeleted*/ false);
}

void MLInlineAdvice::recordInliningWithCalleeDeletedImpl() {
  ORE.emit([&]() {
    OptimizationRemark R(DEBUG_TYPE, "InliningSuccessWithCalleeDeleted", DLoc,
                         Block);
    reportContextForRemark(R);
    return R;
  });
  getAdvisor()->onSuccessfulInlining(*this, /*CalleeWasDeleted*/ true);
}

void MLInlineAdvice::recordUnsuccessfulInliningImpl(
    const InlineResult &Result) {
  getAdvisor()->getCachedFPI(*Caller) = PreInlineCallerFPI;
  ORE.emit([&]() {
    OptimizationRemarkMissed R(DEBUG_TYPE, "InliningAttemptedAndUnsuccessful",
                               DLoc, Block);
    reportContextForRemark(R);
    return R;
  });
}

void MLInlineAdvice::recordUnattemptedInliningImpl() {
  assert(!FPU);
  ORE.emit([&]() {
    OptimizationRemarkMissed R(DEBUG_TYPE, "IniningNotAttempted", DLoc, Block);
    reportContextForRemark(R);
    return R;
  });
}