//===- MLInlineAdvisor.h - ML - based InlineAdvisor factories ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MLINLINEADVISOR_H
#define LLVM_ANALYSIS_MLINLINEADVISOR_H

#include "llvm/Analysis/FunctionPropertiesAnalysis.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/IR/PassManager.h"

#include <map>
#include <memory>
#include <optional>

namespace llvm {
class DiagnosticInfoOptimizationBase;
class Module;
class MLInlineAdvice;
class ProfileSummaryInfo;

class MLInlineAdvisor : public InlineAdvisor {
public:
  MLInlineAdvisor(Module &M, ModuleAnalysisManager &MAM,
                  std::function<std::unique_ptr<MLModelRunner>(
                      const std::vector<TensorSpec> &)>
                      GetModelRunner,
                  std::function<bool(CallBase &)> GetDefaultAdvice);

  virtual ~MLInlineAdvisor() = default;

  void onPassEntry(LazyCallGraph::SCC *SCC) override;
  void onPassExit(LazyCallGraph::SCC *SCC) override;

  int64_t getIRSize(Function &F) const {
    return getCachedFPI(F).TotalInstructionCount;
  }
  void onSuccessfulInlining(const MLInlineAdvice &Advice,
                            bool CalleeWasDeleted);

  bool isForcedToStop() const { return ForceStop; }
  int64_t getLocalCalls(Function &F);
  const MLModelRunner &getModelRunner() const { return *ModelRunner; }
  FunctionPropertiesInfo &getCachedFPI(Function &) const;
  const std::vector<TensorSpec> &getFeatureMap() const { return FeatureMap; };
  static const std::vector<TensorSpec> &getInitialFeatureMap();

protected:
  std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) override;

  std::unique_ptr<InlineAdvice> getMandatoryAdvice(CallBase &CB,
                                                   bool Advice) override;

  virtual std::unique_ptr<MLInlineAdvice> getMandatoryAdviceImpl(CallBase &CB);

  virtual std::unique_ptr<MLInlineAdvice>
  getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE);

  // Get the initial 'level' of the function, or 0 if the function has been
  // introduced afterwards.
  // TODO: should we keep this updated?
  unsigned getInitialFunctionLevel(const Function &F) const;

  std::unique_ptr<MLModelRunner> ModelRunner;
  std::function<bool(CallBase &)> GetDefaultAdvice;
  std::vector<TensorSpec> FeatureMap;

private:
  int64_t getModuleIRSize() const;
  std::unique_ptr<InlineAdvice>
  getSkipAdviceIfUnreachableCallsite(CallBase &CB);
  void print(raw_ostream &OS) const override;

  // Using std::map to benefit from its iterator / reference non-invalidating
  // semantics, which make it easy to use `getCachedFPI` results from multiple
  // calls without needing to copy to avoid invalidation effects.
  mutable std::map<const Function *, FunctionPropertiesInfo> FPICache;

  LazyCallGraph &CG;

  int64_t NodeCount = 0;
  int64_t EdgeCount = 0;
  int64_t EdgesOfLastSeenNodes = 0;
  const bool UseIR2Vec;

  std::map<const LazyCallGraph::Node *, unsigned> FunctionLevels;
  const int32_t InitialIRSize = 0;
  int32_t CurrentIRSize = 0;
  llvm::SmallPtrSet<const LazyCallGraph::Node *, 1> NodesInLastSCC;
  DenseSet<const LazyCallGraph::Node *> AllNodes;
  DenseSet<Function *> DeadFunctions;
  bool ForceStop = false;
  ProfileSummaryInfo &PSI;
};
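
// Illustrative usage (editorial addition, not part of the upstream header): a
// minimal sketch of how a client could construct an MLInlineAdvisor. The
// constructor and callback signatures match the declarations above; the
// MyModelRunner type and makeAdvisor helper are hypothetical stand-ins for a
// concrete MLModelRunner implementation chosen by the client.
//
//   std::unique_ptr<InlineAdvisor> makeAdvisor(Module &M,
//                                              ModuleAnalysisManager &MAM) {
//     return std::make_unique<MLInlineAdvisor>(
//         M, MAM,
//         // Factory invoked with the advisor's feature map; the returned
//         // runner is stored in ModelRunner and queried per callsite.
//         [&M](const std::vector<TensorSpec> &Features) {
//           return std::make_unique<MyModelRunner>(M.getContext(), Features);
//         },
//         // Fallback heuristic consulted when the advisor falls back to the
//         // default (non-ML) policy.
//         [](CallBase &CB) { return false; });
//   }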

/// InlineAdvice that tracks changes post inlining. For that reason, it only
/// overrides the "successful inlining" extension points.
class MLInlineAdvice : public InlineAdvice {
public:
  MLInlineAdvice(MLInlineAdvisor *Advisor, CallBase &CB,
                 OptimizationRemarkEmitter &ORE, bool Recommendation);
  virtual ~MLInlineAdvice() = default;

  void recordInliningImpl() override;
  void recordInliningWithCalleeDeletedImpl() override;
  void recordUnsuccessfulInliningImpl(const InlineResult &Result) override;
  void recordUnattemptedInliningImpl() override;

  Function *getCaller() const { return Caller; }
  Function *getCallee() const { return Callee; }

  const int64_t CallerIRSize;
  const int64_t CalleeIRSize;
  const int64_t CallerAndCalleeEdges;
  void updateCachedCallerFPI(FunctionAnalysisManager &FAM) const;

private:
  void reportContextForRemark(DiagnosticInfoOptimizationBase &OR);
  MLInlineAdvisor *getAdvisor() const {
    return static_cast<MLInlineAdvisor *>(Advisor);
  };
  // Make a copy of the FPI of the caller right before inlining. If inlining
  // fails, we can just update the cache with that value.
  const FunctionPropertiesInfo PreInlineCallerFPI;
  std::optional<FunctionPropertiesUpdater> FPU;
};
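
// Illustrative advice lifecycle (editorial addition, not part of the upstream
// header): a hedged sketch of how an inliner driver typically interacts with
// an advice object obtained from the advisor. The record* calls are the public
// InlineAdvice wrappers around the *Impl extension points overridden above;
// tryToInline is a hypothetical stand-in for the actual inlining step.
//
//   std::unique_ptr<InlineAdvice> Advice = Advisor.getAdvice(CB);
//   if (!Advice->isInliningRecommended()) {
//     Advice->recordUnattemptedInlining();
//   } else if (InlineResult Result = tryToInline(CB); Result.isSuccess()) {
//     // For MLInlineAdvice this triggers onSuccessfulInlining, updating IR
//     // sizes, edge counts, and the caller's cached FunctionPropertiesInfo.
//     Advice->recordInlining();
//   } else {
//     // Restores the caller's cached FPI from the pre-inlining snapshot.
//     Advice->recordUnsuccessfulInlining(Result);
//   }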

} // namespace llvm

#endif // LLVM_ANALYSIS_MLINLINEADVISOR_H