36#if defined(LLVM_HAVE_TFLITE)
45 "regalloc-priority-interactive-channel-base",
cl::Hidden,
47 "Base file path for the interactive mode. The incoming filename should "
48 "have the name <regalloc-priority-interactive-channel-base>.in, while "
49 "the outgoing name should be "
50 "<regalloc-priority-interactive-channel-base>.out"));
55#ifdef LLVM_HAVE_TFLITE
61 cl::desc(
"Training log for the register allocator priority model"));
65 cl::desc(
"The model being trained for register allocation priority"));
73#define RA_PRIORITY_FEATURES_LIST(M) \
74 M(int64_t, li_size, PerLiveRangeShape, "size") \
75 M(int64_t, stage, PerLiveRangeShape, "stage") \
76 M(float, weight, PerLiveRangeShape, "weight")
78#define DecisionName "priority"
85#define _FEATURE_IDX(_, name, __, ___) name,
112#define _DECL_FEATURES(type, name, shape, _) \
113 TensorSpec::createSpec<type>(#name, shape),
140 std::unique_ptr<RegAllocPriorityAdvisor>
144 Runner = std::make_unique<ReleaseModeModelRunner<CompiledModelType>>(
147 Runner = std::make_unique<InteractiveModelRunner>(
152 return std::make_unique<MLPriorityAdvisor>(
153 MF,
RA, &getAnalysis<SlotIndexes>(), Runner.get());
155 std::unique_ptr<MLModelRunner> Runner;
163#ifdef LLVM_HAVE_TFLITE
164static const TensorSpec Reward = TensorSpec::createSpec<float>(
"reward", {1});
166#define _DECL_TRAIN_FEATURES(type, name, shape, _) \
167 TensorSpec::createSpec<type>(std::string("action_") + #name, shape),
169static const std::vector<TensorSpec> TrainingInputFeatures{
171 TensorSpec::createSpec<float>(
"action_discount", {1}),
172 TensorSpec::createSpec<int32_t>(
"action_step_type", {1}),
173 TensorSpec::createSpec<float>(
"action_reward", {1})}};
174#undef _DECL_TRAIN_FEATURES
184 unsigned getPriority(
const LiveInterval &LI)
const override;
188class DevelopmentModePriorityAdvisorAnalysis final
191 DevelopmentModePriorityAdvisorAnalysis()
195 return R->getAdvisorMode() == AdvisorMode::Development;
200 if (!Log || !Log->hasAnyObservationForContext(MF.
getName()))
206 if (Log->currentContext() != MF.
getName()) {
208 "The training log context shouldn't have had changed.");
210 if (Log->hasObservationInProgress())
211 Log->logReward<
float>(GetReward());
222 bool doInitialization(
Module &M)
override {
224 if (ModelUnderTraining.empty() && TrainingLog.empty()) {
225 Ctx.
emitError(
"Regalloc development mode should be requested with at "
226 "least logging enabled and/or a training model");
229 if (ModelUnderTraining.empty())
230 Runner = std::make_unique<NoInferenceModelRunner>(Ctx,
InputFeatures);
232 Runner = ModelUnderTrainingRunner::createAndEnsureValid(
233 Ctx, ModelUnderTraining,
DecisionName, TrainingInputFeatures);
235 Ctx.
emitError(
"Regalloc: could not set up the model runner");
238 if (TrainingLog.empty())
241 auto OS = std::make_unique<raw_fd_ostream>(TrainingLog, EC);
243 M.getContext().emitError(
EC.message() +
":" + TrainingLog);
247 if (
auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(Runner.get()))
254 Log = std::make_unique<Logger>(std::move(
OS), LFS, Reward,
259 std::unique_ptr<RegAllocPriorityAdvisor>
264 Log->switchContext(MF.
getName());
267 return std::make_unique<DevelopmentModePriorityAdvisor>(
268 MF,
RA, &getAnalysis<SlotIndexes>(), Runner.get(), Log.get());
271 std::unique_ptr<MLModelRunner> Runner;
272 std::unique_ptr<Logger> Log;
279 return llvm::isEmbeddedModelEvaluatorValid<CompiledModelType>() ||
300 *Runner->
getTensor<int64_t>(1) =
static_cast<int64_t
>(Stage);
310#ifdef LLVM_HAVE_TFLITE
312 return new DevelopmentModePriorityAdvisorAnalysis();
316DevelopmentModePriorityAdvisor::getPriority(
const LiveInterval &LI)
const {
319 if (isa<ModelUnderTrainingRunner>(getRunner())) {
322 Prio = getDefaultAdvisor().getPriority(LI);
325 if (TrainingLog.empty())
331 if (Log->hasObservationInProgress())
332 Log->logReward<
float>(0.0);
334 Log->startObservation();
335 size_t CurrentFeature = 0;
336 for (; CurrentFeature <
InputFeatures.size(); ++CurrentFeature) {
337 Log->logTensorValue(CurrentFeature,
338 reinterpret_cast<const char *
>(
339 getRunner().getTensorUntyped(CurrentFeature)));
342 if (
auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(&getRunner())) {
343 for (
size_t I = 0;
I < MUTR->extraOutputsForLoggingSpecs().size();
344 ++
I, ++CurrentFeature)
347 reinterpret_cast<const char *
>(MUTR->getUntypedExtraOutputValue(
I)));
350 float Ret =
static_cast<float>(Prio);
351 Log->logTensorValue(CurrentFeature,
reinterpret_cast<const char *
>(&Ret));
352 Log->endObservation();
354 return static_cast<unsigned>(Prio);
static cl::opt< std::string > InteractiveChannelBaseName("inliner-interactive-channel-base", cl::Hidden, cl::desc("Base file path for the interactive mode. The incoming filename should " "have the name <inliner-interactive-channel-base>.in, while the " "outgoing name should be <inliner-interactive-channel-base>.out"))
#define _FEATURE_IDX(A, B, C, D)
#define _DECL_FEATURES(type, name, shape, _)
static cl::opt< std::string > InteractiveChannelBaseName("regalloc-priority-interactive-channel-base", cl::Hidden, cl::desc("Base file path for the interactive mode. The incoming filename should " "have the name <regalloc-priority-interactive-channel-base>.in, while " "the outgoing name should be " "<regalloc-priority-interactive-channel-base>.out"))
#define RA_PRIORITY_FEATURES_LIST(M)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI optimize exec mask operations pre RA
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
This is an important class for using LLVM in a threaded context.
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location information.
LiveInterval - This class represents the liveness of a register, or stack slot.
unsigned getSize() const
getSize - Returns the sum of sizes of all the LiveRange's.
Logging utility - given an ordered specification of features, and assuming a scalar reward, allows logging feature values and rewards.
MLModelRunner interface: abstraction of a mechanism for evaluating a tensorflow "saved model".
virtual void switchContext(StringRef Name)
T * getTensor(I FeatureID)
const MLModelRunner & getRunner() const
MLPriorityAdvisor(const MachineFunction &MF, const RAGreedy &RA, SlotIndexes *const Indexes, MLModelRunner *Runner)
const RegAllocPriorityAdvisor & getDefaultAdvisor() const
unsigned getPriority(const LiveInterval &LI) const override
Find the priority value for a live range.
float getPriorityImpl(const LiveInterval &LI) const
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
Function & getFunction()
Return the LLVM function that this machine code represents.
A Module instance is used to store all the information related to an LLVM module.
A mock class satisfying the interface expected by ReleaseModeModelRunner for its TGen parameter.
const ExtraRegInfo & getExtraInfo() const
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do their job.
Interface to the priority advisor, which is responsible for prioritizing live ranges.
SlotIndexes *const Indexes
ReleaseModePriorityAdvisorAnalysis()
static bool classof(const RegAllocPriorityAdvisorAnalysis *R)
An efficient, type-erasing, non-owning reference to a callable.
This is an optimization pass for GlobalISel generic memory operations.
RegAllocPriorityAdvisorAnalysis * createReleaseModePriorityAdvisor()
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
static const TensorSpec DecisionSpec
const char *const DecisionName
static const std::vector< TensorSpec > InputFeatures
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
RegAllocPriorityAdvisorAnalysis * createDevelopmentModePriorityAdvisor()
static const std::vector< int64_t > PerLiveRangeShape