1//===- MLRegAllocEvictAdvisor.cpp - ML eviction advisor -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Implementation of the ML eviction advisor and reward injection pass
10//
11//===----------------------------------------------------------------------===//
12
13#include "AllocationOrder.h"
14#include "RegAllocGreedy.h"
19#if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL) || defined(LLVM_HAVE_TFLITE)
23#endif
32#include "llvm/CodeGen/Passes.h"
35#include "llvm/IR/Module.h"
37#include "llvm/Pass.h"
38#include "llvm/PassRegistry.h"
41
42#include <array>
43#include <bitset>
44#include <memory>
45#include <unordered_map>
46
47using namespace llvm;
48
49#define DEBUG_TYPE "ml-regalloc"
50
51// Generated header in release (AOT) mode
52#if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL)
53#include "RegAllocEvictModel.h"
54using CompiledModelType = RegAllocEvictModel;
55#else
57#endif
58
60 "regalloc-evict-interactive-channel-base", cl::Hidden,
62 "Base file path for the interactive mode. The incoming filename should "
63 "have the name <regalloc-evict-interactive-channel-base>.in, while the "
64 "outgoing name should be "
65 "<regalloc-evict-interactive-channel-base>.out"));
66
68 "mlregalloc-max-eviction-count", cl::Hidden,
69 cl::desc("The maximum number of times a live range can be "
70 "evicted before preventing it from being evicted"),
71 cl::init(100));
72
73// Options that only make sense in development mode
74#ifdef LLVM_HAVE_TFLITE
75#include "RegAllocScore.h"
77
78static cl::opt<std::string> TrainingLog(
79 "regalloc-training-log", cl::Hidden,
80 cl::desc("Training log for the register allocator eviction model"));
81
82static cl::opt<std::string> ModelUnderTraining(
83 "regalloc-model", cl::Hidden,
84 cl::desc("The model being trained for register allocation eviction"));
85
86#endif // #ifdef LLVM_HAVE_TFLITE
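// Illustrative development-mode invocation (a sketch under the flags above,
// not prescribed by this file): with a TFLite-enabled build, capturing a
// training log of the default advisor's decisions looks roughly like
//
//   llc -O2 -regalloc-enable-advisor=development \
//       -regalloc-training-log=/tmp/evict.log foo.ll
//
// and additionally passing -regalloc-model=<saved-model-dir> evaluates a model
// under training instead of just logging. The paths are hypothetical;
// -regalloc-enable-advisor is defined with the advisor infrastructure.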
87
88/// The score injection pass.
89/// This pass calculates the score for a function and inserts it in the log, but
90/// this happens only in development mode. It's a no-op otherwise.
91namespace llvm {
93} // namespace llvm
94
95namespace {
96class RegAllocScoring : public MachineFunctionPass {
97public:
98 static char ID;
99
100 RegAllocScoring() : MachineFunctionPass(ID) {}
101
102 ~RegAllocScoring() override = default;
103
104 StringRef getPassName() const override {
105 return "Register Allocation Pass Scoring";
106 }
107
108 /// RegAllocReward analysis usage.
109 void getAnalysisUsage(AnalysisUsage &AU) const override {
110 AU.setPreservesAll();
111 AU.addRequired<RegAllocEvictionAdvisorAnalysisLegacy>();
112 AU.addRequired<RegAllocPriorityAdvisorAnalysisLegacy>();
113 AU.addRequired<MachineBlockFrequencyInfoWrapperPass>();
115 }
116
117 /// Performs this pass
118 bool runOnMachineFunction(MachineFunction &) override;
119};
120} // namespace
121
122char RegAllocScoring::ID = 0;
124 return new RegAllocScoring();
125}
126
127INITIALIZE_PASS(RegAllocScoring, "regallocscoringpass",
128 "Register Allocation Scoring Pass", false, false)
129
130// ===================================
131// Common ML Advisor declarations
132// ===================================
133namespace {
134// Most features are as described above, so we'll reuse this vector in defining
135// them.
136static const std::vector<int64_t> PerLiveRangeShape{1, NumberOfInterferences};
137
138// --------------
139// Features table
140// --------------
141// For each interfering live range (incl. the candidate) we collect a number of
142// features. However, because the features are of different types (and because
143// of ML best practices), we organize the tensors per feature, not per
144// candidate. Each such tensor has a scalar value corresponding to the
145// interfering live range at that position, in the order given by AllocationOrder.
146// The last position corresponds to the virt reg seeking allocation.
147// The exception to all that is the progression feature, which is just a scalar (see
148// its documentation for details).
149// Note on naming: the "_by_max" features are normalized using the largest value
150// of that tensor, as observed in the current decision-making stage (i.e. for the
151// current call to the advisor's tryFindEvictionCandidate).
152//
153// The feature list format: type, name, shape, documentation.
154// Note: we can really just use int64 and float, hence the modeling of some
155// bools as int64 values.
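//
// A small layout sketch (illustrative numbers only): if NumberOfInterferences
// were 4, every PerLiveRangeShape feature would be a 1x4 tensor whose leading
// columns describe the interferences at the corresponding AllocationOrder
// positions and whose last column describes the candidate virtual register:
//
//   liverange_size : [ 12.0, 80.0, 25.0, 40.0 ]  // one scalar per position
//   progress       : [ 0.37 ]                    // lone scalar, shape {1}
//
// Positions that are not legally available keep their zero-initialized values
// and have mask == 0.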
156#define RA_EVICT_FEATURES_LIST(M) \
157 M(int64_t, mask, PerLiveRangeShape, \
158 "boolean values, 0 for unavailable candidates (i.e. if a position is 0, " \
159 "it " \
160 "can't be evicted)") \
161 M(int64_t, is_free, PerLiveRangeShape, \
162 "boolean values, 1 if this phys reg is actually free (no interferences)") \
163 M(float, nr_urgent, PerLiveRangeShape, \
164 "number of 'urgent' intervals, normalized. Urgent are those that are OK " \
165 "to break cascades") \
166 M(float, nr_broken_hints, PerLiveRangeShape, \
167 "if this position were evicted, how many broken hints would there be") \
168 M(int64_t, is_hint, PerLiveRangeShape, \
169 "is this a preferred phys reg for the candidate") \
170 M(int64_t, is_local, PerLiveRangeShape, \
171 "is this live range local to a basic block") \
172 M(float, nr_rematerializable, PerLiveRangeShape, \
173 "nr rematerializable ranges") \
174 M(float, nr_defs_and_uses, PerLiveRangeShape, \
175 "bb freq - weighed nr defs and uses") \
176 M(float, weighed_reads_by_max, PerLiveRangeShape, \
177 "bb freq - weighed nr of reads, normalized") \
178 M(float, weighed_writes_by_max, PerLiveRangeShape, \
179 "bb feq - weighed nr of writes, normalized") \
180 M(float, weighed_read_writes_by_max, PerLiveRangeShape, \
181 "bb freq - weighed nr of uses that are both read and writes, normalized") \
182 M(float, weighed_indvars_by_max, PerLiveRangeShape, \
183 "bb freq - weighed nr of uses that are indvars, normalized") \
184 M(float, hint_weights_by_max, PerLiveRangeShape, \
185 "bb freq - weighed nr of uses that are hints, normalized") \
186 M(float, start_bb_freq_by_max, PerLiveRangeShape, \
187 "the freq in the start block, normalized") \
188 M(float, end_bb_freq_by_max, PerLiveRangeShape, \
189 "freq of end block, normalized") \
190 M(float, hottest_bb_freq_by_max, PerLiveRangeShape, \
191 "hottest BB freq, normalized") \
192 M(float, liverange_size, PerLiveRangeShape, \
193 "size (instr index diff) of the LR") \
194 M(float, use_def_density, PerLiveRangeShape, \
195 "the max weight, as computed by the manual heuristic") \
196 M(int64_t, max_stage, PerLiveRangeShape, \
197 "largest stage of an interval in this LR") \
198 M(int64_t, min_stage, PerLiveRangeShape, \
199 "lowest stage of an interval in this LR") \
200 M(float, progress, {1}, "ratio of current queue size to initial size")
201
202// The model learns to pick one of the mask == 1 interferences. This is the
203// name of the output tensor. The contract with the model is that the output
204// will be guaranteed to be a mask == 1 position. Using a macro here to
205// avoid 'not used' warnings (and keep conditional compilation to a minimum).
206#define DecisionName "index_to_evict"
207static const TensorSpec DecisionSpec =
209
210// Named features index.
211enum FeatureIDs {
212#define _FEATURE_IDX_SIMPLE(_, name, __, ___) name
213#define _FEATURE_IDX(A, B, C, D) _FEATURE_IDX_SIMPLE(A, B, C, D),
215#undef _FEATURE_IDX
216#undef _FEATURE_IDX_SIMPLE
217};
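// Expanded, the enum above reads roughly as
//   enum FeatureIDs { mask, is_free, nr_urgent, ..., progress, FeatureCount };
// so each feature name doubles as its index into the model runner's tensors.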
218
219// The ML advisor will typically have a sparse input to the evaluator, because
220// various phys regs won't be available. It's easier (maintenance-wise) to
221// bulk-reset the state of the evaluator each time we are about to use it
222// again.
223template <typename T> size_t getTotalSize(const std::vector<int64_t> &Shape) {
224 size_t Ret = sizeof(T);
225 for (const auto V : Shape)
226 Ret *= V;
227 return Ret;
228}
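// For example, getTotalSize<float>(PerLiveRangeShape) is
// sizeof(float) * 1 * NumberOfInterferences bytes - the footprint of one
// per-live-range feature tensor, which is exactly what resetInputs() below
// zeroes for each feature.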
229
230void resetInputs(MLModelRunner &Runner) {
231#define _RESET(TYPE, NAME, SHAPE, __) \
232 std::memset(Runner.getTensorUntyped(FeatureIDs::NAME), 0, \
233 getTotalSize<TYPE>(SHAPE));
235#undef _RESET
236}
237
238// Per-live interval components that get aggregated into the feature values
239// that will be passed to the evaluator.
240struct LIFeatureComponents {
241 double R = 0;
242 double W = 0;
243 double RW = 0;
244 double IndVarUpdates = 0;
245 double HintWeights = 0.0;
246 int64_t NumDefsAndUses = 0;
247 float HottestBlockFreq = 0.0;
248 bool IsRemat = false;
249};
250
251using CandidateRegList =
252 std::array<std::pair<MCRegister, bool>, NumberOfInterferences>;
253using FeaturesListNormalizer =
255
256/// The ML evictor (commonalities between release and development mode)
257class MLEvictAdvisor : public RegAllocEvictionAdvisor {
258public:
259 MLEvictAdvisor(const MachineFunction &MF, const RAGreedy &RA,
260 MLModelRunner *Runner, const MachineBlockFrequencyInfo &MBFI,
261 const MachineLoopInfo &Loops);
262
263protected:
264 const RegAllocEvictionAdvisor &getDefaultAdvisor() const {
265 return static_cast<const RegAllocEvictionAdvisor &>(DefaultAdvisor);
266 }
267
268 // The assumption is that if the Runner could not be constructed, we emitted
269 // an error, and we shouldn't be asking for it here.
270 const MLModelRunner &getRunner() const { return *Runner; }
271
272 /// This just calls Evaluate on the Runner, but in the development mode
273 /// case, if we're just capturing the log of the default advisor, it needs
274 /// to call the latter instead, so we need to pass all the necessary
275 /// parameters for it. In the development case, it will also log.
276 virtual int64_t
277 tryFindEvictionCandidatePosition(const LiveInterval &VirtReg,
278 const AllocationOrder &Order,
279 unsigned OrderLimit, uint8_t CostPerUseLimit,
280 const SmallVirtRegSet &FixedRegisters) const;
281
282 /// Load the features of the given VirtReg (allocated or not) at column Pos,
283 /// but if that can't be evicted, return false instead.
284 bool
285 loadInterferenceFeatures(const LiveInterval &VirtReg, MCRegister PhysReg,
286 bool IsHint, const SmallVirtRegSet &FixedRegisters,
287 llvm::SmallVectorImpl<float> &Largest, size_t Pos,
288 SmallVectorImpl<LRStartEndInfo> &LRPosInfo) const;
289
290private:
291 static float getInitialQueueSize(const MachineFunction &MF);
292
294 const LiveInterval &VirtReg, const AllocationOrder &Order,
295 uint8_t CostPerUseLimit,
296 const SmallVirtRegSet &FixedRegisters) const override;
297
298 void extractFeatures(const SmallVectorImpl<const LiveInterval *> &Intervals,
299 llvm::SmallVectorImpl<float> &Largest, size_t Pos,
300 int64_t IsHint, int64_t LocalIntfsCount, float NumUrgent,
301 SmallVectorImpl<LRStartEndInfo> &LRPosInfo) const;
302
303 // Point-in-time: we didn't learn this, so we always delegate to the
304 // default.
306 const LiveInterval &VirtReg, MCRegister PhysReg,
307 const SmallVirtRegSet &FixedRegisters) const override {
308 return getDefaultAdvisor().canEvictHintInterference(VirtReg, PhysReg,
309 FixedRegisters);
310 }
311
312 const LIFeatureComponents &
313 getLIFeatureComponents(const LiveInterval &LI) const;
314
315 // Hold on to a default advisor for:
316 // 1) the implementation of canEvictHintInterference, because we didn't
317 // learn that nuance yet; 2) bootstrapping (logging) in the development
318 // mode case.
319 const DefaultEvictionAdvisor DefaultAdvisor;
320 MLModelRunner *const Runner;
321 const MachineBlockFrequencyInfo &MBFI;
322 const MachineLoopInfo &Loops;
323
324 // Indices of those features we don't want to normalize.
325 // This could be static and shared, but its initialization is non-trivial.
326 std::bitset<FeatureIDs::FeatureCount> DoNotNormalize;
327 const float InitialQSize;
328
329 using RegID = unsigned;
330 mutable DenseMap<RegID, LIFeatureComponents> CachedFeatures;
331
332 mutable std::unordered_map<unsigned, unsigned> VirtRegEvictionCounts;
333
334 void onEviction(Register RegBeingEvicted) const {
335 // If we cannot find the virtual register in the map, we just assume it has
336 // not been evicted before and thus has a value of zero (which is what the
337 // subscript operator returns by default).
338 ++VirtRegEvictionCounts[RegBeingEvicted.id()];
339 }
340
341 unsigned getEvictionCount(Register Reg) const {
342 auto EvictionCountIt = VirtRegEvictionCounts.find(Reg.id());
343 if (EvictionCountIt != VirtRegEvictionCounts.end())
344 return EvictionCountIt->second;
345 return 0;
346 }
347};
348
349#define _DECL_FEATURES(type, name, shape, _) \
350 TensorSpec::createSpec<type>(#name, shape),
351
352// ===================================
353// Release (AOT) - specifics
354// ===================================
355/// Common provider for legacy and new pass managers.
356class ReleaseModeEvictionAdvisorProvider final
358public:
359 ReleaseModeEvictionAdvisorProvider(LLVMContext &Ctx)
360 : RegAllocEvictionAdvisorProvider(AdvisorMode::Release, Ctx) {
362 }
363 // support for isa<> and dyn_cast.
364 static bool classof(const RegAllocEvictionAdvisorProvider *R) {
365 return R->getAdvisorMode() == AdvisorMode::Release;
366 }
367
368 std::unique_ptr<RegAllocEvictionAdvisor>
369 getAdvisor(const MachineFunction &MF, const RAGreedy &RA,
371 if (!Runner) {
372 if (InteractiveChannelBaseName.empty())
373 Runner = std::make_unique<ReleaseModeModelRunner<CompiledModelType>>(
374 MF.getFunction().getContext(), InputFeatures, DecisionName);
375 else
376 Runner = std::make_unique<InteractiveModelRunner>(
380 }
381 assert(MBFI && Loops &&
382 "Invalid provider state: must have analysis available");
383 return std::make_unique<MLEvictAdvisor>(MF, RA, Runner.get(), *MBFI,
384 *Loops);
385 }
386
387private:
388 std::vector<TensorSpec> InputFeatures;
389 std::unique_ptr<MLModelRunner> Runner;
390};
391
392class ReleaseModeEvictionAdvisorAnalysisLegacy final
394public:
395 ReleaseModeEvictionAdvisorAnalysisLegacy()
396 : RegAllocEvictionAdvisorAnalysisLegacy(AdvisorMode::Release) {}
397
398 void logRewardIfNeeded(const MachineFunction &MF,
399 llvm::function_ref<float()> GetReward) override {
400 // No-op in release mode
401 }
402
403 bool doInitialization(Module &M) override {
404 Provider =
405 std::make_unique<ReleaseModeEvictionAdvisorProvider>(M.getContext());
406 return false;
407 }
408
409 static bool classof(const RegAllocEvictionAdvisorAnalysisLegacy *R) {
410 return R->getAdvisorMode() == AdvisorMode::Release;
411 }
412
413 void getAnalysisUsage(AnalysisUsage &AU) const override {
417 }
418};
419
420// ===================================
421// Development mode-specifics
422// ===================================
423//
424// Features we log
425#ifdef LLVM_HAVE_TFLITE
426static const TensorSpec Reward = TensorSpec::createSpec<float>("reward", {1});
427
428// Features we bind on the model. The tensor names have a prefix, and we also
429// need to include some tensors that are expected to be present by the
430// training algo.
431// TODO: can we just get rid of these?
432#define _DECL_TRAIN_FEATURES(type, name, shape, _) \
433 TensorSpec::createSpec<type>(std::string("action_") + #name, shape),
434
435class DevelopmentModeEvictAdvisor : public MLEvictAdvisor {
436public:
437 DevelopmentModeEvictAdvisor(const MachineFunction &MF, const RAGreedy &RA,
438 MLModelRunner *Runner,
439 const MachineBlockFrequencyInfo &MBFI,
440 const MachineLoopInfo &Loops, Logger *Log)
441 : MLEvictAdvisor(MF, RA, Runner, MBFI, Loops), Log(Log) {}
442
443private:
444 int64_t tryFindEvictionCandidatePosition(
445 const LiveInterval &VirtReg, const AllocationOrder &Order,
446 unsigned OrderLimit, uint8_t CostPerUseLimit,
447 const SmallVirtRegSet &FixedRegisters) const override;
448
449 Logger *const Log;
450};
451
452class DevelopmentModeEvictionAdvisorProvider final
454public:
455 DevelopmentModeEvictionAdvisorProvider(LLVMContext &Ctx)
456 : RegAllocEvictionAdvisorProvider(AdvisorMode::Development, Ctx) {
458 TrainingInputFeatures = {
459 RA_EVICT_FEATURES_LIST(_DECL_TRAIN_FEATURES)
460 TensorSpec::createSpec<float>("action_discount", {1}),
461 TensorSpec::createSpec<int32_t>("action_step_type", {1}),
462 TensorSpec::createSpec<float>("action_reward", {1})};
463 if (ModelUnderTraining.empty() && TrainingLog.empty()) {
464 Ctx.emitError("Regalloc development mode should be requested with at "
465 "least logging enabled and/or a training model");
466 return;
467 }
468 if (ModelUnderTraining.empty())
469 Runner = std::make_unique<NoInferenceModelRunner>(Ctx, InputFeatures);
470 else
471 Runner = ModelUnderTrainingRunner::createAndEnsureValid(
472 Ctx, ModelUnderTraining, DecisionName, TrainingInputFeatures);
473 if (!Runner) {
474 Ctx.emitError("Regalloc: could not set up the model runner");
475 return;
476 }
477 if (TrainingLog.empty())
478 return;
479 std::error_code EC;
480 auto OS = std::make_unique<raw_fd_ostream>(TrainingLog, EC);
481 if (EC) {
482 Ctx.emitError(EC.message() + ":" + TrainingLog);
483 return;
484 }
485 std::vector<TensorSpec> LFS = InputFeatures;
486 if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(Runner.get()))
487 append_range(LFS, MUTR->extraOutputsForLoggingSpecs());
488 // We always log the output; in particular, if we're not evaluating, we
489 // don't have an output spec json file. That's why we handle the
490 // 'normal' output separately.
491 LFS.push_back(DecisionSpec);
492
493 Log = std::make_unique<Logger>(std::move(OS), LFS, Reward,
494 /*IncludeReward*/ true);
495 return;
496 }
497
498 // support for isa<> and dyn_cast.
499 static bool classof(const RegAllocEvictionAdvisorProvider *R) {
500 return R->getAdvisorMode() == AdvisorMode::Development;
501 }
502
503 void logRewardIfNeeded(const MachineFunction &MF,
504 llvm::function_ref<float()> GetReward) override {
505 if (!Log || !Log->hasAnyObservationForContext(MF.getName()))
506 return;
507 // The function pass manager would run all the function passes for a
508 // function, so we assume the last context belongs to this function. If
509 // this invariant ever changes, we can implement context switching at that
510 // time. For now, a context mismatch is an error.
511 if (Log->currentContext() != MF.getName()) {
513 "The training log context shouldn't have had changed.");
514 }
515 if (Log->hasObservationInProgress())
516 Log->logReward<float>(GetReward());
517 }
518
519 std::unique_ptr<RegAllocEvictionAdvisor>
520 getAdvisor(const MachineFunction &MF, const RAGreedy &RA,
522 if (!Runner)
523 return nullptr;
524 if (Log)
525 Log->switchContext(MF.getName());
526 assert(MBFI && Loops &&
527 "Invalid provider state: must have analysis available");
528 return std::make_unique<DevelopmentModeEvictAdvisor>(
529 MF, RA, Runner.get(), *MBFI, *Loops, Log.get());
530 }
531
532private:
533 std::vector<TensorSpec> InputFeatures;
534 std::vector<TensorSpec> TrainingInputFeatures;
535
536 std::unique_ptr<MLModelRunner> Runner;
537 std::unique_ptr<Logger> Log;
538};
539
540class DevelopmentModeEvictionAdvisorAnalysisLegacy final
542public:
543 DevelopmentModeEvictionAdvisorAnalysisLegacy()
544 : RegAllocEvictionAdvisorAnalysisLegacy(AdvisorMode::Development) {}
545
546 bool doInitialization(Module &M) override {
547 Provider = std::make_unique<DevelopmentModeEvictionAdvisorProvider>(
548 M.getContext());
549 return false;
550 }
551
552 void logRewardIfNeeded(const MachineFunction &MF,
553 llvm::function_ref<float()> GetReward) override {
554 Provider->logRewardIfNeeded(MF, GetReward);
555 }
556
557 // support for isa<> and dyn_cast.
558 static bool classof(const RegAllocEvictionAdvisorAnalysisLegacy *R) {
559 return R->getAdvisorMode() == AdvisorMode::Development;
560 }
561
562 void getAnalysisUsage(AnalysisUsage &AU) const override {
566 }
567};
568
569#endif // #ifdef LLVM_HAVE_TFLITE
570} // namespace
571
572float MLEvictAdvisor::getInitialQueueSize(const MachineFunction &MF) {
573 auto &MRI = MF.getRegInfo();
574 unsigned NumUsedRegs = 0;
575 for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
577 if (!MRI.reg_nodbg_empty(Reg))
578 ++NumUsedRegs;
579 }
580 return static_cast<float>(NumUsedRegs);
581}
582
583MLEvictAdvisor::MLEvictAdvisor(const MachineFunction &MF, const RAGreedy &RA,
584 MLModelRunner *Runner,
585 const MachineBlockFrequencyInfo &MBFI,
586 const MachineLoopInfo &Loops)
587 : RegAllocEvictionAdvisor(MF, RA), DefaultAdvisor(MF, RA),
588 Runner(std::move(Runner)), MBFI(MBFI), Loops(Loops),
589 InitialQSize(MLEvictAdvisor::getInitialQueueSize(MF)) {
590 assert(this->Runner);
591 Runner->switchContext(MF.getName());
592 DoNotNormalize.set(FeatureIDs::mask);
593 DoNotNormalize.set(FeatureIDs::is_free);
594 DoNotNormalize.set(FeatureIDs::is_hint);
595 DoNotNormalize.set(FeatureIDs::is_local);
596 DoNotNormalize.set(FeatureIDs::min_stage);
597 DoNotNormalize.set(FeatureIDs::max_stage);
598 DoNotNormalize.set(FeatureIDs::progress);
599}
600
601int64_t MLEvictAdvisor::tryFindEvictionCandidatePosition(
602 const LiveInterval &, const AllocationOrder &, unsigned, uint8_t,
603 const SmallVirtRegSet &) const {
604 int64_t Ret = Runner->evaluate<int64_t>();
605 assert(Ret >= 0);
607 return Ret;
608}
609
610bool MLEvictAdvisor::loadInterferenceFeatures(
611 const LiveInterval &VirtReg, MCRegister PhysReg, bool IsHint,
612 const SmallVirtRegSet &FixedRegisters,
613 llvm::SmallVectorImpl<float> &Largest, size_t Pos,
614 llvm::SmallVectorImpl<LRStartEndInfo> &LRPosInfo) const {
615 // It is only possible to evict virtual register interference.
616 if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg) {
617 // leave unavailable
618 return false;
619 }
620
621 const bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);
622 int64_t LocalIntfs = 0;
623 float NumUrgent = 0.0f;
624
625 // The cascade tracking is the same as in the default advisor
626 unsigned Cascade = RA.getExtraInfo().getCascadeOrCurrentNext(VirtReg.reg());
627
629 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
630 LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, Unit);
631 // Different from the default heuristic, we don't make any assumptions
632 // about what having more than 10 results in the query may mean.
633 const auto &IFIntervals = Q.interferingVRegs(EvictInterferenceCutoff);
634 if (IFIntervals.empty() && InterferingIntervals.empty())
635 continue;
636 if (IFIntervals.size() >= EvictInterferenceCutoff)
637 return false;
638 InterferingIntervals.append(IFIntervals.begin(), IFIntervals.end());
639 for (const LiveInterval *Intf : reverse(IFIntervals)) {
640 assert(Intf->reg().isVirtual() &&
641 "Only expecting virtual register interference from query");
642 // This is the same set of legality checks as in the default case: don't
643 // try to evict fixed regs or 'done' ones. Also don't break cascades,
644 // except in the urgent case, with the same nuances used in the default
645 // heuristic.
646 // We could try sharing this between the advisors, but it may end up
647 // more complex than it is right now.
648 if (FixedRegisters.count(Intf->reg()))
649 return false;
650 if (RA.getExtraInfo().getStage(*Intf) == RS_Done)
651 return false;
652 bool Urgent =
653 !VirtReg.isSpillable() &&
654 (Intf->isSpillable() ||
655 RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg())) <
656 RegClassInfo.getNumAllocatableRegs(
657 MRI->getRegClass(Intf->reg())));
658
659 unsigned IntfCascade = RA.getExtraInfo().getCascade(Intf->reg());
660 // There is a potential that the model could be adversarial and
661 // continually evict the same live ranges, leading to a
662 // large amount of compile time being spent in regalloc. If we hit the
663 // threshold, prevent the range from being evicted. We still let the
664 // range through if it is urgent as we are required to produce an
665 // eviction if the candidate is not spillable.
666 if (getEvictionCount(Intf->reg()) > MaxEvictionCount && !Urgent)
667 return false;
668
669 // Only evict older cascades or live ranges without a cascade.
670 if (Cascade <= IntfCascade) {
671 if (!Urgent)
672 return false;
673 ++NumUrgent;
674 }
675
676 LocalIntfs += (IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
677 (!EnableLocalReassign || !canReassign(*Intf, PhysReg)));
678 }
679 }
680 // If we made it this far, this LR is an eviction candidate; load its
681 // features.
682 extractFeatures(InterferingIntervals, Largest, Pos, IsHint, LocalIntfs,
683 NumUrgent, LRPosInfo);
684 return true;
685}
686
687MCRegister MLEvictAdvisor::tryFindEvictionCandidate(
688 const LiveInterval &VirtReg, const AllocationOrder &Order,
689 uint8_t CostPerUseLimit, const SmallVirtRegSet &FixedRegisters) const {
690 auto MaybeOrderLimit = getOrderLimit(VirtReg, Order, CostPerUseLimit);
691 if (!MaybeOrderLimit)
693 unsigned OrderLimit = *MaybeOrderLimit;
694
695 // The heuristic sets initial costs such that, if CostPerUseLimit is
696 // max<uint8_t>, then any of the costs of the legally-evictable intervals
697 // would be lower. When that happens, one of those will be selected.
698 // Therefore, we allow the candidate to be selected, unless the candidate is
699 // unspillable, in which case it would be incorrect not to find a register
700 // for it.
701 const bool MustFindEviction =
702 (!VirtReg.isSpillable() && CostPerUseLimit == static_cast<uint8_t>(~0u));
703 // Number of available candidates - if 0, no need to continue.
704 size_t Available = 0;
705 // Make sure we don't have leftover partial state from an attempt where we
706 // had no available candidates and bailed out early.
707 resetInputs(*Runner);
708
709 // Track the index->register mapping because AllocationOrder doesn't do that
710 // and we'd have to scan it.
711 // Also track their mask, for asserts/debugging.
712 CandidateRegList Regs;
713 Regs.fill({0, false});
714
715 // Track the largest value of features seen during this eviction session. We
716 // only normalize (some of) the float features, but it's just simpler to
717 // dimension 'Largest' to all the features, especially since we have the
718 // 'DoNotNormalize' list.
719 FeaturesListNormalizer Largest(FeatureIDs::FeatureCount, 0.0);
720
721 // Same overall idea as in the default eviction policy - we visit the values
722 // of AllocationOrder one at a time. If it's not legally available, we mask
723 // off the corresponding feature column (== do nothing, because we already
724 // reset all the features to 0). Use Pos to capture the column we load
725 // features at - in AllocationOrder order.
726 size_t Pos = 0;
728 for (auto I = Order.begin(), E = Order.getOrderLimitEnd(OrderLimit); I != E;
729 ++I, ++Pos) {
730 MCRegister PhysReg = *I;
731 assert(!Regs[Pos].second);
732 assert(PhysReg);
733 if (!canAllocatePhysReg(CostPerUseLimit, PhysReg)) {
734 continue;
735 }
736 if (loadInterferenceFeatures(VirtReg, PhysReg, I.isHint(), FixedRegisters,
737 Largest, Pos, LRPosInfo)) {
738 ++Available;
739 Regs[Pos] = std::make_pair(PhysReg, true);
740 }
741 }
742 if (Available == 0) {
743 // Nothing to decide, nothing to learn.
744 assert(!MustFindEviction);
746 }
747 const size_t ValidPosLimit = Pos;
748 // If we must find an eviction, the candidate should be masked out of the
749 // decision-making process.
750 Regs[CandidateVirtRegPos].second = !MustFindEviction;
751 if (!MustFindEviction)
752 extractFeatures(SmallVector<const LiveInterval *, 1>(1, &VirtReg), Largest,
753 CandidateVirtRegPos, /*IsHint*/ 0,
754 /*LocalIntfsCount*/ 0,
755 /*NumUrgent*/ 0.0, LRPosInfo);
756 assert(InitialQSize > 0.0 && "We couldn't have gotten here if we had "
757 "nothing to allocate initially.");
758 // Normalize the features.
759 for (auto &V : Largest)
760 V = V ? V : 1.0;
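  // Worked example (illustrative values): if the weighed_reads_by_max column
  // currently holds {2, 8, 4, ...} and Largest[weighed_reads_by_max] is 8, the
  // division below rewrites it to {0.25, 1.0, 0.5, ...}; features on the
  // DoNotNormalize list (mask, is_free, the stages, progress, ...) are left
  // untouched.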
762 ++FeatureIndex) {
763 if (DoNotNormalize.test(FeatureIndex))
764 continue;
765 for (size_t Pos = 0; Pos < NumberOfInterferences; ++Pos) {
766 Runner->getTensor<float>(FeatureIndex)[Pos] /= Largest[FeatureIndex];
767 }
768 }
769 *Runner->getTensor<float>(FeatureIDs::progress) =
770 static_cast<float>(RA.getQueueSize()) / InitialQSize;
771
772 // Get a decision.
773 size_t CandidatePos = tryFindEvictionCandidatePosition(
774 VirtReg, Order, OrderLimit, CostPerUseLimit, FixedRegisters);
775 // The contract with the ML side is that CandidatePos is mask == 1 (i.e.
776 // Regs[CandidatePos].second)
777 assert(Regs[CandidatePos].second);
778 if (CandidatePos == CandidateVirtRegPos) {
779 onEviction(VirtReg.reg());
780 assert(!MustFindEviction);
782 }
783 assert(CandidatePos < ValidPosLimit);
784 (void)ValidPosLimit;
785
786 // Update information about how many times the virtual registers being
787 // evicted have been evicted so that we can prevent the model from evicting
788 // the same ranges continually and eating compile time.
789 for (MCRegUnit Unit : TRI->regunits(Regs[CandidatePos].first)) {
790 LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, Unit);
791 const auto &IFIntervals = Q.interferingVRegs(EvictInterferenceCutoff);
792 for (const LiveInterval *Intf : reverse(IFIntervals)) {
793 onEviction(Intf->reg());
794 }
795 }
796
797 return Regs[CandidatePos].first;
798}
799
800const LIFeatureComponents &
801MLEvictAdvisor::getLIFeatureComponents(const LiveInterval &LI) const {
802 RegID ID = LI.reg().id();
803 LIFeatureComponents Empty;
804 auto I = CachedFeatures.insert(std::make_pair(ID, Empty));
805 LIFeatureComponents &Ret = I.first->getSecond();
806 if (!I.second)
807 return Ret;
808
811
813 I = MRI->reg_instr_nodbg_begin(LI.reg()),
814 E = MRI->reg_instr_nodbg_end();
815 I != E;) {
816 MachineInstr *MI = &*(I++);
817
818 ++Ret.NumDefsAndUses;
819 if (!Visited.insert(MI).second)
820 continue;
821
822 if (MI->isIdentityCopy() || MI->isImplicitDef())
823 continue;
824
825 bool Reads, Writes;
826 std::tie(Reads, Writes) = MI->readsWritesVirtualRegister(LI.reg());
827
828 float Freq = MBFI.getBlockFreqRelativeToEntryBlock(MI->getParent());
829 Ret.HottestBlockFreq = std::max(Freq, Ret.HottestBlockFreq);
830
831 Ret.R += (Reads && !Writes) * Freq;
832 Ret.W += (!Reads && Writes) * Freq;
833 Ret.RW += (Reads && Writes) * Freq;
834
835 auto *MBB = MI->getParent();
836 auto *Loop = Loops.getLoopFor(MBB);
837 bool IsExiting = Loop ? Loop->isLoopExiting(MBB) : false;
838
839 if (Writes && IsExiting && LIS->isLiveOutOfMBB(LI, MBB))
840 Ret.IndVarUpdates += Freq;
841
842 if (MI->isCopy() && VirtRegAuxInfo::copyHint(MI, LI.reg(), TRI, *MRI))
843 Ret.HintWeights += Freq;
844 }
846 LI, *LIS, *VRM, *MRI, *MF.getSubtarget().getInstrInfo());
847 return Ret;
848}
849
850// Overall, this currently mimics what we do for weight calculation, but instead
851// of accumulating the various features, we keep them separate.
852void MLEvictAdvisor::extractFeatures(
854 llvm::SmallVectorImpl<float> &Largest, size_t Pos, int64_t IsHint,
855 int64_t LocalIntfsCount, float NumUrgent,
856 SmallVectorImpl<LRStartEndInfo> &LRPosInfo) const {
857 int64_t NumDefsAndUses = 0;
858 int64_t NumBrokenHints = 0;
859 double R = 0.0;
860 double W = 0.0;
861 double RW = 0.0;
862 double IndVarUpdates = 0.0;
863 double HintWeights = 0.0;
864 float StartBBFreq = 0.0;
865 float EndBBFreq = 0.0;
866 float HottestBlockFreq = 0.0;
867 int32_t NumRematerializable = 0;
868 float TotalWeight = 0.0;
869
870 SlotIndex EndSI = LIS->getSlotIndexes()->getZeroIndex();
871 SlotIndex StartSI = LIS->getSlotIndexes()->getLastIndex();
872 int64_t MaxStage = 0;
873 int64_t MinStage =
874 Intervals.empty() ? 0 : std::numeric_limits<int64_t>::max();
875
876 for (const auto *L : Intervals) {
877 const LiveInterval &LI = *L;
878 MaxStage = std::max<int64_t>(
879 MaxStage, static_cast<int64_t>(RA.getExtraInfo().getStage(LI)));
880 MinStage = std::min<int64_t>(
881 MinStage, static_cast<int64_t>(RA.getExtraInfo().getStage(LI)));
882
883 TotalWeight = std::max(TotalWeight, LI.weight());
884
885 if (LI.beginIndex() < StartSI)
886 StartSI = LI.beginIndex();
887
888 if (LI.endIndex() > EndSI)
889 EndSI = LI.endIndex();
890 const LIFeatureComponents &LIFC = getLIFeatureComponents(LI);
891 NumBrokenHints += VRM->hasPreferredPhys(LI.reg());
892
893 NumDefsAndUses += LIFC.NumDefsAndUses;
894 HottestBlockFreq = std::max(HottestBlockFreq, LIFC.HottestBlockFreq);
895 R += LIFC.R;
896 W += LIFC.W;
897 RW += LIFC.RW;
898
899 IndVarUpdates += LIFC.IndVarUpdates;
900
901 HintWeights += LIFC.HintWeights;
902 NumRematerializable += LIFC.IsRemat;
903 }
904 size_t Size = 0;
905 if (!Intervals.empty()) {
906 StartBBFreq =
907 MBFI.getBlockFreqRelativeToEntryBlock(LIS->getMBBFromIndex(StartSI));
908 if (EndSI >= LIS->getSlotIndexes()->getLastIndex())
909 EndSI = LIS->getSlotIndexes()->getLastIndex().getPrevIndex();
910 EndBBFreq =
911 MBFI.getBlockFreqRelativeToEntryBlock(LIS->getMBBFromIndex(EndSI));
912 Size = StartSI.distance(EndSI);
913 }
914 // Set the features at the column 'Pos'.
915#define SET(ID, TYPE, VAL) \
916 do { \
917 Runner->getTensor<TYPE>(FeatureIDs::ID)[Pos] = static_cast<TYPE>(VAL); \
918 if (!DoNotNormalize.test(FeatureIDs::ID)) \
919 Largest[FeatureIDs::ID] = \
920 std::max(Largest[FeatureIDs::ID], static_cast<float>(VAL)); \
921 } while (false)
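  // For instance, SET(nr_urgent, float, NumUrgent) expands to roughly
  //   Runner->getTensor<float>(FeatureIDs::nr_urgent)[Pos] = NumUrgent;
  //   Largest[FeatureIDs::nr_urgent] =
  //       std::max(Largest[FeatureIDs::nr_urgent], (float)NumUrgent);
  // with the Largest update skipped for features on the DoNotNormalize list.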
922 SET(mask, int64_t, 1);
923 SET(is_free, int64_t, Intervals.empty());
924 SET(nr_urgent, float, NumUrgent);
925 SET(nr_broken_hints, float, NumBrokenHints);
926 SET(is_hint, int64_t, IsHint);
927 SET(is_local, int64_t, LocalIntfsCount);
928 SET(nr_rematerializable, float, NumRematerializable);
929 SET(nr_defs_and_uses, float, NumDefsAndUses);
930 SET(weighed_reads_by_max, float, R);
931 SET(weighed_writes_by_max, float, W);
932 SET(weighed_read_writes_by_max, float, RW);
933 SET(weighed_indvars_by_max, float, IndVarUpdates);
934 SET(hint_weights_by_max, float, HintWeights);
935 SET(start_bb_freq_by_max, float, StartBBFreq);
936 SET(end_bb_freq_by_max, float, EndBBFreq);
937 SET(hottest_bb_freq_by_max, float, HottestBlockFreq);
938 SET(liverange_size, float, Size);
939 SET(use_def_density, float, TotalWeight);
940 SET(max_stage, int64_t, MaxStage);
941 SET(min_stage, int64_t, MinStage);
942#undef SET
943}
944
945// Development mode-specific implementations
946#ifdef LLVM_HAVE_TFLITE
947
950 return new DevelopmentModeEvictionAdvisorAnalysisLegacy();
951}
952
953int64_t DevelopmentModeEvictAdvisor::tryFindEvictionCandidatePosition(
954 const LiveInterval &VirtReg, const AllocationOrder &Order,
955 unsigned OrderLimit, uint8_t CostPerUseLimit,
956 const SmallVirtRegSet &FixedRegisters) const {
957 int64_t Ret = 0;
958 if (isa<ModelUnderTrainingRunner>(getRunner())) {
959 Ret = MLEvictAdvisor::tryFindEvictionCandidatePosition(
960 VirtReg, Order, OrderLimit, CostPerUseLimit, FixedRegisters);
961 } else {
962 MCRegister PhysReg = getDefaultAdvisor().tryFindEvictionCandidate(
963 VirtReg, Order, CostPerUseLimit, FixedRegisters);
964 // Find the index of the selected PhysReg. We need it for logging,
965 // otherwise this is wasted cycles (but so would be starting development
966 // mode without a model or logging).
967 if (!PhysReg)
969 else
970 for (auto I = Order.begin(), E = Order.getOrderLimitEnd(OrderLimit);
971 I != E; ++I, ++Ret)
972 if (*I == PhysReg)
973 break;
974 }
975 if (TrainingLog.empty())
976 return Ret;
977 // TODO(mtrofin): when we support optional rewards, this can go away. In the
978 // meantime, we log the "pretend" reward (0) for the previous observation
979 // before starting a new one.
980 if (Log->hasObservationInProgress())
981 Log->logReward<float>(0.0);
982
983 Log->startObservation();
984 size_t CurrentFeature = 0;
986 for (; CurrentFeature < FeatureCount; ++CurrentFeature) {
987 Log->logTensorValue(CurrentFeature,
988 reinterpret_cast<const char *>(
989 getRunner().getTensorUntyped(CurrentFeature)));
990 }
991 if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(&getRunner()))
992 for (size_t I = 0; I < MUTR->extraOutputsForLoggingSpecs().size();
993 ++I, ++CurrentFeature)
994 Log->logTensorValue(
995 CurrentFeature,
996 reinterpret_cast<const char *>(MUTR->getUntypedExtraOutputValue(I)));
997 // The output is right after the features and the extra outputs
998 Log->logTensorValue(CurrentFeature, reinterpret_cast<const char *>(&Ret));
999 Log->endObservation();
1000 return Ret;
1001}
1002
1003bool RegAllocScoring::runOnMachineFunction(MachineFunction &MF) {
1004 std::optional<float> CachedReward;
1005 auto GetReward = [&]() {
1006 if (!CachedReward)
1007 CachedReward = static_cast<float>(
1009 MF, getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI())
1010 .getScore());
1011 return *CachedReward;
1012 };
1013
1014 getAnalysis<RegAllocEvictionAdvisorAnalysisLegacy>().logRewardIfNeeded(
1015 MF, GetReward);
1016 getAnalysis<RegAllocPriorityAdvisorAnalysisLegacy>().logRewardIfNeeded(
1017 MF, GetReward);
1018 return false;
1019}
1020#endif // #ifdef LLVM_HAVE_TFLITE
1021
1022RegAllocEvictionAdvisorProvider *
1024 return new ReleaseModeEvictionAdvisorProvider(Ctx);
1025}
1026
1029#if defined(LLVM_HAVE_TFLITE)
1030 return new DevelopmentModeEvictionAdvisorProvider(Ctx);
1031#endif
1032 return nullptr;
1033}
1034
1039 ? new ReleaseModeEvictionAdvisorAnalysisLegacy()
1040 : nullptr;
1041}
1042
1043// In all cases except development mode, we don't need scoring.
1044#if !defined(LLVM_HAVE_TFLITE)
1045bool RegAllocScoring::runOnMachineFunction(MachineFunction &) { return false; }
1046#endif