//===-- GCNSchedStrategy.h - GCN Scheduler Strategy -*- C++ -*-------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_GCNSCHEDSTRATEGY_H
#define LLVM_LIB_TARGET_AMDGPU_GCNSCHEDSTRATEGY_H

#include "GCNRegPressure.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/CodeGen/MachineScheduler.h"

namespace llvm {

class SIMachineFunctionInfo;
class SIRegisterInfo;
class GCNSubtarget;
class GCNSchedStage;

enum class GCNSchedStageID : unsigned {
  OccInitialSchedule = 0,
  UnclusteredHighRPReschedule = 1,
  ClusteredLowOccupancyReschedule = 2,
  PreRARematerialize = 3,
  ILPInitialSchedule = 4,
  MemoryClauseInitialSchedule = 5,
  LastStage = MemoryClauseInitialSchedule
};

#ifndef NDEBUG
raw_ostream &operator<<(raw_ostream &OS, const GCNSchedStageID &StageID);
#endif

/// This is a minimal scheduler strategy. The main difference between this
/// and the GenericScheduler is that GCNSchedStrategy uses different
/// heuristics to determine excess/critical pressure sets.
class GCNSchedStrategy : public GenericScheduler {
protected:
  SUnit *pickNodeBidirectional(bool &IsTopNode, bool &PickedPending);

  void pickNodeFromQueue(SchedBoundary &Zone, const CandPolicy &ZonePolicy,
                         const RegPressureTracker &RPTracker,
                         SchedCandidate &Cand, bool &IsPending,
                         bool IsBottomUp);

  void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop,
                     const RegPressureTracker &RPTracker,
                     const SIRegisterInfo *SRI, unsigned SGPRPressure,
                     unsigned VGPRPressure, bool IsBottomUp);

  /// Evaluates instructions in the pending queue using a subset of scheduling
  /// heuristics.
  ///
  /// Instructions that cannot be issued due to hardware constraints are placed
  /// in the pending queue rather than the available queue, making them normally
  /// invisible to scheduling heuristics. However, in certain scenarios (such as
  /// avoiding register spilling), it may be beneficial to consider scheduling
  /// these not-yet-ready instructions.
  bool tryPendingCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
                           SchedBoundary *Zone) const;

  void printCandidateDecision(const SchedCandidate &Current,
                              const SchedCandidate &Preferred);

  std::vector<unsigned> Pressure;

  std::vector<unsigned> MaxPressure;

  unsigned SGPRExcessLimit;

  unsigned VGPRExcessLimit;

  unsigned SGPRCriticalLimit;

  unsigned VGPRCriticalLimit;

  // Scheduling stages for this strategy.
  SmallVector<GCNSchedStageID, 4> SchedStages;

  // Pointer to the current SchedStageID.
  SmallVectorImpl<GCNSchedStageID>::iterator CurrentStage = nullptr;

  // GCN RP Tracker for top-down scheduling
  GCNDownwardRPTracker DownwardTracker;

  // GCN RP Tracker for bottom-up scheduling
  GCNUpwardRPTracker UpwardTracker;

public:
  // Whether schedule() has seen register pressure over the critical limits and
  // had to track register pressure for actual scheduling heuristics.
  bool HasHighPressure;

  // Schedule known to have excess register pressure. Be more conservative in
  // increasing ILP and preserving VGPRs.
  bool KnownExcessRP = false;

  // An error margin is necessary because of poor performance of the generic RP
  // tracker and can be adjusted up for tuning heuristics to try and more
  // aggressively reduce register pressure.
  unsigned ErrorMargin = 3;

  // Bias for SGPR limits under a high register pressure.
  const unsigned HighRPSGPRBias = 7;

  // Bias for VGPR limits under a high register pressure.
  const unsigned HighRPVGPRBias = 7;

  unsigned TargetOccupancy = 0;

  MachineFunction *MF;

  unsigned SGPRLimitBias = 0;

  unsigned VGPRLimitBias = 0;

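  // Illustrative sketch only (not part of the interface): the error margin and
  // biases above are meant to tighten the pressure limits the heuristics
  // compare against, so the strategy backs off a few registers before the real
  // hardware limit is hit. Assuming the knobs are simply subtracted from the
  // hardware limit (the exact combination is computed in GCNSchedStrategy.cpp),
  // a target with a VGPR critical limit of 128 would effectively be scheduled
  // against:
  //
  //   unsigned EffectiveVGPRCriticalLimit =
  //       128 - VGPRLimitBias /* e.g. HighRPVGPRBias = 7 */ - ErrorMargin /* 3 */;
  //   // => 118 registers, i.e. the strategy turns conservative slightly early.
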
  GCNSchedStrategy(const MachineSchedContext *C);

  SUnit *pickNode(bool &IsTopNode) override;

  void schedNode(SUnit *SU, bool IsTopNode) override;

  void initialize(ScheduleDAGMI *DAG) override;

  unsigned getTargetOccupancy() { return TargetOccupancy; }

  void setTargetOccupancy(unsigned Occ) { TargetOccupancy = Occ; }

  GCNSchedStageID getCurrentStage();

  // Advances stage. Returns true if there are remaining stages.
  bool advanceStage();

  bool hasNextStage() const;

  GCNSchedStageID getNextStage() const;

  GCNDownwardRPTracker *getDownwardTracker() { return &DownwardTracker; }

  GCNUpwardRPTracker *getUpwardTracker() { return &UpwardTracker; }
};
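
// Conceptual sketch of how the stage API above is driven (the real driver is
// GCNScheduleDAGMILive::runSchedStages; this outline is illustrative, not a
// literal transcription of it):
//
//   GCNSchedStrategy &S = ...;            // strategy owned by the scheduling DAG
//   while (S.advanceStage()) {            // returns false once stages run out
//     GCNSchedStageID ID = S.getCurrentStage();
//     // create the matching GCNSchedStage for ID and schedule every region,
//     // optionally peeking at S.getNextStage() when S.hasNextStage().
//   }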

/// The goal of this scheduling strategy is to maximize kernel occupancy (i.e.
/// maximum number of waves per SIMD).
class GCNMaxOccupancySchedStrategy final : public GCNSchedStrategy {
public:
  GCNMaxOccupancySchedStrategy(const MachineSchedContext *C,
                               bool IsLegacyScheduler = false);
};

/// The goal of this scheduling strategy is to maximize ILP for a single wave
/// (i.e. latency hiding).
class GCNMaxILPSchedStrategy final : public GCNSchedStrategy {
protected:
  bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
                    SchedBoundary *Zone) const override;

public:
  GCNMaxILPSchedStrategy(const MachineSchedContext *C);
};

/// The goal of this scheduling strategy is to maximize memory clause formation
/// for a single wave.
class GCNMaxMemoryClauseSchedStrategy final : public GCNSchedStrategy {
protected:
  bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
                    SchedBoundary *Zone) const override;

public:
  GCNMaxMemoryClauseSchedStrategy(const MachineSchedContext *C);
};

class ScheduleMetrics {
  unsigned ScheduleLength;
  unsigned BubbleCycles;

public:
  ScheduleMetrics() = default;
  ScheduleMetrics(unsigned L, unsigned BC)
      : ScheduleLength(L), BubbleCycles(BC) {}
  unsigned getLength() const { return ScheduleLength; }
  unsigned getBubbles() const { return BubbleCycles; }
  unsigned getMetric() const {
    unsigned Metric = (BubbleCycles * ScaleFactor) / ScheduleLength;
    // Metric is zero if the amount of bubbles is less than 1%, which is too
    // small, so return 1 in that case.
    return Metric ? Metric : 1;
  }
  static const unsigned ScaleFactor;
};
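
// Worked example for getMetric(): ScaleFactor is defined in
// GCNSchedStrategy.cpp and, per the "less than 1%" comment above, is expected
// to be 100, i.e. the metric is the bubble share of the schedule in percent.
// A 200-cycle schedule containing 30 bubble cycles yields
//   (30 * 100) / 200 = 15,
// while a schedule with fewer than 1% bubbles (or none at all) is clamped to 1.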

inline raw_ostream &operator<<(raw_ostream &OS, const ScheduleMetrics &Sm) {
  dbgs() << "\n Schedule Metric (scaled by "
         << ScheduleMetrics::ScaleFactor
         << " ) is: " << Sm.getMetric() << " [ " << Sm.getBubbles() << "/"
         << Sm.getLength() << " ]\n";
  return OS;
}

class GCNScheduleDAGMILive;
class RegionPressureMap {
  GCNScheduleDAGMILive *DAG;
  // The live in/out pressure as indexed by the first or last MI in the region
  // before scheduling.
  DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> RegionLiveRegMap;
  // The mapping of RegionIdx to key instruction
  DenseMap<unsigned, MachineInstr *> IdxToInstruction;
  // Whether we are calculating LiveOuts or LiveIns
  bool IsLiveOut;

public:
  RegionPressureMap() = default;
  RegionPressureMap(GCNScheduleDAGMILive *GCNDAG, bool LiveOut)
      : DAG(GCNDAG), IsLiveOut(LiveOut) {}
  // Build the Instr->LiveReg and RegionIdx->Instr maps
  void buildLiveRegMap();

  // Retrieve the LiveReg for a given RegionIdx
  GCNRPTracker::LiveRegSet &getLiveRegsForRegionIdx(unsigned RegionIdx) {
    assert(IdxToInstruction.contains(RegionIdx));
    MachineInstr *Key = IdxToInstruction[RegionIdx];
    return RegionLiveRegMap[Key];
  }
};
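
// Typical usage sketch, mirroring how GCNScheduleDAGMILive drives its
// RegionLiveOuts member (illustrative only):
//
//   RegionPressureMap LiveOuts(&DAG, /*LiveOut=*/true);
//   LiveOuts.buildLiveRegMap();                      // index all regions once
//   GCNRPTracker::LiveRegSet &LO =
//       LiveOuts.getLiveRegsForRegionIdx(RegionIdx); // cheap lookups afterwards
//
// Keying the cache on the region's first/last MachineInstr rather than on the
// region index presumably keeps the entries addressable even after scheduling
// reorders the instructions inside a region.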

/// A region's boundaries, i.e. a pair of instruction bundle iterators. The
/// lower boundary is inclusive, the upper boundary is exclusive.
using RegionBoundaries =
    std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>;

class GCNScheduleDAGMILive final : public ScheduleDAGMILive {
  friend class GCNSchedStage;
  friend class OccInitialScheduleStage;
  friend class UnclusteredHighRPStage;
  friend class ClusteredLowOccStage;
  friend class RewriteMFMAFormStage;
  friend class PreRARematStage;
  friend class ILPInitialScheduleStage;
  friend class RegionPressureMap;

  const GCNSubtarget &ST;

  SIMachineFunctionInfo &MFI;

  // Occupancy target at the beginning of the function scheduling cycle.
  unsigned StartingOccupancy;

  // Minimal real occupancy recorded for the function.
  unsigned MinOccupancy;

  // Vector of regions recorded for later rescheduling.
  SmallVector<RegionBoundaries, 32> Regions;

  // Record regions with high register pressure.
  BitVector RegionsWithHighRP;

  // Record regions with excess register pressure over the physical register
  // limit. Register pressure in these regions usually will result in spilling.
  BitVector RegionsWithExcessRP;

  // Regions that have IGLP instructions (SCHED_GROUP_BARRIER or IGLP_OPT).
  BitVector RegionsWithIGLPInstrs;

  // Region live-in cache.
  SmallVector<GCNRPTracker::LiveRegSet, 32> LiveIns;

  // Region pressure cache.
  SmallVector<GCNRegPressure, 32> Pressure;

  // Temporary basic block live-in cache.
  DenseMap<MachineBasicBlock *, GCNRPTracker::LiveRegSet> MBBLiveIns;

  // The map of the initial first region instruction to region live-in
  // registers.
  DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> BBLiveInMap;

  // Calculate the map of the initial first region instruction to region
  // live-in registers.
  DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> getRegionLiveInMap() const;

  // Calculate the map of the initial last region instruction to region
  // live-out registers.
  DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet>
  getRegionLiveOutMap() const;

  // The live-out registers per region. These are internally stored as a map of
  // the initial last region instruction to region live-out registers, but can
  // be retrieved with the region index by calls to getLiveRegsForRegionIdx.
  RegionPressureMap RegionLiveOuts;

  // Return current region pressure.
  GCNRegPressure getRealRegPressure(unsigned RegionIdx) const;

  // Compute and cache live-ins and pressure for all regions in the block.
  void computeBlockPressure(unsigned RegionIdx, const MachineBasicBlock *MBB);

  /// If necessary, updates a region's boundaries following insertion (\p NewMI
  /// != nullptr) or removal (\p NewMI == nullptr) of a \p MI in the region.
  /// For an MI removal, this must be called before the MI is actually erased
  /// from its parent MBB.
  void updateRegionBoundaries(RegionBoundaries &RegionBounds,
                              MachineBasicBlock::iterator MI,
                              MachineInstr *NewMI);

  void runSchedStages();

  std::unique_ptr<GCNSchedStage> createSchedStage(GCNSchedStageID SchedStageID);

public:
  GCNScheduleDAGMILive(MachineSchedContext *C,
                       std::unique_ptr<MachineSchedStrategy> S);

  void schedule() override;

  void finalizeSchedule() override;
};

// GCNSchedStrategy applies multiple scheduling stages to a function.
class GCNSchedStage {
protected:
  GCNScheduleDAGMILive &DAG;

  GCNSchedStrategy &S;

  MachineFunction &MF;

  SIMachineFunctionInfo &MFI;

  const GCNSubtarget &ST;

  const GCNSchedStageID StageID;

  // The current block being scheduled.
  MachineBasicBlock *CurrentMBB = nullptr;

  // Current region index.
  unsigned RegionIdx = 0;

  // Record the original order of instructions before scheduling.
  std::vector<MachineInstr *> Unsched;

  // RP before scheduling the current region.
  GCNRegPressure PressureBefore;

  // RP after scheduling the current region.
  GCNRegPressure PressureAfter;

  std::vector<std::unique_ptr<ScheduleDAGMutation>> SavedMutations;

  GCNSchedStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG);

public:
  // Initialize state for a scheduling stage. Returns false if the current
  // stage should be skipped.
  virtual bool initGCNSchedStage();

  // Finalize state after finishing a scheduling pass on the function.
  virtual void finalizeGCNSchedStage();

  // Setup for scheduling a region. Returns false if the current region should
  // be skipped.
  virtual bool initGCNRegion();

  // Finalize state after scheduling a region.
  virtual void finalizeGCNRegion();

  // Track whether a new region is also a new MBB.
  void setupNewBlock();

  // Check the result of scheduling.
  void checkScheduling();

  // Computes the given schedule's virtual execution time in clocks.
  ScheduleMetrics getScheduleMetrics(const std::vector<SUnit> &InputSchedule);
  ScheduleMetrics getScheduleMetrics(const GCNScheduleDAGMILive &DAG);
  unsigned computeSUnitReadyCycle(const SUnit &SU, unsigned CurrCycle,
                                  DenseMap<unsigned, unsigned> &ReadyCycles,
                                  const TargetSchedModel &SM);

  // Returns true if scheduling should be reverted.
  virtual bool shouldRevertScheduling(unsigned WavesAfter);

  // Returns true if the current region has known excess pressure.
  bool isRegionWithExcessRP() const {
    return DAG.RegionsWithExcessRP[RegionIdx];
  }

  // The region number this stage is currently working on.
  unsigned getRegionIdx() { return RegionIdx; }

  // Returns true if the new schedule may result in more spilling.
  bool mayCauseSpilling(unsigned WavesAfter);

  // Attempt to revert scheduling for this region.
  void revertScheduling();

  virtual ~GCNSchedStage() = default;
};
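
// Conceptual call order of the hooks above for a single stage, as driven by
// GCNScheduleDAGMILive::runSchedStages (a sketch pieced together from the hook
// documentation, not a literal transcription of the implementation):
//
//   if (!Stage.initGCNSchedStage())   // stage-level setup; false skips the stage
//     return;
//   for (each scheduling region) {
//     if (!Stage.initGCNRegion())     // region-level setup; false skips the region
//       continue;
//     // ... schedule the region with the current GCNSchedStrategy ...
//     Stage.finalizeGCNRegion();      // checkScheduling() and possible revert
//   }
//   Stage.finalizeGCNSchedStage();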

class OccInitialScheduleStage : public GCNSchedStage {
public:
  bool shouldRevertScheduling(unsigned WavesAfter) override;

  OccInitialScheduleStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
      : GCNSchedStage(StageID, DAG) {}
};

class RewriteMFMAFormStage : public GCNSchedStage {
private:
  // Record regions with excess ArchVGPR register pressure over the physical
  // register limit. Register pressure in these regions usually will result in
  // spilling.
  BitVector RegionsWithExcessArchVGPR;

  const SIInstrInfo *TII;
  const SIRegisterInfo *SRI;

  /// Do a speculative rewrite and collect copy locations. The speculative
  /// rewrite allows us to calculate the RP of the code after the rewrite, and
  /// the copy locations allow us to calculate the total cost of copies required
  /// for the rewrite. Stores the rewritten instructions in \p RewriteCands,
  /// the copy locations for uses (of the MFMA result) in \p CopyForUse and the
  /// copy locations for defs (of the MFMA operands) in \p CopyForDef.
  bool
  initHeuristics(std::vector<std::pair<MachineInstr *, unsigned>> &RewriteCands,
                 DenseMap<MachineBasicBlock *, std::set<Register>> &CopyForUse,
                 SmallPtrSetImpl<MachineInstr *> &CopyForDef);

  /// Calculate the rewrite cost and undo the state change (e.g. rewriting) done
  /// in initHeuristics. Uses \p CopyForUse and \p CopyForDef to calculate copy
  /// costs, and \p RewriteCands to undo rewriting.
  int64_t getRewriteCost(
      const std::vector<std::pair<MachineInstr *, unsigned>> &RewriteCands,
      const DenseMap<MachineBasicBlock *, std::set<Register>> &CopyForUse,
      const SmallPtrSetImpl<MachineInstr *> &CopyForDef);

  /// Do the final rewrite on \p RewriteCands and insert any needed copies.
  bool
  rewrite(const std::vector<std::pair<MachineInstr *, unsigned>> &RewriteCands);

  /// \returns true if this MI is a rewrite candidate.
  bool isRewriteCandidate(MachineInstr *MI) const;

  /// Finds all the reaching defs of \p UseMO and stores the SlotIndexes into
  /// \p DefIdxs.
  void findReachingDefs(MachineOperand &UseMO, LiveIntervals *LIS,
                        SmallVectorImpl<SlotIndex> &DefIdxs);

  /// Finds all the reaching uses of \p DefMI and stores the use operands in
  /// \p ReachingUses.
  void findReachingUses(MachineInstr *DefMI, LiveIntervals *LIS,
                        SmallVectorImpl<MachineOperand *> &ReachingUses);

public:
  bool initGCNSchedStage() override;

  RewriteMFMAFormStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
      : GCNSchedStage(StageID, DAG) {}
};
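
// Illustrative outline of how the helpers above are expected to compose inside
// RewriteMFMAFormStage::initGCNSchedStage (a sketch under the assumption that
// the cost check is a simple threshold; the real logic lives in
// GCNSchedStrategy.cpp):
//
//   std::vector<std::pair<MachineInstr *, unsigned>> RewriteCands;
//   DenseMap<MachineBasicBlock *, std::set<Register>> CopyForUse;
//   SmallPtrSet<MachineInstr *, 8> CopyForDef;
//   if (!initHeuristics(RewriteCands, CopyForUse, CopyForDef))
//     return false;                       // nothing profitable to speculate on
//   if (getRewriteCost(RewriteCands, CopyForUse, CopyForDef) > 0)
//     return false;                       // copies outweigh the RP benefit
//   return rewrite(RewriteCands);         // apply the rewrite for real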

class UnclusteredHighRPStage : public GCNSchedStage {
private:
  // Save the initial occupancy before starting this stage.
  unsigned InitialOccupancy;
  // Save the temporary target occupancy before starting this stage.
  unsigned TempTargetOccupancy;
  // Track whether any region was scheduled by this stage.
  bool IsAnyRegionScheduled;

public:
  bool initGCNSchedStage() override;

  void finalizeGCNSchedStage() override;

  bool initGCNRegion() override;

  bool shouldRevertScheduling(unsigned WavesAfter) override;

  UnclusteredHighRPStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
      : GCNSchedStage(StageID, DAG) {}
};

// Retry function scheduling if the resulting occupancy turns out to be lower
// than the one used for other scheduling passes. This gives more freedom to
// schedule low register pressure blocks.
class ClusteredLowOccStage : public GCNSchedStage {
public:
  bool initGCNSchedStage() override;

  bool initGCNRegion() override;

  bool shouldRevertScheduling(unsigned WavesAfter) override;

  ClusteredLowOccStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
      : GCNSchedStage(StageID, DAG) {}
};

/// Attempts to reduce function spilling or, if there is no spilling, to
/// increase function occupancy by one with respect to ArchVGPR usage by sinking
/// rematerializable instructions to their use. When the stage estimates that
/// reducing spilling or increasing occupancy is possible, as few instructions
/// as possible are rematerialized to reduce potential negative effects on
/// function latency.
class PreRARematStage : public GCNSchedStage {
private:
  /// Useful information about a rematerializable instruction.
  struct RematInstruction {
    /// Single use of the rematerializable instruction's defined register,
    /// located in a different block.
    MachineInstr *UseMI;
    /// Rematerialized version of \p DefMI, set in
    /// PreRARematStage::rematerialize. Used for reverting rematerializations.
    MachineInstr *RematMI;
    /// Set of regions in which the rematerializable instruction's defined
    /// register is a live-in.
    SmallDenseSet<unsigned, 4> LiveInRegions;

    RematInstruction(MachineInstr *UseMI) : UseMI(UseMI) {}
  };

  /// Maps all MIs to their parent region. MI terminators are considered to be
  /// outside the region they delimit, and as such are not stored in the map.
  DenseMap<MachineInstr *, unsigned> MIRegion;
  /// Parent MBB to each region, in region order.
  SmallVector<MachineBasicBlock *> RegionBB;
  /// Collects instructions to rematerialize.
  MapVector<MachineInstr *, RematInstruction> Rematerializations;
  /// Collects regions whose live-ins or register pressure will change due to
  /// rematerializations.
  BitVector ImpactedRegions;
  /// In case we need to rollback rematerializations, save lane masks for all
  /// rematerialized registers in all regions in which they are live-ins.
  DenseMap<std::pair<unsigned, Register>, LaneBitmask> RegMasks;
  /// After successful stage initialization, indicates which regions should be
  /// rescheduled.
  BitVector RescheduleRegions;
  /// The target occupancy the stage is trying to achieve. Empty when the
  /// objective is spilling reduction.
  std::optional<unsigned> TargetOcc;
  /// Achieved occupancy *only* through rematerializations (pre-rescheduling).
  /// Smaller than or equal to the target occupancy.
  unsigned AchievedOcc;

  /// Returns whether remat can reduce spilling or increase function occupancy
  /// by 1 through rematerialization. If it can do either, collects instructions
  /// in PreRARematStage::Rematerializations and sets the target occupancy in
  /// PreRARematStage::TargetOcc.
  bool canIncreaseOccupancyOrReduceSpill();

  /// Whether the MI is rematerializable.
  bool isReMaterializable(const MachineInstr &MI);

  /// Rematerializes all instructions in PreRARematStage::Rematerializations
  /// and stores the achieved occupancy after remat in
  /// PreRARematStage::AchievedOcc.
  void rematerialize();

  /// If remat alone did not increase occupancy to the target one, rolls back
  /// all rematerializations and resets live-ins/RP in all regions impacted by
  /// the stage to their pre-stage values.
  void finalizeGCNSchedStage() override;

public:
  bool initGCNSchedStage() override;

  bool initGCNRegion() override;

  bool shouldRevertScheduling(unsigned WavesAfter) override;

  PreRARematStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
      : GCNSchedStage(StageID, DAG) {}
};
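
// Conceptual flow of PreRARematStage, pieced together from the member
// documentation above (illustrative only):
//
//   if (canIncreaseOccupancyOrReduceSpill()) {  // fills Rematerializations and
//     rematerialize();                          // TargetOcc; sets AchievedOcc
//     // Regions marked in RescheduleRegions are then rescheduled; if the
//     // target occupancy is still not reached, finalizeGCNSchedStage() rolls
//     // the rematerializations back using the saved live-in lane masks.
//   }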

class ILPInitialScheduleStage : public GCNSchedStage {
public:
  bool shouldRevertScheduling(unsigned WavesAfter) override;

  ILPInitialScheduleStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
      : GCNSchedStage(StageID, DAG) {}
};

class MemoryClauseInitialScheduleStage : public GCNSchedStage {
public:
  bool shouldRevertScheduling(unsigned WavesAfter) override;

  MemoryClauseInitialScheduleStage(GCNSchedStageID StageID,
                                   GCNScheduleDAGMILive &DAG)
      : GCNSchedStage(StageID, DAG) {}
};

class GCNPostScheduleDAGMILive final : public ScheduleDAGMI {
private:
  std::vector<std::unique_ptr<ScheduleDAGMutation>> SavedMutations;

  bool HasIGLPInstrs = false;

public:
  void schedule() override;

  void finalizeSchedule() override;

  GCNPostScheduleDAGMILive(MachineSchedContext *C,
                           std::unique_ptr<MachineSchedStrategy> S,
                           bool RemoveKillFlags);
};

} // End namespace llvm

#endif // LLVM_LIB_TARGET_AMDGPU_GCNSCHEDSTRATEGY_H