LLVM 10.0.0svn
MachineScheduler.h
1 //===- MachineScheduler.h - MachineInstr Scheduling Pass --------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file provides an interface for customizing the standard MachineScheduler
10 // pass. Note that the entire pass may be replaced as follows:
11 //
12 // <Target>TargetMachine::createPassConfig(PassManagerBase &PM) {
13 // PM.substitutePass(&MachineSchedulerID, &CustomSchedulerPassID);
14 // ...}
15 //
16 // The MachineScheduler pass is only responsible for choosing the regions to be
17 // scheduled. Targets can override the DAG builder and scheduler without
18 // replacing the pass as follows:
19 //
20 // ScheduleDAGInstrs *<Target>PassConfig::
21 // createMachineScheduler(MachineSchedContext *C) {
22 // return new CustomMachineScheduler(C);
23 // }
24 //
25 // The default scheduler, ScheduleDAGMILive, builds the DAG and drives list
26 // scheduling while updating the instruction stream, register pressure, and live
27 // intervals. Most targets don't need to override the DAG builder and list
28 // scheduler, but subtargets that require custom scheduling heuristics may
29 // plug in an alternate MachineSchedStrategy. The strategy is responsible for
30 // selecting the highest priority node from the list:
31 //
32 // ScheduleDAGInstrs *<Target>PassConfig::
33 // createMachineScheduler(MachineSchedContext *C) {
34 // return new ScheduleDAGMILive(C, CustomStrategy(C));
35 // }
36 //
37 // The DAG builder can also be customized in a sense by adding DAG mutations
38 // that will run after DAG building and before list scheduling. DAG mutations
39 // can adjust dependencies based on target-specific knowledge or add weak edges
40 // to aid heuristics:
41 //
42 // ScheduleDAGInstrs *<Target>PassConfig::
43 // createMachineScheduler(MachineSchedContext *C) {
44 // ScheduleDAGMI *DAG = createGenericSchedLive(C);
45 // DAG->addMutation(new CustomDAGMutation(...));
46 // return DAG;
47 // }
48 //
49 // A target that supports alternative schedulers can use the
50 // MachineSchedRegistry to allow command line selection. This can be done by
51 // implementing the following boilerplate:
52 //
53 // static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
54 // return new CustomMachineScheduler(C);
55 // }
56 // static MachineSchedRegistry
57 // SchedCustomRegistry("custom", "Run my target's custom scheduler",
58 // createCustomMachineSched);
59 //
60 //
61 // Finally, subtargets that don't need to implement custom heuristics but would
62 // like to configure the GenericScheduler's policy for a given scheduler region,
63 // including scheduling direction and register pressure tracking policy, can do
64 // this:
65 //
66 // void <SubTarget>Subtarget::
67 // overrideSchedPolicy(MachineSchedPolicy &Policy,
68 // unsigned NumRegionInstrs) const {
69 // Policy.<Flag> = true;
70 // }
71 //
72 //===----------------------------------------------------------------------===//
73 
74 #ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
75 #define LLVM_CODEGEN_MACHINESCHEDULER_H
76 
77 #include "llvm/ADT/ArrayRef.h"
78 #include "llvm/ADT/BitVector.h"
79 #include "llvm/ADT/STLExtras.h"
80 #include "llvm/ADT/SmallVector.h"
81 #include "llvm/ADT/StringRef.h"
82 #include "llvm/ADT/Twine.h"
83 #include "llvm/Analysis/AliasAnalysis.h"
84 #include "llvm/CodeGen/MachineBasicBlock.h"
85 #include "llvm/CodeGen/MachinePassRegistry.h"
86 #include "llvm/CodeGen/RegisterPressure.h"
87 #include "llvm/CodeGen/ScheduleDAG.h"
88 #include "llvm/CodeGen/ScheduleDAGInstrs.h"
89 #include "llvm/CodeGen/ScheduleDAGMutation.h"
90 #include "llvm/CodeGen/TargetSchedule.h"
91 #include "llvm/Support/CommandLine.h"
92 #include "llvm/Support/ErrorHandling.h"
93 #include <algorithm>
94 #include <cassert>
95 #include <memory>
96 #include <string>
97 #include <vector>
98 
99 namespace llvm {
100 
101 extern cl::opt<bool> ForceTopDown;
102 extern cl::opt<bool> ForceBottomUp;
103 extern cl::opt<bool> VerifyScheduling;
104 
105 class LiveIntervals;
106 class MachineDominatorTree;
107 class MachineFunction;
108 class MachineInstr;
109 class MachineLoopInfo;
110 class RegisterClassInfo;
111 class SchedDFSResult;
112 class ScheduleHazardRecognizer;
113 class TargetInstrInfo;
114 class TargetPassConfig;
115 class TargetRegisterInfo;
116 
117 /// MachineSchedContext provides enough context from the MachineScheduler pass
118 /// for the target to instantiate a scheduler.
119 struct MachineSchedContext {
120  MachineFunction *MF = nullptr;
121  const MachineLoopInfo *MLI = nullptr;
122  const MachineDominatorTree *MDT = nullptr;
123  const TargetPassConfig *PassConfig = nullptr;
124  AliasAnalysis *AA = nullptr;
125  LiveIntervals *LIS = nullptr;
126 
127  RegisterClassInfo *RegClassInfo;
128 
129  MachineSchedContext();
130  virtual ~MachineSchedContext();
131 };
132 
133 /// MachineSchedRegistry provides a selection of available machine instruction
134 /// schedulers.
135 class MachineSchedRegistry
136  : public MachinePassRegistryNode<
137  ScheduleDAGInstrs *(*)(MachineSchedContext *)> {
138 public:
139  using ScheduleDAGCtor = ScheduleDAGInstrs *(*)(MachineSchedContext *);
140 
141  // RegisterPassParser requires a (misnamed) FunctionPassCtor type.
142  using FunctionPassCtor = ScheduleDAGCtor;
143 
144  static MachinePassRegistry<ScheduleDAGCtor> Registry;
145 
146  MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
147  : MachinePassRegistryNode(N, D, C) {
148  Registry.Add(this);
149  }
150 
151  ~MachineSchedRegistry() { Registry.Remove(this); }
152 
153  // Accessors.
154  //
155  MachineSchedRegistry *getNext() const {
156  return (MachineSchedRegistry *)MachinePassRegistryNode::getNext();
157  }
158 
159  static MachineSchedRegistry *getList() {
160  return (MachineSchedRegistry *)Registry.getList();
161  }
162 
163  static void setListener(MachinePassRegistryListener<FunctionPassCtor> *L) {
164  Registry.setListener(L);
165  }
166 };
167 
168 class ScheduleDAGMI;
169 
170 /// Define a generic scheduling policy for targets that don't provide their own
171 /// MachineSchedStrategy. This can be overridden for each scheduling region
172 /// before building the DAG.
173 struct MachineSchedPolicy {
174  // Allow the scheduler to disable register pressure tracking.
175  bool ShouldTrackPressure = false;
176  /// Track LaneMasks to allow reordering of independent subregister writes
177  /// of the same vreg. \sa MachineSchedStrategy::shouldTrackLaneMasks()
178  bool ShouldTrackLaneMasks = false;
179 
180  // Allow the scheduler to force top-down or bottom-up scheduling. If neither
181  // is true, the scheduler runs in both directions and converges.
182  bool OnlyTopDown = false;
183  bool OnlyBottomUp = false;
184 
185  // Disable heuristic that tries to fetch nodes from long dependency chains
186  // first.
187  bool DisableLatencyHeuristic = false;
188 
189  MachineSchedPolicy() = default;
190 };
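// Example (editorial sketch, not part of the original header): a subtarget can
// fill in this policy per region from TargetSubtargetInfo::overrideSchedPolicy().
// "MyTargetSubtarget" and the 32-instruction threshold are assumptions made up
// for illustration only.
//
//   void MyTargetSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
//                                               unsigned NumRegionInstrs) const {
//     Policy.ShouldTrackPressure = true;  // keep pressure heuristics enabled
//     if (NumRegionInstrs < 32)
//       Policy.OnlyBottomUp = true;       // small region: one direction only
//   }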
191 
192 /// MachineSchedStrategy - Interface to the scheduling algorithm used by
193 /// ScheduleDAGMI.
194 ///
195 /// Initialization sequence:
196 /// initPolicy -> shouldTrackPressure -> initialize(DAG) -> registerRoots
197 class MachineSchedStrategy {
198  virtual void anchor();
199 
200 public:
201  virtual ~MachineSchedStrategy() = default;
202 
203  /// Optionally override the per-region scheduling policy.
204  virtual void initPolicy(MachineBasicBlock::iterator Begin,
205  MachineBasicBlock::iterator End,
206  unsigned NumRegionInstrs) {}
207 
208  virtual void dumpPolicy() const {}
209 
210  /// Check if pressure tracking is needed before building the DAG and
211  /// initializing this strategy. Called after initPolicy.
212  virtual bool shouldTrackPressure() const { return true; }
213 
214  /// Returns true if lanemasks should be tracked. LaneMask tracking is
215  /// necessary to reorder independent subregister defs for the same vreg.
216  /// This has to be enabled in combination with shouldTrackPressure().
217  virtual bool shouldTrackLaneMasks() const { return false; }
218 
219  // If this method returns true, handling of the scheduling regions
220  // themselves (in case of a scheduling boundary in MBB) will be done
221  // beginning with the topmost region of MBB.
222  virtual bool doMBBSchedRegionsTopDown() const { return false; }
223 
224  /// Initialize the strategy after building the DAG for a new region.
225  virtual void initialize(ScheduleDAGMI *DAG) = 0;
226 
227  /// Tell the strategy that MBB is about to be processed.
228  virtual void enterMBB(MachineBasicBlock *MBB) {};
229 
230  /// Tell the strategy that current MBB is done.
231  virtual void leaveMBB() {};
232 
233  /// Notify this strategy that all roots have been released (including those
234  /// that depend on EntrySU or ExitSU).
235  virtual void registerRoots() {}
236 
237  /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
238  /// schedule the node at the top of the unscheduled region. Otherwise it will
239  /// be scheduled at the bottom.
240  virtual SUnit *pickNode(bool &IsTopNode) = 0;
241 
242  /// Scheduler callback to notify that a new subtree is scheduled.
243  virtual void scheduleTree(unsigned SubtreeID) {}
244 
245  /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
246  /// instruction and updated scheduled/remaining flags in the DAG nodes.
247  virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
248 
249  /// When all predecessor dependencies have been resolved, free this node for
250  /// top-down scheduling.
251  virtual void releaseTopNode(SUnit *SU) = 0;
252 
253  /// When all successor dependencies have been resolved, free this node for
254  /// bottom-up scheduling.
255  virtual void releaseBottomNode(SUnit *SU) = 0;
256 };
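// Example (editorial sketch, not part of the original header): a trivial
// bottom-up strategy showing the smallest useful set of overrides. It simply
// picks the most recently released node; real strategies put their heuristics
// in pickNode(). "TrivialBottomUpStrategy" is a made-up name.
//
//   class TrivialBottomUpStrategy : public MachineSchedStrategy {
//     std::vector<SUnit *> Ready; // nodes whose successors are all scheduled
//   public:
//     void initialize(ScheduleDAGMI *DAG) override { Ready.clear(); }
//     void releaseTopNode(SUnit *SU) override {}   // bottom-up only
//     void releaseBottomNode(SUnit *SU) override { Ready.push_back(SU); }
//     SUnit *pickNode(bool &IsTopNode) override {
//       if (Ready.empty())
//         return nullptr;                // nothing left; scheduling is done
//       IsTopNode = false;               // place at the bottom of the region
//       SUnit *SU = Ready.back();
//       Ready.pop_back();
//       return SU;
//     }
//     void schedNode(SUnit *SU, bool IsTopNode) override {}
//   };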
257 
258 /// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply
259 /// schedules machine instructions according to the given MachineSchedStrategy
260 /// without much extra book-keeping. This is the common functionality between
261 /// PreRA and PostRA MachineScheduler.
262 class ScheduleDAGMI : public ScheduleDAGInstrs {
263 protected:
264  AliasAnalysis *AA;
265  LiveIntervals *LIS;
266  std::unique_ptr<MachineSchedStrategy> SchedImpl;
267 
268  /// Ordered list of DAG postprocessing steps.
269  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;
270 
271  /// The top of the unscheduled zone.
272  MachineBasicBlock::iterator CurrentTop;
273 
274  /// The bottom of the unscheduled zone.
275  MachineBasicBlock::iterator CurrentBottom;
276 
277  /// Record the next node in a scheduled cluster.
278  const SUnit *NextClusterPred = nullptr;
279  const SUnit *NextClusterSucc = nullptr;
280 
281 #ifndef NDEBUG
282  /// The number of instructions scheduled so far. Used to cut off the
283  /// scheduler at the point determined by misched-cutoff.
284  unsigned NumInstrsScheduled = 0;
285 #endif
286 
287 public:
288  ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
289  bool RemoveKillFlags)
290  : ScheduleDAGInstrs(*C->MF, C->MLI, RemoveKillFlags), AA(C->AA),
291  LIS(C->LIS), SchedImpl(std::move(S)) {}
292 
293  // Provide a vtable anchor
294  ~ScheduleDAGMI() override;
295 
296  /// If this method returns true, handling of the scheduling regions
297  /// themselves (in case of a scheduling boundary in MBB) will be done
298  /// beginning with the topmost region of MBB.
299  bool doMBBSchedRegionsTopDown() const override {
300  return SchedImpl->doMBBSchedRegionsTopDown();
301  }
302 
303  // Returns LiveIntervals instance for use in DAG mutators and such.
304  LiveIntervals *getLIS() const { return LIS; }
305 
306  /// Return true if this DAG supports VReg liveness and RegPressure.
307  virtual bool hasVRegLiveness() const { return false; }
308 
309  /// Add a postprocessing step to the DAG builder.
310  /// Mutations are applied in the order that they are added after normal DAG
311  /// building and before MachineSchedStrategy initialization.
312  ///
313  /// ScheduleDAGMI takes ownership of the Mutation object.
314  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
315  if (Mutation)
316  Mutations.push_back(std::move(Mutation));
317  }
318 
319  MachineBasicBlock::iterator top() const { return CurrentTop; }
320  MachineBasicBlock::iterator bottom() const { return CurrentBottom; }
321 
322  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
323  /// region. This covers all instructions in a block, while schedule() may only
324  /// cover a subset.
325  void enterRegion(MachineBasicBlock *bb,
326  MachineBasicBlock::iterator begin,
327  MachineBasicBlock::iterator end,
328  unsigned regioninstrs) override;
329 
330  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
331  /// reorderable instructions.
332  void schedule() override;
333 
334  void startBlock(MachineBasicBlock *bb) override;
335  void finishBlock() override;
336 
337  /// Change the position of an instruction within the basic block and update
338  /// live ranges and region boundary iterators.
339  void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
340 
341  const SUnit *getNextClusterPred() const { return NextClusterPred; }
342 
343  const SUnit *getNextClusterSucc() const { return NextClusterSucc; }
344 
345  void viewGraph(const Twine &Name, const Twine &Title) override;
346  void viewGraph() override;
347 
348 protected:
349  // Top-Level entry points for the schedule() driver...
350 
351  /// Apply each ScheduleDAGMutation step in order. This allows different
352  /// instances of ScheduleDAGMI to perform custom DAG postprocessing.
353  void postprocessDAG();
354 
355  /// Release ExitSU predecessors and setup scheduler queues.
356  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
357 
358  /// Update scheduler DAG and queues after scheduling an instruction.
359  void updateQueues(SUnit *SU, bool IsTopNode);
360 
361  /// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
362  void placeDebugValues();
363 
364  /// dump the scheduled Sequence.
365  void dumpSchedule() const;
366 
367  // Lesser helpers...
368  bool checkSchedLimit();
369 
370  void findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
371  SmallVectorImpl<SUnit*> &BotRoots);
372 
373  void releaseSucc(SUnit *SU, SDep *SuccEdge);
374  void releaseSuccessors(SUnit *SU);
375  void releasePred(SUnit *SU, SDep *PredEdge);
376  void releasePredecessors(SUnit *SU);
377 };
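// Example (editorial sketch, not part of the original header): a DAG mutation
// that could be registered with addMutation() above. It runs after DAG building
// and may add target-specific edges; "MyClusterMutation" is a made-up name and
// the actual pairing test is elided.
//
//   class MyClusterMutation : public ScheduleDAGMutation {
//     void apply(ScheduleDAGInstrs *DAGInstrs) override {
//       ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI *>(DAGInstrs);
//       for (SUnit &SU : DAG->SUnits) {
//         // Inspect SU.getInstr() and, for instructions that should stay
//         // adjacent, add a weak cluster edge, e.g.
//         //   DAG->addEdge(&SuccSU, SDep(&SU, SDep::Cluster));
//       }
//     }
//   };
//   // ... in createMachineScheduler():
//   //   DAG->addMutation(std::make_unique<MyClusterMutation>());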
378 
379 /// ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules
380 /// machine instructions while updating LiveIntervals and tracking regpressure.
381 class ScheduleDAGMILive : public ScheduleDAGMI {
382 protected:
383  RegisterClassInfo *RegClassInfo;
384 
385  /// Information about DAG subtrees. If DFSResult is NULL, then SchedulerTrees
386  /// will be empty.
387  SchedDFSResult *DFSResult = nullptr;
388  BitVector ScheduledTrees;
389 
390  MachineBasicBlock::iterator LiveRegionEnd;
391 
392  /// Maps vregs to the SUnits of their uses in the current scheduling region.
393  VReg2SUnitMultiMap VRegUses;
394 
395  // Map each SU to its summary of pressure changes. This array is updated for
396  // liveness during bottom-up scheduling. Top-down scheduling may proceed but
397  // has no effect on the pressure diffs.
398  PressureDiffs SUPressureDiffs;
399 
400  /// Register pressure in this region computed by initRegPressure.
401  bool ShouldTrackPressure = false;
402  bool ShouldTrackLaneMasks = false;
403  IntervalPressure RegPressure;
404  RegPressureTracker RPTracker;
405 
406  /// List of pressure sets that exceed the target's pressure limit before
407  /// scheduling, listed in increasing set ID order. Each pressure set is paired
408  /// with its max pressure in the currently scheduled regions.
409  std::vector<PressureChange> RegionCriticalPSets;
410 
411  /// The top of the unscheduled zone.
412  IntervalPressure TopPressure;
413  RegPressureTracker TopRPTracker;
414 
415  /// The bottom of the unscheduled zone.
416  IntervalPressure BotPressure;
417  RegPressureTracker BotRPTracker;
418 
419  /// True if disconnected subregister components are already renamed.
420  /// The renaming is only done on demand if lane masks are tracked.
421  bool DisconnectedComponentsRenamed = false;
422 
423 public:
424  ScheduleDAGMILive(MachineSchedContext *C,
425  std::unique_ptr<MachineSchedStrategy> S)
426  : ScheduleDAGMI(C, std::move(S), /*RemoveKillFlags=*/false),
427  RegClassInfo(C->RegClassInfo), RPTracker(RegPressure),
428  TopRPTracker(TopPressure), BotRPTracker(BotPressure) {}
429 
430  ~ScheduleDAGMILive() override;
431 
432  /// Return true if this DAG supports VReg liveness and RegPressure.
433  bool hasVRegLiveness() const override { return true; }
434 
435  /// Return true if register pressure tracking is enabled.
436  bool isTrackingPressure() const { return ShouldTrackPressure; }
437 
438  /// Get current register pressure for the top scheduled instructions.
439  const IntervalPressure &getTopPressure() const { return TopPressure; }
440  const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }
441 
442  /// Get current register pressure for the bottom scheduled instructions.
443  const IntervalPressure &getBotPressure() const { return BotPressure; }
444  const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }
445 
446  /// Get register pressure for the entire scheduling region before scheduling.
447  const IntervalPressure &getRegPressure() const { return RegPressure; }
448 
449  const std::vector<PressureChange> &getRegionCriticalPSets() const {
450  return RegionCriticalPSets;
451  }
452 
453  PressureDiff &getPressureDiff(const SUnit *SU) {
454  return SUPressureDiffs[SU->NodeNum];
455  }
456  const PressureDiff &getPressureDiff(const SUnit *SU) const {
457  return SUPressureDiffs[SU->NodeNum];
458  }
459 
460  /// Compute a DFSResult after DAG building is complete, and before any
461  /// queue comparisons.
462  void computeDFSResult();
463 
464  /// Return a non-null DFS result if the scheduling strategy initialized it.
465  const SchedDFSResult *getDFSResult() const { return DFSResult; }
466 
467  BitVector &getScheduledTrees() { return ScheduledTrees; }
468 
469  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
470  /// region. This covers all instructions in a block, while schedule() may only
471  /// cover a subset.
472  void enterRegion(MachineBasicBlock *bb,
473  MachineBasicBlock::iterator begin,
474  MachineBasicBlock::iterator end,
475  unsigned regioninstrs) override;
476 
477  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
478  /// reorderable instructions.
479  void schedule() override;
480 
481  /// Compute the cyclic critical path through the DAG.
482  unsigned computeCyclicCriticalPath();
483 
484  void dump() const override;
485 
486 protected:
487  // Top-Level entry points for the schedule() driver...
488 
489  /// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
490  /// enabled. This sets up three trackers. RPTracker will cover the entire DAG
491  /// region, TopTracker and BottomTracker will be initialized to the top and
492  /// bottom of the DAG region without covering any unscheduled instruction.
493  void buildDAGWithRegPressure();
494 
495  /// Release ExitSU predecessors and setup scheduler queues. Re-position
496  /// the Top RP tracker in case the region beginning has changed.
497  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
498 
499  /// Move an instruction and update register pressure.
500  void scheduleMI(SUnit *SU, bool IsTopNode);
501 
502  // Lesser helpers...
503 
504  void initRegPressure();
505 
506  void updatePressureDiffs(ArrayRef<RegisterMaskPair> LiveUses);
507 
508  void updateScheduledPressure(const SUnit *SU,
509  const std::vector<unsigned> &NewMaxPressure);
510 
511  void collectVRegUses(SUnit &SU);
512 };
513 
514 //===----------------------------------------------------------------------===//
515 ///
516 /// Helpers for implementing custom MachineSchedStrategy classes. These take
517 /// care of the book-keeping associated with list scheduling heuristics.
518 ///
519 //===----------------------------------------------------------------------===//
520 
521 /// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience
522 /// methods for pushing and removing nodes. ReadyQueue's are uniquely identified
523 /// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
524 ///
525 /// This is a convenience class that may be used by implementations of
526 /// MachineSchedStrategy.
527 class ReadyQueue {
528  unsigned ID;
529  std::string Name;
530  std::vector<SUnit*> Queue;
531 
532 public:
533  ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
534 
535  unsigned getID() const { return ID; }
536 
537  StringRef getName() const { return Name; }
538 
539  // SU is in this queue if its NodeQueueId is a superset of this ID.
540  bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
541 
542  bool empty() const { return Queue.empty(); }
543 
544  void clear() { Queue.clear(); }
545 
546  unsigned size() const { return Queue.size(); }
547 
548  using iterator = std::vector<SUnit*>::iterator;
549 
550  iterator begin() { return Queue.begin(); }
551 
552  iterator end() { return Queue.end(); }
553 
554  ArrayRef<SUnit*> elements() { return Queue; }
555 
556  iterator find(SUnit *SU) { return llvm::find(Queue, SU); }
557 
558  void push(SUnit *SU) {
559  Queue.push_back(SU);
560  SU->NodeQueueId |= ID;
561  }
562 
563  iterator remove(iterator I) {
564  (*I)->NodeQueueId &= ~ID;
565  *I = Queue.back();
566  unsigned idx = I - Queue.begin();
567  Queue.pop_back();
568  return Queue.begin() + idx;
569  }
570 
571  void dump() const;
572 };
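// Example (editorial sketch, not part of the original header): typical use of
// ReadyQueue inside a strategy. The queue is unordered, so a pickNode()
// implementation scans it and removes the chosen element. The "lowest NodeNum"
// rule below is only a placeholder heuristic.
//
//   ReadyQueue Q(/*id=*/1, "MyQ");
//   // in releaseBottomNode():
//   //   Q.push(SU);
//   // in pickNode():
//   SUnit *Best = nullptr;
//   for (SUnit *SU : Q.elements())
//     if (!Best || SU->NodeNum < Best->NodeNum)
//       Best = SU;
//   if (Best)
//     Q.remove(Q.find(Best));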
573 
574 /// Summarize the unscheduled region.
575 struct SchedRemainder {
576  // Critical path through the DAG in expected latency.
577  unsigned CriticalPath;
578  unsigned CyclicCritPath;
579 
580  // Scaled count of micro-ops left to schedule.
581  unsigned RemIssueCount;
582 
583  bool IsAcyclicLatencyLimited;
584 
585  // Unscheduled resources
586  SmallVector<unsigned, 16> RemainingCounts;
587 
588  SchedRemainder() { reset(); }
589 
590  void reset() {
591  CriticalPath = 0;
592  CyclicCritPath = 0;
593  RemIssueCount = 0;
594  IsAcyclicLatencyLimited = false;
595  RemainingCounts.clear();
596  }
597 
598  void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
599 };
600 
601 /// Each Scheduling boundary is associated with ready queues. It tracks the
602 /// current cycle in the direction of movement, and maintains the state
603 /// of "hazards" and other interlocks at the current cycle.
604 class SchedBoundary {
605 public:
606  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
607  enum {
608  TopQID = 1,
609  BotQID = 2,
610  LogMaxQID = 2
611  };
612 
613  ScheduleDAGMI *DAG = nullptr;
614  const TargetSchedModel *SchedModel = nullptr;
615  SchedRemainder *Rem = nullptr;
616 
617  ReadyQueue Available;
618  ReadyQueue Pending;
619 
620  ScheduleHazardRecognizer *HazardRec = nullptr;
621 
622 private:
623  /// True if the pending Q should be checked/updated before scheduling another
624  /// instruction.
625  bool CheckPending;
626 
627  /// Number of cycles it takes to issue the instructions scheduled in this
628  /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
629  /// See getStalls().
630  unsigned CurrCycle;
631 
632  /// Micro-ops issued in the current cycle
633  unsigned CurrMOps;
634 
635  /// MinReadyCycle - Cycle of the soonest available instruction.
636  unsigned MinReadyCycle;
637 
638  // The expected latency of the critical path in this scheduled zone.
639  unsigned ExpectedLatency;
640 
641  // The latency of dependence chains leading into this zone.
642  // For each node scheduled bottom-up: DLat = max DLat, N.Depth.
643  // For each cycle scheduled: DLat -= 1.
644  unsigned DependentLatency;
645 
646  /// Count the scheduled (issued) micro-ops that can be retired by
647  /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
648  unsigned RetiredMOps;
649 
650  // Count scheduled resources that have been executed. Resources are
651  // considered executed if they become ready in the time that it takes to
652  // saturate any resource including the one in question. Counts are scaled
653  // for direct comparison with other resources. Counts can be compared with
654  // MOps * getMicroOpFactor and Latency * getLatencyFactor.
655  SmallVector<unsigned, 16> ExecutedResCounts;
656 
657  /// Cache the max count for a single resource.
658  unsigned MaxExecutedResCount;
659 
660  // Cache the critical resources ID in this scheduled zone.
661  unsigned ZoneCritResIdx;
662 
663  // Is the scheduled region resource limited vs. latency limited.
664  bool IsResourceLimited;
665 
666  // Record the highest cycle at which each resource has been reserved by a
667  // scheduled instruction.
668  SmallVector<unsigned, 16> ReservedCycles;
669 
670  // For each PIdx, stores first index into ReservedCycles that corresponds to
671  // it.
672  SmallVector<unsigned, 16> ReservedCyclesIndex;
673 
674 #ifndef NDEBUG
675  // Remember the greatest possible stall as an upper bound on the number of
676  // times we should retry the pending queue because of a hazard.
677  unsigned MaxObservedStall;
678 #endif
679 
680 public:
681  /// Pending queues extend the ready queues with the same ID and the
682  /// PendingFlag set.
683  SchedBoundary(unsigned ID, const Twine &Name):
684  Available(ID, Name+".A"), Pending(ID << LogMaxQID, Name+".P") {
685  reset();
686  }
687 
688  ~SchedBoundary();
689 
690  void reset();
691 
692  void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
693  SchedRemainder *rem);
694 
695  bool isTop() const {
696  return Available.getID() == TopQID;
697  }
698 
699  /// Number of cycles to issue the instructions scheduled in this zone.
700  unsigned getCurrCycle() const { return CurrCycle; }
701 
702  /// Micro-ops issued in the current cycle
703  unsigned getCurrMOps() const { return CurrMOps; }
704 
705  // The latency of dependence chains leading into this zone.
706  unsigned getDependentLatency() const { return DependentLatency; }
707 
708  /// Get the number of latency cycles "covered" by the scheduled
709  /// instructions. This is the larger of the critical path within the zone
710  /// and the number of cycles required to issue the instructions.
711  unsigned getScheduledLatency() const {
712  return std::max(ExpectedLatency, CurrCycle);
713  }
714 
715  unsigned getUnscheduledLatency(SUnit *SU) const {
716  return isTop() ? SU->getHeight() : SU->getDepth();
717  }
718 
719  unsigned getResourceCount(unsigned ResIdx) const {
720  return ExecutedResCounts[ResIdx];
721  }
722 
723  /// Get the scaled count of scheduled micro-ops and resources, including
724  /// executed resources.
725  unsigned getCriticalCount() const {
726  if (!ZoneCritResIdx)
727  return RetiredMOps * SchedModel->getMicroOpFactor();
728  return getResourceCount(ZoneCritResIdx);
729  }
730 
731  /// Get a scaled count for the minimum execution time of the scheduled
732  /// micro-ops that are ready to execute by getExecutedCount. Notice the
733  /// feedback loop.
734  unsigned getExecutedCount() const {
735  return std::max(CurrCycle * SchedModel->getLatencyFactor(),
736  MaxExecutedResCount);
737  }
738 
739  unsigned getZoneCritResIdx() const { return ZoneCritResIdx; }
740 
741  // Is the scheduled region resource limited vs. latency limited.
742  bool isResourceLimited() const { return IsResourceLimited; }
743 
744  /// Get the difference between the given SUnit's ready time and the current
745  /// cycle.
746  unsigned getLatencyStallCycles(SUnit *SU);
747 
748  unsigned getNextResourceCycleByInstance(unsigned InstanceIndex,
749  unsigned Cycles);
750 
751  std::pair<unsigned, unsigned> getNextResourceCycle(unsigned PIdx,
752  unsigned Cycles);
753 
754  bool checkHazard(SUnit *SU);
755 
756  unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);
757 
758  unsigned getOtherResourceCount(unsigned &OtherCritIdx);
759 
760  void releaseNode(SUnit *SU, unsigned ReadyCycle);
761 
762  void bumpCycle(unsigned NextCycle);
763 
764  void incExecutedResources(unsigned PIdx, unsigned Count);
765 
766  unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);
767 
768  void bumpNode(SUnit *SU);
769 
770  void releasePending();
771 
772  void removeReady(SUnit *SU);
773 
774  /// Call this before applying any other heuristics to the Available queue.
775  /// Updates the Available/Pending Q's if necessary and returns the single
776  /// available instruction, or NULL if there are multiple candidates.
777  SUnit *pickOnlyChoice();
778 
779  void dumpScheduledState() const;
780 };
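// Example (editorial sketch, not part of the original header): how a custom
// strategy typically drives a single top-down SchedBoundary. "MyStrategy" is
// assumed to own "SchedBoundary Top" and "SchedRemainder Rem" members and to
// forward releaseTopNode() to Top.releaseNode(), as GenericScheduler does
// below. A production strategy would also set up Top.HazardRec and consult
// checkHazard()/getLatencyStallCycles() when comparing candidates.
//
//   void MyStrategy::initialize(ScheduleDAGMI *DAG) {
//     Rem.init(DAG, DAG->getSchedModel());
//     Top.init(DAG, DAG->getSchedModel(), &Rem);
//   }
//   SUnit *MyStrategy::pickNode(bool &IsTopNode) {
//     IsTopNode = true;
//     SUnit *SU = Top.pickOnlyChoice();        // single ready instruction?
//     if (!SU)                                 // otherwise apply a heuristic:
//       for (SUnit *Cand : Top.Available.elements())
//         if (!SU || Top.getUnscheduledLatency(Cand) >
//                    Top.getUnscheduledLatency(SU))
//           SU = Cand;                         // longest remaining path wins
//     if (SU)
//       Top.removeReady(SU);                   // take it out of the queue
//     return SU;
//   }
//   void MyStrategy::schedNode(SUnit *SU, bool IsTopNode) {
//     Top.bumpNode(SU);                        // advance cycle/resource state
//   }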
781 
782 /// Base class for GenericScheduler. This class maintains information about
783 /// scheduling candidates based on TargetSchedModel making it easy to implement
784 /// heuristics for either preRA or postRA scheduling.
785 class GenericSchedulerBase : public MachineSchedStrategy {
786 public:
787  /// Represent the type of SchedCandidate found within a single queue.
788  /// pickNodeBidirectional depends on these listed by decreasing priority.
789  enum CandReason : uint8_t {
790  NoCand, Only1, PhysReg, RegExcess, RegCritical, Stall, Cluster, Weak,
791  RegMax, ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
792  TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};
793 
794 #ifndef NDEBUG
795  static const char *getReasonStr(GenericSchedulerBase::CandReason Reason);
796 #endif
797 
798  /// Policy for scheduling the next instruction in the candidate's zone.
799  struct CandPolicy {
800  bool ReduceLatency = false;
801  unsigned ReduceResIdx = 0;
802  unsigned DemandResIdx = 0;
803 
804  CandPolicy() = default;
805 
806  bool operator==(const CandPolicy &RHS) const {
807  return ReduceLatency == RHS.ReduceLatency &&
808  ReduceResIdx == RHS.ReduceResIdx &&
809  DemandResIdx == RHS.DemandResIdx;
810  }
811  bool operator!=(const CandPolicy &RHS) const {
812  return !(*this == RHS);
813  }
814  };
815 
816  /// Status of an instruction's critical resource consumption.
817  struct SchedResourceDelta {
818  // Count critical resources in the scheduled region required by SU.
819  unsigned CritResources = 0;
820 
821  // Count critical resources from another region consumed by SU.
822  unsigned DemandedResources = 0;
823 
824  SchedResourceDelta() = default;
825 
826  bool operator==(const SchedResourceDelta &RHS) const {
827  return CritResources == RHS.CritResources
828  && DemandedResources == RHS.DemandedResources;
829  }
830  bool operator!=(const SchedResourceDelta &RHS) const {
831  return !operator==(RHS);
832  }
833  };
834 
835  /// Store the state used by GenericScheduler heuristics, required for the
836  /// lifetime of one invocation of pickNode().
837  struct SchedCandidate {
838  CandPolicy Policy;
839 
840  // The best SUnit candidate.
841  SUnit *SU;
842 
843  // The reason for this candidate.
844  CandReason Reason;
845 
846  // Whether this candidate should be scheduled at top/bottom.
847  bool AtTop;
848 
849  // Register pressure values for the best candidate.
850  RegPressureDelta RPDelta;
851 
852  // Critical resource consumption of the best candidate.
853  SchedResourceDelta ResDelta;
854 
855  SchedCandidate() { reset(CandPolicy()); }
856  SchedCandidate(const CandPolicy &Policy) { reset(Policy); }
857 
858  void reset(const CandPolicy &NewPolicy) {
859  Policy = NewPolicy;
860  SU = nullptr;
861  Reason = NoCand;
862  AtTop = false;
863  RPDelta = RegPressureDelta();
864  ResDelta = SchedResourceDelta();
865  }
866 
867  bool isValid() const { return SU; }
868 
869  // Copy the status of another candidate without changing policy.
870  void setBest(SchedCandidate &Best) {
871  assert(Best.Reason != NoCand && "uninitialized Sched candidate");
872  SU = Best.SU;
873  Reason = Best.Reason;
874  AtTop = Best.AtTop;
875  RPDelta = Best.RPDelta;
876  ResDelta = Best.ResDelta;
877  }
878 
879  void initResourceDelta(const ScheduleDAGMI *DAG,
880  const TargetSchedModel *SchedModel);
881  };
882 
883 protected:
884  const MachineSchedContext *Context;
885  const TargetSchedModel *SchedModel = nullptr;
886  const TargetRegisterInfo *TRI = nullptr;
887 
888  SchedRemainder Rem;
889 
890  GenericSchedulerBase(const MachineSchedContext *C) : Context(C) {}
891 
892  void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone,
893  SchedBoundary *OtherZone);
894 
895 #ifndef NDEBUG
896  void traceCandidate(const SchedCandidate &Cand);
897 #endif
898 
899 private:
900  bool shouldReduceLatency(const CandPolicy &Policy, SchedBoundary &CurrZone,
901  bool ComputeRemLatency, unsigned &RemLatency) const;
902 };
903 
904 // Utility functions used by heuristics in tryCandidate().
905 bool tryLess(int TryVal, int CandVal,
906  GenericSchedulerBase::SchedCandidate &TryCand,
907  GenericSchedulerBase::SchedCandidate &Cand,
908  GenericSchedulerBase::CandReason Reason);
909 bool tryGreater(int TryVal, int CandVal,
910  GenericSchedulerBase::SchedCandidate &TryCand,
911  GenericSchedulerBase::SchedCandidate &Cand,
912  GenericSchedulerBase::CandReason Reason);
913 bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
914  GenericSchedulerBase::SchedCandidate &Cand,
915  SchedBoundary &Zone);
916 bool tryPressure(const PressureChange &TryP,
917  const PressureChange &CandP,
918  GenericSchedulerBase::SchedCandidate &TryCand,
919  GenericSchedulerBase::SchedCandidate &Cand,
920  GenericSchedulerBase::CandReason Reason,
921  const TargetRegisterInfo *TRI,
922  const MachineFunction &MF);
923 unsigned getWeakLeft(const SUnit *SU, bool isTop);
924 int biasPhysReg(const SUnit *SU, bool isTop);
925 
926 /// GenericScheduler shrinks the unscheduled zone using heuristics to balance
927 /// the schedule.
928 class GenericScheduler : public GenericSchedulerBase {
929 public:
930  GenericScheduler(const MachineSchedContext *C):
931  GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
932  Bot(SchedBoundary::BotQID, "BotQ") {}
933 
934  void initPolicy(MachineBasicBlock::iterator Begin,
935  MachineBasicBlock::iterator End,
936  unsigned NumRegionInstrs) override;
937 
938  void dumpPolicy() const override;
939 
940  bool shouldTrackPressure() const override {
941  return RegionPolicy.ShouldTrackPressure;
942  }
943 
944  bool shouldTrackLaneMasks() const override {
945  return RegionPolicy.ShouldTrackLaneMasks;
946  }
947 
948  void initialize(ScheduleDAGMI *dag) override;
949 
950  SUnit *pickNode(bool &IsTopNode) override;
951 
952  void schedNode(SUnit *SU, bool IsTopNode) override;
953 
954  void releaseTopNode(SUnit *SU) override {
955  if (SU->isScheduled)
956  return;
957 
958  Top.releaseNode(SU, SU->TopReadyCycle);
959  TopCand.SU = nullptr;
960  }
961 
962  void releaseBottomNode(SUnit *SU) override {
963  if (SU->isScheduled)
964  return;
965 
966  Bot.releaseNode(SU, SU->BotReadyCycle);
967  BotCand.SU = nullptr;
968  }
969 
970  void registerRoots() override;
971 
972 protected:
973  ScheduleDAGMILive *DAG = nullptr;
974 
975  MachineSchedPolicy RegionPolicy;
976 
977  // State of the top and bottom scheduled instruction boundaries.
978  SchedBoundary Top;
979  SchedBoundary Bot;
980 
981  /// Candidate last picked from Top boundary.
982  SchedCandidate TopCand;
983  /// Candidate last picked from Bot boundary.
984  SchedCandidate BotCand;
985 
986  void checkAcyclicLatency();
987 
988  void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop,
989  const RegPressureTracker &RPTracker,
990  RegPressureTracker &TempTracker);
991 
992  virtual void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
993  SchedBoundary *Zone) const;
994 
995  SUnit *pickNodeBidirectional(bool &IsTopNode);
996 
997  void pickNodeFromQueue(SchedBoundary &Zone,
998  const CandPolicy &ZonePolicy,
999  const RegPressureTracker &RPTracker,
1000  SchedCandidate &Candidate);
1001 
1002  void reschedulePhysReg(SUnit *SU, bool isTop);
1003 };
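// Example (editorial sketch, not part of the original header): targets that
// only need an extra tie-breaker can subclass GenericScheduler and override
// tryCandidate(), reusing the try* helpers declared above. "MyTargetScheduler"
// and its source-order rule are made up for illustration.
//
//   class MyTargetScheduler : public GenericScheduler {
//   public:
//     MyTargetScheduler(const MachineSchedContext *C) : GenericScheduler(C) {}
//   protected:
//     void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
//                       SchedBoundary *Zone) const override {
//       GenericScheduler::tryCandidate(Cand, TryCand, Zone); // default rules
//       // Refine only the picks the generic heuristics left to node order.
//       if (Cand.isValid() && TryCand.Reason == NodeOrder)
//         tryLess(TryCand.SU->NodeNum, Cand.SU->NodeNum, TryCand, Cand,
//                 NodeOrder);
//     }
//   };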
1004 
1005 /// PostGenericScheduler - Interface to the scheduling algorithm used by
1006 /// ScheduleDAGMI.
1007 ///
1008 /// Callbacks from ScheduleDAGMI:
1009 /// initPolicy -> initialize(DAG) -> registerRoots -> pickNode ...
1010 class PostGenericScheduler : public GenericSchedulerBase {
1011 protected:
1012  ScheduleDAGMI *DAG;
1013  SchedBoundary Top;
1014  SmallVector<SUnit*, 8> BotRoots;
1015 
1016 public:
1017  PostGenericScheduler(const MachineSchedContext *C):
1018  GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ") {}
1019 
1020  ~PostGenericScheduler() override = default;
1021 
1022  void initPolicy(MachineBasicBlock::iterator Begin,
1023  MachineBasicBlock::iterator End,
1024  unsigned NumRegionInstrs) override {
1025  /* no configurable policy */
1026  }
1027 
1028  /// PostRA scheduling does not track pressure.
1029  bool shouldTrackPressure() const override { return false; }
1030 
1031  void initialize(ScheduleDAGMI *Dag) override;
1032 
1033  void registerRoots() override;
1034 
1035  SUnit *pickNode(bool &IsTopNode) override;
1036 
1037  void scheduleTree(unsigned SubtreeID) override {
1038  llvm_unreachable("PostRA scheduler does not support subtree analysis.");
1039  }
1040 
1041  void schedNode(SUnit *SU, bool IsTopNode) override;
1042 
1043  void releaseTopNode(SUnit *SU) override {
1044  if (SU->isScheduled)
1045  return;
1046  Top.releaseNode(SU, SU->TopReadyCycle);
1047  }
1048 
1049  // Only called for roots.
1050  void releaseBottomNode(SUnit *SU) override {
1051  BotRoots.push_back(SU);
1052  }
1053 
1054 protected:
1055  void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);
1056 
1057  void pickNodeFromQueue(SchedCandidate &Cand);
1058 };
1059 
1060 /// Create the standard converging machine scheduler. This will be used as the
1061 /// default scheduler if the target does not set a default.
1062 /// Adds default DAG mutations.
1063 ScheduleDAGMILive *createGenericSchedLive(MachineSchedContext *C);
1064 
1065 /// Create a generic scheduler with no vreg liveness or DAG mutation passes.
1066 ScheduleDAGMI *createGenericSchedPostRA(MachineSchedContext *C);
1067 
1068 std::unique_ptr<ScheduleDAGMutation>
1069 createLoadClusterDAGMutation(const TargetInstrInfo *TII,
1070  const TargetRegisterInfo *TRI);
1071 
1072 std::unique_ptr<ScheduleDAGMutation>
1073 createStoreClusterDAGMutation(const TargetInstrInfo *TII,
1074  const TargetRegisterInfo *TRI);
1075 
1076 std::unique_ptr<ScheduleDAGMutation>
1077 createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
1078  const TargetRegisterInfo *TRI);
1079 
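// Example (editorial sketch, not part of the original header): wiring the
// factories above into a target's pass config, keeping the generic strategy
// but clustering memory operations. "MyTargetPassConfig" is a made-up name.
//
//   ScheduleDAGInstrs *
//   MyTargetPassConfig::createMachineScheduler(MachineSchedContext *C) const {
//     ScheduleDAGMILive *DAG = createGenericSchedLive(C);
//     const TargetSubtargetInfo &ST = C->MF->getSubtarget();
//     DAG->addMutation(createLoadClusterDAGMutation(ST.getInstrInfo(),
//                                                   ST.getRegisterInfo()));
//     DAG->addMutation(createStoreClusterDAGMutation(ST.getInstrInfo(),
//                                                    ST.getRegisterInfo()));
//     return DAG;
//   }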
1080 } // end namespace llvm
1081 
1082 #endif // LLVM_CODEGEN_MACHINESCHEDULER_H