1//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// MachineScheduler schedules machine instructions after phi elimination. It
10// preserves LiveIntervals so it can be invoked before register allocation.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/ArrayRef.h"
16#include "llvm/ADT/BitVector.h"
17#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/Statistic.h"
51#include "llvm/Config/llvm-config.h"
53#include "llvm/MC/LaneBitmask.h"
54#include "llvm/Pass.h"
57#include "llvm/Support/Debug.h"
61#include <algorithm>
62#include <cassert>
63#include <cstdint>
64#include <iterator>
65#include <limits>
66#include <memory>
67#include <string>
68#include <tuple>
69#include <utility>
70#include <vector>
71
72using namespace llvm;
73
74#define DEBUG_TYPE "machine-scheduler"
75
76STATISTIC(NumClustered, "Number of load/store pairs clustered");
77
78namespace llvm {
79
81 cl::desc("Force top-down list scheduling"));
83 cl::desc("Force bottom-up list scheduling"));
86 cl::desc("Print critical path length to stdout"));
87
89 "verify-misched", cl::Hidden,
90 cl::desc("Verify machine instrs before and after machine scheduling"));
91
92#ifndef NDEBUG
94 "view-misched-dags", cl::Hidden,
95 cl::desc("Pop up a window to show MISched dags after they are processed"));
96cl::opt<bool> PrintDAGs("misched-print-dags", cl::Hidden,
97 cl::desc("Print schedule DAGs"));
99 "misched-dump-reserved-cycles", cl::Hidden, cl::init(false),
100 cl::desc("Dump resource usage at schedule boundary."));
102 "misched-detail-resource-booking", cl::Hidden, cl::init(false),
 103     cl::desc("Show details of invoking getNextResourceCycle."));
104#else
105const bool ViewMISchedDAGs = false;
106const bool PrintDAGs = false;
107const bool MischedDetailResourceBooking = false;
108#ifdef LLVM_ENABLE_DUMP
109const bool MISchedDumpReservedCycles = false;
110#endif // LLVM_ENABLE_DUMP
111#endif // NDEBUG
112
113} // end namespace llvm
114
115#ifndef NDEBUG
116/// In some situations a few uninteresting nodes depend on nearly all other
117/// nodes in the graph; provide a cutoff to hide them.
118static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
 119  cl::desc("Hide nodes with more predecessors/successors than the cutoff"));
120
122 cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
123
125 cl::desc("Only schedule this function"));
126static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
127 cl::desc("Only schedule this MBB#"));
128#endif // NDEBUG
129
130/// Avoid quadratic complexity in unusually large basic blocks by limiting the
131/// size of the ready lists.
133 cl::desc("Limit ready list to N instructions"), cl::init(256));
134
135static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
136 cl::desc("Enable register pressure scheduling."), cl::init(true));
137
138static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
139 cl::desc("Enable cyclic critical path analysis."), cl::init(true));
140
142 cl::desc("Enable memop clustering."),
143 cl::init(true));
144static cl::opt<bool>
145 ForceFastCluster("force-fast-cluster", cl::Hidden,
 146                      cl::desc("Switch to fast cluster algorithm at the cost "
 147                                "of losing some fusion opportunities"),
148 cl::init(false));
150 FastClusterThreshold("fast-cluster-threshold", cl::Hidden,
151 cl::desc("The threshold for fast cluster"),
152 cl::init(1000));
153
154#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
156 "misched-dump-schedule-trace", cl::Hidden, cl::init(false),
157 cl::desc("Dump resource usage at schedule boundary."));
159 HeaderColWidth("misched-dump-schedule-trace-col-header-width", cl::Hidden,
160 cl::desc("Set width of the columns with "
161 "the resources and schedule units"),
162 cl::init(19));
164 ColWidth("misched-dump-schedule-trace-col-width", cl::Hidden,
165 cl::desc("Set width of the columns showing resource booking."),
166 cl::init(5));
168 "misched-sort-resources-in-trace", cl::Hidden, cl::init(true),
169 cl::desc("Sort the resources printed in the dump trace"));
170#endif
171
173 MIResourceCutOff("misched-resource-cutoff", cl::Hidden,
174 cl::desc("Number of intervals to track"), cl::init(10));
175
176// DAG subtrees must have at least this many nodes.
177static const unsigned MinSubtreeSize = 8;
178
179// Pin the vtables to this file.
180void MachineSchedStrategy::anchor() {}
181
182void ScheduleDAGMutation::anchor() {}
183
184//===----------------------------------------------------------------------===//
185// Machine Instruction Scheduling Pass and Registry
186//===----------------------------------------------------------------------===//
187
190}
191
193 delete RegClassInfo;
194}
195
196namespace {
197
198/// Base class for a machine scheduler class that can run at any point.
199class MachineSchedulerBase : public MachineSchedContext,
200 public MachineFunctionPass {
201public:
202 MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}
203
204 void print(raw_ostream &O, const Module* = nullptr) const override;
205
206protected:
207 void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
208};
209
210/// MachineScheduler runs after coalescing and before register allocation.
211class MachineScheduler : public MachineSchedulerBase {
212public:
213 MachineScheduler();
214
215 void getAnalysisUsage(AnalysisUsage &AU) const override;
216
217 bool runOnMachineFunction(MachineFunction&) override;
218
219 static char ID; // Class identification, replacement for typeinfo
220
221protected:
222 ScheduleDAGInstrs *createMachineScheduler();
223};
224
225/// PostMachineScheduler runs after register allocation, shortly before code emission.
226class PostMachineScheduler : public MachineSchedulerBase {
227public:
228 PostMachineScheduler();
229
230 void getAnalysisUsage(AnalysisUsage &AU) const override;
231
232 bool runOnMachineFunction(MachineFunction&) override;
233
234 static char ID; // Class identification, replacement for typeinfo
235
236protected:
237 ScheduleDAGInstrs *createPostMachineScheduler();
238};
239
240} // end anonymous namespace
241
242char MachineScheduler::ID = 0;
243
244char &llvm::MachineSchedulerID = MachineScheduler::ID;
245
247 "Machine Instruction Scheduler", false, false)
255
256MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) {
258}
259
260void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
261 AU.setPreservesCFG();
271}
272
273char PostMachineScheduler::ID = 0;
274
275char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;
276
277INITIALIZE_PASS_BEGIN(PostMachineScheduler, "postmisched",
278 "PostRA Machine Instruction Scheduler", false, false)
282INITIALIZE_PASS_END(PostMachineScheduler, "postmisched",
283 "PostRA Machine Instruction Scheduler", false, false)
284
285PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) {
287}
288
289void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
290 AU.setPreservesCFG();
296}
297
300
301/// A dummy default scheduler factory indicates whether the scheduler
302/// is overridden on the command line.
304 return nullptr;
305}
306
307/// MachineSchedOpt allows command line selection of the scheduler.
312 cl::desc("Machine instruction scheduler to use"));
313
315DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
317
319 "enable-misched",
320 cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
321 cl::Hidden);
322
324 "enable-post-misched",
325 cl::desc("Enable the post-ra machine instruction scheduling pass."),
326 cl::init(true), cl::Hidden);
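// Illustrative sketch (not part of this file): a target or plugin can make its
// own scheduler selectable with -misched=<name> by registering a factory with
// MachineSchedRegistry, mirroring DefaultSchedRegistry above. The names
// createIllustrativeSched and IllustrativeSchedRegistry are hypothetical.
//
//   static ScheduleDAGInstrs *createIllustrativeSched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
//   }
//   static MachineSchedRegistry
//       IllustrativeSchedRegistry("illustrative", "Illustrative list scheduler.",
//                                 createIllustrativeSched);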
327
328/// Decrement this iterator until reaching the top or a non-debug instr.
332 assert(I != Beg && "reached the top of the region, cannot decrement");
333 while (--I != Beg) {
334 if (!I->isDebugOrPseudoInstr())
335 break;
336 }
337 return I;
338}
339
340/// Non-const version.
346}
347
348/// If this iterator is a debug value, increment until reaching the End or a
349/// non-debug instruction.
353 for(; I != End; ++I) {
354 if (!I->isDebugOrPseudoInstr())
355 break;
356 }
357 return I;
358}
359
360/// Non-const version.
366}
367
368/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
369ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
370 // Select the scheduler, or set the default.
372 if (Ctor != useDefaultMachineSched)
373 return Ctor(this);
374
375 // Get the default scheduler set by the target for this function.
376 ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
377 if (Scheduler)
378 return Scheduler;
379
380 // Default to GenericScheduler.
381 return createGenericSchedLive(this);
382}
383
384/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
385/// the caller. We don't have a command line option to override the postRA
386/// scheduler. The Target must configure it.
387ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
388 // Get the postRA scheduler set by the target for this function.
389 ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
390 if (Scheduler)
391 return Scheduler;
392
393 // Default to GenericScheduler.
394 return createGenericSchedPostRA(this);
395}
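// Illustrative sketch: the target hook queried above is an override in the
// target's TargetPassConfig subclass. MyTargetPassConfig is a hypothetical
// name; PostGenericScheduler is the strategy used by the default
// createGenericSchedPostRA defined later in this file.
//
//   ScheduleDAGInstrs *
//   MyTargetPassConfig::createPostMachineScheduler(MachineSchedContext *C) const {
//     return new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C),
//                              /*RemoveKillFlags=*/true);
//   }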
396
397/// Top-level MachineScheduler pass driver.
398///
399/// Visit blocks in function order. Divide each block into scheduling regions
400/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
401/// consistent with the DAG builder, which traverses the interior of the
402/// scheduling regions bottom-up.
403///
404/// This design avoids exposing scheduling boundaries to the DAG builder,
405/// simplifying the DAG builder's support for "special" target instructions.
406/// At the same time the design allows target schedulers to operate across
407/// scheduling boundaries, for example to bundle the boundary instructions
408/// without reordering them. This creates complexity, because the target
409/// scheduler must update the RegionBegin and RegionEnd positions cached by
410/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
411/// design would be to split blocks at scheduling boundaries, but LLVM has a
412/// general bias against block splitting purely for implementation simplicity.
413bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
414 if (skipFunction(mf.getFunction()))
415 return false;
416
417 if (EnableMachineSched.getNumOccurrences()) {
419 return false;
420 } else if (!mf.getSubtarget().enableMachineScheduler())
421 return false;
422
423 LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));
424
425 // Initialize the context of the pass.
426 MF = &mf;
427 MLI = &getAnalysis<MachineLoopInfo>();
428 MDT = &getAnalysis<MachineDominatorTree>();
429 PassConfig = &getAnalysis<TargetPassConfig>();
430 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
431
432 LIS = &getAnalysis<LiveIntervals>();
433
434 if (VerifyScheduling) {
435 LLVM_DEBUG(LIS->dump());
436 MF->verify(this, "Before machine scheduling.");
437 }
438 RegClassInfo->runOnMachineFunction(*MF);
439
440 // Instantiate the selected scheduler for this target, function, and
441 // optimization level.
442 std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
443 scheduleRegions(*Scheduler, false);
444
445 LLVM_DEBUG(LIS->dump());
447 MF->verify(this, "After machine scheduling.");
448 return true;
449}
450
451bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
452 if (skipFunction(mf.getFunction()))
453 return false;
454
455 if (EnablePostRAMachineSched.getNumOccurrences()) {
457 return false;
458 } else if (!mf.getSubtarget().enablePostRAMachineScheduler()) {
459 LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
460 return false;
461 }
462 LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));
463
464 // Initialize the context of the pass.
465 MF = &mf;
466 MLI = &getAnalysis<MachineLoopInfo>();
467 PassConfig = &getAnalysis<TargetPassConfig>();
468 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
469
471 MF->verify(this, "Before post machine scheduling.");
472
473 // Instantiate the selected scheduler for this target, function, and
474 // optimization level.
475 std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
476 scheduleRegions(*Scheduler, true);
477
479 MF->verify(this, "After post machine scheduling.");
480 return true;
481}
482
483/// Return true if the given instruction should not be included in a scheduling
484/// region.
485///
486/// MachineScheduler does not currently support scheduling across calls. To
487/// handle calls, the DAG builder needs to be modified to create register
488/// anti/output dependencies on the registers clobbered by the call's regmask
489/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
490/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
491/// the boundary, but there would be no benefit to postRA scheduling across
492/// calls this late anyway.
495 MachineFunction *MF,
496 const TargetInstrInfo *TII) {
497 return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
498}
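// For example (illustrative): with the default TargetInstrInfo hook, calls,
// labels and terminators all end a region; targets may add their own cases,
// such as instructions touching special registers the scheduler cannot model.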
499
500/// A region of an MBB for scheduling.
501namespace {
502struct SchedRegion {
503 /// RegionBegin is the first instruction in the scheduling region, and
504 /// RegionEnd is either MBB->end() or the scheduling boundary after the
505 /// last instruction in the scheduling region. These iterators cannot refer
506 /// to instructions outside of the identified scheduling region because
507 /// those may be reordered before scheduling this region.
508 MachineBasicBlock::iterator RegionBegin;
510 unsigned NumRegionInstrs;
511
513 unsigned N) :
514 RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {}
515};
516} // end anonymous namespace
517
519
520static void
522 MBBRegionsVector &Regions,
523 bool RegionsTopDown) {
526
528 for(MachineBasicBlock::iterator RegionEnd = MBB->end();
529 RegionEnd != MBB->begin(); RegionEnd = I) {
530
531 // Avoid decrementing RegionEnd for blocks with no terminator.
532 if (RegionEnd != MBB->end() ||
533 isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
534 --RegionEnd;
535 }
536
537 // The next region starts above the previous region. Look backward in the
538 // instruction stream until we find the nearest boundary.
539 unsigned NumRegionInstrs = 0;
540 I = RegionEnd;
541 for (;I != MBB->begin(); --I) {
542 MachineInstr &MI = *std::prev(I);
543 if (isSchedBoundary(&MI, &*MBB, MF, TII))
544 break;
545 if (!MI.isDebugOrPseudoInstr()) {
546 // MBB::size() uses instr_iterator to count. Here we need a bundle to
547 // count as a single instruction.
548 ++NumRegionInstrs;
549 }
550 }
551
552 // It's possible we found a scheduling region that only has debug
553 // instructions. Don't bother scheduling these.
554 if (NumRegionInstrs != 0)
555 Regions.push_back(SchedRegion(I, RegionEnd, NumRegionInstrs));
556 }
557
558 if (RegionsTopDown)
559 std::reverse(Regions.begin(), Regions.end());
560}
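// Illustrative example: for a block of the form
//   %1 = LOAD ...; %2 = ADD ...; CALL @f; %3 = MUL ...; %4 = SUB ...; RET
// the CALL and the terminator are scheduling boundaries, so two regions are
// recorded: [LOAD, CALL) and [MUL, RET). The boundary instructions themselves
// belong to no region, and the list is reversed only when the scheduler asks
// for top-down region order.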
561
562/// Main driver for both MachineScheduler and PostMachineScheduler.
563void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
564 bool FixKillFlags) {
565 // Visit all machine basic blocks.
566 //
567 // TODO: Visit blocks in global postorder or postorder within the bottom-up
568 // loop tree. Then we can optionally compute global RegPressure.
569 for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
570 MBB != MBBEnd; ++MBB) {
571
572 Scheduler.startBlock(&*MBB);
573
574#ifndef NDEBUG
575 if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
576 continue;
577 if (SchedOnlyBlock.getNumOccurrences()
578 && (int)SchedOnlyBlock != MBB->getNumber())
579 continue;
580#endif
581
582 // Break the block into scheduling regions [I, RegionEnd). RegionEnd
583 // points to the scheduling boundary at the bottom of the region. The DAG
584 // does not include RegionEnd, but the region does (i.e. the next
585 // RegionEnd is above the previous RegionBegin). If the current block has
586 // no terminator then RegionEnd == MBB->end() for the bottom region.
587 //
 588     // All the regions of MBB are first found and stored in MBBRegions, which
 589     // will then be processed (within the MBB) top-down if RegionsTopDown is true.
590 //
591 // The Scheduler may insert instructions during either schedule() or
592 // exitRegion(), even for empty regions. So the local iterators 'I' and
593 // 'RegionEnd' are invalid across these calls. Instructions must not be
594 // added to other regions than the current one without updating MBBRegions.
595
596 MBBRegionsVector MBBRegions;
597 getSchedRegions(&*MBB, MBBRegions, Scheduler.doMBBSchedRegionsTopDown());
598 for (const SchedRegion &R : MBBRegions) {
599 MachineBasicBlock::iterator I = R.RegionBegin;
600 MachineBasicBlock::iterator RegionEnd = R.RegionEnd;
601 unsigned NumRegionInstrs = R.NumRegionInstrs;
602
603 // Notify the scheduler of the region, even if we may skip scheduling
604 // it. Perhaps it still needs to be bundled.
605 Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);
606
607 // Skip empty scheduling regions (0 or 1 schedulable instructions).
608 if (I == RegionEnd || I == std::prev(RegionEnd)) {
609 // Close the current region. Bundle the terminator if needed.
610 // This invalidates 'RegionEnd' and 'I'.
611 Scheduler.exitRegion();
612 continue;
613 }
614 LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
615 LLVM_DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB)
616 << " " << MBB->getName() << "\n From: " << *I
617 << " To: ";
618 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
619 else dbgs() << "End\n";
620 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
622 errs() << MF->getName();
623 errs() << ":%bb. " << MBB->getNumber();
624 errs() << " " << MBB->getName() << " \n";
625 }
626
627 // Schedule a region: possibly reorder instructions.
628 // This invalidates the original region iterators.
629 Scheduler.schedule();
630
631 // Close the current region.
632 Scheduler.exitRegion();
633 }
634 Scheduler.finishBlock();
635 // FIXME: Ideally, no further passes should rely on kill flags. However,
636 // thumb2 size reduction is currently an exception, so the PostMIScheduler
637 // needs to do this.
638 if (FixKillFlags)
639 Scheduler.fixupKills(*MBB);
640 }
641 Scheduler.finalizeSchedule();
642}
643
644void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
645 // unimplemented
646}
647
648#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
650 dbgs() << "Queue " << Name << ": ";
651 for (const SUnit *SU : Queue)
652 dbgs() << SU->NodeNum << " ";
653 dbgs() << "\n";
654}
655#endif
656
657//===----------------------------------------------------------------------===//
658// ScheduleDAGMI - Basic machine instruction scheduling. This is
659// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
660// virtual registers.
661//===----------------------------------------------------------------------===//
662
663// Provide a vtable anchor.
665
666/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
667/// NumPredsLeft reaches zero, release the successor node.
668///
669/// FIXME: Adjust SuccSU height based on MinLatency.
671 SUnit *SuccSU = SuccEdge->getSUnit();
672
673 if (SuccEdge->isWeak()) {
674 --SuccSU->WeakPredsLeft;
675 if (SuccEdge->isCluster())
676 NextClusterSucc = SuccSU;
677 return;
678 }
679#ifndef NDEBUG
680 if (SuccSU->NumPredsLeft == 0) {
681 dbgs() << "*** Scheduling failed! ***\n";
682 dumpNode(*SuccSU);
683 dbgs() << " has been released too many times!\n";
684 llvm_unreachable(nullptr);
685 }
686#endif
687 // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
688 // CurrCycle may have advanced since then.
689 if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
690 SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();
691
692 --SuccSU->NumPredsLeft;
693 if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
694 SchedImpl->releaseTopNode(SuccSU);
695}
696
697/// releaseSuccessors - Call releaseSucc on each of SU's successors.
699 for (SDep &Succ : SU->Succs)
700 releaseSucc(SU, &Succ);
701}
702
703/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
704/// NumSuccsLeft reaches zero, release the predecessor node.
705///
706/// FIXME: Adjust PredSU height based on MinLatency.
708 SUnit *PredSU = PredEdge->getSUnit();
709
710 if (PredEdge->isWeak()) {
711 --PredSU->WeakSuccsLeft;
712 if (PredEdge->isCluster())
713 NextClusterPred = PredSU;
714 return;
715 }
716#ifndef NDEBUG
717 if (PredSU->NumSuccsLeft == 0) {
718 dbgs() << "*** Scheduling failed! ***\n";
719 dumpNode(*PredSU);
720 dbgs() << " has been released too many times!\n";
721 llvm_unreachable(nullptr);
722 }
723#endif
724 // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
725 // CurrCycle may have advanced since then.
726 if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
727 PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();
728
729 --PredSU->NumSuccsLeft;
730 if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
731 SchedImpl->releaseBottomNode(PredSU);
732}
733
734/// releasePredecessors - Call releasePred on each of SU's predecessors.
736 for (SDep &Pred : SU->Preds)
737 releasePred(SU, &Pred);
738}
739
742 SchedImpl->enterMBB(bb);
743}
744
746 SchedImpl->leaveMBB();
748}
749
750/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
751/// crossing a scheduling boundary. [begin, end) includes all instructions in
752/// the region, including the boundary itself and single-instruction regions
753/// that don't get scheduled.
757 unsigned regioninstrs)
758{
759 ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
760
761 SchedImpl->initPolicy(begin, end, regioninstrs);
762}
763
764/// This is normally called from the main scheduler loop but may also be invoked
765/// by the scheduling strategy to perform additional code motion.
768 // Advance RegionBegin if the first instruction moves down.
769 if (&*RegionBegin == MI)
770 ++RegionBegin;
771
772 // Update the instruction stream.
773 BB->splice(InsertPos, BB, MI);
774
775 // Update LiveIntervals
776 if (LIS)
777 LIS->handleMove(*MI, /*UpdateFlags=*/true);
778
779 // Recede RegionBegin if an instruction moves above the first.
780 if (RegionBegin == InsertPos)
781 RegionBegin = MI;
782}
783
785#if LLVM_ENABLE_ABI_BREAKING_CHECKS && !defined(NDEBUG)
786 if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
788 return false;
789 }
790 ++NumInstrsScheduled;
791#endif
792 return true;
793}
794
795/// Per-region scheduling driver, called back from
796/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
797/// does not consider liveness or register pressure. It is useful for PostRA
798/// scheduling and potentially other custom schedulers.
800 LLVM_DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
801 LLVM_DEBUG(SchedImpl->dumpPolicy());
802
803 // Build the DAG.
805
807
808 SmallVector<SUnit*, 8> TopRoots, BotRoots;
809 findRootsAndBiasEdges(TopRoots, BotRoots);
810
811 LLVM_DEBUG(dump());
812 if (PrintDAGs) dump();
814
815 // Initialize the strategy before modifying the DAG.
816 // This may initialize a DFSResult to be used for queue priority.
817 SchedImpl->initialize(this);
818
819 // Initialize ready queues now that the DAG and priority data are finalized.
820 initQueues(TopRoots, BotRoots);
821
822 bool IsTopNode = false;
823 while (true) {
824 LLVM_DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
825 SUnit *SU = SchedImpl->pickNode(IsTopNode);
826 if (!SU) break;
827
828 assert(!SU->isScheduled && "Node already scheduled");
829 if (!checkSchedLimit())
830 break;
831
832 MachineInstr *MI = SU->getInstr();
833 if (IsTopNode) {
834 assert(SU->isTopReady() && "node still has unscheduled dependencies");
835 if (&*CurrentTop == MI)
837 else
839 } else {
840 assert(SU->isBottomReady() && "node still has unscheduled dependencies");
843 if (&*priorII == MI)
844 CurrentBottom = priorII;
845 else {
846 if (&*CurrentTop == MI)
847 CurrentTop = nextIfDebug(++CurrentTop, priorII);
850 }
851 }
852 // Notify the scheduling strategy before updating the DAG.
853 // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
854 // runs, it can then use the accurate ReadyCycle time to determine whether
855 // newly released nodes can move to the readyQ.
856 SchedImpl->schedNode(SU, IsTopNode);
857
858 updateQueues(SU, IsTopNode);
859 }
860 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
861
863
864 LLVM_DEBUG({
865 dbgs() << "*** Final schedule for "
866 << printMBBReference(*begin()->getParent()) << " ***\n";
867 dumpSchedule();
868 dbgs() << '\n';
869 });
870}
871
872/// Apply each ScheduleDAGMutation step in order.
874 for (auto &m : Mutations)
875 m->apply(this);
876}
877
880 SmallVectorImpl<SUnit*> &BotRoots) {
881 for (SUnit &SU : SUnits) {
882 assert(!SU.isBoundaryNode() && "Boundary node should not be in SUnits");
883
884 // Order predecessors so DFSResult follows the critical path.
885 SU.biasCriticalPath();
886
887 // A SUnit is ready to top schedule if it has no predecessors.
888 if (!SU.NumPredsLeft)
889 TopRoots.push_back(&SU);
890 // A SUnit is ready to bottom schedule if it has no successors.
891 if (!SU.NumSuccsLeft)
892 BotRoots.push_back(&SU);
893 }
895}
896
897/// Identify DAG roots and setup scheduler queues.
899 ArrayRef<SUnit*> BotRoots) {
900 NextClusterSucc = nullptr;
901 NextClusterPred = nullptr;
902
903 // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
904 //
905 // Nodes with unreleased weak edges can still be roots.
906 // Release top roots in forward order.
907 for (SUnit *SU : TopRoots)
908 SchedImpl->releaseTopNode(SU);
909
910 // Release bottom roots in reverse order so the higher priority nodes appear
911 // first. This is more natural and slightly more efficient.
913 I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
914 SchedImpl->releaseBottomNode(*I);
915 }
916
919
920 SchedImpl->registerRoots();
921
922 // Advance past initial DebugValues.
925}
926
927/// Update scheduler queues after scheduling an instruction.
928void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
929 // Release dependent instructions for scheduling.
930 if (IsTopNode)
932 else
934
935 SU->isScheduled = true;
936}
937
938/// Reinsert any remaining debug_values, just like the PostRA scheduler.
940 // If first instruction was a DBG_VALUE then put it back.
941 if (FirstDbgValue) {
944 }
945
946 for (std::vector<std::pair<MachineInstr *, MachineInstr *>>::iterator
947 DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
948 std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
949 MachineInstr *DbgValue = P.first;
950 MachineBasicBlock::iterator OrigPrevMI = P.second;
951 if (&*RegionBegin == DbgValue)
952 ++RegionBegin;
953 BB->splice(std::next(OrigPrevMI), BB, DbgValue);
954 if (RegionEnd != BB->end() && OrigPrevMI == &*RegionEnd)
956 }
957}
958
959#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
960static const char *scheduleTableLegend = " i: issue\n x: resource booked";
961
 963  // Bail out when there is no schedule model to query.
965 return;
966
 967  // Nothing to show if there are fewer than two instructions.
968 if (BB->size() < 2)
969 return;
970
971 dbgs() << " * Schedule table (TopDown):\n";
972 dbgs() << scheduleTableLegend << "\n";
973 const unsigned FirstCycle = getSUnit(&*(std::begin(*this)))->TopReadyCycle;
974 unsigned LastCycle = getSUnit(&*(std::prev(std::end(*this))))->TopReadyCycle;
975 for (MachineInstr &MI : *this) {
976 SUnit *SU = getSUnit(&MI);
977 if (!SU)
978 continue;
979 const MCSchedClassDesc *SC = getSchedClass(SU);
982 PI != PE; ++PI) {
983 if (SU->TopReadyCycle + PI->ReleaseAtCycle - 1 > LastCycle)
984 LastCycle = SU->TopReadyCycle + PI->ReleaseAtCycle - 1;
985 }
986 }
987 // Print the header with the cycles
989 for (unsigned C = FirstCycle; C <= LastCycle; ++C)
990 dbgs() << llvm::left_justify("| " + std::to_string(C), ColWidth);
991 dbgs() << "|\n";
992
993 for (MachineInstr &MI : *this) {
994 SUnit *SU = getSUnit(&MI);
995 if (!SU) {
996 dbgs() << "Missing SUnit\n";
997 continue;
998 }
999 std::string NodeName("SU(");
1000 NodeName += std::to_string(SU->NodeNum) + ")";
1001 dbgs() << llvm::left_justify(NodeName, HeaderColWidth);
1002 unsigned C = FirstCycle;
1003 for (; C <= LastCycle; ++C) {
1004 if (C == SU->TopReadyCycle)
1005 dbgs() << llvm::left_justify("| i", ColWidth);
1006 else
1007 dbgs() << llvm::left_justify("|", ColWidth);
1008 }
1009 dbgs() << "|\n";
1010 const MCSchedClassDesc *SC = getSchedClass(SU);
1011
1015
1017 llvm::stable_sort(ResourcesIt,
1018 [](const MCWriteProcResEntry &LHS,
1019 const MCWriteProcResEntry &RHS) -> bool {
1020 return LHS.AcquireAtCycle < RHS.AcquireAtCycle ||
1021 (LHS.AcquireAtCycle == RHS.AcquireAtCycle &&
1022 LHS.ReleaseAtCycle < RHS.ReleaseAtCycle);
1023 });
1024 for (const MCWriteProcResEntry &PI : ResourcesIt) {
1025 C = FirstCycle;
1026 const std::string ResName =
1027 SchedModel.getResourceName(PI.ProcResourceIdx);
1028 dbgs() << llvm::right_justify(ResName + " ", HeaderColWidth);
1029 for (; C < SU->TopReadyCycle + PI.AcquireAtCycle; ++C) {
1030 dbgs() << llvm::left_justify("|", ColWidth);
1031 }
1032 for (unsigned I = 0, E = PI.ReleaseAtCycle - PI.AcquireAtCycle; I != E;
1033 ++I, ++C)
1034 dbgs() << llvm::left_justify("| x", ColWidth);
1035 while (C++ <= LastCycle)
1036 dbgs() << llvm::left_justify("|", ColWidth);
1037 // Place end char
1038 dbgs() << "| \n";
1039 }
1040 }
1041}
1042
 1044  // Bail out when there is no schedule model to query.
1046 return;
1047
 1048  // Nothing to show if there are fewer than two instructions.
1049 if (BB->size() < 2)
1050 return;
1051
1052 dbgs() << " * Schedule table (BottomUp):\n";
1053 dbgs() << scheduleTableLegend << "\n";
1054
1055 const int FirstCycle = getSUnit(&*(std::begin(*this)))->BotReadyCycle;
1056 int LastCycle = getSUnit(&*(std::prev(std::end(*this))))->BotReadyCycle;
1057 for (MachineInstr &MI : *this) {
1058 SUnit *SU = getSUnit(&MI);
1059 if (!SU)
1060 continue;
1061 const MCSchedClassDesc *SC = getSchedClass(SU);
1064 PI != PE; ++PI) {
1065 if ((int)SU->BotReadyCycle - PI->ReleaseAtCycle + 1 < LastCycle)
1066 LastCycle = (int)SU->BotReadyCycle - PI->ReleaseAtCycle + 1;
1067 }
1068 }
1069 // Print the header with the cycles
1070 dbgs() << llvm::left_justify("Cycle", HeaderColWidth);
1071 for (int C = FirstCycle; C >= LastCycle; --C)
1072 dbgs() << llvm::left_justify("| " + std::to_string(C), ColWidth);
1073 dbgs() << "|\n";
1074
1075 for (MachineInstr &MI : *this) {
1076 SUnit *SU = getSUnit(&MI);
1077 if (!SU) {
1078 dbgs() << "Missing SUnit\n";
1079 continue;
1080 }
1081 std::string NodeName("SU(");
1082 NodeName += std::to_string(SU->NodeNum) + ")";
1083 dbgs() << llvm::left_justify(NodeName, HeaderColWidth);
1084 int C = FirstCycle;
1085 for (; C >= LastCycle; --C) {
1086 if (C == (int)SU->BotReadyCycle)
1087 dbgs() << llvm::left_justify("| i", ColWidth);
1088 else
1089 dbgs() << llvm::left_justify("|", ColWidth);
1090 }
1091 dbgs() << "|\n";
1092 const MCSchedClassDesc *SC = getSchedClass(SU);
1096
1098 llvm::stable_sort(ResourcesIt,
1099 [](const MCWriteProcResEntry &LHS,
1100 const MCWriteProcResEntry &RHS) -> bool {
1101 return LHS.AcquireAtCycle < RHS.AcquireAtCycle ||
1102 (LHS.AcquireAtCycle == RHS.AcquireAtCycle &&
1103 LHS.ReleaseAtCycle < RHS.ReleaseAtCycle);
1104 });
1105 for (const MCWriteProcResEntry &PI : ResourcesIt) {
1106 C = FirstCycle;
1107 const std::string ResName =
1108 SchedModel.getResourceName(PI.ProcResourceIdx);
1109 dbgs() << llvm::right_justify(ResName + " ", HeaderColWidth);
1110 for (; C > ((int)SU->BotReadyCycle - (int)PI.AcquireAtCycle); --C) {
1111 dbgs() << llvm::left_justify("|", ColWidth);
1112 }
1113 for (unsigned I = 0, E = PI.ReleaseAtCycle - PI.AcquireAtCycle; I != E;
1114 ++I, --C)
1115 dbgs() << llvm::left_justify("| x", ColWidth);
1116 while (C-- >= LastCycle)
1117 dbgs() << llvm::left_justify("|", ColWidth);
1118 // Place end char
1119 dbgs() << "| \n";
1120 }
1121 }
1122}
1123#endif
1124
1125#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1128 if (ForceTopDown)
1130 else if (ForceBottomUp)
1132 else {
1133 dbgs() << "* Schedule table (Bidirectional): not implemented\n";
1134 }
1135 }
1136
1137 for (MachineInstr &MI : *this) {
1138 if (SUnit *SU = getSUnit(&MI))
1139 dumpNode(*SU);
1140 else
1141 dbgs() << "Missing SUnit\n";
1142 }
1143}
1144#endif
1145
1146//===----------------------------------------------------------------------===//
1147// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
1148// preservation.
1149//===----------------------------------------------------------------------===//
1150
1152 delete DFSResult;
1153}
1154
1156 const MachineInstr &MI = *SU.getInstr();
1157 for (const MachineOperand &MO : MI.operands()) {
1158 if (!MO.isReg())
1159 continue;
1160 if (!MO.readsReg())
1161 continue;
1162 if (TrackLaneMasks && !MO.isUse())
1163 continue;
1164
1165 Register Reg = MO.getReg();
1166 if (!Reg.isVirtual())
1167 continue;
1168
1169 // Ignore re-defs.
1170 if (TrackLaneMasks) {
1171 bool FoundDef = false;
1172 for (const MachineOperand &MO2 : MI.all_defs()) {
1173 if (MO2.getReg() == Reg && !MO2.isDead()) {
1174 FoundDef = true;
1175 break;
1176 }
1177 }
1178 if (FoundDef)
1179 continue;
1180 }
1181
1182 // Record this local VReg use.
1184 for (; UI != VRegUses.end(); ++UI) {
1185 if (UI->SU == &SU)
1186 break;
1187 }
1188 if (UI == VRegUses.end())
1190 }
1191}
1192
1193/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
1194/// crossing a scheduling boundary. [begin, end) includes all instructions in
1195/// the region, including the boundary itself and single-instruction regions
1196/// that don't get scheduled.
1200 unsigned regioninstrs)
1201{
1202 // ScheduleDAGMI initializes SchedImpl's per-region policy.
1203 ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);
1204
1205 // For convenience remember the end of the liveness region.
1206 LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);
1207
1209
1210 ShouldTrackPressure = SchedImpl->shouldTrackPressure();
1211 ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();
1212
1214 "ShouldTrackLaneMasks requires ShouldTrackPressure");
1215}
1216
1217// Setup the register pressure trackers for the top scheduled and bottom
1218// scheduled regions.
1220 VRegUses.clear();
1222 for (SUnit &SU : SUnits)
1223 collectVRegUses(SU);
1224
1226 ShouldTrackLaneMasks, false);
1228 ShouldTrackLaneMasks, false);
1229
1230 // Close the RPTracker to finalize live ins.
1232
1234
1235 // Initialize the live ins and live outs.
1238
1239 // Close one end of the tracker so we can call
1240 // getMaxUpward/DownwardPressureDelta before advancing across any
1241 // instructions. This converts currently live regs into live ins/outs.
1244
1246 if (!BotRPTracker.getLiveThru().empty()) {
1248 LLVM_DEBUG(dbgs() << "Live Thru: ";
1250 };
1251
1252 // For each live out vreg reduce the pressure change associated with other
1253 // uses of the same vreg below the live-out reaching def.
1255
1256 // Account for liveness generated by the region boundary.
1257 if (LiveRegionEnd != RegionEnd) {
1259 BotRPTracker.recede(&LiveUses);
1260 updatePressureDiffs(LiveUses);
1261 }
1262
1263 LLVM_DEBUG(dbgs() << "Top Pressure:\n";
1265 dbgs() << "Bottom Pressure:\n";
1267
1269 (RegionEnd->isDebugInstr() &&
1271 "Can't find the region bottom");
1272
1273 // Cache the list of excess pressure sets in this region. This will also track
1274 // the max pressure in the scheduled code for these sets.
1275 RegionCriticalPSets.clear();
1276 const std::vector<unsigned> &RegionPressure =
1278 for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
1279 unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
1280 if (RegionPressure[i] > Limit) {
1281 LLVM_DEBUG(dbgs() << TRI->getRegPressureSetName(i) << " Limit " << Limit
1282 << " Actual " << RegionPressure[i] << "\n");
1283 RegionCriticalPSets.push_back(PressureChange(i));
1284 }
1285 }
1286 LLVM_DEBUG(dbgs() << "Excess PSets: ";
1287 for (const PressureChange &RCPS
1289 << TRI->getRegPressureSetName(RCPS.getPSet()) << " ";
1290 dbgs() << "\n");
1291}
1292
1295 const std::vector<unsigned> &NewMaxPressure) {
1296 const PressureDiff &PDiff = getPressureDiff(SU);
1297 unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
1298 for (const PressureChange &PC : PDiff) {
1299 if (!PC.isValid())
1300 break;
1301 unsigned ID = PC.getPSet();
1302 while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
1303 ++CritIdx;
1304 if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
1305 if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
1306 && NewMaxPressure[ID] <= (unsigned)std::numeric_limits<int16_t>::max())
1307 RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
1308 }
1309 unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
1310 if (NewMaxPressure[ID] >= Limit - 2) {
1311 LLVM_DEBUG(dbgs() << " " << TRI->getRegPressureSetName(ID) << ": "
1312 << NewMaxPressure[ID]
1313 << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ")
1314 << Limit << "(+ " << BotRPTracker.getLiveThru()[ID]
1315 << " livethru)\n");
1316 }
1317 }
1318}
1319
1320/// Update the PressureDiff array for liveness after scheduling this
1321/// instruction.
1323 ArrayRef<RegisterMaskPair> LiveUses) {
1324 for (const RegisterMaskPair &P : LiveUses) {
1325 Register Reg = P.RegUnit;
1326 /// FIXME: Currently assuming single-use physregs.
1327 if (!Reg.isVirtual())
1328 continue;
1329
1331 // If the register has just become live then other uses won't change
1332 // this fact anymore => decrement pressure.
1333 // If the register has just become dead then other uses make it come
1334 // back to life => increment pressure.
1335 bool Decrement = P.LaneMask.any();
1336
1337 for (const VReg2SUnit &V2SU
1338 : make_range(VRegUses.find(Reg), VRegUses.end())) {
1339 SUnit &SU = *V2SU.SU;
1340 if (SU.isScheduled || &SU == &ExitSU)
1341 continue;
1342
1343 PressureDiff &PDiff = getPressureDiff(&SU);
1344 PDiff.addPressureChange(Reg, Decrement, &MRI);
1345 LLVM_DEBUG(dbgs() << " UpdateRegP: SU(" << SU.NodeNum << ") "
1346 << printReg(Reg, TRI) << ':'
1347 << PrintLaneMask(P.LaneMask) << ' ' << *SU.getInstr();
1348 dbgs() << " to "; PDiff.dump(*TRI););
1349 }
1350 } else {
1351 assert(P.LaneMask.any());
1352 LLVM_DEBUG(dbgs() << " LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n");
1353 // This may be called before CurrentBottom has been initialized. However,
1354 // BotRPTracker must have a valid position. We want the value live into the
1355 // instruction or live out of the block, so ask for the previous
1356 // instruction's live-out.
1357 const LiveInterval &LI = LIS->getInterval(Reg);
1358 VNInfo *VNI;
1361 if (I == BB->end())
1362 VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
1363 else {
1365 VNI = LRQ.valueIn();
1366 }
1367 // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
1368 assert(VNI && "No live value at use.");
1369 for (const VReg2SUnit &V2SU
1370 : make_range(VRegUses.find(Reg), VRegUses.end())) {
1371 SUnit *SU = V2SU.SU;
1372 // If this use comes before the reaching def, it cannot be a last use,
1373 // so decrease its pressure change.
1374 if (!SU->isScheduled && SU != &ExitSU) {
1375 LiveQueryResult LRQ =
1377 if (LRQ.valueIn() == VNI) {
1378 PressureDiff &PDiff = getPressureDiff(SU);
1379 PDiff.addPressureChange(Reg, true, &MRI);
1380 LLVM_DEBUG(dbgs() << " UpdateRegP: SU(" << SU->NodeNum << ") "
1381 << *SU->getInstr();
1382 dbgs() << " to "; PDiff.dump(*TRI););
1383 }
1384 }
1385 }
1386 }
1387 }
1388}
1389
1391#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1392 if (EntrySU.getInstr() != nullptr)
1394 for (const SUnit &SU : SUnits) {
1395 dumpNodeAll(SU);
1396 if (ShouldTrackPressure) {
1397 dbgs() << " Pressure Diff : ";
1398 getPressureDiff(&SU).dump(*TRI);
1399 }
1400 dbgs() << " Single Issue : ";
1401 if (SchedModel.mustBeginGroup(SU.getInstr()) &&
1402 SchedModel.mustEndGroup(SU.getInstr()))
1403 dbgs() << "true;";
1404 else
1405 dbgs() << "false;";
1406 dbgs() << '\n';
1407 }
1408 if (ExitSU.getInstr() != nullptr)
1410#endif
1411}
1412
1413/// schedule - Called back from MachineScheduler::runOnMachineFunction
1414/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
1415/// only includes instructions that have DAG nodes, not scheduling boundaries.
1416///
1417/// This is a skeletal driver, with all the functionality pushed into helpers,
1418/// so that it can be easily extended by experimental schedulers. Generally,
1419/// implementing MachineSchedStrategy should be sufficient to implement a new
1420/// scheduling algorithm. However, if a scheduler further subclasses
1421/// ScheduleDAGMILive then it will want to override this virtual method in order
1422/// to update any specialized state.
1424 LLVM_DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
1425 LLVM_DEBUG(SchedImpl->dumpPolicy());
1427
1429
1430 SmallVector<SUnit*, 8> TopRoots, BotRoots;
1431 findRootsAndBiasEdges(TopRoots, BotRoots);
1432
1433 // Initialize the strategy before modifying the DAG.
1434 // This may initialize a DFSResult to be used for queue priority.
1435 SchedImpl->initialize(this);
1436
1437 LLVM_DEBUG(dump());
1438 if (PrintDAGs) dump();
1440
1441 // Initialize ready queues now that the DAG and priority data are finalized.
1442 initQueues(TopRoots, BotRoots);
1443
1444 bool IsTopNode = false;
1445 while (true) {
1446 LLVM_DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
1447 SUnit *SU = SchedImpl->pickNode(IsTopNode);
1448 if (!SU) break;
1449
1450 assert(!SU->isScheduled && "Node already scheduled");
1451 if (!checkSchedLimit())
1452 break;
1453
1454 scheduleMI(SU, IsTopNode);
1455
1456 if (DFSResult) {
1457 unsigned SubtreeID = DFSResult->getSubtreeID(SU);
1458 if (!ScheduledTrees.test(SubtreeID)) {
1459 ScheduledTrees.set(SubtreeID);
1460 DFSResult->scheduleTree(SubtreeID);
1461 SchedImpl->scheduleTree(SubtreeID);
1462 }
1463 }
1464
1465 // Notify the scheduling strategy after updating the DAG.
1466 SchedImpl->schedNode(SU, IsTopNode);
1467
1468 updateQueues(SU, IsTopNode);
1469 }
1470 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
1471
1473
1474 LLVM_DEBUG({
1475 dbgs() << "*** Final schedule for "
1476 << printMBBReference(*begin()->getParent()) << " ***\n";
1477 dumpSchedule();
1478 dbgs() << '\n';
1479 });
1480}
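// Minimal sketch of the extension point described above (illustrative only;
// the class name is hypothetical). A real strategy would also model latency,
// hazards and register pressure, as GenericScheduler does.
//
//   class TrivialBottomUpStrategy : public MachineSchedStrategy {
//     std::vector<SUnit *> Ready;
//   public:
//     void initialize(ScheduleDAGMI *DAG) override { Ready.clear(); }
//     SUnit *pickNode(bool &IsTopNode) override {
//       if (Ready.empty())
//         return nullptr;
//       IsTopNode = false;
//       SUnit *SU = Ready.back();
//       Ready.pop_back();
//       return SU;
//     }
//     void schedNode(SUnit *SU, bool IsTopNode) override {}
//     void releaseTopNode(SUnit *SU) override {}
//     void releaseBottomNode(SUnit *SU) override { Ready.push_back(SU); }
//   };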
1481
1482/// Build the DAG and setup three register pressure trackers.
1484 if (!ShouldTrackPressure) {
1485 RPTracker.reset();
1486 RegionCriticalPSets.clear();
1488 return;
1489 }
1490
1491 // Initialize the register pressure tracker used by buildSchedGraph.
1493 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);
1494
 1495  // Account for liveness generated by the region boundary.
1496 if (LiveRegionEnd != RegionEnd)
1497 RPTracker.recede();
1498
1499 // Build the DAG, and compute current register pressure.
1501
1502 // Initialize top/bottom trackers after computing region pressure.
1504}
1505
1507 if (!DFSResult)
1508 DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
1509 DFSResult->clear();
1511 DFSResult->resize(SUnits.size());
1514}
1515
1516/// Compute the max cyclic critical path through the DAG. The scheduling DAG
1517/// only provides the critical path for single block loops. To handle loops that
1518/// span blocks, we could use the vreg path latencies provided by
1519/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
1520/// available for use in the scheduler.
1521///
1522/// The cyclic path estimation identifies a def-use pair that crosses the back
1523/// edge and considers the depth and height of the nodes. For example, consider
1524/// the following instruction sequence where each instruction has unit latency
1525/// and defines an eponymous virtual register:
1526///
1527/// a->b(a,c)->c(b)->d(c)->exit
1528///
1529/// The cyclic critical path is two cycles: b->c->b
1530/// The acyclic critical path is four cycles: a->b->c->d->exit
1531/// LiveOutHeight = height(c) = len(c->d->exit) = 2
1532/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
1533/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
1534/// LiveInDepth = depth(b) = len(a->b) = 1
1535///
1536/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
1537/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
1538/// CyclicCriticalPath = min(2, 2) = 2
1539///
1540/// This could be relevant to PostRA scheduling, but is currently implemented
1541/// assuming LiveIntervals.
 1543  // This only applies to a single-block loop.
1544 if (!BB->isSuccessor(BB))
1545 return 0;
1546
1547 unsigned MaxCyclicLatency = 0;
1548 // Visit each live out vreg def to find def/use pairs that cross iterations.
1550 Register Reg = P.RegUnit;
1551 if (!Reg.isVirtual())
1552 continue;
1553 const LiveInterval &LI = LIS->getInterval(Reg);
1554 const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
1555 if (!DefVNI)
1556 continue;
1557
1559 const SUnit *DefSU = getSUnit(DefMI);
1560 if (!DefSU)
1561 continue;
1562
1563 unsigned LiveOutHeight = DefSU->getHeight();
1564 unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
1565 // Visit all local users of the vreg def.
1566 for (const VReg2SUnit &V2SU
1567 : make_range(VRegUses.find(Reg), VRegUses.end())) {
1568 SUnit *SU = V2SU.SU;
1569 if (SU == &ExitSU)
1570 continue;
1571
1572 // Only consider uses of the phi.
1574 if (!LRQ.valueIn()->isPHIDef())
1575 continue;
1576
1577 // Assume that a path spanning two iterations is a cycle, which could
1578 // overestimate in strange cases. This allows cyclic latency to be
1579 // estimated as the minimum slack of the vreg's depth or height.
1580 unsigned CyclicLatency = 0;
1581 if (LiveOutDepth > SU->getDepth())
1582 CyclicLatency = LiveOutDepth - SU->getDepth();
1583
1584 unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
1585 if (LiveInHeight > LiveOutHeight) {
1586 if (LiveInHeight - LiveOutHeight < CyclicLatency)
1587 CyclicLatency = LiveInHeight - LiveOutHeight;
1588 } else
1589 CyclicLatency = 0;
1590
1591 LLVM_DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
1592 << SU->NodeNum << ") = " << CyclicLatency << "c\n");
1593 if (CyclicLatency > MaxCyclicLatency)
1594 MaxCyclicLatency = CyclicLatency;
1595 }
1596 }
1597 LLVM_DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
1598 return MaxCyclicLatency;
1599}
1600
1601/// Release ExitSU predecessors and setup scheduler queues. Re-position
1602/// the Top RP tracker in case the region beginning has changed.
1604 ArrayRef<SUnit*> BotRoots) {
1605 ScheduleDAGMI::initQueues(TopRoots, BotRoots);
1606 if (ShouldTrackPressure) {
1607 assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
1609 }
1610}
1611
1612/// Move an instruction and update register pressure.
1613void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
1614 // Move the instruction to its new location in the instruction stream.
1615 MachineInstr *MI = SU->getInstr();
1616
1617 if (IsTopNode) {
1618 assert(SU->isTopReady() && "node still has unscheduled dependencies");
1619 if (&*CurrentTop == MI)
1621 else {
1624 }
1625
1626 if (ShouldTrackPressure) {
1627 // Update top scheduled pressure.
1628 RegisterOperands RegOpers;
1629 RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
1631 // Adjust liveness and add missing dead+read-undef flags.
1632 SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
1633 RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
1634 } else {
1635 // Adjust for missing dead-def flags.
1636 RegOpers.detectDeadDefs(*MI, *LIS);
1637 }
1638
1639 TopRPTracker.advance(RegOpers);
1640 assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
1641 LLVM_DEBUG(dbgs() << "Top Pressure:\n"; dumpRegSetPressure(
1643
1645 }
1646 } else {
1647 assert(SU->isBottomReady() && "node still has unscheduled dependencies");
1650 if (&*priorII == MI)
1651 CurrentBottom = priorII;
1652 else {
1653 if (&*CurrentTop == MI) {
1654 CurrentTop = nextIfDebug(++CurrentTop, priorII);
1656 }
1658 CurrentBottom = MI;
1660 }
1661 if (ShouldTrackPressure) {
1662 RegisterOperands RegOpers;
1663 RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
1665 // Adjust liveness and add missing dead+read-undef flags.
1666 SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
1667 RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
1668 } else {
1669 // Adjust for missing dead-def flags.
1670 RegOpers.detectDeadDefs(*MI, *LIS);
1671 }
1672
1676 BotRPTracker.recede(RegOpers, &LiveUses);
1677 assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
1678 LLVM_DEBUG(dbgs() << "Bottom Pressure:\n"; dumpRegSetPressure(
1680
1682 updatePressureDiffs(LiveUses);
1683 }
1684 }
1685}
1686
1687//===----------------------------------------------------------------------===//
1688// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
1689//===----------------------------------------------------------------------===//
1690
1691namespace {
1692
1693/// Post-process the DAG to create cluster edges between neighboring
1694/// loads or between neighboring stores.
1695class BaseMemOpClusterMutation : public ScheduleDAGMutation {
1696 struct MemOpInfo {
1697 SUnit *SU;
1699 int64_t Offset;
1700 unsigned Width;
1701
1702 MemOpInfo(SUnit *SU, ArrayRef<const MachineOperand *> BaseOps,
1703 int64_t Offset, unsigned Width)
1704 : SU(SU), BaseOps(BaseOps.begin(), BaseOps.end()), Offset(Offset),
1705 Width(Width) {}
1706
1707 static bool Compare(const MachineOperand *const &A,
1708 const MachineOperand *const &B) {
1709 if (A->getType() != B->getType())
1710 return A->getType() < B->getType();
1711 if (A->isReg())
1712 return A->getReg() < B->getReg();
1713 if (A->isFI()) {
1714 const MachineFunction &MF = *A->getParent()->getParent()->getParent();
1716 bool StackGrowsDown = TFI.getStackGrowthDirection() ==
1718 return StackGrowsDown ? A->getIndex() > B->getIndex()
1719 : A->getIndex() < B->getIndex();
1720 }
1721
1722 llvm_unreachable("MemOpClusterMutation only supports register or frame "
1723 "index bases.");
1724 }
1725
1726 bool operator<(const MemOpInfo &RHS) const {
1727 // FIXME: Don't compare everything twice. Maybe use C++20 three way
1728 // comparison instead when it's available.
1729 if (std::lexicographical_compare(BaseOps.begin(), BaseOps.end(),
1730 RHS.BaseOps.begin(), RHS.BaseOps.end(),
1731 Compare))
1732 return true;
1733 if (std::lexicographical_compare(RHS.BaseOps.begin(), RHS.BaseOps.end(),
1734 BaseOps.begin(), BaseOps.end(), Compare))
1735 return false;
1736 if (Offset != RHS.Offset)
1737 return Offset < RHS.Offset;
1738 return SU->NodeNum < RHS.SU->NodeNum;
1739 }
1740 };
1741
1742 const TargetInstrInfo *TII;
1743 const TargetRegisterInfo *TRI;
1744 bool IsLoad;
1745
1746public:
1747 BaseMemOpClusterMutation(const TargetInstrInfo *tii,
1748 const TargetRegisterInfo *tri, bool IsLoad)
1749 : TII(tii), TRI(tri), IsLoad(IsLoad) {}
1750
1751 void apply(ScheduleDAGInstrs *DAGInstrs) override;
1752
1753protected:
1754 void clusterNeighboringMemOps(ArrayRef<MemOpInfo> MemOps, bool FastCluster,
1755 ScheduleDAGInstrs *DAG);
1756 void collectMemOpRecords(std::vector<SUnit> &SUnits,
1757 SmallVectorImpl<MemOpInfo> &MemOpRecords);
1758 bool groupMemOps(ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
1760};
1761
1762class StoreClusterMutation : public BaseMemOpClusterMutation {
1763public:
1764 StoreClusterMutation(const TargetInstrInfo *tii,
1765 const TargetRegisterInfo *tri)
1766 : BaseMemOpClusterMutation(tii, tri, false) {}
1767};
1768
1769class LoadClusterMutation : public BaseMemOpClusterMutation {
1770public:
1771 LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
1772 : BaseMemOpClusterMutation(tii, tri, true) {}
1773};
1774
1775} // end anonymous namespace
1776
1777namespace llvm {
1778
1779std::unique_ptr<ScheduleDAGMutation>
1781 const TargetRegisterInfo *TRI) {
1782 return EnableMemOpCluster ? std::make_unique<LoadClusterMutation>(TII, TRI)
1783 : nullptr;
1784}
1785
1786std::unique_ptr<ScheduleDAGMutation>
1788 const TargetRegisterInfo *TRI) {
1789 return EnableMemOpCluster ? std::make_unique<StoreClusterMutation>(TII, TRI)
1790 : nullptr;
1791}
1792
1793} // end namespace llvm
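// Typical use (illustrative): a target's createMachineScheduler() attaches
// these mutations to the DAG so the post-processing below can add cluster
// edges, e.g.
//   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
//   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));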
1794
1795// Sort all the loads/stores first. Then, for each load/store, check the
1796// following loads/stores one by one until the first non-dependent one is
1797// reached, and call the target hook to see whether they can be clustered.
1798// If FastCluster is enabled, we assume that all the loads/stores have been
1799// preprocessed and have no dependencies on each other.
1800void BaseMemOpClusterMutation::clusterNeighboringMemOps(
1801 ArrayRef<MemOpInfo> MemOpRecords, bool FastCluster,
1802 ScheduleDAGInstrs *DAG) {
1803 // Keep track of the current cluster length and bytes for each SUnit.
1805
 1806  // At this point, the `MemOpRecords` array must hold at least two mem ops. Try to
1807 // cluster mem ops collected within `MemOpRecords` array.
1808 for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
 1809    // The decision to cluster mem ops is made by target-dependent logic.
1810 auto MemOpa = MemOpRecords[Idx];
1811
 1812    // Look for the next load/store with which to form a cluster.
1813 unsigned NextIdx = Idx + 1;
1814 for (; NextIdx < End; ++NextIdx)
 1815      // Skip if MemOpb has already been clustered or has a dependency on
 1816      // MemOpa.
1817 if (!SUnit2ClusterInfo.count(MemOpRecords[NextIdx].SU->NodeNum) &&
1818 (FastCluster ||
1819 (!DAG->IsReachable(MemOpRecords[NextIdx].SU, MemOpa.SU) &&
1820 !DAG->IsReachable(MemOpa.SU, MemOpRecords[NextIdx].SU))))
1821 break;
1822 if (NextIdx == End)
1823 continue;
1824
1825 auto MemOpb = MemOpRecords[NextIdx];
1826 unsigned ClusterLength = 2;
1827 unsigned CurrentClusterBytes = MemOpa.Width + MemOpb.Width;
1828 if (SUnit2ClusterInfo.count(MemOpa.SU->NodeNum)) {
1829 ClusterLength = SUnit2ClusterInfo[MemOpa.SU->NodeNum].first + 1;
1830 CurrentClusterBytes =
1831 SUnit2ClusterInfo[MemOpa.SU->NodeNum].second + MemOpb.Width;
1832 }
1833
1834 if (!TII->shouldClusterMemOps(MemOpa.BaseOps, MemOpb.BaseOps, ClusterLength,
1835 CurrentClusterBytes))
1836 continue;
1837
1838 SUnit *SUa = MemOpa.SU;
1839 SUnit *SUb = MemOpb.SU;
1840 if (SUa->NodeNum > SUb->NodeNum)
1841 std::swap(SUa, SUb);
1842
1843 // FIXME: Is this check really required?
1844 if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
1845 continue;
1846
1847 LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
1848 << SUb->NodeNum << ")\n");
1849 ++NumClustered;
1850
1851 if (IsLoad) {
1852 // Copy successor edges from SUa to SUb. Interleaving computation
1853 // dependent on SUa can prevent load combining due to register reuse.
1854 // Predecessor edges do not need to be copied from SUb to SUa since
1855 // nearby loads should have effectively the same inputs.
1856 for (const SDep &Succ : SUa->Succs) {
1857 if (Succ.getSUnit() == SUb)
1858 continue;
1859 LLVM_DEBUG(dbgs() << " Copy Succ SU(" << Succ.getSUnit()->NodeNum
1860 << ")\n");
1861 DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial));
1862 }
1863 } else {
 1864      // Copy predecessor edges from SUb to SUa so that the SUnits SUb depends
 1865      // on cannot be scheduled in between SUb and SUa. Successor edges do not
 1866      // need to be copied from SUa to SUb since nothing will depend on stores.
 1867      // Note that we don't need to worry about memory dependencies here: we
 1868      // never try to cluster mem ops that have a memory dependency on each
 1869      // other.
1870 for (const SDep &Pred : SUb->Preds) {
1871 if (Pred.getSUnit() == SUa)
1872 continue;
1873 LLVM_DEBUG(dbgs() << " Copy Pred SU(" << Pred.getSUnit()->NodeNum
1874 << ")\n");
1875 DAG->addEdge(SUa, SDep(Pred.getSUnit(), SDep::Artificial));
1876 }
1877 }
1878
1879 SUnit2ClusterInfo[MemOpb.SU->NodeNum] = {ClusterLength,
1880 CurrentClusterBytes};
1881
1882 LLVM_DEBUG(dbgs() << " Curr cluster length: " << ClusterLength
1883 << ", Curr cluster bytes: " << CurrentClusterBytes
1884 << "\n");
1885 }
1886}
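// Illustrative walk-through (assumed SUnits and widths, not taken from a real
// trace): given three 8-byte loads SU(3), SU(5) and SU(9) at consecutive
// offsets, SU(3) and SU(5) are clustered first (length 2, 16 bytes) and that
// state is recorded in SUnit2ClusterInfo for SU(5); when SU(5) is later paired
// with SU(9), the cluster grows to length 3 and 24 bytes, and so on until
// shouldClusterMemOps rejects a further extension.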
1887
1888void BaseMemOpClusterMutation::collectMemOpRecords(
1889 std::vector<SUnit> &SUnits, SmallVectorImpl<MemOpInfo> &MemOpRecords) {
1890 for (auto &SU : SUnits) {
1891 if ((IsLoad && !SU.getInstr()->mayLoad()) ||
1892 (!IsLoad && !SU.getInstr()->mayStore()))
1893 continue;
1894
1895 const MachineInstr &MI = *SU.getInstr();
1896 SmallVector<const MachineOperand *, 4> BaseOps;
1897 int64_t Offset;
1898 bool OffsetIsScalable;
1899 unsigned Width;
1900 if (TII->getMemOperandsWithOffsetWidth(MI, BaseOps, Offset,
1901 OffsetIsScalable, Width, TRI)) {
1902 MemOpRecords.push_back(MemOpInfo(&SU, BaseOps, Offset, Width));
1903
1904 LLVM_DEBUG(dbgs() << "Num BaseOps: " << BaseOps.size() << ", Offset: "
1905 << Offset << ", OffsetIsScalable: " << OffsetIsScalable
1906 << ", Width: " << Width << "\n");
1907 }
1908#ifndef NDEBUG
1909 for (const auto *Op : BaseOps)
1910 assert(Op);
1911#endif
1912 }
1913}
1914
1915bool BaseMemOpClusterMutation::groupMemOps(
1916 ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
1917 DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups) {
1918 bool FastCluster =
1919 ForceFastCluster ||
1920 MemOps.size() * DAG->SUnits.size() / 1000 > FastClusterThreshold;
1921
1922 for (const auto &MemOp : MemOps) {
1923 unsigned ChainPredID = DAG->SUnits.size();
1924 if (FastCluster) {
1925 for (const SDep &Pred : MemOp.SU->Preds) {
1926 // We only want to cluster mem ops that have the same ctrl (non-data)
1927 // pred, so that they have no ctrl dependency on each other. But for
1928 // store instrs, we can still cluster them if the pred is a load instr.
1929 if ((Pred.isCtrl() &&
1930 (IsLoad ||
1931 (Pred.getSUnit() && Pred.getSUnit()->getInstr()->mayStore()))) &&
1932 !Pred.isArtificial()) {
1933 ChainPredID = Pred.getSUnit()->NodeNum;
1934 break;
1935 }
1936 }
1937 } else
1938 ChainPredID = 0;
1939
1940 Groups[ChainPredID].push_back(MemOp);
1941 }
1942 return FastCluster;
1943}
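// Illustrative numbers (assuming FastClusterThreshold keeps its default of
// 1000): a region with 200 candidate mem ops and 10000 SUnits gives
// 200 * 10000 / 1000 == 2000 > 1000, so FastCluster mode is chosen and the ops
// are grouped by their ctrl predecessor instead of querying IsReachable for
// every candidate pair.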
1944
1945/// Callback from DAG postProcessing to create cluster edges for loads/stores.
1946void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAG) {
1947 // Collect all the clusterable loads/stores
1948 SmallVector<MemOpInfo, 32> MemOpRecords;
1949 collectMemOpRecords(DAG->SUnits, MemOpRecords);
1950
1951 if (MemOpRecords.size() < 2)
1952 return;
1953
1954 // If the DAG is too complex, use a heuristic to put loads/stores without
1955 // dependencies into the same group, to avoid a compile-time blow-up.
1956 // Note that some fusion pairs may be lost as a result.
1957 DenseMap<unsigned, SmallVector<MemOpInfo, 32>> Groups;
1958 bool FastCluster = groupMemOps(MemOpRecords, DAG, Groups);
1959
1960 for (auto &Group : Groups) {
1961 // Sort the loads/stores so that we can stop clustering as early as
1962 // possible.
1963 llvm::sort(Group.second);
1964
1965 // Trying to cluster all the neighboring loads/stores.
1966 clusterNeighboringMemOps(Group.second, FastCluster, DAG);
1967 }
1968}
1969
1970//===----------------------------------------------------------------------===//
1971// CopyConstrain - DAG post-processing to encourage copy elimination.
1972//===----------------------------------------------------------------------===//
1973
1974namespace {
1975
1976/// Post-process the DAG to create weak edges from all uses of a copy to
1977/// the one use that defines the copy's source vreg, most likely an induction
1978/// variable increment.
1979class CopyConstrain : public ScheduleDAGMutation {
1980 // Transient state.
1981 SlotIndex RegionBeginIdx;
1982
1983 // RegionEndIdx is the slot index of the last non-debug instruction in the
1984 // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
1985 SlotIndex RegionEndIdx;
1986
1987public:
1988 CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
1989
1990 void apply(ScheduleDAGInstrs *DAGInstrs) override;
1991
1992protected:
1993 void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
1994};
1995
1996} // end anonymous namespace
1997
1998namespace llvm {
1999
2000std::unique_ptr<ScheduleDAGMutation>
2002 const TargetRegisterInfo *TRI) {
2003 return std::make_unique<CopyConstrain>(TII, TRI);
2004}
2005
2006} // end namespace llvm
2007
2008/// constrainLocalCopy handles two possibilities:
2009/// 1) Local src:
2010/// I0: = dst
2011/// I1: src = ...
2012/// I2: = dst
2013/// I3: dst = src (copy)
2014/// (create pred->succ edges I0->I1, I2->I1)
2015///
2016/// 2) Local copy:
2017/// I0: dst = src (copy)
2018/// I1: = dst
2019/// I2: src = ...
2020/// I3: = dst
2021/// (create pred->succ edges I1->I2, I3->I2)
2022///
2023/// Although the MachineScheduler is currently constrained to single blocks,
2024/// this algorithm should handle extended blocks. An EBB is a set of
2025/// contiguously numbered blocks such that the previous block in the EBB is
2026/// always the single predecessor.
2027void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
2028 LiveIntervals *LIS = DAG->getLIS();
2029 MachineInstr *Copy = CopySU->getInstr();
2030
2031 // Check for pure vreg copies.
2032 const MachineOperand &SrcOp = Copy->getOperand(1);
2033 Register SrcReg = SrcOp.getReg();
2034 if (!SrcReg.isVirtual() || !SrcOp.readsReg())
2035 return;
2036
2037 const MachineOperand &DstOp = Copy->getOperand(0);
2038 Register DstReg = DstOp.getReg();
2039 if (!DstReg.isVirtual() || DstOp.isDead())
2040 return;
2041
2042 // Check if either the dest or source is local. If it's live across a back
2043 // edge, it's not local. Note that if both vregs are live across the back
2044 // edge, we cannot successfully constrain the copy without cyclic scheduling.
2045 // If both the copy's source and dest are local live intervals, then we
2046 // should treat the dest as the global for the purpose of adding
2047 // constraints. This adds edges from source's other uses to the copy.
2048 unsigned LocalReg = SrcReg;
2049 unsigned GlobalReg = DstReg;
2050 LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
2051 if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
2052 LocalReg = DstReg;
2053 GlobalReg = SrcReg;
2054 LocalLI = &LIS->getInterval(LocalReg);
2055 if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
2056 return;
2057 }
2058 LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
2059
2060 // Find the global segment after the start of the local LI.
2061 LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
2062 // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
2063 // local live range. We could create edges from other global uses to the local
2064 // start, but the coalescer should have already eliminated these cases, so
2065 // don't bother dealing with them.
2066 if (GlobalSegment == GlobalLI->end())
2067 return;
2068
2069 // If GlobalSegment is killed at the LocalLI->start, the call to find()
2070 // returned the next global segment. But if GlobalSegment overlaps with
2071 // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
2072 // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
2073 if (GlobalSegment->contains(LocalLI->beginIndex()))
2074 ++GlobalSegment;
2075
2076 if (GlobalSegment == GlobalLI->end())
2077 return;
2078
2079 // Check if GlobalLI contains a hole in the vicinity of LocalLI.
2080 if (GlobalSegment != GlobalLI->begin()) {
2081 // Two address defs have no hole.
2082 if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
2083 GlobalSegment->start)) {
2084 return;
2085 }
2086 // If the prior global segment may be defined by the same two-address
2087 // instruction that also defines LocalLI, then we can't make a hole here.
2088 if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
2089 LocalLI->beginIndex())) {
2090 return;
2091 }
2092 // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
2093 // it would be a disconnected component in the live range.
2094 assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
2095 "Disconnected LRG within the scheduling region.");
2096 }
2097 MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
2098 if (!GlobalDef)
2099 return;
2100
2101 SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
2102 if (!GlobalSU)
2103 return;
2104
2105 // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
2106 // constraining the uses of the last local def to precede GlobalDef.
2107 SmallVector<SUnit*,8> LocalUses;
2108 const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
2109 MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
2110 SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
2111 for (const SDep &Succ : LastLocalSU->Succs) {
2112 if (Succ.getKind() != SDep::Data || Succ.getReg() != LocalReg)
2113 continue;
2114 if (Succ.getSUnit() == GlobalSU)
2115 continue;
2116 if (!DAG->canAddEdge(GlobalSU, Succ.getSUnit()))
2117 return;
2118 LocalUses.push_back(Succ.getSUnit());
2119 }
2120 // Open the top of the GlobalLI hole by constraining any earlier global uses
2121 // to precede the start of LocalLI.
2122 SmallVector<SUnit*,8> GlobalUses;
2123 MachineInstr *FirstLocalDef =
2124 LIS->getInstructionFromIndex(LocalLI->beginIndex());
2125 SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
2126 for (const SDep &Pred : GlobalSU->Preds) {
2127 if (Pred.getKind() != SDep::Anti || Pred.getReg() != GlobalReg)
2128 continue;
2129 if (Pred.getSUnit() == FirstLocalSU)
2130 continue;
2131 if (!DAG->canAddEdge(FirstLocalSU, Pred.getSUnit()))
2132 return;
2133 GlobalUses.push_back(Pred.getSUnit());
2134 }
2135 LLVM_DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
2136 // Add the weak edges.
2137 for (SUnit *LU : LocalUses) {
2138 LLVM_DEBUG(dbgs() << " Local use SU(" << LU->NodeNum << ") -> SU("
2139 << GlobalSU->NodeNum << ")\n");
2140 DAG->addEdge(GlobalSU, SDep(LU, SDep::Weak));
2141 }
2142 for (SUnit *GU : GlobalUses) {
2143 LLVM_DEBUG(dbgs() << " Global use SU(" << GU->NodeNum << ") -> SU("
2144 << FirstLocalSU->NodeNum << ")\n");
2145 DAG->addEdge(FirstLocalSU, SDep(GU, SDep::Weak));
2146 }
2147}
2148
2149/// Callback from DAG postProcessing to create weak edges to encourage
2150/// copy elimination.
2151void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
2152 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
2153 assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
2154
2155 MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
2156 if (FirstPos == DAG->end())
2157 return;
2158 RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
2159 RegionEndIdx = DAG->getLIS()->getInstructionIndex(
2160 *priorNonDebug(DAG->end(), DAG->begin()));
2161
2162 for (SUnit &SU : DAG->SUnits) {
2163 if (!SU.getInstr()->isCopy())
2164 continue;
2165
2166 constrainLocalCopy(&SU, static_cast<ScheduleDAGMILive*>(DAG));
2167 }
2168}
2169
2170//===----------------------------------------------------------------------===//
2171// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
2172// and possibly other custom schedulers.
2173//===----------------------------------------------------------------------===//
2174
2175static const unsigned InvalidCycle = ~0U;
2176
2177SchedBoundary::~SchedBoundary() { delete HazardRec; }
2178
2179/// Given a Count of resource usage and a Latency value, return true if a
2180/// SchedBoundary becomes resource limited.
2181/// If we are checking after scheduling a node, we should return true when
2182/// we just reach the resource limit.
2183static bool checkResourceLimit(unsigned LFactor, unsigned Count,
2184 unsigned Latency, bool AfterSchedNode) {
2185 int ResCntFactor = (int)(Count - (Latency * LFactor));
2186 if (AfterSchedNode)
2187 return ResCntFactor >= (int)LFactor;
2188 else
2189 return ResCntFactor > (int)LFactor;
2190}
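// Worked example (assumed numbers): with LFactor == 2, Count == 10 and
// Latency == 4, ResCntFactor == 10 - 4*2 == 2. After scheduling a node
// (AfterSchedNode == true) this equals LFactor, so the zone counts as resource
// limited; before scheduling it would not (2 > 2 is false).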
2191
2192void SchedBoundary::reset() {
2193 // A new HazardRec is created for each DAG and owned by SchedBoundary.
2194 // Destroying and reconstructing it is very expensive though. So keep
2195 // invalid, placeholder HazardRecs.
2196 if (HazardRec && HazardRec->isEnabled()) {
2197 delete HazardRec;
2198 HazardRec = nullptr;
2199 }
2200 Available.clear();
2201 Pending.clear();
2202 CheckPending = false;
2203 CurrCycle = 0;
2204 CurrMOps = 0;
2205 MinReadyCycle = std::numeric_limits<unsigned>::max();
2206 ExpectedLatency = 0;
2207 DependentLatency = 0;
2208 RetiredMOps = 0;
2209 MaxExecutedResCount = 0;
2210 ZoneCritResIdx = 0;
2211 IsResourceLimited = false;
2212 ReservedCycles.clear();
2213 ReservedResourceSegments.clear();
2214 ReservedCyclesIndex.clear();
2215 ResourceGroupSubUnitMasks.clear();
2216#if LLVM_ENABLE_ABI_BREAKING_CHECKS
2217 // Track the maximum number of stall cycles that could arise either from the
2218 // latency of a DAG edge or the number of cycles that a processor resource is
2219 // reserved (SchedBoundary::ReservedCycles).
2220 MaxObservedStall = 0;
2221#endif
2222 // Reserve a zero-count for invalid CritResIdx.
2223 ExecutedResCounts.resize(1);
2224 assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
2225}
2226
2227void SchedRemainder::
2228init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
2229 reset();
2230 if (!SchedModel->hasInstrSchedModel())
2231 return;
2232 RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
2233 for (SUnit &SU : DAG->SUnits) {
2234 const MCSchedClassDesc *SC = DAG->getSchedClass(&SU);
2235 RemIssueCount += SchedModel->getNumMicroOps(SU.getInstr(), SC)
2236 * SchedModel->getMicroOpFactor();
2238 PI = SchedModel->getWriteProcResBegin(SC),
2239 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2240 unsigned PIdx = PI->ProcResourceIdx;
2241 unsigned Factor = SchedModel->getResourceFactor(PIdx);
2242 assert(PI->ReleaseAtCycle >= PI->AcquireAtCycle);
2243 RemainingCounts[PIdx] +=
2244 (Factor * (PI->ReleaseAtCycle - PI->AcquireAtCycle));
2245 }
2246 }
2247}
2248
2249void SchedBoundary::
2250init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
2251 reset();
2252 DAG = dag;
2253 SchedModel = smodel;
2254 Rem = rem;
2255 if (SchedModel->hasInstrSchedModel()) {
2256 unsigned ResourceCount = SchedModel->getNumProcResourceKinds();
2257 ReservedCyclesIndex.resize(ResourceCount);
2258 ExecutedResCounts.resize(ResourceCount);
2259 ResourceGroupSubUnitMasks.resize(ResourceCount, APInt(ResourceCount, 0));
2260 unsigned NumUnits = 0;
2261
2262 for (unsigned i = 0; i < ResourceCount; ++i) {
2263 ReservedCyclesIndex[i] = NumUnits;
2264 NumUnits += SchedModel->getProcResource(i)->NumUnits;
2265 if (isUnbufferedGroup(i)) {
2266 auto SubUnits = SchedModel->getProcResource(i)->SubUnitsIdxBegin;
2267 for (unsigned U = 0, UE = SchedModel->getProcResource(i)->NumUnits;
2268 U != UE; ++U)
2269 ResourceGroupSubUnitMasks[i].setBit(SubUnits[U]);
2270 }
2271 }
2272
2273 ReservedCycles.resize(NumUnits, InvalidCycle);
2274 }
2275}
2276
2277/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
2278/// these "soft stalls" differently than the hard stall cycles based on CPU
2279/// resources and computed by checkHazard(). A fully in-order model
2280/// (MicroOpBufferSize==0) will not make use of this since instructions are not
2281/// available for scheduling until they are ready. However, a weaker in-order
2282/// model may use this for heuristics. For example, if a processor has in-order
2283/// behavior when reading certain resources, this may come into play.
2284unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
2285 if (!SU->isUnbuffered)
2286 return 0;
2287
2288 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2289 if (ReadyCycle > CurrCycle)
2290 return ReadyCycle - CurrCycle;
2291 return 0;
2292}
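// E.g. (illustrative): an unbuffered SU that becomes ready at cycle 7 while
// the zone is at cycle 5 reports two soft-stall cycles; buffered SUs always
// report zero here.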
2293
2294/// Compute the next cycle at which the given processor resource unit
2295/// can be scheduled.
2296unsigned SchedBoundary::getNextResourceCycleByInstance(unsigned InstanceIdx,
2297 unsigned ReleaseAtCycle,
2298 unsigned AcquireAtCycle) {
2300 if (isTop())
2301 return ReservedResourceSegments[InstanceIdx].getFirstAvailableAtFromTop(
2302 CurrCycle, AcquireAtCycle, ReleaseAtCycle);
2303
2304 return ReservedResourceSegments[InstanceIdx].getFirstAvailableAtFromBottom(
2305 CurrCycle, AcquireAtCycle, ReleaseAtCycle);
2306 }
2307
2308 unsigned NextUnreserved = ReservedCycles[InstanceIdx];
2309 // If this resource has never been used, always return cycle zero.
2310 if (NextUnreserved == InvalidCycle)
2311 return CurrCycle;
2312 // For bottom-up scheduling add the cycles needed for the current operation.
2313 if (!isTop())
2314 NextUnreserved = std::max(CurrCycle, NextUnreserved + ReleaseAtCycle);
2315 return NextUnreserved;
2316}
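// Illustration of the non-interval path (assumed numbers): a unit that has
// never been used (InvalidCycle) is available at CurrCycle; in bottom-up mode
// a unit with ReservedCycles[InstanceIdx] == 3 and ReleaseAtCycle == 2 is next
// available at max(CurrCycle, 3 + 2).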
2317
2318/// Compute the next cycle at which the given processor resource can be
2319/// scheduled. Returns the next cycle and the index of the processor resource
2320/// instance in the reserved cycles vector.
2321std::pair<unsigned, unsigned>
2322SchedBoundary::getNextResourceCycle(const MCSchedClassDesc *SC, unsigned PIdx,
2323 unsigned ReleaseAtCycle,
2324 unsigned AcquireAtCycle) {
2326 LLVM_DEBUG(dbgs() << " Resource booking (@" << CurrCycle << "c): \n");
2328 LLVM_DEBUG(dbgs() << " getNextResourceCycle (@" << CurrCycle << "c): \n");
2329 }
2330 unsigned MinNextUnreserved = InvalidCycle;
2331 unsigned InstanceIdx = 0;
2332 unsigned StartIndex = ReservedCyclesIndex[PIdx];
2333 unsigned NumberOfInstances = SchedModel->getProcResource(PIdx)->NumUnits;
2334 assert(NumberOfInstances > 0 &&
2335 "Cannot have zero instances of a ProcResource");
2336
2337 if (isUnbufferedGroup(PIdx)) {
2338 // If any subunits are used by the instruction, report that the
2339 // subunits of the resource group are available at the first cycle
2340 // in which the unit is available, effectively removing the group
2341 // record from hazarding and basing the hazarding decisions on the
2342 // subunit records. Otherwise, choose the first available instance
2343 // from among the subunits. Specifications which assign cycles to
2344 // both the subunits and the group or which use an unbuffered
2345 // group with buffered subunits will appear to schedule
2346 // strangely. In the first case, the additional cycles for the
2347 // group will be ignored. In the second, the group will be
2348 // ignored entirely.
2349 for (const MCWriteProcResEntry &PE :
2352 if (ResourceGroupSubUnitMasks[PIdx][PE.ProcResourceIdx])
2353 return std::make_pair(getNextResourceCycleByInstance(
2354 StartIndex, ReleaseAtCycle, AcquireAtCycle),
2355 StartIndex);
2356
2357 auto SubUnits = SchedModel->getProcResource(PIdx)->SubUnitsIdxBegin;
2358 for (unsigned I = 0, End = NumberOfInstances; I < End; ++I) {
2359 unsigned NextUnreserved, NextInstanceIdx;
2360 std::tie(NextUnreserved, NextInstanceIdx) =
2361 getNextResourceCycle(SC, SubUnits[I], ReleaseAtCycle, AcquireAtCycle);
2362 if (MinNextUnreserved > NextUnreserved) {
2363 InstanceIdx = NextInstanceIdx;
2364 MinNextUnreserved = NextUnreserved;
2365 }
2366 }
2367 return std::make_pair(MinNextUnreserved, InstanceIdx);
2368 }
2369
2370 for (unsigned I = StartIndex, End = StartIndex + NumberOfInstances; I < End;
2371 ++I) {
2372 unsigned NextUnreserved =
2373 getNextResourceCycleByInstance(I, ReleaseAtCycle, AcquireAtCycle);
2375 LLVM_DEBUG(dbgs() << " Instance " << I - StartIndex << " available @"
2376 << NextUnreserved << "c\n");
2377 if (MinNextUnreserved > NextUnreserved) {
2378 InstanceIdx = I;
2379 MinNextUnreserved = NextUnreserved;
2380 }
2381 }
2383 LLVM_DEBUG(dbgs() << " selecting " << SchedModel->getResourceName(PIdx)
2384 << "[" << InstanceIdx - StartIndex << "]"
2385 << " available @" << MinNextUnreserved << "c"
2386 << "\n");
2387 return std::make_pair(MinNextUnreserved, InstanceIdx);
2388}
2389
2390/// Does this SU have a hazard within the current instruction group.
2391///
2392/// The scheduler supports two modes of hazard recognition. The first is the
2393/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
2394/// supports highly complicated in-order reservation tables
2395/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
2396///
2397/// The second is a streamlined mechanism that checks for hazards based on
2398/// simple counters that the scheduler itself maintains. It explicitly checks
2399/// for instruction dispatch limitations, including the number of micro-ops that
2400/// can dispatch per cycle.
2401///
2402/// TODO: Also check whether the SU must start a new group.
2403bool SchedBoundary::checkHazard(SUnit *SU) {
2404 if (HazardRec->isEnabled()
2405 && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
2406 return true;
2407 }
2408
2409 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
2410 if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
2411 LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
2412 << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
2413 return true;
2414 }
2415
2416 if (CurrMOps > 0 &&
2417 ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) ||
2418 (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) {
2419 LLVM_DEBUG(dbgs() << " hazard: SU(" << SU->NodeNum << ") must "
2420 << (isTop() ? "begin" : "end") << " group\n");
2421 return true;
2422 }
2423
2424 if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
2425 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2426 for (const MCWriteProcResEntry &PE :
2429 unsigned ResIdx = PE.ProcResourceIdx;
2430 unsigned ReleaseAtCycle = PE.ReleaseAtCycle;
2431 unsigned AcquireAtCycle = PE.AcquireAtCycle;
2432 unsigned NRCycle, InstanceIdx;
2433 std::tie(NRCycle, InstanceIdx) =
2434 getNextResourceCycle(SC, ResIdx, ReleaseAtCycle, AcquireAtCycle);
2435 if (NRCycle > CurrCycle) {
2436#if LLVM_ENABLE_ABI_BREAKING_CHECKS
2437 MaxObservedStall = std::max(ReleaseAtCycle, MaxObservedStall);
2438#endif
2439 LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") "
2440 << SchedModel->getResourceName(ResIdx)
2441 << '[' << InstanceIdx - ReservedCyclesIndex[ResIdx] << ']'
2442 << "=" << NRCycle << "c\n");
2443 return true;
2444 }
2445 }
2446 }
2447 return false;
2448}
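// E.g. (illustrative): with an issue width of 4 and CurrMOps == 3, an SU that
// decodes to 2 micro-ops overflows the current group (3 + 2 > 4) and is
// reported as a hazard, so it stays pending until the cycle is bumped.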
2449
2450// Find the unscheduled node in ReadySUs with the highest latency.
2453 SUnit *LateSU = nullptr;
2454 unsigned RemLatency = 0;
2455 for (SUnit *SU : ReadySUs) {
2456 unsigned L = getUnscheduledLatency(SU);
2457 if (L > RemLatency) {
2458 RemLatency = L;
2459 LateSU = SU;
2460 }
2461 }
2462 if (LateSU) {
2463 LLVM_DEBUG(dbgs() << Available.getName() << " RemLatency SU("
2464 << LateSU->NodeNum << ") " << RemLatency << "c\n");
2465 }
2466 return RemLatency;
2467}
2468
2469// Count resources in this zone and the remaining unscheduled
2470// instruction. Return the max count, scaled. Set OtherCritIdx to the critical
2471// resource index, or zero if the zone is issue limited.
2473getOtherResourceCount(unsigned &OtherCritIdx) {
2474 OtherCritIdx = 0;
2476 return 0;
2477
2478 unsigned OtherCritCount = Rem->RemIssueCount
2479 + (RetiredMOps * SchedModel->getMicroOpFactor());
2480 LLVM_DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: "
2481 << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
2482 for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
2483 PIdx != PEnd; ++PIdx) {
2484 unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
2485 if (OtherCount > OtherCritCount) {
2486 OtherCritCount = OtherCount;
2487 OtherCritIdx = PIdx;
2488 }
2489 }
2490 if (OtherCritIdx) {
2491 LLVM_DEBUG(
2492 dbgs() << " " << Available.getName() << " + Remain CritRes: "
2493 << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
2494 << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
2495 }
2496 return OtherCritCount;
2497}
2498
2499void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
2500 unsigned Idx) {
2501 assert(SU->getInstr() && "Scheduled SUnit must have instr");
2502
2503#if LLVM_ENABLE_ABI_BREAKING_CHECKS
2504 // ReadyCycle has been bumped up to CurrCycle when this node was
2505 // scheduled, but CurrCycle may have been eagerly advanced immediately after
2506 // scheduling, so may now be greater than ReadyCycle.
2507 if (ReadyCycle > CurrCycle)
2508 MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
2509#endif
2510
2511 if (ReadyCycle < MinReadyCycle)
2512 MinReadyCycle = ReadyCycle;
2513
2514 // Check for interlocks first. For the purpose of other heuristics, an
2515 // instruction that cannot issue appears as if it's not in the ReadyQueue.
2516 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2517 bool HazardDetected = (!IsBuffered && ReadyCycle > CurrCycle) ||
2518 checkHazard(SU) || (Available.size() >= ReadyListLimit);
2519
2520 if (!HazardDetected) {
2521 Available.push(SU);
2522
2523 if (InPQueue)
2524 Pending.remove(Pending.begin() + Idx);
2525 return;
2526 }
2527
2528 if (!InPQueue)
2529 Pending.push(SU);
2530}
2531
2532/// Move the boundary of scheduled code by one cycle.
2533void SchedBoundary::bumpCycle(unsigned NextCycle) {
2534 if (SchedModel->getMicroOpBufferSize() == 0) {
2535 assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
2536 "MinReadyCycle uninitialized");
2537 if (MinReadyCycle > NextCycle)
2538 NextCycle = MinReadyCycle;
2539 }
2540 // Update the current micro-ops, which will issue in the next cycle.
2541 unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
2542 CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
2543
2544 // Decrement DependentLatency based on the next cycle.
2545 if ((NextCycle - CurrCycle) > DependentLatency)
2546 DependentLatency = 0;
2547 else
2548 DependentLatency -= (NextCycle - CurrCycle);
2549
2550 if (!HazardRec->isEnabled()) {
2551 // Bypass HazardRec virtual calls.
2552 CurrCycle = NextCycle;
2553 } else {
2554 // Bypass getHazardType calls in case of long latency.
2555 for (; CurrCycle != NextCycle; ++CurrCycle) {
2556 if (isTop())
2557 HazardRec->AdvanceCycle();
2558 else
2559 HazardRec->RecedeCycle();
2560 }
2561 }
2562 CheckPending = true;
2563 IsResourceLimited =
2564 checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
2565 getScheduledLatency(), true);
2566
2567 LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName()
2568 << '\n');
2569}
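// E.g. (illustrative): with an issue width of 4, advancing from cycle 10 to
// cycle 12 subtracts up to 8 from CurrMOps and shortens DependentLatency by 2
// (both clamped at zero).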
2570
2571void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
2572 ExecutedResCounts[PIdx] += Count;
2573 if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
2574 MaxExecutedResCount = ExecutedResCounts[PIdx];
2575}
2576
2577/// Add the given processor resource to this scheduled zone.
2578///
2579/// \param ReleaseAtCycle indicates the number of consecutive (non-pipelined)
2580/// cycles during which this resource is released.
2581///
2582/// \param AcquireAtCycle indicates the number of consecutive (non-pipelined)
2583/// cycles at which the resource is acquired after issue (assuming no stalls).
2584///
2585/// \return the next cycle at which the instruction may execute without
2586/// oversubscribing resources.
2587unsigned SchedBoundary::countResource(const MCSchedClassDesc *SC, unsigned PIdx,
2588 unsigned ReleaseAtCycle,
2589 unsigned NextCycle,
2590 unsigned AcquireAtCycle) {
2591 unsigned Factor = SchedModel->getResourceFactor(PIdx);
2592 unsigned Count = Factor * (ReleaseAtCycle- AcquireAtCycle);
2593 LLVM_DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx) << " +"
2594 << ReleaseAtCycle << "x" << Factor << "u\n");
2595
2596 // Update Executed resources counts.
2597 incExecutedResources(PIdx, Count);
2598 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
2599 Rem->RemainingCounts[PIdx] -= Count;
2600
2601 // Check if this resource exceeds the current critical resource. If so, it
2602 // becomes the critical resource.
2603 if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
2604 ZoneCritResIdx = PIdx;
2605 LLVM_DEBUG(dbgs() << " *** Critical resource "
2606 << SchedModel->getResourceName(PIdx) << ": "
2608 << "c\n");
2609 }
2610 // For reserved resources, record the highest cycle using the resource.
2611 unsigned NextAvailable, InstanceIdx;
2612 std::tie(NextAvailable, InstanceIdx) =
2613 getNextResourceCycle(SC, PIdx, ReleaseAtCycle, AcquireAtCycle);
2614 if (NextAvailable > CurrCycle) {
2615 LLVM_DEBUG(dbgs() << " Resource conflict: "
2616 << SchedModel->getResourceName(PIdx)
2617 << '[' << InstanceIdx - ReservedCyclesIndex[PIdx] << ']'
2618 << " reserved until @" << NextAvailable << "\n");
2619 }
2620 return NextAvailable;
2621}
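// Illustrative: a resource with factor 3 that is held for
// ReleaseAtCycle - AcquireAtCycle == 2 cycles contributes Count == 6 to the
// executed and remaining tallies, and may thereby become the zone's critical
// resource.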
2622
2623/// Move the boundary of scheduled code by one SUnit.
2624void SchedBoundary::bumpNode(SUnit *SU) {
2625 // Update the reservation table.
2626 if (HazardRec->isEnabled()) {
2627 if (!isTop() && SU->isCall) {
2628 // Calls are scheduled with their preceding instructions. For bottom-up
2629 // scheduling, clear the pipeline state before emitting.
2630 HazardRec->Reset();
2631 }
2632 HazardRec->EmitInstruction(SU);
2633 // Scheduling an instruction may have made pending instructions available.
2634 CheckPending = true;
2635 }
2636 // checkHazard should prevent scheduling multiple instructions per cycle that
2637 // exceed the issue width.
2638 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2639 unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
2640 assert(
2641 (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
2642 "Cannot schedule this instruction's MicroOps in the current cycle.");
2643
2644 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2645 LLVM_DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n");
2646
2647 unsigned NextCycle = CurrCycle;
2648 switch (SchedModel->getMicroOpBufferSize()) {
2649 case 0:
2650 assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
2651 break;
2652 case 1:
2653 if (ReadyCycle > NextCycle) {
2654 NextCycle = ReadyCycle;
2655 LLVM_DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n");
2656 }
2657 break;
2658 default:
2659 // We don't currently model the OOO reorder buffer, so consider all
2660 // scheduled MOps to be "retired". We do loosely model in-order resource
2661 // latency. If this instruction uses an in-order resource, account for any
2662 // likely stall cycles.
2663 if (SU->isUnbuffered && ReadyCycle > NextCycle)
2664 NextCycle = ReadyCycle;
2665 break;
2666 }
2667 RetiredMOps += IncMOps;
2668
2669 // Update resource counts and critical resource.
2670 if (SchedModel->hasInstrSchedModel()) {
2671 unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2672 assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2673 Rem->RemIssueCount -= DecRemIssue;
2674 if (ZoneCritResIdx) {
2675 // Scale scheduled micro-ops for comparing with the critical resource.
2676 unsigned ScaledMOps =
2677 RetiredMOps * SchedModel->getMicroOpFactor();
2678
2679 // If scaled micro-ops are now more than the previous critical resource by
2680 // a full cycle, then micro-ops issue becomes critical.
2681 if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2682 >= (int)SchedModel->getLatencyFactor()) {
2683 ZoneCritResIdx = 0;
2684 LLVM_DEBUG(dbgs() << " *** Critical resource NumMicroOps: "
2685 << ScaledMOps / SchedModel->getLatencyFactor()
2686 << "c\n");
2687 }
2688 }
2691 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2692 unsigned RCycle =
2693 countResource(SC, PI->ProcResourceIdx, PI->ReleaseAtCycle, NextCycle,
2694 PI->AcquireAtCycle);
2695 if (RCycle > NextCycle)
2696 NextCycle = RCycle;
2697 }
2698 if (SU->hasReservedResource) {
2699 // For reserved resources, record the highest cycle using the resource.
2700 // For top-down scheduling, this is the cycle in which we schedule this
2701 // instruction plus the number of cycles the operations reserves the
2702 // resource. For bottom-up, it is simply the instruction's cycle.
2703 for (TargetSchedModel::ProcResIter
2704 PI = SchedModel->getWriteProcResBegin(SC),
2705 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2706 unsigned PIdx = PI->ProcResourceIdx;
2707 if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
2708
2710 unsigned ReservedUntil, InstanceIdx;
2711 std::tie(ReservedUntil, InstanceIdx) = getNextResourceCycle(
2712 SC, PIdx, PI->ReleaseAtCycle, PI->AcquireAtCycle);
2713 if (isTop()) {
2714 ReservedResourceSegments[InstanceIdx].add(
2716 NextCycle, PI->AcquireAtCycle, PI->ReleaseAtCycle),
2718 } else {
2719 ReservedResourceSegments[InstanceIdx].add(
2721 NextCycle, PI->AcquireAtCycle, PI->ReleaseAtCycle),
2723 }
2724 } else {
2725
2726 unsigned ReservedUntil, InstanceIdx;
2727 std::tie(ReservedUntil, InstanceIdx) = getNextResourceCycle(
2728 SC, PIdx, PI->ReleaseAtCycle, PI->AcquireAtCycle);
2729 if (isTop()) {
2730 ReservedCycles[InstanceIdx] =
2731 std::max(ReservedUntil, NextCycle + PI->ReleaseAtCycle);
2732 } else
2733 ReservedCycles[InstanceIdx] = NextCycle;
2734 }
2735 }
2736 }
2737 }
2738 }
2739 // Update ExpectedLatency and DependentLatency.
2740 unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2741 unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2742 if (SU->getDepth() > TopLatency) {
2743 TopLatency = SU->getDepth();
2744 LLVM_DEBUG(dbgs() << " " << Available.getName() << " TopLatency SU("
2745 << SU->NodeNum << ") " << TopLatency << "c\n");
2746 }
2747 if (SU->getHeight() > BotLatency) {
2748 BotLatency = SU->getHeight();
2749 LLVM_DEBUG(dbgs() << " " << Available.getName() << " BotLatency SU("
2750 << SU->NodeNum << ") " << BotLatency << "c\n");
2751 }
2752 // If we stall for any reason, bump the cycle.
2753 if (NextCycle > CurrCycle)
2754 bumpCycle(NextCycle);
2755 else
2756 // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2757 // resource limited. If a stall occurred, bumpCycle does this.
2758 IsResourceLimited =
2759 checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
2760 getScheduledLatency(), true);
2761
2762 // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
2763 // resets CurrMOps. Loop to handle instructions with more MOps than issue in
2764 // one cycle. Since we commonly reach the max MOps here, opportunistically
2765 // bump the cycle to avoid uselessly checking everything in the readyQ.
2766 CurrMOps += IncMOps;
2767
2768 // Bump the cycle count for issue group constraints.
2769 // This must be done after NextCycle has been adjusted for all other stalls.
2770 // Calling bumpCycle(X) will reduce CurrMOps by one issue group and set
2771 // CurrCycle to X.
2772 if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) ||
2773 (!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) {
2774 LLVM_DEBUG(dbgs() << " Bump cycle to " << (isTop() ? "end" : "begin")
2775 << " group\n");
2776 bumpCycle(++NextCycle);
2777 }
2778
2779 while (CurrMOps >= SchedModel->getIssueWidth()) {
2780 LLVM_DEBUG(dbgs() << " *** Max MOps " << CurrMOps << " at cycle "
2781 << CurrCycle << '\n');
2782 bumpCycle(++NextCycle);
2783 }
2785}
2786
2787/// Release pending ready nodes in to the available queue. This makes them
2788/// visible to heuristics.
2789void SchedBoundary::releasePending() {
2790 // If the available queue is empty, it is safe to reset MinReadyCycle.
2791 if (Available.empty())
2792 MinReadyCycle = std::numeric_limits<unsigned>::max();
2793
2794 // Check to see if any of the pending instructions are ready to issue. If
2795 // so, add them to the available queue.
2796 for (unsigned I = 0, E = Pending.size(); I < E; ++I) {
2797 SUnit *SU = *(Pending.begin() + I);
2798 unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2799
2800 if (ReadyCycle < MinReadyCycle)
2801 MinReadyCycle = ReadyCycle;
2802
2803 if (Available.size() >= ReadyListLimit)
2804 break;
2805
2806 releaseNode(SU, ReadyCycle, true, I);
2807 if (E != Pending.size()) {
2808 --I;
2809 --E;
2810 }
2811 }
2812 CheckPending = false;
2813}
2814
2815/// Remove SU from the ready set for this boundary.
2816void SchedBoundary::removeReady(SUnit *SU) {
2817 if (Available.isInQueue(SU))
2818 Available.remove(Available.find(SU));
2819 else {
2820 assert(Pending.isInQueue(SU) && "bad ready count");
2821 Pending.remove(Pending.find(SU));
2822 }
2823}
2824
2825/// If this queue only has one ready candidate, return it. As a side effect,
2826/// defer any nodes that now hit a hazard, and advance the cycle until at least
2827/// one node is ready. If multiple instructions are ready, return NULL.
2828SUnit *SchedBoundary::pickOnlyChoice() {
2829 if (CheckPending)
2830 releasePending();
2831
2832 // Defer any ready instrs that now have a hazard.
2833 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2834 if (checkHazard(*I)) {
2835 Pending.push(*I);
2836 I = Available.remove(I);
2837 continue;
2838 }
2839 ++I;
2840 }
2841 for (unsigned i = 0; Available.empty(); ++i) {
2842// FIXME: Re-enable assert once PR20057 is resolved.
2843// assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
2844// "permanent hazard");
2845 (void)i;
2846 bumpCycle(CurrCycle + 1);
2847 releasePending();
2848 }
2849
2852
2853 if (Available.size() == 1)
2854 return *Available.begin();
2855 return nullptr;
2856}
2857
2858#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2859
2860/// Dump the content of the \ref ReservedCycles vector for the
2861/// resources that are used in the basic block.
2862///
2863LLVM_DUMP_METHOD void SchedBoundary::dumpReservedCycles() const {
2864 if (!SchedModel->hasInstrSchedModel())
2865 return;
2866
2867 unsigned ResourceCount = SchedModel->getNumProcResourceKinds();
2868 unsigned StartIdx = 0;
2869
2870 for (unsigned ResIdx = 0; ResIdx < ResourceCount; ++ResIdx) {
2871 const unsigned NumUnits = SchedModel->getProcResource(ResIdx)->NumUnits;
2872 std::string ResName = SchedModel->getResourceName(ResIdx);
2873 for (unsigned UnitIdx = 0; UnitIdx < NumUnits; ++UnitIdx) {
2874 dbgs() << ResName << "(" << UnitIdx << ") = ";
2876 if (ReservedResourceSegments.count(StartIdx + UnitIdx))
2877 dbgs() << ReservedResourceSegments.at(StartIdx + UnitIdx);
2878 else
2879 dbgs() << "{ }\n";
2880 } else
2881 dbgs() << ReservedCycles[StartIdx + UnitIdx] << "\n";
2882 }
2883 StartIdx += NumUnits;
2884 }
2885}
2886
2887// This is useful information to dump after bumpNode.
2888// Note that the Queue contents are more useful before pickNodeFromQueue.
2889LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const {
2890 unsigned ResFactor;
2891 unsigned ResCount;
2892 if (ZoneCritResIdx) {
2893 ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2894 ResCount = getResourceCount(ZoneCritResIdx);
2895 } else {
2896 ResFactor = SchedModel->getMicroOpFactor();
2897 ResCount = RetiredMOps * ResFactor;
2898 }
2899 unsigned LFactor = SchedModel->getLatencyFactor();
2900 dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2901 << " Retired: " << RetiredMOps;
2902 dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c";
2903 dbgs() << "\n Critical: " << ResCount / LFactor << "c, "
2904 << ResCount / ResFactor << " "
2905 << SchedModel->getResourceName(ZoneCritResIdx)
2906 << "\n ExpectedLatency: " << ExpectedLatency << "c\n"
2907 << (IsResourceLimited ? " - Resource" : " - Latency")
2908 << " limited.\n";
2911}
2912#endif
2913
2914//===----------------------------------------------------------------------===//
2915// GenericScheduler - Generic implementation of MachineSchedStrategy.
2916//===----------------------------------------------------------------------===//
2917
2918void GenericSchedulerBase::SchedCandidate::
2919initResourceDelta(const ScheduleDAGMI *DAG,
2920 const TargetSchedModel *SchedModel) {
2921 if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2922 return;
2923
2924 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2927 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2928 if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2929 ResDelta.CritResources += PI->ReleaseAtCycle;
2930 if (PI->ProcResourceIdx == Policy.DemandResIdx)
2931 ResDelta.DemandedResources += PI->ReleaseAtCycle;
2932 }
2933}
2934
2935/// Compute remaining latency. We need this both to determine whether the
2936/// overall schedule has become latency-limited and whether the instructions
2937/// outside this zone are resource or latency limited.
2938///
2939/// The "dependent" latency is updated incrementally during scheduling as the
2940/// max height/depth of scheduled nodes minus the cycles since it was
2941/// scheduled:
2942/// DLat = max (N.depth - (CurrCycle - N.ReadyCycle) for N in Zone
2943///
2944/// The "independent" latency is the max ready queue depth:
2945/// ILat = max N.depth for N in Available|Pending
2946///
2947/// RemainingLatency is the greater of independent and dependent latency.
2948///
2949/// These computations are expensive, especially in DAGs with many edges, so
2950/// only do them if necessary.
2951static unsigned computeRemLatency(SchedBoundary &CurrZone) {
2952 unsigned RemLatency = CurrZone.getDependentLatency();
2953 RemLatency = std::max(RemLatency,
2954 CurrZone.findMaxLatency(CurrZone.Available.elements()));
2955 RemLatency = std::max(RemLatency,
2956 CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2957 return RemLatency;
2958}
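// Example (assumed numbers): with a DependentLatency of 7, a largest
// unscheduled latency of 9 in the Available queue and 4 in the Pending queue,
// the remaining latency is max(7, 9, 4) == 9.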
2959
2960/// Returns true if the current cycle plus remaining latency is greater than
2961/// the critical path in the scheduling region.
2962bool GenericSchedulerBase::shouldReduceLatency(const CandPolicy &Policy,
2963 SchedBoundary &CurrZone,
2964 bool ComputeRemLatency,
2965 unsigned &RemLatency) const {
2966 // The current cycle is already greater than the critical path, so we are
2967 // already latency limited and don't need to compute the remaining latency.
2968 if (CurrZone.getCurrCycle() > Rem.CriticalPath)
2969 return true;
2970
2971 // If we haven't scheduled anything yet, then we aren't latency limited.
2972 if (CurrZone.getCurrCycle() == 0)
2973 return false;
2974
2975 if (ComputeRemLatency)
2976 RemLatency = computeRemLatency(CurrZone);
2977
2978 return RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath;
2979}
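// E.g. (illustrative): with Rem.CriticalPath == 20, CurrCycle == 8 and a
// remaining latency of 15, 15 + 8 > 20, so latency should be reduced; at
// CurrCycle == 0 the zone is never considered latency limited.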
2980
2981/// Set the CandPolicy given a scheduling zone given the current resources and
2982/// latencies inside and outside the zone.
2983void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
2984 SchedBoundary &CurrZone,
2985 SchedBoundary *OtherZone) {
2986 // Apply preemptive heuristics based on the total latency and resources
2987 // inside and outside this zone. Potential stalls should be considered before
2988 // following this policy.
2989
2990 // Compute the critical resource outside the zone.
2991 unsigned OtherCritIdx = 0;
2992 unsigned OtherCount =
2993 OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2994
2995 bool OtherResLimited = false;
2996 unsigned RemLatency = 0;
2997 bool RemLatencyComputed = false;
2998 if (SchedModel->hasInstrSchedModel() && OtherCount != 0) {
2999 RemLatency = computeRemLatency(CurrZone);
3000 RemLatencyComputed = true;
3001 OtherResLimited = checkResourceLimit(SchedModel->getLatencyFactor(),
3002 OtherCount, RemLatency, false);
3003 }
3004
3005 // Schedule aggressively for latency in PostRA mode. We don't check for
3006 // acyclic latency during PostRA, and highly out-of-order processors will
3007 // skip PostRA scheduling.
3008 if (!OtherResLimited &&
3009 (IsPostRA || shouldReduceLatency(Policy, CurrZone, !RemLatencyComputed,
3010 RemLatency))) {
3011 Policy.ReduceLatency |= true;
3012 LLVM_DEBUG(dbgs() << " " << CurrZone.Available.getName()
3013 << " RemainingLatency " << RemLatency << " + "
3014 << CurrZone.getCurrCycle() << "c > CritPath "
3015 << Rem.CriticalPath << "\n");
3016 }
3017 // If the same resource is limiting inside and outside the zone, do nothing.
3018 if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
3019 return;
3020
3021 LLVM_DEBUG(if (CurrZone.isResourceLimited()) {
3022 dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: "
3023 << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) << "\n";
3024 } if (OtherResLimited) dbgs()
3025 << " RemainingLimit: "
3026 << SchedModel->getResourceName(OtherCritIdx) << "\n";
3027 if (!CurrZone.isResourceLimited() && !OtherResLimited) dbgs()
3028 << " Latency limited both directions.\n");
3029
3030 if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
3031 Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
3032
3033 if (OtherResLimited)
3034 Policy.DemandResIdx = OtherCritIdx;
3035}
3036
3037#ifndef NDEBUG
3038const char *GenericSchedulerBase::getReasonStr(
3039 GenericSchedulerBase::CandReason Reason) {
3040 switch (Reason) {
3041 case NoCand: return "NOCAND ";
3042 case Only1: return "ONLY1 ";
3043 case PhysReg: return "PHYS-REG ";
3044 case RegExcess: return "REG-EXCESS";
3045 case RegCritical: return "REG-CRIT ";
3046 case Stall: return "STALL ";
3047 case Cluster: return "CLUSTER ";
3048 case Weak: return "WEAK ";
3049 case RegMax: return "REG-MAX ";
3050 case ResourceReduce: return "RES-REDUCE";
3051 case ResourceDemand: return "RES-DEMAND";
3052 case TopDepthReduce: return "TOP-DEPTH ";
3053 case TopPathReduce: return "TOP-PATH ";
3054 case BotHeightReduce:return "BOT-HEIGHT";
3055 case BotPathReduce: return "BOT-PATH ";
3056 case NextDefUse: return "DEF-USE ";
3057 case NodeOrder: return "ORDER ";
3058 };
3059 llvm_unreachable("Unknown reason!");
3060}
3061
3062void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
3063 PressureChange P;
3064 unsigned ResIdx = 0;
3065 unsigned Latency = 0;
3066 switch (Cand.Reason) {
3067 default:
3068 break;
3069 case RegExcess:
3070 P = Cand.RPDelta.Excess;
3071 break;
3072 case RegCritical:
3073 P = Cand.RPDelta.CriticalMax;
3074 break;
3075 case RegMax:
3076 P = Cand.RPDelta.CurrentMax;
3077 break;
3078 case ResourceReduce:
3079 ResIdx = Cand.Policy.ReduceResIdx;
3080 break;
3081 case ResourceDemand:
3082 ResIdx = Cand.Policy.DemandResIdx;
3083 break;
3084 case TopDepthReduce:
3085 Latency = Cand.SU->getDepth();
3086 break;
3087 case TopPathReduce:
3088 Latency = Cand.SU->getHeight();
3089 break;
3090 case BotHeightReduce:
3091 Latency = Cand.SU->getHeight();
3092 break;
3093 case BotPathReduce:
3094 Latency = Cand.SU->getDepth();
3095 break;
3096 }
3097 dbgs() << " Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
3098 if (P.isValid())
3099 dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
3100 << ":" << P.getUnitInc() << " ";
3101 else
3102 dbgs() << " ";
3103 if (ResIdx)
3104 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
3105 else
3106 dbgs() << " ";
3107 if (Latency)
3108 dbgs() << " " << Latency << " cycles ";
3109 else
3110 dbgs() << " ";
3111 dbgs() << '\n';
3112}
3113#endif
3114
3115namespace llvm {
3116/// Return true if this heuristic determines order.
3117/// TODO: Consider refactor return type of these functions as integer or enum,
3118/// as we may need to differentiate whether TryCand is better than Cand.
3119bool tryLess(int TryVal, int CandVal,
3120 GenericSchedulerBase::SchedCandidate &TryCand,
3121 GenericSchedulerBase::SchedCandidate &Cand,
3122 GenericSchedulerBase::CandReason Reason) {
3123 if (TryVal < CandVal) {
3124 TryCand.Reason = Reason;
3125 return true;
3126 }
3127 if (TryVal > CandVal) {
3128 if (Cand.Reason > Reason)
3129 Cand.Reason = Reason;
3130 return true;
3131 }
3132 return false;
3133}
3134
3135bool tryGreater(int TryVal, int CandVal,
3136 GenericSchedulerBase::SchedCandidate &TryCand,
3137 GenericSchedulerBase::SchedCandidate &Cand,
3138 GenericSchedulerBase::CandReason Reason) {
3139 if (TryVal > CandVal) {
3140 TryCand.Reason = Reason;
3141 return true;
3142 }
3143 if (TryVal < CandVal) {
3144 if (Cand.Reason > Reason)
3145 Cand.Reason = Reason;
3146 return true;
3147 }
3148 return false;
3149}
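// Usage note: tryLess/tryGreater return true whenever the comparison is
// decisive in either direction. TryCand.Reason is set when TryCand wins;
// otherwise Cand.Reason is updated if this reason ranks higher. A false
// return means a tie, and the caller falls through to the next heuristic.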
3150
3151bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
3152 GenericSchedulerBase::SchedCandidate &Cand,
3153 SchedBoundary &Zone) {
3154 if (Zone.isTop()) {
3155 // Prefer the candidate with the lesser depth, but only if one of them has
3156 // depth greater than the total latency scheduled so far, otherwise either
3157 // of them could be scheduled now with no stall.
3158 if (std::max(TryCand.SU->getDepth(), Cand.SU->getDepth()) >
3159 Zone.getScheduledLatency()) {
3160 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
3161 TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
3162 return true;
3163 }
3164 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
3165 TryCand, Cand, GenericSchedulerBase::TopPathReduce))
3166 return true;
3167 } else {
3168 // Prefer the candidate with the lesser height, but only if one of them has
3169 // height greater than the total latency scheduled so far, otherwise either
3170 // of them could be scheduled now with no stall.
3171 if (std::max(TryCand.SU->getHeight(), Cand.SU->getHeight()) >
3172 Zone.getScheduledLatency()) {
3173 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
3174 TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
3175 return true;
3176 }
3177 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
3178 TryCand, Cand, GenericSchedulerBase::BotPathReduce))
3179 return true;
3180 }
3181 return false;
3182}
3183} // end namespace llvm
3184
3185static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
3186 LLVM_DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
3187 << GenericSchedulerBase::getReasonStr(Reason) << '\n');
3188}
3189
3190static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) {
3191 tracePick(Cand.Reason, Cand.AtTop);
3192}
3193
3194void GenericScheduler::initialize(ScheduleDAGMI *dag) {
3195 assert(dag->hasVRegLiveness() &&
3196 "(PreRA)GenericScheduler needs vreg liveness");
3197 DAG = static_cast<ScheduleDAGMILive*>(dag);
3198 SchedModel = DAG->getSchedModel();
3199 TRI = DAG->TRI;
3200
3201 if (RegionPolicy.ComputeDFSResult)
3202 DAG->computeDFSResult();
3203
3204 Rem.init(DAG, SchedModel);
3205 Top.init(DAG, SchedModel, &Rem);
3206 Bot.init(DAG, SchedModel, &Rem);
3207
3208 // Initialize resource counts.
3209
3210 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
3211 // are disabled, then these HazardRecs will be disabled.
3213 if (!Top.HazardRec) {
3214 Top.HazardRec =
3216 Itin, DAG);
3217 }
3218 if (!Bot.HazardRec) {
3219 Bot.HazardRec =
3221 Itin, DAG);
3222 }
3223 TopCand.SU = nullptr;
3224 BotCand.SU = nullptr;
3225}
3226
3227/// Initialize the per-region scheduling policy.
3228void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
3229 MachineBasicBlock::iterator End,
3230 unsigned NumRegionInstrs) {
3231 const MachineFunction &MF = *Begin->getMF();
3232 const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
3233
3234 // Avoid setting up the register pressure tracker for small regions to save
3235 // compile time. As a rough heuristic, only track pressure when the number of
3236 // schedulable instructions exceeds half the integer register file.
3237 RegionPolicy.ShouldTrackPressure = true;
3238 for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
3239 MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
3240 if (TLI->isTypeLegal(LegalIntVT)) {
3241 unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
3242 TLI->getRegClassFor(LegalIntVT));
3243 RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
3244 }
3245 }
3246
3247 // For generic targets, we default to bottom-up, because it's simpler and more
3248 // compile-time optimizations have been implemented in that direction.
3249 RegionPolicy.OnlyBottomUp = true;
3250
3251 // Allow the subtarget to override default policy.
3252 MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);
3253
3254 // After subtarget overrides, apply command line options.
3255 if (!EnableRegPressure) {
3256 RegionPolicy.ShouldTrackPressure = false;
3257 RegionPolicy.ShouldTrackLaneMasks = false;
3258 }
3259
3260 // Check -misched-topdown/bottomup can force or unforce scheduling direction.
3261 // e.g. -misched-bottomup=false allows scheduling in both directions.
3262 assert((!ForceTopDown || !ForceBottomUp) &&
3263 "-misched-topdown incompatible with -misched-bottomup");
3264 if (ForceBottomUp.getNumOccurrences() > 0) {
3265 RegionPolicy.OnlyBottomUp = ForceBottomUp;
3266 if (RegionPolicy.OnlyBottomUp)
3267 RegionPolicy.OnlyTopDown = false;
3268 }
3269 if (ForceTopDown.getNumOccurrences() > 0) {
3270 RegionPolicy.OnlyTopDown = ForceTopDown;
3271 if (RegionPolicy.OnlyTopDown)
3272 RegionPolicy.OnlyBottomUp = false;
3273 }
3274}
3275
3276void GenericScheduler::dumpPolicy() const {
3277 // Cannot completely remove virtual function even in release mode.
3278#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3279 dbgs() << "GenericScheduler RegionPolicy: "
3280 << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
3281 << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
3282 << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
3283 << "\n";
3284#endif
3285}
3286
3287/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
3288/// critical path by more cycles than it takes to drain the instruction buffer.
3289/// We estimate an upper bound on in-flight instructions as:
3290///
3291/// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
3292/// InFlightIterations = AcyclicPath / CyclesPerIteration
3293/// InFlightResources = InFlightIterations * LoopResources
3294///
3295/// TODO: Check execution resources in addition to IssueCount.
3296void GenericScheduler::checkAcyclicLatency() {
3297 if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
3298 return;
3299
3300 // Scaled number of cycles per loop iteration.
3301 unsigned IterCount =
3304 // Scaled acyclic critical path.
3305 unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
3306 // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
3307 unsigned InFlightCount =
3308 (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
3309 unsigned BufferLimit =
3311
3312 Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
3313
3314 LLVM_DEBUG(
3315 dbgs() << "IssueCycles="
3317 << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
3318 << "c NumIters=" << (AcyclicCount + IterCount - 1) / IterCount
3319 << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
3320 << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
3321 if (Rem.IsAcyclicLatencyLimited) dbgs() << " ACYCLIC LATENCY LIMIT\n");
3322}
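// Worked example (assumed numbers): with IterCount == 40, AcyclicCount == 30
// and Rem.RemIssueCount == 40, InFlightCount == (30*40 + 39) / 40 == 30; if
// MicroOpBufferSize * MicroOpFactor == 16, then 30 > 16 and the loop is
// flagged as acyclic-latency limited.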
3323
3326
3327 // Some roots may not feed into ExitSU. Check all of them in case.
3328 for (const SUnit *SU : Bot.Available) {
3329 if (SU->getDepth() > Rem.CriticalPath)
3330 Rem.CriticalPath = SU->getDepth();
3331 }
3332 LLVM_DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
3333 if (DumpCriticalPathLength) {
3334 errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
3335 }
3336
3337 if (EnableCyclicPath && SchedModel->getMicroOpBufferSize() > 0) {
3338 Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
3339 checkAcyclicLatency();
3340 }
3341}
3342
3343namespace llvm {
3344bool tryPressure(const PressureChange &TryP,
3345 const PressureChange &CandP,
3346 GenericSchedulerBase::SchedCandidate &TryCand,
3347 GenericSchedulerBase::SchedCandidate &Cand,
3348 GenericSchedulerBase::CandReason Reason,
3349 const TargetRegisterInfo *TRI,
3350 const MachineFunction &MF) {
3351 // If one candidate decreases and the other increases, go with it.
3352 // Invalid candidates have UnitInc==0.
3353 if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
3354 Reason)) {
3355 return true;
3356 }
3357 // Do not compare the magnitude of pressure changes between top and bottom
3358 // boundary.
3359 if (Cand.AtTop != TryCand.AtTop)
3360 return false;
3361
3362 // If both candidates affect the same set in the same boundary, go with the
3363 // smallest increase.
3364 unsigned TryPSet = TryP.getPSetOrMax();
3365 unsigned CandPSet = CandP.getPSetOrMax();
3366 if (TryPSet == CandPSet) {
3367 return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
3368 Reason);
3369 }
3370
3371 int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
3372 std::numeric_limits<int>::max();
3373
3374 int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
3375 std::numeric_limits<int>::max();
3376
3377 // If the candidates are decreasing pressure, reverse priority.
3378 if (TryP.getUnitInc() < 0)
3379 std::swap(TryRank, CandRank);
3380 return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
3381}
3382
3383unsigned getWeakLeft(const SUnit *SU, bool isTop) {
3384 return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
3385}
3386
3387/// Minimize physical register live ranges. Regalloc wants them adjacent to
3388/// their physreg def/use.
3389///
3390/// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
3391/// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
3392/// with the operation that produces or consumes the physreg. We'll do this when
3393/// regalloc has support for parallel copies.
3394int biasPhysReg(const SUnit *SU, bool isTop) {
3395 const MachineInstr *MI = SU->getInstr();
3396
3397 if (MI->isCopy()) {
3398 unsigned ScheduledOper = isTop ? 1 : 0;
3399 unsigned UnscheduledOper = isTop ? 0 : 1;
3400 // If we have already scheduled the physreg produce/consumer, immediately
3401 // schedule the copy.
3402 if (MI->getOperand(ScheduledOper).getReg().isPhysical())
3403 return 1;
3404 // If the physreg is at the boundary, defer it. Otherwise schedule it
3405 // immediately to free the dependent. We can hoist the copy later.
3406 bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
3407 if (MI->getOperand(UnscheduledOper).getReg().isPhysical())
3408 return AtBoundary ? -1 : 1;
3409 }
3410
3411 if (MI->isMoveImmediate()) {
3412 // If we have a move immediate and all successors have been assigned, bias
3413 // towards scheduling this later. Make sure all register defs are to
3414 // physical registers.
3415 bool DoBias = true;
3416 for (const MachineOperand &Op : MI->defs()) {
3417 if (Op.isReg() && !Op.getReg().isPhysical()) {
3418 DoBias = false;
3419 break;
3420 }
3421 }
3422
3423 if (DoBias)
3424 return isTop ? -1 : 1;
3425 }
3426
3427 return 0;
3428}
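// Return convention, as consumed by the tryGreater(biasPhysReg(...)) call in
// tryCandidate below: a positive value biases toward scheduling the node now,
// a negative value toward deferring it, and zero is neutral.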
3429} // end namespace llvm
3430
3431void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
3432 bool AtTop,
3433 const RegPressureTracker &RPTracker,
3434 RegPressureTracker &TempTracker) {
3435 Cand.SU = SU;
3436 Cand.AtTop = AtTop;
3437 if (DAG->isTrackingPressure()) {
3438 if (AtTop) {
3439 TempTracker.getMaxDownwardPressureDelta(
3440 Cand.SU->getInstr(),
3441 Cand.RPDelta,
3442 DAG->getRegionCriticalPSets(),
3443 DAG->getRegPressure().MaxSetPressure);
3444 } else {
3445 if (VerifyScheduling) {
3446 TempTracker.getMaxUpwardPressureDelta(
3447 Cand.SU->getInstr(),
3448 &DAG->getPressureDiff(Cand.SU),
3449 Cand.RPDelta,
3450 DAG->getRegionCriticalPSets(),
3451 DAG->getRegPressure().MaxSetPressure);
3452 } else {
3453 RPTracker.getUpwardPressureDelta(
3454 Cand.SU->getInstr(),
3455 DAG->getPressureDiff(Cand.SU),
3456 Cand.RPDelta,
3457 DAG->getRegionCriticalPSets(),
3458 DAG->getRegPressure().MaxSetPressure);
3459 }
3460 }
3461 }
3462 LLVM_DEBUG(if (Cand.RPDelta.Excess.isValid()) dbgs()
3463 << " Try SU(" << Cand.SU->NodeNum << ") "
3465 << Cand.RPDelta.Excess.getUnitInc() << "\n");
3466}
3467
3468/// Apply a set of heuristics to a new candidate. Heuristics are currently
3469/// hierarchical. This may be more efficient than a graduated cost model because
3470/// we don't need to evaluate all aspects of the model for each node in the
3471/// queue. But it's really done to make the heuristics easier to debug and
3472/// statistically analyze.
3473///
3474/// \param Cand provides the policy and current best candidate.
3475/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3476/// \param Zone describes the scheduled zone that we are extending, or nullptr
3477/// if Cand is from a different zone than TryCand.
3478/// \return \c true if TryCand is better than Cand (Reason is NOT NoCand)
3479bool GenericScheduler::tryCandidate(SchedCandidate &Cand,
3480 SchedCandidate &TryCand,
3481 SchedBoundary *Zone) const {
3482 // Initialize the candidate if needed.
3483 if (!Cand.isValid()) {
3484 TryCand.Reason = NodeOrder;
3485 return true;
3486 }
3487
3488 // Bias physreg defs and copies to their uses and definitions, respectively.
3489 if (tryGreater(biasPhysReg(TryCand.SU, TryCand.AtTop),
3490 biasPhysReg(Cand.SU, Cand.AtTop), TryCand, Cand, PhysReg))
3491 return TryCand.Reason != NoCand;
3492
3493 // Avoid exceeding the target's limit.
3494 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
3495 Cand.RPDelta.Excess,
3496 TryCand, Cand, RegExcess, TRI,
3497 DAG->MF))
3498 return TryCand.Reason != NoCand;
3499
3500 // Avoid increasing the max critical pressure in the scheduled region.
3501 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
3502 Cand.RPDelta.CriticalMax,
3503 TryCand, Cand, RegCritical, TRI,
3504 DAG->MF))
3505 return TryCand.Reason != NoCand;
3506
 3507 // We only compare a subset of features when comparing nodes between the
 3508 // Top and Bottom boundaries. Some properties are simply incomparable; in many
 3509 // other instances we should only override the other boundary if something
 3510 // is a clearly good pick on one boundary. Skip heuristics that are more
 3511 // "tie-breaking" in nature.
3512 bool SameBoundary = Zone != nullptr;
3513 if (SameBoundary) {
3514 // For loops that are acyclic path limited, aggressively schedule for
 3515 // latency. Within a single cycle, whenever CurrMOps > 0, allow normal
3516 // heuristics to take precedence.
3517 if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
3518 tryLatency(TryCand, Cand, *Zone))
3519 return TryCand.Reason != NoCand;
3520
3521 // Prioritize instructions that read unbuffered resources by stall cycles.
3522 if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
3523 Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3524 return TryCand.Reason != NoCand;
3525 }
3526
3527 // Keep clustered nodes together to encourage downstream peephole
3528 // optimizations which may reduce resource requirements.
3529 //
3530 // This is a best effort to set things up for a post-RA pass. Optimizations
3531 // like generating loads of multiple registers should ideally be done within
3532 // the scheduler pass by combining the loads during DAG postprocessing.
3533 const SUnit *CandNextClusterSU =
3534 Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
3535 const SUnit *TryCandNextClusterSU =
3536 TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
3537 if (tryGreater(TryCand.SU == TryCandNextClusterSU,
3538 Cand.SU == CandNextClusterSU,
3539 TryCand, Cand, Cluster))
3540 return TryCand.Reason != NoCand;
3541
3542 if (SameBoundary) {
3543 // Weak edges are for clustering and other constraints.
3544 if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
3545 getWeakLeft(Cand.SU, Cand.AtTop),
3546 TryCand, Cand, Weak))
3547 return TryCand.Reason != NoCand;
3548 }
3549
3550 // Avoid increasing the max pressure of the entire region.
3551 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
3552 Cand.RPDelta.CurrentMax,
3553 TryCand, Cand, RegMax, TRI,
3554 DAG->MF))
3555 return TryCand.Reason != NoCand;
3556
3557 if (SameBoundary) {
3558 // Avoid critical resource consumption and balance the schedule.
3559 TryCand.initResourceDelta(DAG, SchedModel);
 3560 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
 3561 TryCand, Cand, ResourceReduce))
3562 return TryCand.Reason != NoCand;
 3563 if (tryGreater(TryCand.ResDelta.DemandedResources,
 3564 Cand.ResDelta.DemandedResources,
 3565 TryCand, Cand, ResourceDemand))
3566 return TryCand.Reason != NoCand;
3567
3568 // Avoid serializing long latency dependence chains.
3569 // For acyclic path limited loops, latency was already checked above.
3570 if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency &&
3571 !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone))
3572 return TryCand.Reason != NoCand;
3573
3574 // Fall through to original instruction order.
3575 if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
3576 || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
3577 TryCand.Reason = NodeOrder;
3578 return true;
3579 }
3580 }
3581
3582 return false;
3583}
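// --- Illustrative sketch (editor's addition, not part of MachineScheduler.cpp).
// It shows the hierarchical comparison idiom tryCandidate() uses above: each
// heuristic either decides the comparison or falls through to the next one,
// ending with original instruction order. Cand and pickBetter are hypothetical
// stand-ins, not the real SchedCandidate machinery.
#include <string>

struct Cand {
  int PressureExcess; // lower is better
  int StallCycles;    // lower is better
  unsigned NodeNum;   // original order, final tie-break
};

// Returns true if Try should replace Best, recording the deciding reason.
static bool pickBetter(const Cand &Best, const Cand &Try, std::string &Reason) {
  if (Try.PressureExcess != Best.PressureExcess) {
    Reason = "reg-excess";
    return Try.PressureExcess < Best.PressureExcess;
  }
  if (Try.StallCycles != Best.StallCycles) {
    Reason = "stall";
    return Try.StallCycles < Best.StallCycles;
  }
  Reason = "node-order";
  return Try.NodeNum < Best.NodeNum;
}
// Because the first decisive heuristic wins, cheaper checks can be ordered
// ahead of expensive ones and the deciding reason is easy to trace in debug
// output -- the motivation stated in the comment block above.
// --- End of illustrative sketch.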
3584
3585/// Pick the best candidate from the queue.
3586///
3587/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
3588/// DAG building. To adjust for the current scheduling location we need to
3589/// maintain the number of vreg uses remaining to be top-scheduled.
3590void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
 3591 const CandPolicy &ZonePolicy,
3592 const RegPressureTracker &RPTracker,
3593 SchedCandidate &Cand) {
3594 // getMaxPressureDelta temporarily modifies the tracker.
3595 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
3596
3597 ReadyQueue &Q = Zone.Available;
3598 for (SUnit *SU : Q) {
3599
3600 SchedCandidate TryCand(ZonePolicy);
3601 initCandidate(TryCand, SU, Zone.isTop(), RPTracker, TempTracker);
3602 // Pass SchedBoundary only when comparing nodes from the same boundary.
3603 SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
3604 if (tryCandidate(Cand, TryCand, ZoneArg)) {
3605 // Initialize resource delta if needed in case future heuristics query it.
3606 if (TryCand.ResDelta == SchedResourceDelta())
3607 TryCand.initResourceDelta(DAG, SchedModel);
3608 Cand.setBest(TryCand);
 3609 LLVM_DEBUG(traceCandidate(Cand));
 3610 }
3611 }
3612}
3613
3614/// Pick the best candidate node from either the top or bottom queue.
3615SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
 3616 // Schedule as far as possible in the direction of no choice. This is most
3617 // efficient, but also provides the best heuristics for CriticalPSets.
3618 if (SUnit *SU = Bot.pickOnlyChoice()) {
3619 IsTopNode = false;
3620 tracePick(Only1, false);
3621 return SU;
3622 }
3623 if (SUnit *SU = Top.pickOnlyChoice()) {
3624 IsTopNode = true;
3625 tracePick(Only1, true);
3626 return SU;
3627 }
3628 // Set the bottom-up policy based on the state of the current bottom zone and
3629 // the instructions outside the zone, including the top zone.
3630 CandPolicy BotPolicy;
3631 setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
3632 // Set the top-down policy based on the state of the current top zone and
3633 // the instructions outside the zone, including the bottom zone.
3634 CandPolicy TopPolicy;
3635 setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);
3636
3637 // See if BotCand is still valid (because we previously scheduled from Top).
3638 LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
3639 if (!BotCand.isValid() || BotCand.SU->isScheduled ||
3640 BotCand.Policy != BotPolicy) {
3641 BotCand.reset(CandPolicy());
3642 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
3643 assert(BotCand.Reason != NoCand && "failed to find the first candidate");
3644 } else {
3645 LLVM_DEBUG(traceCandidate(BotCand));
3646#ifndef NDEBUG
3647 if (VerifyScheduling) {
3648 SchedCandidate TCand;
3649 TCand.reset(CandPolicy());
3650 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
3651 assert(TCand.SU == BotCand.SU &&
3652 "Last pick result should correspond to re-picking right now");
3653 }
3654#endif
3655 }
3656
3657 // Check if the top Q has a better candidate.
3658 LLVM_DEBUG(dbgs() << "Picking from Top:\n");
3659 if (!TopCand.isValid() || TopCand.SU->isScheduled ||
3660 TopCand.Policy != TopPolicy) {
3661 TopCand.reset(CandPolicy());
3662 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
3663 assert(TopCand.Reason != NoCand && "failed to find the first candidate");
3664 } else {
3665 LLVM_DEBUG(traceCandidate(TopCand));
3666#ifndef NDEBUG
3667 if (VerifyScheduling) {
3668 SchedCandidate TCand;
3669 TCand.reset(CandPolicy());
3670 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
3671 assert(TCand.SU == TopCand.SU &&
3672 "Last pick result should correspond to re-picking right now");
3673 }
3674#endif
3675 }
3676
3677 // Pick best from BotCand and TopCand.
3678 assert(BotCand.isValid());
3679 assert(TopCand.isValid());
3680 SchedCandidate Cand = BotCand;
3681 TopCand.Reason = NoCand;
3682 if (tryCandidate(Cand, TopCand, nullptr)) {
3683 Cand.setBest(TopCand);
 3684 LLVM_DEBUG(traceCandidate(Cand));
 3685 }
3686
3687 IsTopNode = Cand.AtTop;
3688 tracePick(Cand);
3689 return Cand.SU;
3690}
3691
3692/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
3693SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
 3694 if (DAG->top() == DAG->bottom()) {
3695 assert(Top.Available.empty() && Top.Pending.empty() &&
3696 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
3697 return nullptr;
3698 }
3699 SUnit *SU;
3700 do {
3701 if (RegionPolicy.OnlyTopDown) {
3702 SU = Top.pickOnlyChoice();
3703 if (!SU) {
3704 CandPolicy NoPolicy;
3705 TopCand.reset(NoPolicy);
3706 pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
3707 assert(TopCand.Reason != NoCand && "failed to find a candidate");
3708 tracePick(TopCand);
3709 SU = TopCand.SU;
3710 }
3711 IsTopNode = true;
3712 } else if (RegionPolicy.OnlyBottomUp) {
3713 SU = Bot.pickOnlyChoice();
3714 if (!SU) {
3715 CandPolicy NoPolicy;
3716 BotCand.reset(NoPolicy);
3717 pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
3718 assert(BotCand.Reason != NoCand && "failed to find a candidate");
3719 tracePick(BotCand);
3720 SU = BotCand.SU;
3721 }
3722 IsTopNode = false;
3723 } else {
3724 SU = pickNodeBidirectional(IsTopNode);
3725 }
3726 } while (SU->isScheduled);
3727
3728 if (SU->isTopReady())
3729 Top.removeReady(SU);
3730 if (SU->isBottomReady())
3731 Bot.removeReady(SU);
3732
3733 LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
3734 << *SU->getInstr());
3735 return SU;
3736}
3737
3738void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) {
 3739 MachineBasicBlock::iterator InsertPos = SU->getInstr();
3740 if (!isTop)
3741 ++InsertPos;
3742 SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
3743
3744 // Find already scheduled copies with a single physreg dependence and move
3745 // them just above the scheduled instruction.
3746 for (SDep &Dep : Deps) {
3747 if (Dep.getKind() != SDep::Data ||
3748 !Register::isPhysicalRegister(Dep.getReg()))
3749 continue;
3750 SUnit *DepSU = Dep.getSUnit();
3751 if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
3752 continue;
3753 MachineInstr *Copy = DepSU->getInstr();
3754 if (!Copy->isCopy() && !Copy->isMoveImmediate())
3755 continue;
3756 LLVM_DEBUG(dbgs() << " Rescheduling physreg copy ";
3757 DAG->dumpNode(*Dep.getSUnit()));
3758 DAG->moveInstruction(Copy, InsertPos);
3759 }
3760}
3761
3762/// Update the scheduler's state after scheduling a node. This is the same node
3763/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
3764/// update its state based on the current cycle before MachineSchedStrategy
3765/// does.
3766///
3767/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3768/// them here. See comments in biasPhysReg.
3769void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3770 if (IsTopNode) {
3771 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3772 Top.bumpNode(SU);
3773 if (SU->hasPhysRegUses)
3774 reschedulePhysReg(SU, true);
3775 } else {
3776 SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
3777 Bot.bumpNode(SU);
3778 if (SU->hasPhysRegDefs)
3779 reschedulePhysReg(SU, false);
3780 }
3781}
3782
3783/// Create the standard converging machine scheduler. This will be used as the
3784/// default scheduler if the target does not set a default.
3785ScheduleDAGMILive *llvm::createGenericSchedLive(MachineSchedContext *C) {
 3786 ScheduleDAGMILive *DAG =
3787 new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
3788 // Register DAG post-processors.
3789 //
3790 // FIXME: extend the mutation API to allow earlier mutations to instantiate
3791 // data and pass it to later mutations. Have a single mutation that gathers
3792 // the interesting nodes in one pass.
 3793 DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));
 3794 return DAG;
3795}
3796
3797static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
 3798 return createGenericSchedLive(C);
3799}
3800
3801static MachineSchedRegistry
3802GenericSchedRegistry("converge", "Standard converging scheduler.",
 3803 createConvergingSched);
3804
3805//===----------------------------------------------------------------------===//
3806// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3807//===----------------------------------------------------------------------===//
3808
3809void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
 3810 DAG = Dag;
3811 SchedModel = DAG->getSchedModel();
3812 TRI = DAG->TRI;
3813
3814 Rem.init(DAG, SchedModel);
3815 Top.init(DAG, SchedModel, &Rem);
3816 BotRoots.clear();
3817
3818 // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
3819 // or are disabled, then these HazardRecs will be disabled.
 3820 const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
 3821 if (!Top.HazardRec) {
 3822 Top.HazardRec =
 3823 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
 3824 Itin, DAG);
3825 }
3826}
3827
3828void PostGenericScheduler::registerRoots() {
 3829 Rem.CriticalPath = DAG->ExitSU.getDepth();
3830
3831 // Some roots may not feed into ExitSU. Check all of them in case.
3832 for (const SUnit *SU : BotRoots) {
3833 if (SU->getDepth() > Rem.CriticalPath)
3834 Rem.CriticalPath = SU->getDepth();
3835 }
3836 LLVM_DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
 3837 if (DumpCriticalPathLength) {
 3838 errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
3839 }
3840}
3841
3842/// Apply a set of heuristics to a new candidate for PostRA scheduling.
3843///
3844/// \param Cand provides the policy and current best candidate.
3845/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3846/// \return \c true if TryCand is better than Cand (Reason is NOT NoCand)
3847bool PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
 3848 SchedCandidate &TryCand) {
3849 // Initialize the candidate if needed.
3850 if (!Cand.isValid()) {
3851 TryCand.Reason = NodeOrder;
3852 return true;
3853 }
3854
3855 // Prioritize instructions that read unbuffered resources by stall cycles.
3856 if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
3857 Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3858 return TryCand.Reason != NoCand;
3859
3860 // Keep clustered nodes together.
3861 if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(),
3862 Cand.SU == DAG->getNextClusterSucc(),
3863 TryCand, Cand, Cluster))
3864 return TryCand.Reason != NoCand;
3865
3866 // Avoid critical resource consumption and balance the schedule.
 3867 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
 3868 TryCand, Cand, ResourceReduce))
3869 return TryCand.Reason != NoCand;
 3870 if (tryGreater(TryCand.ResDelta.DemandedResources,
 3871 Cand.ResDelta.DemandedResources,
 3872 TryCand, Cand, ResourceDemand))
3873 return TryCand.Reason != NoCand;
3874
3875 // Avoid serializing long latency dependence chains.
3876 if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
3877 return TryCand.Reason != NoCand;
3878 }
3879
3880 // Fall through to original instruction order.
3881 if (TryCand.SU->NodeNum < Cand.SU->NodeNum) {
3882 TryCand.Reason = NodeOrder;
3883 return true;
3884 }
3885
3886 return false;
3887}
3888
3889void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
 3890 ReadyQueue &Q = Top.Available;
3891 for (SUnit *SU : Q) {
3892 SchedCandidate TryCand(Cand.Policy);
3893 TryCand.SU = SU;
3894 TryCand.AtTop = true;
3895 TryCand.initResourceDelta(DAG, SchedModel);
3896 if (tryCandidate(Cand, TryCand)) {
3897 Cand.setBest(TryCand);
 3898 LLVM_DEBUG(traceCandidate(Cand));
 3899 }
3900 }
3901}
3902
3903/// Pick the next node to schedule.
3904SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
 3905 if (DAG->top() == DAG->bottom()) {
3906 assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
3907 return nullptr;
3908 }
3909 SUnit *SU;
3910 do {
3911 SU = Top.pickOnlyChoice();
3912 if (SU) {
3913 tracePick(Only1, true);
3914 } else {
3915 CandPolicy NoPolicy;
3916 SchedCandidate TopCand(NoPolicy);
3917 // Set the top-down policy based on the state of the current top zone and
3918 // the instructions outside the zone, including the bottom zone.
3919 setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
3920 pickNodeFromQueue(TopCand);
3921 assert(TopCand.Reason != NoCand && "failed to find a candidate");
3922 tracePick(TopCand);
3923 SU = TopCand.SU;
3924 }
3925 } while (SU->isScheduled);
3926
3927 IsTopNode = true;
3928 Top.removeReady(SU);
3929
3930 LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
3931 << *SU->getInstr());
3932 return SU;
3933}
3934
3935/// Called after ScheduleDAGMI has scheduled an instruction and updated
3936/// scheduled/remaining flags in the DAG nodes.
3937void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3938 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3939 Top.bumpNode(SU);
3940}
3941
3942ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) {
 3943 return new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C),
3944 /*RemoveKillFlags=*/true);
3945}
3946
3947//===----------------------------------------------------------------------===//
3948// ILP Scheduler. Currently for experimental analysis of heuristics.
3949//===----------------------------------------------------------------------===//
3950
3951namespace {
3952
3953/// Order nodes by the ILP metric.
3954struct ILPOrder {
3955 const SchedDFSResult *DFSResult = nullptr;
3956 const BitVector *ScheduledTrees = nullptr;
3957 bool MaximizeILP;
3958
3959 ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {}
3960
3961 /// Apply a less-than relation on node priority.
3962 ///
3963 /// (Return true if A comes after B in the Q.)
3964 bool operator()(const SUnit *A, const SUnit *B) const {
3965 unsigned SchedTreeA = DFSResult->getSubtreeID(A);
3966 unsigned SchedTreeB = DFSResult->getSubtreeID(B);
3967 if (SchedTreeA != SchedTreeB) {
3968 // Unscheduled trees have lower priority.
3969 if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
3970 return ScheduledTrees->test(SchedTreeB);
3971
3972 // Trees with shallower connections have lower priority.
3973 if (DFSResult->getSubtreeLevel(SchedTreeA)
3974 != DFSResult->getSubtreeLevel(SchedTreeB)) {
3975 return DFSResult->getSubtreeLevel(SchedTreeA)
3976 < DFSResult->getSubtreeLevel(SchedTreeB);
3977 }
3978 }
3979 if (MaximizeILP)
3980 return DFSResult->getILP(A) < DFSResult->getILP(B);
3981 else
3982 return DFSResult->getILP(A) > DFSResult->getILP(B);
3983 }
3984};
3985
3986/// Schedule based on the ILP metric.
3987class ILPScheduler : public MachineSchedStrategy {
3988 ScheduleDAGMILive *DAG = nullptr;
3989 ILPOrder Cmp;
3990
3991 std::vector<SUnit*> ReadyQ;
3992
3993public:
3994 ILPScheduler(bool MaximizeILP) : Cmp(MaximizeILP) {}
3995
3996 void initialize(ScheduleDAGMI *dag) override {
3997 assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
3998 DAG = static_cast<ScheduleDAGMILive*>(dag);
3999 DAG->computeDFSResult();
4000 Cmp.DFSResult = DAG->getDFSResult();
4001 Cmp.ScheduledTrees = &DAG->getScheduledTrees();
4002 ReadyQ.clear();
4003 }
4004
4005 void registerRoots() override {
4006 // Restore the heap in ReadyQ with the updated DFS results.
4007 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
4008 }
4009
4010 /// Implement MachineSchedStrategy interface.
4011 /// -----------------------------------------
4012
4013 /// Callback to select the highest priority node from the ready Q.
4014 SUnit *pickNode(bool &IsTopNode) override {
4015 if (ReadyQ.empty()) return nullptr;
4016 std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
4017 SUnit *SU = ReadyQ.back();
4018 ReadyQ.pop_back();
4019 IsTopNode = false;
4020 LLVM_DEBUG(dbgs() << "Pick node "
4021 << "SU(" << SU->NodeNum << ") "
4022 << " ILP: " << DAG->getDFSResult()->getILP(SU)
4023 << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU)
4024 << " @"
4025 << DAG->getDFSResult()->getSubtreeLevel(
4026 DAG->getDFSResult()->getSubtreeID(SU))
4027 << '\n'
4028 << "Scheduling " << *SU->getInstr());
4029 return SU;
4030 }
4031
4032 /// Scheduler callback to notify that a new subtree is scheduled.
4033 void scheduleTree(unsigned SubtreeID) override {
4034 std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
4035 }
4036
4037 /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
4038 /// DFSResults, and resort the priority Q.
4039 void schedNode(SUnit *SU, bool IsTopNode) override {
4040 assert(!IsTopNode && "SchedDFSResult needs bottom-up");
4041 }
4042
4043 void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
4044
4045 void releaseBottomNode(SUnit *SU) override {
4046 ReadyQ.push_back(SU);
4047 std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
4048 }
4049};
4050
4051} // end anonymous namespace
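// --- Illustrative sketch (editor's addition, not part of MachineScheduler.cpp).
// It shows the ready-queue discipline ILPScheduler uses above: a std::vector
// kept as a binary heap with make_heap/push_heap/pop_heap, so the whole order
// can be rebuilt (make_heap) whenever priorities change, e.g. in scheduleTree().
#include <algorithm>
#include <cassert>
#include <vector>

struct ByValue {
  bool operator()(int A, int B) const { return A < B; } // max value on top
};

int main() {
  std::vector<int> ReadyQ = {3, 9, 1};
  ByValue Cmp;
  std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);  // registerRoots()

  ReadyQ.push_back(7);                                // releaseBottomNode()
  std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);

  std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);   // pickNode()
  assert(ReadyQ.back() == 9);                         // highest priority first
  ReadyQ.pop_back();
  return 0;
}
// --- End of illustrative sketch.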
4052
4053static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
 4054 return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(true));
4055}
4056static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
 4057 return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(false));
4058}
4059
4061 "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
4063 "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
4064
4065//===----------------------------------------------------------------------===//
4066// Machine Instruction Shuffler for Correctness Testing
4067//===----------------------------------------------------------------------===//
4068
4069#ifndef NDEBUG
4070namespace {
4071
4072/// Apply a less-than relation on the node order, which corresponds to the
4073/// instruction order prior to scheduling. IsReverse implements greater-than.
4074template<bool IsReverse>
4075struct SUnitOrder {
4076 bool operator()(SUnit *A, SUnit *B) const {
4077 if (IsReverse)
4078 return A->NodeNum > B->NodeNum;
4079 else
4080 return A->NodeNum < B->NodeNum;
4081 }
4082};
4083
4084/// Reorder instructions as much as possible.
4085class InstructionShuffler : public MachineSchedStrategy {
4086 bool IsAlternating;
4087 bool IsTopDown;
4088
4089 // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
4090 // gives nodes with a higher number higher priority causing the latest
4091 // instructions to be scheduled first.
 4092 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false>>
 4093 TopQ;
4094
4095 // When scheduling bottom-up, use greater-than as the queue priority.
 4096 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true>>
 4097 BottomQ;
4098
4099public:
4100 InstructionShuffler(bool alternate, bool topdown)
4101 : IsAlternating(alternate), IsTopDown(topdown) {}
4102
4103 void initialize(ScheduleDAGMI*) override {
4104 TopQ.clear();
4105 BottomQ.clear();
4106 }
4107
4108 /// Implement MachineSchedStrategy interface.
4109 /// -----------------------------------------
4110
4111 SUnit *pickNode(bool &IsTopNode) override {
4112 SUnit *SU;
4113 if (IsTopDown) {
4114 do {
4115 if (TopQ.empty()) return nullptr;
4116 SU = TopQ.top();
4117 TopQ.pop();
4118 } while (SU->isScheduled);
4119 IsTopNode = true;
4120 } else {
4121 do {
4122 if (BottomQ.empty()) return nullptr;
4123 SU = BottomQ.top();
4124 BottomQ.pop();
4125 } while (SU->isScheduled);
4126 IsTopNode = false;
4127 }
4128 if (IsAlternating)
4129 IsTopDown = !IsTopDown;
4130 return SU;
4131 }
4132
4133 void schedNode(SUnit *SU, bool IsTopNode) override {}
4134
4135 void releaseTopNode(SUnit *SU) override {
4136 TopQ.push(SU);
4137 }
4138 void releaseBottomNode(SUnit *SU) override {
4139 BottomQ.push(SU);
4140 }
4141};
4142
4143} // end anonymous namespace
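// --- Illustrative sketch (editor's addition, not part of MachineScheduler.cpp).
// It shows how a boolean template parameter flips the ordering of a priority
// queue, as SUnitOrder<IsReverse> does for TopQ/BottomQ above. Plain ints
// stand in for SUnit node numbers.
#include <cassert>
#include <queue>
#include <vector>

template <bool IsReverse> struct NumOrder {
  bool operator()(int A, int B) const { return IsReverse ? A > B : A < B; }
};

int main() {
  // Less-than comparator => largest node number on top (latest instruction).
  std::priority_queue<int, std::vector<int>, NumOrder<false>> TopQ;
  // Greater-than comparator => smallest node number on top.
  std::priority_queue<int, std::vector<int>, NumOrder<true>> BottomQ;
  for (int N : {2, 5, 1}) {
    TopQ.push(N);
    BottomQ.push(N);
  }
  assert(TopQ.top() == 5);
  assert(BottomQ.top() == 1);
  return 0;
}
// --- End of illustrative sketch.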
4144
4145static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
 4146 bool Alternate = !ForceTopDown && !ForceBottomUp;
4147 bool TopDown = !ForceBottomUp;
4148 assert((TopDown || !ForceTopDown) &&
4149 "-misched-topdown incompatible with -misched-bottomup");
4150 return new ScheduleDAGMILive(
4151 C, std::make_unique<InstructionShuffler>(Alternate, TopDown));
4152}
4153
4155 "shuffle", "Shuffle machine instructions alternating directions",
4157#endif // !NDEBUG
4158
4159//===----------------------------------------------------------------------===//
4160// GraphWriter support for ScheduleDAGMILive.
4161//===----------------------------------------------------------------------===//
4162
4163#ifndef NDEBUG
4164namespace llvm {
4165
4166template<> struct GraphTraits<
 4167 ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
4168
4169template<>
4170struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
 4171 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
4172
4173 static std::string getGraphName(const ScheduleDAG *G) {
4174 return std::string(G->MF.getName());
4175 }
4176
 4177 static bool renderGraphFromBottomUp() {
 4178 return true;
4179 }
4180
4181 static bool isNodeHidden(const SUnit *Node, const ScheduleDAG *G) {
4182 if (ViewMISchedCutoff == 0)
4183 return false;
4184 return (Node->Preds.size() > ViewMISchedCutoff
4185 || Node->Succs.size() > ViewMISchedCutoff);
4186 }
4187
4188 /// If you want to override the dot attributes printed for a particular
4189 /// edge, override this method.
4190 static std::string getEdgeAttributes(const SUnit *Node,
4191 SUnitIterator EI,
4192 const ScheduleDAG *Graph) {
4193 if (EI.isArtificialDep())
4194 return "color=cyan,style=dashed";
4195 if (EI.isCtrlDep())
4196 return "color=blue,style=dashed";
4197 return "";
4198 }
4199
4200 static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
4201 std::string Str;
4202 raw_string_ostream SS(Str);
4203 const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
4204 const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
4205 static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
4206 SS << "SU:" << SU->NodeNum;
4207 if (DFS)
4208 SS << " I:" << DFS->getNumInstrs(SU);
4209 return SS.str();
4210 }
4211
4212 static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
4213 return G->getGraphNodeLabel(SU);
4214 }
4215
4216 static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
4217 std::string Str("shape=Mrecord");
4218 const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
4219 const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
4220 static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
4221 if (DFS) {
4222 Str += ",style=filled,fillcolor=\"#";
4223 Str += DOT::getColorString(DFS->getSubtreeID(N));
4224 Str += '"';
4225 }
4226 return Str;
4227 }
4228};
4229
4230} // end namespace llvm
4231#endif // NDEBUG
4232
4233/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
4234/// rendered using 'dot'.
4235void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
4236#ifndef NDEBUG
4237 ViewGraph(this, Name, false, Title);
4238#else
4239 errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
4240 << "systems with Graphviz or gv!\n";
4241#endif // NDEBUG
4242}
4243
4244/// Out-of-line implementation with no arguments is handy for gdb.
4245void ScheduleDAGMI::viewGraph() {
 4246 viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
4247}
4248
4249/// Sort predicate for the intervals stored in an instance of
4250/// ResourceSegments. Intervals are always disjoint (no intersection
4251/// for any pair of intervals), so we can sort the totality of
4252/// the intervals by looking only at the left boundary.
4253static bool sortIntervals(const ResourceSegments::IntervalTy &A,
 4254 const ResourceSegments::IntervalTy &B) {
 4255 return A.first < B.first;
4256}
4257
4258unsigned ResourceSegments::getFirstAvailableAt(
4259 unsigned CurrCycle, unsigned AcquireAtCycle, unsigned Cycle,
4260 std::function<ResourceSegments::IntervalTy(unsigned, unsigned, unsigned)>
4261 IntervalBuilder) const {
4262 assert(std::is_sorted(std::begin(_Intervals), std::end(_Intervals),
4263 sortIntervals) &&
4264 "Cannot execute on an un-sorted set of intervals.");
4265 unsigned RetCycle = CurrCycle;
4266 ResourceSegments::IntervalTy NewInterval =
4267 IntervalBuilder(RetCycle, AcquireAtCycle, Cycle);
4268 for (auto &Interval : _Intervals) {
4269 if (!intersects(NewInterval, Interval))
4270 continue;
4271
4272 // Move the interval right next to the top of the one it
4273 // intersects.
4274 assert(Interval.second > NewInterval.first &&
4275 "Invalid intervals configuration.");
4276 RetCycle += (unsigned)Interval.second - (unsigned)NewInterval.first;
4277 NewInterval = IntervalBuilder(RetCycle, AcquireAtCycle, Cycle);
4278 }
4279 return RetCycle;
4280}
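// --- Illustrative sketch (editor's addition, not part of MachineScheduler.cpp).
// It restates the scan getFirstAvailableAt() performs above: slide a candidate
// [start, start+len) interval forward past every booked interval it collides
// with, assuming the booked set is sorted and disjoint. Interval and
// firstAvailableAt are hypothetical stand-ins for the IntervalTy machinery.
#include <cassert>
#include <utility>
#include <vector>

using Interval = std::pair<unsigned, unsigned>; // [first, second)

static bool collide(Interval A, Interval B) {
  return A.first < B.second && B.first < A.second;
}

static unsigned firstAvailableAt(unsigned CurrCycle, unsigned Len,
                                 const std::vector<Interval> &Booked) {
  unsigned Start = CurrCycle;
  Interval Cand = {Start, Start + Len};
  for (const Interval &I : Booked) {
    if (!collide(Cand, I))
      continue;
    Start += I.second - Cand.first; // bump just past the busy interval
    Cand = {Start, Start + Len};
  }
  return Start;
}

int main() {
  // Cycles [2,4) and [5,7) are booked; a 2-cycle request at cycle 3 lands at 7.
  assert(firstAvailableAt(3, 2, {{2, 4}, {5, 7}}) == 7);
  return 0;
}
// --- End of illustrative sketch.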
4281
4282void ResourceSegments::add(ResourceSegments::IntervalTy A,
 4283 const unsigned CutOff) {
4284 assert(A.first < A.second && "Cannot add empty resource usage");
4285 assert(CutOff > 0 && "0-size interval history has no use.");
4286 assert(all_of(_Intervals,
4287 [&A](const ResourceSegments::IntervalTy &Interval) -> bool {
4288 return !intersects(A, Interval);
4289 }) &&
4290 "A resource is being overwritten");
4291 _Intervals.push_back(A);
4292
4293 sortAndMerge();
4294
4295 // Do not keep the full history of the intervals, just the
4296 // latest #CutOff.
4297 while (_Intervals.size() > CutOff)
4298 _Intervals.pop_front();
4299}
4300
4301bool ResourceSegments::intersects(ResourceSegments::IntervalTy A,
 4302 ResourceSegments::IntervalTy B) {
 4303 assert(A.first <= A.second && "Invalid interval");
4304 assert(B.first <= B.second && "Invalid interval");
4305
4306 // Share one boundary.
4307 if ((A.first == B.first) || (A.second == B.second))
4308 return true;
4309
 4310 // full intersect: [ *** ) B
4311 // [***) A
4312 if ((A.first > B.first) && (A.second < B.second))
4313 return true;
4314
4315 // right intersect: [ ***) B
4316 // [*** ) A
4317 if ((A.first > B.first) && (A.first < B.second) && (A.second > B.second))
4318 return true;
4319
4320 // left intersect: [*** ) B
4321 // [ ***) A
4322 if ((A.first < B.first) && (B.first < A.second) && (B.second > B.first))
4323 return true;
4324
4325 return false;
4326}
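// --- Illustrative sketch (editor's addition, not part of MachineScheduler.cpp).
// It exercises the same overlap cases intersects() enumerates above, restated
// on std::pair so the boundary behavior can be checked standalone. overlaps()
// is an editor-invented helper, not an LLVM API.
#include <cassert>
#include <utility>

using Interval = std::pair<unsigned, unsigned>; // [first, second)

static bool overlaps(Interval A, Interval B) {
  if (A.first == B.first || A.second == B.second) // shared boundary
    return true;
  return A.first < B.second && B.first < A.second; // interior overlap
}

int main() {
  assert(overlaps({1, 4}, {3, 6}));  // right intersect
  assert(overlaps({3, 6}, {1, 4}));  // left intersect
  assert(overlaps({2, 3}, {1, 6}));  // fully contained
  assert(!overlaps({0, 2}, {2, 4})); // adjacent intervals do not intersect
  return 0;
}
// --- End of illustrative sketch.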
4327
4328void ResourceSegments::sortAndMerge() {
4329 if (_Intervals.size() <= 1)
4330 return;
4331
4332 // First sort the collection.
4333 _Intervals.sort(sortIntervals);
4334
 4335 // We can use std::next here because the list holds at least two elements.
4336 auto next = std::next(std::begin(_Intervals));
4337 auto E = std::end(_Intervals);
4338 for (; next != E; ++next) {
4339 if (std::prev(next)->second >= next->first) {
4340 next->first = std::prev(next)->first;
4341 _Intervals.erase(std::prev(next));
4342 continue;
4343 }
4344 }
4345}
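// --- Illustrative sketch (editor's addition, not part of MachineScheduler.cpp).
// It shows a generic version of the sort-then-merge pass sortAndMerge()
// performs above, on a std::list of [first, second) pairs: sort by left
// boundary, then collapse overlapping or touching neighbors. The explicit
// right-boundary max is kept here for the general case.
#include <cassert>
#include <iterator>
#include <list>
#include <utility>

using Interval = std::pair<unsigned, unsigned>;

static void sortAndMergeSketch(std::list<Interval> &Ivals) {
  Ivals.sort([](const Interval &A, const Interval &B) { return A.first < B.first; });
  for (auto It = std::next(Ivals.begin()); It != Ivals.end(); ++It) {
    auto Prev = std::prev(It);
    if (Prev->second >= It->first) { // overlap or touch: merge into *It
      It->first = Prev->first;
      if (It->second < Prev->second)
        It->second = Prev->second;   // keep the larger right boundary
      Ivals.erase(Prev);
    }
  }
}

int main() {
  std::list<Interval> Ivals = {{4, 6}, {1, 2}, {2, 3}};
  sortAndMergeSketch(Ivals);
  assert((Ivals == std::list<Interval>{{1, 3}, {4, 6}}));
  return 0;
}
// --- End of illustrative sketch.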