// NOTE(review): extraction artifact throughout this file — the original
// source's line numbers are fused into the code text (e.g. "80#define") and
// interior lines are missing, so this chunk is not compilable as-is.
// This fragment: pass statistics and command-line knobs for the greedy
// register allocator (split/evict counters, recoloring cutoffs, CSR cost,
// growRegion budget, priority/ordering heuristics). Option names and default
// values are mostly lost to the truncation — recover from upstream.
80#define DEBUG_TYPE "regalloc"
82STATISTIC(NumGlobalSplits,
"Number of split global live ranges");
83STATISTIC(NumLocalSplits,
"Number of split local live ranges");
84STATISTIC(NumEvicted,
"Number of interferences evicted");
88 cl::desc(
"Spill mode for splitting live ranges"),
96 cl::desc(
"Last chance recoloring max depth"),
101 cl::desc(
"Last chance recoloring maximum number of considered"
102 " interference at a time"),
107 cl::desc(
"Exhaustive Search for registers bypassing the depth "
108 "and interference cutoffs of last chance recoloring"),
115 cl::desc(
"Cost for first time use of callee-saved register."),
119 "regalloc-csr-cost-scale",
120 cl::desc(
"Scale for the callee-saved register cost, in percentage."),
124 "grow-region-complexity-budget",
125 cl::desc(
"growRegion() does not scale with the number of BB edges, so "
126 "limit its budget and bail out once we reach the limit."),
130 "greedy-regclass-priority-trumps-globalness",
131 cl::desc(
// NOTE(review): "more important then" should read "than" — left unchanged
// here because cl::desc text is runtime-visible output.
"Change the greedy register allocator's live range priority "
132 "calculation to make the AllocationPriority of the register class "
133 "more important then whether the range is global"),
137 "greedy-reverse-local-assignment",
138 cl::desc(
"Reverse allocation order of local live ranges, such that "
139 "shorter local live ranges will tend to be allocated first"),
143 "split-threshold-for-reg-with-hint",
144 cl::desc(
"The threshold for splitting a virtual register with a hint, in "
// Fragments of the legacy-PM pass wrapper's interior: pass name, analysis
// usage, required/cleared MachineFunctionProperties (NoPHIs in, clears SSA),
// followed by fragments of analysis-pointer initialization (MBFI, Loops) and
// the new-PM pass printer ("greedy<filter>"). Closing braces are lost to the
// extraction gaps.
160 StringRef getPassName()
const override {
return "Greedy Register Allocator"; }
163 void getAnalysisUsage(AnalysisUsage &AU)
const override;
165 bool runOnMachineFunction(MachineFunction &mf)
override;
167 MachineFunctionProperties getRequiredProperties()
const override {
168 return MachineFunctionProperties().setNoPHIs();
171 MachineFunctionProperties getClearedProperties()
const override {
172 return MachineFunctionProperties().setIsSSA();
211 MBFI = Analyses.
MBFI;
213 Loops = Analyses.
Loops;
226 StringRef FilterName = Opts.FilterName.
empty() ?
"all" : Opts.FilterName;
227 OS <<
"greedy<" << FilterName <<
'>';
// Pass boilerplate fragments: constructing the RAGreedy implementation for a
// run, the legacy pass ID anchor, the live-range stage-name table, the two
// createRAGreedy factory returns, and the opening of the legacy
// getAnalysisUsage definition (body missing).
254 RAGreedy Impl(Analyses, Opts.Filter);
296char RAGreedyLegacy::ID = 0;
320const char *
const RAGreedy::StageName[] = {
335 return new RAGreedyLegacy();
339 return new RAGreedyLegacy(Ftor);
342void RAGreedyLegacy::getAnalysisUsage(
AnalysisUsage &AU)
const {
// LiveRangeEdit delegate callbacks (fragments): whether a virtual register's
// interval may be erased (it has a phys assignment per VRM), shrink
// notification, clone bookkeeping via ExtraInfo, and spiller teardown.
// Bodies have interior lines missing (377-388, 391-393, etc.).
374bool RAGreedy::LRE_CanEraseVirtReg(
Register VirtReg) {
375 LiveInterval &LI =
LIS->getInterval(VirtReg);
376 if (
VRM->hasPhys(VirtReg)) {
389void RAGreedy::LRE_WillShrinkVirtReg(
Register VirtReg) {
390 if (!
VRM->hasPhys(VirtReg))
394 LiveInterval &LI =
LIS->getInterval(VirtReg);
400 ExtraInfo->LRE_DidCloneVirtReg(New, Old);
405 if (!Info.inBounds(Old))
414 Info[New] = Info[Old];
418 SpillerInstance.reset();
// enqueue (fragment): pushes a virtual-register live interval onto the
// priority queue. Priority comes from the PriorityAdvisor; the queue key is
// (priority, ~Reg.id()) so that among equal priorities lower vreg numbers
// come out first (bitwise-complemented id).
424void RAGreedy::enqueue(PQueue &CurQueue,
const LiveInterval *LI) {
428 assert(Reg.isVirtual() &&
"Can only enqueue virtual registers");
430 auto Stage = ExtraInfo->getOrInitStage(Reg);
433 ExtraInfo->setStage(Reg, Stage);
436 unsigned Ret = PriorityAdvisor->getPriority(*LI);
440 CurQueue.push(std::make_pair(Ret, ~
Reg.id()));
// DefaultPriorityAdvisor::getPriority (fragment): computes the allocation
// priority for a live interval. Visible pieces: single-MBB ("local") ranges
// are distinguished from global ones, ReverseLocalAssignment flips local
// ordering (priority derived from distance to the interval end), the size
// component is clamped to 24 bits (maxUIntN(24)), the regclass
// AllocationPriority can trump globalness, and ranges with a known physreg
// preference get a boost. Exact bit layout of Prio is lost to truncation.
443unsigned DefaultPriorityAdvisor::getPriority(
const LiveInterval &LI)
const {
456 const TargetRegisterClass &RC = *
MRI->getRegClass(
Reg);
458 (!ReverseLocalAssignment &&
461 unsigned GlobalBit = 0;
464 LIS->intervalIsInOneMBB(LI)) {
468 if (!ReverseLocalAssignment)
474 Prio = Indexes->getZeroIndex().getApproxInstrDistance(LI.
endIndex());
496 Prio = std::min(Prio, (
unsigned)
maxUIntN(24));
499 if (RegClassPriorityTrumpsGlobalness)
508 if (
VRM->hasKnownPreference(
Reg))
// DummyPriorityAdvisor::getPriority (opening only; body missing).
515unsigned DummyPriorityAdvisor::getPriority(
const LiveInterval &LI)
const {
// Fragments of dequeue() and tryAssign(): walk the AllocationOrder for the
// first non-interfering physreg; if only a hint conflicts, either evict the
// hint interference (if the advisor allows) or try splitting around the hint
// register, recording broken hints for later recoloring. A nonzero
// register cost triggers a tryEvict probe for a cheaper register, preferring
// CheapReg when found.
524 if (CurQueue.empty())
541 for (
auto I = Order.
begin(),
E = Order.
end();
I !=
E && !PhysReg; ++
I) {
543 if (!
Matrix->checkInterference(VirtReg, *
I)) {
559 MCRegister PhysHint =
Hint.asMCReg();
562 if (EvictAdvisor->canEvictHintInterference(VirtReg, PhysHint,
564 evictInterference(VirtReg, PhysHint, NewVRegs);
569 if (trySplitAroundHintReg(PhysHint, VirtReg, NewVRegs, Order))
574 SetOfBrokenHints.insert(&VirtReg);
578 uint8_t
Cost = RegCosts[PhysReg.
id()];
585 << (
unsigned)
Cost <<
'\n');
586 MCRegister CheapReg = tryEvict(VirtReg, Order, NewVRegs,
Cost, FixedRegisters);
587 return CheapReg ? CheapReg : PhysReg;
// Fragments: a per-regunit interference predicate over the LiveRegMatrix
// unions; evictInterference(), which unassigns everything overlapping
// VirtReg on PhysReg and stamps evictees with a fresh (monotonically
// non-decreasing) cascade number — the assert guards against illegal
// cascade decreases except for urgent evictions; and an isPhysRegUsed check.
596 auto HasRegUnitInterference = [&](MCRegUnit Unit) {
599 VirtReg,
Matrix->getLiveUnions()[
static_cast<unsigned>(Unit)]);
608 if (
none_of(
TRI->regunits(Reg), HasRegUnitInterference)) {
621void RAGreedy::evictInterference(
const LiveInterval &VirtReg,
627 unsigned Cascade = ExtraInfo->getOrAssignNewCascade(VirtReg.
reg());
630 <<
" interference: Cascade " << Cascade <<
'\n');
634 for (MCRegUnit Unit :
TRI->regunits(PhysReg)) {
651 assert((ExtraInfo->getCascade(Intf->reg()) < Cascade ||
652 (Cascade < ExtraInfo->getCascade(Intf->reg()) &&
653 EvictAdvisor->isUrgentEviction(VirtReg, *Intf)) ||
655 "Cannot decrease cascade number, illegal eviction");
656 ExtraInfo->setCascade(Intf->reg(), Cascade);
669 return !
Matrix->isPhysRegUsed(PhysReg);
// Fragments: the eviction-order limit helper (bails early when even the
// cheapest register in Order meets CostPerUseLimit), tryEvict delegating to
// EvictAdvisor::tryFindEvictionCandidate, and addSplitConstraints(), which
// fills SplitConstraints per use-block from interference entry/exit points
// (first()/last() vs block start / first / last split points), accumulates
// StaticCost by block frequency, and hands the constraints to SpillPlacer.
672std::optional<unsigned>
675 unsigned CostPerUseLimit)
const {
676 unsigned OrderLimit = Order.
getOrder().size();
678 if (CostPerUseLimit <
uint8_t(~0u)) {
682 if (MinCost >= CostPerUseLimit) {
684 << MinCost <<
", no cheaper registers to be found.\n");
701 if (
RegCosts[PhysReg.
id()] >= CostPerUseLimit)
727 MCRegister BestPhys = EvictAdvisor->tryFindEvictionCandidate(
728 VirtReg, Order, CostPerUseLimit, FixedRegisters);
730 evictInterference(VirtReg, BestPhys, NewVRegs);
748 SplitConstraints.resize(UseBlocks.
size());
750 for (
unsigned I = 0;
I != UseBlocks.
size(); ++
I) {
771 if (Intf.
first() <= Indexes->getMBBStartIdx(BC.
Number)) {
785 SA->getFirstSplitPoint(BC.
Number)))
791 if (Intf.
last() >= SA->getLastSplitPoint(BC.
Number)) {
804 StaticCost += SpillPlacer->getBlockFrequency(BC.
Number);
810 SpillPlacer->addConstraints(SplitConstraints);
811 return SpillPlacer->scanActiveBundles();
// addThroughConstraints (fragment): feeds SpillPlacer the constraints for
// live-through blocks in batches of GroupSize (8), buffering constrained
// blocks in BCS[] (flushed via addConstraints when B wraps) and
// transparent blocks in TBS[] (flushed when T wraps). Per block it compares
// interference entry/exit against the block start and the first/last split
// points. The flush of TBS and the final return are lost to truncation.
816bool RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
817 ArrayRef<unsigned> Blocks) {
818 const unsigned GroupSize = 8;
819 SpillPlacement::BlockConstraint BCS[GroupSize];
820 unsigned TBS[GroupSize];
821 unsigned B = 0,
T = 0;
823 for (
unsigned Number : Blocks) {
827 assert(
T < GroupSize &&
"Array overflow");
829 if (++
T == GroupSize) {
836 assert(
B < GroupSize &&
"Array overflow");
840 MachineBasicBlock *
MBB = MF->getBlockNumbered(
Number);
842 if (FirstNonDebugInstr !=
MBB->
end() &&
844 SA->getFirstSplitPoint(
Number)))
847 if (Intf.
first() <= Indexes->getMBBStartIdx(
Number))
853 if (Intf.
last() >= SA->getLastSplitPoint(
Number))
858 if (++
B == GroupSize) {
859 SpillPlacer->addConstraints(
ArrayRef(BCS,
B));
864 SpillPlacer->addConstraints(
ArrayRef(BCS,
B));
// growRegion (fragment): iteratively expands a split candidate's active
// block set from SpillPlacer's recently-positive bundles until it stops
// growing. A complexity budget bounds total blocks visited (bail out when a
// bundle's block list exceeds the remaining budget). Newly added blocks get
// through-constraints; as a heuristic, when the range looks like a loop IV
// and all new blocks sit in the loop headed by the first one, the blocks are
// marked prefer-spill instead.
869bool RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
871 BitVector Todo = SA->getThroughBlocks();
872 SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
873 unsigned AddedTo = 0;
875 unsigned Visited = 0;
880 ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
882 for (
unsigned Bundle : NewBundles) {
884 ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
886 if (Blocks.
size() >= Budget)
888 Budget -= Blocks.
size();
889 for (
unsigned Block : Blocks) {
901 if (ActiveBlocks.
size() == AddedTo)
906 auto NewBlocks =
ArrayRef(ActiveBlocks).slice(AddedTo);
908 if (!addThroughConstraints(Cand.Intf, NewBlocks))
916 bool PrefSpill =
true;
917 if (SA->looksLikeLoopIV() && NewBlocks.size() >= 2) {
922 MachineLoop *
L = Loops->getLoopFor(MF->getBlockNumbered(NewBlocks[0]));
923 if (L &&
L->getHeader()->getNumber() == (
int)NewBlocks[0] &&
924 all_of(NewBlocks.drop_front(), [&](
unsigned Block) {
925 return L == Loops->getLoopFor(MF->getBlockNumbered(Block));
930 SpillPlacer->addPrefSpill(NewBlocks,
true);
932 AddedTo = ActiveBlocks.
size();
935 SpillPlacer->iterate();
// calcCompactRegion (fragment): computes the no-physreg "compact" region
// (only meaningful when there are through-blocks): prepare SpillPlacer,
// add split constraints, grow, finish, and fail if no bundles went live.
// Debug dump lists the live edge bundles.
948bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
950 if (!SA->getNumThroughBlocks())
960 SpillPlacer->prepare(Cand.LiveBundles);
964 if (!addSplitConstraints(Cand.Intf,
Cost)) {
969 if (!growRegion(Cand)) {
974 SpillPlacer->finish();
976 if (!Cand.LiveBundles.any()) {
982 for (
int I : Cand.LiveBundles.set_bits())
983 dbgs() <<
" EB#" <<
I;
// calcBlockSplitCost (fragment): sums per-use-block frequencies as the
// baseline cost of splitting in every block (spill-like split).
991BlockFrequency RAGreedy::calcBlockSplitCost() {
992 BlockFrequency
Cost = BlockFrequency(0);
994 for (
const SplitAnalysis::BlockInfo &BI : UseBlocks) {
997 Cost += SpillPlacer->getBlockFrequency(
Number);
1001 Cost += SpillPlacer->getBlockFrequency(
Number);
// calcGlobalSplitCost (fragment): accumulates the expected cost of a global
// split candidate. For each use block, reads the in/out bundle liveness and
// charges block frequency where the candidate mismatches; for each active
// live-through block, charges once or twice (entry + exit copies) when the
// candidate is live through but interference forces a local gap.
1010BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
1011 const AllocationOrder &Order) {
1012 BlockFrequency GlobalCost = BlockFrequency(0);
1013 const BitVector &LiveBundles = Cand.LiveBundles;
1015 for (
unsigned I = 0;
I != UseBlocks.
size(); ++
I) {
1016 const SplitAnalysis::BlockInfo &BI = UseBlocks[
I];
1017 SpillPlacement::BlockConstraint &BC = SplitConstraints[
I];
1018 bool RegIn = LiveBundles[Bundles->getBundle(BC.
Number,
false)];
1019 bool RegOut = LiveBundles[Bundles->getBundle(BC.
Number,
true)];
1022 Cand.Intf.moveToBlock(BC.
Number);
1029 GlobalCost += SpillPlacer->getBlockFrequency(BC.
Number);
1032 for (
unsigned Number : Cand.ActiveBlocks) {
1033 bool RegIn = LiveBundles[Bundles->getBundle(
Number,
false)];
1034 bool RegOut = LiveBundles[Bundles->getBundle(
Number,
true)];
1035 if (!RegIn && !RegOut)
1037 if (RegIn && RegOut) {
1039 Cand.Intf.moveToBlock(
Number);
1040 if (Cand.Intf.hasInterference()) {
1041 GlobalCost += SpillPlacer->getBlockFrequency(
Number);
1042 GlobalCost += SpillPlacer->getBlockFrequency(
Number);
1047 GlobalCost += SpillPlacer->getBlockFrequency(
Number);
// splitAroundRegion (fragment): performs the actual region split after
// candidates were chosen. Pass 1 walks use blocks, looks up the entering /
// leaving candidate via BundleCand and the block's in/out bundles, and calls
// the appropriate SplitEditor primitive (splitSingleBlock /
// splitLiveThroughBlock / splitRegInBlock / splitRegOutBlock). Pass 2 does
// the same for live-through blocks of every used candidate. Afterwards
// SE->finish produces IntvMap, debug variables are re-targeted, and new
// intervals are staged: interval 0 (remainder) and main intervals that still
// cover >= the original block count are handled specially (details lost in
// the gaps). Ends with an optional machine verifier run.
1064void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
1065 ArrayRef<unsigned> UsedCands) {
1068 const unsigned NumGlobalIntvs = LREdit.
size();
1071 assert(NumGlobalIntvs &&
"No global intervals configured");
1081 for (
const SplitAnalysis::BlockInfo &BI : UseBlocks) {
1083 unsigned IntvIn = 0, IntvOut = 0;
1084 SlotIndex IntfIn, IntfOut;
1086 unsigned CandIn = BundleCand[Bundles->getBundle(
Number,
false)];
1087 if (CandIn != NoCand) {
1088 GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1089 IntvIn = Cand.IntvIdx;
1090 Cand.Intf.moveToBlock(
Number);
1091 IntfIn = Cand.Intf.first();
1095 unsigned CandOut = BundleCand[Bundles->getBundle(
Number,
true)];
1096 if (CandOut != NoCand) {
1097 GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1098 IntvOut = Cand.IntvIdx;
1099 Cand.Intf.moveToBlock(
Number);
1100 IntfOut = Cand.Intf.last();
1105 if (!IntvIn && !IntvOut) {
1107 if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
1108 SE->splitSingleBlock(BI);
1112 if (IntvIn && IntvOut)
1113 SE->splitLiveThroughBlock(
Number, IntvIn, IntfIn, IntvOut, IntfOut);
1115 SE->splitRegInBlock(BI, IntvIn, IntfIn);
1117 SE->splitRegOutBlock(BI, IntvOut, IntfOut);
1123 BitVector Todo = SA->getThroughBlocks();
1124 for (
unsigned UsedCand : UsedCands) {
1125 ArrayRef<unsigned> Blocks = GlobalCand[UsedCand].ActiveBlocks;
1126 for (
unsigned Number : Blocks) {
1131 unsigned IntvIn = 0, IntvOut = 0;
1132 SlotIndex IntfIn, IntfOut;
1134 unsigned CandIn = BundleCand[Bundles->getBundle(
Number,
false)];
1135 if (CandIn != NoCand) {
1136 GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1137 IntvIn = Cand.IntvIdx;
1138 Cand.Intf.moveToBlock(
Number);
1139 IntfIn = Cand.Intf.first();
1142 unsigned CandOut = BundleCand[Bundles->getBundle(
Number,
true)];
1143 if (CandOut != NoCand) {
1144 GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1145 IntvOut = Cand.IntvIdx;
1146 Cand.Intf.moveToBlock(
Number);
1147 IntfOut = Cand.Intf.last();
1149 if (!IntvIn && !IntvOut)
1151 SE->splitLiveThroughBlock(
Number, IntvIn, IntfIn, IntvOut, IntfOut);
1157 SmallVector<unsigned, 8> IntvMap;
1158 SE->finish(&IntvMap);
1159 DebugVars->splitRegister(
Reg, LREdit.
regs(), *
LIS);
1161 unsigned OrigBlocks = SA->getNumLiveBlocks();
1168 for (
unsigned I = 0,
E = LREdit.
size();
I !=
E; ++
I) {
1169 const LiveInterval &
Reg =
LIS->getInterval(LREdit.
get(
I));
1172 if (ExtraInfo->getOrInitStage(
Reg.reg()) !=
RS_New)
1177 if (IntvMap[
I] == 0) {
1184 if (IntvMap[
I] < NumGlobalIntvs) {
1185 if (SA->countLiveBlocks(&
Reg) >= OrigBlocks) {
1186 LLVM_DEBUG(
dbgs() <<
"Main interval covers the same " << OrigBlocks
1187 <<
" blocks as original.\n");
1199 MF->verify(
LIS, Indexes,
"After splitting live range around region",
// tryRegionSplit (fragment): top-level region-split driver. Skips when the
// target vetoes region splitting for this vreg; otherwise computes the
// per-block spill baseline, tries the compact (no-register) region, scores
// candidates via calculateRegionSplitCost against BestCost, and performs
// doRegionSplit when either a candidate or the compact region is viable.
1203MCRegister RAGreedy::tryRegionSplit(
const LiveInterval &VirtReg,
1204 AllocationOrder &Order,
1205 SmallVectorImpl<Register> &NewVRegs) {
1206 if (!
TRI->shouldRegionSplitForVirtReg(*MF, VirtReg))
1208 unsigned NumCands = 0;
1209 BlockFrequency SpillCost = calcBlockSplitCost();
1210 BlockFrequency BestCost;
1213 bool HasCompact = calcCompactRegion(GlobalCand.front());
1221 BestCost = SpillCost;
1226 unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
1230 if (!HasCompact && BestCand == NoCand)
1233 return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
// calculateRegionSplitCostAroundReg (fragment): evaluates one physreg as a
// region-split candidate. When the interference-cache cursor table is full,
// the candidate with the fewest live bundles (never the current best) is
// discarded to make room. The candidate is reset, constrained, grown, and
// scored (constraint cost + calcGlobalSplitCost); it becomes BestCand only
// if strictly cheaper than BestCost.
1236unsigned RAGreedy::calculateRegionSplitCostAroundReg(MCRegister PhysReg,
1237 AllocationOrder &Order,
1238 BlockFrequency &BestCost,
1240 unsigned &BestCand) {
1243 if (NumCands == IntfCache.getMaxCursors()) {
1244 unsigned WorstCount = ~0
u;
1246 for (
unsigned CandIndex = 0; CandIndex != NumCands; ++CandIndex) {
1247 if (CandIndex == BestCand || !GlobalCand[CandIndex].PhysReg)
1249 unsigned Count = GlobalCand[CandIndex].LiveBundles.count();
1250 if (
Count < WorstCount) {
1256 GlobalCand[Worst] = GlobalCand[NumCands];
1257 if (BestCand == NumCands)
1261 if (GlobalCand.size() <= NumCands)
1262 GlobalCand.resize(NumCands+1);
1263 GlobalSplitCandidate &Cand = GlobalCand[NumCands];
1264 Cand.reset(IntfCache, PhysReg);
1266 SpillPlacer->prepare(Cand.LiveBundles);
1267 BlockFrequency
Cost;
1268 if (!addSplitConstraints(Cand.Intf,
Cost)) {
1274 if (
Cost >= BestCost) {
1276 if (BestCand == NoCand)
1277 dbgs() <<
" worse than no bundles\n";
1279 dbgs() <<
" worse than "
1280 <<
printReg(GlobalCand[BestCand].PhysReg,
TRI) <<
'\n';
1284 if (!growRegion(Cand)) {
1289 SpillPlacer->finish();
1292 if (!Cand.LiveBundles.any()) {
1297 Cost += calcGlobalSplitCost(Cand, Order);
1300 for (
int I : Cand.LiveBundles.set_bits())
1301 dbgs() <<
" EB#" <<
I;
1304 if (
Cost < BestCost) {
1305 BestCand = NumCands;
// calculateRegionSplitCost (fragment): loops over the allocation order,
// optionally skipping unused callee-saved registers, scoring each physreg.
1313unsigned RAGreedy::calculateRegionSplitCost(
const LiveInterval &VirtReg,
1314 AllocationOrder &Order,
1315 BlockFrequency &BestCost,
1318 unsigned BestCand = NoCand;
1319 for (MCRegister PhysReg : Order) {
1321 if (IgnoreCSR && EvictAdvisor->isUnusedCalleeSavedReg(PhysReg))
1324 calculateRegionSplitCostAroundReg(PhysReg, Order, BestCost, NumCands,
// doRegionSplit (fragment): commits the chosen region split. Assigns
// bundles to the best candidate (opening a SplitEditor interval for it),
// then — presumably when HasCompact — does the same for the compact
// candidate (GlobalCand.front(), which must have no physreg), and finally
// calls splitAroundRegion. Returns no register: the split vregs are
// re-queued via NewVRegs.
1331MCRegister RAGreedy::doRegionSplit(
const LiveInterval &VirtReg,
1332 unsigned BestCand,
bool HasCompact,
1333 SmallVectorImpl<Register> &NewVRegs) {
1334 SmallVector<unsigned, 8> UsedCands;
1336 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *
LIS,
VRM,
this, &
DeadRemats);
1340 BundleCand.assign(Bundles->getNumBundles(), NoCand);
1343 if (BestCand != NoCand) {
1344 GlobalSplitCandidate &Cand = GlobalCand[BestCand];
1345 if (
unsigned B = Cand.getBundles(BundleCand, BestCand)) {
1347 Cand.IntvIdx = SE->openIntv();
1349 <<
B <<
" bundles, intv " << Cand.IntvIdx <<
".\n");
1356 GlobalSplitCandidate &Cand = GlobalCand.front();
1357 assert(!Cand.PhysReg &&
"Compact region has no physreg");
1358 if (
unsigned B = Cand.getBundles(BundleCand, 0)) {
1360 Cand.IntvIdx = SE->openIntv();
1362 <<
" bundles, intv " << Cand.IntvIdx <<
".\n");
1367 splitAroundRegion(LREdit, UsedCands);
1368 return MCRegister();
// trySplitAroundHintReg (fragment): estimates the copy cost that would be
// saved by keeping VirtReg on its hinted physreg. Walks non-debug copy
// operands, skipping self-copies, mismatched subregs, and uses where the
// relevant (sub)lane isn't live at the copy; accumulates MBFI block
// frequency where the other side already sits on the hint. If the saving is
// nonzero, runs a region-split evaluation around the hint register and
// commits via doRegionSplit. Disabled under optsize and at stage >=
// RS_Split2.
1373bool RAGreedy::trySplitAroundHintReg(MCRegister Hint,
1374 const LiveInterval &VirtReg,
1375 SmallVectorImpl<Register> &NewVRegs,
1376 AllocationOrder &Order) {
1380 if (MF->getFunction().hasOptSize())
1384 if (ExtraInfo->getStage(VirtReg) >=
RS_Split2)
1387 BlockFrequency
Cost = BlockFrequency(0);
1397 for (
const MachineOperand &Opnd :
MRI->reg_nodbg_operands(
Reg)) {
1398 const MachineInstr &
Instr = *Opnd.getParent();
1399 if (!
Instr.isCopy() || Opnd.isImplicit())
1403 const bool IsDef = Opnd.isDef();
1404 const MachineOperand &OtherOpnd =
Instr.getOperand(IsDef);
1407 if (OtherReg ==
Reg)
1410 unsigned SubReg = Opnd.getSubReg();
1411 unsigned OtherSubReg = OtherOpnd.
getSubReg();
1412 if (SubReg && OtherSubReg && SubReg != OtherSubReg)
1416 if (Opnd.readsReg()) {
1417 SlotIndex
Index =
LIS->getInstructionIndex(Instr).getRegSlot();
1420 LaneBitmask
Mask =
TRI->getSubRegIndexLaneMask(SubReg);
1424 if (
any_of(VirtReg.
subranges(), [=](
const LiveInterval::SubRange &S) {
1425 return (S.LaneMask & Mask).any() && S.liveAt(Index);
1430 if (VirtReg.
liveAt(Index))
1435 MCRegister OtherPhysReg =
1437 MCRegister ThisHint = SubReg ?
TRI->getSubReg(Hint, SubReg) :
Hint;
1438 if (OtherPhysReg == ThisHint)
1439 Cost += MBFI->getBlockFreq(
Instr.getParent());
1445 if (
Cost == BlockFrequency(0))
1448 unsigned NumCands = 0;
1449 unsigned BestCand = NoCand;
1450 SA->analyze(&VirtReg);
1451 calculateRegionSplitCostAroundReg(Hint, Order,
Cost, NumCands, BestCand);
1452 if (BestCand == NoCand)
1455 doRegionSplit(VirtReg, BestCand,
false, NewVRegs);
// tryBlockSplit (fragment): splits a global live range into per-basic-block
// pieces via SE->splitSingleBlock for each qualifying use block. After
// SE->finish, debug variables are re-targeted and the interval-0 remainder
// pieces are identified via IntvMap (their treatment is lost in the gap).
// Ends with an optional verifier run; returns no register.
1466MCRegister RAGreedy::tryBlockSplit(
const LiveInterval &VirtReg,
1467 AllocationOrder &Order,
1468 SmallVectorImpl<Register> &NewVRegs) {
1469 assert(&SA->getParent() == &VirtReg &&
"Live range wasn't analyzed");
1472 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *
LIS,
VRM,
this, &
DeadRemats);
1475 for (
const SplitAnalysis::BlockInfo &BI : UseBlocks) {
1476 if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
1477 SE->splitSingleBlock(BI);
1481 return MCRegister();
1484 SmallVector<unsigned, 8> IntvMap;
1485 SE->finish(&IntvMap);
1488 DebugVars->splitRegister(
Reg, LREdit.
regs(), *
LIS);
1492 for (
unsigned I = 0,
E = LREdit.
size();
I !=
E; ++
I) {
1493 const LiveInterval &LI =
LIS->getInterval(LREdit.
get(
I));
1494 if (ExtraInfo->getOrInitStage(LI.
reg()) ==
RS_New && IntvMap[
I] == 0)
1499 MF->verify(
LIS, Indexes,
"After splitting live range around basic blocks",
1501 return MCRegister();
// Helper fragments used by instruction splitting: constraining a regclass
// per-instruction (getRegClassConstraintEffectForVReg against the largest
// legal super class), building a lane mask of read lanes (a subreg-less use
// reads everything), skipping unbundled same-subreg copies, and the final
// test of whether the instruction reads only a lane subset not covered by
// the live lanes (ReadMask vs LiveAtMask & covering lanes).
1514 assert(SuperRC &&
"Invalid register class");
1517 MI->getRegClassConstraintEffectForVReg(
Reg, SuperRC,
TII,
TRI,
1536 if (SubReg == 0 && MO.
isUse()) {
1545 Mask |= ~SubRegMask;
1562 auto DestSrc =
TII->isCopyInstr(*
MI);
1563 if (DestSrc && !
MI->isBundled() &&
1564 DestSrc->Destination->getSubReg() == DestSrc->Source->getSubReg())
1573 LiveAtMask |= S.LaneMask;
1578 return (ReadMask & ~(LiveAtMask &
TRI->getCoveringLanes())).
any();
// tryInstructionSplit (fragment): last-resort split into tiny per-use
// intervals so each piece can be locally constrained/assigned. Requires
// more than one use; for each use instruction that is a full copy (or where
// the super-class has no more allocatable regs than the current class —
// i.e. splitting wouldn't help), a tiny enterIntvBefore/leaveIntvAfter
// interval is carved. Bails returning no register when nothing was edited.
1588MCRegister RAGreedy::tryInstructionSplit(
const LiveInterval &VirtReg,
1589 AllocationOrder &Order,
1590 SmallVectorImpl<Register> &NewVRegs) {
1591 const TargetRegisterClass *CurRC =
MRI->getRegClass(VirtReg.
reg());
1594 bool SplitSubClass =
true;
1597 return MCRegister();
1598 SplitSubClass =
false;
1603 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *
LIS,
VRM,
this, &
DeadRemats);
1607 if (
Uses.size() <= 1)
1608 return MCRegister();
1611 <<
" individual instrs.\n");
1613 const TargetRegisterClass *SuperRC =
1614 TRI->getLargestLegalSuperClass(CurRC, *MF);
1615 unsigned SuperRCNumAllocatableRegs =
1621 for (
const SlotIndex Use :
Uses) {
1622 if (
const MachineInstr *
MI = Indexes->getInstructionFromIndex(Use)) {
1623 if (TII->isFullCopyInstr(*
MI) ||
1625 SuperRCNumAllocatableRegs ==
1636 SlotIndex SegStart = SE->enterIntvBefore(Use);
1637 SlotIndex SegStop = SE->leaveIntvAfter(Use);
1638 SE->useIntv(SegStart, SegStop);
1641 if (LREdit.
empty()) {
1643 return MCRegister();
1646 SmallVector<unsigned, 8> IntvMap;
1647 SE->finish(&IntvMap);
1648 DebugVars->splitRegister(VirtReg.
reg(), LREdit.
regs(), *
LIS);
1651 return MCRegister();
// calcGapWeights (fragment): for a single-block ("local") interval, fills
// GapWeight[i] with the maximum interference weight overlapping the gap
// between use i and use i+1 on PhysReg. First loop walks virtual
// interference from the live unions (advancing Gap to the segment start,
// then max-ing the segment's weight into every overlapped gap); second loop
// walks fixed regunit interference the same way (its weight assignment is
// lost in the gap — presumably huge/infinite to forbid those gaps).
1663void RAGreedy::calcGapWeights(MCRegister PhysReg,
1664 SmallVectorImpl<float> &GapWeight) {
1665 assert(SA->getUseBlocks().size() == 1 &&
"Not a local interval");
1666 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1668 const unsigned NumGaps =
Uses.size()-1;
1671 SlotIndex StartIdx =
1676 GapWeight.
assign(NumGaps, 0.0f);
1679 for (MCRegUnit Unit :
TRI->regunits(PhysReg)) {
1680 if (!
Matrix->query(
const_cast<LiveInterval &
>(SA->getParent()), Unit)
1681 .checkInterference())
1692 Matrix->getLiveUnions()[
static_cast<unsigned>(
Unit)].
find(StartIdx);
1693 for (
unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
1695 while (
Uses[Gap+1].getBoundaryIndex() < IntI.start())
1696 if (++Gap == NumGaps)
1702 const float weight = IntI.value()->weight();
1703 for (; Gap != NumGaps; ++Gap) {
1704 GapWeight[Gap] = std::max(GapWeight[Gap], weight);
1705 if (
Uses[Gap+1].getBaseIndex() >= IntI.stop())
1714 for (MCRegUnit Unit :
TRI->regunits(PhysReg)) {
1720 for (
unsigned Gap = 0;
I !=
E &&
I->start < StopIdx; ++
I) {
1721 while (
Uses[Gap+1].getBoundaryIndex() <
I->start)
1722 if (++Gap == NumGaps)
1727 for (; Gap != NumGaps; ++Gap) {
1729 if (
Uses[Gap+1].getBaseIndex() >=
I->end)
// tryLocalSplit (fragment): splits a single-block interval between two of
// its uses. Needs > 2 uses (NumGaps = Uses.size()-1). Precomputes which
// gaps contain regmask clobbers; then for each physreg in order, computes
// gap weights and searches (SplitBefore, SplitAfter) windows: each window's
// estimated benefit (blockFreq-scaled, penalized by new gap count) is
// compared against its max interference weight (MaxGap); the best positive
// Diff wins. ProgressRequired (stage >= RS_Split2) forces strictly fewer
// gaps so repeated splitting terminates. The window search shrinks from the
// left (recomputing MaxGap only when the dropped gap carried it) and grows
// to the right. Finally the chosen [BestBefore, BestAfter] range becomes a
// new interval via SE, and when no progress was made the main interval is
// marked (interval 1 handling truncated) — guarded by the assert.
1741MCRegister RAGreedy::tryLocalSplit(
const LiveInterval &VirtReg,
1742 AllocationOrder &Order,
1743 SmallVectorImpl<Register> &NewVRegs) {
1746 if (SA->getUseBlocks().size() != 1)
1747 return MCRegister();
1749 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1759 if (
Uses.size() <= 2)
1760 return MCRegister();
1761 const unsigned NumGaps =
Uses.size()-1;
1764 dbgs() <<
"tryLocalSplit: ";
1765 for (
const auto &Use :
Uses)
1772 SmallVector<unsigned, 8> RegMaskGaps;
1773 if (
Matrix->checkRegMaskInterference(VirtReg)) {
1780 unsigned RE = RMS.
size();
1781 for (
unsigned I = 0;
I != NumGaps && RI != RE; ++
I) {
1792 RegMaskGaps.push_back(
I);
1819 bool ProgressRequired = ExtraInfo->getStage(VirtReg) >=
RS_Split2;
1822 unsigned BestBefore = NumGaps;
1823 unsigned BestAfter = 0;
1826 const float blockFreq =
1827 SpillPlacer->getBlockFrequency(BI.
MBB->
getNumber()).getFrequency() *
1828 (1.0f / MBFI->getEntryFreq().getFrequency());
1831 for (MCRegister PhysReg : Order) {
1835 calcGapWeights(PhysReg, GapWeight);
1838 if (
Matrix->checkRegMaskInterference(VirtReg, PhysReg))
1839 for (
unsigned Gap : RegMaskGaps)
1846 unsigned SplitBefore = 0, SplitAfter = 1;
1850 float MaxGap = GapWeight[0];
1854 const bool LiveBefore = SplitBefore != 0 || BI.
LiveIn;
1855 const bool LiveAfter = SplitAfter != NumGaps || BI.
LiveOut;
1858 <<
'-' <<
Uses[SplitAfter] <<
" I=" << MaxGap);
1861 if (!LiveBefore && !LiveAfter) {
1869 unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;
1872 bool Legal = !ProgressRequired || NewGaps < NumGaps;
1881 blockFreq * (NewGaps + 1),
1882 Uses[SplitBefore].distance(
Uses[SplitAfter]) +
1890 float Diff = EstWeight - MaxGap;
1891 if (Diff > BestDiff) {
1894 BestBefore = SplitBefore;
1895 BestAfter = SplitAfter;
1902 if (++SplitBefore < SplitAfter) {
1905 if (GapWeight[SplitBefore - 1] >= MaxGap) {
1906 MaxGap = GapWeight[SplitBefore];
1907 for (
unsigned I = SplitBefore + 1;
I != SplitAfter; ++
I)
1908 MaxGap = std::max(MaxGap, GapWeight[
I]);
1916 if (SplitAfter >= NumGaps) {
1922 MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
1927 if (BestBefore == NumGaps)
1928 return MCRegister();
1931 <<
Uses[BestAfter] <<
", " << BestDiff <<
", "
1932 << (BestAfter - BestBefore + 1) <<
" instrs\n");
1934 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *
LIS,
VRM,
this, &
DeadRemats);
1938 SlotIndex SegStart = SE->enterIntvBefore(
Uses[BestBefore]);
1939 SlotIndex SegStop = SE->leaveIntvAfter(
Uses[BestAfter]);
1940 SE->useIntv(SegStart, SegStop);
1941 SmallVector<unsigned, 8> IntvMap;
1942 SE->finish(&IntvMap);
1943 DebugVars->splitRegister(VirtReg.
reg(), LREdit.
regs(), *
LIS);
1947 bool LiveBefore = BestBefore != 0 || BI.
LiveIn;
1948 bool LiveAfter = BestAfter != NumGaps || BI.
LiveOut;
1949 unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
1950 if (NewGaps >= NumGaps) {
1952 assert(!ProgressRequired &&
"Didn't make progress when it was required.");
1953 for (
unsigned I = 0,
E = IntvMap.
size();
I !=
E; ++
I)
1954 if (IntvMap[
I] == 1) {
1962 return MCRegister();
// trySplit (fragment): dispatcher over the split strategies. Nothing past
// stage RS_Spill. Local (single-MBB) ranges: tryLocalSplit, then
// tryInstructionSplit as fallback. Global ranges (timed as
// "Global Splitting"): tryRegionSplit while below RS_Split2, then
// tryBlockSplit.
1972MCRegister RAGreedy::trySplit(
const LiveInterval &VirtReg,
1973 AllocationOrder &Order,
1974 SmallVectorImpl<Register> &NewVRegs,
1977 if (ExtraInfo->getStage(VirtReg) >=
RS_Spill)
1978 return MCRegister();
1981 if (
LIS->intervalIsInOneMBB(VirtReg)) {
1984 SA->analyze(&VirtReg);
1985 MCRegister PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
1986 if (PhysReg || !NewVRegs.
empty())
1988 return tryInstructionSplit(VirtReg, Order, NewVRegs);
1991 NamedRegionTimer
T(
"global_split",
"Global Splitting",
TimerGroupName,
1994 SA->analyze(&VirtReg);
1999 if (ExtraInfo->getStage(VirtReg) <
RS_Split2) {
2000 MCRegister PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
2001 if (PhysReg || !NewVRegs.
empty())
2006 return tryBlockSplit(VirtReg, Order, NewVRegs);
// First two lines: tail of an overlap helper (equal regs, else
// TRI.regsOverlap). Then mayRecolorAllInterferences (fragment): collects
// the virtual interferences on PhysReg into RecoloringCandidates, aborting
// (CO_Interf cutoff, or "not recolorable" — e.g. already RS_Done in the
// same regclass, or pinned in FixedRegisters) when any interference cannot
// be recolored.
2029 if (PhysReg == AssignedReg)
2031 return TRI.regsOverlap(PhysReg, AssignedReg);
2042bool RAGreedy::mayRecolorAllInterferences(
2043 MCRegister PhysReg,
const LiveInterval &VirtReg,
2044 SmallLISet &RecoloringCandidates,
const SmallVirtRegSet &FixedRegisters) {
2045 const TargetRegisterClass *CurRC =
MRI->getRegClass(VirtReg.
reg());
2047 for (MCRegUnit Unit :
TRI->regunits(PhysReg)) {
2048 LiveIntervalUnion::Query &Q =
Matrix->query(VirtReg, Unit);
2055 CutOffInfo |= CO_Interf;
2070 if (((ExtraInfo->getStage(*Intf) ==
RS_Done &&
2071 MRI->getRegClass(Intf->reg()) == CurRC &&
2075 FixedRegisters.
count(Intf->reg())) {
2077 dbgs() <<
"Early abort: the interference is not recolorable.\n");
2080 RecoloringCandidates.insert(Intf);
// tryLastChanceRecoloring (fragment): tries to free a physreg for VirtReg
// by recursively recoloring its interferences. Respects the depth cutoff
// (CO_Depth). For each candidate physreg: skip if any interference is
// non-virtual or non-recolorable; otherwise enqueue the interferences,
// snapshot their current assignments on RecolorStack, tentatively assign
// VirtReg, and recurse via tryRecoloringCandidates. On success, a special
// case detects that recursion deleted/unassigned VirtReg itself (fixed-reg
// entry erased, returns no register). On failure, restore FixedRegisters,
// unassign VirtReg, drop candidate intervals that recursion deleted, and
// roll the recolor stack back in two passes (unassign newest-first where
// still assigned, then reassign the snapshot oldest-first) before resizing
// the stack to its entry size.
2129MCRegister RAGreedy::tryLastChanceRecoloring(
2130 const LiveInterval &VirtReg, AllocationOrder &Order,
2132 RecoloringStack &RecolorStack,
unsigned Depth) {
2133 if (!
TRI->shouldUseLastChanceRecoloringForVirtReg(*MF, VirtReg))
2136 LLVM_DEBUG(
dbgs() <<
"Try last chance recoloring for " << VirtReg <<
'\n');
2138 const ssize_t EntryStackSize = RecolorStack.size();
2142 "Last chance recoloring should really be last chance");
2148 LLVM_DEBUG(
dbgs() <<
"Abort because max depth has been reached.\n");
2149 CutOffInfo |= CO_Depth;
2154 SmallLISet RecoloringCandidates;
2162 for (MCRegister PhysReg : Order) {
2166 RecoloringCandidates.clear();
2167 CurrentNewVRegs.
clear();
2170 if (
Matrix->checkInterference(VirtReg, PhysReg) >
2173 dbgs() <<
"Some interferences are not with virtual registers.\n");
2180 if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
2182 LLVM_DEBUG(
dbgs() <<
"Some interferences cannot be recolored.\n");
2189 PQueue RecoloringQueue;
2190 for (
const LiveInterval *RC : RecoloringCandidates) {
2192 enqueue(RecoloringQueue, RC);
2194 "Interferences are supposed to be with allocated variables");
2197 RecolorStack.push_back(std::make_pair(RC,
VRM->getPhys(ItVirtReg)));
2206 Matrix->assign(VirtReg, PhysReg);
2215 if (tryRecoloringCandidates(RecoloringQueue, CurrentNewVRegs,
2216 FixedRegisters, RecolorStack,
Depth)) {
2221 if (
VRM->hasPhys(ThisVirtReg)) {
2222 Matrix->unassign(VirtReg);
2227 LLVM_DEBUG(
dbgs() <<
"tryRecoloringCandidates deleted a fixed register "
2229 FixedRegisters.
erase(ThisVirtReg);
2230 return MCRegister();
2237 FixedRegisters = SaveFixedRegisters;
2238 Matrix->unassign(VirtReg);
2244 for (
Register R : CurrentNewVRegs) {
2245 if (RecoloringCandidates.count(&
LIS->getInterval(R)))
2256 for (ssize_t
I = RecolorStack.size() - 1;
I >= EntryStackSize; --
I) {
2257 const LiveInterval *LI;
2259 std::tie(LI, PhysReg) = RecolorStack[
I];
2261 if (
VRM->hasPhys(LI->
reg()))
2265 for (
size_t I = EntryStackSize;
I != RecolorStack.size(); ++
I) {
2266 const LiveInterval *LI;
2268 std::tie(LI, PhysReg) = RecolorStack[
I];
2269 if (!LI->
empty() && !
MRI->reg_nodbg_empty(LI->
reg()))
2270 Matrix->assign(*LI, PhysReg);
2274 RecolorStack.resize(EntryStackSize);
// tryRecoloringCandidates (fragment): drains the recoloring queue, calling
// selectOrSplitImpl at Depth+1 for each interval. ~0u (allocation failure)
// or no-register on a non-empty interval aborts; an empty interval
// legitimately needs no register. Successful picks are committed with
// Matrix->assign.
2289bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
2290 SmallVectorImpl<Register> &NewVRegs,
2292 RecoloringStack &RecolorStack,
2294 while (!RecoloringQueue.empty()) {
2295 const LiveInterval *LI =
dequeue(RecoloringQueue);
2297 MCRegister PhysReg = selectOrSplitImpl(*LI, NewVRegs, FixedRegisters,
2298 RecolorStack,
Depth + 1);
2303 if (PhysReg == ~0u || (!PhysReg && !LI->
empty()))
2307 assert(LI->
empty() &&
"Only empty live-range do not require a register");
2309 <<
" succeeded. Empty LI.\n");
2313 <<
" succeeded with: " <<
printReg(PhysReg,
TRI) <<
'\n');
2315 Matrix->assign(*LI, PhysReg);
// selectOrSplit wrapper (fragment): resets the cutoff state, runs
// selectOrSplitImpl, and on total failure (~0U) emits a context error that
// names which cutoff fired (depth, interference, or both) and points the
// user at -fexhaustive-register-search. Followed by a calcSpillCost
// fragment: sums (reads + writes) weighted by block frequency over the
// interval's instructions, skipping meta instructions.
2327 CutOffInfo = CO_None;
2328 LLVMContext &Ctx = MF->getFunction().getContext();
2330 RecoloringStack RecolorStack;
2332 selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters, RecolorStack);
2333 if (Reg == ~0U && (CutOffInfo != CO_None)) {
2334 uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
2335 if (CutOffEncountered == CO_Depth)
2336 Ctx.emitError(
"register allocation failed: maximum depth for recoloring "
2337 "reached. Use -fexhaustive-register-search to skip "
2339 else if (CutOffEncountered == CO_Interf)
2340 Ctx.emitError(
"register allocation failed: maximum interference for "
2341 "recoloring reached. Use -fexhaustive-register-search "
2343 else if (CutOffEncountered == (CO_Depth | CO_Interf))
2344 Ctx.emitError(
"register allocation failed: maximum interference and "
2345 "depth for recoloring reached. Use "
2346 "-fexhaustive-register-search to skip cutoffs");
2362 if (
MI->isMetaInstruction())
2367 auto [Reads, Writes] =
MI->readsWritesVirtualRegister(LI.
reg());
2368 auto MBBFreq = SpillPlacer->getBlockFrequency(
MI->getParent()->getNumber());
2369 SpillCost += (Reads + Writes) * MBBFreq.getFrequency();
// tryAssignCSRFirstTime (fragment): decides whether using a callee-saved
// register for the first time is worth its CSRCost. Cheap-to-spill ranges
// instead lower CostPerUseLimit to 1 to avoid the CSR; pre-split ranges try
// a region split bounded by CSRCost before conceding the CSR. Followed by
// an aboutToRemoveInterval fragment (drop broken-hint bookkeeping) and
// initializeCSRCost (fragment): scales the raw CSR cost by the real entry
// frequency relative to the fixed reference entry frequency, guarding
// against overflow (BranchProbability path when ActualEntry fits 32 bits).
2381MCRegister RAGreedy::tryAssignCSRFirstTime(
2382 const LiveInterval &VirtReg, AllocationOrder &Order, MCRegister PhysReg,
2383 uint8_t &CostPerUseLimit, SmallVectorImpl<Register> &NewVRegs) {
2387 SA->analyze(&VirtReg);
2388 if (calcSpillCost(VirtReg) >= CSRCost)
2393 CostPerUseLimit = 1;
2394 return MCRegister();
2396 if (ExtraInfo->getStage(VirtReg) <
RS_Split) {
2399 SA->analyze(&VirtReg);
2400 unsigned NumCands = 0;
2401 BlockFrequency BestCost = CSRCost;
2402 unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
2404 if (BestCand == NoCand)
2409 doRegionSplit(VirtReg, BestCand,
false, NewVRegs);
2410 return MCRegister();
2417 SetOfBrokenHints.remove(&LI);
2420void RAGreedy::initializeCSRCost() {
2430 if (!CSRCost.getFrequency())
2434 uint64_t ActualEntry = MBFI->getEntryFreq().getFrequency();
2440 if (ActualEntry < FixedEntry) {
2442 }
else if (ActualEntry <= UINT32_MAX) {
2444 CSRCost /= BranchProbability(FixedEntry, ActualEntry);
2448 BlockFrequency(CSRCost.getFrequency() * (ActualEntry / FixedEntry));
2451 uint64_t EntryFreq = MBFI->getEntryFreq().getFrequency();
2452 CSRCost = BlockFrequency(
TRI->getCSRFirstUseCost() * EntryFreq);
// collectHintInfo (fragment): gathers copy-hint edges for Reg. For each
// non-implicit copy operand, resolves the other side to a physreg (via
// getMatchingSuperReg for subreg copies, or VRM for virtual regs), skips
// self-copies and incompatible subreg pairs, and records
// (block frequency, other reg) HintInfo entries.
2463void RAGreedy::collectHintInfo(
Register Reg, HintsInfo &Out) {
2464 const TargetRegisterClass *RC =
MRI->getRegClass(
Reg);
2466 for (
const MachineOperand &Opnd :
MRI->reg_nodbg_operands(
Reg)) {
2467 const MachineInstr &
Instr = *Opnd.getParent();
2468 if (!
Instr.isCopy() || Opnd.isImplicit())
2472 const MachineOperand &OtherOpnd =
Instr.getOperand(Opnd.isDef());
2474 if (OtherReg ==
Reg)
2476 unsigned OtherSubReg = OtherOpnd.
getSubReg();
2477 unsigned SubReg = Opnd.getSubReg();
2480 MCRegister OtherPhysReg;
2483 OtherPhysReg =
TRI->getMatchingSuperReg(OtherReg, OtherSubReg, RC);
2485 OtherPhysReg =
TRI->getMatchingSuperReg(OtherReg, SubReg, RC);
2487 OtherPhysReg = OtherReg;
2489 OtherPhysReg =
VRM->getPhys(OtherReg);
2493 if (SubReg && OtherSubReg && SubReg != OtherSubReg)
2499 Out.push_back(HintInfo(MBFI->getBlockFreq(
Instr.getParent()), OtherReg,
// getBrokenHintFreq (fragment): sums the frequency of hint entries whose
// physreg differs from the given one (i.e. copies that stay non-coalesced).
2508BlockFrequency RAGreedy::getBrokenHintFreq(
const HintsInfo &
List,
2509 MCRegister PhysReg) {
2510 BlockFrequency
Cost = BlockFrequency(0);
2511 for (
const HintInfo &Info :
List) {
2512 if (
Info.PhysReg != PhysReg)
// tryHintRecoloring (fragment): worklist propagation of a hint's physreg.
// Starting from VirtReg's assignment, visits connected copy-related vregs
// (Visited set) and, where the current assignment differs but PhysReg is
// legal and interference-free, compares broken-hint frequency before/after
// and reassigns to PhysReg when it lowers the copy cost. Virtual hint
// neighbors are pushed for further processing; loops until the candidate
// list is empty. tryHintsRecoloring then drives this over all recorded
// broken hints that still have a phys assignment.
2526void RAGreedy::tryHintRecoloring(
const LiveInterval &VirtReg) {
2532 MCRegister PhysReg =
VRM->getPhys(
Reg);
2535 SmallSet<Register, 4> Visited = {
Reg};
2544 MCRegister CurrPhys =
VRM->getPhys(
Reg);
2549 "We have an unallocated variable which should have been handled");
2555 LiveInterval &LI =
LIS->getInterval(
Reg);
2558 if (CurrPhys != PhysReg && (!
MRI->getRegClass(
Reg)->contains(PhysReg) ||
2559 Matrix->checkInterference(LI, PhysReg)))
2563 <<
") is recolorable.\n");
2567 collectHintInfo(
Reg, Info);
2570 if (CurrPhys != PhysReg) {
2572 BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys);
2573 BlockFrequency NewCopiesCost = getBrokenHintFreq(Info, PhysReg);
2577 if (OldCopiesCost < NewCopiesCost) {
2587 Matrix->assign(LI, PhysReg);
2591 for (
const HintInfo &HI : Info) {
2593 if (
HI.Reg.isVirtual() && Visited.
insert(
HI.Reg).second)
2596 }
while (!RecoloringCandidates.
empty());
2635void RAGreedy::tryHintsRecoloring() {
2636 for (
const LiveInterval *LI : SetOfBrokenHints) {
2638 "Recoloring is possible only for virtual registers");
2641 if (!
VRM->hasPhys(LI->
reg()))
2643 tryHintRecoloring(*LI);
2647MCRegister RAGreedy::selectOrSplitImpl(
const LiveInterval &VirtReg,
2648 SmallVectorImpl<Register> &NewVRegs,
2650 RecoloringStack &RecolorStack,
2652 uint8_t CostPerUseLimit = uint8_t(~0u);
2656 if (MCRegister PhysReg =
2657 tryAssign(VirtReg, Order, NewVRegs, FixedRegisters)) {
2661 if (CSRCost.getFrequency() &&
2662 EvictAdvisor->isUnusedCalleeSavedReg(PhysReg) && NewVRegs.
empty()) {
2663 MCRegister CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
2664 CostPerUseLimit, NewVRegs);
2665 if (CSRReg || !NewVRegs.
empty())
2673 if (!NewVRegs.
empty())
2674 return MCRegister();
2678 << ExtraInfo->getCascade(VirtReg.
reg()) <<
'\n');
2684 if (MCRegister PhysReg =
2685 tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit,
2693 if (Hint && Hint != PhysReg)
2694 SetOfBrokenHints.insert(&VirtReg);
2699 assert((NewVRegs.
empty() ||
Depth) &&
"Cannot append to existing NewVRegs");
2705 ExtraInfo->setStage(VirtReg,
RS_Split);
2708 return MCRegister();
2713 unsigned NewVRegSizeBefore = NewVRegs.
size();
2714 MCRegister PhysReg = trySplit(VirtReg, Order, NewVRegs, FixedRegisters);
2715 if (PhysReg || (NewVRegs.
size() - NewVRegSizeBefore))
2722 return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
2723 RecolorStack,
Depth);
2737 DebugVars->splitRegister(r, LRE.regs(), *
LIS);
2739 DebugVars->splitRegister(r, LRE.regs(), *
LIS);
2742 MF->verify(
LIS, Indexes,
"After spilling", &
errs());
2746 return MCRegister();
2749void RAGreedy::RAGreedyStats::report(MachineOptimizationRemarkMissed &R) {
2750 using namespace ore;
2752 R <<
NV(
"NumSpills", Spills) <<
" spills ";
2753 R <<
NV(
"TotalSpillsCost", SpillsCost) <<
" total spills cost ";
2756 R <<
NV(
"NumFoldedSpills", FoldedSpills) <<
" folded spills ";
2757 R <<
NV(
"TotalFoldedSpillsCost", FoldedSpillsCost)
2758 <<
" total folded spills cost ";
2761 R <<
NV(
"NumReloads", Reloads) <<
" reloads ";
2762 R <<
NV(
"TotalReloadsCost", ReloadsCost) <<
" total reloads cost ";
2764 if (FoldedReloads) {
2765 R <<
NV(
"NumFoldedReloads", FoldedReloads) <<
" folded reloads ";
2766 R <<
NV(
"TotalFoldedReloadsCost", FoldedReloadsCost)
2767 <<
" total folded reloads cost ";
2769 if (ZeroCostFoldedReloads)
2770 R <<
NV(
"NumZeroCostFoldedReloads", ZeroCostFoldedReloads)
2771 <<
" zero cost folded reloads ";
2773 R <<
NV(
"NumVRCopies",
Copies) <<
" virtual registers copies ";
2774 R <<
NV(
"TotalCopiesCost", CopiesCost) <<
" total copies cost ";
2778RAGreedy::RAGreedyStats RAGreedy::computeStats(MachineBasicBlock &
MBB) {
2779 RAGreedyStats
Stats;
2780 const MachineFrameInfo &MFI = MF->getFrameInfo();
2783 auto isSpillSlotAccess = [&MFI](
const MachineMemOperand *
A) {
2785 A->getPseudoValue())->getFrameIndex());
2787 auto isPatchpointInstr = [](
const MachineInstr &
MI) {
2788 return MI.getOpcode() == TargetOpcode::PATCHPOINT ||
2789 MI.getOpcode() == TargetOpcode::STACKMAP ||
2790 MI.getOpcode() == TargetOpcode::STATEPOINT;
2792 for (MachineInstr &
MI :
MBB) {
2793 auto DestSrc = TII->isCopyInstr(
MI);
2795 const MachineOperand &Dest = *DestSrc->Destination;
2796 const MachineOperand &Src = *DestSrc->Source;
2802 SrcReg =
VRM->getPhys(SrcReg);
2803 if (SrcReg && Src.getSubReg())
2804 SrcReg =
TRI->getSubReg(SrcReg, Src.getSubReg());
2807 DestReg =
VRM->getPhys(DestReg);
2811 if (SrcReg != DestReg)
2817 SmallVector<const MachineMemOperand *, 2>
Accesses;
2826 if (TII->hasLoadFromStackSlot(
MI,
Accesses) &&
2828 if (!isPatchpointInstr(
MI)) {
2833 std::pair<unsigned, unsigned> NonZeroCostRange =
2834 TII->getPatchpointUnfoldableRange(
MI);
2835 SmallSet<unsigned, 16> FoldedReloads;
2836 SmallSet<unsigned, 16> ZeroCostFoldedReloads;
2837 for (
unsigned Idx = 0,
E =
MI.getNumOperands(); Idx <
E; ++Idx) {
2838 MachineOperand &MO =
MI.getOperand(Idx);
2841 if (Idx >= NonZeroCostRange.first && Idx < NonZeroCostRange.second)
2847 for (
unsigned Slot : FoldedReloads)
2848 ZeroCostFoldedReloads.
erase(Slot);
2849 Stats.FoldedReloads += FoldedReloads.size();
2850 Stats.ZeroCostFoldedReloads += ZeroCostFoldedReloads.
size();
2854 if (TII->hasStoreToStackSlot(
MI,
Accesses) &&
2861 float RelFreq = MBFI->getBlockFreqRelativeToEntryBlock(&
MBB);
2863 Stats.FoldedReloadsCost = RelFreq *
Stats.FoldedReloads;
2865 Stats.FoldedSpillsCost = RelFreq *
Stats.FoldedSpills;
2870RAGreedy::RAGreedyStats RAGreedy::reportStats(MachineLoop *L) {
2871 RAGreedyStats
Stats;
2874 for (MachineLoop *SubLoop : *L)
2875 Stats.add(reportStats(SubLoop));
2877 for (MachineBasicBlock *
MBB :
L->getBlocks())
2879 if (Loops->getLoopFor(
MBB) == L)
2882 if (!
Stats.isEmpty()) {
2883 using namespace ore;
2886 MachineOptimizationRemarkMissed
R(
DEBUG_TYPE,
"LoopSpillReloadCopies",
2887 L->getStartLoc(),
L->getHeader());
2889 R <<
"generated in loop";
2896void RAGreedy::reportStats() {
2899 RAGreedyStats
Stats;
2900 for (MachineLoop *L : *Loops)
2901 Stats.add(reportStats(L));
2903 for (MachineBasicBlock &
MBB : *MF)
2904 if (!Loops->getLoopFor(&
MBB))
2906 if (!
Stats.isEmpty()) {
2907 using namespace ore;
2911 if (
auto *SP = MF->getFunction().getSubprogram())
2913 MachineOptimizationRemarkMissed
R(
DEBUG_TYPE,
"SpillReloadCopies", Loc,
2916 R <<
"generated in function";
2922bool RAGreedy::hasVirtRegAlloc() {
2923 for (
unsigned I = 0,
E =
MRI->getNumVirtRegs();
I !=
E; ++
I) {
2925 if (
MRI->reg_nodbg_empty(
Reg))
2935 LLVM_DEBUG(
dbgs() <<
"********** GREEDY REGISTER ALLOCATION **********\n"
2936 <<
"********** Function: " << mf.
getName() <<
'\n');
2942 MF->verify(
LIS, Indexes,
"Before greedy register allocator", &
errs());
2948 if (!hasVirtRegAlloc())
2953 Indexes->packIndexes();
2955 initializeCSRCost();
2957 RegCosts =
TRI->getRegisterCosts(*MF);
2958 RegClassPriorityTrumpsGlobalness =
2961 :
TRI->regClassPriorityTrumpsGlobalness(*MF);
2965 :
TRI->reverseLocalAssignment();
2967 ExtraInfo.emplace();
2969 EvictAdvisor = EvictProvider->getAdvisor(*MF, *
this, MBFI, Loops);
2970 PriorityAdvisor = PriorityProvider->getAdvisor(*MF, *
this, *Indexes);
2972 VRAI = std::make_unique<VirtRegAuxInfo>(*MF, *
LIS, *
VRM, *Loops, *MBFI);
2976 VRAI->calculateSpillWeightsAndHints();
2983 IntfCache.init(MF,
Matrix->getLiveUnions(), Indexes,
LIS,
TRI);
2984 GlobalCand.resize(32);
2985 SetOfBrokenHints.clear();
2988 tryHintsRecoloring();
2991 MF->verify(
LIS, Indexes,
"Before post optimization", &
errs());
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
DXIL Forward Handle Accesses
const HexagonInstrInfo * TII
This file implements an indexed map.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
block placement Basic Block Placement Stats
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This header defines classes/functions to handle pass execution timing information with interfaces for...
static DominatorTree getDomTree(Function &F)
static bool hasTiedDef(MachineRegisterInfo *MRI, Register reg)
Return true if reg has any tied def operand.
static cl::opt< bool > GreedyRegClassPriorityTrumpsGlobalness("greedy-regclass-priority-trumps-globalness", cl::desc("Change the greedy register allocator's live range priority " "calculation to make the AllocationPriority of the register class " "more important then whether the range is global"), cl::Hidden)
static cl::opt< bool > ExhaustiveSearch("exhaustive-register-search", cl::NotHidden, cl::desc("Exhaustive Search for registers bypassing the depth " "and interference cutoffs of last chance recoloring"), cl::Hidden)
static cl::opt< unsigned > CSRCostScale("regalloc-csr-cost-scale", cl::desc("Scale for the callee-saved register cost, in percentage."), cl::init(80), cl::Hidden)
static cl::opt< unsigned > LastChanceRecoloringMaxInterference("lcr-max-interf", cl::Hidden, cl::desc("Last chance recoloring maximum number of considered" " interference at a time"), cl::init(8))
static bool readsLaneSubset(const MachineRegisterInfo &MRI, const MachineInstr *MI, const LiveInterval &VirtReg, const TargetRegisterInfo *TRI, SlotIndex Use, const TargetInstrInfo *TII)
Return true if MI at \P Use reads a subset of the lanes live in VirtReg.
static bool assignedRegPartiallyOverlaps(const TargetRegisterInfo &TRI, const VirtRegMap &VRM, MCRegister PhysReg, const LiveInterval &Intf)
Return true if the existing assignment of Intf overlaps, but is not the same, as PhysReg.
static cl::opt< unsigned > CSRFirstTimeCost("regalloc-csr-first-time-cost", cl::desc("Cost for first time use of callee-saved register."), cl::init(0), cl::Hidden)
static cl::opt< unsigned > LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden, cl::desc("Last chance recoloring max depth"), cl::init(5))
static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator", createGreedyRegisterAllocator)
static cl::opt< unsigned long > GrowRegionComplexityBudget("grow-region-complexity-budget", cl::desc("growRegion() does not scale with the number of BB edges, so " "limit its budget and bail out once we reach the limit."), cl::init(10000), cl::Hidden)
static cl::opt< unsigned > SplitThresholdForRegWithHint("split-threshold-for-reg-with-hint", cl::desc("The threshold for splitting a virtual register with a hint, in " "percentage"), cl::init(75), cl::Hidden)
static cl::opt< SplitEditor::ComplementSpillMode > SplitSpillMode("split-spill-mode", cl::Hidden, cl::desc("Spill mode for splitting live ranges"), cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"), clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"), clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed")), cl::init(SplitEditor::SM_Speed))
static unsigned getNumAllocatableRegsForConstraints(const MachineInstr *MI, Register Reg, const TargetRegisterClass *SuperRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, const RegisterClassInfo &RCI)
Get the number of allocatable registers that match the constraints of Reg on MI and that are also in ...
static cl::opt< bool > GreedyReverseLocalAssignment("greedy-reverse-local-assignment", cl::desc("Reverse allocation order of local live ranges, such that " "shorter local live ranges will tend to be allocated first"), cl::Hidden)
static LaneBitmask getInstReadLaneMask(const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const MachineInstr &FirstMI, Register Reg)
Remove Loads Into Fake Uses
SI optimize exec mask operations pre RA
SI Optimize VGPR LiveRange
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName) const
PreservedAnalyses run(MachineFunction &F, MachineFunctionAnalysisManager &AM)
bool isHint(Register Reg) const
Return true if Reg is a preferred physical register.
ArrayRef< MCPhysReg > getOrder() const
Get the allocation order without reordered hints.
static AllocationOrder create(Register VirtReg, const VirtRegMap &VRM, const RegisterClassInfo &RegClassInfo, const LiveRegMatrix *Matrix)
Create a new AllocationOrder for VirtReg.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
Get the array size.
bool test(unsigned Idx) const
Returns true if bit Idx is set.
BitVector & reset()
Reset all bits in the bitvector.
static BlockFrequency max()
Returns the maximum possible frequency, the saturation value.
Represents analyses that only rely on functions' control flow.
FunctionPass class - This class is used to implement most global optimizations.
Cursor - The primary query interface for the block interference cache.
SlotIndex first()
first - Return the starting index of the first interfering range in the current block.
SlotIndex last()
last - Return the ending index of the last interfering range in the current block.
bool hasInterference()
hasInterference - Return true if the current block has any interference.
void moveToBlock(unsigned MBBNum)
moveTo - Move cursor to basic block MBBNum.
This is an important class for using LLVM in a threaded context.
Query interferences between a single live virtual register and a live interval union.
const SmallVectorImpl< const LiveInterval * > & interferingVRegs(unsigned MaxInterferingRegs=std::numeric_limits< unsigned >::max())
LiveSegments::iterator SegmentIter
A live range for subregisters.
LiveInterval - This class represents the liveness of a register, or stack slot.
bool isSpillable() const
isSpillable - Can this interval be spilled?
bool hasSubRanges() const
Returns true if subregister liveness information is available.
LLVM_ABI unsigned getSize() const
getSize - Returns the sum of sizes of all the LiveRange's.
iterator_range< subrange_iterator > subranges()
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
LiveInterval & getInterval(Register Reg)
Register get(unsigned idx) const
ArrayRef< Register > regs() const
Segments::const_iterator const_iterator
bool liveAt(SlotIndex index) const
SlotIndex beginIndex() const
beginIndex - Return the lowest numbered slot covered.
SlotIndex endIndex() const
endNumber - return the maximum point of the range of the whole, exclusive.
LLVM_ABI iterator find(SlotIndex Pos)
find - Return an iterator pointing to the first segment that ends after Pos, or end().
@ IK_VirtReg
Virtual register interference.
Wrapper class representing physical registers. Should be passed by value.
constexpr bool isValid() const
static constexpr unsigned NoRegister
constexpr unsigned id() const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
An RAII based helper class to modify MachineFunctionProperties when running pass.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
LLVM_ABI iterator getFirstNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the first non-debug instruction in the basic block, or end().
MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate machine basic b...
Analysis pass which computes a MachineDominatorTree.
Analysis pass which computes a MachineDominatorTree.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
Representation of each machine instruction.
bool isImplicitDef() const
Analysis pass that exposes the MachineLoopInfo for a machine function.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
static reg_instr_nodbg_iterator reg_instr_nodbg_end()
defusechain_instr_iterator< true, true, true, true > reg_instr_nodbg_iterator
reg_instr_nodbg_iterator/reg_instr_nodbg_begin/reg_instr_nodbg_end - Walk all defs and uses of the sp...
iterator_range< def_iterator > def_operands(Register Reg) const
LLVM_ABI LaneBitmask getMaxLaneMaskForVReg(Register Reg) const
Returns a mask covering all bits that can appear in lane masks of subregisters of the virtual registe...
reg_instr_nodbg_iterator reg_instr_nodbg_begin(Register RegNo) const
Pass interface - Implemented by all 'passes'.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
bool run(MachineFunction &mf)
Perform register allocation.
Spiller & spiller() override
MCRegister selectOrSplit(const LiveInterval &, SmallVectorImpl< Register > &) override
RAGreedy(RequiredAnalyses &Analyses, const RegAllocFilterFunc F=nullptr)
const LiveInterval * dequeue() override
dequeue - Return the next unassigned register, or NULL.
void enqueueImpl(const LiveInterval *LI) override
enqueue - Add VirtReg to the priority queue of unassigned registers.
void aboutToRemoveInterval(const LiveInterval &) override
Method called when the allocator is about to remove a LiveInterval.
RegAllocBase(const RegAllocFilterFunc F=nullptr)
void enqueue(const LiveInterval *LI)
enqueue - Add VirtReg to the priority queue of unassigned registers.
void init(VirtRegMap &vrm, LiveIntervals &lis, LiveRegMatrix &mat)
SmallPtrSet< MachineInstr *, 32 > DeadRemats
Inst which is a def of an original reg and whose defs are already all dead after remat is saved in De...
const TargetRegisterInfo * TRI
static const char TimerGroupName[]
static const char TimerGroupDescription[]
virtual void postOptimization()
RegisterClassInfo RegClassInfo
MachineRegisterInfo * MRI
bool shouldAllocateRegister(Register Reg)
Get whether a given register should be allocated.
static bool VerifyEnabled
VerifyEnabled - True when -verify-regalloc is given.
ImmutableAnalysis abstraction for fetching the Eviction Advisor.
A MachineFunction analysis for fetching the Eviction Advisor.
Common provider for legacy and new pass managers.
const TargetRegisterInfo *const TRI
std::optional< unsigned > getOrderLimit(const LiveInterval &VirtReg, const AllocationOrder &Order, unsigned CostPerUseLimit) const
const ArrayRef< uint8_t > RegCosts
MachineRegisterInfo *const MRI
const RegisterClassInfo & RegClassInfo
bool isUnusedCalleeSavedReg(MCRegister PhysReg) const
Returns true if the given PhysReg is a callee saved register and has not been used for allocation yet...
bool canReassign(const LiveInterval &VirtReg, MCRegister FromReg) const
bool canAllocatePhysReg(unsigned CostPerUseLimit, MCRegister PhysReg) const
LiveRegMatrix *const Matrix
Common provider for getting the priority advisor and logging rewards.
unsigned getNumAllocatableRegs(const TargetRegisterClass *RC) const
getNumAllocatableRegs - Returns the number of actually allocatable registers in RC in the current fun...
Wrapper class representing virtual and physical registers.
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
unsigned virtRegIndex() const
Convert a virtual register number to a 0-based index.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
SlotIndex - An opaque wrapper around machine indexes.
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
static bool isEarlierInstr(SlotIndex A, SlotIndex B)
isEarlierInstr - Return true if A refers to an instruction earlier than B.
@ InstrDist
The default distance between instructions as returned by distance().
bool isValid() const
Returns true if this is a valid index.
SlotIndex getBoundaryIndex() const
Returns the boundary index for associated with this index.
SlotIndex getBaseIndex() const
Returns the base index for associated with this index.
int getApproxInstrDistance(SlotIndex other) const
Return the scaled distance from this index to the given one, where all slots on the same instruction ...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
@ MustSpill
A register is impossible, variable must be spilled.
@ DontCare
Block doesn't care / variable not live.
@ PrefReg
Block entry/exit prefers a register.
@ PrefSpill
Block entry/exit prefers a stack slot.
virtual void spill(LiveRangeEdit &LRE, AllocationOrder *Order=nullptr)=0
spill - Spill the LRE.getParent() live interval.
SplitAnalysis - Analyze a LiveInterval, looking for live range splitting opportunities.
SplitEditor - Edit machine code and LiveIntervals for live range splitting.
@ SM_Partition
SM_Partition(Default) - Try to create the complement interval so it doesn't overlap any other interva...
@ SM_Speed
SM_Speed - Overlap intervals to minimize the expected execution frequency of the inserted copies.
@ SM_Size
SM_Size - Overlap intervals to minimize the number of inserted COPY instructions.
Represent a constant reference to a string, i.e.
constexpr bool empty() const
Check if the string is empty.
TargetInstrInfo - Interface to description of machine instruction set.
const bool GlobalPriority
const uint8_t AllocationPriority
Classes with a higher priority value are assigned first by register allocators using a greedy heurist...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
A Use represents the edge between a Value definition and its users.
MCRegister getPhys(Register virtReg) const
returns the physical register mapped to the specified virtual register
bool hasPhys(Register virtReg) const
returns true if the specified virtual register is mapped to a physical register
An efficient, type-erasing, non-owning reference to a callable.
This class implements an extremely fast bulk output stream that can only output to a stream.
Pass manager infrastructure for declaring and invalidating analyses.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
NodeAddr< UseNode * > Use
This is an optimization pass for GlobalISel generic memory operations.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
constexpr uint64_t maxUIntN(uint64_t N)
Gets the maximum value for a N-bit unsigned integer.
SmallSet< Register, 16 > SmallVirtRegSet
LLVM_ABI FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI bool TimePassesIsEnabled
If the user specifies the -time-passes argument on an LLVM tool command line then the value of this b...
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
@ RS_Split2
Attempt more aggressive live range splitting that is guaranteed to make progress.
@ RS_Spill
Live range will be spilled. No more splitting will be attempted.
@ RS_Split
Attempt live range splitting if assignment is impossible.
@ RS_New
Newly created live range that has never been queued.
@ RS_Done
There is nothing more we can do to this live range.
@ RS_Assign
Only attempt assignment and eviction. Then requeue as RS_Split.
FunctionAddr VTableAddr Count
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
LLVM_ABI VirtRegInfo AnalyzeVirtRegInBundle(MachineInstr &MI, Register Reg, SmallVectorImpl< std::pair< MachineInstr *, unsigned > > *Ops=nullptr)
AnalyzeVirtRegInBundle - Analyze how the current instruction or bundle uses a virtual register.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI const float huge_valf
Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Spiller * createInlineSpiller(const Spiller::RequiredAnalyses &Analyses, MachineFunction &MF, VirtRegMap &VRM, VirtRegAuxInfo &VRAI, LiveRegMatrix *Matrix=nullptr)
Create and return a spiller that will insert spill code directly instead of deferring though VirtRegM...
ArrayRef(const T &OneElt) -> ArrayRef< T >
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI Printable printBlockFreq(const BlockFrequencyInfo &BFI, BlockFrequency Freq)
Print the block frequency Freq relative to the current functions entry frequency.
LLVM_ABI char & RAGreedyLegacyID
Greedy register allocator.
static float normalizeSpillWeight(float UseDefFreq, unsigned Size, unsigned NumInstr)
Normalize the spill weight of a live interval.
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
Implement std::hash so that hash_code can be used in STL containers.
MachineBlockFrequencyInfo * MBFI
RegAllocEvictionAdvisorProvider * EvictProvider
MachineOptimizationRemarkEmitter * ORE
LiveDebugVariables * DebugVars
SpillPlacement * SpillPlacer
RegAllocPriorityAdvisorProvider * PriorityProvider
MachineDominatorTree * DomTree
RequiredAnalyses()=delete
constexpr bool any() const
This class is basically a combination of TimeRegion and Timer.
BlockConstraint - Entry and exit constraints for a basic block.
BorderConstraint Exit
Constraint on block exit.
bool ChangesValue
True when this block changes the value of the live range.
BorderConstraint Entry
Constraint on block entry.
unsigned Number
Basic block number (from MBB::getNumber()).
Additional information about basic blocks where the current variable is live.
SlotIndex FirstDef
First non-phi valno->def, or SlotIndex().
bool LiveOut
Current reg is live out.
bool LiveIn
Current reg is live in.
SlotIndex LastInstr
Last instr accessing current reg.
SlotIndex FirstInstr
First instr accessing current reg.