51#define DEBUG_TYPE "regalloc"
55STATISTIC(NumCoalesced,
"Number of copies coalesced");
68class InstrPosIndexes {
70 void unsetInitialized() { IsInitialized =
false; }
72 void init(
const MachineBasicBlock &
MBB) {
74 Instr2PosIndex.
clear();
75 uint64_t LastIndex = 0;
76 for (
const MachineInstr &
MI :
MBB) {
77 LastIndex += InstrDist;
78 Instr2PosIndex[&
MI] = LastIndex;
85 bool getIndex(
const MachineInstr &
MI, uint64_t &Index) {
93 assert(
MI.getParent() == CurMBB &&
"MI is not in CurMBB");
94 auto It = Instr2PosIndex.find(&
MI);
95 if (It != Instr2PosIndex.end()) {
109 unsigned Distance = 1;
111 End = std::next(Start);
112 while (Start != CurMBB->begin() &&
113 !Instr2PosIndex.count(&*std::prev(Start))) {
117 while (End != CurMBB->end() && !Instr2PosIndex.count(&*(End))) {
125 Start == CurMBB->begin() ? 0 : Instr2PosIndex.at(&*std::prev(Start));
127 if (End == CurMBB->end())
128 Step =
static_cast<uint64_t
>(InstrDist);
131 uint64_t EndIndex = Instr2PosIndex.at(&*End);
132 assert(EndIndex > LastIndex &&
"Index must be ascending order");
133 unsigned NumAvailableIndexes = EndIndex - LastIndex - 1;
152 Step = (NumAvailableIndexes + 1) / (Distance + 1);
157 if (
LLVM_UNLIKELY(!Step || (!LastIndex && Step == InstrDist))) {
159 Index = Instr2PosIndex.at(&
MI);
163 for (
auto I = Start;
I != End; ++
I) {
165 Instr2PosIndex[&*
I] = LastIndex;
167 Index = Instr2PosIndex.at(&
MI);
172 bool IsInitialized =
false;
173 enum { InstrDist = 1024 };
174 const MachineBasicBlock *CurMBB =
nullptr;
175 DenseMap<const MachineInstr *, uint64_t> Instr2PosIndex;
178class RegAllocFastImpl {
181 bool ClearVirtRegs_ =
true)
182 : ShouldAllocateRegisterImpl(
F), StackSlotForVirtReg(-1),
183 ClearVirtRegs(ClearVirtRegs_) {}
186 MachineFrameInfo *MFI =
nullptr;
187 MachineRegisterInfo *MRI =
nullptr;
188 const TargetRegisterInfo *TRI =
nullptr;
189 const TargetInstrInfo *TII =
nullptr;
190 RegisterClassInfo RegClassInfo;
194 MachineBasicBlock *MBB =
nullptr;
197 IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;
201 MachineInstr *LastUse =
nullptr;
204 bool LiveOut =
false;
205 bool Reloaded =
false;
208 explicit LiveReg(
Register VirtReg) : VirtReg(VirtReg) {}
209 explicit LiveReg() =
default;
211 unsigned getSparseSetIndex()
const {
return VirtReg.virtRegIndex(); }
214 using LiveRegMap = SparseSet<LiveReg, unsigned, identity, uint16_t>;
217 LiveRegMap LiveVirtRegs;
220 DenseMap<Register, LiveReg> BundleVirtRegsMap;
222 DenseMap<Register, SmallVector<MachineOperand *, 2>> LiveDbgValueMap;
225 DenseMap<Register, SmallVector<MachineInstr *, 1>> DanglingDbgValues;
229 BitVector MayLiveAcrossBlocks;
251 std::vector<unsigned> RegUnitStates;
269 SmallVector<unsigned, 0> UsedInInstr;
271 SmallVector<unsigned, 8> DefOperandIndexes;
276 InstrPosIndexes PosIndexes;
278 void setRegUnitState(MCRegUnit Unit,
unsigned NewState);
279 unsigned getRegUnitState(MCRegUnit Unit)
const;
281 void setPhysRegState(MCRegister PhysReg,
unsigned NewState);
282 bool isPhysRegFree(MCRegister PhysReg)
const;
285 void markRegUsedInInstr(
MCPhysReg PhysReg) {
286 for (MCRegUnit Unit : TRI->regunits(PhysReg))
287 UsedInInstr[
static_cast<unsigned>(
Unit)] = InstrGen | 1;
291 bool isClobberedByRegMasks(MCRegister PhysReg)
const {
292 return llvm::any_of(RegMasks, [PhysReg](
const uint32_t *Mask) {
298 bool isRegUsedInInstr(MCRegister PhysReg,
bool LookAtPhysRegUses)
const {
299 if (LookAtPhysRegUses && isClobberedByRegMasks(PhysReg))
301 for (MCRegUnit Unit : TRI->regunits(PhysReg))
302 if (UsedInInstr[
static_cast<unsigned>(Unit)] >=
303 (InstrGen | !LookAtPhysRegUses))
310 void markPhysRegUsedInInstr(MCRegister PhysReg) {
311 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
312 assert(UsedInInstr[
static_cast<unsigned>(Unit)] <= InstrGen &&
313 "non-phys use before phys use?");
314 UsedInInstr[
static_cast<unsigned>(
Unit)] = InstrGen;
319 void unmarkRegUsedInInstr(MCRegister PhysReg) {
320 for (MCRegUnit Unit : TRI->regunits(PhysReg))
321 UsedInInstr[
static_cast<unsigned>(
Unit)] = 0;
328 spillImpossible = ~0
u
334 bool runOnMachineFunction(MachineFunction &MF);
337 void allocateBasicBlock(MachineBasicBlock &MBB);
342 void findAndSortDefOperandIndexes(
const MachineInstr &
MI);
344 void allocateInstruction(MachineInstr &
MI);
345 void handleDebugValue(MachineInstr &
MI);
346 void handleBundle(MachineInstr &
MI);
348 bool usePhysReg(MachineInstr &
MI, MCRegister PhysReg);
349 bool definePhysReg(MachineInstr &
MI, MCRegister PhysReg);
350 bool displacePhysReg(MachineInstr &
MI, MCRegister PhysReg);
351 void freePhysReg(MCRegister PhysReg);
353 unsigned calcSpillCost(
MCPhysReg PhysReg)
const;
363 void assignVirtToPhysReg(MachineInstr &
MI, LiveReg &, MCRegister PhysReg);
364 void allocVirtReg(MachineInstr &
MI, LiveReg &LR,
Register Hint,
365 bool LookAtPhysRegUses =
false);
366 void allocVirtRegUndef(MachineOperand &MO);
367 void assignDanglingDebugValues(MachineInstr &Def,
Register VirtReg,
369 bool defineLiveThroughVirtReg(MachineInstr &
MI,
unsigned OpNum,
371 bool defineVirtReg(MachineInstr &
MI,
unsigned OpNum,
Register VirtReg,
372 bool LookAtPhysRegUses =
false);
373 bool useVirtReg(MachineInstr &
MI, MachineOperand &MO,
Register VirtReg);
375 MCPhysReg getErrorAssignment(
const LiveReg &LR, MachineInstr &
MI,
376 const TargetRegisterClass &RC);
379 getMBBBeginInsertionPoint(MachineBasicBlock &MBB,
380 SmallSet<Register, 2> &PrologLiveIns)
const;
382 void reloadAtBegin(MachineBasicBlock &MBB);
383 bool setPhysReg(MachineInstr &
MI, MachineOperand &MO,
384 const LiveReg &Assignment);
389 bool shouldAllocateRegister(
const Register Reg)
const;
390 int getStackSpaceFor(
Register VirtReg);
392 MCPhysReg AssignedReg,
bool Kill,
bool LiveOut);
399 bool mayBeSpillFromInlineAsmBr(
const MachineInstr &
MI)
const;
401 void dumpState()
const;
405 RegAllocFastImpl Impl;
411 : MachineFunctionPass(ID), Impl(
F, ClearVirtRegs_) {}
413 bool runOnMachineFunction(MachineFunction &MF)
override {
414 return Impl.runOnMachineFunction(MF);
417 StringRef getPassName()
const override {
return "Fast Register Allocator"; }
419 void getAnalysisUsage(AnalysisUsage &AU)
const override {
424 MachineFunctionProperties getRequiredProperties()
const override {
425 return MachineFunctionProperties().setNoPHIs();
428 MachineFunctionProperties getSetProperties()
const override {
429 if (Impl.ClearVirtRegs) {
430 return MachineFunctionProperties().setNoVRegs();
433 return MachineFunctionProperties();
436 MachineFunctionProperties getClearedProperties()
const override {
437 return MachineFunctionProperties().setIsSSA();
443char RegAllocFast::ID = 0;
450 if (!ShouldAllocateRegisterImpl)
453 return ShouldAllocateRegisterImpl(*
TRI, *
MRI,
Reg);
456void RegAllocFastImpl::setRegUnitState(MCRegUnit Unit,
unsigned NewState) {
457 RegUnitStates[
static_cast<unsigned>(
Unit)] = NewState;
460unsigned RegAllocFastImpl::getRegUnitState(MCRegUnit Unit)
const {
461 return RegUnitStates[
static_cast<unsigned>(
Unit)];
464void RegAllocFastImpl::setPhysRegState(MCRegister PhysReg,
unsigned NewState) {
465 for (MCRegUnit Unit :
TRI->regunits(PhysReg))
466 setRegUnitState(Unit, NewState);
469bool RegAllocFastImpl::isPhysRegFree(MCRegister PhysReg)
const {
470 for (MCRegUnit Unit :
TRI->regunits(PhysReg)) {
471 if (getRegUnitState(Unit) != regFree)
479int RegAllocFastImpl::getStackSpaceFor(
Register VirtReg) {
481 int SS = StackSlotForVirtReg[VirtReg];
487 const TargetRegisterClass &RC = *
MRI->getRegClass(VirtReg);
488 unsigned Size =
TRI->getSpillSize(RC);
489 Align Alignment =
TRI->getSpillAlign(RC);
491 const MachineFunction &MF =
MRI->getMF();
493 Align CurrentAlign =
ST.getFrameLowering()->getStackAlign();
494 if (Alignment > CurrentAlign && !
TRI->canRealignStack(MF))
495 Alignment = CurrentAlign;
500 StackSlotForVirtReg[VirtReg] = FrameIdx;
507 PosIndexes.getIndex(
A, IndexA);
509 PosIndexes.getIndex(
A, IndexA);
510 return IndexA < IndexB;
517bool RegAllocFastImpl::mayBeSpillFromInlineAsmBr(
const MachineInstr &
MI)
const {
519 auto *
MBB =
MI.getParent();
522 for (
const auto &
Op :
MI.operands())
529bool RegAllocFastImpl::mayLiveOut(
Register VirtReg) {
535 const MachineInstr *SelfLoopDef =
nullptr;
541 for (
const MachineInstr &DefInst :
MRI->def_instructions(VirtReg)) {
542 if (DefInst.getParent() !=
MBB) {
546 if (!SelfLoopDef ||
dominates(PosIndexes, DefInst, *SelfLoopDef))
547 SelfLoopDef = &DefInst;
558 static const unsigned Limit = 8;
560 for (
const MachineInstr &UseInst :
MRI->use_nodbg_instructions(VirtReg)) {
561 if (UseInst.getParent() !=
MBB || ++
C >= Limit) {
570 if (SelfLoopDef == &UseInst ||
571 !
dominates(PosIndexes, *SelfLoopDef, UseInst)) {
582bool RegAllocFastImpl::mayLiveIn(
Register VirtReg) {
587 static const unsigned Limit = 8;
589 for (
const MachineInstr &DefInst :
MRI->def_instructions(VirtReg)) {
590 if (DefInst.getParent() !=
MBB || ++
C >= Limit) {
606 int FI = getStackSpaceFor(VirtReg);
609 const TargetRegisterClass &RC = *
MRI->getRegClass(VirtReg);
618 SmallVectorImpl<MachineOperand *> &LRIDbgOperands = LiveDbgValueMap[VirtReg];
619 SmallMapVector<MachineInstr *, SmallVector<const MachineOperand *>, 2>
621 for (MachineOperand *MO : LRIDbgOperands)
622 SpilledOperandsMap[MO->getParent()].push_back(MO);
623 for (
const auto &MISpilledOperands : SpilledOperandsMap) {
624 MachineInstr &DBG = *MISpilledOperands.first;
629 *
MBB, Before, *MISpilledOperands.first, FI, MISpilledOperands.second);
632 LLVM_DEBUG(
dbgs() <<
"Inserting debug info due to spill:\n" << *NewDV);
639 MachineInstr *ClonedDV =
MBB->
getParent()->CloneMachineInstr(NewDV);
641 LLVM_DEBUG(
dbgs() <<
"Cloning debug info due to live out spill\n");
657 LRIDbgOperands.
clear();
665 int FI = getStackSpaceFor(VirtReg);
666 const TargetRegisterClass &RC = *
MRI->getRegClass(VirtReg);
676 MachineBasicBlock &
MBB, SmallSet<Register, 2> &PrologLiveIns)
const {
685 if (!
TII->isBasicBlockPrologue(*
I) && !mayBeSpillFromInlineAsmBr(*
I))
690 for (MachineOperand &MO :
I->operands()) {
702void RegAllocFastImpl::reloadAtBegin(MachineBasicBlock &
MBB) {
703 if (LiveVirtRegs.empty())
706 for (MachineBasicBlock::RegisterMaskPair
P :
MBB.
liveins()) {
707 MCRegister
Reg =
P.PhysReg;
710 setPhysRegState(
Reg, regLiveIn);
713 SmallSet<Register, 2> PrologLiveIns;
718 getMBBBeginInsertionPoint(
MBB, PrologLiveIns);
719 for (
const LiveReg &LR : LiveVirtRegs) {
721 if (PhysReg == 0 || LR.Error)
724 MCRegUnit FirstUnit = *
TRI->regunits(PhysReg).begin();
725 if (getRegUnitState(FirstUnit) == regLiveIn)
729 "no reload in start block. Missing vreg def?");
731 if (PrologLiveIns.
count(PhysReg)) {
735 reload(
MBB.
begin(), LR.VirtReg, PhysReg);
737 reload(InsertBefore, LR.VirtReg, PhysReg);
739 LiveVirtRegs.clear();
745bool RegAllocFastImpl::usePhysReg(MachineInstr &
MI, MCRegister
Reg) {
746 assert(Register::isPhysicalRegister(
Reg) &&
"expected physreg");
747 bool displacedAny = displacePhysReg(
MI,
Reg);
748 setPhysRegState(
Reg, regPreAssigned);
749 markRegUsedInInstr(
Reg);
753bool RegAllocFastImpl::definePhysReg(MachineInstr &
MI, MCRegister
Reg) {
754 bool displacedAny = displacePhysReg(
MI,
Reg);
755 setPhysRegState(
Reg, regPreAssigned);
762bool RegAllocFastImpl::displacePhysReg(MachineInstr &
MI, MCRegister PhysReg) {
763 bool displacedAny =
false;
765 for (MCRegUnit Unit :
TRI->regunits(PhysReg)) {
766 switch (
unsigned VirtReg = getRegUnitState(Unit)) {
768 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
769 assert(LRI != LiveVirtRegs.end() &&
"datastructures in sync");
772 while (mayBeSpillFromInlineAsmBr(*ReloadBefore))
774 reload(ReloadBefore, VirtReg, LRI->PhysReg);
776 setPhysRegState(LRI->PhysReg, regFree);
778 LRI->Reloaded =
true;
783 setRegUnitState(Unit, regFree);
793void RegAllocFastImpl::freePhysReg(MCRegister PhysReg) {
796 MCRegUnit FirstUnit = *
TRI->regunits(PhysReg).begin();
797 switch (
unsigned VirtReg = getRegUnitState(FirstUnit)) {
803 setPhysRegState(PhysReg, regFree);
806 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
807 assert(LRI != LiveVirtRegs.end());
809 setPhysRegState(LRI->PhysReg, regFree);
820unsigned RegAllocFastImpl::calcSpillCost(
MCPhysReg PhysReg)
const {
821 for (MCRegUnit Unit :
TRI->regunits(PhysReg)) {
822 switch (
unsigned VirtReg = getRegUnitState(Unit)) {
828 return spillImpossible;
830 bool SureSpill = StackSlotForVirtReg[VirtReg] != -1 ||
831 findLiveVirtReg(VirtReg)->LiveOut;
832 return SureSpill ? spillClean : spillDirty;
839void RegAllocFastImpl::assignDanglingDebugValues(MachineInstr &Definition,
842 auto UDBGValIter = DanglingDbgValues.
find(VirtReg);
843 if (UDBGValIter == DanglingDbgValues.
end())
846 SmallVectorImpl<MachineInstr *> &Dangling = UDBGValIter->second;
847 for (MachineInstr *DbgValue : Dangling) {
848 assert(DbgValue->isDebugValue());
849 if (!DbgValue->hasDebugOperandForReg(VirtReg))
856 E = DbgValue->getIterator();
858 if (
I->modifiesRegister(
Reg,
TRI) || --Limit == 0) {
865 for (MachineOperand &MO : DbgValue->getDebugOperandsForReg(VirtReg)) {
877void RegAllocFastImpl::assignVirtToPhysReg(MachineInstr &AtMI, LiveReg &LR,
878 MCRegister PhysReg) {
882 assert(LR.PhysReg == 0 &&
"Already assigned a physreg");
883 assert(PhysReg != 0 &&
"Trying to assign no register");
884 LR.PhysReg = PhysReg;
885 setPhysRegState(PhysReg, VirtReg.
id());
887 assignDanglingDebugValues(AtMI, VirtReg, PhysReg);
893 static const unsigned ChainLengthLimit = 3;
900 MachineInstr *VRegDef =
MRI->getUniqueVRegDef(
Reg);
904 }
while (++
C <= ChainLengthLimit);
912 static const unsigned DefLimit = 3;
914 for (
const MachineInstr &
MI :
MRI->def_instructions(VirtReg)) {
917 Reg = traceCopyChain(
Reg);
929void RegAllocFastImpl::allocVirtReg(MachineInstr &
MI, LiveReg &LR,
930 Register Hint0,
bool LookAtPhysRegUses) {
931 const Register VirtReg = LR.VirtReg;
934 const TargetRegisterClass &RC = *
MRI->getRegClass(VirtReg);
936 <<
" in class " <<
TRI->getRegClassName(&RC)
941 !isRegUsedInInstr(Hint0, LookAtPhysRegUses)) {
943 if (isPhysRegFree(Hint0)) {
946 assignVirtToPhysReg(
MI, LR, Hint0);
957 Register Hint1 = traceCopies(VirtReg);
959 !isRegUsedInInstr(Hint1, LookAtPhysRegUses)) {
961 if (isPhysRegFree(Hint1)) {
964 assignVirtToPhysReg(
MI, LR, Hint1);
975 unsigned BestCost = spillImpossible;
977 for (
MCPhysReg PhysReg : AllocationOrder) {
979 if (isRegUsedInInstr(PhysReg, LookAtPhysRegUses)) {
984 unsigned Cost = calcSpillCost(PhysReg);
988 assignVirtToPhysReg(
MI, LR, PhysReg);
992 if (PhysReg == Hint0 || PhysReg == Hint1)
993 Cost -= spillPrefBonus;
995 if (
Cost < BestCost) {
1004 LR.PhysReg = getErrorAssignment(LR,
MI, RC);
1009 displacePhysReg(
MI, BestReg);
1010 assignVirtToPhysReg(
MI, LR, BestReg);
1013void RegAllocFastImpl::allocVirtRegUndef(MachineOperand &MO) {
1017 if (!shouldAllocateRegister(VirtReg))
1020 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
1022 bool IsRenamable =
true;
1023 if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
1024 PhysReg = LRI->PhysReg;
1026 const TargetRegisterClass &RC = *
MRI->getRegClass(VirtReg);
1028 if (AllocationOrder.
empty()) {
1034 PhysReg = getErrorAssignment(*LRI, *MO.
getParent(), RC);
1036 IsRenamable =
false;
1038 PhysReg = AllocationOrder.
front();
1042 if (SubRegIdx != 0) {
1043 PhysReg =
TRI->getSubReg(PhysReg, SubRegIdx);
1053bool RegAllocFastImpl::defineLiveThroughVirtReg(MachineInstr &
MI,
1056 if (!shouldAllocateRegister(VirtReg))
1058 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
1059 if (LRI != LiveVirtRegs.end()) {
1061 if (PrevReg != 0 && isRegUsedInInstr(PrevReg,
true)) {
1063 <<
" (tied/earlyclobber resolution)\n");
1064 freePhysReg(PrevReg);
1066 allocVirtReg(
MI, *LRI, 0,
true);
1072 TII->get(TargetOpcode::COPY), PrevReg)
1075 MachineOperand &MO =
MI.getOperand(OpNum);
1080 return defineVirtReg(
MI, OpNum, VirtReg,
true);
1090bool RegAllocFastImpl::defineVirtReg(MachineInstr &
MI,
unsigned OpNum,
1091 Register VirtReg,
bool LookAtPhysRegUses) {
1093 if (!shouldAllocateRegister(VirtReg))
1095 MachineOperand &MO =
MI.getOperand(OpNum);
1096 LiveRegMap::iterator LRI;
1098 std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
1101 if (mayLiveOut(VirtReg)) {
1102 LRI->LiveOut =
true;
1109 if (LRI->PhysReg == 0) {
1110 allocVirtReg(
MI, *LRI, 0, LookAtPhysRegUses);
1112 assert((!isRegUsedInInstr(LRI->PhysReg, LookAtPhysRegUses) || LRI->Error) &&
1113 "TODO: preassign mismatch");
1115 <<
" use existing assignment to "
1120 if (LRI->Reloaded || LRI->LiveOut) {
1121 if (!
MI.isImplicitDef()) {
1125 <<
" RL: " << LRI->Reloaded <<
'\n');
1126 bool Kill = LRI->LastUse ==
nullptr;
1127 spill(SpillBefore, VirtReg, PhysReg, Kill, LRI->LiveOut);
1131 if (
MI.getOpcode() == TargetOpcode::INLINEASM_BR) {
1132 int FI = StackSlotForVirtReg[VirtReg];
1133 const TargetRegisterClass &RC = *
MRI->getRegClass(VirtReg);
1134 for (MachineOperand &MO :
MI.operands()) {
1136 MachineBasicBlock *Succ = MO.
getMBB();
1145 LRI->LastUse =
nullptr;
1147 LRI->LiveOut =
false;
1148 LRI->Reloaded =
false;
1150 if (
MI.getOpcode() == TargetOpcode::BUNDLE) {
1151 BundleVirtRegsMap[VirtReg] = *LRI;
1153 markRegUsedInInstr(PhysReg);
1154 return setPhysReg(
MI, MO, *LRI);
1159bool RegAllocFastImpl::useVirtReg(MachineInstr &
MI, MachineOperand &MO,
1162 if (!shouldAllocateRegister(VirtReg))
1164 LiveRegMap::iterator LRI;
1166 std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
1169 if (mayLiveOut(VirtReg)) {
1170 LRI->LiveOut =
true;
1177 assert((!MO.
isKill() || LRI->LastUse == &
MI) &&
"Invalid kill flag");
1181 if (LRI->PhysReg == 0) {
1184 if (
MI.isCopy() &&
MI.getOperand(1).getSubReg() == 0) {
1185 Hint =
MI.getOperand(0).getReg();
1186 if (
Hint.isVirtual()) {
1187 assert(!shouldAllocateRegister(Hint));
1191 "Copy destination should already be assigned");
1194 allocVirtReg(
MI, *LRI, Hint,
false);
1199 if (
MI.getOpcode() == TargetOpcode::BUNDLE) {
1200 BundleVirtRegsMap[VirtReg] = *LRI;
1202 markRegUsedInInstr(LRI->PhysReg);
1203 return setPhysReg(
MI, MO, *LRI);
1209MCPhysReg RegAllocFastImpl::getErrorAssignment(
const LiveReg &LR,
1211 const TargetRegisterClass &RC) {
1212 MachineFunction &MF = *
MI.getMF();
1223 if (AllocationOrder.
empty()) {
1227 "no registers from class available to allocate", Fn,
1232 assert(!RawRegs.
empty() &&
"register classes cannot have no registers");
1233 return RawRegs.
front();
1236 if (!LR.Error && EmitError) {
1239 if (
MI.isInlineAsm()) {
1240 MI.emitInlineAsmError(
1241 "inline assembly requires more registers than available");
1245 "ran out of registers during register allocation", Fn,
1250 return AllocationOrder.
front();
1255bool RegAllocFastImpl::setPhysReg(MachineInstr &
MI, MachineOperand &MO,
1256 const LiveReg &Assignment) {
1258 assert(PhysReg &&
"assignments should always be to a valid physreg");
1286 MI.addRegisterKilled(PhysReg,
TRI,
true);
1295 MI.addRegisterDead(PhysReg,
TRI,
true);
1297 MI.addRegisterDefined(PhysReg,
TRI);
1306void RegAllocFastImpl::dumpState()
const {
1307 for (MCRegUnit Unit :
TRI->regunits()) {
1308 switch (
unsigned VirtReg = getRegUnitState(Unit)) {
1311 case regPreAssigned:
1318 LiveRegMap::const_iterator
I = findLiveVirtReg(VirtReg);
1319 assert(
I != LiveVirtRegs.end() &&
"have LiveVirtRegs entry");
1320 if (
I->LiveOut ||
I->Reloaded) {
1328 assert(
TRI->hasRegUnit(
I->PhysReg, Unit) &&
"inverse mapping present");
1335 for (
const LiveReg &LR : LiveVirtRegs) {
1340 assert(Register::isPhysicalRegister(PhysReg) &&
"mapped to physreg");
1341 for (MCRegUnit Unit :
TRI->regunits(PhysReg)) {
1342 assert(getRegUnitState(Unit) == VirtReg &&
"inverse map valid");
1350void RegAllocFastImpl::addRegClassDefCounts(
1352 assert(RegClassDefCounts.
size() ==
TRI->getNumRegClasses());
1355 if (!shouldAllocateRegister(
Reg))
1357 const TargetRegisterClass *OpRC =
MRI->getRegClass(
Reg);
1358 for (
unsigned RCIdx = 0, RCIdxEnd =
TRI->getNumRegClasses();
1359 RCIdx != RCIdxEnd; ++RCIdx) {
1360 const TargetRegisterClass *IdxRC =
TRI->getRegClass(RCIdx);
1363 ++RegClassDefCounts[RCIdx];
1369 for (
unsigned RCIdx = 0, RCIdxEnd =
TRI->getNumRegClasses();
1370 RCIdx != RCIdxEnd; ++RCIdx) {
1371 const TargetRegisterClass *IdxRC =
TRI->getRegClass(RCIdx);
1372 for (MCRegAliasIterator Alias(
Reg,
TRI,
true); Alias.isValid(); ++Alias) {
1374 ++RegClassDefCounts[RCIdx];
1384void RegAllocFastImpl::findAndSortDefOperandIndexes(
const MachineInstr &
MI) {
1385 DefOperandIndexes.
clear();
1388 for (
unsigned I = 0,
E =
MI.getNumOperands();
I <
E; ++
I) {
1389 const MachineOperand &MO =
MI.getOperand(
I);
1396 markPhysRegUsedInInstr(
Reg);
1406 if (DefOperandIndexes.
size() <= 1)
1414 SmallVector<unsigned> RegClassDefCounts(
TRI->getNumRegClasses(), 0);
1416 for (
const MachineOperand &MO :
MI.all_defs())
1417 addRegClassDefCounts(RegClassDefCounts, MO.
getReg());
1419 llvm::sort(DefOperandIndexes, [&](
unsigned I0,
unsigned I1) {
1420 const MachineOperand &MO0 =
MI.getOperand(I0);
1421 const MachineOperand &MO1 =
MI.getOperand(I1);
1424 const TargetRegisterClass &RC0 = *
MRI->getRegClass(Reg0);
1425 const TargetRegisterClass &RC1 = *
MRI->getRegClass(Reg1);
1429 unsigned ClassSize0 = RegClassInfo.
getOrder(&RC0).size();
1430 unsigned ClassSize1 = RegClassInfo.
getOrder(&RC1).size();
1432 bool SmallClass0 = ClassSize0 < RegClassDefCounts[RC0.
getID()];
1433 bool SmallClass1 = ClassSize1 < RegClassDefCounts[RC1.
getID()];
1434 if (SmallClass0 > SmallClass1)
1436 if (SmallClass0 < SmallClass1)
1444 if (Livethrough0 > Livethrough1)
1446 if (Livethrough0 < Livethrough1)
1460 unsigned TiedIdx =
MI.findTiedOperandIdx(
MI.getOperandNo(&MO));
1465void RegAllocFastImpl::allocateInstruction(MachineInstr &
MI) {
1485 BundleVirtRegsMap.
clear();
1488 bool HasPhysRegUse =
false;
1489 bool HasRegMask =
false;
1490 bool HasVRegDef =
false;
1491 bool HasDef =
false;
1492 bool HasEarlyClobber =
false;
1493 bool NeedToAssignLiveThroughs =
false;
1494 for (MachineOperand &MO :
MI.operands()) {
1498 if (!shouldAllocateRegister(
Reg))
1504 HasEarlyClobber =
true;
1505 NeedToAssignLiveThroughs =
true;
1508 NeedToAssignLiveThroughs =
true;
1511 if (!
MRI->isReserved(
Reg)) {
1514 bool displacedAny = definePhysReg(
MI,
Reg);
1516 HasEarlyClobber =
true;
1521 HasPhysRegUse =
true;
1535 bool ReArrangedImplicitOps =
true;
1543 if (NeedToAssignLiveThroughs) {
1544 while (ReArrangedImplicitOps) {
1545 ReArrangedImplicitOps =
false;
1546 findAndSortDefOperandIndexes(
MI);
1547 for (
unsigned OpIdx : DefOperandIndexes) {
1548 MachineOperand &MO =
MI.getOperand(
OpIdx);
1553 ReArrangedImplicitOps = defineLiveThroughVirtReg(
MI,
OpIdx,
Reg);
1555 ReArrangedImplicitOps = defineVirtReg(
MI,
OpIdx,
Reg);
1559 if (ReArrangedImplicitOps)
1565 while (ReArrangedImplicitOps) {
1566 ReArrangedImplicitOps =
false;
1567 for (MachineOperand &MO :
MI.all_defs()) {
1570 ReArrangedImplicitOps =
1571 defineVirtReg(
MI,
MI.getOperandNo(&MO),
Reg);
1572 if (ReArrangedImplicitOps)
1583 for (MachineOperand &MO :
reverse(
MI.all_defs())) {
1594 "tied def assigned to clobbered register");
1606 if (
MRI->isReserved(
Reg))
1609 unmarkRegUsedInInstr(
Reg);
1617 for (
const auto *RM : RegMasks)
1618 MRI->addPhysRegsUsedFromRegMask(RM);
1621 for (
const LiveReg &LR : LiveVirtRegs) {
1623 if (PhysReg != 0 && isClobberedByRegMasks(PhysReg))
1624 displacePhysReg(
MI, PhysReg);
1629 if (HasPhysRegUse) {
1630 for (MachineOperand &MO :
MI.operands()) {
1636 if (
MRI->isReserved(
Reg))
1638 if (!usePhysReg(
MI,
Reg))
1646 bool HasUndefUse =
false;
1647 bool ReArrangedImplicitMOs =
true;
1648 while (ReArrangedImplicitMOs) {
1649 ReArrangedImplicitMOs =
false;
1650 for (MachineOperand &MO :
MI.operands()) {
1668 ReArrangedImplicitMOs = useVirtReg(
MI, MO,
Reg);
1669 if (ReArrangedImplicitMOs)
1678 for (MachineOperand &MO :
MI.all_uses()) {
1683 assert(MO.
isUndef() &&
"Should only have undef virtreg uses left");
1684 allocVirtRegUndef(MO);
1689 if (HasEarlyClobber) {
1690 for (MachineOperand &MO :
reverse(
MI.all_defs())) {
1693 assert(!MO.
getSubReg() &&
"should be already handled in def processing");
1718 if (
MI.isCopy() &&
MI.getOperand(0).getReg() ==
MI.getOperand(1).getReg() &&
1719 MI.getNumOperands() == 2) {
1725void RegAllocFastImpl::handleDebugValue(MachineInstr &
MI) {
1728 assert(
MI.isDebugValue() &&
"not a DBG_VALUE*");
1729 for (
const auto &MO :
MI.debug_operands()) {
1735 if (!shouldAllocateRegister(
Reg))
1739 int SS = StackSlotForVirtReg[
Reg];
1749 LiveRegMap::iterator LRI = findLiveVirtReg(
Reg);
1753 if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
1755 for (
auto &RegMO : DbgOps)
1756 setPhysReg(
MI, *RegMO, *LRI);
1758 DanglingDbgValues[
Reg].push_back(&
MI);
1763 LiveDbgValueMap[
Reg].append(DbgOps.begin(), DbgOps.end());
1767void RegAllocFastImpl::handleBundle(MachineInstr &
MI) {
1770 while (BundledMI->isBundledWithPred()) {
1771 for (MachineOperand &MO : BundledMI->operands()) {
1779 DenseMap<Register, LiveReg>::iterator DI = BundleVirtRegsMap.
find(
Reg);
1780 assert(DI != BundleVirtRegsMap.
end() &&
"Unassigned virtual register");
1782 setPhysReg(
MI, MO, DI->second);
1789void RegAllocFastImpl::allocateBasicBlock(MachineBasicBlock &
MBB) {
1793 PosIndexes.unsetInitialized();
1794 RegUnitStates.assign(
TRI->getNumRegUnits(), regFree);
1795 assert(LiveVirtRegs.empty() &&
"Mapping not cleared from last block?");
1798 setPhysRegState(LiveReg.PhysReg, regPreAssigned);
1808 if (
MI.isDebugValue()) {
1809 handleDebugValue(
MI);
1813 allocateInstruction(
MI);
1817 if (
MI.getOpcode() == TargetOpcode::BUNDLE) {
1825 LLVM_DEBUG(
dbgs() <<
"Loading live registers at begin of block.\n");
1830 for (MachineInstr *
MI : Coalesced)
1832 NumCoalesced += Coalesced.size();
1834 for (
auto &UDBGPair : DanglingDbgValues) {
1835 for (MachineInstr *DbgValue : UDBGPair.second) {
1840 LLVM_DEBUG(
dbgs() <<
"Register did not survive for " << *DbgValue
1845 DanglingDbgValues.clear();
1850bool RegAllocFastImpl::runOnMachineFunction(MachineFunction &MF) {
1851 LLVM_DEBUG(
dbgs() <<
"********** FAST REGISTER ALLOCATION **********\n"
1852 <<
"********** Function: " << MF.
getName() <<
'\n');
1858 MRI->freezeReservedRegs();
1860 unsigned NumRegUnits =
TRI->getNumRegUnits();
1862 UsedInInstr.
assign(NumRegUnits, 0);
1866 unsigned NumVirtRegs =
MRI->getNumVirtRegs();
1867 StackSlotForVirtReg.
resize(NumVirtRegs);
1868 LiveVirtRegs.setUniverse(NumVirtRegs);
1869 MayLiveAcrossBlocks.
clear();
1870 MayLiveAcrossBlocks.
resize(NumVirtRegs);
1873 for (MachineBasicBlock &
MBB : MF)
1874 allocateBasicBlock(
MBB);
1876 if (ClearVirtRegs) {
1879 MRI->clearVirtRegs();
1882 StackSlotForVirtReg.
clear();
1883 LiveDbgValueMap.
clear();
1890 RegAllocFastImpl Impl(Opts.Filter, Opts.ClearVRegs);
1891 bool Changed = Impl.runOnMachineFunction(MF);
1901 bool PrintFilterName = Opts.FilterName !=
"all";
1902 bool PrintNoClearVRegs = !Opts.ClearVRegs;
1903 bool PrintSemicolon = PrintFilterName && PrintNoClearVRegs;
1905 OS <<
"regallocfast";
1906 if (PrintFilterName || PrintNoClearVRegs) {
1908 if (PrintFilterName)
1909 OS <<
"filter=" << Opts.FilterName;
1912 if (PrintNoClearVRegs)
1913 OS <<
"no-clear-vregs";
1921 bool ClearVirtRegs) {
1922 return new RegAllocFast(Ftor, ClearVirtRegs);
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_UNLIKELY(EXPR)
This file defines the DenseMap class.
const HexagonInstrInfo * TII
This file implements an indexed map.
Register const TargetRegisterInfo * TRI
This file implements a map that provides insertion order iteration.
Promote Memory to Register
MachineInstr unsigned OpIdx
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
static bool isCoalescable(const MachineInstr &MI)
static cl::opt< bool > IgnoreMissingDefs("rafast-ignore-missing-defs", cl::Hidden)
static bool dominates(InstrPosIndexes &PosIndexes, const MachineInstr &A, const MachineInstr &B)
static bool isTiedToNotUndef(const MachineOperand &MO)
static RegisterRegAlloc fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator)
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the SparseSet class derived from the version described in Briggs,...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
const T & front() const
front - Get the first element.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
bool test(unsigned Idx) const
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
void clear()
clear - Removes all bits from the bitvector.
Represents analyses that only rely on functions' control flow.
iterator find(const_arg_type_t< KeyT > Val)
FunctionPass class - This class is used to implement most global optimizations.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Store the specified register of the given register class to the specified stack frame index.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Load the specified register of the given register class from the specified stack frame index.
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
void resize(typename StorageT::size_type S)
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
An RAII based helper class to modify MachineFunctionProperties when running pass.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
iterator_range< liveout_iterator > liveouts() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator_range< livein_iterator > liveins() const
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
LLVM_ABI void dump() const
Instructions::iterator instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
LLVM_ABI bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
MachineInstrBundleIterator< MachineInstr > iterator
LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
LLVM_ABI int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
bool hasDebugOperandForReg(Register Reg) const
Returns whether this debug value has at least one debug operand with the register Reg.
bool isDebugValueList() const
void setDebugValueUndef()
Sets all register debug operands in this debug value instruction to be undef.
const MachineBasicBlock * getParent() const
bool isNonListDebugValue() const
bool isDebugValue() const
MachineOperand & getDebugOperand(unsigned Index)
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
LLVM_ABI void setIsRenamable(bool Val=true)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
void setIsUndef(bool Val=true)
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI void runOnMachineFunction(const MachineFunction &MF, bool Rev=false)
runOnFunction - Prepare to answer questions about MF.
ArrayRef< MCPhysReg > getOrder(const TargetRegisterClass *RC) const
getOrder - Returns the preferred allocation order for RC.
Wrapper class representing virtual and physical registers.
unsigned virtRegIndex() const
Convert a virtual register number to a 0-based index.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr unsigned id() const
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
void assign(size_type NumElts, ValueParamT Elt)
void push_back(const T &Elt)
typename DenseT::const_iterator const_iterator
typename DenseT::iterator iterator
StringRef - Represent a constant reference to a string, i.e.
ArrayRef< MCPhysReg > getRegisters() const
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ C
The default llvm calling convention, compatible with C.
@ Kill
The last use of a register.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg)
Update a DBG_VALUE whose value has been spilled to FrameIndex.
LLVM_ABI Printable printRegUnit(MCRegUnit Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
MutableArrayRef(T &OneElt) -> MutableArrayRef< T >
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex, Register SpillReg)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.