65#define DEBUG_TYPE "regalloc"
// Coalescer counters. Per LLVM's STATISTIC macro convention these are
// registered under DEBUG_TYPE ("regalloc") and reported when the compiler
// is run with -stats; the string is the human-readable description.
67STATISTIC(numJoins,
"Number of interval joins performed");
68STATISTIC(numCrossRCs,
"Number of cross class joins performed");
69STATISTIC(numCommutes,
"Number of instruction commuting performed");
71STATISTIC(NumReMats,
"Number of instructions re-materialized");
72STATISTIC(NumInflated,
"Number of register classes inflated");
73STATISTIC(NumLaneConflicts,
"Number of dead lane conflicts tested");
74STATISTIC(NumLaneResolves,
"Number of dead lane conflicts resolved");
75STATISTIC(NumShrinkToUses,
"Number of shrinkToUses called");
78 cl::desc(
"Coalesce copies (default=true)"),
93 cl::desc(
"Coalesce copies that span blocks (default=subtarget)"),
98 cl::desc(
"Verify machine instrs before and after register coalescing"),
103 cl::desc(
"During rematerialization for a copy, if the def instruction has "
104 "many other copy uses to be rematerialized, delay the multiple "
105 "separate live interval update work and do them all at once after "
106 "all those rematerialization are done. It will save a lot of "
112 cl::desc(
"If the valnos size of an interval is larger than the threshold, "
113 "it is regarded as a large interval. "),
118 cl::desc(
"For a large interval, if it is coalesced with other live "
119 "intervals many times more than the threshold, stop its "
120 "coalescing to control the compile time. "),
145 DenseMap<unsigned, PHIValPos> PHIValToPos;
149 DenseMap<Register, SmallVector<unsigned, 2>> RegToPHIIdx;
154 using DbgValueLoc = std::pair<SlotIndex, MachineInstr *>;
155 DenseMap<Register, std::vector<DbgValueLoc>> DbgVRegToValues;
159 LaneBitmask ShrinkMask;
163 bool ShrinkMainRange =
false;
167 bool JoinGlobalCopies =
false;
171 bool JoinSplitEdges =
false;
174 SmallVector<MachineInstr *, 8> WorkList;
175 SmallVector<MachineInstr *, 8> LocalWorkList;
179 SmallPtrSet<MachineInstr *, 8> ErasedInstrs;
182 SmallVector<MachineInstr *, 8> DeadDefs;
190 DenseSet<Register> ToBeUpdated;
194 DenseMap<Register, unsigned long> LargeLIVisitCounter;
197 void eliminateDeadDefs(LiveRangeEdit *Edit =
nullptr);
200 void LRE_WillEraseInstruction(MachineInstr *
MI)
override;
203 void coalesceLocals();
206 void joinAllIntervals();
210 void copyCoalesceInMBB(MachineBasicBlock *
MBB);
221 void lateLiveIntervalUpdate();
226 bool copyValueUndefInPredecessors(
LiveRange &S,
const MachineBasicBlock *
MBB,
227 LiveQueryResult SLRQ);
231 void setUndefOnPrunedSubRegUses(LiveInterval &LI,
Register Reg,
232 LaneBitmask PrunedLanes);
239 bool joinCopy(MachineInstr *CopyMI,
bool &Again,
240 SmallPtrSetImpl<MachineInstr *> &CurrentErasedInstrs);
245 bool joinIntervals(CoalescerPair &CP);
248 bool joinVirtRegs(CoalescerPair &CP);
253 bool isHighCostLiveInterval(LiveInterval &LI);
256 bool joinReservedPhysReg(CoalescerPair &CP);
263 void mergeSubRangeInto(LiveInterval &LI,
const LiveRange &ToMerge,
264 LaneBitmask LaneMask, CoalescerPair &CP,
270 LaneBitmask LaneMask,
const CoalescerPair &CP);
276 bool adjustCopiesBackFrom(
const CoalescerPair &CP, MachineInstr *CopyMI);
280 bool hasOtherReachingDefs(LiveInterval &IntA, LiveInterval &IntB,
281 VNInfo *AValNo, VNInfo *BValNo);
291 std::pair<bool, bool> removeCopyByCommutingDef(
const CoalescerPair &CP,
292 MachineInstr *CopyMI);
295 bool removePartialRedundancy(
const CoalescerPair &CP, MachineInstr &CopyMI);
299 bool reMaterializeTrivialDef(
const CoalescerPair &CP, MachineInstr *CopyMI,
303 bool canJoinPhys(
const CoalescerPair &CP);
318 void addUndefFlag(
const LiveInterval &
Int, SlotIndex UseIdx,
319 MachineOperand &MO,
unsigned SubRegIdx);
325 MachineInstr *eliminateUndefCopy(MachineInstr *CopyMI);
339 bool applyTerminalRule(
const MachineInstr &Copy)
const;
345 SmallVectorImpl<MachineInstr *> *Dead =
nullptr) {
347 if (LIS->shrinkToUses(LI, Dead)) {
351 LIS->splitSeparateComponents(*LI, SplitLIs);
359 void deleteInstr(MachineInstr *
MI) {
360 ErasedInstrs.insert(
MI);
361 LIS->RemoveMachineInstrFromMaps(*
MI);
362 MI->eraseFromParent();
371 void checkMergingChangesDbgValues(CoalescerPair &CP,
LiveRange &
LHS,
// Default constructor: all state comes from the members' in-class
// initializers (e.g. ShrinkMainRange/JoinGlobalCopies/JoinSplitEdges
// default to false); analysis pointers must be provided separately.
380 RegisterCoalescer() {}
// Defaulted move-assignment: member-wise move of the coalescer's state.
381 RegisterCoalescer &operator=(RegisterCoalescer &&
Other) =
default;
// Construct with precomputed analyses the coalescer consumes; the
// pointers are stored verbatim in the corresponding members (LIS, SI,
// Loops) — ownership stays with the caller.
383 RegisterCoalescer(LiveIntervals *LIS, SlotIndexes *SI,
384 const MachineLoopInfo *Loops)
385 : LIS(LIS), SI(SI), Loops(Loops) {}
387 bool run(MachineFunction &MF);
394 RegisterCoalescerLegacy() : MachineFunctionPass(ID) {
398 void getAnalysisUsage(AnalysisUsage &AU)
const override;
400 MachineFunctionProperties getClearedProperties()
const override {
401 return MachineFunctionProperties().setIsSSA();
405 bool runOnMachineFunction(MachineFunction &)
override;
// Out-of-line definition of the legacy pass's ID (passed to
// MachineFunctionPass(ID) in the constructor above); per LLVM pass
// convention the variable's address, not its value, identifies the pass.
410char RegisterCoalescerLegacy::ID = 0;
415 "Register Coalescer",
false,
false)
427 Dst = MI->getOperand(0).getReg();
428 DstSub = MI->getOperand(0).getSubReg();
429 Src = MI->getOperand(1).getReg();
430 SrcSub = MI->getOperand(1).getSubReg();
431 }
else if (
MI->isSubregToReg()) {
432 Dst = MI->getOperand(0).getReg();
433 DstSub = tri.composeSubRegIndices(MI->getOperand(0).getSubReg(),
434 MI->getOperand(3).getImm());
435 Src = MI->getOperand(2).getReg();
436 SrcSub = MI->getOperand(2).getSubReg();
448 if (
MBB->pred_size() != 1 ||
MBB->succ_size() != 1)
451 for (
const auto &
MI : *
MBB) {
452 if (!
MI.isCopyLike() && !
MI.isUnconditionalBranch())
462 Flipped = CrossClass =
false;
465 unsigned SrcSub = 0, DstSub = 0;
468 Partial = SrcSub || DstSub;
471 if (Src.isPhysical()) {
472 if (Dst.isPhysical())
482 if (Dst.isPhysical()) {
485 Dst = TRI.getSubReg(Dst, DstSub);
493 Dst = TRI.getMatchingSuperReg(Dst, SrcSub, SrcRC);
504 if (SrcSub && DstSub) {
506 if (Src == Dst && SrcSub != DstSub)
509 NewRC = TRI.getCommonSuperRegClass(SrcRC, SrcSub, DstRC, DstSub, SrcIdx,
516 NewRC = TRI.getMatchingSuperRegClass(DstRC, SrcRC, DstSub);
520 NewRC = TRI.getMatchingSuperRegClass(SrcRC, DstRC, SrcSub);
523 NewRC = TRI.getCommonSubClass(DstRC, SrcRC);
532 if (DstIdx && !SrcIdx) {
538 CrossClass = NewRC != DstRC || NewRC != SrcRC;
541 assert(Src.isVirtual() &&
"Src must be virtual");
542 assert(!(Dst.isPhysical() && DstSub) &&
"Cannot have a physical SubIdx");
549 if (DstReg.isPhysical())
561 unsigned SrcSub = 0, DstSub = 0;
569 }
else if (Src != SrcReg) {
574 if (DstReg.isPhysical()) {
575 if (!Dst.isPhysical())
577 assert(!DstIdx && !SrcIdx &&
"Inconsistent CoalescerPair state.");
580 Dst = TRI.getSubReg(Dst, DstSub);
583 return DstReg == Dst;
585 return Register(TRI.getSubReg(DstReg, SrcSub)) == Dst;
591 return TRI.composeSubRegIndices(SrcIdx, SrcSub) ==
592 TRI.composeSubRegIndices(DstIdx, DstSub);
596void RegisterCoalescerLegacy::getAnalysisUsage(
AnalysisUsage &AU)
const {
608void RegisterCoalescer::eliminateDeadDefs(
LiveRangeEdit *Edit) {
618void RegisterCoalescer::LRE_WillEraseInstruction(
MachineInstr *
MI) {
623bool RegisterCoalescer::adjustCopiesBackFrom(
const CoalescerPair &CP,
625 assert(!
CP.isPartial() &&
"This doesn't work for partial copies.");
626 assert(!
CP.isPhys() &&
"This doesn't work for physreg copies.");
651 if (BS == IntB.
end())
653 VNInfo *BValNo = BS->valno;
658 if (BValNo->
def != CopyIdx)
665 if (AS == IntA.
end())
667 VNInfo *AValNo = AS->valno;
673 if (!
CP.isCoalescable(ACopyMI) || !ACopyMI->
isFullCopy())
679 if (ValS == IntB.
end())
697 SlotIndex FillerStart = ValS->end, FillerEnd = BS->start;
701 BValNo->
def = FillerStart;
709 if (BValNo != ValS->valno)
718 S.removeSegment(*SS,
true);
722 if (!S.getVNInfoAt(FillerStart)) {
725 S.extendInBlock(BBStart, FillerStart);
727 VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx);
730 if (SubBValNo != SubValSNo)
731 S.MergeValueNumberInto(SubBValNo, SubValSNo);
748 bool RecomputeLiveRange = AS->end == CopyIdx;
749 if (!RecomputeLiveRange) {
752 if (SS != S.end() &&
SS->end == CopyIdx) {
753 RecomputeLiveRange =
true;
758 if (RecomputeLiveRange)
765bool RegisterCoalescer::hasOtherReachingDefs(
LiveInterval &IntA,
774 if (ASeg.
valno != AValNo)
777 if (BI != IntB.
begin())
779 for (; BI != IntB.
end() && ASeg.
end >= BI->start; ++BI) {
780 if (BI->valno == BValNo)
782 if (BI->start <= ASeg.
start && BI->end > ASeg.
start)
784 if (BI->start > ASeg.
start && BI->start < ASeg.
end)
798 bool MergedWithDead =
false;
800 if (S.
valno != SrcValNo)
811 MergedWithDead =
true;
814 return std::make_pair(
Changed, MergedWithDead);
818RegisterCoalescer::removeCopyByCommutingDef(
const CoalescerPair &CP,
851 assert(BValNo !=
nullptr && BValNo->
def == CopyIdx);
857 return {
false,
false};
860 return {
false,
false};
862 return {
false,
false};
869 return {
false,
false};
881 if (!
TII->findCommutedOpIndices(*
DefMI, UseOpIdx, NewDstIdx))
882 return {
false,
false};
887 return {
false,
false};
891 if (hasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
892 return {
false,
false};
901 if (US == IntA.
end() || US->valno != AValNo)
905 return {
false,
false};
915 TII->commuteInstruction(*
DefMI,
false, UseOpIdx, NewDstIdx);
917 return {
false,
false};
919 !
MRI->constrainRegClass(IntB.
reg(),
MRI->getRegClass(IntA.
reg())))
920 return {
false,
false};
921 if (NewMI !=
DefMI) {
946 UseMO.setReg(NewReg);
951 assert(US != IntA.
end() &&
"Use must be live");
952 if (US->valno != AValNo)
955 UseMO.setIsKill(
false);
957 UseMO.substPhysReg(NewReg, *
TRI);
959 UseMO.setReg(NewReg);
978 VNInfo *SubDVNI = S.getVNInfoAt(DefIdx);
981 VNInfo *SubBValNo = S.getVNInfoAt(CopyIdx);
983 S.MergeValueNumberInto(SubDVNI, SubBValNo);
991 bool ShrinkB =
false;
1005 VNInfo *ASubValNo = SA.getVNInfoAt(AIdx);
1014 MaskA |= SA.LaneMask;
1020 VNInfo *BSubValNo = SR.empty() ? SR.getNextValue(CopyIdx, Allocator)
1021 : SR.getVNInfoAt(CopyIdx);
1022 assert(BSubValNo != nullptr);
1023 auto P = addSegmentsWithValNo(SR, BSubValNo, SA, ASubValNo);
1024 ShrinkB |= P.second;
1026 BSubValNo->def = ASubValNo->def;
1034 if ((SB.LaneMask & MaskA).any())
1038 SB.removeSegment(*S,
true);
1042 BValNo->
def = AValNo->
def;
1044 ShrinkB |=
P.second;
1051 return {
true, ShrinkB};
1101bool RegisterCoalescer::removePartialRedundancy(
const CoalescerPair &CP,
1134 bool FoundReverseCopy =
false;
1153 bool ValB_Changed =
false;
1154 for (
auto *VNI : IntB.
valnos) {
1155 if (VNI->isUnused())
1158 ValB_Changed =
true;
1166 FoundReverseCopy =
true;
1170 if (!FoundReverseCopy)
1180 if (CopyLeftBB && CopyLeftBB->
succ_size() > 1)
1191 if (InsPos != CopyLeftBB->
end()) {
1197 LLVM_DEBUG(
dbgs() <<
"\tremovePartialRedundancy: Move the copy to "
1202 TII->get(TargetOpcode::COPY), IntB.
reg())
1213 ErasedInstrs.
erase(NewCopyMI);
1215 LLVM_DEBUG(
dbgs() <<
"\tremovePartialRedundancy: Remove the copy from "
1226 deleteInstr(&CopyMI);
1242 if (!IntB.
liveAt(UseIdx))
1243 MO.setIsUndef(
true);
1253 VNInfo *BValNo = SR.Query(CopyIdx).valueOutOrDead();
1254 assert(BValNo &&
"All sublanes should be live");
1263 for (
unsigned I = 0;
I != EndPoints.
size();) {
1265 EndPoints[
I] = EndPoints.
back();
1287 assert(!
Reg.isPhysical() &&
"This code cannot handle physreg aliasing");
1290 if (
Op.getReg() !=
Reg)
1294 if (
Op.getSubReg() == 0 ||
Op.isUndef())
1300bool RegisterCoalescer::reMaterializeTrivialDef(
const CoalescerPair &CP,
1304 Register SrcReg =
CP.isFlipped() ?
CP.getDstReg() :
CP.getSrcReg();
1305 unsigned SrcIdx =
CP.isFlipped() ?
CP.getDstIdx() :
CP.getSrcIdx();
1306 Register DstReg =
CP.isFlipped() ?
CP.getSrcReg() :
CP.getDstReg();
1307 unsigned DstIdx =
CP.isFlipped() ?
CP.getSrcIdx() :
CP.getDstIdx();
1328 if (!
TII->isTriviallyReMaterializable(*
DefMI))
1333 bool SawStore =
false;
1337 if (
MCID.getNumDefs() != 1)
1345 if (SrcIdx && DstIdx)
1380 unsigned NewDstIdx =
TRI->composeSubRegIndices(
CP.getSrcIdx(), DefSubIdx);
1382 NewDstReg =
TRI->getSubReg(DstReg, NewDstIdx);
1392 "Only expect to deal with virtual or physical registers");
1397 LiveRangeEdit Edit(&SrcInt, NewRegs, *MF, *LIS,
nullptr,
this);
1421 assert(SrcIdx == 0 &&
CP.isFlipped() &&
1422 "Shouldn't have SrcIdx+DstIdx at this point");
1425 TRI->getCommonSubClass(DefRC, DstRC);
1426 if (CommonRC !=
nullptr) {
1434 if (MO.isReg() && MO.getReg() == DstReg && MO.getSubReg() == DstIdx) {
1456 "No explicit operands after implicit operands.");
1459 "unexpected implicit virtual register def");
1465 ErasedInstrs.
insert(CopyMI);
1479 bool NewMIDefinesFullReg =
false;
1489 if (MO.
getReg() == DstReg)
1490 NewMIDefinesFullReg =
true;
1495 ((
TRI->getSubReg(MO.
getReg(), DefSubIdx) ==
1508 assert(!
MRI->shouldTrackSubRegLiveness(DstReg) &&
1509 "subrange update for implicit-def of super register may not be "
1510 "properly handled");
1518 if (DefRC !=
nullptr) {
1520 NewRC =
TRI->getMatchingSuperRegClass(NewRC, DefRC, NewIdx);
1522 NewRC =
TRI->getCommonSubClass(NewRC, DefRC);
1523 assert(NewRC &&
"subreg chosen for remat incompatible with instruction");
1529 SR.LaneMask =
TRI->composeSubRegIndexLaneMask(DstIdx, SR.LaneMask);
1531 MRI->setRegClass(DstReg, NewRC);
1534 updateRegDefsUses(DstReg, DstReg, DstIdx);
1553 MRI->shouldTrackSubRegLiveness(DstReg)) {
1583 if (!SR.liveAt(DefIndex))
1584 SR.createDeadDef(DefIndex,
Alloc);
1585 MaxMask &= ~SR.LaneMask;
1587 if (MaxMask.
any()) {
1605 bool UpdatedSubRanges =
false;
1610 if ((SR.
LaneMask & DstMask).none()) {
1612 <<
"Removing undefined SubRange "
1625 UpdatedSubRanges =
true;
1632 if (!SR.
liveAt(DefIndex))
1636 if (UpdatedSubRanges)
1643 "Only expect virtual or physical registers in remat");
1646 if (!NewMIDefinesFullReg) {
1648 CopyDstReg,
true ,
true ,
false ));
1691 if (
MRI->use_nodbg_empty(SrcReg)) {
1697 UseMO.substPhysReg(DstReg, *
TRI);
1699 UseMO.setReg(DstReg);
1708 if (ToBeUpdated.
count(SrcReg))
1711 unsigned NumCopyUses = 0;
1713 if (UseMO.getParent()->isCopyLike())
1719 if (!DeadDefs.
empty())
1720 eliminateDeadDefs(&Edit);
1722 ToBeUpdated.
insert(SrcReg);
1740 unsigned SrcSubIdx = 0, DstSubIdx = 0;
1741 if (!
isMoveInstr(*
TRI, CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
1750 if ((SR.
LaneMask & SrcMask).none())
1755 }
else if (SrcLI.
liveAt(Idx))
1763 assert(Seg !=
nullptr &&
"No segment for defining instruction");
1768 if (((V &&
V->isPHIDef()) || (!V && !DstLI.
liveAt(Idx)))) {
1776 CopyMI->
getOpcode() == TargetOpcode::SUBREG_TO_REG);
1781 CopyMI->
setDesc(
TII->get(TargetOpcode::IMPLICIT_DEF));
1798 if ((SR.
LaneMask & DstMask).none())
1820 if ((SR.
LaneMask & UseMask).none())
1828 isLive = DstLI.
liveAt(UseIdx);
1841 if (MO.
getReg() == DstReg)
1853 bool IsUndef =
true;
1855 if ((S.LaneMask & Mask).none())
1857 if (S.liveAt(UseIdx)) {
1870 ShrinkMainRange =
true;
1879 if (DstInt && DstInt->
hasSubRanges() && DstReg != SrcReg) {
1888 if (
MI.isDebugInstr())
1891 addUndefFlag(*DstInt, UseIdx, MO,
SubReg);
1897 E =
MRI->reg_instr_end();
1906 if (SrcReg == DstReg && !Visited.
insert(
UseMI).second)
1919 for (
unsigned Op :
Ops) {
1925 if (SubIdx && MO.
isDef())
1931 unsigned SubUseIdx =
TRI->composeSubRegIndices(SubIdx, MO.
getSubReg());
1932 if (SubUseIdx != 0 &&
MRI->shouldTrackSubRegLiveness(DstReg)) {
1949 addUndefFlag(*DstInt, UseIdx, MO, SubUseIdx);
1960 dbgs() <<
"\t\tupdated: ";
1968bool RegisterCoalescer::canJoinPhys(
const CoalescerPair &CP) {
1972 if (!
MRI->isReserved(
CP.getDstReg())) {
1973 LLVM_DEBUG(
dbgs() <<
"\tCan only merge into reserved registers.\n");
1982 dbgs() <<
"\tCannot join complex intervals into reserved register.\n");
1986bool RegisterCoalescer::copyValueUndefInPredecessors(
2000void RegisterCoalescer::setUndefOnPrunedSubRegUses(
LiveInterval &LI,
2007 if (SubRegIdx == 0 || MO.
isUndef())
2013 if (!S.
liveAt(Pos) && (PrunedLanes & SubRegMask).any()) {
2029bool RegisterCoalescer::joinCopy(
2036 if (!
CP.setRegisters(CopyMI)) {
2041 if (
CP.getNewRC()) {
2042 auto SrcRC =
MRI->getRegClass(
CP.getSrcReg());
2043 auto DstRC =
MRI->getRegClass(
CP.getDstReg());
2044 unsigned SrcIdx =
CP.getSrcIdx();
2045 unsigned DstIdx =
CP.getDstIdx();
2046 if (
CP.isFlipped()) {
2050 if (!
TRI->shouldCoalesce(CopyMI, SrcRC, SrcIdx, DstRC, DstIdx,
2051 CP.getNewRC(), *LIS)) {
2063 eliminateDeadDefs();
2070 if (
MachineInstr *UndefMI = eliminateUndefCopy(CopyMI)) {
2071 if (UndefMI->isImplicitDef())
2073 deleteInstr(CopyMI);
2081 if (
CP.getSrcReg() ==
CP.getDstReg()) {
2083 LLVM_DEBUG(
dbgs() <<
"\tCopy already coalesced: " << LI <<
'\n');
2088 assert(ReadVNI &&
"No value before copy and no <undef> flag.");
2089 assert(ReadVNI != DefVNI &&
"Cannot read and define the same value.");
2104 if (copyValueUndefInPredecessors(S,
MBB, SLRQ)) {
2105 LLVM_DEBUG(
dbgs() <<
"Incoming sublane value is undef at copy\n");
2106 PrunedLanes |= S.LaneMask;
2113 if (PrunedLanes.
any()) {
2114 LLVM_DEBUG(
dbgs() <<
"Pruning undef incoming lanes: " << PrunedLanes
2116 setUndefOnPrunedSubRegUses(LI,
CP.getSrcReg(), PrunedLanes);
2121 deleteInstr(CopyMI);
2130 if (!canJoinPhys(CP)) {
2133 bool IsDefCopy =
false;
2134 if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy))
2147 dbgs() <<
"\tConsidering merging to "
2148 <<
TRI->getRegClassName(
CP.getNewRC()) <<
" with ";
2149 if (
CP.getDstIdx() &&
CP.getSrcIdx())
2151 <<
TRI->getSubRegIndexName(
CP.getDstIdx()) <<
" and "
2153 <<
TRI->getSubRegIndexName(
CP.getSrcIdx()) <<
'\n';
2161 ShrinkMainRange =
false;
2167 if (!joinIntervals(CP)) {
2172 bool IsDefCopy =
false;
2173 if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy))
2178 if (!
CP.isPartial() && !
CP.isPhys()) {
2179 bool Changed = adjustCopiesBackFrom(CP, CopyMI);
2180 bool Shrink =
false;
2182 std::tie(
Changed, Shrink) = removeCopyByCommutingDef(CP, CopyMI);
2184 deleteInstr(CopyMI);
2186 Register DstReg =
CP.isFlipped() ?
CP.getSrcReg() :
CP.getDstReg();
2198 if (!
CP.isPartial() && !
CP.isPhys())
2199 if (removePartialRedundancy(CP, *CopyMI))
2210 if (
CP.isCrossClass()) {
2212 MRI->setRegClass(
CP.getDstReg(),
CP.getNewRC());
2223 if (ErasedInstrs.
erase(CopyMI))
2225 CurrentErasedInstrs.
insert(CopyMI);
2230 updateRegDefsUses(
CP.getDstReg(),
CP.getDstReg(),
CP.getDstIdx());
2231 updateRegDefsUses(
CP.getSrcReg(),
CP.getDstReg(),
CP.getSrcIdx());
2234 if (ShrinkMask.
any()) {
2237 if ((S.LaneMask & ShrinkMask).none())
2242 ShrinkMainRange =
true;
2250 if (ToBeUpdated.
count(
CP.getSrcReg()))
2251 ShrinkMainRange =
true;
2253 if (ShrinkMainRange) {
2263 TRI->updateRegAllocHint(
CP.getSrcReg(),
CP.getDstReg(), *MF);
2268 dbgs() <<
"\tResult = ";
2280bool RegisterCoalescer::joinReservedPhysReg(
CoalescerPair &CP) {
2283 assert(
CP.isPhys() &&
"Must be a physreg copy");
2284 assert(
MRI->isReserved(DstReg) &&
"Not a reserved register");
2288 assert(
RHS.containsOneValue() &&
"Invalid join with reserved register");
2297 if (!
MRI->isConstantPhysReg(DstReg)) {
2301 if (!
MRI->isReserved(*RI))
2314 !RegMaskUsable.
test(DstReg.
id())) {
2327 if (
CP.isFlipped()) {
2335 CopyMI =
MRI->getVRegDef(SrcReg);
2336 deleteInstr(CopyMI);
2345 if (!
MRI->hasOneNonDBGUse(SrcReg)) {
2356 CopyMI = &*
MRI->use_instr_nodbg_begin(SrcReg);
2360 if (!
MRI->isConstantPhysReg(DstReg)) {
2368 if (
MI->readsRegister(DstReg,
TRI)) {
2378 <<
printReg(DstReg,
TRI) <<
" at " << CopyRegIdx <<
"\n");
2381 deleteInstr(CopyMI);
2391 MRI->clearKillFlags(
CP.getSrcReg());
2476 const unsigned SubIdx;
2480 const LaneBitmask LaneMask;
2484 const bool SubRangeJoin;
2487 const bool TrackSubRegLiveness;
2490 SmallVectorImpl<VNInfo *> &NewVNInfo;
2492 const CoalescerPair &
CP;
2494 SlotIndexes *Indexes;
2495 const TargetRegisterInfo *
TRI;
2499 SmallVector<int, 8> Assignments;
2503 enum ConflictResolution {
2535 ConflictResolution Resolution = CR_Keep;
2538 LaneBitmask WriteLanes;
2542 LaneBitmask ValidLanes;
2545 VNInfo *RedefVNI =
nullptr;
2548 VNInfo *OtherVNI =
nullptr;
2561 bool ErasableImplicitDef =
false;
2565 bool Pruned =
false;
2568 bool PrunedComputed =
false;
2575 bool Identical =
false;
// True once this value has been analyzed: analyzeValue() asserts
// !isAnalyzed() on entry and is the code that populates WriteLanes,
// so a non-empty WriteLanes mask marks the value as processed.
2579 bool isAnalyzed()
const {
return WriteLanes.
any(); }
2583 void mustKeepImplicitDef(
const TargetRegisterInfo &
TRI,
2584 const MachineInstr &ImpDef) {
2586 ErasableImplicitDef =
false;
2597 LaneBitmask computeWriteLanes(
const MachineInstr *
DefMI,
bool &Redef)
const;
2600 std::pair<const VNInfo *, Register> followCopyChain(
const VNInfo *VNI)
const;
2602 bool valuesIdentical(VNInfo *Value0, VNInfo *Value1,
2603 const JoinVals &
Other)
const;
2612 ConflictResolution analyzeValue(
unsigned ValNo, JoinVals &
Other);
2617 void computeAssignment(
unsigned ValNo, JoinVals &
Other);
2635 taintExtent(
unsigned ValNo, LaneBitmask TaintedLanes, JoinVals &
Other,
2636 SmallVectorImpl<std::pair<SlotIndex, LaneBitmask>> &TaintExtent);
2640 bool usesLanes(
const MachineInstr &
MI,
Register,
unsigned, LaneBitmask)
const;
2648 bool isPrunedValue(
unsigned ValNo, JoinVals &
Other);
2652 SmallVectorImpl<VNInfo *> &newVNInfo,
const CoalescerPair &cp,
2653 LiveIntervals *lis,
const TargetRegisterInfo *
TRI,
bool SubRangeJoin,
2654 bool TrackSubRegLiveness)
2655 : LR(LR),
Reg(
Reg), SubIdx(SubIdx), LaneMask(LaneMask),
2656 SubRangeJoin(SubRangeJoin), TrackSubRegLiveness(TrackSubRegLiveness),
2657 NewVNInfo(newVNInfo),
CP(cp), LIS(lis), Indexes(LIS->getSlotIndexes()),
2658 TRI(
TRI), Assignments(LR.getNumValNums(), -1),
2659 Vals(LR.getNumValNums()) {}
2663 bool mapValues(JoinVals &
Other);
2667 bool resolveConflicts(JoinVals &
Other);
2672 void pruneValues(JoinVals &
Other, SmallVectorImpl<SlotIndex> &EndPoints,
2678 void pruneSubRegValues(LiveInterval &LI, LaneBitmask &ShrinkMask);
2687 void pruneMainSegments(LiveInterval &LI,
bool &ShrinkMainRange);
2693 void eraseInstrs(SmallPtrSetImpl<MachineInstr *> &ErasedInstrs,
2694 SmallVectorImpl<Register> &ShrinkRegs,
2695 LiveInterval *LI =
nullptr);
2698 void removeImplicitDefs();
// Read-only raw access to the value-number assignment table (one entry
// per valno of LR, sized/filled-with -1 in the constructor and updated
// by computeAssignment()); callers pass this to LiveRange::join.
2701 const int *getAssignments()
const {
return Assignments.
data(); }
2704 ConflictResolution getResolution(
unsigned Num)
const {
2705 return Vals[Num].Resolution;
2712 bool &Redef)
const {
2717 L |=
TRI->getSubRegIndexLaneMask(
2725std::pair<const VNInfo *, Register>
2726JoinVals::followCopyChain(
const VNInfo *VNI)
const {
2732 assert(
MI &&
"No defining instruction");
2733 if (!
MI->isFullCopy())
2734 return std::make_pair(VNI, TrackReg);
2735 Register SrcReg =
MI->getOperand(1).getReg();
2737 return std::make_pair(VNI, TrackReg);
2751 LaneBitmask SMask =
TRI->composeSubRegIndexLaneMask(SubIdx, S.LaneMask);
2752 if ((SMask & LaneMask).
none())
2760 return std::make_pair(VNI, TrackReg);
2763 if (ValueIn ==
nullptr) {
2770 return std::make_pair(
nullptr, SrcReg);
2775 return std::make_pair(VNI, TrackReg);
2778bool JoinVals::valuesIdentical(
VNInfo *Value0,
VNInfo *Value1,
2779 const JoinVals &
Other)
const {
2782 std::tie(Orig0, Reg0) = followCopyChain(Value0);
2783 if (Orig0 == Value1 && Reg0 ==
Other.Reg)
2788 std::tie(Orig1, Reg1) =
Other.followCopyChain(Value1);
2792 if (Orig0 ==
nullptr || Orig1 ==
nullptr)
2793 return Orig0 == Orig1 && Reg0 == Reg1;
2799 return Orig0->
def == Orig1->
def && Reg0 == Reg1;
2802JoinVals::ConflictResolution JoinVals::analyzeValue(
unsigned ValNo,
2804 Val &
V = Vals[ValNo];
2805 assert(!
V.isAnalyzed() &&
"Value has already been analyzed!");
2817 :
TRI->getSubRegIndexLaneMask(SubIdx);
2818 V.ValidLanes =
V.WriteLanes = Lanes;
2827 V.ErasableImplicitDef =
true;
2831 V.ValidLanes =
V.WriteLanes = computeWriteLanes(
DefMI, Redef);
2850 assert((TrackSubRegLiveness ||
V.RedefVNI) &&
2851 "Instruction is reading nonexistent value");
2852 if (
V.RedefVNI !=
nullptr) {
2853 computeAssignment(
V.RedefVNI->id,
Other);
2854 V.ValidLanes |= Vals[
V.RedefVNI->id].ValidLanes;
2866 V.ErasableImplicitDef =
true;
2883 if (OtherVNI->
def < VNI->
def)
2884 Other.computeAssignment(OtherVNI->
id, *
this);
2889 return CR_Impossible;
2891 V.OtherVNI = OtherVNI;
2892 Val &OtherV =
Other.Vals[OtherVNI->
id];
2896 if (!OtherV.isAnalyzed() ||
Other.Assignments[OtherVNI->
id] == -1)
2903 if ((
V.ValidLanes & OtherV.ValidLanes).any())
2905 return CR_Impossible;
2920 Other.computeAssignment(
V.OtherVNI->id, *
this);
2921 Val &OtherV =
Other.Vals[
V.OtherVNI->id];
2923 if (OtherV.ErasableImplicitDef) {
2943 <<
", keeping it.\n");
2944 OtherV.mustKeepImplicitDef(*
TRI, *OtherImpDef);
2951 dbgs() <<
"IMPLICIT_DEF defined at " <<
V.OtherVNI->def
2952 <<
" may be live into EH pad successors, keeping it.\n");
2953 OtherV.mustKeepImplicitDef(*
TRI, *OtherImpDef);
2956 OtherV.ValidLanes &= ~OtherV.WriteLanes;
2971 if (
CP.isCoalescable(
DefMI)) {
2974 V.ValidLanes &= ~V.WriteLanes | OtherV.ValidLanes;
2989 valuesIdentical(VNI,
V.OtherVNI,
Other)) {
3012 if ((
V.WriteLanes & OtherV.ValidLanes).none())
3025 "Only early clobber defs can overlap a kill");
3026 return CR_Impossible;
3033 if ((
TRI->getSubRegIndexLaneMask(
Other.SubIdx) & ~
V.WriteLanes).none())
3034 return CR_Impossible;
3036 if (TrackSubRegLiveness) {
3041 if (!OtherLI.hasSubRanges()) {
3043 return (OtherMask &
V.WriteLanes).none() ? CR_Replace : CR_Impossible;
3051 TRI->composeSubRegIndexLaneMask(
Other.SubIdx, OtherSR.LaneMask);
3052 if ((OtherMask &
V.WriteLanes).none())
3055 auto OtherSRQ = OtherSR.Query(VNI->
def);
3056 if (OtherSRQ.valueIn() && OtherSRQ.endPoint() > VNI->
def) {
3058 return CR_Impossible;
3071 return CR_Impossible;
3080 return CR_Unresolved;
3083void JoinVals::computeAssignment(
unsigned ValNo, JoinVals &
Other) {
3084 Val &
V = Vals[ValNo];
3085 if (
V.isAnalyzed()) {
3088 assert(Assignments[ValNo] != -1 &&
"Bad recursion?");
3091 switch ((
V.Resolution = analyzeValue(ValNo,
Other))) {
3095 assert(
V.OtherVNI &&
"OtherVNI not assigned, can't merge.");
3096 assert(
Other.Vals[
V.OtherVNI->id].isAnalyzed() &&
"Missing recursion");
3097 Assignments[ValNo] =
Other.Assignments[
V.OtherVNI->id];
3101 <<
V.OtherVNI->def <<
" --> @"
3102 << NewVNInfo[Assignments[ValNo]]->def <<
'\n');
3105 case CR_Unresolved: {
3107 assert(
V.OtherVNI &&
"OtherVNI not assigned, can't prune");
3108 Val &OtherV =
Other.Vals[
V.OtherVNI->id];
3109 OtherV.Pruned =
true;
3114 Assignments[ValNo] = NewVNInfo.
size();
3120bool JoinVals::mapValues(JoinVals &
Other) {
3122 computeAssignment(i,
Other);
3123 if (Vals[i].Resolution == CR_Impossible) {
3132bool JoinVals::taintExtent(
3141 assert(OtherI !=
Other.LR.end() &&
"No conflict?");
3146 if (End >= MBBEnd) {
3148 << OtherI->valno->id <<
'@' << OtherI->start <<
'\n');
3152 << OtherI->valno->id <<
'@' << OtherI->start <<
" to "
3157 TaintExtent.push_back(std::make_pair(End, TaintedLanes));
3160 if (++OtherI ==
Other.LR.end() || OtherI->start >= MBBEnd)
3164 const Val &OV =
Other.Vals[OtherI->valno->id];
3165 TaintedLanes &= ~OV.WriteLanes;
3168 }
while (TaintedLanes.
any());
3174 if (
MI.isDebugOrPseudoInstr())
3181 unsigned S =
TRI->composeSubRegIndices(SubIdx, MO.
getSubReg());
3182 if ((Lanes &
TRI->getSubRegIndexLaneMask(S)).any())
3188bool JoinVals::resolveConflicts(JoinVals &
Other) {
3191 assert(
V.Resolution != CR_Impossible &&
"Unresolvable conflict");
3192 if (
V.Resolution != CR_Unresolved)
3201 assert(
V.OtherVNI &&
"Inconsistent conflict resolution.");
3203 const Val &OtherV =
Other.Vals[
V.OtherVNI->id];
3208 LaneBitmask TaintedLanes =
V.WriteLanes & OtherV.ValidLanes;
3210 if (!taintExtent(i, TaintedLanes,
Other, TaintExtent))
3214 assert(!TaintExtent.
empty() &&
"There should be at least one conflict.");
3227 "Interference ends on VNI->def. Should have been handled earlier");
3230 assert(LastMI &&
"Range must end at a proper instruction");
3231 unsigned TaintNum = 0;
3234 if (usesLanes(*
MI,
Other.Reg,
Other.SubIdx, TaintedLanes)) {
3239 if (&*
MI == LastMI) {
3240 if (++TaintNum == TaintExtent.
size())
3243 assert(LastMI &&
"Range must end at a proper instruction");
3244 TaintedLanes = TaintExtent[TaintNum].second;
3250 V.Resolution = CR_Replace;
3256bool JoinVals::isPrunedValue(
unsigned ValNo, JoinVals &
Other) {
3257 Val &
V = Vals[ValNo];
3258 if (
V.Pruned ||
V.PrunedComputed)
3261 if (
V.Resolution != CR_Erase &&
V.Resolution != CR_Merge)
3266 V.PrunedComputed =
true;
3267 V.Pruned =
Other.isPrunedValue(
V.OtherVNI->id, *
this);
3271void JoinVals::pruneValues(JoinVals &
Other,
3273 bool changeInstrs) {
3276 switch (Vals[i].Resolution) {
3286 Val &OtherV =
Other.Vals[Vals[i].OtherVNI->id];
3288 OtherV.ErasableImplicitDef && OtherV.Resolution == CR_Keep;
3289 if (!
Def.isBlock()) {
3309 <<
": " <<
Other.LR <<
'\n');
3314 if (isPrunedValue(i,
Other)) {
3321 << Def <<
": " << LR <<
'\n');
3379 bool DidPrune =
false;
3384 if (
V.Resolution != CR_Erase &&
3385 (
V.Resolution != CR_Keep || !
V.ErasableImplicitDef || !
V.Pruned))
3392 OtherDef =
V.OtherVNI->def;
3395 LLVM_DEBUG(
dbgs() <<
"\t\tExpecting instruction removal at " << Def
3403 if (ValueOut !=
nullptr &&
3405 (
V.Identical &&
V.Resolution == CR_Erase && ValueOut->
def == Def))) {
3407 <<
" at " << Def <<
"\n");
3414 if (
V.Identical && S.Query(OtherDef).valueOutOrDead()) {
3424 ShrinkMask |= S.LaneMask;
3438 ShrinkMask |= S.LaneMask;
3450 if (VNI->
def == Def)
3456void JoinVals::pruneMainSegments(
LiveInterval &LI,
bool &ShrinkMainRange) {
3460 if (Vals[i].Resolution != CR_Keep)
3465 Vals[i].Pruned =
true;
3466 ShrinkMainRange =
true;
3470void JoinVals::removeImplicitDefs() {
3473 if (
V.Resolution != CR_Keep || !
V.ErasableImplicitDef || !
V.Pruned)
3489 switch (Vals[i].Resolution) {
3494 if (!Vals[i].ErasableImplicitDef || !Vals[i].Pruned)
3506 if (LI !=
nullptr) {
3531 ED = ED.
isValid() ? std::min(ED,
I->start) :
I->start;
3533 LE =
LE.isValid() ? std::max(LE,
I->end) :
I->
end;
3536 NewEnd = std::min(NewEnd, LE);
3538 NewEnd = std::min(NewEnd, ED);
3544 if (S != LR.
begin())
3545 std::prev(S)->end = NewEnd;
3549 dbgs() <<
"\t\tremoved " << i <<
'@' <<
Def <<
": " << LR <<
'\n';
3551 dbgs() <<
"\t\t LHS = " << *LI <<
'\n';
3558 assert(
MI &&
"No instruction to erase");
3567 MI->eraseFromParent();
3580 JoinVals RHSVals(RRange,
CP.getSrcReg(),
CP.getSrcIdx(), LaneMask, NewVNInfo,
3581 CP, LIS,
TRI,
true,
true);
3582 JoinVals LHSVals(LRange,
CP.getDstReg(),
CP.getDstIdx(), LaneMask, NewVNInfo,
3583 CP, LIS,
TRI,
true,
true);
3590 if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals)) {
3595 if (!LHSVals.resolveConflicts(RHSVals) ||
3596 !RHSVals.resolveConflicts(LHSVals)) {
3607 LHSVals.pruneValues(RHSVals, EndPoints,
false);
3608 RHSVals.pruneValues(LHSVals, EndPoints,
false);
3610 LHSVals.removeImplicitDefs();
3611 RHSVals.removeImplicitDefs();
3616 LRange.
join(RRange, LHSVals.getAssignments(), RHSVals.getAssignments(),
3621 if (EndPoints.
empty())
3627 dbgs() <<
"\t\trestoring liveness to " << EndPoints.
size() <<
" points: ";
3628 for (
unsigned i = 0, n = EndPoints.
size(); i != n; ++i) {
3629 dbgs() << EndPoints[i];
3633 dbgs() <<
": " << LRange <<
'\n';
3638void RegisterCoalescer::mergeSubRangeInto(
LiveInterval &LI,
3642 unsigned ComposeSubRegIdx) {
3652 joinSubRegRanges(SR, RangeCopy, SR.
LaneMask, CP);
3658bool RegisterCoalescer::isHighCostLiveInterval(
LiveInterval &LI) {
3661 auto &Counter = LargeLIVisitCounter[LI.
reg()];
3673 bool TrackSubRegLiveness =
MRI->shouldTrackSubRegLiveness(*
CP.getNewRC());
3675 NewVNInfo, CP, LIS,
TRI,
false, TrackSubRegLiveness);
3677 NewVNInfo, CP, LIS,
TRI,
false, TrackSubRegLiveness);
3681 if (isHighCostLiveInterval(
LHS) || isHighCostLiveInterval(
RHS))
3686 if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals))
3690 if (!LHSVals.resolveConflicts(RHSVals) || !RHSVals.resolveConflicts(LHSVals))
3694 if (
RHS.hasSubRanges() ||
LHS.hasSubRanges()) {
3699 unsigned DstIdx =
CP.getDstIdx();
3700 if (!
LHS.hasSubRanges()) {
3702 :
TRI->getSubRegIndexLaneMask(DstIdx);
3706 }
else if (DstIdx != 0) {
3717 unsigned SrcIdx =
CP.getSrcIdx();
3718 if (!
RHS.hasSubRanges()) {
3720 :
TRI->getSubRegIndexLaneMask(SrcIdx);
3721 mergeSubRangeInto(
LHS,
RHS, Mask, CP, DstIdx);
3726 mergeSubRangeInto(
LHS, R, Mask, CP, DstIdx);
3733 LHSVals.pruneMainSegments(
LHS, ShrinkMainRange);
3735 LHSVals.pruneSubRegValues(
LHS, ShrinkMask);
3736 RHSVals.pruneSubRegValues(
LHS, ShrinkMask);
3737 }
else if (TrackSubRegLiveness && !
CP.getDstIdx() &&
CP.getSrcIdx()) {
3739 CP.getNewRC()->getLaneMask(),
LHS);
3740 mergeSubRangeInto(
LHS,
RHS,
TRI->getSubRegIndexLaneMask(
CP.getSrcIdx()), CP,
3742 LHSVals.pruneMainSegments(
LHS, ShrinkMainRange);
3743 LHSVals.pruneSubRegValues(
LHS, ShrinkMask);
3751 LHSVals.pruneValues(RHSVals, EndPoints,
true);
3752 RHSVals.pruneValues(LHSVals, EndPoints,
true);
3757 LHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs, &
LHS);
3758 RHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs);
3759 while (!ShrinkRegs.
empty())
3763 checkMergingChangesDbgValues(CP,
LHS, LHSVals,
RHS, RHSVals);
3767 auto RegIt = RegToPHIIdx.
find(
CP.getSrcReg());
3768 if (RegIt != RegToPHIIdx.
end()) {
3770 for (
unsigned InstID : RegIt->second) {
3771 auto PHIIt = PHIValToPos.
find(InstID);
3776 auto LII =
RHS.find(
SI);
3777 if (LII ==
RHS.end() || LII->start >
SI)
3792 if (
CP.getSrcIdx() != 0 ||
CP.getDstIdx() != 0)
3795 if (PHIIt->second.SubReg && PHIIt->second.SubReg !=
CP.getSrcIdx())
3799 PHIIt->second.Reg =
CP.getDstReg();
3803 if (
CP.getSrcIdx() != 0)
3804 PHIIt->second.SubReg =
CP.getSrcIdx();
3810 auto InstrNums = RegIt->second;
3811 RegToPHIIdx.
erase(RegIt);
3815 RegIt = RegToPHIIdx.
find(
CP.getDstReg());
3816 if (RegIt != RegToPHIIdx.
end())
3819 RegToPHIIdx.
insert({
CP.getDstReg(), InstrNums});
3823 LHS.join(
RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), NewVNInfo);
3828 MRI->clearKillFlags(
LHS.reg());
3829 MRI->clearKillFlags(
RHS.reg());
3831 if (!EndPoints.
empty()) {
3835 dbgs() <<
"\t\trestoring liveness to " << EndPoints.
size() <<
" points: ";
3836 for (
unsigned i = 0, n = EndPoints.
size(); i != n; ++i) {
3837 dbgs() << EndPoints[i];
3841 dbgs() <<
": " <<
LHS <<
'\n';
3850 return CP.isPhys() ? joinReservedPhysReg(CP) : joinVirtRegs(
CP);
3860 for (
auto *
X : ToInsert) {
3861 for (
const auto &
Op :
X->debug_operands()) {
3862 if (
Op.isReg() &&
Op.getReg().isVirtual())
3863 DbgVRegToValues[
Op.getReg()].push_back({
Slot,
X});
3873 for (
auto &
MBB : MF) {
3876 for (
auto &
MI :
MBB) {
3877 if (
MI.isDebugValue()) {
3879 return MO.isReg() && MO.getReg().isVirtual();
3881 ToInsert.push_back(&
MI);
3882 }
else if (!
MI.isDebugOrPseudoInstr()) {
3884 CloseNewDVRange(CurrentSlot);
3893 for (
auto &Pair : DbgVRegToValues)
3897void RegisterCoalescer::checkMergingChangesDbgValues(
CoalescerPair &CP,
3901 JoinVals &RHSVals) {
3903 checkMergingChangesDbgValuesImpl(
Reg,
RHS,
LHS, LHSVals);
3907 checkMergingChangesDbgValuesImpl(
Reg,
LHS,
RHS, RHSVals);
3911 ScanForSrcReg(
CP.getSrcReg());
3912 ScanForDstReg(
CP.getDstReg());
3915void RegisterCoalescer::checkMergingChangesDbgValuesImpl(
Register Reg,
3918 JoinVals &RegVals) {
3920 auto VRegMapIt = DbgVRegToValues.
find(
Reg);
3921 if (VRegMapIt == DbgVRegToValues.
end())
3924 auto &DbgValueSet = VRegMapIt->second;
3925 auto DbgValueSetIt = DbgValueSet.begin();
3926 auto SegmentIt = OtherLR.
begin();
3928 bool LastUndefResult =
false;
3933 auto ShouldUndef = [&RegVals, &
RegLR, &LastUndefResult,
3938 if (LastUndefIdx == Idx)
3939 return LastUndefResult;
3945 auto OtherIt =
RegLR.find(Idx);
3946 if (OtherIt ==
RegLR.end())
3955 auto Resolution = RegVals.getResolution(OtherIt->valno->id);
3957 Resolution != JoinVals::CR_Keep && Resolution != JoinVals::CR_Erase;
3959 return LastUndefResult;
3965 while (DbgValueSetIt != DbgValueSet.end() && SegmentIt != OtherLR.
end()) {
3966 if (DbgValueSetIt->first < SegmentIt->end) {
3969 if (DbgValueSetIt->first >= SegmentIt->start) {
3970 bool HasReg = DbgValueSetIt->second->hasDebugOperandForReg(
Reg);
3971 bool ShouldUndefReg = ShouldUndef(DbgValueSetIt->first);
3972 if (HasReg && ShouldUndefReg) {
3974 DbgValueSetIt->second->setDebugValueUndef();
3988struct MBBPriorityInfo {
3989 MachineBasicBlock *
MBB;
3993 MBBPriorityInfo(MachineBasicBlock *mbb,
unsigned depth,
bool issplit)
3994 :
MBB(mbb),
Depth(depth), IsSplit(issplit) {}
4004 const MBBPriorityInfo *
RHS) {
4006 if (
LHS->Depth !=
RHS->Depth)
4007 return LHS->Depth >
RHS->Depth ? -1 : 1;
4010 if (
LHS->IsSplit !=
RHS->IsSplit)
4011 return LHS->IsSplit ? -1 : 1;
4015 unsigned cl =
LHS->MBB->pred_size() +
LHS->MBB->succ_size();
4016 unsigned cr =
RHS->MBB->pred_size() +
RHS->MBB->succ_size();
4018 return cl > cr ? -1 : 1;
4021 return LHS->MBB->getNumber() <
RHS->MBB->getNumber() ? -1 : 1;
4026 if (!Copy->isCopy())
4029 if (Copy->getOperand(1).isUndef())
4032 Register SrcReg = Copy->getOperand(1).getReg();
4033 Register DstReg = Copy->getOperand(0).getReg();
4041void RegisterCoalescer::lateLiveIntervalUpdate() {
4047 if (!DeadDefs.
empty())
4048 eliminateDeadDefs();
4050 ToBeUpdated.
clear();
4053bool RegisterCoalescer::copyCoalesceWorkList(
4055 bool Progress =
false;
4067 bool Success = joinCopy(
MI, Again, CurrentErasedInstrs);
4073 if (!CurrentErasedInstrs.
empty()) {
4075 if (
MI && CurrentErasedInstrs.
count(
MI))
4079 if (
MI && CurrentErasedInstrs.
count(
MI))
4090 assert(Copy.isCopyLike());
4093 if (&
MI != &Copy &&
MI.isCopyLike())
4098bool RegisterCoalescer::applyTerminalRule(
const MachineInstr &Copy)
const {
4103 unsigned SrcSubReg = 0, DstSubReg = 0;
4104 if (!
isMoveInstr(*
TRI, &Copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
4125 if (&
MI == &Copy || !
MI.isCopyLike() ||
MI.getParent() != OrigBB)
4128 unsigned OtherSrcSubReg = 0, OtherSubReg = 0;
4129 if (!
isMoveInstr(*
TRI, &Copy, OtherSrcReg, OtherReg, OtherSrcSubReg,
4132 if (OtherReg == SrcReg)
4133 OtherReg = OtherSrcReg;
4152 const unsigned PrevSize = WorkList.
size();
4153 if (JoinGlobalCopies) {
4159 if (!
MI.isCopyLike())
4161 bool ApplyTerminalRule = applyTerminalRule(
MI);
4163 if (ApplyTerminalRule)
4168 if (ApplyTerminalRule)
4175 LocalWorkList.
append(LocalTerminals.
begin(), LocalTerminals.
end());
4182 if (MII.isCopyLike()) {
4183 if (applyTerminalRule(MII))
4196 if (copyCoalesceWorkList(CurrList))
4198 std::remove(WorkList.
begin() + PrevSize, WorkList.
end(),
nullptr),
4202void RegisterCoalescer::coalesceLocals() {
4203 copyCoalesceWorkList(LocalWorkList);
4208 LocalWorkList.clear();
4211void RegisterCoalescer::joinAllIntervals() {
4212 LLVM_DEBUG(
dbgs() <<
"********** JOINING INTERVALS ***********\n");
4213 assert(WorkList.
empty() && LocalWorkList.empty() &&
"Old data still around.");
4215 std::vector<MBBPriorityInfo> MBBs;
4216 MBBs.reserve(MF->size());
4218 MBBs.push_back(MBBPriorityInfo(&
MBB,
Loops->getLoopDepth(&
MBB),
4224 unsigned CurrDepth = std::numeric_limits<unsigned>::max();
4225 for (MBBPriorityInfo &
MBB : MBBs) {
4227 if (JoinGlobalCopies &&
MBB.Depth < CurrDepth) {
4229 CurrDepth =
MBB.Depth;
4231 copyCoalesceInMBB(
MBB.MBB);
4233 lateLiveIntervalUpdate();
4238 while (copyCoalesceWorkList(WorkList))
4240 lateLiveIntervalUpdate();
4250 RegisterCoalescer Impl(&LIS,
SI, &
Loops);
4262bool RegisterCoalescerLegacy::runOnMachineFunction(
MachineFunction &MF) {
4263 auto *LIS = &getAnalysis<LiveIntervalsWrapperPass>().getLIS();
4264 auto *
Loops = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
4265 auto *SIWrapper = getAnalysisIfAvailable<SlotIndexesWrapperPass>();
4266 SlotIndexes *
SI = SIWrapper ? &SIWrapper->getSI() :
nullptr;
4267 RegisterCoalescer Impl(LIS,
SI,
Loops);
4268 return Impl.run(MF);
4272 LLVM_DEBUG(
dbgs() <<
"********** REGISTER COALESCER **********\n"
4273 <<
"********** Function: " << fn.
getName() <<
'\n');
4285 dbgs() <<
"* Skipped as it exposes functions that returns twice.\n");
4305 unsigned SubReg = DebugPHI.second.SubReg;
4308 PHIValToPos.
insert(std::make_pair(DebugPHI.first,
P));
4309 RegToPHIIdx[
Reg].push_back(DebugPHI.first);
4318 MF->
verify(LIS,
SI,
"Before register coalescing", &
errs());
4320 DbgVRegToValues.
clear();
4337 if (
MRI->reg_nodbg_empty(
Reg))
4339 if (
MRI->recomputeRegClass(
Reg)) {
4341 <<
TRI->getRegClassName(
MRI->getRegClass(
Reg)) <<
'\n');
4348 if (!
MRI->shouldTrackSubRegLiveness(
Reg)) {
4356 assert((S.LaneMask & ~MaxMask).none());
4367 auto it = PHIValToPos.
find(
p.first);
4369 p.second.Reg = it->second.Reg;
4370 p.second.SubReg = it->second.SubReg;
4373 PHIValToPos.
clear();
4374 RegToPHIIdx.
clear();
4379 MF->
verify(LIS,
SI,
"After register coalescing", &
errs());
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file defines the DenseSet and SmallDenseSet classes.
const HexagonInstrInfo * TII
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
A common definition of LaneBitmask for use in TableGen and CodeGen.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
static cl::opt< cl::boolOrDefault > EnableGlobalCopies("join-globalcopies", cl::desc("Coalesce copies that span blocks (default=subtarget)"), cl::init(cl::BOU_UNSET), cl::Hidden)
Temporary flag to test global copy optimization.
static bool isLocalCopy(MachineInstr *Copy, const LiveIntervals *LIS)
static bool isSplitEdge(const MachineBasicBlock *MBB)
Return true if this block should be vacated by the coalescer to eliminate branches.
static int compareMBBPriority(const MBBPriorityInfo *LHS, const MBBPriorityInfo *RHS)
C-style comparator that sorts first based on the loop depth of the basic block (the unsigned),...
static cl::opt< unsigned > LargeIntervalSizeThreshold("large-interval-size-threshold", cl::Hidden, cl::desc("If the valnos size of an interval is larger than the threshold, " "it is regarded as a large interval. "), cl::init(100))
static bool isDefInSubRange(LiveInterval &LI, SlotIndex Def)
Check if any of the subranges of LI contain a definition at Def.
static std::pair< bool, bool > addSegmentsWithValNo(LiveRange &Dst, VNInfo *DstValNo, const LiveRange &Src, const VNInfo *SrcValNo)
Copy segments with value number SrcValNo from liverange Src to live range @Dst and use value number D...
static bool isLiveThrough(const LiveQueryResult Q)
static bool isTerminalReg(Register DstReg, const MachineInstr &Copy, const MachineRegisterInfo *MRI)
Check if DstReg is a terminal node.
static cl::opt< bool > VerifyCoalescing("verify-coalescing", cl::desc("Verify machine instrs before and after register coalescing"), cl::Hidden)
register Register static false bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI, Register &Src, Register &Dst, unsigned &SrcSub, unsigned &DstSub)
static cl::opt< bool > EnableJoinSplits("join-splitedges", cl::desc("Coalesce copies on split edges (default=subtarget)"), cl::Hidden)
Temporary flag to test critical edge unsplitting.
static cl::opt< bool > EnableJoining("join-liveintervals", cl::desc("Coalesce copies (default=true)"), cl::init(true), cl::Hidden)
static cl::opt< unsigned > LargeIntervalFreqThreshold("large-interval-freq-threshold", cl::Hidden, cl::desc("For a large interval, if it is coalesced with other live " "intervals many times more than the threshold, stop its " "coalescing to control the compile time. "), cl::init(256))
static bool definesFullReg(const MachineInstr &MI, Register Reg)
Returns true if MI defines the full vreg Reg, as opposed to just defining a subregister.
static cl::opt< unsigned > LateRematUpdateThreshold("late-remat-update-threshold", cl::Hidden, cl::desc("During rematerialization for a copy, if the def instruction has " "many other copy uses to be rematerialized, delay the multiple " "separate live interval update work and do them all at once after " "all those rematerialization are done. It will save a lot of " "repeated work. "), cl::init(100))
static cl::opt< bool > UseTerminalRule("terminal-rule", cl::desc("Apply the terminal rule"), cl::init(false), cl::Hidden)
SI Optimize VGPR LiveRange
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static DenseMap< Register, std::vector< std::pair< SlotIndex, MachineInstr * > > > buildVRegToDbgValueMap(MachineFunction &MF, const LiveIntervals *Liveness)
static void shrinkToUses(LiveInterval &LI, LiveIntervals &LIS)
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addPreservedID(const void *ID)
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
bool test(unsigned Idx) const
Represents analyses that only rely on functions' control flow.
A helper class for register coalescers.
bool flip()
Swap SrcReg and DstReg.
bool isCoalescable(const MachineInstr *) const
Return true if MI is a copy instruction that will become an identity copy after coalescing.
bool setRegisters(const MachineInstr *)
Set registers to match the copy instruction MI.
iterator find(const_arg_type_t< KeyT > Val)
bool erase(const KeyT &Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool isAsCheapAsAMove(const MachineInstr &MI) const override
A live range for subregisters.
LiveInterval - This class represents the liveness of a register, or stack slot.
LLVM_ABI void removeEmptySubRanges()
Removes all subranges without any segments (subranges without segments are not considered valid and s...
bool hasSubRanges() const
Returns true if subregister liveness information is available.
SubRange * createSubRangeFrom(BumpPtrAllocator &Allocator, LaneBitmask LaneMask, const LiveRange &CopyFrom)
Like createSubRange() but the new range is filled with a copy of the liveness information in CopyFrom...
iterator_range< subrange_iterator > subranges()
LLVM_ABI void refineSubRanges(BumpPtrAllocator &Allocator, LaneBitmask LaneMask, std::function< void(LiveInterval::SubRange &)> Apply, const SlotIndexes &Indexes, const TargetRegisterInfo &TRI, unsigned ComposeSubRegIdx=0)
Refines the subranges to support LaneMask.
LLVM_ABI void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
SubRange * createSubRange(BumpPtrAllocator &Allocator, LaneBitmask LaneMask)
Creates a new empty subregister live range.
LLVM_ABI void clearSubRanges()
Removes all subregister liveness information.
bool hasInterval(Register Reg) const
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const
Return the first index in the given basic block.
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
LLVM_ABI bool hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const
Returns true if VNI is killed by any PHI-def values in LI.
SlotIndex InsertMachineInstrInMaps(MachineInstr &MI)
LLVM_ABI bool checkRegMaskInterference(const LiveInterval &LI, BitVector &UsableRegs)
Test if LI is live across any register mask instructions, and compute a bit mask of physical register...
SlotIndexes * getSlotIndexes() const
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
void RemoveMachineInstrFromMaps(MachineInstr &MI)
VNInfo::Allocator & getVNInfoAllocator()
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const
Return the last index in the given basic block.
LiveRange & getRegUnit(unsigned Unit)
Return the live range for register unit Unit.
LiveRange * getCachedRegUnit(unsigned Unit)
Return the live range for register unit Unit if it has already been computed, or nullptr if it hasn't...
LiveInterval & getInterval(Register Reg)
LLVM_ABI void pruneValue(LiveRange &LR, SlotIndex Kill, SmallVectorImpl< SlotIndex > *EndPoints)
If LR has a live value at Kill, prune its live range by removing any liveness reachable from Kill.
void removeInterval(Register Reg)
Interval removal.
LLVM_ABI MachineBasicBlock * intervalIsInOneMBB(const LiveInterval &LI) const
If LI is confined to a single basic block, return a pointer to that block.
LLVM_ABI void removeVRegDefAt(LiveInterval &LI, SlotIndex Pos)
Remove value number and related live segments of LI and its subranges that start at position Pos.
LLVM_ABI bool shrinkToUses(LiveInterval *li, SmallVectorImpl< MachineInstr * > *dead=nullptr)
After removing some uses of a register, shrink its live range to just the remaining uses.
LLVM_ABI void extendToIndices(LiveRange &LR, ArrayRef< SlotIndex > Indices, ArrayRef< SlotIndex > Undefs)
Extend the live range LR to reach all points in Indices.
LLVM_ABI void dump() const
LLVM_ABI void removePhysRegDefAt(MCRegister Reg, SlotIndex Pos)
Remove value numbers and related live segments starting at position Pos that are part of any liverang...
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
bool isLiveInToMBB(const LiveRange &LR, const MachineBasicBlock *mbb) const
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
Result of a LiveRange query.
VNInfo * valueOutOrDead() const
Returns the value alive at the end of the instruction, if any.
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
VNInfo * valueDefined() const
Return the value defined by this instruction, if any.
SlotIndex endPoint() const
Return the end point of the last live range segment to interact with the instruction,...
bool isKill() const
Return true if the live-in value is killed by this instruction.
Callback methods for LiveRangeEdit owners.
SlotIndex rematerializeAt(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, const Remat &RM, const TargetRegisterInfo &, bool Late=false, unsigned SubIdx=0, MachineInstr *ReplaceIndexMI=nullptr)
rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an instruction into MBB before...
bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx, SlotIndex UseIdx) const
allUsesAvailableAt - Return true if all registers used by OrigMI at OrigIdx are also available with t...
void eliminateDeadDefs(SmallVectorImpl< MachineInstr * > &Dead, ArrayRef< Register > RegsBeingSpilled={})
eliminateDeadDefs - Try to delete machine instructions that are now dead (allDefsAreDead returns true...
This class represents the liveness of a register, stack slot, etc.
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
LLVM_ABI iterator addSegment(Segment S)
Add the specified Segment to this range, merging segments as appropriate.
Segments::iterator iterator
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
LLVM_ABI void join(LiveRange &Other, const int *ValNoAssignments, const int *RHSValNoAssignments, SmallVectorImpl< VNInfo * > &NewVNInfo)
join - Join two live ranges (this, and other) together.
bool liveAt(SlotIndex index) const
LLVM_ABI VNInfo * createDeadDef(SlotIndex Def, VNInfo::Allocator &VNIAlloc)
createDeadDef - Make sure the range has a value defined at Def.
LLVM_ABI void removeValNo(VNInfo *ValNo)
removeValNo - Remove all the segments defined by the specified value#.
bool overlaps(const LiveRange &other) const
overlaps - Return true if the intersection of the two live ranges is not empty.
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
bool verify() const
Walk the range and assert if any invariants fail to hold.
LLVM_ABI VNInfo * MergeValueNumberInto(VNInfo *V1, VNInfo *V2)
MergeValueNumberInto - This method is called when two value numbers are found to be equivalent.
unsigned getNumValNums() const
bool containsOneValue() const
iterator FindSegmentContaining(SlotIndex Idx)
Return an iterator to the segment that contains the specified index, or end() if there is none.
void assign(const LiveRange &Other, BumpPtrAllocator &Allocator)
Copies values numbers and live segments from Other into this range.
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
LLVM_ABI iterator find(SlotIndex Pos)
find - Return an iterator pointing to the first segment that ends after Pos, or end().
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
MCRegUnitRootIterator enumerates the root registers of a register unit.
bool isValid() const
Check if the iterator is at the end of the list.
Wrapper class representing physical registers. Should be passed by value.
An RAII based helper class to modify MachineFunctionProperties when running pass.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
LLVM_ABI bool hasEHPadSuccessor() const
bool isEHPad() const
Returns true if the block is a landing pad.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
unsigned succ_size() const
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< pred_iterator > predecessors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
LLVM_ABI StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
Analysis pass which computes a MachineDominatorTree.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
bool exposesReturnsTwice() const
exposesReturnsTwice - Returns true if the function calls setjmp or any other similar functions with a...
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
bool verify(Pass *p=nullptr, const char *Banner=nullptr, raw_ostream *OS=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
DenseMap< unsigned, DebugPHIRegallocPos > DebugPHIPositions
Map of debug instruction numbers to the position of their PHI instructions during register allocation...
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
LLVM_ABI void setRegisterDefReadUndef(Register Reg, bool IsUndef=true)
Mark all subregister defs of register Reg with the undef flag.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
LLVM_ABI std::pair< bool, bool > readsWritesVirtualRegister(Register Reg, SmallVectorImpl< unsigned > *Ops=nullptr) const
Return a pair of bools (reads, writes) indicating if this instruction reads or writes Reg.
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
LLVM_ABI bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
bool isDebugInstr() const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx=nullptr) const
Given the index of a register def operand, check if the register def is tied to a source operand,...
LLVM_ABI int findRegisterUseOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false) const
Returns the operand index that is a use of the specific register or -1 if it is not found.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isCommutable(QueryType Type=IgnoreBundle) const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
void setDebugLoc(DebugLoc DL)
Replace current source information with new such.
LLVM_ABI bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
Analysis pass that exposes the MachineLoopInfo for a machine function.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
LLVM_ABI void substVirtReg(Register Reg, unsigned SubIdx, const TargetRegisterInfo &)
substVirtReg - Substitute the current register with the virtual subregister Reg:SubReg.
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsDead(bool Val=true)
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
LLVM_ABI void substPhysReg(MCRegister Reg, const TargetRegisterInfo &)
substPhysReg - Substitute the current register with the physical register Reg, taking any existing Su...
void setIsUndef(bool Val=true)
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, true, false, true > reg_instr_iterator
reg_instr_iterator/reg_instr_begin/reg_instr_end - Walk all defs and uses of the specified register,...
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
bool isProperSubClass(const TargetRegisterClass *RC) const
isProperSubClass - Returns true if RC has a legal super-class with more allocatable registers.
LLVM_ABI void runOnMachineFunction(const MachineFunction &MF, bool Rev=false)
runOnFunction - Prepare to answer questions about MF.
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
Wrapper class representing virtual and physical registers.
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr unsigned id() const
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
SlotIndex - An opaque wrapper around machine indexes.
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
bool isValid() const
Returns true if this is a valid index.
SlotIndex getBaseIndex() const
Returns the base index for associated with this index.
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
Returns the basic block which the given index falls in.
SlotIndex getMBBEndIdx(unsigned Num) const
Returns the last index in the given basic block number.
SlotIndex getNextNonNullIndex(SlotIndex Index)
Returns the next non-null index, if one exists.
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const
Returns the base index for the given instruction.
SlotIndex getMBBStartIdx(unsigned Num) const
Returns the first index in the given basic block number.
SlotIndex getIndexBefore(const MachineInstr &MI) const
getIndexBefore - Returns the index of the last indexed instruction before MI, or the start index of i...
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction for the given index, or null if the given index has no instruction associated...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetInstrInfo - Interface to description of machine instruction set.
static const unsigned CommuteAnyOperandIndex
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual bool enableJoinGlobalCopies() const
True if the subtarget should enable joining global copies.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
VNInfo - Value Number Information.
void markUnused()
Mark this value as unused.
BumpPtrAllocator Allocator
bool isUnused() const
Returns true if this value is unused.
unsigned id
The ID number of this value.
SlotIndex def
The index of the defining instruction.
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
std::pair< iterator, bool > insert(const ValueT &V)
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
This namespace contains all of the command line option processing machinery.
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
NodeAddr< DefNode * > Def
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI char & RegisterCoalescerID
RegisterCoalescer - This pass merges live ranges to eliminate copies.
LLVM_ABI char & MachineDominatorsID
MachineDominators - This pass is a machine dominators analysis pass.
LLVM_ABI Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
auto unique(Range &&R, Predicate P)
auto upper_bound(R &&Range, T &&Value)
Provide wrappers to std::upper_bound which take ranges instead of having to pass begin/end explicitly...
LLVM_ABI void initializeRegisterCoalescerLegacyPass(PassRegistry &)
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
unsigned MCRegUnit
Register units are used to compute register aliasing.
BumpPtrAllocatorImpl BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
@ Success
The lock was released successfully.
MutableArrayRef(T &OneElt) -> MutableArrayRef< T >
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
DWARFExpression::Operation Op
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
void array_pod_sort(IteratorTy Start, IteratorTy End)
array_pod_sort - This sorts an array with the specified start and end extent.
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr LaneBitmask getLane(unsigned Lane)
static constexpr LaneBitmask getAll()
constexpr bool any() const
static constexpr LaneBitmask getNone()
Remat - Information needed to rematerialize at a specific location.
This represents a simple continuous liveness interval for a value.