#define DEBUG_TYPE "si-wqm"
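// Note: the file header comment is elided in this excerpt. In brief, this
// pass inserts the EXEC-mask manipulation needed to run pixel shaders in
// whole quad mode (WQM), where helper lanes execute so that screen-space
// derivatives are well defined, and implements strict whole-wavefront
// (StrictWWM) and strict whole-quad (StrictWQM) regions around individual
// instructions. "Exact" mode means EXEC holds only the truly live lanes.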
enum {
  StateWQM = 0x1,
  StateStrictWWM = 0x2,
  StateStrictWQM = 0x4,
  StateExact = 0x8,
  StateStrict = StateStrictWWM | StateStrictWQM,
};
struct PrintState {
public:
  int State;

  explicit PrintState(int State) : State(State) {}
};

static raw_ostream &operator<<(raw_ostream &OS, const PrintState &PS) {
  static const std::pair<char, const char *> Mapping[] = {
      std::pair(StateWQM, "WQM"), std::pair(StateStrictWWM, "StrictWWM"),
      std::pair(StateStrictWQM, "StrictWQM"), std::pair(StateExact, "Exact")};
  char State = PS.State;
  for (auto M : Mapping) {
    if (State & M.first) {
      OS << M.second;
      State &= ~M.first;
      if (State)
        OS << '|';
    }
  }
  return OS;
}
struct InstrInfo {
  char Needs = 0;
  char Disabled = 0;
  char OutNeeds = 0;
};

struct BlockInfo {
  char Needs = 0;
  char InNeeds = 0;
  char OutNeeds = 0;
  char InitialState = 0;
  bool NeedsLowering = false;
};
unsigned AndOpc;
unsigned AndTermOpc;
unsigned AndN2Opc;
unsigned XorOpc;
unsigned AndSaveExecOpc;
unsigned AndSaveExecTermOpc;
unsigned WQMOpc;
Register Exec;
Register LiveMaskReg;
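// These opcode fields are filled in by runOnMachineFunction with the wave32
// or wave64 variants, so the rest of the pass stays wave-size agnostic.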
struct WorkItem {
  MachineBasicBlock *MBB = nullptr;
  MachineInstr *MI = nullptr;

  WorkItem() = default;
  WorkItem(MachineBasicBlock *MBB) : MBB(MBB) {}
  WorkItem(MachineInstr *MI) : MI(MI) {}
};

void markInstruction(MachineInstr &MI, char Flag,
                     std::vector<WorkItem> &Worklist);
void markDefs(const MachineInstr &UseMI, LiveRange &LR, Register Reg,
              unsigned SubReg, char Flag, std::vector<WorkItem> &Worklist);
void markOperand(const MachineInstr &MI, const MachineOperand &Op, char Flag,
                 std::vector<WorkItem> &Worklist);
void markInstructionUses(const MachineInstr &MI, char Flag,
                         std::vector<WorkItem> &Worklist);
char scanInstructions(MachineFunction &MF, std::vector<WorkItem> &Worklist);
void propagateInstruction(MachineInstr &MI, std::vector<WorkItem> &Worklist);
void toStrictMode(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
                  Register SaveOrig, char StrictStateNeeded);
void fromStrictMode(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
                    Register SavedOrig, char NonStrictState,
                    char CurrentStrictState);
void lowerLiveMaskQueries();
void lowerCopyInstrs();
void lowerKillInstrs(bool IsWQM);
MachineFunctionProperties getClearedProperties() const override {
  return MachineFunctionProperties().set(
      MachineFunctionProperties::Property::IsSSA);
}
char SIWholeQuadMode::ID = 0;
FunctionPass *llvm::createSIWholeQuadModePass() {
  return new SIWholeQuadMode;
}
#ifndef NDEBUG
LLVM_DUMP_METHOD void SIWholeQuadMode::printInfo() {
  for (const auto &BII : Blocks) {
    dbgs() << printMBBReference(*BII.first) << ":\n"
           << "  InNeeds = " << PrintState(BII.second.InNeeds)
           << ", Needs = " << PrintState(BII.second.Needs)
           << ", OutNeeds = " << PrintState(BII.second.OutNeeds) << "\n\n";

    for (const MachineInstr &MI : *BII.first) {
      auto III = Instructions.find(&MI);
      if (III == Instructions.end())
        continue;

      dbgs() << "  " << MI << "    Needs = " << PrintState(III->second.Needs)
             << ", OutNeeds = " << PrintState(III->second.OutNeeds) << '\n';
    }
  }
}
#endif
void SIWholeQuadMode::markInstruction(MachineInstr &MI, char Flag,
                                      std::vector<WorkItem> &Worklist) {
  InstrInfo &II = Instructions[&MI];

  assert(!(Flag & StateExact) && Flag != 0);

  // Remove any disabled states from the flag: the user of this instruction
  // simply sees an undefined value in the disabled lanes.
  Flag &= ~II.Disabled;

  // Ignore if the flag is already encompassed by the existing needs.
  if ((II.Needs & Flag) == Flag)
    return;

  II.Needs |= Flag;
  Worklist.push_back(&MI);
}
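// markDefs walks the LiveRange value graph backwards from a use, following
// phi-defs into each predecessor with an explicit stack, and marks every
// instruction that defines some lane of the used register.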
void SIWholeQuadMode::markDefs(const MachineInstr &UseMI, LiveRange &LR,
                               Register Reg, unsigned SubReg, char Flag,
                               std::vector<WorkItem> &Worklist) {
  LLVM_DEBUG(dbgs() << "markDefs " << PrintState(Flag) << ": " << UseMI);

  LiveQueryResult UseLRQ = LR.Query(LIS->getInstructionIndex(UseMI));
  const VNInfo *Value = UseLRQ.valueIn();
  if (!Value)
    return;

  const LaneBitmask UseLanes =
      SubReg ? TRI->getSubRegIndexLaneMask(SubReg)
             : (Reg.isVirtual() ? MRI->getMaxLaneMaskForVReg(Reg)
                                : LaneBitmask::getAll());

  // Perform a depth-first iteration of the LiveRange graph marking defs.
  // Stop processing of a given branch when all use lanes have been defined.
  // The first definition stops processing for a physical register.
  struct PhiEntry {
    const VNInfo *Phi;
    unsigned PredIdx;
    LaneBitmask DefinedLanes;

    PhiEntry(const VNInfo *Phi, unsigned PredIdx, LaneBitmask DefinedLanes)
        : Phi(Phi), PredIdx(PredIdx), DefinedLanes(DefinedLanes) {}
  };
  using VisitKey = std::pair<const VNInfo *, LaneBitmask>;
  SmallVector<PhiEntry, 2> PhiStack;
  SmallSet<VisitKey, 4> Visited;
  LaneBitmask DefinedLanes;
  unsigned NextPredIdx = 0; // Only used for processing phi nodes.
  do {
    const VNInfo *NextValue = nullptr;
    const VisitKey Key(Value, DefinedLanes);

    if (Visited.insert(Key).second) {
      // On first visit to a phi, start processing at the first predecessor.
      NextPredIdx = 0;
    }

    if (Value->isPHIDef()) {
      // Each predecessor node in the phi must be processed as a subgraph.
      const MachineBasicBlock *MBB = LIS->getMBBFromIndex(Value->def);
      assert(MBB && "Phi-def has no defining MBB");

      // Find the next predecessor to process.
      unsigned Idx = NextPredIdx;
      auto PI = MBB->pred_begin() + Idx;
      auto PE = MBB->pred_end();
      for (; PI != PE && !NextValue; ++PI, ++Idx) {
        if (const VNInfo *VN = LR.getVNInfoBefore(LIS->getMBBEndIdx(*PI))) {
          if (!Visited.count(VisitKey(VN, DefinedLanes)))
            NextValue = VN;
        }
      }

      // If there are more predecessors to process, push the phi back on.
      if (PI != PE)
        PhiStack.emplace_back(Value, Idx, DefinedLanes);
    } else {
      MachineInstr *MI = LIS->getInstructionFromIndex(Value->def);
      assert(MI && "Def has no defining instruction");

      if (Reg.isVirtual()) {
        // Iterate over all operands to find relevant definitions.
        bool HasDef = false;
        for (const MachineOperand &Op : MI->defs()) {
          if (Op.getReg() != Reg)
            continue;

          // Compute lanes defined and the overlap with the use.
          LaneBitmask OpLanes =
              Op.isUndef() ? LaneBitmask::getAll()
                           : TRI->getSubRegIndexLaneMask(Op.getSubReg());
          LaneBitmask Overlap = (UseLanes & OpLanes);

          // Record if this instruction defined any lanes of the use.
          HasDef |= Overlap.any();

          // Mark any lanes defined.
          DefinedLanes |= OpLanes;
        }

        // Check whether all lanes of the use have been defined.
        if ((DefinedLanes & UseLanes) != UseLanes) {
          // Definition not complete; need to process the input value too.
          LiveQueryResult LRQ = LR.Query(LIS->getInstructionIndex(*MI));
          if (const VNInfo *VN = LRQ.valueIn()) {
            if (!Visited.count(VisitKey(VN, DefinedLanes)))
              NextValue = VN;
          }
        }

        // Only mark the instruction if it defines some part of the use.
        if (HasDef)
          markInstruction(*MI, Flag, Worklist);
      } else {
        // For physical registers, simply mark the defining instruction.
        markInstruction(*MI, Flag, Worklist);
      }
    }

    if (!NextValue && !PhiStack.empty()) {
      // End of chain reached; revert to processing the last phi.
      PhiEntry &Entry = PhiStack.back();
      NextValue = Entry.Phi;
      NextPredIdx = Entry.PredIdx;
      DefinedLanes = Entry.DefinedLanes;
      PhiStack.pop_back();
    }

    Value = NextValue;
  } while (Value);
}
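// markOperand dispatches a single register use either to the virtual
// register's live interval or, for tracked physical registers such as VCC,
// to the live ranges of its register units.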
void SIWholeQuadMode::markOperand(const MachineInstr &MI,
                                  const MachineOperand &Op, char Flag,
                                  std::vector<WorkItem> &Worklist) {
  assert(Op.isReg());
  Register Reg = Op.getReg();

  // Ignore some hardware registers.
  switch (Reg) {
  case AMDGPU::EXEC:
  case AMDGPU::EXEC_LO:
    return;
  default:
    break;
  }

  if (Reg.isVirtual()) {
    LiveRange &LR = LIS->getInterval(Reg);
    markDefs(MI, LR, Reg, Op.getSubReg(), Flag, Worklist);
  } else {
    // Handle physical registers that need tracking; this is mostly relevant
    // for VCC, which can appear as the (implicit) input of a uniform branch.
    for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
      LiveRange &LR = LIS->getRegUnit(Unit);
      const VNInfo *Value = LR.getVNInfoBefore(LIS->getInstructionIndex(MI));
      if (!Value)
        continue;

      markDefs(MI, LR, Unit, AMDGPU::NoSubRegister, Flag, Worklist);
    }
  }
}

/// Mark all instructions defining the uses in \p MI with \p Flag.
void SIWholeQuadMode::markInstructionUses(const MachineInstr &MI, char Flag,
                                          std::vector<WorkItem> &Worklist) {
  LLVM_DEBUG(dbgs() << "markInstructionUses " << PrintState(Flag) << ": "
                    << MI);

  for (const MachineOperand &Use : MI.uses()) {
    if (!Use.isReg() || !Use.isUse())
      continue;
    markOperand(MI, Use, Flag, Worklist);
  }
}
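// scanInstructions seeds the analysis: it visits blocks in reverse post-order
// and records which instructions require WQM, StrictWWM/StrictWQM, or Exact
// mode, returning the union of all state flags seen in the function.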
char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
                                       std::vector<WorkItem> &Worklist) {
  char GlobalFlags = 0;
  bool WQMOutputs = MF.getFunction().hasFnAttribute("amdgpu-ps-wqm-outputs");
  SmallVector<MachineInstr *, 4> SetInactiveInstrs;
  SmallVector<MachineInstr *, 4> SoftWQMInstrs;
  bool HasImplicitDerivatives =
      MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS;

  // We need to visit the basic blocks in reverse post-order so that we visit
  // defs before uses, in particular so that we don't accidentally mark an
  // instruction as needing e.g. WQM before visiting it and realizing it needs
  // WQM disabled.
  ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
  for (MachineBasicBlock *MBB : RPOT) {
    BlockInfo &BBI = Blocks[MBB];

    for (MachineInstr &MI : *MBB) {
      InstrInfo &III = Instructions[&MI];
      unsigned Opcode = MI.getOpcode();
      char Flags = 0;

      if (TII->isWQM(Opcode)) {
        // If LOD is not supported, WQM is not needed.
        if (!ST->hasExtendedImageInsts())
          continue;
        // Only generate implicit WQM if implicit derivatives are required.
        // This avoids inserting unintended WQM if a shader type without
        // implicit derivatives uses an image sampling instruction.
        if (!HasImplicitDerivatives)
          continue;
        // Sampling instructions don't need to produce results for all pixels
        // in a quad; they just require all inputs of a quad to have been
        // computed for derivatives.
        markInstructionUses(MI, StateWQM, Worklist);
        GlobalFlags |= StateWQM;
        continue;
      } else if (Opcode == AMDGPU::WQM) {
        // The WQM intrinsic requires its output to have all the helper lanes
        // correct, so we need it to be in WQM.
        Flags = StateWQM;
        LowerToCopyInstrs.push_back(&MI);
      } else if (Opcode == AMDGPU::SOFT_WQM) {
        LowerToCopyInstrs.push_back(&MI);
        SoftWQMInstrs.push_back(&MI);
        continue;
      } else if (Opcode == AMDGPU::STRICT_WWM) {
        // The STRICT_WWM intrinsic doesn't make the instructions needed to
        // compute its value be in StrictWWM, but it's a copy, so mark its
        // uses instead.
        markInstructionUses(MI, StateStrictWWM, Worklist);
        GlobalFlags |= StateStrictWWM;
        LowerToMovInstrs.push_back(&MI);
        continue;
      } else if (Opcode == AMDGPU::STRICT_WQM ||
                 TII->isDualSourceBlendEXP(MI)) {
        markInstructionUses(MI, StateStrictWQM, Worklist);
        GlobalFlags |= StateStrictWQM;

        if (Opcode == AMDGPU::STRICT_WQM) {
          LowerToMovInstrs.push_back(&MI);
        } else {
          // Dual-source blend export acts as implicit StrictWQM: its sources
          // need to be shuffled in StrictWQM, but the export itself needs to
          // run in Exact mode.
          BBI.Needs |= StateExact;
          if (!(BBI.InNeeds & StateExact)) {
            BBI.InNeeds |= StateExact;
            Worklist.push_back(MBB);
          }
          GlobalFlags |= StateExact;
          III.Disabled = StateWQM | StateStrict;
        }
        continue;
      } else if (Opcode == AMDGPU::LDS_PARAM_LOAD ||
                 Opcode == AMDGPU::LDS_DIRECT_LOAD) {
        // Mark these StrictWQM, but only for the instruction, not its
        // operands. This avoids unnecessarily marking M0 as requiring WQM.
        InstrInfo &II = Instructions[&MI];
        II.Needs |= StateStrictWQM;
        GlobalFlags |= StateStrictWQM;
        continue;
      } else if (Opcode == AMDGPU::V_SET_INACTIVE_B32 ||
                 Opcode == AMDGPU::V_SET_INACTIVE_B64) {
        III.Disabled = StateStrict;
        MachineOperand &Inactive = MI.getOperand(2);
        if (Inactive.isReg()) {
          if (Inactive.isUndef()) {
            LowerToCopyInstrs.push_back(&MI);
          } else {
            markOperand(MI, Inactive, StateStrictWWM, Worklist);
          }
        }
        SetInactiveInstrs.push_back(&MI);
        continue;
      } else if (TII->isDisableWQM(MI)) {
        BBI.Needs |= StateExact;
        if (!(BBI.InNeeds & StateExact)) {
          BBI.InNeeds |= StateExact;
          Worklist.push_back(MBB);
        }
        GlobalFlags |= StateExact;
        III.Disabled = StateWQM | StateStrict;
        continue;
      } else {
        if (Opcode == AMDGPU::SI_PS_LIVE || Opcode == AMDGPU::SI_LIVE_MASK) {
          LiveMaskQueries.push_back(&MI);
        } else if (Opcode == AMDGPU::SI_KILL_I1_TERMINATOR ||
                   Opcode == AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR ||
                   Opcode == AMDGPU::SI_DEMOTE_I1) {
          KillInstrs.push_back(&MI);
          BBI.NeedsLowering = true;
        } else if (WQMOutputs) {
          // The function is in machine SSA form, which means that physical
          // VGPRs correspond to shader inputs and outputs. Inputs are only
          // used, outputs are only defined.
          for (const MachineOperand &MO : MI.defs()) {
            Register Reg = MO.getReg();
            if (!Reg.isVirtual() &&
                TRI->hasVectorRegisters(TRI->getPhysRegBaseClass(Reg))) {
              Flags = StateWQM;
              break;
            }
          }
        }

        if (!Flags)
          continue;
      }

      markInstruction(MI, Flags, Worklist);
      GlobalFlags |= Flags;
    }
  }

  // Make sure that any SET_INACTIVE and SOFT_WQM instructions are computed in
  // WQM if WQM is ever used anywhere in the function, implementing the
  // semantics of @llvm.amdgcn.set.inactive and @llvm.amdgcn.softwqm.
  if (GlobalFlags & StateWQM) {
    for (MachineInstr *MI : SetInactiveInstrs)
      markInstruction(*MI, StateWQM, Worklist);
    for (MachineInstr *MI : SoftWQMInstrs)
      markInstruction(*MI, StateWQM, Worklist);
  }

  return GlobalFlags;
}
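// propagateInstruction pushes an instruction's needs backwards to the
// preceding instruction, up to the block level, and into its inputs.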
void SIWholeQuadMode::propagateInstruction(MachineInstr &MI,
                                           std::vector<WorkItem> &Worklist) {
  MachineBasicBlock *MBB = MI.getParent();
  InstrInfo II = Instructions[&MI]; // Copy to prevent dangling references.
  BlockInfo &BI = Blocks[MBB];

  // Control flow-type instructions and stores to temporary memory that are
  // followed by WQM computations must themselves be in WQM.
  if ((II.OutNeeds & StateWQM) && !(II.Disabled & StateWQM) &&
      (MI.isTerminator() || (TII->usesVM_CNT(MI) && MI.mayStore()))) {
    Instructions[&MI].Needs = StateWQM;
    II.Needs = StateWQM;
  }

  // Propagate to block level.
  if (II.Needs & StateWQM) {
    BI.Needs |= StateWQM;
    if (!(BI.InNeeds & StateWQM)) {
      BI.InNeeds |= StateWQM;
      Worklist.push_back(MBB);
    }
  }

  // Propagate backwards within the block.
  if (MachineInstr *PrevMI = MI.getPrevNode()) {
    char InNeeds = (II.Needs & ~StateStrict) | II.OutNeeds;
    if (!PrevMI->isPHI()) {
      InstrInfo &PrevII = Instructions[PrevMI];
      if ((PrevII.OutNeeds | InNeeds) != PrevII.OutNeeds) {
        PrevII.OutNeeds |= InNeeds;
        Worklist.push_back(PrevMI);
      }
    }
  }

  // Propagate the WQM flag to instruction inputs.
  assert(!(II.Needs & StateExact));

  if (II.Needs != 0)
    markInstructionUses(MI, II.Needs, Worklist);

  // Ensure we process a block containing StrictWWM/StrictWQM, even if it
  // does not require any WQM transitions.
  if (II.Needs & StateStrictWWM)
    BI.Needs |= StateStrictWWM;
  if (II.Needs & StateStrictWQM)
    BI.Needs |= StateStrictWQM;
}
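// propagateBlock performs the block-level part of the backward dataflow: a
// block's OutNeeds feed its last instruction, its InNeeds must be provided
// by every predecessor, and its OutNeeds are offered to every successor.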
void SIWholeQuadMode::propagateBlock(MachineBasicBlock &MBB,
                                     std::vector<WorkItem> &Worklist) {
  BlockInfo BI = Blocks[&MBB]; // Copy to prevent dangling references.

  // Propagate through instructions.
  if (!MBB.empty()) {
    MachineInstr *LastMI = &*MBB.rbegin();
    InstrInfo &LastII = Instructions[LastMI];
    if ((LastII.OutNeeds | BI.OutNeeds) != LastII.OutNeeds) {
      LastII.OutNeeds |= BI.OutNeeds;
      Worklist.push_back(LastMI);
    }
  }

  // Predecessor blocks must provide for our WQM/Exact needs.
  for (MachineBasicBlock *Pred : MBB.predecessors()) {
    BlockInfo &PredBI = Blocks[Pred];
    if ((PredBI.OutNeeds | BI.InNeeds) == PredBI.OutNeeds)
      continue;

    PredBI.OutNeeds |= BI.InNeeds;
    PredBI.InNeeds |= BI.InNeeds;
    Worklist.push_back(Pred);
  }

  // All successors must be prepared to accept the same set of WQM/Exact data.
  for (MachineBasicBlock *Succ : MBB.successors()) {
    BlockInfo &SuccBI = Blocks[Succ];
    if ((SuccBI.InNeeds | BI.OutNeeds) == SuccBI.InNeeds)
      continue;

    SuccBI.InNeeds |= BI.OutNeeds;
    Worklist.push_back(Succ);
  }
}
char SIWholeQuadMode::analyzeFunction(MachineFunction &MF) {
  std::vector<WorkItem> Worklist;
  char GlobalFlags = scanInstructions(MF, Worklist);

  while (!Worklist.empty()) {
    WorkItem WI = Worklist.back();
    Worklist.pop_back();

    if (WI.MI)
      propagateInstruction(*WI.MI, Worklist);
    else
      propagateBlock(*WI.MBB, Worklist);
  }

  return GlobalFlags;
}
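// saveSCC materializes a save/restore of SCC around an insertion point,
// since the EXEC-manipulation instructions inserted below clobber SCC.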
MachineBasicBlock::iterator
SIWholeQuadMode::saveSCC(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator Before) {
  Register SaveReg = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  MachineInstr *Save =
      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), SaveReg)
          .addReg(AMDGPU::SCC);
  MachineInstr *Restore =
      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::SCC)
          .addReg(SaveReg);

  LIS->InsertMachineInstrInMaps(*Save);
  LIS->InsertMachineInstrInMaps(*Restore);
  LIS->createAndComputeVirtRegInterval(SaveReg);

  return Restore;
}
MachineBasicBlock *SIWholeQuadMode::splitBlock(MachineBasicBlock *BB,
                                               MachineInstr *TermMI) {
  LLVM_DEBUG(dbgs() << "Split block " << printMBBReference(*BB) << " @ "
                    << *TermMI << "\n");

  MachineBasicBlock *SplitBB =
      BB->splitAt(*TermMI, /*UpdateLiveIns*/ true, LIS);

  // Convert the last instruction of the block to a terminator.
  // Note: this only covers the expected patterns.
  unsigned NewOpcode = 0;
  switch (TermMI->getOpcode()) {
  case AMDGPU::S_AND_B32:
    NewOpcode = AMDGPU::S_AND_B32_term;
    break;
  case AMDGPU::S_AND_B64:
    NewOpcode = AMDGPU::S_AND_B64_term;
    break;
  case AMDGPU::S_MOV_B32:
    NewOpcode = AMDGPU::S_MOV_B32_term;
    break;
  case AMDGPU::S_MOV_B64:
    NewOpcode = AMDGPU::S_MOV_B64_term;
    break;
  default:
    break;
  }
  if (NewOpcode)
    TermMI->setDesc(TII->get(NewOpcode));

  if (SplitBB != BB) {
    // Update dominator trees.
    using DomTreeT = DomTreeBase<MachineBasicBlock>;
    SmallVector<DomTreeT::UpdateType, 16> DTUpdates;
    for (MachineBasicBlock *Succ : SplitBB->successors()) {
      DTUpdates.push_back({DomTreeT::Insert, SplitBB, Succ});
      DTUpdates.push_back({DomTreeT::Delete, BB, Succ});
    }
    DTUpdates.push_back({DomTreeT::Insert, BB, SplitBB});
    if (MDT)
      MDT->getBase().applyUpdates(DTUpdates);
    if (PDT)
      PDT->getBase().applyUpdates(DTUpdates);

    // Link the two blocks.
    MachineInstr *MI =
        BuildMI(*BB, BB->end(), DebugLoc(), TII->get(AMDGPU::S_BRANCH))
            .addMBB(SplitBB);
    LIS->InsertMachineInstrInMaps(*MI);
  }

  return SplitBB;
}
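// lowerKillF32 lowers SI_KILL_F32_COND_IMM_TERMINATOR: the V_CMP computes
// the *killed* lanes (hence the inverted condition codes below); the live
// mask is then updated and EXEC is narrowed accordingly.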
MachineInstr *SIWholeQuadMode::lowerKillF32(MachineBasicBlock &MBB,
                                            MachineInstr &MI) {
  const DebugLoc &DL = MI.getDebugLoc();
  unsigned Opcode = 0;

  assert(MI.getOperand(0).isReg());

  // The comparison selects the lanes to kill, so each source condition code
  // is mapped to the V_CMP opcode testing its negation.
  switch (MI.getOperand(2).getImm()) {
  case ISD::SETUEQ: Opcode = AMDGPU::V_CMP_LG_F32_e64;  break;
  case ISD::SETULT: Opcode = AMDGPU::V_CMP_GE_F32_e64;  break;
  case ISD::SETULE: Opcode = AMDGPU::V_CMP_GT_F32_e64;  break;
  case ISD::SETUGT: Opcode = AMDGPU::V_CMP_LE_F32_e64;  break;
  case ISD::SETUGE: Opcode = AMDGPU::V_CMP_LT_F32_e64;  break;
  case ISD::SETUNE: Opcode = AMDGPU::V_CMP_EQ_F32_e64;  break;
  case ISD::SETUO:  Opcode = AMDGPU::V_CMP_O_F32_e64;   break;
  case ISD::SETO:   Opcode = AMDGPU::V_CMP_U_F32_e64;   break;
  case ISD::SETOEQ:
  case ISD::SETEQ:  Opcode = AMDGPU::V_CMP_NEQ_F32_e64; break;
  case ISD::SETOLT:
  case ISD::SETLT:  Opcode = AMDGPU::V_CMP_NLT_F32_e64; break;
  case ISD::SETOLE:
  case ISD::SETLE:  Opcode = AMDGPU::V_CMP_NLE_F32_e64; break;
  case ISD::SETOGT:
  case ISD::SETGT:  Opcode = AMDGPU::V_CMP_NGT_F32_e64; break;
  case ISD::SETOGE:
  case ISD::SETGE:  Opcode = AMDGPU::V_CMP_NGE_F32_e64; break;
  case ISD::SETONE:
  case ISD::SETNE:  Opcode = AMDGPU::V_CMP_NLG_F32_e64; break;
  default:
    llvm_unreachable("invalid ISD:SET cond code");
  }

  Register VCC = ST->isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;

  // Build the inverted V_CMP into VCC, AND its result out of the live mask,
  // narrow EXEC, and emit the SCC0 early-terminate check; the BuildMI calls
  // creating VcmpMI, MaskUpdateMI, ExecMaskMI, EarlyTermMI and NewTerm are
  // elided in this excerpt.
  // ...

  // Update live intervals.
  LIS->ReplaceMachineInstrInMaps(MI, *VcmpMI);
  MBB.remove(&MI);

  LIS->InsertMachineInstrInMaps(*MaskUpdateMI);
  LIS->InsertMachineInstrInMaps(*ExecMaskMI);
  LIS->InsertMachineInstrInMaps(*EarlyTermMI);
  LIS->InsertMachineInstrInMaps(*NewTerm);

  return NewTerm;
}
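// lowerKillI1 handles SI_KILL_I1_TERMINATOR and SI_DEMOTE_I1. Kills with an
// immediate operand are resolved statically; otherwise the live mask is
// updated and EXEC is narrowed (for demote, only quads with no live lanes
// left are deactivated).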
MachineInstr *SIWholeQuadMode::lowerKillI1(MachineBasicBlock &MBB,
                                           MachineInstr &MI, bool IsWQM) {
  const DebugLoc &DL = MI.getDebugLoc();
  MachineInstr *MaskUpdateMI = nullptr;

  const bool IsDemote = IsWQM && (MI.getOpcode() == AMDGPU::SI_DEMOTE_I1);
  const MachineOperand &Op = MI.getOperand(0);
  int64_t KillVal = MI.getOperand(1).getImm();
  MachineInstr *ComputeKilledMaskMI = nullptr;
  Register CndReg = !Op.isImm() ? Op.getReg() : Register();
  Register TmpReg;

  // Is this a static or dynamic kill?
  if (Op.isImm()) {
    if (Op.getImm() == KillVal) {
      // Static: all active lanes are killed.
      MaskUpdateMI = BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
                         .addReg(LiveMaskReg).addReg(Exec);
    } else {
      // Static: kill does nothing.
      MachineInstr *NewTerm = nullptr;
      if (MI.getOpcode() == AMDGPU::SI_DEMOTE_I1) {
        LIS->RemoveMachineInstrFromMaps(MI);
      } else {
        assert(MBB.succ_size() == 1);
        NewTerm = BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_BRANCH))
                      .addMBB(*MBB.succ_begin());
        LIS->ReplaceMachineInstrInMaps(MI, *NewTerm);
      }
      MI.eraseFromParent();
      return NewTerm;
    }
  } else {
    if (!KillVal) {
      // Op represents live lanes after the kill, so EXEC must be factored in.
      TmpReg = MRI->createVirtualRegister(TRI->getBoolRC());
      ComputeKilledMaskMI =
          BuildMI(MBB, MI, DL, TII->get(XorOpc), TmpReg).add(Op).addReg(Exec);
      MaskUpdateMI = BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
                         .addReg(LiveMaskReg).addReg(TmpReg);
    } else {
      // Op represents the lanes to kill.
      MaskUpdateMI = BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
                         .addReg(LiveMaskReg).add(Op);
    }
  }

  // If SCC is 0 after the mask update then no lanes remain live.
  MachineInstr *EarlyTermMI =
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_EARLY_TERMINATE_SCC0));

  // Some lanes may still be live: update EXEC to deactivate lanes.
  MachineInstr *NewTerm;
  MachineInstr *WQMMaskMI = nullptr;
  Register LiveMaskWQM;
  if (IsDemote) {
    // Demote: deactivate quads containing only helper lanes.
    LiveMaskWQM = MRI->createVirtualRegister(TRI->getBoolRC());
    WQMMaskMI = BuildMI(MBB, MI, DL, TII->get(WQMOpc), LiveMaskWQM)
                    .addReg(LiveMaskReg);
    NewTerm = BuildMI(MBB, MI, DL, TII->get(AndOpc), Exec)
                  .addReg(Exec).addReg(LiveMaskWQM);
  } else {
    // Kill: deactivate the lanes no longer present in the live mask.
    if (Op.isImm()) {
      unsigned MovOpc = ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
      NewTerm = BuildMI(MBB, &MI, DL, TII->get(MovOpc), Exec).addImm(0);
    } else if (!IsWQM) {
      NewTerm = BuildMI(MBB, &MI, DL, TII->get(AndOpc), Exec)
                    .addReg(Exec).addReg(LiveMaskReg);
    } else {
      unsigned Opcode = KillVal ? AndN2Opc : AndOpc;
      NewTerm =
          BuildMI(MBB, &MI, DL, TII->get(Opcode), Exec).addReg(Exec).add(Op);
    }
  }

  // Update live intervals.
  LIS->RemoveMachineInstrFromMaps(MI);
  MBB.remove(&MI);
  if (ComputeKilledMaskMI)
    LIS->InsertMachineInstrInMaps(*ComputeKilledMaskMI);
  LIS->InsertMachineInstrInMaps(*MaskUpdateMI);
  LIS->InsertMachineInstrInMaps(*EarlyTermMI);
  if (WQMMaskMI)
    LIS->InsertMachineInstrInMaps(*WQMMaskMI);
  LIS->InsertMachineInstrInMaps(*NewTerm);

  if (CndReg) {
    LIS->removeInterval(CndReg);
    LIS->createAndComputeVirtRegInterval(CndReg);
  }
  if (TmpReg)
    LIS->createAndComputeVirtRegInterval(TmpReg);
  if (LiveMaskWQM)
    LIS->createAndComputeVirtRegInterval(LiveMaskWQM);

  return NewTerm;
}
// Convert a strict mode transition to a pseudo transition.
// This still pre-allocates registers to avoid clobbering used registers.
void SIWholeQuadMode::lowerPseudoStrictMode(MachineBasicBlock &MBB,
                                            MachineInstr *Entry,
                                            MachineInstr *Exit) {
  assert(Entry->getOpcode() == AMDGPU::ENTER_STRICT_WQM);
  assert(Exit->getOpcode() == AMDGPU::EXIT_STRICT_WQM);

  Register SaveOrig = Entry->getOperand(0).getReg();

  MachineInstr *NewEntry =
      BuildMI(MBB, Entry, DebugLoc(), TII->get(AMDGPU::ENTER_PSEUDO_WM));
  MachineInstr *NewExit =
      BuildMI(MBB, Exit, DebugLoc(), TII->get(AMDGPU::EXIT_PSEUDO_WM));

  LIS->ReplaceMachineInstrInMaps(*Exit, *NewExit);
  Exit->eraseFromParent();

  LIS->ReplaceMachineInstrInMaps(*Entry, *NewEntry);
  Entry->eraseFromParent();

  LIS->removeInterval(SaveOrig);
}
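// lowerBlock replays the recorded state transitions of a block to lower the
// kill pseudos it contains and to detect WQM -> StrictWQM -> WQM regions
// that can be turned into the cheaper pseudo transitions above.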
void SIWholeQuadMode::lowerBlock(MachineBasicBlock &MBB) {
  auto BII = Blocks.find(&MBB);
  if (BII == Blocks.end())
    return;

  const BlockInfo &BI = BII->second;
  if (!BI.NeedsLowering)
    return;

  LLVM_DEBUG(dbgs() << "\nLowering block " << printMBBReference(MBB) << ":\n");

  SmallVector<MachineInstr *, 4> SplitPoints;
  char State = BI.InitialState;
  MachineInstr *StrictEntry = nullptr;

  for (MachineInstr &MI : llvm::make_early_inc_range(
           llvm::make_range(MBB.getFirstNonPHI(), MBB.end()))) {
    char PreviousState = State;

    if (StateTransition.count(&MI))
      State = StateTransition[&MI];

    MachineInstr *SplitPoint = nullptr;
    switch (MI.getOpcode()) {
    case AMDGPU::SI_DEMOTE_I1:
    case AMDGPU::SI_KILL_I1_TERMINATOR:
      SplitPoint = lowerKillI1(MBB, MI, State == StateWQM);
      break;
    case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      SplitPoint = lowerKillF32(MBB, MI);
      break;
    case AMDGPU::ENTER_STRICT_WQM:
      StrictEntry = PreviousState == StateWQM ? &MI : nullptr;
      break;
    case AMDGPU::EXIT_STRICT_WQM:
      if (State == StateWQM && StrictEntry) {
        // Transition WQM -> StrictWQM -> WQM detected.
        lowerPseudoStrictMode(MBB, StrictEntry, &MI);
      }
      StrictEntry = nullptr;
      break;
    case AMDGPU::ENTER_STRICT_WWM:
    case AMDGPU::EXIT_STRICT_WWM:
      StrictEntry = nullptr;
      break;
    default:
      break;
    }
    if (SplitPoint)
      SplitPoints.push_back(SplitPoint);
  }

  // Perform splitting after the instruction scan to simplify iteration.
  if (!SplitPoints.empty()) {
    MachineBasicBlock *BB = &MBB;
    for (MachineInstr *MI : SplitPoints)
      BB = splitBlock(BB, MI);
  }
}
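// prepareInsertion picks a point in [First, Last] where SCC is dead, so that
// a state-transition instruction (which clobbers SCC) can be placed without
// a save/restore; if no such point exists it falls back to saveSCC.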
MachineBasicBlock::iterator SIWholeQuadMode::prepareInsertion(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator First,
    MachineBasicBlock::iterator Last, bool PreferLast, bool SaveSCC) {
  if (!SaveSCC)
    return PreferLast ? Last : First;

  LiveRange &LR =
      LIS->getRegUnit(*TRI->regunits(MCRegister::from(AMDGPU::SCC)).begin());
  auto MBBE = MBB.end();
  SlotIndex FirstIdx = First != MBBE ? LIS->getInstructionIndex(*First)
                                     : LIS->getMBBEndIdx(&MBB);
  SlotIndex LastIdx =
      Last != MBBE ? LIS->getInstructionIndex(*Last) : LIS->getMBBEndIdx(&MBB);
  SlotIndex Idx = PreferLast ? LastIdx : FirstIdx;
  const LiveRange::Segment *S;

  for (;;) {
    S = LR.getSegmentContaining(Idx);
    if (!S)
      break;

    if (PreferLast) {
      SlotIndex Next = S->start.getBaseIndex();
      if (Next < FirstIdx)
        break;
      Idx = Next;
    } else {
      MachineInstr *EndMI =
          LIS->getInstructionFromIndex(S->end.getBaseIndex());
      assert(EndMI && "Segment does not end on valid instruction");
      auto NextI = std::next(EndMI->getIterator());
      if (NextI == MBB.end())
        break;
      SlotIndex Next = LIS->getInstructionIndex(*NextI);
      if (Next > LastIdx)
        break;
      Idx = Next;
    }
  }

  MachineBasicBlock::iterator MBBI;

  if (MachineInstr *MI = LIS->getInstructionFromIndex(Idx))
    MBBI = MI;
  else {
    assert(Idx == LIS->getMBBEndIdx(&MBB));
    MBBI = MBB.end();
  }

  // Move the insertion point past any operations modifying EXEC. This
  // assumes that the value of SCC defined by any of these operations does
  // not need to be preserved.
  while (MBBI != Last) {
    bool IsExecDef = false;
    for (const MachineOperand &MO : MBBI->operands()) {
      if (MO.isReg() && MO.isDef()) {
        IsExecDef |=
            MO.getReg() == AMDGPU::EXEC_LO || MO.getReg() == AMDGPU::EXEC;
      }
    }
    if (!IsExecDef)
      break;
    MBBI++;
    S = nullptr;
  }

  if (S)
    MBBI = saveSCC(MBB, MBBI);

  return MBBI;
}
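// toExact and toWQM emit the actual EXEC transitions: an AND with the live
// mask (optionally saving the WQM exec mask first) and a restore or S_WQM
// recomputation respectively.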
void SIWholeQuadMode::toExact(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator Before,
                              Register SaveWQM) {
  bool IsTerminator = Before == MBB.end();
  if (!IsTerminator) {
    auto FirstTerm = MBB.getFirstTerminator();
    if (FirstTerm != MBB.end()) {
      SlotIndex FirstTermIdx = LIS->getInstructionIndex(*FirstTerm);
      SlotIndex BeforeIdx = LIS->getInstructionIndex(*Before);
      IsTerminator = BeforeIdx > FirstTermIdx;
    }
  }

  MachineInstr *MI;

  if (SaveWQM) {
    unsigned Opcode = IsTerminator ? AndSaveExecTermOpc : AndSaveExecOpc;
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(Opcode), SaveWQM)
             .addReg(LiveMaskReg);
  } else {
    unsigned Opcode = IsTerminator ? AndTermOpc : AndOpc;
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(Opcode), Exec)
             .addReg(Exec)
             .addReg(LiveMaskReg);
  }

  LIS->InsertMachineInstrInMaps(*MI);
  StateTransition[MI] = StateExact;
}
void SIWholeQuadMode::toWQM(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator Before,
                            Register SavedWQM) {
  MachineInstr *MI;

  if (SavedWQM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), Exec)
             .addReg(SavedWQM);
  } else {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(WQMOpc), Exec).addReg(Exec);
  }

  LIS->InsertMachineInstrInMaps(*MI);
  StateTransition[MI] = StateWQM;
}
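// toStrictMode/fromStrictMode bracket a strict region: the ENTER_* pseudo
// saves the current EXEC into SaveOrig and enables the whole wavefront (or
// whole quads), and the EXIT_* pseudo restores the saved mask.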
void SIWholeQuadMode::toStrictMode(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator Before,
                                   Register SaveOrig, char StrictStateNeeded) {
  MachineInstr *MI;
  assert(SaveOrig);
  assert(StrictStateNeeded == StateStrictWWM ||
         StrictStateNeeded == StateStrictWQM);

  if (StrictStateNeeded == StateStrictWWM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::ENTER_STRICT_WWM),
                 SaveOrig)
             .addImm(-1);
  } else {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::ENTER_STRICT_WQM),
                 SaveOrig)
             .addImm(-1);
  }
  LIS->InsertMachineInstrInMaps(*MI);
  StateTransition[MI] = StrictStateNeeded;

  // Mark the block as needing lowering so it is checked for unnecessary
  // strict mode transitions.
  auto BII = Blocks.find(&MBB);
  assert(BII != Blocks.end());
  BII->second.NeedsLowering = true;
}
void SIWholeQuadMode::fromStrictMode(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator Before,
                                     Register SavedOrig, char NonStrictState,
                                     char CurrentStrictState) {
  MachineInstr *MI;

  assert(SavedOrig);
  assert(CurrentStrictState == StateStrictWWM ||
         CurrentStrictState == StateStrictWQM);

  if (CurrentStrictState == StateStrictWWM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::EXIT_STRICT_WWM),
                 Exec)
             .addReg(SavedOrig);
  } else {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::EXIT_STRICT_WQM),
                 Exec)
             .addReg(SavedOrig);
  }
  LIS->InsertMachineInstrInMaps(*MI);
  StateTransition[MI] = NonStrictState;
}
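// processBlock is the main state machine: it walks a block once, computes
// the set of states each instruction may execute in, and inserts the minimal
// Exact/WQM/Strict transitions at points chosen by prepareInsertion.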
void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, bool IsEntry) {
  auto BII = Blocks.find(&MBB);
  if (BII == Blocks.end())
    return;

  BlockInfo &BI = BII->second;

  // This is a non-entry block that is WQM throughout, so nothing to do.
  if (!IsEntry && BI.Needs == StateWQM && BI.OutNeeds != StateExact) {
    BI.InitialState = StateWQM;
    return;
  }

  LLVM_DEBUG(dbgs() << "\nProcessing block " << printMBBReference(MBB)
                    << ":\n");

  Register SavedWQMReg;
  Register SavedNonStrictReg;
  bool WQMFromExec = IsEntry;
  char State = (IsEntry || !(BI.InNeeds & StateWQM)) ? StateExact : StateWQM;
  char NonStrictState = 0;
  const TargetRegisterClass *BoolRC = TRI->getBoolRC();

  auto II = MBB.getFirstNonPHI(), IE = MBB.end();
  if (IsEntry) {
    // Skip the instruction that saves LiveMask.
    if (II != IE && II->getOpcode() == AMDGPU::COPY)
      ++II;
  }

  // The first instruction where it's safe to switch from WQM to Exact or
  // vice versa.
  MachineBasicBlock::iterator FirstWQM = IE;
  // The first instruction where it's safe to switch to/from Strict mode; it
  // is always the same as, or after, FirstWQM.
  MachineBasicBlock::iterator FirstStrict = IE;

  // Record the initial state in the block information.
  BI.InitialState = State;

  for (;;) {
    MachineBasicBlock::iterator Next = II;
    char Needs = StateExact | StateWQM; // Strict mode is disabled by default.
    char OutNeeds = 0;

    if (FirstWQM == IE)
      FirstWQM = II;

    if (FirstStrict == IE)
      FirstStrict = II;

    // First, figure out the allowed states (Needs) from the propagated flags.
    if (II != IE) {
      MachineInstr &MI = *II;

      if (MI.isTerminator() || TII->mayReadEXEC(*MRI, MI)) {
        auto III = Instructions.find(&MI);
        if (III != Instructions.end()) {
          if (III->second.Needs & StateStrictWWM)
            Needs = StateStrictWWM;
          else if (III->second.Needs & StateStrictWQM)
            Needs = StateStrictWQM;
          else if (III->second.Needs & StateWQM)
            Needs = StateWQM;
          else
            Needs &= ~III->second.Disabled;
          OutNeeds = III->second.OutNeeds;
        }
      } else {
        // If the instruction doesn't actually need a correct EXEC, we can
        // safely leave Strict mode enabled.
        Needs = StateExact | StateWQM | StateStrict;
      }

      if (MI.isBranch() && OutNeeds == StateExact)
        Needs = StateExact;

      ++Next;
    } else {
      // End of basic block.
      if (BI.OutNeeds & StateWQM)
        Needs = StateWQM;
      else if (BI.OutNeeds == StateExact)
        Needs = StateExact;
      else
        Needs = StateWQM | StateExact;
    }

    // Now, transition if necessary.
    if (!(Needs & State)) {
      MachineBasicBlock::iterator First;
      if (State == StateStrictWWM || Needs == StateStrictWWM ||
          State == StateStrictWQM || Needs == StateStrictWQM) {
        // We must switch to or from Strict mode.
        First = FirstStrict;
      } else {
        // We only need to switch to/from WQM.
        First = FirstWQM;
      }

      // Whether we need to save SCC depends on start and end states.
      bool SaveSCC = false;
      switch (State) {
      case StateExact:
      case StateStrictWWM:
      case StateStrictWQM:
        // Exact/Strict -> Strict: save SCC.
        // Exact/Strict -> WQM: save SCC if WQM comes from exec.
        // Exact/Strict -> Exact: no save.
        SaveSCC = (Needs & StateStrict) || ((Needs & StateWQM) && WQMFromExec);
        break;
      case StateWQM:
        // WQM -> Exact/Strict: save SCC.
        SaveSCC = !(Needs & StateWQM);
        break;
      default:
        llvm_unreachable("Unknown state");
      }
      MachineBasicBlock::iterator Before =
          prepareInsertion(MBB, First, II, Needs == StateWQM, SaveSCC);

      if (State & StateStrict) {
        assert(State == StateStrictWWM || State == StateStrictWQM);
        assert(SavedNonStrictReg);
        fromStrictMode(MBB, Before, SavedNonStrictReg, NonStrictState, State);

        LIS->createAndComputeVirtRegInterval(SavedNonStrictReg);
        SavedNonStrictReg = 0;
        State = NonStrictState;
      }

      if (Needs & StateStrict) {
        NonStrictState = State;
        assert(Needs == StateStrictWWM || Needs == StateStrictWQM);
        assert(!SavedNonStrictReg);
        SavedNonStrictReg = MRI->createVirtualRegister(BoolRC);

        toStrictMode(MBB, Before, SavedNonStrictReg, Needs);
        State = Needs;
      } else {
        if (State == StateWQM && (Needs & StateExact) && !(Needs & StateWQM)) {
          if (!WQMFromExec && (OutNeeds & StateWQM)) {
            assert(!SavedWQMReg);
            SavedWQMReg = MRI->createVirtualRegister(BoolRC);
          }

          toExact(MBB, Before, SavedWQMReg);
          State = StateExact;
        } else if (State == StateExact && (Needs & StateWQM) &&
                   !(Needs & StateExact)) {
          assert(WQMFromExec == (SavedWQMReg == 0));

          toWQM(MBB, Before, SavedWQMReg);

          if (SavedWQMReg) {
            LIS->createAndComputeVirtRegInterval(SavedWQMReg);
            SavedWQMReg = 0;
          }
          State = StateWQM;
        } else {
          // We can get here if we transitioned from StrictWWM to a
          // non-StrictWWM state that already matches our needs.
          assert(Needs & State);
        }
      }
    }

    if (Needs != (StateExact | StateWQM | StateStrict)) {
      if (Needs != (StateExact | StateWQM))
        FirstWQM = IE;
      FirstStrict = IE;
    }

    if (II == IE)
      break;

    II = Next;
  }

  assert(!SavedWQMReg);
  assert(!SavedNonStrictReg);
}
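// lowerLiveMaskQueries replaces SI_PS_LIVE / SI_LIVE_MASK pseudos with plain
// copies of the live mask register computed in runOnMachineFunction.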
void SIWholeQuadMode::lowerLiveMaskQueries() {
  for (MachineInstr *MI : LiveMaskQueries) {
    const DebugLoc &DL = MI->getDebugLoc();
    Register Dest = MI->getOperand(0).getReg();

    MachineInstr *Copy =
        BuildMI(*MI->getParent(), MI, DL, TII->get(AMDGPU::COPY), Dest)
            .addReg(LiveMaskReg);

    LIS->ReplaceMachineInstrInMaps(*MI, *Copy);
    MI->eraseFromParent();
  }
}
void SIWholeQuadMode::lowerCopyInstrs() {
  for (MachineInstr *MI : LowerToMovInstrs) {
    assert(MI->getNumExplicitOperands() == 2);

    const Register Reg = MI->getOperand(0).getReg();

    const TargetRegisterClass *regClass =
        TRI->getRegClassForOperandReg(*MRI, MI->getOperand(0));
    if (TRI->isVGPRClass(regClass)) {
      const unsigned MovOp = TII->getMovOpcode(regClass);
      MI->setDesc(TII->get(MovOp));

      // A VALU mov implicitly reads EXEC; check the operand is present.
      assert(any_of(MI->implicit_operands(), [](const MachineOperand &MO) {
        return MO.isUse() && MO.getReg() == AMDGPU::EXEC;
      }));
    } else {
      // Remove early-clobber and exec dependency from simple SGPR copies.
      // This allows some to be eliminated during/post RA.
      if (MI->getOperand(0).isEarlyClobber()) {
        LIS->removeInterval(Reg);
        MI->getOperand(0).setIsEarlyClobber(false);
        LIS->createAndComputeVirtRegInterval(Reg);
      }
      int Index = MI->findRegisterUseOperandIdx(AMDGPU::EXEC);
      while (Index >= 0) {
        MI->removeOperand(Index);
        Index = MI->findRegisterUseOperandIdx(AMDGPU::EXEC);
      }
      MI->setDesc(TII->get(AMDGPU::COPY));
    }
  }

  for (MachineInstr *MI : LowerToCopyInstrs) {
    if (MI->getOpcode() == AMDGPU::V_SET_INACTIVE_B32 ||
        MI->getOpcode() == AMDGPU::V_SET_INACTIVE_B64) {
      assert(MI->getNumExplicitOperands() == 3);
      // The only reason to be here is that the V_SET_INACTIVE has an undef
      // inactive input, so it is being replaced by a simple copy; remove the
      // second (undef) source.
      assert(MI->getOperand(2).isUndef());
      MI->removeOperand(2);
      MI->untieRegOperand(1);
    } else {
      assert(MI->getNumExplicitOperands() == 2);
    }

    unsigned CopyOp = MI->getOperand(1).isReg()
                          ? (unsigned)AMDGPU::COPY
                          : TII->getMovOpcode(TRI->getRegClassForOperandReg(
                                *MRI, MI->getOperand(0)));
    MI->setDesc(TII->get(CopyOp));
  }
}
void SIWholeQuadMode::lowerKillInstrs(bool IsWQM) {
  for (MachineInstr *MI : KillInstrs) {
    MachineBasicBlock *MBB = MI->getParent();
    MachineInstr *SplitPoint = nullptr;
    switch (MI->getOpcode()) {
    case AMDGPU::SI_DEMOTE_I1:
    case AMDGPU::SI_KILL_I1_TERMINATOR:
      SplitPoint = lowerKillI1(*MBB, *MI, IsWQM);
      break;
    case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      SplitPoint = lowerKillF32(*MBB, *MI);
      break;
    default:
      continue;
    }
    if (SplitPoint)
      splitBlock(MBB, SplitPoint);
  }
}
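// runOnMachineFunction ties everything together: analyze the function, take
// an early exit for trivial shaders, save the initial live mask, then either
// switch the whole shader to WQM or process and lower each block.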
bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "SI Whole Quad Mode on " << MF.getName()
                    << " ------------- \n");
  LLVM_DEBUG(MF.dump(););

  Instructions.clear();
  Blocks.clear();
  LiveMaskQueries.clear();
  LowerToCopyInstrs.clear();
  LowerToMovInstrs.clear();
  KillInstrs.clear();
  StateTransition.clear();

  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();
  LIS = &getAnalysis<LiveIntervals>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PDT = &getAnalysis<MachinePostDominatorTree>();

  if (ST->isWave32()) {
    AndOpc = AMDGPU::S_AND_B32;
    AndTermOpc = AMDGPU::S_AND_B32_term;
    AndN2Opc = AMDGPU::S_ANDN2_B32;
    XorOpc = AMDGPU::S_XOR_B32;
    AndSaveExecOpc = AMDGPU::S_AND_SAVEEXEC_B32;
    AndSaveExecTermOpc = AMDGPU::S_AND_SAVEEXEC_B32_term;
    WQMOpc = AMDGPU::S_WQM_B32;
    Exec = AMDGPU::EXEC_LO;
  } else {
    AndOpc = AMDGPU::S_AND_B64;
    AndTermOpc = AMDGPU::S_AND_B64_term;
    AndN2Opc = AMDGPU::S_ANDN2_B64;
    XorOpc = AMDGPU::S_XOR_B64;
    AndSaveExecOpc = AMDGPU::S_AND_SAVEEXEC_B64;
    AndSaveExecTermOpc = AMDGPU::S_AND_SAVEEXEC_B64_term;
    WQMOpc = AMDGPU::S_WQM_B64;
    Exec = AMDGPU::EXEC;
  }

  const char GlobalFlags = analyzeFunction(MF);
  const bool NeedsLiveMask = !(KillInstrs.empty() && LiveMaskQueries.empty());

  LiveMaskReg = Exec;

  // The shader is simple: it needs no state changes and no WQM/Strict
  // instructions lowered.
  if (!(GlobalFlags & (StateWQM | StateStrict)) && LowerToCopyInstrs.empty() &&
      LowerToMovInstrs.empty() && KillInstrs.empty()) {
    lowerLiveMaskQueries();
    return !LiveMaskQueries.empty();
  }

  MachineBasicBlock &Entry = MF.front();
  MachineBasicBlock::iterator EntryMI = Entry.getFirstNonPHI();

  // Store a copy of the original live mask when required.
  if (NeedsLiveMask || (GlobalFlags & StateWQM)) {
    LiveMaskReg = MRI->createVirtualRegister(TRI->getBoolRC());
    MachineInstr *MI =
        BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::COPY),
                LiveMaskReg)
            .addReg(Exec);
    LIS->InsertMachineInstrInMaps(*MI);
  }

  LLVM_DEBUG(printInfo());

  lowerLiveMaskQueries();
  lowerCopyInstrs();

  if (GlobalFlags == StateWQM) {
    // The shader only needs WQM: switch EXEC once at the entry.
    auto MI = BuildMI(Entry, EntryMI, DebugLoc(), TII->get(WQMOpc), Exec)
                  .addReg(Exec);
    LIS->InsertMachineInstrInMaps(*MI);
    lowerKillInstrs(true);
  } else {
    // Wave mode switching requires the full lowering pass.
    for (auto BII : Blocks)
      processBlock(*BII.first, BII.first == &Entry);
    // Lowering blocks causes block splitting, so perform it as a second pass.
    for (auto BII : Blocks)
      lowerBlock(*BII.first);
  }

  // Compute the live range for the live mask.
  if (LiveMaskReg != Exec)
    LIS->createAndComputeVirtRegInterval(LiveMaskReg);

  // Physical registers like SCC aren't tracked by default anyway, so just
  // removing the ranges we computed is the simplest option for maintaining
  // the analysis results.
  LIS->removeAllRegUnitsForPhysReg(AMDGPU::SCC);

  // If we performed any kills then recompute EXEC.
  if (!KillInstrs.empty())
    LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);

  return true;
}