#define DEBUG_TYPE "si-insert-waitcnts"

DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE "-forceexp",
              "Force emit s_waitcnt expcnt(0) instrs");
DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE "-forcelgkm",
              "Force emit s_waitcnt lgkmcnt(0) instrs");
DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE "-forcevm",
              "Force emit s_waitcnt vmcnt(0) instrs");

static cl::opt<bool> ForceEmitZeroFlag(
    "amdgpu-waitcnt-forcezero",
    cl::desc("Force all waitcnt instrs to be emitted as "
             "s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> ForceEmitZeroLoadFlag(
    "amdgpu-waitcnt-load-forcezero",
    cl::desc("Force all waitcnt load counters to wait until 0"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> ExpertSchedulingModeFlag(
    "amdgpu-expert-scheduling-mode",
    cl::desc("Enable expert scheduling mode 2 for all functions (GFX12+ only)"),
    cl::init(false), cl::Hidden);
  TRACKINGID_RANGE_LEN = (1 << 16),
  REGUNITS_END = REGUNITS_BEGIN + TRACKINGID_RANGE_LEN,
  NUM_LDSDMA = TRACKINGID_RANGE_LEN,
  LDSDMA_BEGIN = REGUNITS_END,
  LDSDMA_END = LDSDMA_BEGIN + NUM_LDSDMA,

static constexpr VMEMID toVMEMID(MCRegUnit RU) {
  return static_cast<unsigned>(RU);
}
#define AMDGPU_DECLARE_WAIT_EVENTS(DECL)                                       \
  DECL(VMEM_SAMPLER_READ_ACCESS)                                               \
  DECL(VMEM_BVH_READ_ACCESS)                                                   \
  DECL(GLOBAL_INV_ACCESS)                                                      \
  DECL(VMEM_WRITE_ACCESS)                                                      \
  DECL(SCRATCH_WRITE_ACCESS)                                                   \
  DECL(EXP_POS_ACCESS)                                                         \
  DECL(EXP_PARAM_ACCESS)                                                       \
  DECL(EXP_LDS_ACCESS)                                                         \
  DECL(VGPR_CSMACC_WRITE)                                                      \
  DECL(VGPR_DPMACC_WRITE)                                                      \
  DECL(VGPR_TRANS_WRITE)                                                       \
  DECL(VGPR_XDL_WRITE)                                                         \
  DECL(VGPR_LDS_READ)                                                          \
  DECL(VGPR_FLAT_READ)                                                         \
  DECL(VGPR_VMEM_READ)

#define AMDGPU_EVENT_ENUM(Name) Name,
#undef AMDGPU_EVENT_ENUM

auto wait_events(WaitEventType MaxEvent = NUM_WAIT_EVENTS) {
  return enum_seq(VMEM_ACCESS, MaxEvent);
}

#define AMDGPU_EVENT_NAME(Name) #Name,
#undef AMDGPU_EVENT_NAME

static constexpr StringLiteral getWaitEventTypeName(WaitEventType Event) {
  return WaitEventTypeName[Event];
}
    AMDGPU::S_WAIT_LOADCNT,   AMDGPU::S_WAIT_DSCNT,
    AMDGPU::S_WAIT_EXPCNT,    AMDGPU::S_WAIT_STORECNT,
    AMDGPU::S_WAIT_SAMPLECNT, AMDGPU::S_WAIT_BVHCNT,
    AMDGPU::S_WAIT_KMCNT,     AMDGPU::S_WAIT_XCNT,
    AMDGPU::S_WAIT_ASYNCCNT};

  switch (MI.getOpcode()) {
  case AMDGPU::ASYNCMARK:
  case AMDGPU::WAIT_ASYNCMARK:

  return MI.isMetaInstruction();

  assert(updateVMCntOnly(Inst));
    return VMEM_NOSAMPLER;

    return VMEM_NOSAMPLER;
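// WaitEventSet is a small bitset over WaitEventType values, e.g.
//   WaitEventSet S{SMEM_ACCESS, LDS_ACCESS};
//   S.contains(LDS_ACCESS); // true
// The pass uses it to record which kinds of outstanding events a counter is
// still covering.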
  WaitEventSet() = default;
  explicit constexpr WaitEventSet(WaitEventType Event) {
    static_assert(NUM_WAIT_EVENTS <= sizeof(Mask) * 8,
                  "Not enough bits in Mask for all the events");

  constexpr WaitEventSet(std::initializer_list<WaitEventType> Events) {
    for (auto &E : Events) {

  void insert(const WaitEventType &Event) { Mask |= 1 << Event; }
  void remove(const WaitEventType &Event) { Mask &= ~(1 << Event); }
  void remove(const WaitEventSet &Other) { Mask &= ~Other.Mask; }
  bool contains(const WaitEventType &Event) const {
    return Mask & (1 << Event);
  }
  bool contains(const WaitEventSet &Other) const {
    return (~Mask & Other.Mask) == 0;
  }

  bool operator==(const WaitEventSet &Other) const {
    return Mask == Other.Mask;
  }

  bool empty() const { return Mask == 0; }
  bool twoOrMore() const { return Mask & (Mask - 1); }
  operator bool() const { return !empty(); }
  void print(raw_ostream &OS) const {
    ListSeparator LS(", ");
    for (WaitEventType Event : wait_events()) {
      OS << LS << getWaitEventTypeName(Event);

void WaitEventSet::dump() const {
class WaitcntBrackets;

class WaitcntGenerator {
  const GCNSubtarget &ST;
  const SIInstrInfo &TII;
  AMDGPU::IsaVersion IV;

  bool ExpandWaitcntProfiling = false;
  const AMDGPU::HardwareLimits &Limits;

  WaitcntGenerator() = delete;
  WaitcntGenerator(const WaitcntGenerator &) = delete;
  WaitcntGenerator(const MachineFunction &MF, InstCounterType MaxCounter,
                   const AMDGPU::HardwareLimits &Limits)
      : ST(MF.getSubtarget<GCNSubtarget>()), TII(*ST.getInstrInfo()),
        ExpandWaitcntProfiling(
            MF.getFunction().hasFnAttribute("amdgpu-expand-waitcnt-profiling")),

  bool isOptNone() const { return OptNone; }

  const AMDGPU::HardwareLimits &getLimits() const { return Limits; }

  virtual bool
  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                          MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,

  bool promoteSoftWaitCnt(MachineInstr *Waitcnt) const;

  virtual bool createNewWaitcnt(MachineBasicBlock &Block,
                                AMDGPU::Waitcnt Wait,
                                const WaitcntBrackets &ScoreBrackets) = 0;

  virtual const WaitEventSet &getWaitEvents(InstCounterType T) const = 0;

  virtual AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const = 0;

  virtual ~WaitcntGenerator() = default;
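// Two concrete generators follow: WaitcntGeneratorPreGFX12 works in terms of
// the legacy combined s_waitcnt/s_waitcnt_vscnt encodings, while
// WaitcntGeneratorGFX12Plus emits the split per-counter S_WAIT_* instructions
// available with extended wait counts.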
class WaitcntGeneratorPreGFX12 final : public WaitcntGenerator {
  static constexpr const WaitEventSet
      WaitEventMaskForInstPreGFX12[NUM_INST_CNTS] = {
          WaitEventSet(
              {VMEM_ACCESS, VMEM_SAMPLER_READ_ACCESS, VMEM_BVH_READ_ACCESS}),
          WaitEventSet({SMEM_ACCESS, LDS_ACCESS, GDS_ACCESS, SQ_MESSAGE}),
          WaitEventSet({EXP_GPR_LOCK, GDS_GPR_LOCK, VMW_GPR_LOCK,
                        EXP_PARAM_ACCESS, EXP_POS_ACCESS, EXP_LDS_ACCESS}),
          WaitEventSet({VMEM_WRITE_ACCESS, SCRATCH_WRITE_ACCESS}),

public:
  using WaitcntGenerator::WaitcntGenerator;

  bool
  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                          MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,

  bool createNewWaitcnt(MachineBasicBlock &Block,
                        AMDGPU::Waitcnt Wait,
                        const WaitcntBrackets &ScoreBrackets) override;

  const WaitEventSet &getWaitEvents(InstCounterType T) const override {
    return WaitEventMaskForInstPreGFX12[T];
  }

  AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const override;
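// The GFX12+ event table has more rows than the pre-GFX12 one: sample and BVH
// loads get dedicated counters, and the trailing entries cover the XCNT
// groups, ASYNCCNT, and the expert-scheduling VGPR write/read hazard events.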
class WaitcntGeneratorGFX12Plus final : public WaitcntGenerator {
  static constexpr const WaitEventSet
      WaitEventMaskForInstGFX12Plus[NUM_EXTENDED_INST_CNTS] = {
          WaitEventSet({VMEM_ACCESS, GLOBAL_INV_ACCESS}),
          WaitEventSet({LDS_ACCESS, GDS_ACCESS}),
          WaitEventSet({EXP_GPR_LOCK, GDS_GPR_LOCK, VMW_GPR_LOCK,
                        EXP_PARAM_ACCESS, EXP_POS_ACCESS, EXP_LDS_ACCESS}),
          WaitEventSet({VMEM_WRITE_ACCESS, SCRATCH_WRITE_ACCESS}),
          WaitEventSet({VMEM_SAMPLER_READ_ACCESS}),
          WaitEventSet({VMEM_BVH_READ_ACCESS}),
          WaitEventSet({SMEM_ACCESS, SQ_MESSAGE, SCC_WRITE}),
          WaitEventSet({VMEM_GROUP, SMEM_GROUP}),
          WaitEventSet({ASYNC_ACCESS}),
          WaitEventSet({VGPR_CSMACC_WRITE, VGPR_DPMACC_WRITE, VGPR_TRANS_WRITE,
                        VGPR_XDL_WRITE}),
          WaitEventSet({VGPR_LDS_READ, VGPR_FLAT_READ, VGPR_VMEM_READ})};

public:
  WaitcntGeneratorGFX12Plus() = delete;
  WaitcntGeneratorGFX12Plus(const MachineFunction &MF,
                            InstCounterType MaxCounter,
                            const AMDGPU::HardwareLimits &Limits,
                            bool IsExpertMode)
      : WaitcntGenerator(MF, MaxCounter, Limits), IsExpertMode(IsExpertMode) {}

  bool
  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                          MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,

  bool createNewWaitcnt(MachineBasicBlock &Block,
                        AMDGPU::Waitcnt Wait,
                        const WaitcntBrackets &ScoreBrackets) override;

  const WaitEventSet &getWaitEvents(InstCounterType T) const override {
    return WaitEventMaskForInstGFX12Plus[T];
  }

  AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const override;
struct PreheaderFlushFlags {
  bool FlushVmCnt = false;
  bool FlushDsCnt = false;
};
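// The pass itself. It walks the function in reverse post order, tracks
// outstanding memory events per counter in WaitcntBrackets, merges bracket
// state across block boundaries, and asks the WaitcntGenerator to rewrite or
// insert the minimal wait instructions.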
class SIInsertWaitcnts {
  DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
  DenseMap<MachineBasicBlock *, PreheaderFlushFlags> PreheadersToFlush;
  MachineLoopInfo &MLI;
  MachinePostDominatorTree &PDT;

  struct BlockInfo {
    std::unique_ptr<WaitcntBrackets> Incoming;
    bool Dirty = true;

    BlockInfo() = default;
    BlockInfo(BlockInfo &&) = default;
    BlockInfo &operator=(BlockInfo &&) = default;
    ~BlockInfo();
  };

  MapVector<MachineBasicBlock *, BlockInfo> BlockInfos;

  std::unique_ptr<WaitcntGenerator> WCG;

  DenseSet<MachineInstr *> CallInsts;
  DenseSet<MachineInstr *> ReturnInsts;

  DenseMap<MachineInstr *, bool> EndPgmInsts;

  AMDGPU::HardwareLimits Limits;

  const GCNSubtarget &ST;
  const SIInstrInfo &TII;
  const SIRegisterInfo &TRI;
  const MachineRegisterInfo &MRI;

  bool IsExpertMode = false;

  SIInsertWaitcnts(MachineLoopInfo &MLI, MachinePostDominatorTree &PDT,
                   AliasAnalysis *AA, MachineFunction &MF)
      : MLI(MLI), PDT(PDT), AA(AA), MF(MF),
        ST(MF.getSubtarget<GCNSubtarget>()), TII(*ST.getInstrInfo()),
        TRI(TII.getRegisterInfo()), MRI(MF.getRegInfo()) {
    (void)ForceExpCounter;
    (void)ForceLgkmCounter;
    (void)ForceVMCounter;

  const AMDGPU::HardwareLimits &getLimits() const { return Limits; }
  PreheaderFlushFlags getPreheaderFlushFlags(MachineLoop *ML,
                                             const WaitcntBrackets &Brackets);
  PreheaderFlushFlags isPreheaderToFlush(MachineBasicBlock &MBB,
                                         const WaitcntBrackets &ScoreBrackets);
  bool isVMEMOrFlatVMEM(const MachineInstr &MI) const;
  bool isDSRead(const MachineInstr &MI) const;
  bool mayStoreIncrementingDSCNT(const MachineInstr &MI) const;

  void setForceEmitWaitcnt() {

  WaitEventType getVmemWaitEventType(const MachineInstr &Inst) const {
    switch (Inst.getOpcode()) {
    case AMDGPU::GLOBAL_INV:
      return GLOBAL_INV_ACCESS;

    case AMDGPU::GLOBAL_WB:
    case AMDGPU::GLOBAL_WBINV:
      return VMEM_WRITE_ACCESS;

    static const WaitEventType VmemReadMapping[NUM_VMEM_TYPES] = {
        VMEM_ACCESS, VMEM_SAMPLER_READ_ACCESS, VMEM_BVH_READ_ACCESS};

    if (TII.mayAccessScratch(Inst))
      return SCRATCH_WRITE_ACCESS;
    return VMEM_WRITE_ACCESS;

    return VmemReadMapping[getVmemType(Inst)];
  }

  std::optional<WaitEventType>
  getExpertSchedulingEventType(const MachineInstr &Inst) const;
  bool isAsync(const MachineInstr &MI) const {

    const MachineOperand *Async =
        TII.getNamedOperand(MI, AMDGPU::OpName::IsAsync);

  bool isNonAsyncLdsDmaWrite(const MachineInstr &MI) const {

  bool isAsyncLdsDmaWrite(const MachineInstr &MI) const {

  bool shouldUpdateAsyncMark(const MachineInstr &MI,

    if (!isAsyncLdsDmaWrite(MI))

  bool isVmemAccess(const MachineInstr &MI) const;
  bool generateWaitcntInstBefore(MachineInstr &MI,
                                 WaitcntBrackets &ScoreBrackets,
                                 MachineInstr *OldWaitcntInstr,
                                 PreheaderFlushFlags FlushFlags);
  bool generateWaitcnt(AMDGPU::Waitcnt Wait,

                       MachineBasicBlock &Block, WaitcntBrackets &ScoreBrackets,
                       MachineInstr *OldWaitcntInstr);

  WaitEventSet getEventsFor(const MachineInstr &Inst) const;
  void updateEventWaitcntAfter(MachineInstr &Inst,
                               WaitcntBrackets *ScoreBrackets);
  bool isNextENDPGM(MachineBasicBlock::instr_iterator It,
                    MachineBasicBlock *Block) const;
  bool insertForcedWaitAfter(MachineInstr &Inst, MachineBasicBlock &Block,
                             WaitcntBrackets &ScoreBrackets);
  bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
                            WaitcntBrackets &ScoreBrackets);

  bool removeRedundantSoftXcnts(MachineBasicBlock &Block);
  void setSchedulingMode(MachineBasicBlock &MBB, MachineBasicBlock::iterator It,
                         bool ExpertMode) const;

  const WaitEventSet &getWaitEvents(InstCounterType T) const {
    return WCG->getWaitEvents(T);
  }

  InstCounterType getCounterFromEvent(WaitEventType E) const {
    return WCG->getCounterFromEvent(E);
  }
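// WaitcntBrackets is the scoreboard: each counter keeps a score interval
// (ScoreLB, ScoreUB]; every new event bumps ScoreUB and stamps the register
// units it touches, and applying a wait of N raises ScoreLB to ScoreUB - N.
// A register is still pending while its stamp is above ScoreLB.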
class WaitcntBrackets {
    unsigned NumUnusedVmem = 0, NumUnusedSGPRs = 0;
    for (auto &[ID, Val] : VMem) {

    for (auto &[ID, Val] : SGPRs) {

    if (NumUnusedVmem || NumUnusedSGPRs) {
      errs() << "WaitcntBracket had unused entries at destruction time: "
             << NumUnusedVmem << " VMem and " << NumUnusedSGPRs
             << " SGPR unused entries\n";

    return ScoreUBs[T] - ScoreLBs[T];

    return getVMemScore(ID, T) > getScoreLB(T);

    return getScoreUB(T) - getScoreLB(T);

    auto It = SGPRs.find(RU);
    return It != SGPRs.end() ? It->second.get(T) : 0;

    auto It = VMem.find(TID);
    return It != VMem.end() ? It->second.Scores[T] : 0;

  void simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {

  void simplifyWaitcnt(const AMDGPU::Waitcnt &CheckWait,
                       AMDGPU::Waitcnt &UpdateWait) const;

  void simplifyXcnt(const AMDGPU::Waitcnt &CheckWait,
                    AMDGPU::Waitcnt &UpdateWait) const;
  void simplifyVmVsrc(const AMDGPU::Waitcnt &CheckWait,
                      AMDGPU::Waitcnt &UpdateWait) const;
                              AMDGPU::Waitcnt &Wait,
                              const MachineInstr &MI) const;
  MCPhysReg determineVGPR16Dependency(const MachineInstr &MI,

                                      AMDGPU::Waitcnt &Wait) const;
  AMDGPU::Waitcnt determineAsyncWait(unsigned N);
  void tryClearSCCWriteEvent(MachineInstr *Inst);

  void applyWaitcnt(const AMDGPU::Waitcnt &Wait);

  void updateByEvent(WaitEventType E, MachineInstr &MI);
  void recordAsyncMark(MachineInstr &MI);

  bool hasPendingEvent() const { return !PendingEvents.empty(); }
  bool hasPendingEvent(WaitEventType E) const {
    return PendingEvents.contains(E);

    bool HasPending = PendingEvents & Context->getWaitEvents(T);
           "Expected pending events iff scoreboard is not empty");

    WaitEventSet Events = PendingEvents & Context->getWaitEvents(T);
    return Events.twoOrMore();

  bool hasPendingFlat() const {

  void setPendingFlat() {

  bool hasPendingGDS() const {

  unsigned getPendingGDSWait() const {

  bool hasOtherPendingVmemTypes(MCPhysReg Reg, VmemType V) const {
    for (MCRegUnit RU : regunits(Reg)) {
      auto It = VMem.find(toVMEMID(RU));
      if (It != VMem.end() && (It->second.VMEMTypes & ~(1 << V)))
        return true;
    }
    return false;
  }

  void clearVgprVmemTypes(MCPhysReg Reg) {
    for (MCRegUnit RU : regunits(Reg)) {
      if (auto It = VMem.find(toVMEMID(RU)); It != VMem.end()) {
        It->second.VMEMTypes = 0;
        if (It->second.empty())
  void setStateOnFunctionEntryOrReturn() {

  ArrayRef<const MachineInstr *> getLDSDMAStores() const {

  bool hasPointSampleAccel(const MachineInstr &MI) const;
  bool hasPointSamplePendingVmemTypes(const MachineInstr &MI,

  void print(raw_ostream &) const;

  void purgeEmptyTrackingData();

  using CounterValueArray = std::array<unsigned, AMDGPU::NUM_INST_CNTS>;

                       AMDGPU::Waitcnt &Wait) const;

  static bool mergeScore(const MergeInfo &M, unsigned &Score,
                         unsigned OtherScore);

    assert(Reg != AMDGPU::SCC && "Shouldn't be used on SCC");

    if (Reg == AMDGPU::SCC) {

      for (MCRegUnit RU : regunits(Reg))
        VMem[toVMEMID(RU)].Scores[T] = Val;

      for (MCRegUnit RU : regunits(Reg))
        SGPRs[RU].get(T) = Val;

    VMem[TID].Scores[T] = Val;

  void setScoreByOperand(const MachineOperand &Op,

  const SIInsertWaitcnts *Context;

  WaitEventSet PendingEvents;

  unsigned LastFlatDsCnt = 0;
  unsigned LastFlatLoadCnt = 0;

  unsigned LastGDS = 0;

    CounterValueArray Scores{};
    unsigned VMEMTypes = 0;

    unsigned ScoreDsKmCnt = 0;
    unsigned ScoreXCnt = 0;

    bool empty() const { return !ScoreDsKmCnt && !ScoreXCnt; }

  DenseMap<VMEMID, VMEMInfo> VMem;
  DenseMap<MCRegUnit, SGPRInfo> SGPRs;

  unsigned SCCScore = 0;

  const MachineInstr *PendingSCCWrite = nullptr;

  SmallVector<const MachineInstr *> LDSDMAStores;

  static constexpr unsigned MaxAsyncMarks = 16;

  CounterValueArray AsyncScore{};
SIInsertWaitcnts::BlockInfo::~BlockInfo() = default;

class SIInsertWaitcntsLegacy : public MachineFunctionPass {
public:
  static char ID;

  SIInsertWaitcntsLegacy() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert wait instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachinePostDominatorTreeWrapperPass>();
void WaitcntBrackets::setScoreByOperand(const MachineOperand &Op,

  setRegScore(Op.getReg().asMCReg(), CntTy, Score);
}

bool WaitcntBrackets::hasPointSampleAccel(const MachineInstr &MI) const {

  const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo =

bool WaitcntBrackets::hasPointSamplePendingVmemTypes(const MachineInstr &MI,

  if (!hasPointSampleAccel(MI))
    return false;

  return hasOtherPendingVmemTypes(Reg, VMEM_NOSAMPLER);
}
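// updateByEvent is the scoreboard write path: it bumps the upper bound of the
// event's counter and stamps every register unit (or LDS-DMA slot) that the
// instruction defines, plus the operands it reads for export/GPR-lock style
// events.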
void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) {

  unsigned UB = getScoreUB(T);

  PendingEvents.insert(E);
  setScoreUB(T, CurrScore);

  const MachineRegisterInfo &MRI = Context->MRI;

    if (const auto *AddrOp = TII.getNamedOperand(Inst, AMDGPU::OpName::addr))

    if (const auto *Data0 =
            TII.getNamedOperand(Inst, AMDGPU::OpName::data0))

    if (const auto *Data1 =
            TII.getNamedOperand(Inst, AMDGPU::OpName::data1))

        Inst.getOpcode() != AMDGPU::DS_APPEND &&
        Inst.getOpcode() != AMDGPU::DS_CONSUME &&
        Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
      for (const MachineOperand &Op : Inst.all_uses()) {
        if (TRI.isVectorRegister(MRI, Op.getReg()))

  } else if (TII.isFLAT(Inst)) {

      setScoreByOperand(*TII.getNamedOperand(Inst, AMDGPU::OpName::data),

      setScoreByOperand(*TII.getNamedOperand(Inst, AMDGPU::OpName::data),

  } else if (TII.isMIMG(Inst)) {

      setScoreByOperand(*TII.getNamedOperand(Inst, AMDGPU::OpName::data),

  } else if (TII.isMTBUF(Inst)) {

  } else if (TII.isMUBUF(Inst)) {

      setScoreByOperand(*TII.getNamedOperand(Inst, AMDGPU::OpName::data),

  } else if (TII.isLDSDIR(Inst)) {

    setScoreByOperand(*TII.getNamedOperand(Inst, AMDGPU::OpName::vdst),

    if (TII.isEXP(Inst)) {

      for (MachineOperand &DefMO : Inst.all_defs()) {
        if (TRI.isVGPR(MRI, DefMO.getReg())) {

      for (const MachineOperand &Op : Inst.all_uses()) {
        if (TRI.isVectorRegister(MRI, Op.getReg()))

    WaitEventType OtherEvent = E == SMEM_GROUP ? VMEM_GROUP : SMEM_GROUP;
    if (PendingEvents.contains(OtherEvent)) {

      setScoreLB(T, getScoreUB(T) - 1);
      PendingEvents.remove(OtherEvent);

    for (const MachineOperand &Op : Inst.all_uses())
      setScoreByOperand(Op, T, CurrScore);

    for (const MachineOperand &Op : Inst.operands()) {

      setScoreByOperand(Op, T, CurrScore);

    for (const MachineOperand &Op : Inst.defs()) {

      if (!TRI.isVectorRegister(MRI, Op.getReg()))

      if (updateVMCntOnly(Inst)) {

        VmemType V = getVmemType(Inst);
        unsigned char TypesMask = 1 << V;

        if (hasPointSampleAccel(Inst))
          TypesMask |= 1 << VMEM_NOSAMPLER;
        for (MCRegUnit RU : regunits(Op.getReg().asMCReg()))
          VMem[toVMEMID(RU)].VMEMTypes |= TypesMask;

      setScoreByOperand(Op, T, CurrScore);

      (TII.isDS(Inst) || Context->isNonAsyncLdsDmaWrite(Inst))) {

      if (!MemOp->isStore() ||

      auto AAI = MemOp->getAAInfo();

      if (!AAI || !AAI.Scope)

      for (unsigned I = 0, E = LDSDMAStores.size(); I != E && !Slot; ++I) {
        for (const auto *MemOp : LDSDMAStores[I]->memoperands()) {
          if (MemOp->isStore() && AAI == MemOp->getAAInfo()) {

    setVMemScore(LDSDMA_BEGIN, T, CurrScore);
    if (Slot && Slot < NUM_LDSDMA)
      setVMemScore(LDSDMA_BEGIN + Slot, T, CurrScore);

  if (Context->shouldUpdateAsyncMark(Inst, T)) {
    AsyncScore[T] = CurrScore;

    setRegScore(AMDGPU::SCC, T, CurrScore);
    PendingSCCWrite = &Inst;
void WaitcntBrackets::recordAsyncMark(MachineInstr &Inst) {

  AsyncMarks.push_back(AsyncScore);

    dbgs() << "recordAsyncMark:\n" << Inst;
    for (const auto &Mark : AsyncMarks) {
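// print() emits one line per hardware counter; register scores are shown
// relative to the counter's current lower bound, so smaller numbers mean the
// access is closer to completion.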
void WaitcntBrackets::print(raw_ostream &OS) const {

    unsigned SR = getScoreRange(T);

      OS << " " << (ST.hasExtendedWaitCounts() ? "LOAD" : "VM") << "_CNT("

      OS << " " << (ST.hasExtendedWaitCounts() ? "DS" : "LGKM") << "_CNT("

      OS << " EXP_CNT(" << SR << "):";

      OS << " " << (ST.hasExtendedWaitCounts() ? "STORE" : "VS") << "_CNT("

      OS << " SAMPLE_CNT(" << SR << "):";

      OS << " BVH_CNT(" << SR << "):";

      OS << " KM_CNT(" << SR << "):";

      OS << " X_CNT(" << SR << "):";

      OS << " ASYNC_CNT(" << SR << "):";

      OS << " VA_VDST(" << SR << "): ";

      OS << " VM_VSRC(" << SR << "): ";

      OS << " UNKNOWN(" << SR << "):";

      unsigned LB = getScoreLB(T);

      sort(SortedVMEMIDs);

      for (auto ID : SortedVMEMIDs) {
        unsigned RegScore = VMem.at(ID).Scores[T];

        unsigned RelScore = RegScore - LB - 1;
        if (ID < REGUNITS_END) {
          OS << ' ' << RelScore << ":vRU" << ID;

          assert(ID >= LDSDMA_BEGIN && ID < LDSDMA_END &&
                 "Unhandled/unexpected ID value!");
          OS << ' ' << RelScore << ":LDSDMA" << ID;

      if (isSmemCounter(T)) {

        sort(SortedSMEMIDs);
        for (auto ID : SortedSMEMIDs) {
          unsigned RegScore = SGPRs.at(ID).get(T);

          unsigned RelScore = RegScore - LB - 1;
          OS << ' ' << RelScore << ":sRU" << static_cast<unsigned>(ID);

          OS << ' ' << SCCScore << ":scc";

  OS << "Pending Events: ";
  if (hasPendingEvent()) {

    for (unsigned I = 0; I != NUM_WAIT_EVENTS; ++I) {
      if (hasPendingEvent((WaitEventType)I)) {
        OS << LS << WaitEventTypeName[I];

  OS << "Async score: ";
  if (AsyncScore.empty())

  OS << "Async marks: " << AsyncMarks.size() << '\n';

  for (const auto &Mark : AsyncMarks) {

      unsigned MarkedScore = Mark[T];

        OS << " " << (ST.hasExtendedWaitCounts() ? "LOAD" : "VM")
           << "_CNT: " << MarkedScore;

        OS << " " << (ST.hasExtendedWaitCounts() ? "DS" : "LGKM")
           << "_CNT: " << MarkedScore;

        OS << " EXP_CNT: " << MarkedScore;

        OS << " " << (ST.hasExtendedWaitCounts() ? "STORE" : "VS")
           << "_CNT: " << MarkedScore;

        OS << " SAMPLE_CNT: " << MarkedScore;

        OS << " BVH_CNT: " << MarkedScore;

        OS << " KM_CNT: " << MarkedScore;

        OS << " X_CNT: " << MarkedScore;

        OS << " ASYNC_CNT: " << MarkedScore;

        OS << " UNKNOWN: " << MarkedScore;
void WaitcntBrackets::simplifyWaitcnt(const AMDGPU::Waitcnt &CheckWait,
                                      AMDGPU::Waitcnt &UpdateWait) const {

  simplifyXcnt(CheckWait, UpdateWait);

  simplifyVmVsrc(CheckWait, UpdateWait);

                                      unsigned &Count) const {

  if (Count >= getScoreRange(T))

void WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &Wait,

    unsigned Cnt = Wait.get(T);
    simplifyWaitcnt(T, Cnt);

void WaitcntBrackets::simplifyXcnt(const AMDGPU::Waitcnt &CheckWait,
                                   AMDGPU::Waitcnt &UpdateWait) const {

void WaitcntBrackets::simplifyVmVsrc(const AMDGPU::Waitcnt &CheckWait,
                                     AMDGPU::Waitcnt &UpdateWait) const {

      std::min({CheckWait.get(AMDGPU::LOAD_CNT),
                CheckWait.get(AMDGPU::STORE_CNT),
                CheckWait.get(AMDGPU::SAMPLE_CNT),
                CheckWait.get(AMDGPU::BVH_CNT), CheckWait.get(AMDGPU::DS_CNT)}))
void WaitcntBrackets::purgeEmptyTrackingData() {

void WaitcntBrackets::determineWaitForScore(InstCounterType T,
                                            unsigned ScoreToWait,
                                            AMDGPU::Waitcnt &Wait) const {
  const unsigned LB = getScoreLB(T);
  const unsigned UB = getScoreUB(T);

  if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {

        !Context->ST.hasFlatLgkmVMemCountInOrder()) {

      addWait(Wait, T, 0);
    } else if (counterOutOfOrder(T)) {

      addWait(Wait, T, 0);

      unsigned NeededWait = std::min(
          UB - ScoreToWait, getWaitCountMax(Context->getLimits(), T) - 1);
      addWait(Wait, T, NeededWait);
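// WAIT_ASYNCMARK N is resolved below: the bracket keeps a FIFO of counter
// snapshots, one per ASYNCMARK, and the wait is computed against the snapshot
// N marks back from the end, after which the consumed marks are dropped.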
AMDGPU::Waitcnt WaitcntBrackets::determineAsyncWait(unsigned N) {

    dbgs() << "Need " << N << " async marks. Found " << AsyncMarks.size()

    for (const auto &Mark : AsyncMarks) {

  if (AsyncMarks.size() == MaxAsyncMarks) {

    LLVM_DEBUG(dbgs() << "Possible truncation. Ensuring a non-trivial wait.\n");
    N = std::min(N, (unsigned)MaxAsyncMarks - 1);

  AMDGPU::Waitcnt Wait;
  if (AsyncMarks.size() <= N) {

  size_t MarkIndex = AsyncMarks.size() - N - 1;
  const auto &RequiredMark = AsyncMarks[MarkIndex];

    determineWaitForScore(T, RequiredMark[T], Wait);

    dbgs() << "Removing " << (MarkIndex + 1)
           << " async marks after determining wait\n";

  AsyncMarks.erase(AsyncMarks.begin(), AsyncMarks.begin() + MarkIndex + 1);
MCPhysReg WaitcntBrackets::determineVGPR16Dependency(const MachineInstr &MI,

  const TargetRegisterClass *RC = Context->TRI.getPhysRegBaseClass(Reg);
  unsigned Size = Context->TRI.getRegSizeInBits(*RC);

  if (Size != 16 || !Context->ST.hasD16Writes32BitVgpr())

  AMDGPU::Waitcnt Wait;
  for (MCRegUnit RU : regunits(OtherHalf))
    determineWaitForScore(T, getVMemScore(toVMEMID(RU), T), Wait);

  if (!Wait.hasWait())

  WaitEventSet MIEvents = Context->getEventsFor(MI);
  WaitEventSet OtherHalfEvents = Context->getWaitEvents(T);
  WaitEventSet Events = MIEvents & OtherHalfEvents;
  if (Events.twoOrMore())
                                              AMDGPU::Waitcnt &Wait,
                                              const MachineInstr &MI) const {
  if (Reg == AMDGPU::SCC) {
    determineWaitForScore(T, SCCScore, Wait);

  Reg = determineVGPR16Dependency(MI, T, Reg);
  for (MCRegUnit RU : regunits(Reg))
    determineWaitForScore(
        T, IsVGPR ? getVMemScore(toVMEMID(RU), T) : getSGPRScore(RU, T),

                                              AMDGPU::Waitcnt &Wait) const {
  assert(TID >= LDSDMA_BEGIN && TID < LDSDMA_END);
  determineWaitForScore(T, getVMemScore(TID, T), Wait);
void WaitcntBrackets::tryClearSCCWriteEvent(MachineInstr *Inst) {

  if (PendingSCCWrite &&
      PendingSCCWrite->getOpcode() == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM &&

    WaitEventSet SCC_WRITE_PendingEvent(SCC_WRITE);

        SCC_WRITE_PendingEvent) {

      PendingEvents.remove(SCC_WRITE_PendingEvent);
      PendingSCCWrite = nullptr;
void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {

    applyWaitcnt(Wait, T);

  const unsigned UB = getScoreUB(T);

    if (counterOutOfOrder(T))

    setScoreLB(T, std::max(getScoreLB(T), UB - Count));

    PendingEvents.remove(Context->getWaitEvents(T));

      PendingEvents.remove(SMEM_GROUP);

    else if (Count == 0)
      PendingEvents.remove(VMEM_GROUP);

void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait,

    unsigned Cnt = Wait.get(T);
    applyWaitcnt(T, Cnt);

  if ((T == Context->SmemAccessCounter && hasPendingEvent(SMEM_ACCESS)) ||

  WaitEventSet Events = PendingEvents & Context->getWaitEvents(T);

    Events.remove(GLOBAL_INV_ACCESS);

  return Events.twoOrMore();

  return hasMixedPendingEvents(T);
char SIInsertWaitcntsLegacy::ID = 0;

  return new SIInsertWaitcntsLegacy();

static bool updateOperandIfDifferent(MachineInstr &MI, AMDGPU::OpName OpName,
                                     unsigned NewEnc) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);

  if (NewEnc == MO.getImm())

/// Determine if \p MI is a gfx12+ single-counter S_WAIT_*CNT instruction,
/// and if so, which counter it is waiting on.
static std::optional<AMDGPU::InstCounterType>
counterTypeForInstr(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_WAIT_LOADCNT:

  case AMDGPU::S_WAIT_EXPCNT:

  case AMDGPU::S_WAIT_STORECNT:

  case AMDGPU::S_WAIT_SAMPLECNT:

  case AMDGPU::S_WAIT_BVHCNT:

  case AMDGPU::S_WAIT_DSCNT:

  case AMDGPU::S_WAIT_KMCNT:

  case AMDGPU::S_WAIT_XCNT:

  case AMDGPU::S_WAIT_ASYNCCNT:

bool WaitcntGenerator::promoteSoftWaitCnt(MachineInstr *Waitcnt) const {
bool WaitcntGeneratorPreGFX12::applyPreexistingWaitcnt(
    WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,

  assert(isNormalMode(MaxCounter));

  MachineInstr *WaitcntInstr = nullptr;
  MachineInstr *WaitcntVsCntInstr = nullptr;

    dbgs() << "PreGFX12::applyPreexistingWaitcnt at: ";

      dbgs() << "end of block\n";

    if (isNonWaitcntMetaInst(II)) {

    bool TrySimplify = Opcode != II.getOpcode() && !OptNone;

    if (Opcode == AMDGPU::S_WAITCNT) {
      unsigned IEnc = II.getOperand(0).getImm();

      ScoreBrackets.simplifyWaitcnt(OldWait);

      if (WaitcntInstr || (!Wait.hasWaitExceptStoreCnt() && TrySimplify)) {
        II.eraseFromParent();

    } else if (Opcode == AMDGPU::S_WAITCNT_lds_direct) {

                 << "Before: " << Wait << '\n';);

      II.eraseFromParent();
    } else if (Opcode == AMDGPU::WAIT_ASYNCMARK) {
      unsigned N = II.getOperand(0).getImm();

      AMDGPU::Waitcnt OldWait = ScoreBrackets.determineAsyncWait(N);

      assert(Opcode == AMDGPU::S_WAITCNT_VSCNT);
      assert(II.getOperand(0).getReg() == AMDGPU::SGPR_NULL);

          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();

      if (WaitcntVsCntInstr || (!Wait.hasWaitStoreCnt() && TrySimplify)) {
        II.eraseFromParent();

        WaitcntVsCntInstr = &II;

    Modified |= promoteSoftWaitCnt(WaitcntInstr);

    LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
                                   << "New Instr at block end: "
                                   << *WaitcntInstr << '\n'
                          : dbgs() << "applied pre-existing waitcnt\n"
                                   << "Old Instr: " << *It
                                   << "New Instr: " << *WaitcntInstr << '\n');

  if (WaitcntVsCntInstr) {

    Modified |= promoteSoftWaitCnt(WaitcntVsCntInstr);

    LLVM_DEBUG(It.isEnd()
                   ? dbgs() << "applied pre-existing waitcnt\n"
                            << "New Instr at block end: " << *WaitcntVsCntInstr

                   : dbgs() << "applied pre-existing waitcnt\n"
                            << "Old Instr: " << *It
                            << "New Instr: " << *WaitcntVsCntInstr << '\n');
bool WaitcntGeneratorPreGFX12::createNewWaitcnt(

    AMDGPU::Waitcnt Wait, const WaitcntBrackets &ScoreBrackets) {
  assert(isNormalMode(MaxCounter));

  auto EmitExpandedWaitcnt = [&](unsigned Outstanding, unsigned Target,

      EmitWaitcnt(--Outstanding);
    } while (Outstanding > Target);

  if (Wait.hasWaitExceptStoreCnt()) {

    if (ExpandWaitcntProfiling) {

      bool AnyOutOfOrder = false;

        unsigned WaitCnt = Wait.get(CT);
        if (WaitCnt != ~0u && ScoreBrackets.counterOutOfOrder(CT)) {
          AnyOutOfOrder = true;

      if (AnyOutOfOrder) {

          unsigned WaitCnt = Wait.get(CT);

          unsigned Outstanding = std::min(ScoreBrackets.getOutstanding(CT),
                                          getWaitCountMax(getLimits(), CT) - 1);
          EmitExpandedWaitcnt(Outstanding, WaitCnt, [&](unsigned Count) {

      [[maybe_unused]] auto SWaitInst =

      if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
      dbgs() << "New Instr: " << *SWaitInst << '\n');

  if (Wait.hasWaitStoreCnt()) {

      unsigned Outstanding =

      EmitExpandedWaitcnt(

          BuildMI(Block, It, DL, TII.get(AMDGPU::S_WAITCNT_VSCNT))
              .addReg(AMDGPU::SGPR_NULL, RegState::Undef)

      [[maybe_unused]] auto SWaitInst =

              .addReg(AMDGPU::SGPR_NULL, RegState::Undef)

      if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
      dbgs() << "New Instr: " << *SWaitInst << '\n');

AMDGPU::Waitcnt
WaitcntGeneratorPreGFX12::getAllZeroWaitcnt(bool IncludeVSCnt) const {
  return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt && ST.hasVscnt() ? 0 : ~0u);
}

AMDGPU::Waitcnt
WaitcntGeneratorGFX12Plus::getAllZeroWaitcnt(bool IncludeVSCnt) const {
  unsigned ExpertVal = IsExpertMode ? 0 : ~0u;
  return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt ? 0 : ~0u, 0, 0, 0,
                         ~0u, ~0u, ExpertVal,
bool WaitcntGeneratorGFX12Plus::applyPreexistingWaitcnt(
    WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,

  assert(!isNormalMode(MaxCounter));

  MachineInstr *CombinedLoadDsCntInstr = nullptr;
  MachineInstr *CombinedStoreDsCntInstr = nullptr;
  MachineInstr *WaitcntDepctrInstr = nullptr;

    dbgs() << "GFX12Plus::applyPreexistingWaitcnt at: ";

      dbgs() << "end of block\n";

  AMDGPU::Waitcnt RequiredWait;

    if (isNonWaitcntMetaInst(II)) {

    bool TrySimplify = Opcode != II.getOpcode() && !OptNone;

    if (Opcode == AMDGPU::S_WAITCNT)

    if (Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT) {

          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();

        RequiredWait = RequiredWait.combined(OldWait);

      if (CombinedLoadDsCntInstr == nullptr) {
        CombinedLoadDsCntInstr = &II;

        II.eraseFromParent();

    } else if (Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT) {

          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();

        RequiredWait = RequiredWait.combined(OldWait);

      if (CombinedStoreDsCntInstr == nullptr) {
        CombinedStoreDsCntInstr = &II;

        II.eraseFromParent();

    } else if (Opcode == AMDGPU::S_WAITCNT_DEPCTR) {

          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
      AMDGPU::Waitcnt OldWait;

      ScoreBrackets.simplifyWaitcnt(OldWait);

      if (WaitcntDepctrInstr == nullptr) {
        WaitcntDepctrInstr = &II;

            TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();

        II.eraseFromParent();

    } else if (Opcode == AMDGPU::S_WAITCNT_lds_direct) {

      II.eraseFromParent();

    } else if (Opcode == AMDGPU::WAIT_ASYNCMARK) {

      unsigned N = II.getOperand(0).getImm();
      AMDGPU::Waitcnt OldWait = ScoreBrackets.determineAsyncWait(N);

          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();

        addWait(Wait, CT.value(), OldCnt);

        addWait(RequiredWait, CT.value(), OldCnt);

      if (WaitInstrs[CT.value()] == nullptr) {
        WaitInstrs[CT.value()] = &II;

        II.eraseFromParent();

  ScoreBrackets.simplifyWaitcnt(Wait.combined(RequiredWait), Wait);
  Wait = Wait.combined(RequiredWait);

  if (CombinedLoadDsCntInstr) {

                                        AMDGPU::OpName::simm16, NewEnc);
    Modified |= promoteSoftWaitCnt(CombinedLoadDsCntInstr);

    LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
                                   << "New Instr at block end: "
                                   << *CombinedLoadDsCntInstr << '\n'
                          : dbgs() << "applied pre-existing waitcnt\n"
                                   << "Old Instr: " << *It << "New Instr: "
                                   << *CombinedLoadDsCntInstr << '\n');

  if (CombinedStoreDsCntInstr) {

                                        AMDGPU::OpName::simm16, NewEnc);
    Modified |= promoteSoftWaitCnt(CombinedStoreDsCntInstr);

    LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
                                   << "New Instr at block end: "
                                   << *CombinedStoreDsCntInstr << '\n'
                          : dbgs() << "applied pre-existing waitcnt\n"
                                   << "Old Instr: " << *It << "New Instr: "
                                   << *CombinedStoreDsCntInstr << '\n');

  for (MachineInstr **WI : WaitsToErase) {

    (*WI)->eraseFromParent();

    if (!WaitInstrs[CT])

    unsigned NewCnt = Wait.get(CT);
    if (NewCnt != ~0u) {

                                          AMDGPU::OpName::simm16, NewCnt);
      Modified |= promoteSoftWaitCnt(WaitInstrs[CT]);

      ScoreBrackets.applyWaitcnt(CT, NewCnt);
      setNoWait(Wait, CT);

      LLVM_DEBUG(It.isEnd()
                     ? dbgs() << "applied pre-existing waitcnt\n"
                              << "New Instr at block end: " << *WaitInstrs[CT]

                     : dbgs() << "applied pre-existing waitcnt\n"
                              << "Old Instr: " << *It
                              << "New Instr: " << *WaitInstrs[CT] << '\n');

  if (WaitcntDepctrInstr) {

        TII.getNamedOperand(*WaitcntDepctrInstr, AMDGPU::OpName::simm16)

                                        AMDGPU::OpName::simm16, Enc);

                          << "New Instr at block end: "
                          << *WaitcntDepctrInstr << '\n'
                 : dbgs() << "applyPreexistingWaitcnt\n"
                          << "Old Instr: " << *It << "New Instr: "
                          << *WaitcntDepctrInstr << '\n');
bool WaitcntGeneratorGFX12Plus::createNewWaitcnt(

    AMDGPU::Waitcnt Wait, const WaitcntBrackets &ScoreBrackets) {
  assert(!isNormalMode(MaxCounter));

  auto EmitExpandedWaitcnt = [&](unsigned Outstanding, unsigned Target,

    for (unsigned I = Outstanding - 1; I > Target && I != ~0u; --I)

    EmitWaitcnt(Target);

  if (ExpandWaitcntProfiling) {

      if (ScoreBrackets.counterOutOfOrder(CT)) {

      unsigned Outstanding = std::min(ScoreBrackets.getOutstanding(CT),
                                      getWaitCountMax(getLimits(), CT) - 1);
      EmitExpandedWaitcnt(Outstanding, Count, [&](unsigned Val) {

  MachineInstr *SWaitInst = nullptr;

    if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
    dbgs() << "New Instr: " << *SWaitInst << '\n');

    [[maybe_unused]] auto SWaitInst =

    if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
    dbgs() << "New Instr: " << *SWaitInst << '\n');

  if (Wait.hasWaitDepctr()) {

    [[maybe_unused]] auto SWaitInst =

    if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
    dbgs() << "New Instr: " << *SWaitInst << '\n');
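// generateWaitcntInstBefore computes the wait required *before* MI: special
// opcodes (cache invalidates, returns, endpgm, sendmsg, barriers) get
// explicit handling, then every operand is checked against the scoreboard,
// and the result is simplified against what is already known to be complete.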
bool SIInsertWaitcnts::generateWaitcntInstBefore(
    MachineInstr &MI, WaitcntBrackets &ScoreBrackets,
    MachineInstr *OldWaitcntInstr, PreheaderFlushFlags FlushFlags) {

  setForceEmitWaitcnt();

  AMDGPU::Waitcnt Wait;
  const unsigned Opc = MI.getOpcode();

  switch (Opc) {
  case AMDGPU::BUFFER_WBINVL1:
  case AMDGPU::BUFFER_WBINVL1_SC:
  case AMDGPU::BUFFER_WBINVL1_VOL:
  case AMDGPU::BUFFER_GL0_INV:
  case AMDGPU::BUFFER_GL1_INV: {

  case AMDGPU::SI_RETURN_TO_EPILOG:
  case AMDGPU::SI_RETURN:
  case AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN:
  case AMDGPU::S_SETPC_B64_return: {

    AMDGPU::Waitcnt AllZeroWait =
        WCG->getAllZeroWaitcnt(false);

    if (ST.hasExtendedWaitCounts() &&
        !ScoreBrackets.hasPendingEvent(VMEM_ACCESS))

  case AMDGPU::S_ENDPGM:
  case AMDGPU::S_ENDPGM_SAVED: {

        !ScoreBrackets.hasPendingEvent(SCRATCH_WRITE_ACCESS);

  case AMDGPU::S_SENDMSG:
  case AMDGPU::S_SENDMSGHALT: {
    if (ST.hasLegacyGeometry() &&

  if (MI.modifiesRegister(AMDGPU::EXEC, &TRI)) {

    if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
        ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
        ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
        ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {

  if (TII.isAlwaysGDS(Opc) && ScoreBrackets.hasPendingGDS())

    Wait = AMDGPU::Waitcnt();

    const MachineOperand &CallAddrOp = TII.getCalleeOperand(MI);
    if (CallAddrOp.isReg()) {
      ScoreBrackets.determineWaitForPhysReg(

      if (const auto *RtnAddrOp =
              TII.getNamedOperand(MI, AMDGPU::OpName::dst)) {
        ScoreBrackets.determineWaitForPhysReg(
            SmemAccessCounter, RtnAddrOp->getReg().asMCReg(), Wait, MI);

  } else if (Opc == AMDGPU::S_BARRIER_WAIT) {
    ScoreBrackets.tryClearSCCWriteEvent(&MI);

    for (const MachineMemOperand *Memop : MI.memoperands()) {
      const Value *Ptr = Memop->getValue();
      if (Memop->isStore()) {
        if (auto It = SLoadAddresses.find(Ptr); It != SLoadAddresses.end()) {
          addWait(Wait, SmemAccessCounter, 0);

          SLoadAddresses.erase(It);

      unsigned AS = Memop->getAddrSpace();

      if (TII.mayWriteLDSThroughDMA(MI))

      unsigned TID = LDSDMA_BEGIN;
      if (Ptr && Memop->getAAInfo()) {
        const auto &LDSDMAStores = ScoreBrackets.getLDSDMAStores();
        for (unsigned I = 0, E = LDSDMAStores.size(); I != E; ++I) {
          if (MI.mayAlias(AA, *LDSDMAStores[I], true)) {
            if ((I + 1) >= NUM_LDSDMA) {

      if (Memop->isStore()) {

    for (const MachineOperand &Op : MI.operands()) {

      if (Op.isTied() && Op.isUse() && TII.doesNotReadTiedSource(MI))

      const bool IsVGPR = TRI.isVectorRegister(MRI, Op.getReg());

        if (Op.isImplicit() && MI.mayLoadOrStore())

        if (Op.isUse() || !updateVMCntOnly(MI) ||
            ScoreBrackets.hasOtherPendingVmemTypes(Reg, getVmemType(MI)) ||
            ScoreBrackets.hasPointSamplePendingVmemTypes(MI, Reg) ||
            !ST.hasVmemWriteVgprInOrder()) {

          ScoreBrackets.clearVgprVmemTypes(Reg);

        if (Op.isDef() || ScoreBrackets.hasPendingEvent(EXP_LDS_ACCESS)) {

      } else if (Op.getReg() == AMDGPU::SCC) {

        ScoreBrackets.determineWaitForPhysReg(SmemAccessCounter, Reg, Wait,

      if (ST.hasWaitXcnt() && Op.isDef())

  if (Opc == AMDGPU::S_BARRIER && !ST.hasAutoWaitcntBeforeBarrier() &&
      !ST.hasBackOffBarrier()) {
    Wait = Wait.combined(WCG->getAllZeroWaitcnt(true));

      ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {

  ScoreBrackets.simplifyWaitcnt(Wait);

    Wait = WCG->getAllZeroWaitcnt(false);

    if (!ForceEmitWaitcnt[T])

  if (FlushFlags.FlushVmCnt) {

  if (FlushFlags.FlushDsCnt && ScoreBrackets.hasPendingEvent(AMDGPU::DS_CNT))

  return generateWaitcnt(Wait, MI.getIterator(), *MI.getParent(), ScoreBrackets,
bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt Wait,

                                       MachineBasicBlock &Block,
                                       WaitcntBrackets &ScoreBrackets,
                                       MachineInstr *OldWaitcntInstr) {

  if (OldWaitcntInstr)

        WCG->applyPreexistingWaitcnt(ScoreBrackets, *OldWaitcntInstr, Wait, It);

    MachineOperand *WaitExp = TII.getNamedOperand(*It, AMDGPU::OpName::waitexp);

               << "Update Instr: " << *It);

  if (WCG->createNewWaitcnt(Block, It, Wait, ScoreBrackets))

  ScoreBrackets.applyWaitcnt(Wait);
std::optional<WaitEventType>
SIInsertWaitcnts::getExpertSchedulingEventType(const MachineInstr &Inst) const {
  if (TII.isVALU(Inst)) {

    if (TII.isXDL(Inst))
      return VGPR_XDL_WRITE;

    if (TII.isTRANS(Inst))
      return VGPR_TRANS_WRITE;

      return VGPR_DPMACC_WRITE;

    return VGPR_CSMACC_WRITE;

  if (TII.isFLAT(Inst))
    return VGPR_FLAT_READ;

    return VGPR_LDS_READ;

  if (TII.isVMEM(Inst) || TII.isVIMAGE(Inst) || TII.isVSAMPLE(Inst))
    return VGPR_VMEM_READ;
bool SIInsertWaitcnts::isVmemAccess(const MachineInstr &MI) const {
  return (TII.isFLAT(MI) && TII.mayAccessVMEMThroughFlat(MI)) ||

                                     MachineBasicBlock *Block) const {
  auto BlockEnd = Block->getParent()->end();
  auto BlockIter = Block->getIterator();

    if (++BlockIter != BlockEnd) {
      It = BlockIter->instr_begin();

    if (!It->isMetaInstruction())

  return It->getOpcode() == AMDGPU::S_ENDPGM;
}
bool SIInsertWaitcnts::insertForcedWaitAfter(MachineInstr &Inst,
                                             MachineBasicBlock &Block,
                                             WaitcntBrackets &ScoreBrackets) {
  AMDGPU::Waitcnt Wait;
  bool NeedsEndPGMCheck = false;

    NeedsEndPGMCheck = true;

  ScoreBrackets.simplifyWaitcnt(Wait);

  bool Result = generateWaitcnt(Wait, SuccessorIt, Block, ScoreBrackets,

  if (Result && NeedsEndPGMCheck && isNextENDPGM(SuccessorIt, &Block)) {
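// getEventsFor classifies an instruction into the set of wait events it will
// raise; updateEventWaitcntAfter then feeds each raised event into the
// scoreboard once the instruction has been processed.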
WaitEventSet SIInsertWaitcnts::getEventsFor(const MachineInstr &Inst) const {
  WaitEventSet Events;

  if (const auto ET = getExpertSchedulingEventType(Inst))

  if (TII.isDS(Inst) && TII.usesLGKM_CNT(Inst)) {
        TII.hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
      Events.insert(GDS_ACCESS);
      Events.insert(GDS_GPR_LOCK);

      Events.insert(LDS_ACCESS);

  } else if (TII.isFLAT(Inst)) {

      Events.insert(getVmemWaitEventType(Inst));

    if (TII.mayAccessVMEMThroughFlat(Inst)) {
      if (ST.hasWaitXcnt())
        Events.insert(VMEM_GROUP);
      Events.insert(getVmemWaitEventType(Inst));

    if (TII.mayAccessLDSThroughFlat(Inst))
      Events.insert(LDS_ACCESS);

              Inst.getOpcode() == AMDGPU::BUFFER_WBL2)) {

    if (ST.hasWaitXcnt())
      Events.insert(VMEM_GROUP);
    Events.insert(getVmemWaitEventType(Inst));
    if (ST.vmemWriteNeedsExpWaitcnt() &&

      Events.insert(VMW_GPR_LOCK);

  } else if (TII.isSMRD(Inst)) {
    if (ST.hasWaitXcnt())
      Events.insert(SMEM_GROUP);
    Events.insert(SMEM_ACCESS);

      Events.insert(EXP_LDS_ACCESS);

    unsigned Imm = TII.getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();

      Events.insert(EXP_PARAM_ACCESS);

      Events.insert(EXP_POS_ACCESS);

      Events.insert(EXP_GPR_LOCK);

    Events.insert(SCC_WRITE);

    case AMDGPU::S_SENDMSG:
    case AMDGPU::S_SENDMSG_RTN_B32:
    case AMDGPU::S_SENDMSG_RTN_B64:
    case AMDGPU::S_SENDMSGHALT:
      Events.insert(SQ_MESSAGE);

    case AMDGPU::S_MEMTIME:
    case AMDGPU::S_MEMREALTIME:
    case AMDGPU::S_GET_BARRIER_STATE_M0:
    case AMDGPU::S_GET_BARRIER_STATE_IMM:
      Events.insert(SMEM_ACCESS);
void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
                                               WaitcntBrackets *ScoreBrackets) {

  WaitEventSet InstEvents = getEventsFor(Inst);
  for (WaitEventType E : wait_events()) {
    if (InstEvents.contains(E))
      ScoreBrackets->updateByEvent(E, Inst);

  if (TII.isDS(Inst) && TII.usesLGKM_CNT(Inst)) {
        TII.hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
      ScoreBrackets->setPendingGDS();

  } else if (TII.isFLAT(Inst)) {

      ScoreBrackets->setPendingFlat();

      ScoreBrackets->updateByEvent(ASYNC_ACCESS, Inst);

  } else if (Inst.isCall()) {

      ScoreBrackets->applyWaitcnt(WCG->getAllZeroWaitcnt(false));
      ScoreBrackets->setStateOnFunctionEntryOrReturn();

  } else if (TII.isVINTERP(Inst)) {
    int64_t Imm = TII.getNamedOperand(Inst, AMDGPU::OpName::waitexp)->getImm();
bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
                                 unsigned OtherScore) {
  unsigned MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
  unsigned OtherShifted =
      OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
  Score = std::max(MyShifted, OtherShifted);
  return OtherShifted > MyShifted;
}

  bool StrictDom = false;

  if (AsyncMarks.empty() && OtherMarks.empty()) {

  auto MaxSize = (unsigned)std::max(AsyncMarks.size(), OtherMarks.size());
  MaxSize = std::min(MaxSize, MaxAsyncMarks);

  if (AsyncMarks.size() > MaxSize)
    AsyncMarks.erase(AsyncMarks.begin(),
                     AsyncMarks.begin() + (AsyncMarks.size() - MaxSize));

  constexpr CounterValueArray ZeroMark{};
  AsyncMarks.insert(AsyncMarks.begin(), MaxSize - AsyncMarks.size(), ZeroMark);

    dbgs() << "Before merge:\n";
    for (const auto &Mark : AsyncMarks) {

    dbgs() << "Other marks:\n";
    for (const auto &Mark : OtherMarks) {

  unsigned OtherSize = OtherMarks.size();
  unsigned OurSize = AsyncMarks.size();
  unsigned MergeCount = std::min(OtherSize, OurSize);

  if (MergeCount == 0)

      StrictDom |= mergeScore(MergeInfos[T], AsyncMarks[OurSize - Idx][T],
                              OtherMarks[OtherSize - Idx][T]);

    dbgs() << "After merge:\n";
    for (const auto &Mark : AsyncMarks) {
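// merge() combines incoming bracket state from a predecessor: both score
// intervals are re-based onto a common upper bound via the MergeInfo shifts
// consumed by mergeScore above, and the return value reports whether the
// merged state strictly grew, which marks the successor dirty for another
// pass.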
bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
  bool StrictDom = false;

  for (auto K : Other.VMem.keys())
    VMem.try_emplace(K);
  for (auto K : Other.SGPRs.keys())
    SGPRs.try_emplace(K);

    const WaitEventSet &EventsForT = Context->getWaitEvents(T);
    const WaitEventSet OldEvents = PendingEvents & EventsForT;
    const WaitEventSet OtherEvents = Other.PendingEvents & EventsForT;
    if (!OldEvents.contains(OtherEvents))

    PendingEvents |= OtherEvents;

    const unsigned MyPending = ScoreUBs[T] - ScoreLBs[T];
    const unsigned OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
    const unsigned NewUB = ScoreLBs[T] + std::max(MyPending, OtherPending);
    if (NewUB < ScoreLBs[T])

    MergeInfo &M = MergeInfos[T];
    M.OldLB = ScoreLBs[T];
    M.OtherLB = Other.ScoreLBs[T];
    M.MyShift = NewUB - ScoreUBs[T];
    M.OtherShift = NewUB - Other.ScoreUBs[T];

    ScoreUBs[T] = NewUB;

      StrictDom |= mergeScore(M, LastFlatLoadCnt, Other.LastFlatLoadCnt);

      StrictDom |= mergeScore(M, LastFlatDsCnt, Other.LastFlatDsCnt);
      StrictDom |= mergeScore(M, LastGDS, Other.LastGDS);

      StrictDom |= mergeScore(M, SCCScore, Other.SCCScore);
      if (Other.hasPendingEvent(SCC_WRITE)) {
        if (!OldEvents.contains(SCC_WRITE)) {
          PendingSCCWrite = Other.PendingSCCWrite;
        } else if (PendingSCCWrite != Other.PendingSCCWrite) {
          PendingSCCWrite = nullptr;

    for (auto &[RegID, Info] : VMem)
      StrictDom |= mergeScore(M, Info.Scores[T], Other.getVMemScore(RegID, T));

    if (isSmemCounter(T)) {
      for (auto &[RegID, Info] : SGPRs) {
        auto It = Other.SGPRs.find(RegID);
        unsigned OtherScore = (It != Other.SGPRs.end()) ? It->second.get(T) : 0;
        StrictDom |= mergeScore(M, Info.get(T), OtherScore);

  for (auto &[TID, Info] : VMem) {
    if (auto It = Other.VMem.find(TID); It != Other.VMem.end()) {
      unsigned char NewVmemTypes = Info.VMEMTypes | It->second.VMEMTypes;
      StrictDom |= NewVmemTypes != Info.VMEMTypes;
      Info.VMEMTypes = NewVmemTypes;

  StrictDom |= mergeAsyncMarks(MergeInfos, Other.AsyncMarks);

    StrictDom |= mergeScore(MergeInfos[T], AsyncScore[T], Other.AsyncScore[T]);

  purgeEmptyTrackingData();

static bool isWaitInstr(MachineInstr &Inst) {

  return Opcode == AMDGPU::S_WAITCNT ||

         Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT ||
         Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT ||
         Opcode == AMDGPU::S_WAITCNT_lds_direct ||
         Opcode == AMDGPU::WAIT_ASYNCMARK ||
void SIInsertWaitcnts::setSchedulingMode(MachineBasicBlock &MBB,

                                         bool ExpertMode) const {

      .addImm(ExpertMode ? 2 : 0)
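// VCCZWorkaround handles two hardware quirks around the VCCZ status bit on
// older subtargets: VCCZ can be stale while an SMEM load into VCC is still
// outstanding (hasReadVCCZBug), and partial VCC_LO/VCC_HI writes may not
// update VCCZ (!partialVCCWritesUpdateVCCZ). When a hazard is live, VCCZ is
// restored by copying VCC onto itself with an S_MOV.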
class VCCZWorkaround {
  const WaitcntBrackets &ScoreBrackets;
  const GCNSubtarget &ST;
  const SIInstrInfo &TII;
  const SIRegisterInfo &TRI;
  bool VCCZCorruptionBug = false;
  bool VCCZNotUpdatedByPartialWrites = false;

  bool MustRecomputeVCCZ = true;

  VCCZWorkaround(const WaitcntBrackets &ScoreBrackets, const GCNSubtarget &ST,
                 const SIInstrInfo &TII, const SIRegisterInfo &TRI)

    VCCZCorruptionBug = ST.hasReadVCCZBug();
    VCCZNotUpdatedByPartialWrites = !ST.partialVCCWritesUpdateVCCZ();

  bool tryRecomputeVCCZ(MachineInstr &MI) {

    if (!VCCZCorruptionBug && !VCCZNotUpdatedByPartialWrites)

    MustRecomputeVCCZ |= VCCZCorruptionBug && TII.isSMRD(MI);

    std::optional<bool> PartiallyWritesToVCCOpt;
    auto PartiallyWritesToVCC = [](MachineInstr &MI) {
      return MI.definesRegister(AMDGPU::VCC_LO, nullptr) ||
             MI.definesRegister(AMDGPU::VCC_HI, nullptr);

    if (VCCZNotUpdatedByPartialWrites) {
      PartiallyWritesToVCCOpt = PartiallyWritesToVCC(MI);

      MustRecomputeVCCZ |= *PartiallyWritesToVCCOpt;

    if (!ScoreBrackets.hasPendingEvent(SMEM_ACCESS) || !VCCZCorruptionBug) {

      if (!PartiallyWritesToVCCOpt)
        PartiallyWritesToVCCOpt = PartiallyWritesToVCC(MI);
      bool FullyWritesToVCC = !*PartiallyWritesToVCCOpt &&
                              MI.definesRegister(AMDGPU::VCC, nullptr);

      bool UpdatesVCCZ = FullyWritesToVCC || (!VCCZNotUpdatedByPartialWrites &&
                                              *PartiallyWritesToVCCOpt);

        MustRecomputeVCCZ = false;

            TII.get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),

    MustRecomputeVCCZ = false;
bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
                                            MachineBasicBlock &Block,
                                            WaitcntBrackets &ScoreBrackets) {

    dbgs() << "*** Begin Block: ";

    ScoreBrackets.dump();

  VCCZWorkaround VCCZW(ScoreBrackets, ST, TII, TRI);

  MachineInstr *OldWaitcntInstr = nullptr;

       Iter != E; ++Iter) {
    MachineInstr &Inst = *Iter;
    if (isNonWaitcntMetaInst(Inst))

        (IsExpertMode && Inst.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR)) {
      if (!OldWaitcntInstr)
        OldWaitcntInstr = &Inst;

    PreheaderFlushFlags FlushFlags;
    if (Block.getFirstTerminator() == Inst)
      FlushFlags = isPreheaderToFlush(Block, ScoreBrackets);

    Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr,

    OldWaitcntInstr = nullptr;

    if (Inst.getOpcode() == AMDGPU::ASYNCMARK) {

      ScoreBrackets.recordAsyncMark(Inst);

    if (TII.isSMRD(Inst)) {
      for (const MachineMemOperand *Memop : Inst.memoperands()) {

        if (!Memop->isInvariant()) {
          const Value *Ptr = Memop->getValue();

    updateEventWaitcntAfter(Inst, &ScoreBrackets);

    Modified |= insertForcedWaitAfter(Inst, Block, ScoreBrackets);

      ScoreBrackets.dump();

    Modified |= VCCZW.tryRecomputeVCCZ(Inst);

  AMDGPU::Waitcnt Wait;
  if (Block.getFirstTerminator() == Block.end()) {
    PreheaderFlushFlags FlushFlags = isPreheaderToFlush(Block, ScoreBrackets);
    if (FlushFlags.FlushVmCnt) {

    if (FlushFlags.FlushDsCnt && ScoreBrackets.hasPendingEvent(AMDGPU::DS_CNT))

    dbgs() << "*** End Block: ";

    ScoreBrackets.dump();
bool SIInsertWaitcnts::removeRedundantSoftXcnts(MachineBasicBlock &Block) {
  if (Block.size() <= 1)

  MachineInstr *LastAtomicWithSoftXcnt = nullptr;

    if (!IsLDS && (MI.mayLoad() ^ MI.mayStore()))
      LastAtomicWithSoftXcnt = nullptr;

        MI.mayLoad() && MI.mayStore();
    MachineInstr &PrevMI = *MI.getPrevNode();

    if (PrevMI.getOpcode() == AMDGPU::S_WAIT_XCNT_soft && IsAtomicRMW) {

      if (LastAtomicWithSoftXcnt) {

      LastAtomicWithSoftXcnt = &MI;
SIInsertWaitcnts::isPreheaderToFlush(MachineBasicBlock &MBB,
                                     const WaitcntBrackets &ScoreBrackets) {
  auto [Iterator, IsInserted] =

    return Iterator->second;

    return PreheaderFlushFlags();

    return PreheaderFlushFlags();

  Iterator->second = getPreheaderFlushFlags(Loop, ScoreBrackets);
  return Iterator->second;

  return PreheaderFlushFlags();
}

bool SIInsertWaitcnts::isVMEMOrFlatVMEM(const MachineInstr &MI) const {

    return TII.mayAccessVMEMThroughFlat(MI);

bool SIInsertWaitcnts::isDSRead(const MachineInstr &MI) const {

bool SIInsertWaitcnts::mayStoreIncrementingDSCNT(const MachineInstr &MI) const {
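// getPreheaderFlushFlags decides whether a loop's preheader should flush
// VMCNT and/or DSCNT before entering the loop: flushing there is profitable
// when the loop only consumes VGPRs that were loaded outside it, so the waits
// are hoisted off the loop's critical path.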
SIInsertWaitcnts::getPreheaderFlushFlags(MachineLoop *ML,
                                         const WaitcntBrackets &Brackets) {
  PreheaderFlushFlags Flags;
  bool HasVMemLoad = false;
  bool HasVMemStore = false;
  bool UsesVgprVMEMLoadedOutside = false;
  bool UsesVgprDSReadOutside = false;
  bool VMemInvalidated = false;

  bool TrackSimpleDSOpt = ST.hasExtendedWaitCounts();
  DenseSet<MCRegUnit> VgprUse;
  DenseSet<MCRegUnit> VgprDefVMEM;
  DenseSet<MCRegUnit> VgprDefDS;

  DenseMap<MCRegUnit, unsigned> LastDSReadPositionMap;
  unsigned DSReadPosition = 0;
  bool IsSingleBlock = ML->getNumBlocks() == 1;
  bool TrackDSFlushPoint = ST.hasExtendedWaitCounts() && IsSingleBlock;
  unsigned LastDSFlushPosition = 0;

  for (MachineBasicBlock *MBB : ML->blocks()) {
    for (MachineInstr &MI : *MBB) {
      if (isVMEMOrFlatVMEM(MI)) {
        HasVMemLoad |= MI.mayLoad();
        HasVMemStore |= MI.mayStore();

      if (mayStoreIncrementingDSCNT(MI)) {

        if (VMemInvalidated)

        TrackSimpleDSOpt = false;
        TrackDSFlushPoint = false;

      bool IsDSRead = isDSRead(MI);

      auto updateDSReadFlushTracking = [&](MCRegUnit RU) {
        if (!TrackDSFlushPoint)

        if (auto It = LastDSReadPositionMap.find(RU);
            It != LastDSReadPositionMap.end()) {

          LastDSFlushPosition = std::max(LastDSFlushPosition, It->second);

      for (const MachineOperand &Op : MI.all_uses()) {
        if (Op.isDebug() || !TRI.isVectorRegister(MRI, Op.getReg()))

        for (MCRegUnit RU : TRI.regunits(Op.getReg().asMCReg())) {

            VMemInvalidated = true;

            TrackSimpleDSOpt = false;

          if (VMemInvalidated && !TrackSimpleDSOpt && !TrackDSFlushPoint)

          updateDSReadFlushTracking(RU);

          VMEMID ID = toVMEMID(RU);

            UsesVgprVMEMLoadedOutside = true;

            UsesVgprDSReadOutside = true;

      if (isVMEMOrFlatVMEM(MI) && MI.mayLoad()) {
        for (const MachineOperand &Op : MI.all_defs()) {
          for (MCRegUnit RU : TRI.regunits(Op.getReg().asMCReg())) {

              VMemInvalidated = true;

          if (VMemInvalidated && !TrackSimpleDSOpt && !TrackDSFlushPoint)

      if (IsDSRead || TrackDSFlushPoint) {
        for (const MachineOperand &Op : MI.all_defs()) {
          if (!TRI.isVectorRegister(MRI, Op.getReg()))

          for (MCRegUnit RU : TRI.regunits(Op.getReg().asMCReg())) {

              updateDSReadFlushTracking(RU);

            if (TrackDSFlushPoint)
              LastDSReadPositionMap[RU] = DSReadPosition;

  if (!VMemInvalidated && UsesVgprVMEMLoadedOutside &&
      ((!ST.hasVscnt() && HasVMemStore && !HasVMemLoad) ||
       (HasVMemLoad && ST.hasVmemWriteVgprInOrder())))
    Flags.FlushVmCnt = true;

  bool SimpleDSOpt = TrackSimpleDSOpt && UsesVgprDSReadOutside;

  bool HasUnflushedDSReads = DSReadPosition > LastDSFlushPosition;
  bool DSFlushPointPrefetch =
      TrackDSFlushPoint && UsesVgprDSReadOutside && HasUnflushedDSReads;

  if (SimpleDSOpt || DSFlushPointPrefetch)
    Flags.FlushDsCnt = true;
bool SIInsertWaitcntsLegacy::runOnMachineFunction(MachineFunction &MF) {
  auto &MLI = getAnalysis<MachineLoopInfoWrapperPass>().getLI();

      getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();

  if (auto *AAR = getAnalysisIfAvailable<AAResultsWrapperPass>())
    AA = &AAR->getAAResults();

  return SIInsertWaitcnts(MLI, PDT, AA, MF).run();

  if (!SIInsertWaitcnts(MLI, PDT, AA, MF).run())

      .preserve<AAManager>();
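// run() is the driver: it picks the generator matching the subtarget, seeds
// the entry block (non-kernel functions get a conservative incoming state via
// setStateOnFunctionEntryOrReturn), then iterates blocks in reverse post
// order, revisiting successors whose merged incoming state changed.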
3766bool SIInsertWaitcnts::run() {
3774 if (ST.hasExtendedWaitCounts()) {
3775 IsExpertMode = ST.hasExpertSchedulingMode() &&
3784 WCG = std::make_unique<WaitcntGeneratorGFX12Plus>(MF, MaxCounter, Limits,
3789 WCG = std::make_unique<WaitcntGeneratorPreGFX12>(
3793 SmemAccessCounter = getCounterFromEvent(SMEM_ACCESS);
3797 MachineBasicBlock &EntryBB = MF.
front();
3808 while (
I != EntryBB.
end() &&
I->isMetaInstruction())
3811 if (
ST.hasExtendedWaitCounts()) {
3820 if (!
ST.hasImageInsts() &&
3826 TII.get(instrsForExtendedCounterTypes[CT]))
3839 auto NonKernelInitialState = std::make_unique<WaitcntBrackets>(
this);
3840 NonKernelInitialState->setStateOnFunctionEntryOrReturn();
3841 BlockInfos[&EntryBB].Incoming = std::move(NonKernelInitialState);
3848 for (
auto *
MBB : ReversePostOrderTraversal<MachineFunction *>(&MF))
3851 std::unique_ptr<WaitcntBrackets> Brackets;
3856 for (
auto BII = BlockInfos.
begin(), BIE = BlockInfos.
end(); BII != BIE;
3858 MachineBasicBlock *
MBB = BII->first;
3859 BlockInfo &BI = BII->second;
3865 Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
3867 *Brackets = *BI.Incoming;
3870 Brackets = std::make_unique<WaitcntBrackets>(
this);
3875 Brackets->~WaitcntBrackets();
3876 new (Brackets.get()) WaitcntBrackets(
this);
3880 if (
ST.hasWaitXcnt())
3882 Modified |= insertWaitcntInBlock(MF, *
MBB, *Brackets);
3885 if (Brackets->hasPendingEvent()) {
3886 BlockInfo *MoveBracketsToSucc =
nullptr;
3888 auto *SuccBII = BlockInfos.
find(Succ);
3889 BlockInfo &SuccBI = SuccBII->second;
3890 if (!SuccBI.Incoming) {
3891 SuccBI.Dirty =
true;
3892 if (SuccBII <= BII) {
3896 if (!MoveBracketsToSucc) {
3897 MoveBracketsToSucc = &SuccBI;
3899 SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
3903 dbgs() <<
"Try to merge ";
3909 if (SuccBI.Incoming->merge(*Brackets)) {
3910 SuccBI.Dirty =
true;
3911 if (SuccBII <= BII) {
3918 if (MoveBracketsToSucc)
3919 MoveBracketsToSucc->Incoming = std::move(Brackets);
3924 if (
ST.hasScalarStores()) {
3925 SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;
3926 bool HaveScalarStores =
false;
3928 for (MachineBasicBlock &
MBB : MF) {
3929 for (MachineInstr &
MI :
MBB) {
3930 if (!HaveScalarStores &&
TII.isScalarStore(
MI))
3931 HaveScalarStores =
true;
3933 if (
MI.getOpcode() == AMDGPU::S_ENDPGM ||
3934 MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
3939 if (HaveScalarStores) {
3948 for (MachineBasicBlock *
MBB : EndPgmBlocks) {
3949 bool SeenDCacheWB =
false;
3953 if (
I->getOpcode() == AMDGPU::S_DCACHE_WB)
3954 SeenDCacheWB =
true;
3955 else if (
TII.isScalarStore(*
I))
3956 SeenDCacheWB =
false;
3959 if ((
I->getOpcode() == AMDGPU::S_ENDPGM ||
3960 I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
  if (IsExpertMode) {
    MachineBasicBlock &EntryBB = MF.front();
    MachineBasicBlock::iterator I = EntryBB.begin();
    while (I != EntryBB.end() && I->isMetaInstruction())
      ++I;
    setSchedulingMode(EntryBB, I, true);

    for (MachineInstr *MI : CallInsts) {
      MachineBasicBlock &MBB = *MI->getParent();
      setSchedulingMode(MBB, MI, false);
      setSchedulingMode(MBB, std::next(MI->getIterator()), true);
    }

    for (MachineInstr *MI : ReturnInsts)
      setSchedulingMode(*MI->getParent(), MI, false);
  }
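  // Release the wave's VGPRs at each program end: dynamic-VGPR mode uses
  // "s_alloc_vgpr 0", while GFX11+ (when there are calls or occupancy is
  // VGPR-limited) sends the DEALLOC_VGPRS message, preceded by an s_nop on
  // subtargets that require one.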
  if (MFI->isDynamicVGPREnabled()) {
    for (auto [MI, _] : EndPgmInsts) {
      BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
              TII.get(AMDGPU::S_ALLOC_VGPR))
          .addImm(0);
      // ...
    }
  } else if (!WCG->isOptNone() &&
             ST.getGeneration() >= AMDGPUSubtarget::GFX11 &&
             (MF.getFrameInfo().hasCalls() ||
              ST.getOccupancyWithNumVGPRs(
                  TRI.getNumUsedPhysRegs(MRI, AMDGPU::VGPR_32RegClass),
                  /* ... */) < AMDGPU::IsaInfo::getMaxWavesPerEU(&ST))) {
    for (auto [MI, Flag] : EndPgmInsts) {
      // ...
      if (ST.requiresNopBeforeDeallocVGPRs()) {
        BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
                TII.get(AMDGPU::S_NOP))
            .addImm(0);
      }
      BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
              TII.get(AMDGPU::S_SENDMSG))
          .addImm(AMDGPU::SendMsg::ID_DEALLOC_VGPRS_GFX11Plus);
      // ...
    }
  }
  // ...
  return Modified;
}