47#define DEBUG_TYPE "si-insert-waitcnts"
50 "Force emit s_waitcnt expcnt(0) instrs");
52 "Force emit s_waitcnt lgkmcnt(0) instrs");
54 "Force emit s_waitcnt vmcnt(0) instrs");
58 cl::desc(
"Force all waitcnt instrs to be emitted as "
59 "s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
63 "amdgpu-waitcnt-load-forcezero",
64 cl::desc(
"Force all waitcnt load counters to wait until 0"),
68 "amdgpu-expert-scheduling-mode",
69 cl::desc(
"Enable expert scheduling mode 2 for all functions (GFX12+ only)"),
117 TRACKINGID_RANGE_LEN = (1 << 16),
122 REGUNITS_END = REGUNITS_BEGIN + TRACKINGID_RANGE_LEN,
127 NUM_LDSDMA = TRACKINGID_RANGE_LEN,
128 LDSDMA_BEGIN = REGUNITS_END,
129 LDSDMA_END = LDSDMA_BEGIN + NUM_LDSDMA,
133static constexpr VMEMID toVMEMID(MCRegUnit RU) {
134 return static_cast<unsigned>(RU);
137#define AMDGPU_DECLARE_WAIT_EVENTS(DECL) \
139 DECL(VMEM_SAMPLER_READ_ACCESS) \
140 DECL(VMEM_BVH_READ_ACCESS) \
141 DECL(GLOBAL_INV_ACCESS) \
142 DECL(VMEM_WRITE_ACCESS) \
143 DECL(SCRATCH_WRITE_ACCESS) \
153 DECL(EXP_POS_ACCESS) \
154 DECL(EXP_PARAM_ACCESS) \
156 DECL(EXP_LDS_ACCESS) \
157 DECL(VGPR_CSMACC_WRITE) \
158 DECL(VGPR_DPMACC_WRITE) \
159 DECL(VGPR_TRANS_WRITE) \
160 DECL(VGPR_XDL_WRITE) \
161 DECL(VGPR_LDS_READ) \
162 DECL(VGPR_FLAT_READ) \
163 DECL(VGPR_VMEM_READ) \
167#define AMDGPU_EVENT_ENUM(Name) Name,
172#undef AMDGPU_EVENT_ENUM
186auto wait_events(WaitEventType MaxEvent = NUM_WAIT_EVENTS) {
187 return enum_seq(VMEM_ACCESS, MaxEvent);
190#define AMDGPU_EVENT_NAME(Name) #Name,
194#undef AMDGPU_EVENT_NAME
195static constexpr StringLiteral getWaitEventTypeName(WaitEventType Event) {
196 return WaitEventTypeName[
Event];
220 AMDGPU::S_WAIT_LOADCNT, AMDGPU::S_WAIT_DSCNT,
221 AMDGPU::S_WAIT_EXPCNT, AMDGPU::S_WAIT_STORECNT,
222 AMDGPU::S_WAIT_SAMPLECNT, AMDGPU::S_WAIT_BVHCNT,
223 AMDGPU::S_WAIT_KMCNT, AMDGPU::S_WAIT_XCNT,
224 AMDGPU::S_WAIT_ASYNCCNT};
229 switch (
MI.getOpcode()) {
230 case AMDGPU::ASYNCMARK:
231 case AMDGPU::WAIT_ASYNCMARK:
234 return MI.isMetaInstruction();
250 assert(updateVMCntOnly(Inst));
252 return VMEM_NOSAMPLER;
266 return VMEM_NOSAMPLER;
282 WaitEventSet() =
default;
283 explicit constexpr WaitEventSet(WaitEventType Event) {
284 static_assert(NUM_WAIT_EVENTS <=
sizeof(Mask) * 8,
285 "Not enough bits in Mask for all the events");
288 constexpr WaitEventSet(std::initializer_list<WaitEventType> Events) {
289 for (
auto &
E : Events) {
293 void insert(
const WaitEventType &Event) { Mask |= 1 <<
Event; }
294 void remove(
const WaitEventType &Event) { Mask &= ~(1 <<
Event); }
295 void remove(
const WaitEventSet &
Other) { Mask &= ~Other.Mask; }
296 bool contains(
const WaitEventType &Event)
const {
297 return Mask & (1 <<
Event);
301 return (~Mask &
Other.Mask) == 0;
326 return Mask ==
Other.Mask;
329 bool empty()
const {
return Mask == 0; }
331 bool twoOrMore()
const {
return Mask & (Mask - 1); }
332 operator bool()
const {
return !
empty(); }
333 void print(raw_ostream &OS)
const {
334 ListSeparator
LS(
", ");
335 for (WaitEventType Event : wait_events()) {
337 OS <<
LS << getWaitEventTypeName(Event);
343void WaitEventSet::dump()
const {
348class WaitcntBrackets;
356class WaitcntGenerator {
358 const GCNSubtarget &ST;
359 const SIInstrInfo &
TII;
360 AMDGPU::IsaVersion
IV;
363 bool ExpandWaitcntProfiling =
false;
364 const AMDGPU::HardwareLimits &Limits;
367 WaitcntGenerator() =
delete;
368 WaitcntGenerator(
const WaitcntGenerator &) =
delete;
369 WaitcntGenerator(
const MachineFunction &MF,
371 const AMDGPU::HardwareLimits &Limits)
372 :
ST(MF.getSubtarget<GCNSubtarget>()),
TII(*
ST.getInstrInfo()),
376 ExpandWaitcntProfiling(
377 MF.
getFunction().hasFnAttribute(
"amdgpu-expand-waitcnt-profiling")),
382 bool isOptNone()
const {
return OptNone; }
384 const AMDGPU::HardwareLimits &getLimits()
const {
return Limits; }
398 applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
399 MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &
Wait,
403 bool promoteSoftWaitCnt(MachineInstr *Waitcnt)
const;
408 virtual bool createNewWaitcnt(MachineBasicBlock &
Block,
410 AMDGPU::Waitcnt
Wait,
411 const WaitcntBrackets &ScoreBrackets) = 0;
414 virtual const WaitEventSet &
430 virtual AMDGPU::Waitcnt getAllZeroWaitcnt(
bool IncludeVSCnt)
const = 0;
432 virtual ~WaitcntGenerator() =
default;
435class WaitcntGeneratorPreGFX12 final :
public WaitcntGenerator {
436 static constexpr const WaitEventSet
439 {VMEM_ACCESS, VMEM_SAMPLER_READ_ACCESS, VMEM_BVH_READ_ACCESS}),
440 WaitEventSet({SMEM_ACCESS, LDS_ACCESS, GDS_ACCESS, SQ_MESSAGE}),
441 WaitEventSet({EXP_GPR_LOCK, GDS_GPR_LOCK, VMW_GPR_LOCK,
442 EXP_PARAM_ACCESS, EXP_POS_ACCESS, EXP_LDS_ACCESS}),
443 WaitEventSet({VMEM_WRITE_ACCESS, SCRATCH_WRITE_ACCESS}),
452 using WaitcntGenerator::WaitcntGenerator;
454 applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
455 MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &
Wait,
458 bool createNewWaitcnt(MachineBasicBlock &
Block,
460 AMDGPU::Waitcnt
Wait,
461 const WaitcntBrackets &ScoreBrackets)
override;
464 return WaitEventMaskForInstPreGFX12[
T];
467 AMDGPU::Waitcnt getAllZeroWaitcnt(
bool IncludeVSCnt)
const override;
470class WaitcntGeneratorGFX12Plus final :
public WaitcntGenerator {
473 static constexpr const WaitEventSet
475 WaitEventSet({VMEM_ACCESS, GLOBAL_INV_ACCESS}),
476 WaitEventSet({LDS_ACCESS, GDS_ACCESS}),
477 WaitEventSet({EXP_GPR_LOCK, GDS_GPR_LOCK, VMW_GPR_LOCK,
478 EXP_PARAM_ACCESS, EXP_POS_ACCESS, EXP_LDS_ACCESS}),
479 WaitEventSet({VMEM_WRITE_ACCESS, SCRATCH_WRITE_ACCESS}),
480 WaitEventSet({VMEM_SAMPLER_READ_ACCESS}),
481 WaitEventSet({VMEM_BVH_READ_ACCESS}),
482 WaitEventSet({SMEM_ACCESS, SQ_MESSAGE, SCC_WRITE}),
483 WaitEventSet({VMEM_GROUP, SMEM_GROUP}),
484 WaitEventSet({ASYNC_ACCESS}),
485 WaitEventSet({VGPR_CSMACC_WRITE, VGPR_DPMACC_WRITE, VGPR_TRANS_WRITE,
487 WaitEventSet({VGPR_LDS_READ, VGPR_FLAT_READ, VGPR_VMEM_READ})};
490 WaitcntGeneratorGFX12Plus() =
delete;
491 WaitcntGeneratorGFX12Plus(
const MachineFunction &MF,
493 const AMDGPU::HardwareLimits &Limits,
495 : WaitcntGenerator(MF, MaxCounter, Limits), IsExpertMode(IsExpertMode) {}
498 applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
499 MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &
Wait,
502 bool createNewWaitcnt(MachineBasicBlock &
Block,
504 AMDGPU::Waitcnt
Wait,
505 const WaitcntBrackets &ScoreBrackets)
override;
508 return WaitEventMaskForInstGFX12Plus[
T];
511 AMDGPU::Waitcnt getAllZeroWaitcnt(
bool IncludeVSCnt)
const override;
515struct PreheaderFlushFlags {
516 bool FlushVmCnt =
false;
517 bool FlushDsCnt =
false;
520class SIInsertWaitcnts {
521 DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
522 DenseMap<MachineBasicBlock *, PreheaderFlushFlags> PreheadersToFlush;
523 MachineLoopInfo &MLI;
524 MachinePostDominatorTree &PDT;
529 std::unique_ptr<WaitcntBrackets> Incoming;
533 MapVector<MachineBasicBlock *, BlockInfo> BlockInfos;
537 std::unique_ptr<WaitcntGenerator> WCG;
540 DenseSet<MachineInstr *> CallInsts;
541 DenseSet<MachineInstr *> ReturnInsts;
546 DenseMap<MachineInstr *, bool> EndPgmInsts;
548 AMDGPU::HardwareLimits Limits;
551 const GCNSubtarget &
ST;
552 const SIInstrInfo &
TII;
553 const SIRegisterInfo &
TRI;
554 const MachineRegisterInfo &MRI;
557 bool IsExpertMode =
false;
559 SIInsertWaitcnts(MachineLoopInfo &MLI, MachinePostDominatorTree &PDT,
561 : MLI(MLI), PDT(PDT), AA(AA), MF(MF),
ST(MF.getSubtarget<GCNSubtarget>()),
562 TII(*
ST.getInstrInfo()),
TRI(
TII.getRegisterInfo()),
563 MRI(MF.getRegInfo()) {
564 (void)ForceExpCounter;
565 (void)ForceLgkmCounter;
566 (void)ForceVMCounter;
569 const AMDGPU::HardwareLimits &getLimits()
const {
return Limits; }
571 PreheaderFlushFlags getPreheaderFlushFlags(MachineLoop *
ML,
572 const WaitcntBrackets &Brackets);
573 PreheaderFlushFlags isPreheaderToFlush(MachineBasicBlock &
MBB,
574 const WaitcntBrackets &ScoreBrackets);
575 bool isVMEMOrFlatVMEM(
const MachineInstr &
MI)
const;
576 bool isDSRead(
const MachineInstr &
MI)
const;
577 bool mayStoreIncrementingDSCNT(
const MachineInstr &
MI)
const;
580 void setForceEmitWaitcnt() {
618 WaitEventType getVmemWaitEventType(
const MachineInstr &Inst)
const {
621 case AMDGPU::GLOBAL_INV:
622 return GLOBAL_INV_ACCESS;
624 case AMDGPU::GLOBAL_WB:
625 case AMDGPU::GLOBAL_WBINV:
626 return VMEM_WRITE_ACCESS;
632 static const WaitEventType VmemReadMapping[NUM_VMEM_TYPES] = {
633 VMEM_ACCESS, VMEM_SAMPLER_READ_ACCESS, VMEM_BVH_READ_ACCESS};
642 if (
TII.mayAccessScratch(Inst))
643 return SCRATCH_WRITE_ACCESS;
644 return VMEM_WRITE_ACCESS;
648 return VmemReadMapping[getVmemType(Inst)];
651 std::optional<WaitEventType>
652 getExpertSchedulingEventType(
const MachineInstr &Inst)
const;
654 bool isAsync(
const MachineInstr &
MI)
const {
659 const MachineOperand *
Async =
660 TII.getNamedOperand(
MI, AMDGPU::OpName::IsAsync);
664 bool isNonAsyncLdsDmaWrite(
const MachineInstr &
MI)
const {
668 bool isAsyncLdsDmaWrite(
const MachineInstr &
MI)
const {
672 bool shouldUpdateAsyncMark(
const MachineInstr &
MI,
674 if (!isAsyncLdsDmaWrite(
MI))
681 bool isVmemAccess(
const MachineInstr &
MI)
const;
682 bool generateWaitcntInstBefore(MachineInstr &
MI,
683 WaitcntBrackets &ScoreBrackets,
684 MachineInstr *OldWaitcntInstr,
685 PreheaderFlushFlags FlushFlags);
686 bool generateWaitcnt(AMDGPU::Waitcnt
Wait,
688 MachineBasicBlock &
Block, WaitcntBrackets &ScoreBrackets,
689 MachineInstr *OldWaitcntInstr);
691 WaitEventSet getEventsFor(
const MachineInstr &Inst)
const;
692 void updateEventWaitcntAfter(MachineInstr &Inst,
693 WaitcntBrackets *ScoreBrackets);
695 MachineBasicBlock *
Block)
const;
696 bool insertForcedWaitAfter(MachineInstr &Inst, MachineBasicBlock &
Block,
697 WaitcntBrackets &ScoreBrackets);
698 bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &
Block,
699 WaitcntBrackets &ScoreBrackets);
702 bool removeRedundantSoftXcnts(MachineBasicBlock &
Block);
704 bool ExpertMode)
const;
706 return WCG->getWaitEvents(
T);
709 return WCG->getCounterFromEvent(
E);
721class WaitcntBrackets {
729 unsigned NumUnusedVmem = 0, NumUnusedSGPRs = 0;
730 for (
auto &[
ID, Val] : VMem) {
734 for (
auto &[
ID, Val] : SGPRs) {
739 if (NumUnusedVmem || NumUnusedSGPRs) {
740 errs() <<
"WaitcntBracket had unused entries at destruction time: "
741 << NumUnusedVmem <<
" VMem and " << NumUnusedSGPRs
742 <<
" SGPR unused entries\n";
753 return ScoreUBs[
T] - ScoreLBs[
T];
757 return getVMemScore(
ID,
T) > getScoreLB(
T);
775 return getScoreUB(
T) - getScoreLB(
T);
779 auto It = SGPRs.find(RU);
780 return It != SGPRs.end() ? It->second.get(
T) : 0;
784 auto It = VMem.find(TID);
785 return It != VMem.end() ? It->second.Scores[
T] : 0;
792 void simplifyWaitcnt(AMDGPU::Waitcnt &
Wait)
const {
795 void simplifyWaitcnt(
const AMDGPU::Waitcnt &CheckWait,
796 AMDGPU::Waitcnt &UpdateWait)
const;
799 void simplifyXcnt(
const AMDGPU::Waitcnt &CheckWait,
800 AMDGPU::Waitcnt &UpdateWait)
const;
801 void simplifyVmVsrc(
const AMDGPU::Waitcnt &CheckWait,
802 AMDGPU::Waitcnt &UpdateWait)
const;
805 AMDGPU::Waitcnt &
Wait)
const;
807 AMDGPU::Waitcnt &
Wait)
const;
808 AMDGPU::Waitcnt determineAsyncWait(
unsigned N);
809 void tryClearSCCWriteEvent(MachineInstr *Inst);
811 void applyWaitcnt(
const AMDGPU::Waitcnt &
Wait);
814 void updateByEvent(WaitEventType
E, MachineInstr &
MI);
815 void recordAsyncMark(MachineInstr &
MI);
817 bool hasPendingEvent()
const {
return !PendingEvents.empty(); }
818 bool hasPendingEvent(WaitEventType
E)
const {
819 return PendingEvents.contains(
E);
822 bool HasPending = PendingEvents &
Context->getWaitEvents(
T);
824 "Expected pending events iff scoreboard is not empty");
829 WaitEventSet Events = PendingEvents &
Context->getWaitEvents(
T);
831 return Events.twoOrMore();
834 bool hasPendingFlat()
const {
841 void setPendingFlat() {
846 bool hasPendingGDS()
const {
851 unsigned getPendingGDSWait()
const {
860 bool hasOtherPendingVmemTypes(
MCPhysReg Reg, VmemType V)
const {
861 for (MCRegUnit RU : regunits(
Reg)) {
862 auto It = VMem.find(toVMEMID(RU));
863 if (It != VMem.end() && (It->second.VMEMTypes & ~(1 << V)))
870 for (MCRegUnit RU : regunits(
Reg)) {
871 if (
auto It = VMem.find(toVMEMID(RU)); It != VMem.end()) {
872 It->second.VMEMTypes = 0;
873 if (It->second.empty())
879 void setStateOnFunctionEntryOrReturn() {
886 ArrayRef<const MachineInstr *> getLDSDMAStores()
const {
890 bool hasPointSampleAccel(
const MachineInstr &
MI)
const;
891 bool hasPointSamplePendingVmemTypes(
const MachineInstr &
MI,
894 void print(raw_ostream &)
const;
899 void purgeEmptyTrackingData();
909 using CounterValueArray = std::array<unsigned, AMDGPU::NUM_INST_CNTS>;
912 AMDGPU::Waitcnt &
Wait)
const;
914 static bool mergeScore(
const MergeInfo &M,
unsigned &Score,
915 unsigned OtherScore);
920 assert(
Reg != AMDGPU::SCC &&
"Shouldn't be used on SCC");
923 const TargetRegisterClass *RC =
Context->TRI.getPhysRegBaseClass(
Reg);
924 unsigned Size =
Context->TRI.getRegSizeInBits(*RC);
925 if (
Size == 16 &&
Context->ST.hasD16Writes32BitVgpr())
951 if (
Reg == AMDGPU::SCC) {
954 for (MCRegUnit RU : regunits(
Reg))
955 VMem[toVMEMID(RU)].Scores[
T] = Val;
957 for (MCRegUnit RU : regunits(
Reg))
958 SGPRs[RU].get(
T) = Val;
965 VMem[TID].Scores[
T] = Val;
968 void setScoreByOperand(
const MachineOperand &
Op,
971 const SIInsertWaitcnts *
Context;
975 WaitEventSet PendingEvents;
977 unsigned LastFlatDsCnt = 0;
978 unsigned LastFlatLoadCnt = 0;
980 unsigned LastGDS = 0;
997 CounterValueArray Scores{};
999 unsigned VMEMTypes = 0;
1008 unsigned ScoreDsKmCnt = 0;
1009 unsigned ScoreXCnt = 0;
1025 bool empty()
const {
return !ScoreDsKmCnt && !ScoreXCnt; }
1028 DenseMap<VMEMID, VMEMInfo> VMem;
1029 DenseMap<MCRegUnit, SGPRInfo> SGPRs;
1032 unsigned SCCScore = 0;
1034 const MachineInstr *PendingSCCWrite =
nullptr;
1038 SmallVector<const MachineInstr *> LDSDMAStores;
1047 static constexpr unsigned MaxAsyncMarks = 16;
1051 CounterValueArray AsyncScore{};
1054class SIInsertWaitcntsLegacy :
public MachineFunctionPass {
1057 SIInsertWaitcntsLegacy() : MachineFunctionPass(
ID) {}
1059 bool runOnMachineFunction(MachineFunction &MF)
override;
1061 StringRef getPassName()
const override {
1062 return "SI insert wait instructions";
1065 void getAnalysisUsage(AnalysisUsage &AU)
const override {
1068 AU.
addRequired<MachinePostDominatorTreeWrapperPass>();
1077void WaitcntBrackets::setScoreByOperand(
const MachineOperand &
Op,
1080 setRegScore(
Op.getReg().asMCReg(), CntTy, Score);
1088bool WaitcntBrackets::hasPointSampleAccel(
const MachineInstr &
MI)
const {
1093 const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo =
1103bool WaitcntBrackets::hasPointSamplePendingVmemTypes(
const MachineInstr &
MI,
1105 if (!hasPointSampleAccel(
MI))
1108 return hasOtherPendingVmemTypes(
Reg, VMEM_NOSAMPLER);
1111void WaitcntBrackets::updateByEvent(WaitEventType
E, MachineInstr &Inst) {
1115 unsigned UB = getScoreUB(
T);
1116 unsigned CurrScore = UB + 1;
1122 PendingEvents.insert(
E);
1123 setScoreUB(
T, CurrScore);
1126 const MachineRegisterInfo &MRI =
Context->MRI;
1135 if (
const auto *AddrOp =
TII.getNamedOperand(Inst, AMDGPU::OpName::addr))
1139 if (
const auto *Data0 =
1140 TII.getNamedOperand(Inst, AMDGPU::OpName::data0))
1142 if (
const auto *Data1 =
1143 TII.getNamedOperand(Inst, AMDGPU::OpName::data1))
1146 Inst.
getOpcode() != AMDGPU::DS_APPEND &&
1147 Inst.
getOpcode() != AMDGPU::DS_CONSUME &&
1148 Inst.
getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
1149 for (
const MachineOperand &
Op : Inst.
all_uses()) {
1150 if (
TRI.isVectorRegister(MRI,
Op.getReg()))
1154 }
else if (
TII.isFLAT(Inst)) {
1156 setScoreByOperand(*
TII.getNamedOperand(Inst, AMDGPU::OpName::data),
1159 setScoreByOperand(*
TII.getNamedOperand(Inst, AMDGPU::OpName::data),
1162 }
else if (
TII.isMIMG(Inst)) {
1166 setScoreByOperand(*
TII.getNamedOperand(Inst, AMDGPU::OpName::data),
1169 }
else if (
TII.isMTBUF(Inst)) {
1172 }
else if (
TII.isMUBUF(Inst)) {
1176 setScoreByOperand(*
TII.getNamedOperand(Inst, AMDGPU::OpName::data),
1179 }
else if (
TII.isLDSDIR(Inst)) {
1181 setScoreByOperand(*
TII.getNamedOperand(Inst, AMDGPU::OpName::vdst),
1184 if (
TII.isEXP(Inst)) {
1189 for (MachineOperand &DefMO : Inst.
all_defs()) {
1190 if (
TRI.isVGPR(MRI, DefMO.getReg())) {
1195 for (
const MachineOperand &
Op : Inst.
all_uses()) {
1196 if (
TRI.isVectorRegister(MRI,
Op.getReg()))
1201 WaitEventType OtherEvent =
E == SMEM_GROUP ? VMEM_GROUP : SMEM_GROUP;
1202 if (PendingEvents.contains(OtherEvent)) {
1207 setScoreLB(
T, getScoreUB(
T) - 1);
1208 PendingEvents.remove(OtherEvent);
1210 for (
const MachineOperand &
Op : Inst.
all_uses())
1211 setScoreByOperand(
Op,
T, CurrScore);
1215 for (
const MachineOperand &
Op : Inst.
operands()) {
1220 setScoreByOperand(
Op,
T, CurrScore);
1232 for (
const MachineOperand &
Op : Inst.
defs()) {
1235 if (!
TRI.isVectorRegister(MRI,
Op.getReg()))
1237 if (updateVMCntOnly(Inst)) {
1242 VmemType
V = getVmemType(Inst);
1243 unsigned char TypesMask = 1 <<
V;
1246 if (hasPointSampleAccel(Inst))
1247 TypesMask |= 1 << VMEM_NOSAMPLER;
1248 for (MCRegUnit RU : regunits(
Op.getReg().asMCReg()))
1249 VMem[toVMEMID(RU)].VMEMTypes |= TypesMask;
1252 setScoreByOperand(
Op,
T, CurrScore);
1255 (
TII.isDS(Inst) ||
Context->isNonAsyncLdsDmaWrite(Inst))) {
1264 if (!MemOp->isStore() ||
1269 auto AAI = MemOp->getAAInfo();
1275 if (!AAI || !AAI.Scope)
1277 for (
unsigned I = 0,
E = LDSDMAStores.
size();
I !=
E && !Slot; ++
I) {
1278 for (
const auto *MemOp : LDSDMAStores[
I]->memoperands()) {
1279 if (MemOp->isStore() && AAI == MemOp->getAAInfo()) {
1294 setVMemScore(LDSDMA_BEGIN,
T, CurrScore);
1295 if (Slot && Slot < NUM_LDSDMA)
1296 setVMemScore(LDSDMA_BEGIN + Slot,
T, CurrScore);
1299 if (
Context->shouldUpdateAsyncMark(Inst,
T)) {
1300 AsyncScore[
T] = CurrScore;
1304 setRegScore(AMDGPU::SCC,
T, CurrScore);
1305 PendingSCCWrite = &Inst;
1310void WaitcntBrackets::recordAsyncMark(MachineInstr &Inst) {
1316 AsyncMarks.push_back(AsyncScore);
1319 dbgs() <<
"recordAsyncMark:\n" << Inst;
1320 for (
const auto &Mark : AsyncMarks) {
1327void WaitcntBrackets::print(raw_ostream &OS)
const {
1331 unsigned SR = getScoreRange(
T);
1334 OS <<
" " << (
ST.hasExtendedWaitCounts() ?
"LOAD" :
"VM") <<
"_CNT("
1338 OS <<
" " << (
ST.hasExtendedWaitCounts() ?
"DS" :
"LGKM") <<
"_CNT("
1342 OS <<
" EXP_CNT(" << SR <<
"):";
1345 OS <<
" " << (
ST.hasExtendedWaitCounts() ?
"STORE" :
"VS") <<
"_CNT("
1349 OS <<
" SAMPLE_CNT(" << SR <<
"):";
1352 OS <<
" BVH_CNT(" << SR <<
"):";
1355 OS <<
" KM_CNT(" << SR <<
"):";
1358 OS <<
" X_CNT(" << SR <<
"):";
1361 OS <<
" ASYNC_CNT(" << SR <<
"):";
1364 OS <<
" VA_VDST(" << SR <<
"): ";
1367 OS <<
" VM_VSRC(" << SR <<
"): ";
1370 OS <<
" UNKNOWN(" << SR <<
"):";
1376 unsigned LB = getScoreLB(
T);
1379 sort(SortedVMEMIDs);
1381 for (
auto ID : SortedVMEMIDs) {
1382 unsigned RegScore = VMem.at(
ID).Scores[
T];
1385 unsigned RelScore = RegScore - LB - 1;
1386 if (
ID < REGUNITS_END) {
1387 OS <<
' ' << RelScore <<
":vRU" <<
ID;
1389 assert(
ID >= LDSDMA_BEGIN &&
ID < LDSDMA_END &&
1390 "Unhandled/unexpected ID value!");
1391 OS <<
' ' << RelScore <<
":LDSDMA" <<
ID;
1396 if (isSmemCounter(
T)) {
1398 sort(SortedSMEMIDs);
1399 for (
auto ID : SortedSMEMIDs) {
1400 unsigned RegScore = SGPRs.at(
ID).get(
T);
1403 unsigned RelScore = RegScore - LB - 1;
1404 OS <<
' ' << RelScore <<
":sRU" <<
static_cast<unsigned>(
ID);
1409 OS <<
' ' << SCCScore <<
":scc";
1414 OS <<
"Pending Events: ";
1415 if (hasPendingEvent()) {
1417 for (
unsigned I = 0;
I != NUM_WAIT_EVENTS; ++
I) {
1418 if (hasPendingEvent((WaitEventType)
I)) {
1419 OS <<
LS << WaitEventTypeName[
I];
1427 OS <<
"Async score: ";
1428 if (AsyncScore.empty())
1434 OS <<
"Async marks: " << AsyncMarks.size() <<
'\n';
1436 for (
const auto &Mark : AsyncMarks) {
1438 unsigned MarkedScore = Mark[
T];
1441 OS <<
" " << (
ST.hasExtendedWaitCounts() ?
"LOAD" :
"VM")
1442 <<
"_CNT: " << MarkedScore;
1445 OS <<
" " << (
ST.hasExtendedWaitCounts() ?
"DS" :
"LGKM")
1446 <<
"_CNT: " << MarkedScore;
1449 OS <<
" EXP_CNT: " << MarkedScore;
1452 OS <<
" " << (
ST.hasExtendedWaitCounts() ?
"STORE" :
"VS")
1453 <<
"_CNT: " << MarkedScore;
1456 OS <<
" SAMPLE_CNT: " << MarkedScore;
1459 OS <<
" BVH_CNT: " << MarkedScore;
1462 OS <<
" KM_CNT: " << MarkedScore;
1465 OS <<
" X_CNT: " << MarkedScore;
1468 OS <<
" ASYNC_CNT: " << MarkedScore;
1471 OS <<
" UNKNOWN: " << MarkedScore;
1482void WaitcntBrackets::simplifyWaitcnt(
const AMDGPU::Waitcnt &CheckWait,
1483 AMDGPU::Waitcnt &UpdateWait)
const {
1491 simplifyXcnt(CheckWait, UpdateWait);
1493 simplifyVmVsrc(CheckWait, UpdateWait);
1498 unsigned &
Count)
const {
1502 if (
Count >= getScoreRange(
T))
1506void WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &
Wait,
1508 unsigned Cnt =
Wait.get(
T);
1509 simplifyWaitcnt(
T, Cnt);
1513void WaitcntBrackets::simplifyXcnt(
const AMDGPU::Waitcnt &CheckWait,
1514 AMDGPU::Waitcnt &UpdateWait)
const {
1535void WaitcntBrackets::simplifyVmVsrc(
const AMDGPU::Waitcnt &CheckWait,
1536 AMDGPU::Waitcnt &UpdateWait)
const {
1541 std::min({CheckWait.get(AMDGPU::LOAD_CNT),
1542 CheckWait.get(AMDGPU::STORE_CNT),
1543 CheckWait.get(AMDGPU::SAMPLE_CNT),
1544 CheckWait.get(AMDGPU::BVH_CNT), CheckWait.get(AMDGPU::DS_CNT)}))
1549void WaitcntBrackets::purgeEmptyTrackingData() {
1561 unsigned ScoreToWait,
1562 AMDGPU::Waitcnt &
Wait)
const {
1563 const unsigned LB = getScoreLB(
T);
1564 const unsigned UB = getScoreUB(
T);
1567 if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
1569 !
Context->ST.hasFlatLgkmVMemCountInOrder()) {
1573 addWait(
Wait,
T, 0);
1574 }
else if (counterOutOfOrder(
T)) {
1578 addWait(
Wait,
T, 0);
1582 unsigned NeededWait = std::min(
1583 UB - ScoreToWait, getWaitCountMax(
Context->getLimits(),
T) - 1);
1584 addWait(
Wait,
T, NeededWait);
1589AMDGPU::Waitcnt WaitcntBrackets::determineAsyncWait(
unsigned N) {
1591 dbgs() <<
"Need " <<
N <<
" async marks. Found " << AsyncMarks.size()
1593 for (
const auto &Mark : AsyncMarks) {
1599 if (AsyncMarks.size() == MaxAsyncMarks) {
1604 LLVM_DEBUG(
dbgs() <<
"Possible truncation. Ensuring a non-trivial wait.\n");
1605 N = std::min(
N, (
unsigned)MaxAsyncMarks - 1);
1608 AMDGPU::Waitcnt
Wait;
1609 if (AsyncMarks.size() <=
N) {
1614 size_t MarkIndex = AsyncMarks.size() -
N - 1;
1615 const auto &RequiredMark = AsyncMarks[MarkIndex];
1617 determineWaitForScore(
T, RequiredMark[
T],
Wait);
1623 dbgs() <<
"Removing " << (MarkIndex + 1)
1624 <<
" async marks after determining wait\n";
1626 AsyncMarks.erase(AsyncMarks.begin(), AsyncMarks.begin() + MarkIndex + 1);
1634 AMDGPU::Waitcnt &
Wait)
const {
1635 if (
Reg == AMDGPU::SCC) {
1636 determineWaitForScore(
T, SCCScore,
Wait);
1639 for (MCRegUnit RU : regunits(
Reg))
1640 determineWaitForScore(
1641 T, IsVGPR ? getVMemScore(toVMEMID(RU),
T) : getSGPRScore(RU,
T),
1648 AMDGPU::Waitcnt &
Wait)
const {
1649 assert(TID >= LDSDMA_BEGIN && TID < LDSDMA_END);
1650 determineWaitForScore(
T, getVMemScore(TID,
T),
Wait);
1653void WaitcntBrackets::tryClearSCCWriteEvent(MachineInstr *Inst) {
1656 if (PendingSCCWrite &&
1657 PendingSCCWrite->
getOpcode() == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM &&
1659 WaitEventSet SCC_WRITE_PendingEvent(SCC_WRITE);
1662 SCC_WRITE_PendingEvent) {
1666 PendingEvents.remove(SCC_WRITE_PendingEvent);
1667 PendingSCCWrite =
nullptr;
1671void WaitcntBrackets::applyWaitcnt(
const AMDGPU::Waitcnt &
Wait) {
1673 applyWaitcnt(
Wait,
T);
1677 const unsigned UB = getScoreUB(
T);
1681 if (counterOutOfOrder(
T))
1683 setScoreLB(
T, std::max(getScoreLB(
T), UB -
Count));
1686 PendingEvents.remove(
Context->getWaitEvents(
T));
1693 PendingEvents.remove(SMEM_GROUP);
1699 else if (
Count == 0)
1700 PendingEvents.remove(VMEM_GROUP);
1704void WaitcntBrackets::applyWaitcnt(
const AMDGPU::Waitcnt &
Wait,
1706 unsigned Cnt =
Wait.get(
T);
1707 applyWaitcnt(
T, Cnt);
1714 if ((
T ==
Context->SmemAccessCounter && hasPendingEvent(SMEM_ACCESS)) ||
1722 WaitEventSet Events = PendingEvents &
Context->getWaitEvents(
T);
1725 Events.remove(GLOBAL_INV_ACCESS);
1728 return Events.twoOrMore();
1731 return hasMixedPendingEvents(
T);
1741char SIInsertWaitcntsLegacy::
ID = 0;
1746 return new SIInsertWaitcntsLegacy();
1751 int OpIdx = AMDGPU::getNamedOperandIdx(
MI.getOpcode(),
OpName);
1756 if (NewEnc == MO.
getImm())
1765static std::optional<AMDGPU::InstCounterType>
1768 case AMDGPU::S_WAIT_LOADCNT:
1770 case AMDGPU::S_WAIT_EXPCNT:
1772 case AMDGPU::S_WAIT_STORECNT:
1774 case AMDGPU::S_WAIT_SAMPLECNT:
1776 case AMDGPU::S_WAIT_BVHCNT:
1778 case AMDGPU::S_WAIT_DSCNT:
1780 case AMDGPU::S_WAIT_KMCNT:
1782 case AMDGPU::S_WAIT_XCNT:
1784 case AMDGPU::S_WAIT_ASYNCCNT:
1791bool WaitcntGenerator::promoteSoftWaitCnt(MachineInstr *Waitcnt)
const {
1805bool WaitcntGeneratorPreGFX12::applyPreexistingWaitcnt(
1806 WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,
1808 assert(isNormalMode(MaxCounter));
1811 MachineInstr *WaitcntInstr =
nullptr;
1812 MachineInstr *WaitcntVsCntInstr =
nullptr;
1815 dbgs() <<
"PreGFX12::applyPreexistingWaitcnt at: ";
1817 dbgs() <<
"end of block\n";
1825 if (isNonWaitcntMetaInst(
II)) {
1831 bool TrySimplify = Opcode !=
II.getOpcode() && !OptNone;
1835 if (Opcode == AMDGPU::S_WAITCNT) {
1836 unsigned IEnc =
II.getOperand(0).getImm();
1839 ScoreBrackets.simplifyWaitcnt(OldWait);
1843 if (WaitcntInstr || (!
Wait.hasWaitExceptStoreCnt() && TrySimplify)) {
1844 II.eraseFromParent();
1848 }
else if (Opcode == AMDGPU::S_WAITCNT_lds_direct) {
1851 <<
"Before: " <<
Wait <<
'\n';);
1862 II.eraseFromParent();
1863 }
else if (Opcode == AMDGPU::WAIT_ASYNCMARK) {
1864 unsigned N =
II.getOperand(0).getImm();
1866 AMDGPU::Waitcnt OldWait = ScoreBrackets.determineAsyncWait(
N);
1869 assert(Opcode == AMDGPU::S_WAITCNT_VSCNT);
1870 assert(
II.getOperand(0).getReg() == AMDGPU::SGPR_NULL);
1873 TII.getNamedOperand(
II, AMDGPU::OpName::simm16)->getImm();
1879 if (WaitcntVsCntInstr || (!
Wait.hasWaitStoreCnt() && TrySimplify)) {
1880 II.eraseFromParent();
1883 WaitcntVsCntInstr = &
II;
1890 Modified |= promoteSoftWaitCnt(WaitcntInstr);
1899 LLVM_DEBUG(It.isEnd() ?
dbgs() <<
"applied pre-existing waitcnt\n"
1900 <<
"New Instr at block end: "
1901 << *WaitcntInstr <<
'\n'
1902 :
dbgs() <<
"applied pre-existing waitcnt\n"
1903 <<
"Old Instr: " << *It
1904 <<
"New Instr: " << *WaitcntInstr <<
'\n');
1907 if (WaitcntVsCntInstr) {
1911 Modified |= promoteSoftWaitCnt(WaitcntVsCntInstr);
1917 ?
dbgs() <<
"applied pre-existing waitcnt\n"
1918 <<
"New Instr at block end: " << *WaitcntVsCntInstr
1920 :
dbgs() <<
"applied pre-existing waitcnt\n"
1921 <<
"Old Instr: " << *It
1922 <<
"New Instr: " << *WaitcntVsCntInstr <<
'\n');
1930bool WaitcntGeneratorPreGFX12::createNewWaitcnt(
1932 AMDGPU::Waitcnt
Wait,
const WaitcntBrackets &ScoreBrackets) {
1933 assert(isNormalMode(MaxCounter));
1941 auto EmitExpandedWaitcnt = [&](
unsigned Outstanding,
unsigned Target,
1944 EmitWaitcnt(--Outstanding);
1945 }
while (Outstanding > Target);
1951 if (
Wait.hasWaitExceptStoreCnt()) {
1953 if (ExpandWaitcntProfiling) {
1957 bool AnyOutOfOrder =
false;
1959 unsigned WaitCnt =
Wait.get(CT);
1960 if (WaitCnt != ~0u && ScoreBrackets.counterOutOfOrder(CT)) {
1961 AnyOutOfOrder =
true;
1966 if (AnyOutOfOrder) {
1974 unsigned WaitCnt =
Wait.get(CT);
1978 unsigned Outstanding = std::min(ScoreBrackets.getOutstanding(CT),
1979 getWaitCountMax(getLimits(), CT) - 1);
1980 EmitExpandedWaitcnt(Outstanding, WaitCnt, [&](
unsigned Count) {
1991 [[maybe_unused]]
auto SWaitInst =
1996 if (It !=
Block.instr_end())
dbgs() <<
"Old Instr: " << *It;
1997 dbgs() <<
"New Instr: " << *SWaitInst <<
'\n');
2001 if (
Wait.hasWaitStoreCnt()) {
2007 unsigned Outstanding =
2010 EmitExpandedWaitcnt(
2012 BuildMI(Block, It, DL, TII.get(AMDGPU::S_WAITCNT_VSCNT))
2013 .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
2017 [[maybe_unused]]
auto SWaitInst =
2019 .
addReg(AMDGPU::SGPR_NULL, RegState::Undef)
2024 if (It !=
Block.instr_end())
dbgs() <<
"Old Instr: " << *It;
2025 dbgs() <<
"New Instr: " << *SWaitInst <<
'\n');
2033WaitcntGeneratorPreGFX12::getAllZeroWaitcnt(
bool IncludeVSCnt)
const {
2034 return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt &&
ST.hasVscnt() ? 0 : ~0u);
2038WaitcntGeneratorGFX12Plus::getAllZeroWaitcnt(
bool IncludeVSCnt)
const {
2039 unsigned ExpertVal = IsExpertMode ? 0 : ~0
u;
2040 return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt ? 0 : ~0u, 0, 0, 0,
2041 ~0u , ~0u , ExpertVal,
2049bool WaitcntGeneratorGFX12Plus::applyPreexistingWaitcnt(
2050 WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,
2052 assert(!isNormalMode(MaxCounter));
2055 MachineInstr *CombinedLoadDsCntInstr =
nullptr;
2056 MachineInstr *CombinedStoreDsCntInstr =
nullptr;
2057 MachineInstr *WaitcntDepctrInstr =
nullptr;
2061 dbgs() <<
"GFX12Plus::applyPreexistingWaitcnt at: ";
2063 dbgs() <<
"end of block\n";
2069 AMDGPU::Waitcnt RequiredWait;
2074 if (isNonWaitcntMetaInst(
II)) {
2083 bool TrySimplify = Opcode !=
II.getOpcode() && !OptNone;
2087 if (Opcode == AMDGPU::S_WAITCNT)
2090 if (Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT) {
2092 TII.getNamedOperand(
II, AMDGPU::OpName::simm16)->getImm();
2097 RequiredWait = RequiredWait.combined(OldWait);
2099 if (CombinedLoadDsCntInstr ==
nullptr) {
2100 CombinedLoadDsCntInstr = &
II;
2102 II.eraseFromParent();
2105 }
else if (Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT) {
2107 TII.getNamedOperand(
II, AMDGPU::OpName::simm16)->getImm();
2112 RequiredWait = RequiredWait.combined(OldWait);
2114 if (CombinedStoreDsCntInstr ==
nullptr) {
2115 CombinedStoreDsCntInstr = &
II;
2117 II.eraseFromParent();
2120 }
else if (Opcode == AMDGPU::S_WAITCNT_DEPCTR) {
2122 TII.getNamedOperand(
II, AMDGPU::OpName::simm16)->getImm();
2123 AMDGPU::Waitcnt OldWait;
2127 ScoreBrackets.simplifyWaitcnt(OldWait);
2129 if (WaitcntDepctrInstr ==
nullptr) {
2130 WaitcntDepctrInstr = &
II;
2139 TII.getNamedOperand(
II, AMDGPU::OpName::simm16)->getImm();
2147 II.eraseFromParent();
2151 }
else if (Opcode == AMDGPU::S_WAITCNT_lds_direct) {
2154 II.eraseFromParent();
2156 }
else if (Opcode == AMDGPU::WAIT_ASYNCMARK) {
2159 unsigned N =
II.getOperand(0).getImm();
2160 AMDGPU::Waitcnt OldWait = ScoreBrackets.determineAsyncWait(
N);
2166 TII.getNamedOperand(
II, AMDGPU::OpName::simm16)->getImm();
2168 addWait(
Wait, CT.value(), OldCnt);
2170 addWait(RequiredWait, CT.value(), OldCnt);
2172 if (WaitInstrs[CT.value()] ==
nullptr) {
2173 WaitInstrs[CT.value()] = &
II;
2175 II.eraseFromParent();
2181 ScoreBrackets.simplifyWaitcnt(
Wait.combined(RequiredWait),
Wait);
2182 Wait =
Wait.combined(RequiredWait);
2184 if (CombinedLoadDsCntInstr) {
2200 AMDGPU::OpName::simm16, NewEnc);
2201 Modified |= promoteSoftWaitCnt(CombinedLoadDsCntInstr);
2207 LLVM_DEBUG(It.isEnd() ?
dbgs() <<
"applied pre-existing waitcnt\n"
2208 <<
"New Instr at block end: "
2209 << *CombinedLoadDsCntInstr <<
'\n'
2210 :
dbgs() <<
"applied pre-existing waitcnt\n"
2211 <<
"Old Instr: " << *It <<
"New Instr: "
2212 << *CombinedLoadDsCntInstr <<
'\n');
2219 if (CombinedStoreDsCntInstr) {
2224 AMDGPU::OpName::simm16, NewEnc);
2225 Modified |= promoteSoftWaitCnt(CombinedStoreDsCntInstr);
2231 LLVM_DEBUG(It.isEnd() ?
dbgs() <<
"applied pre-existing waitcnt\n"
2232 <<
"New Instr at block end: "
2233 << *CombinedStoreDsCntInstr <<
'\n'
2234 :
dbgs() <<
"applied pre-existing waitcnt\n"
2235 <<
"Old Instr: " << *It <<
"New Instr: "
2236 << *CombinedStoreDsCntInstr <<
'\n');
2266 for (MachineInstr **WI : WaitsToErase) {
2270 (*WI)->eraseFromParent();
2277 if (!WaitInstrs[CT])
2280 unsigned NewCnt =
Wait.get(CT);
2281 if (NewCnt != ~0u) {
2283 AMDGPU::OpName::simm16, NewCnt);
2284 Modified |= promoteSoftWaitCnt(WaitInstrs[CT]);
2286 ScoreBrackets.applyWaitcnt(CT, NewCnt);
2287 setNoWait(
Wait, CT);
2290 ?
dbgs() <<
"applied pre-existing waitcnt\n"
2291 <<
"New Instr at block end: " << *WaitInstrs[CT]
2293 :
dbgs() <<
"applied pre-existing waitcnt\n"
2294 <<
"Old Instr: " << *It
2295 <<
"New Instr: " << *WaitInstrs[CT] <<
'\n');
2302 if (WaitcntDepctrInstr) {
2306 TII.getNamedOperand(*WaitcntDepctrInstr, AMDGPU::OpName::simm16)
2321 AMDGPU::OpName::simm16, Enc);
2323 <<
"New Instr at block end: "
2324 << *WaitcntDepctrInstr <<
'\n'
2325 :
dbgs() <<
"applyPreexistingWaitcnt\n"
2326 <<
"Old Instr: " << *It <<
"New Instr: "
2327 << *WaitcntDepctrInstr <<
'\n');
2338bool WaitcntGeneratorGFX12Plus::createNewWaitcnt(
2340 AMDGPU::Waitcnt
Wait,
const WaitcntBrackets &ScoreBrackets) {
2341 assert(!isNormalMode(MaxCounter));
2347 auto EmitExpandedWaitcnt = [&](
unsigned Outstanding,
unsigned Target,
2349 for (
unsigned I = Outstanding - 1;
I >
Target &&
I != ~0
u; --
I)
2351 EmitWaitcnt(Target);
2357 if (ExpandWaitcntProfiling) {
2364 if (ScoreBrackets.counterOutOfOrder(CT)) {
2371 unsigned Outstanding = std::min(ScoreBrackets.getOutstanding(CT),
2372 getWaitCountMax(getLimits(), CT) - 1);
2373 EmitExpandedWaitcnt(Outstanding,
Count, [&](
unsigned Val) {
2384 MachineInstr *SWaitInst =
nullptr;
2408 if (It !=
Block.instr_end())
dbgs() <<
"Old Instr: " << *It;
2409 dbgs() <<
"New Instr: " << *SWaitInst <<
'\n');
2421 [[maybe_unused]]
auto SWaitInst =
2428 if (It !=
Block.instr_end())
dbgs() <<
"Old Instr: " << *It;
2429 dbgs() <<
"New Instr: " << *SWaitInst <<
'\n');
2432 if (
Wait.hasWaitDepctr()) {
2438 [[maybe_unused]]
auto SWaitInst =
2444 if (It !=
Block.instr_end())
dbgs() <<
"Old Instr: " << *It;
2445 dbgs() <<
"New Instr: " << *SWaitInst <<
'\n');
2464bool SIInsertWaitcnts::generateWaitcntInstBefore(
2465 MachineInstr &
MI, WaitcntBrackets &ScoreBrackets,
2466 MachineInstr *OldWaitcntInstr, PreheaderFlushFlags FlushFlags) {
2468 setForceEmitWaitcnt();
2472 AMDGPU::Waitcnt
Wait;
2473 const unsigned Opc =
MI.getOpcode();
2476 case AMDGPU::BUFFER_WBINVL1:
2477 case AMDGPU::BUFFER_WBINVL1_SC:
2478 case AMDGPU::BUFFER_WBINVL1_VOL:
2479 case AMDGPU::BUFFER_GL0_INV:
2480 case AMDGPU::BUFFER_GL1_INV: {
2488 case AMDGPU::SI_RETURN_TO_EPILOG:
2489 case AMDGPU::SI_RETURN:
2490 case AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN:
2491 case AMDGPU::S_SETPC_B64_return: {
2496 AMDGPU::Waitcnt AllZeroWait =
2497 WCG->getAllZeroWaitcnt(
false);
2502 if (
ST.hasExtendedWaitCounts() &&
2503 !ScoreBrackets.hasPendingEvent(VMEM_ACCESS))
2508 case AMDGPU::S_ENDPGM:
2509 case AMDGPU::S_ENDPGM_SAVED: {
2519 !ScoreBrackets.hasPendingEvent(SCRATCH_WRITE_ACCESS);
2522 case AMDGPU::S_SENDMSG:
2523 case AMDGPU::S_SENDMSGHALT: {
2524 if (
ST.hasLegacyGeometry() &&
2539 if (
MI.modifiesRegister(AMDGPU::EXEC, &
TRI)) {
2542 if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
2543 ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
2544 ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
2545 ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {
2552 if (
TII.isAlwaysGDS(
Opc) && ScoreBrackets.hasPendingGDS())
2560 Wait = AMDGPU::Waitcnt();
2562 const MachineOperand &CallAddrOp =
TII.getCalleeOperand(
MI);
2563 if (CallAddrOp.
isReg()) {
2564 ScoreBrackets.determineWaitForPhysReg(
2567 if (
const auto *RtnAddrOp =
2568 TII.getNamedOperand(
MI, AMDGPU::OpName::dst)) {
2569 ScoreBrackets.determineWaitForPhysReg(
2570 SmemAccessCounter, RtnAddrOp->getReg().asMCReg(),
Wait);
2573 }
else if (
Opc == AMDGPU::S_BARRIER_WAIT) {
2574 ScoreBrackets.tryClearSCCWriteEvent(&
MI);
2590 for (
const MachineMemOperand *Memop :
MI.memoperands()) {
2591 const Value *Ptr = Memop->getValue();
2592 if (Memop->isStore()) {
2593 if (
auto It = SLoadAddresses.
find(Ptr); It != SLoadAddresses.
end()) {
2594 addWait(
Wait, SmemAccessCounter, 0);
2596 SLoadAddresses.
erase(It);
2599 unsigned AS = Memop->getAddrSpace();
2603 if (
TII.mayWriteLDSThroughDMA(
MI))
2607 unsigned TID = LDSDMA_BEGIN;
2608 if (Ptr && Memop->getAAInfo()) {
2609 const auto &LDSDMAStores = ScoreBrackets.getLDSDMAStores();
2610 for (
unsigned I = 0,
E = LDSDMAStores.size();
I !=
E; ++
I) {
2611 if (
MI.mayAlias(AA, *LDSDMAStores[
I],
true)) {
2612 if ((
I + 1) >= NUM_LDSDMA) {
2627 if (Memop->isStore()) {
2633 for (
const MachineOperand &
Op :
MI.operands()) {
2638 if (
Op.isTied() &&
Op.isUse() &&
TII.doesNotReadTiedSource(
MI))
2643 const bool IsVGPR =
TRI.isVectorRegister(MRI,
Op.getReg());
2650 if (
Op.isImplicit() &&
MI.mayLoadOrStore())
2662 if (
Op.isUse() || !updateVMCntOnly(
MI) ||
2663 ScoreBrackets.hasOtherPendingVmemTypes(
Reg, getVmemType(
MI)) ||
2664 ScoreBrackets.hasPointSamplePendingVmemTypes(
MI,
Reg) ||
2665 !
ST.hasVmemWriteVgprInOrder()) {
2670 ScoreBrackets.clearVgprVmemTypes(
Reg);
2673 if (
Op.isDef() || ScoreBrackets.hasPendingEvent(EXP_LDS_ACCESS)) {
2677 }
else if (
Op.getReg() == AMDGPU::SCC) {
2680 ScoreBrackets.determineWaitForPhysReg(SmemAccessCounter,
Reg,
Wait);
2683 if (
ST.hasWaitXcnt() &&
Op.isDef())
2702 if (
Opc == AMDGPU::S_BARRIER && !
ST.hasAutoWaitcntBeforeBarrier() &&
2703 !
ST.hasBackOffBarrier()) {
2704 Wait =
Wait.combined(WCG->getAllZeroWaitcnt(
true));
2711 ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
2716 ScoreBrackets.simplifyWaitcnt(
Wait);
2736 Wait = WCG->getAllZeroWaitcnt(
false);
2740 if (!ForceEmitWaitcnt[
T])
2745 if (FlushFlags.FlushVmCnt) {
2751 if (FlushFlags.FlushDsCnt && ScoreBrackets.hasPendingEvent(
AMDGPU::DS_CNT))
2757 return generateWaitcnt(
Wait,
MI.getIterator(), *
MI.getParent(), ScoreBrackets,
2761bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt
Wait,
2763 MachineBasicBlock &
Block,
2764 WaitcntBrackets &ScoreBrackets,
2765 MachineInstr *OldWaitcntInstr) {
2768 if (OldWaitcntInstr)
2772 WCG->applyPreexistingWaitcnt(ScoreBrackets, *OldWaitcntInstr,
Wait, It);
2777 MachineOperand *WaitExp =
TII.getNamedOperand(*It, AMDGPU::OpName::waitexp);
2787 <<
"Update Instr: " << *It);
2790 if (WCG->createNewWaitcnt(
Block, It,
Wait, ScoreBrackets))
2795 ScoreBrackets.applyWaitcnt(
Wait);
2800std::optional<WaitEventType>
2801SIInsertWaitcnts::getExpertSchedulingEventType(
const MachineInstr &Inst)
const {
2802 if (
TII.isVALU(Inst)) {
2807 if (
TII.isXDL(Inst))
2808 return VGPR_XDL_WRITE;
2810 if (
TII.isTRANS(Inst))
2811 return VGPR_TRANS_WRITE;
2814 return VGPR_DPMACC_WRITE;
2816 return VGPR_CSMACC_WRITE;
2823 if (
TII.isFLAT(Inst))
2824 return VGPR_FLAT_READ;
2827 return VGPR_LDS_READ;
2829 if (
TII.isVMEM(Inst) ||
TII.isVIMAGE(Inst) ||
TII.isVSAMPLE(Inst))
2830 return VGPR_VMEM_READ;
2837bool SIInsertWaitcnts::isVmemAccess(
const MachineInstr &
MI)
const {
2838 return (
TII.isFLAT(
MI) &&
TII.mayAccessVMEMThroughFlat(
MI)) ||
2845 MachineBasicBlock *
Block)
const {
2846 auto BlockEnd =
Block->getParent()->end();
2847 auto BlockIter =
Block->getIterator();
2851 if (++BlockIter != BlockEnd) {
2852 It = BlockIter->instr_begin();
2859 if (!It->isMetaInstruction())
2867 return It->getOpcode() == AMDGPU::S_ENDPGM;
2871bool SIInsertWaitcnts::insertForcedWaitAfter(MachineInstr &Inst,
2872 MachineBasicBlock &
Block,
2873 WaitcntBrackets &ScoreBrackets) {
2874 AMDGPU::Waitcnt
Wait;
2875 bool NeedsEndPGMCheck =
false;
2883 NeedsEndPGMCheck =
true;
2886 ScoreBrackets.simplifyWaitcnt(
Wait);
2889 bool Result = generateWaitcnt(
Wait, SuccessorIt,
Block, ScoreBrackets,
2892 if (Result && NeedsEndPGMCheck && isNextENDPGM(SuccessorIt, &
Block)) {
2900WaitEventSet SIInsertWaitcnts::getEventsFor(
const MachineInstr &Inst)
const {
2901 WaitEventSet Events;
2903 if (
const auto ET = getExpertSchedulingEventType(Inst))
2907 if (
TII.isDS(Inst) &&
TII.usesLGKM_CNT(Inst)) {
2909 TII.hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
2910 Events.insert(GDS_ACCESS);
2911 Events.insert(GDS_GPR_LOCK);
2913 Events.insert(LDS_ACCESS);
2915 }
else if (
TII.isFLAT(Inst)) {
2917 Events.insert(getVmemWaitEventType(Inst));
2920 if (
TII.mayAccessVMEMThroughFlat(Inst)) {
2921 if (
ST.hasWaitXcnt())
2922 Events.insert(VMEM_GROUP);
2923 Events.insert(getVmemWaitEventType(Inst));
2925 if (
TII.mayAccessLDSThroughFlat(Inst))
2926 Events.insert(LDS_ACCESS);
2930 Inst.
getOpcode() == AMDGPU::BUFFER_WBL2)) {
2934 if (
ST.hasWaitXcnt())
2935 Events.insert(VMEM_GROUP);
2936 Events.insert(getVmemWaitEventType(Inst));
2937 if (
ST.vmemWriteNeedsExpWaitcnt() &&
2939 Events.insert(VMW_GPR_LOCK);
2941 }
else if (
TII.isSMRD(Inst)) {
2942 if (
ST.hasWaitXcnt())
2943 Events.insert(SMEM_GROUP);
2944 Events.insert(SMEM_ACCESS);
2946 Events.insert(EXP_LDS_ACCESS);
2948 unsigned Imm =
TII.getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
2950 Events.insert(EXP_PARAM_ACCESS);
2952 Events.insert(EXP_POS_ACCESS);
2954 Events.insert(EXP_GPR_LOCK);
2956 Events.insert(SCC_WRITE);
2959 case AMDGPU::S_SENDMSG:
2960 case AMDGPU::S_SENDMSG_RTN_B32:
2961 case AMDGPU::S_SENDMSG_RTN_B64:
2962 case AMDGPU::S_SENDMSGHALT:
2963 Events.insert(SQ_MESSAGE);
2965 case AMDGPU::S_MEMTIME:
2966 case AMDGPU::S_MEMREALTIME:
2967 case AMDGPU::S_GET_BARRIER_STATE_M0:
2968 case AMDGPU::S_GET_BARRIER_STATE_IMM:
2969 Events.insert(SMEM_ACCESS);
2976void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
2977 WaitcntBrackets *ScoreBrackets) {
2979 WaitEventSet InstEvents = getEventsFor(Inst);
2980 for (WaitEventType
E : wait_events()) {
2981 if (InstEvents.contains(
E))
2982 ScoreBrackets->updateByEvent(
E, Inst);
2985 if (
TII.isDS(Inst) &&
TII.usesLGKM_CNT(Inst)) {
2987 TII.hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
2988 ScoreBrackets->setPendingGDS();
2990 }
else if (
TII.isFLAT(Inst)) {
2998 ScoreBrackets->setPendingFlat();
3001 ScoreBrackets->updateByEvent(ASYNC_ACCESS, Inst);
3003 }
else if (Inst.
isCall()) {
3006 ScoreBrackets->applyWaitcnt(WCG->getAllZeroWaitcnt(
false));
3007 ScoreBrackets->setStateOnFunctionEntryOrReturn();
3008 }
else if (
TII.isVINTERP(Inst)) {
3009 int64_t
Imm =
TII.getNamedOperand(Inst, AMDGPU::OpName::waitexp)->getImm();
3014bool WaitcntBrackets::mergeScore(
const MergeInfo &M,
unsigned &Score,
3015 unsigned OtherScore) {
3016 unsigned MyShifted = Score <=
M.OldLB ? 0 : Score +
M.MyShift;
3017 unsigned OtherShifted =
3018 OtherScore <=
M.OtherLB ? 0 : OtherScore +
M.OtherShift;
3019 Score = std::max(MyShifted, OtherShifted);
3020 return OtherShifted > MyShifted;
3025 bool StrictDom =
false;
3029 if (AsyncMarks.empty() && OtherMarks.
empty()) {
3036 auto MaxSize = (unsigned)std::max(AsyncMarks.size(), OtherMarks.
size());
3037 MaxSize = std::min(MaxSize, MaxAsyncMarks);
3040 if (AsyncMarks.size() > MaxSize)
3041 AsyncMarks.erase(AsyncMarks.begin(),
3042 AsyncMarks.begin() + (AsyncMarks.size() - MaxSize));
3048 constexpr CounterValueArray ZeroMark{};
3049 AsyncMarks.insert(AsyncMarks.begin(), MaxSize - AsyncMarks.size(), ZeroMark);
3052 dbgs() <<
"Before merge:\n";
3053 for (
const auto &Mark : AsyncMarks) {
3057 dbgs() <<
"Other marks:\n";
3058 for (
const auto &Mark : OtherMarks) {
3067 unsigned OtherSize = OtherMarks.size();
3068 unsigned OurSize = AsyncMarks.size();
3069 unsigned MergeCount = std::min(OtherSize, OurSize);
3072 StrictDom |= mergeScore(MergeInfos[
T], AsyncMarks[OurSize - Idx][
T],
3073 OtherMarks[OtherSize - Idx][
T]);
3078 dbgs() <<
"After merge:\n";
3079 for (
const auto &Mark : AsyncMarks) {
3093bool WaitcntBrackets::merge(
const WaitcntBrackets &
Other) {
3094 bool StrictDom =
false;
3098 for (
auto K :
Other.VMem.keys())
3099 VMem.try_emplace(K);
3100 for (
auto K :
Other.SGPRs.keys())
3101 SGPRs.try_emplace(K);
3108 const WaitEventSet &EventsForT =
Context->getWaitEvents(
T);
3109 const WaitEventSet OldEvents = PendingEvents & EventsForT;
3110 const WaitEventSet OtherEvents =
Other.PendingEvents & EventsForT;
3111 if (!OldEvents.contains(OtherEvents))
3113 PendingEvents |= OtherEvents;
3116 const unsigned MyPending = ScoreUBs[
T] - ScoreLBs[
T];
3117 const unsigned OtherPending =
Other.ScoreUBs[
T] -
Other.ScoreLBs[
T];
3118 const unsigned NewUB = ScoreLBs[
T] + std::max(MyPending, OtherPending);
3119 if (NewUB < ScoreLBs[
T])
3122 MergeInfo &
M = MergeInfos[
T];
3123 M.OldLB = ScoreLBs[
T];
3124 M.OtherLB =
Other.ScoreLBs[
T];
3125 M.MyShift = NewUB - ScoreUBs[
T];
3126 M.OtherShift = NewUB -
Other.ScoreUBs[
T];
3128 ScoreUBs[
T] = NewUB;
3131 StrictDom |= mergeScore(M, LastFlatLoadCnt,
Other.LastFlatLoadCnt);
3134 StrictDom |= mergeScore(M, LastFlatDsCnt,
Other.LastFlatDsCnt);
3135 StrictDom |= mergeScore(M, LastGDS,
Other.LastGDS);
3139 StrictDom |= mergeScore(M, SCCScore,
Other.SCCScore);
3140 if (
Other.hasPendingEvent(SCC_WRITE)) {
3141 if (!OldEvents.contains(SCC_WRITE)) {
3142 PendingSCCWrite =
Other.PendingSCCWrite;
3143 }
else if (PendingSCCWrite !=
Other.PendingSCCWrite) {
3144 PendingSCCWrite =
nullptr;
3149 for (
auto &[RegID, Info] : VMem)
3150 StrictDom |= mergeScore(M,
Info.Scores[
T],
Other.getVMemScore(RegID,
T));
3152 if (isSmemCounter(
T)) {
3153 for (
auto &[RegID, Info] : SGPRs) {
3154 auto It =
Other.SGPRs.find(RegID);
3155 unsigned OtherScore = (It !=
Other.SGPRs.end()) ? It->second.get(
T) : 0;
3156 StrictDom |= mergeScore(M,
Info.get(
T), OtherScore);
3161 for (
auto &[TID, Info] : VMem) {
3162 if (
auto It =
Other.VMem.find(TID); It !=
Other.VMem.end()) {
3163 unsigned char NewVmemTypes =
Info.VMEMTypes | It->second.VMEMTypes;
3164 StrictDom |= NewVmemTypes !=
Info.VMEMTypes;
3165 Info.VMEMTypes = NewVmemTypes;
3169 StrictDom |= mergeAsyncMarks(MergeInfos,
Other.AsyncMarks);
3171 StrictDom |= mergeScore(MergeInfos[
T], AsyncScore[
T],
Other.AsyncScore[
T]);
3173 purgeEmptyTrackingData();
3179 return Opcode == AMDGPU::S_WAITCNT ||
3182 Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT ||
3183 Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT ||
3184 Opcode == AMDGPU::S_WAITCNT_lds_direct ||
3185 Opcode == AMDGPU::WAIT_ASYNCMARK ||
3189void SIInsertWaitcnts::setSchedulingMode(MachineBasicBlock &
MBB,
3191 bool ExpertMode)
const {
3195 .
addImm(ExpertMode ? 2 : 0)
3213class VCCZWorkaround {
3214 const WaitcntBrackets &ScoreBrackets;
3215 const GCNSubtarget &
ST;
3216 const SIInstrInfo &
TII;
3217 const SIRegisterInfo &
TRI;
3218 bool VCCZCorruptionBug =
false;
3219 bool VCCZNotUpdatedByPartialWrites =
false;
3222 bool MustRecomputeVCCZ =
true;
3225 VCCZWorkaround(
const WaitcntBrackets &ScoreBrackets,
const GCNSubtarget &ST,
3226 const SIInstrInfo &
TII,
const SIRegisterInfo &
TRI)
3228 VCCZCorruptionBug =
ST.hasReadVCCZBug();
3229 VCCZNotUpdatedByPartialWrites = !
ST.partialVCCWritesUpdateVCCZ();
3236 bool tryRecomputeVCCZ(MachineInstr &
MI) {
3238 if (!VCCZCorruptionBug && !VCCZNotUpdatedByPartialWrites)
3248 MustRecomputeVCCZ |= VCCZCorruptionBug &&
TII.isSMRD(
MI);
3254 std::optional<bool> PartiallyWritesToVCCOpt;
3255 auto PartiallyWritesToVCC = [](MachineInstr &
MI) {
3256 return MI.definesRegister(AMDGPU::VCC_LO,
nullptr) ||
3257 MI.definesRegister(AMDGPU::VCC_HI,
nullptr);
3259 if (VCCZNotUpdatedByPartialWrites) {
3260 PartiallyWritesToVCCOpt = PartiallyWritesToVCC(
MI);
3263 MustRecomputeVCCZ |= *PartiallyWritesToVCCOpt;
3269 if (!ScoreBrackets.hasPendingEvent(SMEM_ACCESS) || !VCCZCorruptionBug) {
3271 if (!PartiallyWritesToVCCOpt)
3272 PartiallyWritesToVCCOpt = PartiallyWritesToVCC(
MI);
3273 bool FullyWritesToVCC = !*PartiallyWritesToVCCOpt &&
3274 MI.definesRegister(AMDGPU::VCC,
nullptr);
3277 bool UpdatesVCCZ = FullyWritesToVCC || (!VCCZNotUpdatedByPartialWrites &&
3278 *PartiallyWritesToVCCOpt);
3280 MustRecomputeVCCZ =
false;
3290 TII.get(
ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
3293 MustRecomputeVCCZ =
false;
3303bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
3304 MachineBasicBlock &
Block,
3305 WaitcntBrackets &ScoreBrackets) {
3309 dbgs() <<
"*** Begin Block: ";
3311 ScoreBrackets.dump();
3313 VCCZWorkaround VCCZW(ScoreBrackets, ST,
TII,
TRI);
3316 MachineInstr *OldWaitcntInstr =
nullptr;
3321 Iter !=
E; ++Iter) {
3322 MachineInstr &Inst = *Iter;
3323 if (isNonWaitcntMetaInst(Inst))
3328 (IsExpertMode && Inst.
getOpcode() == AMDGPU::S_WAITCNT_DEPCTR)) {
3329 if (!OldWaitcntInstr)
3330 OldWaitcntInstr = &Inst;
3334 PreheaderFlushFlags FlushFlags;
3335 if (
Block.getFirstTerminator() == Inst)
3336 FlushFlags = isPreheaderToFlush(
Block, ScoreBrackets);
3339 Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr,
3341 OldWaitcntInstr =
nullptr;
3343 if (Inst.
getOpcode() == AMDGPU::ASYNCMARK) {
3347 ScoreBrackets.recordAsyncMark(Inst);
3351 if (
TII.isSMRD(Inst)) {
3352 for (
const MachineMemOperand *Memop : Inst.
memoperands()) {
3355 if (!Memop->isInvariant()) {
3356 const Value *Ptr = Memop->getValue();
3362 updateEventWaitcntAfter(Inst, &ScoreBrackets);
3366 Modified |= insertForcedWaitAfter(Inst,
Block, ScoreBrackets);
3370 ScoreBrackets.dump();
3375 Modified |= VCCZW.tryRecomputeVCCZ(Inst);
3380 AMDGPU::Waitcnt
Wait;
3381 if (
Block.getFirstTerminator() ==
Block.end()) {
3382 PreheaderFlushFlags FlushFlags = isPreheaderToFlush(
Block, ScoreBrackets);
3383 if (FlushFlags.FlushVmCnt) {
3391 if (FlushFlags.FlushDsCnt && ScoreBrackets.hasPendingEvent(
AMDGPU::DS_CNT))
3400 dbgs() <<
"*** End Block: ";
3402 ScoreBrackets.dump();
3408bool SIInsertWaitcnts::removeRedundantSoftXcnts(MachineBasicBlock &
Block) {
3409 if (
Block.size() <= 1)
3417 MachineInstr *LastAtomicWithSoftXcnt =
nullptr;
3423 if (!IsLDS && (
MI.mayLoad() ^
MI.mayStore()))
3424 LastAtomicWithSoftXcnt =
nullptr;
3427 MI.mayLoad() &&
MI.mayStore();
3428 MachineInstr &PrevMI = *
MI.getPrevNode();
3430 if (PrevMI.
getOpcode() == AMDGPU::S_WAIT_XCNT_soft && IsAtomicRMW) {
3433 if (LastAtomicWithSoftXcnt) {
3437 LastAtomicWithSoftXcnt = &
MI;
3445SIInsertWaitcnts::isPreheaderToFlush(MachineBasicBlock &
MBB,
3446 const WaitcntBrackets &ScoreBrackets) {
3447 auto [Iterator, IsInserted] =
3450 return Iterator->second;
3454 return PreheaderFlushFlags();
3458 return PreheaderFlushFlags();
3461 Iterator->second = getPreheaderFlushFlags(Loop, ScoreBrackets);
3462 return Iterator->second;
3465 return PreheaderFlushFlags();
3468bool SIInsertWaitcnts::isVMEMOrFlatVMEM(
const MachineInstr &
MI)
const {
3470 return TII.mayAccessVMEMThroughFlat(
MI);
3474bool SIInsertWaitcnts::isDSRead(
const MachineInstr &
MI)
const {
3480bool SIInsertWaitcnts::mayStoreIncrementingDSCNT(
const MachineInstr &
MI)
const {
3509SIInsertWaitcnts::getPreheaderFlushFlags(MachineLoop *
ML,
3510 const WaitcntBrackets &Brackets) {
3511 PreheaderFlushFlags
Flags;
3512 bool HasVMemLoad =
false;
3513 bool HasVMemStore =
false;
3514 bool UsesVgprVMEMLoadedOutside =
false;
3515 bool UsesVgprDSReadOutside =
false;
3516 bool VMemInvalidated =
false;
3520 bool TrackSimpleDSOpt =
ST.hasExtendedWaitCounts();
3521 DenseSet<MCRegUnit> VgprUse;
3522 DenseSet<MCRegUnit> VgprDefVMEM;
3523 DenseSet<MCRegUnit> VgprDefDS;
3529 DenseMap<MCRegUnit, unsigned> LastDSReadPositionMap;
3530 unsigned DSReadPosition = 0;
3531 bool IsSingleBlock =
ML->getNumBlocks() == 1;
3532 bool TrackDSFlushPoint =
ST.hasExtendedWaitCounts() && IsSingleBlock;
3533 unsigned LastDSFlushPosition = 0;
3535 for (MachineBasicBlock *
MBB :
ML->blocks()) {
3536 for (MachineInstr &
MI : *
MBB) {
3537 if (isVMEMOrFlatVMEM(
MI)) {
3538 HasVMemLoad |=
MI.mayLoad();
3539 HasVMemStore |=
MI.mayStore();
3543 if (mayStoreIncrementingDSCNT(
MI)) {
3546 if (VMemInvalidated)
3548 TrackSimpleDSOpt =
false;
3549 TrackDSFlushPoint =
false;
3551 bool IsDSRead = isDSRead(
MI);
3556 auto updateDSReadFlushTracking = [&](MCRegUnit RU) {
3557 if (!TrackDSFlushPoint)
3559 if (
auto It = LastDSReadPositionMap.
find(RU);
3560 It != LastDSReadPositionMap.
end()) {
3564 LastDSFlushPosition = std::max(LastDSFlushPosition, It->second);
3568 for (
const MachineOperand &
Op :
MI.all_uses()) {
3569 if (
Op.isDebug() || !
TRI.isVectorRegister(MRI,
Op.getReg()))
3572 for (MCRegUnit RU :
TRI.regunits(
Op.getReg().asMCReg())) {
3576 VMemInvalidated =
true;
3580 TrackSimpleDSOpt =
false;
3583 if (VMemInvalidated && !TrackSimpleDSOpt && !TrackDSFlushPoint)
3587 updateDSReadFlushTracking(RU);
3592 VMEMID
ID = toVMEMID(RU);
3596 UsesVgprVMEMLoadedOutside =
true;
3601 UsesVgprDSReadOutside =
true;
3606 if (isVMEMOrFlatVMEM(
MI) &&
MI.mayLoad()) {
3607 for (
const MachineOperand &
Op :
MI.all_defs()) {
3608 for (MCRegUnit RU :
TRI.regunits(
Op.getReg().asMCReg())) {
3612 VMemInvalidated =
true;
3617 if (VMemInvalidated && !TrackSimpleDSOpt && !TrackDSFlushPoint)
3628 if (IsDSRead || TrackDSFlushPoint) {
3629 for (
const MachineOperand &
Op :
MI.all_defs()) {
3630 if (!
TRI.isVectorRegister(MRI,
Op.getReg()))
3632 for (MCRegUnit RU :
TRI.regunits(
Op.getReg().asMCReg())) {
3635 updateDSReadFlushTracking(RU);
3638 if (TrackDSFlushPoint)
3639 LastDSReadPositionMap[RU] = DSReadPosition;
3648 if (!VMemInvalidated && UsesVgprVMEMLoadedOutside &&
3649 ((!
ST.hasVscnt() && HasVMemStore && !HasVMemLoad) ||
3650 (HasVMemLoad &&
ST.hasVmemWriteVgprInOrder())))
3651 Flags.FlushVmCnt =
true;
3657 bool SimpleDSOpt = TrackSimpleDSOpt && UsesVgprDSReadOutside;
3660 bool HasUnflushedDSReads = DSReadPosition > LastDSFlushPosition;
3661 bool DSFlushPointPrefetch =
3662 TrackDSFlushPoint && UsesVgprDSReadOutside && HasUnflushedDSReads;
3664 if (SimpleDSOpt || DSFlushPointPrefetch)
3665 Flags.FlushDsCnt =
true;
3670bool SIInsertWaitcntsLegacy::runOnMachineFunction(MachineFunction &MF) {
3671 auto &MLI = getAnalysis<MachineLoopInfoWrapperPass>().getLI();
3673 getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
3675 if (
auto *AAR = getAnalysisIfAvailable<AAResultsWrapperPass>())
3676 AA = &AAR->getAAResults();
3678 return SIInsertWaitcnts(MLI, PDT, AA, MF).run();
3690 if (!SIInsertWaitcnts(MLI, PDT,
AA, MF).
run())
3695 .preserve<AAManager>();
3698bool SIInsertWaitcnts::run() {
3706 if (ST.hasExtendedWaitCounts()) {
3707 IsExpertMode = ST.hasExpertSchedulingMode() &&
3716 WCG = std::make_unique<WaitcntGeneratorGFX12Plus>(MF, MaxCounter, Limits,
3721 WCG = std::make_unique<WaitcntGeneratorPreGFX12>(
3725 SmemAccessCounter = getCounterFromEvent(SMEM_ACCESS);
3729 MachineBasicBlock &EntryBB = MF.
front();
3740 while (
I != EntryBB.
end() &&
I->isMetaInstruction())
3743 if (
ST.hasExtendedWaitCounts()) {
3752 if (!
ST.hasImageInsts() &&
3758 TII.get(instrsForExtendedCounterTypes[CT]))
3771 auto NonKernelInitialState = std::make_unique<WaitcntBrackets>(
this);
3772 NonKernelInitialState->setStateOnFunctionEntryOrReturn();
3773 BlockInfos[&EntryBB].Incoming = std::move(NonKernelInitialState);
3780 for (
auto *
MBB : ReversePostOrderTraversal<MachineFunction *>(&MF))
3783 std::unique_ptr<WaitcntBrackets> Brackets;
3788 for (
auto BII = BlockInfos.
begin(), BIE = BlockInfos.
end(); BII != BIE;
3790 MachineBasicBlock *
MBB = BII->first;
3791 BlockInfo &BI = BII->second;
3797 Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
3799 *Brackets = *BI.Incoming;
3802 Brackets = std::make_unique<WaitcntBrackets>(
this);
3807 Brackets->~WaitcntBrackets();
3808 new (Brackets.get()) WaitcntBrackets(
this);
3812 if (
ST.hasWaitXcnt())
3814 Modified |= insertWaitcntInBlock(MF, *
MBB, *Brackets);
3817 if (Brackets->hasPendingEvent()) {
3818 BlockInfo *MoveBracketsToSucc =
nullptr;
3820 auto *SuccBII = BlockInfos.
find(Succ);
3821 BlockInfo &SuccBI = SuccBII->second;
3822 if (!SuccBI.Incoming) {
3823 SuccBI.Dirty =
true;
3824 if (SuccBII <= BII) {
3828 if (!MoveBracketsToSucc) {
3829 MoveBracketsToSucc = &SuccBI;
3831 SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
3835 dbgs() <<
"Try to merge ";
3841 if (SuccBI.Incoming->merge(*Brackets)) {
3842 SuccBI.Dirty =
true;
3843 if (SuccBII <= BII) {
3850 if (MoveBracketsToSucc)
3851 MoveBracketsToSucc->Incoming = std::move(Brackets);
3856 if (
ST.hasScalarStores()) {
3857 SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;
3858 bool HaveScalarStores =
false;
3860 for (MachineBasicBlock &
MBB : MF) {
3861 for (MachineInstr &
MI :
MBB) {
3862 if (!HaveScalarStores &&
TII.isScalarStore(
MI))
3863 HaveScalarStores =
true;
3865 if (
MI.getOpcode() == AMDGPU::S_ENDPGM ||
3866 MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
3871 if (HaveScalarStores) {
3880 for (MachineBasicBlock *
MBB : EndPgmBlocks) {
3881 bool SeenDCacheWB =
false;
3885 if (
I->getOpcode() == AMDGPU::S_DCACHE_WB)
3886 SeenDCacheWB =
true;
3887 else if (
TII.isScalarStore(*
I))
3888 SeenDCacheWB =
false;
3891 if ((
I->getOpcode() == AMDGPU::S_ENDPGM ||
3892 I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
3908 while (
I != EntryBB.
end() &&
I->isMetaInstruction())
3910 setSchedulingMode(EntryBB,
I,
true);
3912 for (MachineInstr *
MI : CallInsts) {
3913 MachineBasicBlock &
MBB = *
MI->getParent();
3914 setSchedulingMode(
MBB,
MI,
false);
3915 setSchedulingMode(
MBB, std::next(
MI->getIterator()),
true);
3918 for (MachineInstr *
MI : ReturnInsts)
3919 setSchedulingMode(*
MI->getParent(),
MI,
false);
3930 for (
auto [
MI,
_] : EndPgmInsts) {
3932 TII.get(AMDGPU::S_ALLOC_VGPR))
3936 }
else if (!WCG->isOptNone() &&
3937 ST.getGeneration() >= AMDGPUSubtarget::GFX11 &&
3938 (MF.getFrameInfo().hasCalls() ||
3939 ST.getOccupancyWithNumVGPRs(
3940 TRI.getNumUsedPhysRegs(MRI, AMDGPU::VGPR_32RegClass),
3943 for (
auto [
MI, Flag] : EndPgmInsts) {
3945 if (
ST.requiresNopBeforeDeallocVGPRs()) {
3947 TII.get(AMDGPU::S_NOP))
3951 TII.get(AMDGPU::S_SENDMSG))