#define DEBUG_TYPE "si-insert-waitcnts"

DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE "-forceexp",
              "Force emit s_waitcnt expcnt(0) instrs");
DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE "-forcelgkm",
              "Force emit s_waitcnt lgkmcnt(0) instrs");
DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE "-forcevm",
              "Force emit s_waitcnt vmcnt(0) instrs");

static cl::opt<bool>
    ForceEmitZeroFlag("amdgpu-waitcnt-forcezero",
                      cl::desc("Force all waitcnt instrs to be emitted as "
                               "s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
                      cl::init(false), cl::Hidden);

static cl::opt<bool> ForceEmitZeroLoadFlag(
    "amdgpu-waitcnt-load-forcezero",
    cl::desc("Force all waitcnt load counters to wait until 0"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> ExpertSchedulingModeFlag(
    "amdgpu-expert-scheduling-mode",
    cl::desc("Enable expert scheduling mode 2 for all functions (GFX12+ only)"),
    cl::init(false), cl::Hidden);
namespace {

// Tracking IDs: register units and LDS-DMA slots share one key space.
using VMEMID = unsigned;

enum : unsigned {
  TRACKINGID_RANGE_LEN = (1 << 16),

  REGUNITS_BEGIN = 0,
  REGUNITS_END = REGUNITS_BEGIN + TRACKINGID_RANGE_LEN,

  NUM_LDSDMA = TRACKINGID_RANGE_LEN,
  LDSDMA_BEGIN = REGUNITS_END,
  LDSDMA_END = LDSDMA_BEGIN + NUM_LDSDMA,
};

static constexpr VMEMID toVMEMID(MCRegUnit RU) {
  return static_cast<unsigned>(RU);
}
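// Layout sketch (an illustration added here, not original commentary): the
// VMEMID key space is split into two contiguous ranges so a single map can
// track scores for both kinds of entity:
//
//   [REGUNITS_BEGIN, REGUNITS_END)  = [0, 65536)       register units
//   [LDSDMA_BEGIN,   LDSDMA_END)    = [65536, 131072)  LDS-DMA tracking slots
//
// toVMEMID(RU) always lands in the first range, while LDSDMA_BEGIN + Slot
// addresses the second.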
// The ordering of the first and last entries is known from the listing; the
// middle of the list was elided in the extraction and is restored from the
// names used elsewhere in this file.
#define AMDGPU_DECLARE_WAIT_EVENTS(DECL)                                       \
  DECL(VMEM_ACCESS)              /* vmem read & atomicret */                   \
  DECL(VMEM_SAMPLER_READ_ACCESS) /* vmem SAMPLER read */                       \
  DECL(VMEM_BVH_READ_ACCESS)     /* vmem BVH read */                           \
  DECL(GLOBAL_INV_ACCESS)        /* cache invalidate */                        \
  DECL(VMEM_WRITE_ACCESS)        /* vmem write that is not scratch */          \
  DECL(SCRATCH_WRITE_ACCESS)     /* vmem write that may be scratch */          \
  DECL(VMEM_GROUP)               /* vmem group */                              \
  DECL(LDS_ACCESS)               /* lds read & write */                        \
  DECL(GDS_ACCESS)               /* gds read & write */                        \
  DECL(SQ_MESSAGE)               /* send message */                            \
  DECL(SMEM_ACCESS)              /* scalar-memory read & write */              \
  DECL(SMEM_GROUP)               /* scalar-memory group */                     \
  DECL(EXP_GPR_LOCK)             /* export holding on its data src */          \
  DECL(GDS_GPR_LOCK)             /* GDS holding on its data and addr src */    \
  DECL(VMW_GPR_LOCK)             /* vmem write holding on its data src */      \
  DECL(EXP_POS_ACCESS)           /* write to export position */               \
  DECL(EXP_PARAM_ACCESS)         /* write to export parameter */              \
  DECL(EXP_LDS_ACCESS)           /* read by ldsdir counting as export */      \
  DECL(SCC_WRITE)                /* SCC write from barrier signal */          \
  DECL(ASYNC_ACCESS)             /* async/tensor memory operation */          \
  DECL(VGPR_CSMACC_WRITE)        /* expert mode: core/side MACC VGPR write */ \
  DECL(VGPR_DPMACC_WRITE)        /* expert mode: DP MACC VGPR write */        \
  DECL(VGPR_TRANS_WRITE)         /* expert mode: transcendental VGPR write */ \
  DECL(VGPR_XDL_WRITE)           /* expert mode: XDL VGPR write */            \
  DECL(VGPR_LDS_READ)            /* expert mode: VGPR read by LDS */          \
  DECL(VGPR_FLAT_READ)           /* expert mode: VGPR read by FLAT */         \
  DECL(VGPR_VMEM_READ)           /* expert mode: VGPR read by VMEM */

enum WaitEventType {
#define AMDGPU_EVENT_ENUM(Name) Name,
  AMDGPU_DECLARE_WAIT_EVENTS(AMDGPU_EVENT_ENUM)
#undef AMDGPU_EVENT_ENUM
  NUM_WAIT_EVENTS
};

// Iterate over the wait event types, starting at VMEM_ACCESS and ending
// before MaxEvent.
auto wait_events(WaitEventType MaxEvent = NUM_WAIT_EVENTS) {
  return enum_seq(VMEM_ACCESS, MaxEvent);
}

static constexpr StringLiteral WaitEventTypeName[] = {
#define AMDGPU_EVENT_NAME(Name) #Name,
    AMDGPU_DECLARE_WAIT_EVENTS(AMDGPU_EVENT_NAME)
#undef AMDGPU_EVENT_NAME
};

static constexpr StringLiteral getWaitEventTypeName(WaitEventType Event) {
  return WaitEventTypeName[Event];
}
// Opcodes of the gfx12+ single-counter wait instructions, indexed by
// InstCounterType.
static constexpr unsigned
    instrsForExtendedCounterTypes[NUM_EXTENDED_INST_CNTS] = {
        AMDGPU::S_WAIT_LOADCNT,  AMDGPU::S_WAIT_DSCNT,
        AMDGPU::S_WAIT_EXPCNT,   AMDGPU::S_WAIT_STORECNT,
        AMDGPU::S_WAIT_SAMPLECNT, AMDGPU::S_WAIT_BVHCNT,
        AMDGPU::S_WAIT_KMCNT,    AMDGPU::S_WAIT_XCNT,
        AMDGPU::S_WAIT_ASYNCCNT};
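// Indexing note (an inference from the table above, not original
// commentary): the array is indexed by InstCounterType, e.g.
// instrsForExtendedCounterTypes[LOAD_CNT] == AMDGPU::S_WAIT_LOADCNT. The
// lookup TII.get(instrsForExtendedCounterTypes[CT]) used later in this file
// relies on the array order staying in sync with the counter enum.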
// Classify a VMEM instruction into its VmemType bucket. The interior of this
// function was partially elided in the listing; the image/BVH/sampler
// classification below follows the standard structure of this routine.
static VmemType getVmemType(const MachineInstr &Inst) {
  assert(updateVMCntOnly(Inst));
  if (!SIInstrInfo::isImage(Inst))
    return VMEM_NOSAMPLER;
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Inst.getOpcode());
  const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo =
      AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
  if (BaseInfo->BVH)
    return VMEM_BVH;
  if (BaseInfo->Sampler || BaseInfo->MSAA)
    return VMEM_SAMPLER;
  return VMEM_NOSAMPLER;
}
// A set of wait event types, stored as a bitmask over WaitEventType.
class WaitEventSet {
public:
  WaitEventSet() = default;
  explicit constexpr WaitEventSet(WaitEventType Event) {
    static_assert(NUM_WAIT_EVENTS <= sizeof(Mask) * 8,
                  "Not enough bits in Mask for all the events");
    Mask = 1 << Event;
  }
  constexpr WaitEventSet(std::initializer_list<WaitEventType> Events) {
    for (auto &E : Events)
      Mask |= 1 << E;
  }

  void insert(const WaitEventType &Event) { Mask |= 1 << Event; }
  void remove(const WaitEventType &Event) { Mask &= ~(1 << Event); }
  void remove(const WaitEventSet &Other) { Mask &= ~Other.Mask; }
  bool contains(const WaitEventType &Event) const {
    return Mask & (1 << Event);
  }

  // Returns true if every event in \p Other is also in this set.
  bool contains(const WaitEventSet &Other) const {
    return (~Mask & Other.Mask) == 0;
  }

  WaitEventSet operator&(const WaitEventSet &Other) const {
    WaitEventSet Result;
    Result.Mask = Mask & Other.Mask;
    return Result;
  }
  WaitEventSet &operator|=(const WaitEventSet &Other) {
    Mask |= Other.Mask;
    return *this;
  }
  bool operator==(const WaitEventSet &Other) const {
    return Mask == Other.Mask;
  }

  bool empty() const { return Mask == 0; }
  // Returns true if two or more bits are set.
  bool twoOrMore() const { return Mask & (Mask - 1); }
  operator bool() const { return !empty(); }

  void print(raw_ostream &OS) const {
    ListSeparator LS(", ");
    for (WaitEventType Event : wait_events()) {
      if (contains(Event))
        OS << LS << getWaitEventTypeName(Event);
    }
  }
  LLVM_DUMP_METHOD void dump() const;

private:
  unsigned Mask = 0;
};

LLVM_DUMP_METHOD void WaitEventSet::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
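// Usage sketch (illustrative only, not part of the pass): because the set is
// a single bitmask, membership and "more than one pending event" queries are
// O(1) bit tricks:
//
//   WaitEventSet S({VMEM_ACCESS, LDS_ACCESS});
//   assert(S.contains(VMEM_ACCESS));
//   assert(S.twoOrMore());        // Mask & (Mask - 1): clears lowest set bit
//   S.remove(LDS_ACCESS);
//   assert(!S.twoOrMore());       // exactly one bit remains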
class WaitcntBrackets;

// Abstracts the logic for generating and updating S_WAIT* instructions away
// from the analysis that determines where they are needed. The set of
// counters changed substantially with gfx12, and this abstraction keeps the
// main analysis simpler.
class WaitcntGenerator {
protected:
  const GCNSubtarget &ST;
  const SIInstrInfo &TII;
  AMDGPU::IsaVersion IV;
  InstCounterType MaxCounter;
  bool OptNone;
  bool ExpandWaitcntProfiling = false;
  const AMDGPU::HardwareLimits &Limits;

public:
  WaitcntGenerator() = delete;
  WaitcntGenerator(const WaitcntGenerator &) = delete;
  WaitcntGenerator(const MachineFunction &MF, InstCounterType MaxCounter,
                   const AMDGPU::HardwareLimits &Limits)
      : ST(MF.getSubtarget<GCNSubtarget>()), TII(*ST.getInstrInfo()),
        IV(AMDGPU::getIsaVersion(ST.getCPU())), MaxCounter(MaxCounter),
        OptNone(MF.getFunction().hasOptNone() ||
                MF.getTarget().getOptLevel() == CodeGenOptLevel::None),
        ExpandWaitcntProfiling(
            MF.getFunction().hasFnAttribute("amdgpu-expand-waitcnt-profiling")),
        Limits(Limits) {}

  // Return true if the current function should be compiled with no
  // optimization.
  bool isOptNone() const { return OptNone; }

  const AMDGPU::HardwareLimits &getLimits() const { return Limits; }

  // Edits an existing sequence of wait count instructions according to an
  // incoming Waitcnt value, which is itself updated to reflect any new wait
  // count instructions that may need to be generated by createNewWaitcnt().
  virtual bool
  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                          MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,
                          MachineBasicBlock::instr_iterator It) const = 0;

  // Transform a soft waitcnt into a normal one.
  bool promoteSoftWaitCnt(MachineInstr *Waitcnt) const;

  // Generate new wait count instructions according to the value of Wait,
  // returning true if anything was created.
  virtual bool createNewWaitcnt(MachineBasicBlock &Block,
                                MachineBasicBlock::instr_iterator It,
                                AMDGPU::Waitcnt Wait,
                                const WaitcntBrackets &ScoreBrackets) = 0;

  // Returns the set of events waited on by counter T.
  virtual const WaitEventSet &getWaitEvents(InstCounterType T) const = 0;

  // Returns the counter that waits on event E.
  InstCounterType getCounterFromEvent(WaitEventType E) const {
    for (auto T : inst_counter_types(MaxCounter))
      if (getWaitEvents(T).contains(E))
        return T;
    llvm_unreachable("event is not covered by any counter");
  }

  virtual AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const = 0;

  virtual ~WaitcntGenerator() = default;
};
class WaitcntGeneratorPreGFX12 final : public WaitcntGenerator {
  static constexpr const WaitEventSet
      WaitEventMaskForInstPreGFX12[NUM_INST_CNTS] = {
          WaitEventSet(
              {VMEM_ACCESS, VMEM_SAMPLER_READ_ACCESS, VMEM_BVH_READ_ACCESS}),
          WaitEventSet({SMEM_ACCESS, LDS_ACCESS, GDS_ACCESS, SQ_MESSAGE}),
          WaitEventSet({EXP_GPR_LOCK, GDS_GPR_LOCK, VMW_GPR_LOCK,
                        EXP_PARAM_ACCESS, EXP_POS_ACCESS, EXP_LDS_ACCESS}),
          WaitEventSet({VMEM_WRITE_ACCESS, SCRATCH_WRITE_ACCESS})};

public:
  using WaitcntGenerator::WaitcntGenerator;

  bool
  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                          MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,
                          MachineBasicBlock::instr_iterator It) const override;

  bool createNewWaitcnt(MachineBasicBlock &Block,
                        MachineBasicBlock::instr_iterator It,
                        AMDGPU::Waitcnt Wait,
                        const WaitcntBrackets &ScoreBrackets) override;

  const WaitEventSet &getWaitEvents(InstCounterType T) const override {
    return WaitEventMaskForInstPreGFX12[T];
  }

  AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const override;
};
class WaitcntGeneratorGFX12Plus final : public WaitcntGenerator {
  bool IsExpertMode = false;

  static constexpr const WaitEventSet
      WaitEventMaskForInstGFX12Plus[NUM_INST_CNTS] = {
          WaitEventSet({VMEM_ACCESS, GLOBAL_INV_ACCESS}),
          WaitEventSet({LDS_ACCESS, GDS_ACCESS}),
          WaitEventSet({EXP_GPR_LOCK, GDS_GPR_LOCK, VMW_GPR_LOCK,
                        EXP_PARAM_ACCESS, EXP_POS_ACCESS, EXP_LDS_ACCESS}),
          WaitEventSet({VMEM_WRITE_ACCESS, SCRATCH_WRITE_ACCESS}),
          WaitEventSet({VMEM_SAMPLER_READ_ACCESS}),
          WaitEventSet({VMEM_BVH_READ_ACCESS}),
          WaitEventSet({SMEM_ACCESS, SQ_MESSAGE, SCC_WRITE}),
          WaitEventSet({VMEM_GROUP, SMEM_GROUP}),
          WaitEventSet({ASYNC_ACCESS}),
          WaitEventSet({VGPR_CSMACC_WRITE, VGPR_DPMACC_WRITE, VGPR_TRANS_WRITE,
                        VGPR_XDL_WRITE}),
          WaitEventSet({VGPR_LDS_READ, VGPR_FLAT_READ, VGPR_VMEM_READ})};

public:
  WaitcntGeneratorGFX12Plus() = delete;
  WaitcntGeneratorGFX12Plus(const MachineFunction &MF,
                            InstCounterType MaxCounter,
                            const AMDGPU::HardwareLimits &Limits,
                            bool IsExpertMode)
      : WaitcntGenerator(MF, MaxCounter, Limits), IsExpertMode(IsExpertMode) {}

  bool
  applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                          MachineInstr &OldWaitcntInstr, AMDGPU::Waitcnt &Wait,
                          MachineBasicBlock::instr_iterator It) const override;

  bool createNewWaitcnt(MachineBasicBlock &Block,
                        MachineBasicBlock::instr_iterator It,
                        AMDGPU::Waitcnt Wait,
                        const WaitcntBrackets &ScoreBrackets) override;

  const WaitEventSet &getWaitEvents(InstCounterType T) const override {
    return WaitEventMaskForInstGFX12Plus[T];
  }

  AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const override;
};
struct PreheaderFlushFlags {
  bool FlushVmCnt = false;
  bool FlushDsCnt = false;
};
class SIInsertWaitcnts {
private:
  DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
  DenseMap<MachineBasicBlock *, PreheaderFlushFlags> PreheadersToFlush;
  MachineLoopInfo &MLI;
  MachinePostDominatorTree &PDT;
  AliasAnalysis *AA = nullptr;
  MachineFunction &MF;

  struct BlockInfo {
    std::unique_ptr<WaitcntBrackets> Incoming;
    bool Dirty = true;
  };

  MapVector<MachineBasicBlock *, BlockInfo> BlockInfos;

  std::unique_ptr<WaitcntGenerator> WCG;

  DenseSet<MachineInstr *> CallInsts;
  DenseSet<MachineInstr *> ReturnInsts;
  // S_ENDPGM instructions, with a flag recording whether outstanding
  // non-scratch stores still need waiting on before the program ends.
  DenseMap<MachineInstr *, bool> EndPgmInsts;

  AMDGPU::HardwareLimits Limits;

public:
  const GCNSubtarget &ST;
  const SIInstrInfo &TII;
  const SIRegisterInfo &TRI;
  const MachineRegisterInfo &MRI;
  InstCounterType SmemAccessCounter;
  InstCounterType MaxCounter;
  bool ForceEmitWaitcnt[NUM_INST_CNTS];
  bool IsExpertMode = false;

  SIInsertWaitcnts(MachineLoopInfo &MLI, MachinePostDominatorTree &PDT,
                   AliasAnalysis *AA, MachineFunction &MF)
      : MLI(MLI), PDT(PDT), AA(AA), MF(MF),
        ST(MF.getSubtarget<GCNSubtarget>()), TII(*ST.getInstrInfo()),
        TRI(TII.getRegisterInfo()), MRI(MF.getRegInfo()) {
    (void)ForceExpCounter;
    (void)ForceLgkmCounter;
    (void)ForceVMCounter;
  }

  bool run();

  const AMDGPU::HardwareLimits &getLimits() const { return Limits; }

  PreheaderFlushFlags getPreheaderFlushFlags(MachineLoop *ML,
                                             const WaitcntBrackets &Brackets);
  PreheaderFlushFlags isPreheaderToFlush(MachineBasicBlock &MBB,
                                         const WaitcntBrackets &ScoreBrackets);
  bool isVMEMOrFlatVMEM(const MachineInstr &MI) const;
  bool isDSRead(const MachineInstr &MI) const;
  bool mayStoreIncrementingDSCNT(const MachineInstr &MI) const;

  void setForceEmitWaitcnt() {
// For DEBUG counters, force the waits the debug flags request and clear
// everything else.
#ifndef NDEBUG
    if (DebugCounter::isCounterSet(ForceExpCounter) &&
        DebugCounter::shouldExecute(ForceExpCounter))
      ForceEmitWaitcnt[EXP_CNT] = true;
    else
      ForceEmitWaitcnt[EXP_CNT] = false;

    if (DebugCounter::isCounterSet(ForceLgkmCounter) &&
        DebugCounter::shouldExecute(ForceLgkmCounter)) {
      ForceEmitWaitcnt[DS_CNT] = true;
      ForceEmitWaitcnt[KM_CNT] = true;
    } else {
      ForceEmitWaitcnt[DS_CNT] = false;
      ForceEmitWaitcnt[KM_CNT] = false;
    }

    if (DebugCounter::isCounterSet(ForceVMCounter) &&
        DebugCounter::shouldExecute(ForceVMCounter)) {
      ForceEmitWaitcnt[LOAD_CNT] = true;
      ForceEmitWaitcnt[SAMPLE_CNT] = true;
      ForceEmitWaitcnt[BVH_CNT] = true;
    } else {
      ForceEmitWaitcnt[LOAD_CNT] = false;
      ForceEmitWaitcnt[SAMPLE_CNT] = false;
      ForceEmitWaitcnt[BVH_CNT] = false;
    }

    ForceEmitWaitcnt[VA_VDST] = false;
    ForceEmitWaitcnt[VM_VSRC] = false;
#endif // NDEBUG
  }

  // Return the appropriate VMEM_*_ACCESS event type for \p Inst, which must
  // be a VMEM or FLAT instruction.
  WaitEventType getVmemWaitEventType(const MachineInstr &Inst) const {
    switch (Inst.getOpcode()) {
    case AMDGPU::GLOBAL_INV:
      return GLOBAL_INV_ACCESS;
    // GLOBAL_WB and GLOBAL_WBINV are tracked with the stores.
    case AMDGPU::GLOBAL_WB:
    case AMDGPU::GLOBAL_WBINV:
      return VMEM_WRITE_ACCESS;
    default:
      break;
    }

    // Maps VMEM access types to their corresponding WaitEventType.
    static const WaitEventType VmemReadMapping[NUM_VMEM_TYPES] = {
        VMEM_ACCESS, VMEM_SAMPLER_READ_ACCESS, VMEM_BVH_READ_ACCESS};

    if (Inst.mayStore() && !SIInstrInfo::isAtomicRet(Inst)) {
      if (TII.mayAccessScratch(Inst))
        return SCRATCH_WRITE_ACCESS;
      return VMEM_WRITE_ACCESS;
    }
    return VmemReadMapping[getVmemType(Inst)];
  }

  std::optional<WaitEventType>
  getExpertSchedulingEventType(const MachineInstr &Inst) const;

  bool isAsync(const MachineInstr &MI) const {
    const MachineOperand *Async =
        TII.getNamedOperand(MI, AMDGPU::OpName::IsAsync);
    return Async && Async->getImm();
  }

  bool isNonAsyncLdsDmaWrite(const MachineInstr &MI) const {
    return SIInstrInfo::isLDSDMA(MI) && MI.mayStore() && !isAsync(MI);
  }

  bool isAsyncLdsDmaWrite(const MachineInstr &MI) const {
    return SIInstrInfo::isLDSDMA(MI) && MI.mayStore() && isAsync(MI);
  }

  bool isVmemAccess(const MachineInstr &MI) const;
  bool generateWaitcntInstBefore(MachineInstr &MI,
                                 WaitcntBrackets &ScoreBrackets,
                                 MachineInstr *OldWaitcntInstr,
                                 PreheaderFlushFlags FlushFlags);
  bool generateWaitcnt(AMDGPU::Waitcnt Wait,
                       MachineBasicBlock::instr_iterator It,
                       MachineBasicBlock &Block, WaitcntBrackets &ScoreBrackets,
                       MachineInstr *OldWaitcntInstr);
  WaitEventSet getEventsFor(const MachineInstr &Inst) const;
  void updateEventWaitcntAfter(MachineInstr &Inst,
                               WaitcntBrackets *ScoreBrackets);
  bool isNextENDPGM(MachineBasicBlock::instr_iterator It,
                    MachineBasicBlock *Block) const;
  bool insertForcedWaitAfter(MachineInstr &Inst, MachineBasicBlock &Block,
                             WaitcntBrackets &ScoreBrackets);
  bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
                            WaitcntBrackets &ScoreBrackets);
  bool removeRedundantSoftXcnts(MachineBasicBlock &Block);
  void setSchedulingMode(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator It,
                         bool ExpertMode) const;

  const WaitEventSet &getWaitEvents(InstCounterType T) const {
    return WCG->getWaitEvents(T);
  }
  InstCounterType getCounterFromEvent(WaitEventType E) const {
    return WCG->getCounterFromEvent(E);
  }
};
// This object maintains the current score bracket of each wait counter, and
// a per-register scoreboard for each wait counter.
class WaitcntBrackets {
public:
  WaitcntBrackets(const SIInsertWaitcnts *Context) : Context(Context) {}

#ifndef NDEBUG
  ~WaitcntBrackets() {
    // Sanity-check that the tracking maps were purged of dead entries.
    unsigned NumUnusedVmem = 0, NumUnusedSGPRs = 0;
    for (auto &[ID, Val] : VMem) {
      if (Val.empty())
        ++NumUnusedVmem;
    }
    for (auto &[ID, Val] : SGPRs) {
      if (Val.empty())
        ++NumUnusedSGPRs;
    }
    if (NumUnusedVmem || NumUnusedSGPRs) {
      errs() << "WaitcntBracket had unused entries at destruction time: "
             << NumUnusedVmem << " VMem and " << NumUnusedSGPRs
             << " SGPR unused entries\n";
    }
  }
#endif

  unsigned getScoreLB(InstCounterType T) const { return ScoreLBs[T]; }
  unsigned getScoreUB(InstCounterType T) const { return ScoreUBs[T]; }
  unsigned getScoreRange(InstCounterType T) const {
    return ScoreUBs[T] - ScoreLBs[T];
  }

  bool hasPendingVMEM(VMEMID ID, InstCounterType T) const {
    return getVMemScore(ID, T) > getScoreLB(T);
  }

  bool empty(InstCounterType T) const { return getScoreRange(T) == 0; }

  // Return the number of outstanding operations for the given counter, i.e.
  // the size of its score bracket.
  unsigned getOutstanding(InstCounterType T) const {
    return getScoreUB(T) - getScoreLB(T);
  }

  unsigned getSGPRScore(MCRegUnit RU, InstCounterType T) const {
    auto It = SGPRs.find(RU);
    return It != SGPRs.end() ? It->second.get(T) : 0;
  }

  unsigned getVMemScore(VMEMID TID, InstCounterType T) const {
    auto It = VMem.find(TID);
    return It != VMem.end() ? It->second.Scores[T] : 0;
  }
  // Drop any component of \p Wait that is already satisfied.
  void simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {
    simplifyWaitcnt(Wait, Wait);
  }
  // Simplify UpdateWait under the assumption that CheckWait is applied.
  void simplifyWaitcnt(const AMDGPU::Waitcnt &CheckWait,
                       AMDGPU::Waitcnt &UpdateWait) const;
  void simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
  void simplifyWaitcnt(AMDGPU::Waitcnt &Wait, InstCounterType T) const;
  void simplifyXcnt(const AMDGPU::Waitcnt &CheckWait,
                    AMDGPU::Waitcnt &UpdateWait) const;
  void simplifyVmVsrc(const AMDGPU::Waitcnt &CheckWait,
                      AMDGPU::Waitcnt &UpdateWait) const;

  void determineWaitForPhysReg(InstCounterType T, MCPhysReg Reg,
                               AMDGPU::Waitcnt &Wait) const;
  void determineWaitForLDSDMA(InstCounterType T, VMEMID TID,
                              AMDGPU::Waitcnt &Wait) const;
  AMDGPU::Waitcnt determineAsyncWait(unsigned N);
  void tryClearSCCWriteEvent(MachineInstr *Inst);

  void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
  void applyWaitcnt(const AMDGPU::Waitcnt &Wait, InstCounterType T);
  void applyWaitcnt(InstCounterType T, unsigned Count);
  void updateByEvent(WaitEventType E, MachineInstr &MI);
  void recordAsyncMark(MachineInstr &MI);

  bool counterOutOfOrder(InstCounterType T) const;

  bool hasPendingEvent() const { return !PendingEvents.empty(); }
  bool hasPendingEvent(WaitEventType E) const {
    return PendingEvents.contains(E);
  }
  bool hasPendingEvent(InstCounterType T) const {
    bool HasPending = PendingEvents & Context->getWaitEvents(T);
    assert(HasPending == (getScoreRange(T) != 0) &&
           "Expected pending events iff scoreboard is not empty");
    return HasPending;
  }

  bool hasMixedPendingEvents(InstCounterType T) const {
    WaitEventSet Events = PendingEvents & Context->getWaitEvents(T);
    // Return true if more than one bit is set.
    return Events.twoOrMore();
  }
  bool hasPendingFlat() const {
    return ((LastFlatDsCnt > ScoreLBs[DS_CNT] &&
             LastFlatDsCnt <= ScoreUBs[DS_CNT]) ||
            (LastFlatLoadCnt > ScoreLBs[LOAD_CNT] &&
             LastFlatLoadCnt <= ScoreUBs[LOAD_CNT]));
  }

  void setPendingFlat() {
    LastFlatLoadCnt = ScoreUBs[LOAD_CNT];
    LastFlatDsCnt = ScoreUBs[DS_CNT];
  }

  bool hasPendingGDS() const {
    return LastGDS > ScoreLBs[DS_CNT] && LastGDS <= ScoreUBs[DS_CNT];
  }

  unsigned getPendingGDSWait() const {
    return std::min(getScoreUB(DS_CNT) - LastGDS,
                    getWaitCountMax(Context->getLimits(), DS_CNT) - 1);
  }

  void setPendingGDS() { LastGDS = ScoreUBs[DS_CNT]; }
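  // Interval sketch (an illustration with invented values): a recorded score
  // S is still pending exactly when it lies in the half-open bracket
  // (ScoreLB, ScoreUB]. With ScoreLB = 3 and ScoreUB = 7, a FLAT op recorded
  // at score 5 is pending, while one recorded at score 3 has already been
  // waited on.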
  // Check whether there are VMEM types other than \p V pending for \p Reg.
  bool hasOtherPendingVmemTypes(MCPhysReg Reg, VmemType V) const {
    for (MCRegUnit RU : regunits(Reg)) {
      auto It = VMem.find(toVMEMID(RU));
      if (It != VMem.end() && (It->second.VMEMTypes & ~(1 << V)))
        return true;
    }
    return false;
  }

  void clearVgprVmemTypes(MCPhysReg Reg) {
    for (MCRegUnit RU : regunits(Reg)) {
      if (auto It = VMem.find(toVMEMID(RU)); It != VMem.end()) {
        It->second.VMEMTypes = 0;
        if (It->second.empty())
          VMem.erase(It);
      }
    }
  }

  void setStateOnFunctionEntryOrReturn() {
    // Pessimistically assume the maximum number of outstanding stores.
    setScoreUB(STORE_CNT, getScoreUB(STORE_CNT) +
                              getWaitCountMax(Context->getLimits(), STORE_CNT));
    PendingEvents |= Context->getWaitEvents(STORE_CNT);
  }

  ArrayRef<const MachineInstr *> getLDSDMAStores() const {
    return LDSDMAStores;
  }

  bool hasPointSampleAccel(const MachineInstr &MI) const;
  bool hasPointSamplePendingVmemTypes(const MachineInstr &MI,
                                      MCPhysReg Reg) const;

  void print(raw_ostream &) const;
  LLVM_DUMP_METHOD void dump() const { print(dbgs()); }

  bool merge(const WaitcntBrackets &Other);

  void purgeEmptyTrackingData();
private:
  struct MergeInfo {
    unsigned OldLB;
    unsigned OtherLB;
    unsigned MyShift;
    unsigned OtherShift;
  };

  using CounterValueArray = std::array<unsigned, NUM_INST_CNTS>;

  void determineWaitForScore(InstCounterType T, unsigned ScoreToWait,
                             AMDGPU::Waitcnt &Wait) const;

  static bool mergeScore(const MergeInfo &M, unsigned &Score,
                         unsigned OtherScore);
  bool mergeAsyncMarks(ArrayRef<MergeInfo> MergeInfos,
                       const SmallVectorImpl<CounterValueArray> &OtherMarks);

  // Return the regunits to track for \p Reg. On targets where a 16-bit write
  // updates the whole 32-bit VGPR, widen 16-bit registers to the full VGPR.
  SmallVector<MCRegUnit, 2> regunits(MCPhysReg Reg) const {
    assert(Reg != AMDGPU::SCC && "Shouldn't be used on SCC");
    const TargetRegisterClass *RC = Context->TRI.getPhysRegBaseClass(Reg);
    unsigned Size = Context->TRI.getRegSizeInBits(*RC);
    if (Size == 16 && Context->ST.hasD16Writes32BitVgpr())
      Reg = Context->TRI.get32BitRegister(Reg);
    return SmallVector<MCRegUnit, 2>(Context->TRI.regunits(Reg));
  }

  void setScoreLB(InstCounterType T, unsigned Val) { ScoreLBs[T] = Val; }
  void setScoreUB(InstCounterType T, unsigned Val) { ScoreUBs[T] = Val; }

  void setRegScore(MCPhysReg Reg, InstCounterType T, unsigned Val) {
    if (Reg == AMDGPU::SCC) {
      SCCScore = Val;
    } else if (Context->TRI.isVectorRegister(Context->MRI, Reg)) {
      for (MCRegUnit RU : regunits(Reg))
        VMem[toVMEMID(RU)].Scores[T] = Val;
    } else {
      for (MCRegUnit RU : regunits(Reg))
        SGPRs[RU].get(T) = Val;
    }
  }

  void setVMemScore(VMEMID TID, InstCounterType T, unsigned Val) {
    VMem[TID].Scores[T] = Val;
  }

  void setScoreByOperand(const MachineOperand &Op, InstCounterType CntTy,
                         unsigned Score);

  const SIInsertWaitcnts *Context;

  unsigned ScoreLBs[NUM_INST_CNTS] = {0};
  unsigned ScoreUBs[NUM_INST_CNTS] = {0};
  WaitEventSet PendingEvents;
  // Remember the last flat memory operation.
  unsigned LastFlatDsCnt = 0;
  unsigned LastFlatLoadCnt = 0;
  // Remember the last GDS operation.
  unsigned LastGDS = 0;

  struct VMEMInfo {
    CounterValueArray Scores{};
    // Bitmask of the VmemTypes of VMEM instructions that might have a
    // pending write to this entry.
    unsigned VMEMTypes = 0;

    bool empty() const {
      return VMEMTypes == 0 &&
             llvm::all_of(Scores, [](unsigned S) { return S == 0; });
    }
  };

  struct SGPRInfo {
    unsigned ScoreDsKmCnt = 0;
    unsigned ScoreXCnt = 0;

    unsigned &get(InstCounterType T) {
      return T == X_CNT ? ScoreXCnt : ScoreDsKmCnt;
    }
    unsigned get(InstCounterType T) const {
      return T == X_CNT ? ScoreXCnt : ScoreDsKmCnt;
    }

    bool empty() const { return !ScoreDsKmCnt && !ScoreXCnt; }
  };

  DenseMap<VMEMID, VMEMInfo> VMem;
  DenseMap<MCRegUnit, SGPRInfo> SGPRs;

  // Score of the last SCC write from a barrier, tracked separately.
  unsigned SCCScore = 0;
  const MachineInstr *PendingSCCWrite = nullptr;

  // Representative LDS DMA stores, used for AA-based disambiguation.
  SmallVector<const MachineInstr *> LDSDMAStores;

  // Upper bound on the number of async marks that are tracked.
  static constexpr unsigned MaxAsyncMarks = 16;
  SmallVector<CounterValueArray> AsyncMarks;
  CounterValueArray AsyncScore{};
};
class SIInsertWaitcntsLegacy : public MachineFunctionPass {
public:
  static char ID;
  SIInsertWaitcntsLegacy() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert wait instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineLoopInfoWrapperPass>();
    AU.addRequired<MachinePostDominatorTreeWrapperPass>();
    AU.addUsedIfAvailable<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace
void WaitcntBrackets::setScoreByOperand(const MachineOperand &Op,
                                        InstCounterType CntTy,
                                        unsigned Score) {
  setRegScore(Op.getReg().asMCReg(), CntTy, Score);
}

// Return true if the subtarget is one in which Point Sample Acceleration is
// enabled for this image instruction, in which case an additional VMEM-type
// dependency must be tracked.
bool WaitcntBrackets::hasPointSampleAccel(const MachineInstr &MI) const {
  if (!Context->ST.hasPointSampleAccel() || !SIInstrInfo::isMIMG(MI))
    return false;
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo =
      AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
  return BaseInfo->PointSampleAccel;
}

bool WaitcntBrackets::hasPointSamplePendingVmemTypes(const MachineInstr &MI,
                                                     MCPhysReg Reg) const {
  if (!hasPointSampleAccel(MI))
    return false;
  return hasOtherPendingVmemTypes(Reg, VMEM_NOSAMPLER);
}
void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) {
  InstCounterType T = Context->getCounterFromEvent(E);

  unsigned UB = getScoreUB(T);
  unsigned CurrScore = UB + 1;
  // PendingEvents and ScoreUB need to be updated regardless of whether this
  // event changes the score of a register or not.
  PendingEvents.insert(E);
  setScoreUB(T, CurrScore);

  const SIInstrInfo &TII = Context->TII;
  const SIRegisterInfo &TRI = Context->TRI;
  const MachineRegisterInfo &MRI = Context->MRI;

  if (T == EXP_CNT) {
    // Put score on the source vgprs. If this is a store, just use those
    // specific registers.
    if (TII.isDS(Inst) && Inst.mayStore()) {
      // All GDS operations must protect their address register (same as
      // export).
      if (const auto *AddrOp = TII.getNamedOperand(Inst, AMDGPU::OpName::addr))
        setScoreByOperand(*AddrOp, EXP_CNT, CurrScore);

      if (const auto *Data0 =
              TII.getNamedOperand(Inst, AMDGPU::OpName::data0))
        setScoreByOperand(*Data0, EXP_CNT, CurrScore);
      if (const auto *Data1 =
              TII.getNamedOperand(Inst, AMDGPU::OpName::data1))
        setScoreByOperand(*Data1, EXP_CNT, CurrScore);
    } else if (TII.isDS(Inst) &&
               Inst.getOpcode() != AMDGPU::DS_APPEND &&
               Inst.getOpcode() != AMDGPU::DS_CONSUME &&
               Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
      for (const MachineOperand &Op : Inst.all_uses()) {
        if (TRI.isVectorRegister(MRI, Op.getReg()))
          setScoreByOperand(Op, EXP_CNT, CurrScore);
      }
    } else if (TII.isFLAT(Inst)) {
      if (Inst.mayStore() || SIInstrInfo::isAtomicRet(Inst))
        setScoreByOperand(*TII.getNamedOperand(Inst, AMDGPU::OpName::data),
                          EXP_CNT, CurrScore);
    } else if (TII.isMIMG(Inst)) {
      if (Inst.mayStore() || SIInstrInfo::isAtomicRet(Inst))
        setScoreByOperand(*TII.getNamedOperand(Inst, AMDGPU::OpName::data),
                          EXP_CNT, CurrScore);
    } else if (TII.isMTBUF(Inst)) {
      // ... (MTBUF stores handled like MUBUF; elided in the listing)
    } else if (TII.isMUBUF(Inst)) {
      if (Inst.mayStore() || SIInstrInfo::isAtomicRet(Inst))
        setScoreByOperand(*TII.getNamedOperand(Inst, AMDGPU::OpName::data),
                          EXP_CNT, CurrScore);
    } else if (TII.isLDSDIR(Inst)) {
      // LDSDIR instructions attach the score to the destination.
      setScoreByOperand(*TII.getNamedOperand(Inst, AMDGPU::OpName::vdst),
                        EXP_CNT, CurrScore);
    } else {
      if (TII.isEXP(Inst)) {
        // For export the destination registers are really temps that
        // can be used as the actual source after export patching, so
        // we need to treat them like sources and set the EXP_CNT score.
        for (MachineOperand &DefMO : Inst.all_defs()) {
          if (TRI.isVGPR(MRI, DefMO.getReg())) {
            setScoreByOperand(DefMO, EXP_CNT, CurrScore);
          }
        }
      }
      for (const MachineOperand &Op : Inst.all_uses()) {
        if (TRI.isVectorRegister(MRI, Op.getReg()))
          setScoreByOperand(Op, EXP_CNT, CurrScore);
      }
    }
  } else if (T == X_CNT) {
    WaitEventType OtherEvent = E == SMEM_GROUP ? VMEM_GROUP : SMEM_GROUP;
    if (PendingEvents.contains(OtherEvent)) {
      // Hardware inserts an implicit xcnt between interleaved SMEM and VMEM
      // operations, so there will never be outstanding address translations
      // for both at the same time.
      setScoreLB(T, getScoreUB(T) - 1);
      PendingEvents.remove(OtherEvent);
    }
    for (const MachineOperand &Op : Inst.all_uses())
      setScoreByOperand(Op, T, CurrScore);
  } else if (T == VA_VDST || T == VM_VSRC) {
    // Expert scheduling events score the vector register operands.
    for (const MachineOperand &Op : Inst.operands()) {
      if (!Op.isReg() || !TRI.isVectorRegister(MRI, Op.getReg()))
        continue;
      setScoreByOperand(Op, T, CurrScore);
    }
  } else /* LOAD_CNT, DS_CNT, STORE_CNT, ... */ {
    // Match the score to the destination registers.
    for (const MachineOperand &Op : Inst.defs()) {
      if (!TRI.isVectorRegister(MRI, Op.getReg()))
        continue;
      if (updateVMCntOnly(Inst)) {
        // updateVMCntOnly should only leave us with VGPRs.
        VmemType V = getVmemType(Inst);
        unsigned char TypesMask = 1 << V;
        // If the instruction can have Point Sample Accel applied, flag this
        // with another potential dependency.
        if (hasPointSampleAccel(Inst))
          TypesMask |= 1 << VMEM_NOSAMPLER;
        for (MCRegUnit RU : regunits(Op.getReg().asMCReg()))
          VMem[toVMEMID(RU)].VMEMTypes |= TypesMask;
      }
      setScoreByOperand(Op, T, CurrScore);
    }

    if (Inst.mayStore() &&
        (TII.isDS(Inst) || Context->isNonAsyncLdsDmaWrite(Inst))) {
      // MUBUF and FLAT LDS DMA operations need a wait on vmcnt before LDS
      // written can be accessed. A load from LDS to VMEM does not need a
      // wait.
      unsigned Slot = 0;
      for (const auto *MemOp : Inst.memoperands()) {
        if (!MemOp->isStore() ||
            MemOp->getAddrSpace() != AMDGPUAS::LOCAL_ADDRESS)
          continue;
        // Comparing just AA info does not guarantee memoperands are equal in
        // general, but this is so for LDS DMA in practice.
        auto AAI = MemOp->getAAInfo();
        if (!AAI || !AAI.Scope)
          break;
        for (unsigned I = 0, E = LDSDMAStores.size(); I != E && !Slot; ++I) {
          for (const auto *MemOp : LDSDMAStores[I]->memoperands()) {
            if (MemOp->isStore() && AAI == MemOp->getAAInfo()) {
              Slot = I + 1;
              break;
            }
          }
        }
        if (Slot || LDSDMAStores.size() == NUM_LDSDMA - 1)
          break;
        LDSDMAStores.push_back(&Inst);
        Slot = LDSDMAStores.size();
        break;
      }
      setVMemScore(LDSDMA_BEGIN, T, CurrScore);
      if (Slot && Slot < NUM_LDSDMA)
        setVMemScore(LDSDMA_BEGIN + Slot, T, CurrScore);
    }

    if (E == ASYNC_ACCESS) {
      assert(Context->ST.hasGFX1250Insts() &&
             "unexpected GFX1250 instruction");
      AsyncScore[T] = CurrScore;
    }
    if (E == SCC_WRITE) {
      setRegScore(AMDGPU::SCC, T, CurrScore);
      PendingSCCWrite = &Inst;
    }
  }
}
void WaitcntBrackets::recordAsyncMark(MachineInstr &Inst) {
  // Cap the number of tracked marks; determineAsyncWait() compensates for
  // the possible truncation.
  if (AsyncMarks.size() == MaxAsyncMarks)
    AsyncMarks.erase(AsyncMarks.begin());
  AsyncMarks.push_back(AsyncScore);

  LLVM_DEBUG({
    dbgs() << "recordAsyncMark:\n" << Inst;
    for (const auto &Mark : AsyncMarks) {
      // ... (per-mark score dump elided in the listing)
    }
  });
}
void WaitcntBrackets::print(raw_ostream &OS) const {
  const GCNSubtarget &ST = Context->ST;

  OS << '\n';
  for (auto T : inst_counter_types(Context->MaxCounter)) {
    unsigned SR = getScoreRange(T);

    switch (T) {
    case LOAD_CNT:
      OS << "    " << (ST.hasExtendedWaitCounts() ? "LOAD" : "VM") << "_CNT("
         << SR << "):";
      break;
    case DS_CNT:
      OS << "    " << (ST.hasExtendedWaitCounts() ? "DS" : "LGKM") << "_CNT("
         << SR << "):";
      break;
    case EXP_CNT:
      OS << "    EXP_CNT(" << SR << "):";
      break;
    case STORE_CNT:
      OS << "    " << (ST.hasExtendedWaitCounts() ? "STORE" : "VS") << "_CNT("
         << SR << "):";
      break;
    case SAMPLE_CNT:
      OS << "    SAMPLE_CNT(" << SR << "):";
      break;
    case BVH_CNT:
      OS << "    BVH_CNT(" << SR << "):";
      break;
    case KM_CNT:
      OS << "    KM_CNT(" << SR << "):";
      break;
    case X_CNT:
      OS << "    X_CNT(" << SR << "):";
      break;
    case ASYNC_CNT:
      OS << "    ASYNC_CNT(" << SR << "):";
      break;
    case VA_VDST:
      OS << "    VA_VDST(" << SR << "): ";
      break;
    case VM_VSRC:
      OS << "    VM_VSRC(" << SR << "): ";
      break;
    default:
      OS << "    UNKNOWN(" << SR << "):";
      break;
    }

    if (SR != 0) {
      // Print vgpr scores.
      unsigned LB = getScoreLB(T);

      SmallVector<VMEMID> SortedVMEMIDs(VMem.keys());
      sort(SortedVMEMIDs);
      for (auto ID : SortedVMEMIDs) {
        unsigned RegScore = VMem.at(ID).Scores[T];
        if (RegScore <= LB)
          continue;
        unsigned RelScore = RegScore - LB - 1;
        if (ID < REGUNITS_END) {
          OS << ' ' << RelScore << ":vRU" << ID;
        } else {
          assert(ID >= LDSDMA_BEGIN && ID < LDSDMA_END &&
                 "Unhandled/unexpected ID value!");
          OS << ' ' << RelScore << ":LDSDMA" << ID;
        }
      }

      // Also print sgpr scores for the counters that track SMEM.
      if (isSmemCounter(T)) {
        SmallVector<MCRegUnit> SortedSMEMIDs(SGPRs.keys());
        sort(SortedSMEMIDs);
        for (auto ID : SortedSMEMIDs) {
          unsigned RegScore = SGPRs.at(ID).get(T);
          if (RegScore <= LB)
            continue;
          unsigned RelScore = RegScore - LB - 1;
          OS << ' ' << RelScore << ":sRU" << static_cast<unsigned>(ID);
        }
      }
      if (T == KM_CNT && SCCScore > 0)
        OS << ' ' << SCCScore << ":scc";
    }
    OS << '\n';
  }

  OS << "Pending Events: ";
  if (hasPendingEvent()) {
    ListSeparator LS;
    for (unsigned I = 0; I != NUM_WAIT_EVENTS; ++I) {
      if (hasPendingEvent((WaitEventType)I)) {
        OS << LS << WaitEventTypeName[I];
      }
    }
  } else {
    OS << "none";
  }
  OS << '\n';

  OS << "Async score: ";
  if (AsyncScore.empty())
    OS << "none";
  // ... (per-counter async score dump elided in the listing)
  OS << '\n';

  OS << "Async marks: " << AsyncMarks.size() << '\n';
  for (const auto &Mark : AsyncMarks) {
    for (auto T : inst_counter_types(Context->MaxCounter)) {
      unsigned MarkedScore = Mark[T];
      if (!MarkedScore)
        continue;
      switch (T) {
      case LOAD_CNT:
        OS << "    " << (ST.hasExtendedWaitCounts() ? "LOAD" : "VM")
           << "_CNT: " << MarkedScore;
        break;
      case DS_CNT:
        OS << "    " << (ST.hasExtendedWaitCounts() ? "DS" : "LGKM")
           << "_CNT: " << MarkedScore;
        break;
      case EXP_CNT:
        OS << "    EXP_CNT: " << MarkedScore;
        break;
      case STORE_CNT:
        OS << "    " << (ST.hasExtendedWaitCounts() ? "STORE" : "VS")
           << "_CNT: " << MarkedScore;
        break;
      case SAMPLE_CNT:
        OS << "    SAMPLE_CNT: " << MarkedScore;
        break;
      case BVH_CNT:
        OS << "    BVH_CNT: " << MarkedScore;
        break;
      case KM_CNT:
        OS << "    KM_CNT: " << MarkedScore;
        break;
      case X_CNT:
        OS << "    X_CNT: " << MarkedScore;
        break;
      case ASYNC_CNT:
        OS << "    ASYNC_CNT: " << MarkedScore;
        break;
      default:
        OS << "    UNKNOWN: " << MarkedScore;
        break;
      }
    }
    OS << '\n';
  }
  OS << '\n';
}
void WaitcntBrackets::simplifyWaitcnt(const AMDGPU::Waitcnt &CheckWait,
                                      AMDGPU::Waitcnt &UpdateWait) const {
  simplifyWaitcnt(UpdateWait, LOAD_CNT);
  simplifyWaitcnt(UpdateWait, EXP_CNT);
  simplifyWaitcnt(UpdateWait, DS_CNT);
  simplifyWaitcnt(UpdateWait, STORE_CNT);
  simplifyWaitcnt(UpdateWait, SAMPLE_CNT);
  simplifyWaitcnt(UpdateWait, BVH_CNT);
  simplifyWaitcnt(UpdateWait, KM_CNT);
  simplifyXcnt(CheckWait, UpdateWait);
  simplifyWaitcnt(UpdateWait, VA_VDST);
  simplifyVmVsrc(CheckWait, UpdateWait);
}

void WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
                                      unsigned &Count) const {
  // The number of outstanding events for this counter is the width of its
  // bracket; waiting for anything at least that old is a no-op.
  if (Count >= getScoreRange(T))
    Count = ~0u;
}

void WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &Wait,
                                      InstCounterType T) const {
  unsigned Cnt = Wait.get(T);
  simplifyWaitcnt(T, Cnt);
  if (Cnt == ~0u)
    setNoWait(Wait, T);
  else
    addWait(Wait, T, Cnt);
}

void WaitcntBrackets::simplifyXcnt(const AMDGPU::Waitcnt &CheckWait,
                                   AMDGPU::Waitcnt &UpdateWait) const {
  // An explicit XCNT wait is redundant when another counter already orders
  // the outstanding address translations: a KM_CNT(0) wait covers a pending
  // SMEM group, and a finite LOAD_CNT wait covers a pure VMEM group.
  if (CheckWait.get(KM_CNT) == 0 && hasPendingEvent(SMEM_GROUP))
    setNoWait(UpdateWait, X_CNT);
  if (CheckWait.get(LOAD_CNT) != ~0u && hasPendingEvent(VMEM_GROUP) &&
      !hasMixedPendingEvents(X_CNT))
    setNoWait(UpdateWait, X_CNT);
  simplifyWaitcnt(UpdateWait, X_CNT);
}

void WaitcntBrackets::simplifyVmVsrc(const AMDGPU::Waitcnt &CheckWait,
                                     AMDGPU::Waitcnt &UpdateWait) const {
  // A VM_VSRC wait is subsumed when every memory counter that can hold a
  // VGPR source is waited on at least as tightly.
  if (CheckWait.get(VM_VSRC) >=
      std::min({CheckWait.get(LOAD_CNT), CheckWait.get(STORE_CNT),
                CheckWait.get(SAMPLE_CNT), CheckWait.get(BVH_CNT),
                CheckWait.get(DS_CNT)}))
    setNoWait(UpdateWait, VM_VSRC);
  simplifyWaitcnt(UpdateWait, VM_VSRC);
}
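// Worked example (hedged, values invented): if LOAD_CNT has LB = 4 and
// UB = 6, only two loads are outstanding. A requested loadcnt(2) already
// holds and simplifyWaitcnt() rewrites the count to ~0u ("no wait"), while
// loadcnt(1) survives because one of the two outstanding loads must still
// retire first.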
void WaitcntBrackets::purgeEmptyTrackingData() {
  // Drop tracking entries whose scores and VMEM type bits are all cleared,
  // keeping the maps (and the destructor check above) tidy. The body was
  // elided in the listing; this is the straightforward implementation.
  SmallVector<VMEMID> DeadVMem;
  for (auto &[ID, Val] : VMem)
    if (Val.empty())
      DeadVMem.push_back(ID);
  for (VMEMID ID : DeadVMem)
    VMem.erase(ID);

  SmallVector<MCRegUnit> DeadSGPRs;
  for (auto &[RU, Val] : SGPRs)
    if (Val.empty())
      DeadSGPRs.push_back(RU);
  for (MCRegUnit RU : DeadSGPRs)
    SGPRs.erase(RU);
}

void WaitcntBrackets::determineWaitForScore(InstCounterType T,
                                            unsigned ScoreToWait,
                                            AMDGPU::Waitcnt &Wait) const {
  const unsigned LB = getScoreLB(T);
  const unsigned UB = getScoreUB(T);

  // If the score falls inside the bracket, a wait is needed; otherwise it
  // was either never recorded or has already been waited on.
  if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
    if ((T == LOAD_CNT || T == DS_CNT) && hasPendingFlat() &&
        !Context->ST.hasFlatLgkmVMemCountInOrder()) {
      // If there is a pending FLAT operation, and this is a VMem or LGKM
      // waitcnt and the target can report early completion, then we need
      // to force a waitcnt 0.
      addWait(Wait, T, 0);
    } else if (counterOutOfOrder(T)) {
      // The counter can decrement out-of-order when there are multiple
      // event types in the bracket; emit a conservative wait of 0.
      addWait(Wait, T, 0);
    } else {
      // If a counter has been maxed out, avoid overflow by waiting for
      // MAX(CounterType) - 1 instead.
      unsigned NeededWait = std::min(
          UB - ScoreToWait, getWaitCountMax(Context->getLimits(), T) - 1);
      addWait(Wait, T, NeededWait);
    }
  }
}
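// Worked arithmetic (hedged, values invented): with UB = 10 and a target
// score of 7, exactly 10 - 7 = 3 newer operations may still be outstanding
// once the waited-for one retires, so waitcnt(3) suffices. The std::min
// clamp matters when the bracket is wider than the hardware counter: with a
// 6-bit counter, getWaitCountMax() - 1 = 62 caps the encoded value.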
AMDGPU::Waitcnt WaitcntBrackets::determineAsyncWait(unsigned N) {
  LLVM_DEBUG({
    dbgs() << "Need " << N << " async marks. Found " << AsyncMarks.size()
           << '\n';
    for (const auto &Mark : AsyncMarks) {
      // ... (per-mark dump elided in the listing)
    }
  });

  if (AsyncMarks.size() == MaxAsyncMarks) {
    // The mark list may have been truncated; be conservative.
    LLVM_DEBUG(dbgs() << "Possible truncation. Ensuring a non-trivial wait.\n");
    N = std::min(N, (unsigned)MaxAsyncMarks - 1);
  }

  AMDGPU::Waitcnt Wait;
  if (AsyncMarks.size() <= N)
    return Wait;

  size_t MarkIndex = AsyncMarks.size() - N - 1;
  const auto &RequiredMark = AsyncMarks[MarkIndex];
  for (auto T : inst_counter_types(Context->MaxCounter))
    determineWaitForScore(T, RequiredMark[T], Wait);

  LLVM_DEBUG({
    dbgs() << "Removing " << (MarkIndex + 1)
           << " async marks after determining wait\n";
  });
  AsyncMarks.erase(AsyncMarks.begin(), AsyncMarks.begin() + MarkIndex + 1);
  return Wait;
}

void WaitcntBrackets::determineWaitForPhysReg(InstCounterType T, MCPhysReg Reg,
                                              AMDGPU::Waitcnt &Wait) const {
  if (Reg == AMDGPU::SCC) {
    determineWaitForScore(T, SCCScore, Wait);
    return;
  }
  const bool IsVGPR = Context->TRI.isVectorRegister(Context->MRI, Reg);
  for (MCRegUnit RU : regunits(Reg))
    determineWaitForScore(
        T, IsVGPR ? getVMemScore(toVMEMID(RU), T) : getSGPRScore(RU, T),
        Wait);
}

void WaitcntBrackets::determineWaitForLDSDMA(InstCounterType T, VMEMID TID,
                                             AMDGPU::Waitcnt &Wait) const {
  assert(TID >= LDSDMA_BEGIN && TID < LDSDMA_END);
  determineWaitForScore(T, getVMemScore(TID, T), Wait);
}
void WaitcntBrackets::tryClearSCCWriteEvent(MachineInstr *Inst) {
  // An S_BARRIER_WAIT guarantees the SCC write of the matching
  // S_BARRIER_SIGNAL_ISFIRST_IMM has landed.
  if (PendingSCCWrite &&
      PendingSCCWrite->getOpcode() == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM &&
      PendingSCCWrite->getOperand(0).getImm() ==
          Inst->getOperand(0).getImm()) {
    WaitEventSet SCC_WRITE_PendingEvent(SCC_WRITE);
    // If this SCC write is the only pending KM_CNT event, clear it.
    if ((PendingEvents & Context->getWaitEvents(KM_CNT)) ==
        SCC_WRITE_PendingEvent) {
      SCCScore = 0;
      PendingEvents.remove(SCC_WRITE_PendingEvent);
      PendingSCCWrite = nullptr;
    }
  }
}

void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {
  for (auto T : inst_counter_types(Context->MaxCounter))
    applyWaitcnt(Wait, T);
}

void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
  const unsigned UB = getScoreUB(T);
  if (Count >= UB)
    return;
  if (Count != 0) {
    if (counterOutOfOrder(T))
      return;
    setScoreLB(T, std::max(getScoreLB(T), UB - Count));
  } else {
    setScoreLB(T, UB);
    PendingEvents.remove(Context->getWaitEvents(T));
  }

  // Waits on KM_CNT or LOAD_CNT may also resolve pending XCNT groups.
  if (T == KM_CNT && Count == 0 && hasPendingEvent(SMEM_GROUP)) {
    if (!hasMixedPendingEvents(X_CNT))
      applyWaitcnt(X_CNT, 0);
    else
      PendingEvents.remove(SMEM_GROUP);
  }
  if (T == LOAD_CNT && hasPendingEvent(VMEM_GROUP) &&
      !hasMixedPendingEvents(X_CNT)) {
    applyWaitcnt(X_CNT, Count);
  } else if (T == LOAD_CNT && Count == 0) {
    PendingEvents.remove(VMEM_GROUP);
  }
}

void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait,
                                   InstCounterType T) {
  unsigned Cnt = Wait.get(T);
  applyWaitcnt(T, Cnt);
}

// Where there are multiple types of event in the bracket of a counter, the
// decrement may go out of order.
bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const {
  // Scalar memory reads can always complete out of order.
  if ((T == Context->SmemAccessCounter && hasPendingEvent(SMEM_ACCESS)) ||
      (T == X_CNT && hasPendingEvent(SMEM_GROUP)))
    return true;

  if (T == LOAD_CNT) {
    // GLOBAL_INV completes in order with the other LOAD_CNT events, so it
    // does not count toward the mixed-event check.
    WaitEventSet Events = PendingEvents & Context->getWaitEvents(T);
    Events.remove(GLOBAL_INV_ACCESS);
    return Events.twoOrMore();
  }

  return hasMixedPendingEvents(T);
}
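// Explanatory note (added, not original commentary): "mixed events" force a
// conservative wait because a counter such as LGKM decrements for SMEM, LDS
// and message events, and those sources retire relative to each other in no
// guaranteed order. Once two different event types are in flight, the
// bracket arithmetic in determineWaitForScore() can no longer prove which
// operation a partial count refers to, so only waitcnt(0) is safe.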
char SIInsertWaitcntsLegacy::ID = 0;

char &llvm::SIInsertWaitcntsID = SIInsertWaitcntsLegacy::ID;

FunctionPass *llvm::createSIInsertWaitcntsPass() {
  return new SIInsertWaitcntsLegacy();
}

static bool updateOperandIfDifferent(MachineInstr &MI, AMDGPU::OpName OpName,
                                     unsigned NewEnc) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);
  assert(OpIdx >= 0);

  MachineOperand &MO = MI.getOperand(OpIdx);
  if (NewEnc == MO.getImm())
    return false;

  MO.setImm(NewEnc);
  return true;
}

/// Determine if \p Opcode is a gfx12+ single-counter S_WAIT_*CNT
/// instruction, and if so, which counter it is waiting on.
static std::optional<InstCounterType> counterTypeForInstr(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_WAIT_LOADCNT:
    return LOAD_CNT;
  case AMDGPU::S_WAIT_EXPCNT:
    return EXP_CNT;
  case AMDGPU::S_WAIT_STORECNT:
    return STORE_CNT;
  case AMDGPU::S_WAIT_SAMPLECNT:
    return SAMPLE_CNT;
  case AMDGPU::S_WAIT_BVHCNT:
    return BVH_CNT;
  case AMDGPU::S_WAIT_DSCNT:
    return DS_CNT;
  case AMDGPU::S_WAIT_KMCNT:
    return KM_CNT;
  case AMDGPU::S_WAIT_XCNT:
    return X_CNT;
  default:
    return {};
  }
}

bool WaitcntGenerator::promoteSoftWaitCnt(MachineInstr *Waitcnt) const {
  unsigned Opcode = SIInstrInfo::getNonSoftWaitcntOpcode(Waitcnt->getOpcode());
  if (Opcode == Waitcnt->getOpcode())
    return false;

  Waitcnt->setDesc(TII.get(Opcode));
  return true;
}
/// Combine consecutive S_WAITCNT and S_WAITCNT_VSCNT instructions that
/// precede \p It and follow \p OldWaitcntInstr, and apply any extra waits
/// from \p Wait that were added by previous passes.
bool WaitcntGeneratorPreGFX12::applyPreexistingWaitcnt(
    WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,
    AMDGPU::Waitcnt &Wait, MachineBasicBlock::instr_iterator It) const {
  assert(isNormalMode(MaxCounter));

  bool Modified = false;
  MachineInstr *WaitcntInstr = nullptr;
  MachineInstr *WaitcntVsCntInstr = nullptr;

  LLVM_DEBUG({
    dbgs() << "PreGFX12::applyPreexistingWaitcnt at: ";
    if (It == OldWaitcntInstr.getParent()->instr_end())
      dbgs() << "end of block\n";
    else
      dbgs() << *It;
  });

  for (auto &II :
       make_early_inc_range(make_range(OldWaitcntInstr.getIterator(), It))) {
    if (II.isMetaInstruction())
      continue;

    unsigned Opcode = SIInstrInfo::getNonSoftWaitcntOpcode(II.getOpcode());
    bool TrySimplify = Opcode != II.getOpcode() && !OptNone;

    // Update the required wait count. If this is a soft waitcnt (= it was
    // added by an earlier pass), it may be entirely removed.
    if (Opcode == AMDGPU::S_WAITCNT) {
      unsigned IEnc = II.getOperand(0).getImm();
      AMDGPU::Waitcnt OldWait = AMDGPU::decodeWaitcnt(IV, IEnc);
      if (TrySimplify)
        ScoreBrackets.simplifyWaitcnt(OldWait);
      Wait = Wait.combined(OldWait);

      // Merge consecutive waitcnts of the same type by erasing multiples.
      if (WaitcntInstr || (!Wait.hasWaitExceptStoreCnt() && TrySimplify)) {
        II.eraseFromParent();
        Modified = true;
      } else
        WaitcntInstr = &II;
    } else if (Opcode == AMDGPU::S_WAITCNT_lds_direct) {
      LLVM_DEBUG(dbgs() << "S_WAITCNT_lds_direct: " << II
                        << "Before: " << Wait << '\n';);
      ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, LDSDMA_BEGIN, Wait);
      II.eraseFromParent();
      Modified = true;
    } else if (Opcode == AMDGPU::WAIT_ASYNCMARK) {
      unsigned N = II.getOperand(0).getImm();
      AMDGPU::Waitcnt OldWait = ScoreBrackets.determineAsyncWait(N);
      Wait = Wait.combined(OldWait);
      II.eraseFromParent();
      Modified = true;
    } else {
      assert(Opcode == AMDGPU::S_WAITCNT_VSCNT);
      assert(II.getOperand(0).getReg() == AMDGPU::SGPR_NULL);

      unsigned OldVSCnt =
          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
      if (TrySimplify)
        ScoreBrackets.simplifyWaitcnt(InstCounterType::STORE_CNT, OldVSCnt);
      Wait.StoreCnt = std::min(Wait.StoreCnt, OldVSCnt);

      if (WaitcntVsCntInstr || (!Wait.hasWaitStoreCnt() && TrySimplify)) {
        II.eraseFromParent();
        Modified = true;
      } else
        WaitcntVsCntInstr = &II;
    }
  }

  if (WaitcntInstr) {
    Modified |= updateOperandIfDifferent(*WaitcntInstr, AMDGPU::OpName::simm16,
                                         AMDGPU::encodeWaitcnt(IV, Wait));
    Modified |= promoteSoftWaitCnt(WaitcntInstr);

    ScoreBrackets.applyWaitcnt(LOAD_CNT, Wait.LoadCnt);
    ScoreBrackets.applyWaitcnt(EXP_CNT, Wait.ExpCnt);
    ScoreBrackets.applyWaitcnt(DS_CNT, Wait.DsCnt);
    Wait.LoadCnt = ~0u;
    Wait.ExpCnt = ~0u;
    Wait.DsCnt = ~0u;

    LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
                                   << "New Instr at block end: "
                                   << *WaitcntInstr << '\n'
                          : dbgs() << "applied pre-existing waitcnt\n"
                                   << "Old Instr: " << *It
                                   << "New Instr: " << *WaitcntInstr << '\n');
  }

  if (WaitcntVsCntInstr) {
    Modified |= updateOperandIfDifferent(
        *WaitcntVsCntInstr, AMDGPU::OpName::simm16, Wait.get(STORE_CNT));
    Modified |= promoteSoftWaitCnt(WaitcntVsCntInstr);

    ScoreBrackets.applyWaitcnt(STORE_CNT, Wait.get(STORE_CNT));
    Wait.StoreCnt = ~0u;

    LLVM_DEBUG(It.isEnd()
                   ? dbgs() << "applied pre-existing waitcnt\n"
                            << "New Instr at block end: " << *WaitcntVsCntInstr
                            << '\n'
                   : dbgs() << "applied pre-existing waitcnt\n"
                            << "Old Instr: " << *It
                            << "New Instr: " << *WaitcntVsCntInstr << '\n');
  }

  return Modified;
}
/// Generate S_WAITCNT and/or S_WAITCNT_VSCNT instructions for the required
/// counters in \p Wait.
bool WaitcntGeneratorPreGFX12::createNewWaitcnt(
    MachineBasicBlock &Block, MachineBasicBlock::instr_iterator It,
    AMDGPU::Waitcnt Wait, const WaitcntBrackets &ScoreBrackets) {
  assert(isNormalMode(MaxCounter));

  bool Modified = false;
  const DebugLoc &DL = Block.findDebugLoc(It);

  // Helper that emits a decreasing sequence of waits from Outstanding - 1
  // down to Target, used by the waitcnt profiling expansion.
  auto EmitExpandedWaitcnt = [&](unsigned Outstanding, unsigned Target,
                                 auto EmitWaitcnt) {
    do {
      EmitWaitcnt(--Outstanding);
    } while (Outstanding > Target);
  };
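  // Expansion sketch (hedged, values invented): with three outstanding loads
  // (Outstanding = 3) and Target = 0, the helper emits waits for 2, 1 and 0
  // in that order. Each intermediate wait retires exactly one more load, so
  // a profiler can attribute stall cycles per memory operation instead of
  // seeing one opaque s_waitcnt 0.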
  // Waits for VMcnt, LGKMcnt and/or EXPcnt are encoded together into a
  // single instruction, while VScnt has its own instruction.
  if (Wait.hasWaitExceptStoreCnt()) {
    if (ExpandWaitcntProfiling) {
      // Counters that can complete out of order cannot be stepped down one
      // at a time; fall back to a single combined wait in that case.
      bool AnyOutOfOrder = false;
      for (auto CT : {LOAD_CNT, DS_CNT, EXP_CNT}) {
        unsigned WaitCnt = Wait.get(CT);
        if (WaitCnt != ~0u && ScoreBrackets.counterOutOfOrder(CT)) {
          AnyOutOfOrder = true;
          break;
        }
      }
      if (AnyOutOfOrder) {
        BuildMI(Block, It, DL, TII.get(AMDGPU::S_WAITCNT))
            .addImm(AMDGPU::encodeWaitcnt(IV, Wait));
        Modified = true;
      } else {
        for (auto CT : {LOAD_CNT, DS_CNT, EXP_CNT}) {
          unsigned WaitCnt = Wait.get(CT);
          if (WaitCnt == ~0u)
            continue;
          unsigned Outstanding = std::min(ScoreBrackets.getOutstanding(CT),
                                          getWaitCountMax(getLimits(), CT) - 1);
          EmitExpandedWaitcnt(Outstanding, WaitCnt, [&](unsigned Count) {
            AMDGPU::Waitcnt W;
            addWait(W, CT, Count);
            BuildMI(Block, It, DL, TII.get(AMDGPU::S_WAITCNT))
                .addImm(AMDGPU::encodeWaitcnt(IV, W));
          });
          Modified = true;
        }
      }
    } else {
      unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait);
      [[maybe_unused]] auto SWaitInst =
          BuildMI(Block, It, DL, TII.get(AMDGPU::S_WAITCNT)).addImm(Enc);
      Modified = true;

      LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
                 if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
                 dbgs() << "New Instr: " << *SWaitInst << '\n');
    }
  }

  if (Wait.hasWaitStoreCnt()) {
    assert(ST.hasVscnt());

    if (ExpandWaitcntProfiling &&
        !ScoreBrackets.counterOutOfOrder(STORE_CNT)) {
      unsigned Outstanding =
          std::min(ScoreBrackets.getOutstanding(STORE_CNT),
                   getWaitCountMax(getLimits(), STORE_CNT) - 1);
      EmitExpandedWaitcnt(Outstanding, Wait.StoreCnt, [&](unsigned Count) {
        BuildMI(Block, It, DL, TII.get(AMDGPU::S_WAITCNT_VSCNT))
            .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
            .addImm(Count);
      });
      Modified = true;
    } else {
      [[maybe_unused]] auto SWaitInst =
          BuildMI(Block, It, DL, TII.get(AMDGPU::S_WAITCNT_VSCNT))
              .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
              .addImm(Wait.StoreCnt);
      Modified = true;

      LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
                 if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
                 dbgs() << "New Instr: " << *SWaitInst << '\n');
    }
  }

  return Modified;
}

AMDGPU::Waitcnt
WaitcntGeneratorPreGFX12::getAllZeroWaitcnt(bool IncludeVSCnt) const {
  return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt && ST.hasVscnt() ? 0 : ~0u);
}

AMDGPU::Waitcnt
WaitcntGeneratorGFX12Plus::getAllZeroWaitcnt(bool IncludeVSCnt) const {
  unsigned ExpertVal = IsExpertMode ? 0 : ~0u;
  return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt ? 0 : ~0u, 0, 0, 0,
                         ~0u /*XCnt*/, ~0u /*AsyncCnt*/, ExpertVal,
                         ExpertVal);
}
/// Combine consecutive S_WAIT_*CNT instructions that precede \p It and
/// follow \p OldWaitcntInstr, and apply any extra waits from \p Wait.
bool WaitcntGeneratorGFX12Plus::applyPreexistingWaitcnt(
    WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,
    AMDGPU::Waitcnt &Wait, MachineBasicBlock::instr_iterator It) const {
  assert(!isNormalMode(MaxCounter));

  bool Modified = false;
  MachineInstr *WaitInstrs[NUM_EXTENDED_INST_CNTS] = {};
  MachineInstr *CombinedLoadDsCntInstr = nullptr;
  MachineInstr *CombinedStoreDsCntInstr = nullptr;
  MachineInstr *WaitcntDepctrInstr = nullptr;

  LLVM_DEBUG({
    dbgs() << "GFX12Plus::applyPreexistingWaitcnt at: ";
    if (It == OldWaitcntInstr.getParent()->instr_end())
      dbgs() << "end of block\n";
    else
      dbgs() << *It;
  });

  // Waits that were not produced by soft waits must be preserved even when
  // they look redundant.
  AMDGPU::Waitcnt RequiredWait;

  for (auto &II :
       make_early_inc_range(make_range(OldWaitcntInstr.getIterator(), It))) {
    if (II.isMetaInstruction())
      continue;

    unsigned Opcode = SIInstrInfo::getNonSoftWaitcntOpcode(II.getOpcode());
    bool TrySimplify = Opcode != II.getOpcode() && !OptNone;

    // S_WAITCNT is not supported on gfx12+.
    assert(Opcode != AMDGPU::S_WAITCNT);

    if (Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT) {
      unsigned OldEnc =
          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
      AMDGPU::Waitcnt OldWait = AMDGPU::decodeLoadcntDscnt(IV, OldEnc);
      if (TrySimplify)
        ScoreBrackets.simplifyWaitcnt(OldWait);
      else
        RequiredWait = RequiredWait.combined(OldWait);
      Wait = Wait.combined(OldWait);

      if (CombinedLoadDsCntInstr == nullptr) {
        CombinedLoadDsCntInstr = &II;
      } else {
        II.eraseFromParent();
        Modified = true;
      }
    } else if (Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT) {
      unsigned OldEnc =
          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
      AMDGPU::Waitcnt OldWait = AMDGPU::decodeStorecntDscnt(IV, OldEnc);
      if (TrySimplify)
        ScoreBrackets.simplifyWaitcnt(OldWait);
      else
        RequiredWait = RequiredWait.combined(OldWait);
      Wait = Wait.combined(OldWait);

      if (CombinedStoreDsCntInstr == nullptr) {
        CombinedStoreDsCntInstr = &II;
      } else {
        II.eraseFromParent();
        Modified = true;
      }
    } else if (Opcode == AMDGPU::S_WAITCNT_DEPCTR) {
      unsigned OldEnc =
          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
      AMDGPU::Waitcnt OldWait;
      OldWait.VaVdst = AMDGPU::DepCtr::decodeFieldVaVdst(OldEnc);
      OldWait.VmVsrc = AMDGPU::DepCtr::decodeFieldVmVsrc(OldEnc);
      if (TrySimplify)
        ScoreBrackets.simplifyWaitcnt(OldWait);
      Wait = Wait.combined(OldWait);

      if (WaitcntDepctrInstr == nullptr) {
        WaitcntDepctrInstr = &II;
      } else {
        // Keep the strictest fields from duplicated DEPCTR waits; the
        // field-merging details were elided in the listing.
        II.eraseFromParent();
        Modified = true;
      }
    } else if (Opcode == AMDGPU::S_WAITCNT_lds_direct) {
      ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, LDSDMA_BEGIN, Wait);
      II.eraseFromParent();
      Modified = true;
    } else if (Opcode == AMDGPU::WAIT_ASYNCMARK) {
      unsigned N = II.getOperand(0).getImm();
      Wait = Wait.combined(ScoreBrackets.determineAsyncWait(N));
      II.eraseFromParent();
      Modified = true;
    } else {
      std::optional<InstCounterType> CT = counterTypeForInstr(Opcode);
      assert(CT.has_value());
      unsigned OldCnt =
          TII.getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
      if (TrySimplify)
        ScoreBrackets.simplifyWaitcnt(CT.value(), OldCnt);
      addWait(Wait, CT.value(), OldCnt);
      if (!TrySimplify)
        addWait(RequiredWait, CT.value(), OldCnt);

      if (WaitInstrs[CT.value()] == nullptr) {
        WaitInstrs[CT.value()] = &II;
      } else {
        II.eraseFromParent();
        Modified = true;
      }
    }
  }

  ScoreBrackets.simplifyWaitcnt(Wait.combined(RequiredWait), Wait);
  Wait = Wait.combined(RequiredWait);

  if (CombinedLoadDsCntInstr) {
    // Only keep an S_WAIT_LOADCNT_DSCNT if both counters actually need
    // waiting on; otherwise the combined instruction would pay for a wait on
    // a counter it does not need.
    if (Wait.LoadCnt != ~0u && Wait.DsCnt != ~0u) {
      unsigned NewEnc = AMDGPU::encodeLoadcntDscnt(IV, Wait);
      Modified |= updateOperandIfDifferent(*CombinedLoadDsCntInstr,
                                           AMDGPU::OpName::simm16, NewEnc);
      Modified |= promoteSoftWaitCnt(CombinedLoadDsCntInstr);
      ScoreBrackets.applyWaitcnt(LOAD_CNT, Wait.LoadCnt);
      ScoreBrackets.applyWaitcnt(DS_CNT, Wait.DsCnt);
      Wait.LoadCnt = ~0u;
      Wait.DsCnt = ~0u;

      LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
                                     << "New Instr at block end: "
                                     << *CombinedLoadDsCntInstr << '\n'
                            : dbgs() << "applied pre-existing waitcnt\n"
                                     << "Old Instr: " << *It << "New Instr: "
                                     << *CombinedLoadDsCntInstr << '\n');
    } else {
      CombinedLoadDsCntInstr->eraseFromParent();
      Modified = true;
    }
  }

  if (CombinedStoreDsCntInstr) {
    if (Wait.StoreCnt != ~0u && Wait.DsCnt != ~0u) {
      unsigned NewEnc = AMDGPU::encodeStorecntDscnt(IV, Wait);
      Modified |= updateOperandIfDifferent(*CombinedStoreDsCntInstr,
                                           AMDGPU::OpName::simm16, NewEnc);
      Modified |= promoteSoftWaitCnt(CombinedStoreDsCntInstr);
      ScoreBrackets.applyWaitcnt(STORE_CNT, Wait.StoreCnt);
      ScoreBrackets.applyWaitcnt(DS_CNT, Wait.DsCnt);
      Wait.StoreCnt = ~0u;
      Wait.DsCnt = ~0u;

      LLVM_DEBUG(It.isEnd() ? dbgs() << "applied pre-existing waitcnt\n"
                                     << "New Instr at block end: "
                                     << *CombinedStoreDsCntInstr << '\n'
                            : dbgs() << "applied pre-existing waitcnt\n"
                                     << "Old Instr: " << *It << "New Instr: "
                                     << *CombinedStoreDsCntInstr << '\n');
    } else {
      CombinedStoreDsCntInstr->eraseFromParent();
      Modified = true;
    }
  }

  // Look for opportunities to convert pairs of separate wait instructions
  // into combined S_WAIT_LOADCNT_DSCNT / S_WAIT_STORECNT_DSCNT forms, then
  // erase the originals that became redundant. (The pairing logic itself was
  // elided in the listing.)
  SmallVector<MachineInstr **> WaitsToErase;
  for (MachineInstr **WI : WaitsToErase) {
    if (!*WI)
      continue;
    (*WI)->eraseFromParent();
    *WI = nullptr;
    Modified = true;
  }

  for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
    if (!WaitInstrs[CT])
      continue;

    unsigned NewCnt = Wait.get(CT);
    if (NewCnt != ~0u) {
      Modified |= updateOperandIfDifferent(*WaitInstrs[CT],
                                           AMDGPU::OpName::simm16, NewCnt);
      Modified |= promoteSoftWaitCnt(WaitInstrs[CT]);

      ScoreBrackets.applyWaitcnt(CT, NewCnt);
      setNoWait(Wait, CT);

      LLVM_DEBUG(It.isEnd()
                     ? dbgs() << "applied pre-existing waitcnt\n"
                              << "New Instr at block end: " << *WaitInstrs[CT]
                              << '\n'
                     : dbgs() << "applied pre-existing waitcnt\n"
                              << "Old Instr: " << *It
                              << "New Instr: " << *WaitInstrs[CT] << '\n');
    } else {
      WaitInstrs[CT]->eraseFromParent();
      Modified = true;
    }
  }

  if (WaitcntDepctrInstr) {
    unsigned Enc =
        TII.getNamedOperand(*WaitcntDepctrInstr, AMDGPU::OpName::simm16)
            ->getImm();
    Enc = AMDGPU::DepCtr::encodeFieldVaVdst(Enc, Wait.VaVdst);
    Enc = AMDGPU::DepCtr::encodeFieldVmVsrc(Enc, Wait.VmVsrc);
    Modified |= updateOperandIfDifferent(*WaitcntDepctrInstr,
                                         AMDGPU::OpName::simm16, Enc);

    LLVM_DEBUG(It.isEnd() ? dbgs() << "applyPreexistingWaitcnt\n"
                                   << "New Instr at block end: "
                                   << *WaitcntDepctrInstr << '\n'
                          : dbgs() << "applyPreexistingWaitcnt\n"
                                   << "Old Instr: " << *It << "New Instr: "
                                   << *WaitcntDepctrInstr << '\n');
  }

  return Modified;
}
/// Generate S_WAIT_*CNT instructions for any required counters in \p Wait.
bool WaitcntGeneratorGFX12Plus::createNewWaitcnt(
    MachineBasicBlock &Block, MachineBasicBlock::instr_iterator It,
    AMDGPU::Waitcnt Wait, const WaitcntBrackets &ScoreBrackets) {
  assert(!isNormalMode(MaxCounter));

  bool Modified = false;
  const DebugLoc &DL = Block.findDebugLoc(It);

  // Helper that emits a decreasing sequence of single-counter waits down to
  // Target (unlike the pre-GFX12 variant, Outstanding may already be 0).
  auto EmitExpandedWaitcnt = [&](unsigned Outstanding, unsigned Target,
                                 auto EmitWaitcnt) {
    for (unsigned I = Outstanding - 1; I > Target && I != ~0u; --I)
      EmitWaitcnt(I);
    EmitWaitcnt(Target);
  };

  if (ExpandWaitcntProfiling) {
    for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
      unsigned Count = Wait.get(CT);
      if (Count == ~0u)
        continue;
      if (ScoreBrackets.counterOutOfOrder(CT)) {
        // Cannot step an out-of-order counter down one wait at a time.
        BuildMI(Block, It, DL, TII.get(instrsForExtendedCounterTypes[CT]))
            .addImm(Count);
        Modified = true;
        setNoWait(Wait, CT);
        continue;
      }
      unsigned Outstanding = std::min(ScoreBrackets.getOutstanding(CT),
                                      getWaitCountMax(getLimits(), CT) - 1);
      EmitExpandedWaitcnt(Outstanding, Count, [&](unsigned Val) {
        BuildMI(Block, It, DL, TII.get(instrsForExtendedCounterTypes[CT]))
            .addImm(Val);
      });
      Modified = true;
      setNoWait(Wait, CT);
    }
  }

  // Check for opportunities to use combined wait instructions.
  if (Wait.DsCnt != ~0u) {
    MachineInstr *SWaitInst = nullptr;

    if (Wait.LoadCnt != ~0u) {
      unsigned Enc = AMDGPU::encodeLoadcntDscnt(IV, Wait);
      SWaitInst = BuildMI(Block, It, DL, TII.get(AMDGPU::S_WAIT_LOADCNT_DSCNT))
                      .addImm(Enc);
      Wait.LoadCnt = ~0u;
      Wait.DsCnt = ~0u;
    } else if (Wait.StoreCnt != ~0u) {
      unsigned Enc = AMDGPU::encodeStorecntDscnt(IV, Wait);
      SWaitInst =
          BuildMI(Block, It, DL, TII.get(AMDGPU::S_WAIT_STORECNT_DSCNT))
              .addImm(Enc);
      Wait.StoreCnt = ~0u;
      Wait.DsCnt = ~0u;
    }

    if (SWaitInst) {
      Modified = true;
      LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
                 if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
                 dbgs() << "New Instr: " << *SWaitInst << '\n');
    }
  }

  // Generate an instruction for any remaining counter that needs waiting on.
  for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
    unsigned Count = Wait.get(CT);
    if (Count == ~0u)
      continue;

    [[maybe_unused]] auto SWaitInst =
        BuildMI(Block, It, DL, TII.get(instrsForExtendedCounterTypes[CT]))
            .addImm(Count);
    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
               if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
               dbgs() << "New Instr: " << *SWaitInst << '\n');
  }

  if (Wait.hasWaitDepctr()) {
    unsigned Enc = AMDGPU::DepCtr::encodeFieldVaVdst(Wait.VaVdst);
    Enc = AMDGPU::DepCtr::encodeFieldVmVsrc(Enc, Wait.VmVsrc);
    [[maybe_unused]] auto SWaitInst =
        BuildMI(Block, It, DL, TII.get(AMDGPU::S_WAITCNT_DEPCTR)).addImm(Enc);
    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
               if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
               dbgs() << "New Instr: " << *SWaitInst << '\n');
  }

  return Modified;
}
/// Generate an s_waitcnt instruction to be placed before \p MI, if needed.
/// Instructions of a given type complete in order, but instructions of
/// different types can complete out of order; the pass relies on the
/// in-order completion and simply assigns a score to each memory access.
/// The active "score bracket", bounded by the per-counter lower and upper
/// bound scores, determines whether a wait is required and for what value.
/// If FlushFlags requests it, the vmcnt/dscnt counters are flushed here.
bool SIInsertWaitcnts::generateWaitcntInstBefore(
    MachineInstr &MI, WaitcntBrackets &ScoreBrackets,
    MachineInstr *OldWaitcntInstr, PreheaderFlushFlags FlushFlags) {
  setForceEmitWaitcnt();

  assert(!MI.isMetaInstruction());

  AMDGPU::Waitcnt Wait;
  const unsigned Opc = MI.getOpcode();

  // FIXME: This should have already been handled by the memory legalizer.
  switch (Opc) {
  case AMDGPU::BUFFER_WBINVL1:
  case AMDGPU::BUFFER_WBINVL1_SC:
  case AMDGPU::BUFFER_WBINVL1_VOL:
  case AMDGPU::BUFFER_GL0_INV:
  case AMDGPU::BUFFER_GL1_INV: {
    Wait.LoadCnt = 0;
    break;
  }
  // All waits must be resolved at call return.
  case AMDGPU::SI_RETURN_TO_EPILOG:
  case AMDGPU::SI_RETURN:
  case AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN:
  case AMDGPU::S_SETPC_B64_return: {
    ReturnInsts.insert(&MI);
    AMDGPU::Waitcnt AllZeroWait = WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false);
    // With extended wait counts, the LOAD_CNT wait can be skipped when no
    // plain VMEM access is pending.
    if (ST.hasExtendedWaitCounts() &&
        !ScoreBrackets.hasPendingEvent(VMEM_ACCESS))
      AllZeroWait.LoadCnt = ~0u;
    Wait = Wait.combined(AllZeroWait);
    break;
  }
  case AMDGPU::S_ENDPGM:
  case AMDGPU::S_ENDPGM_SAVED: {
    // Record whether a wait on outstanding stores is still required; it can
    // be skipped when the only pending stores are scratch.
    EndPgmInsts[&MI] = !ScoreBrackets.empty(STORE_CNT) &&
                       !ScoreBrackets.hasPendingEvent(SCRATCH_WRITE_ACCESS);
    break;
  }
  // Resolve vm waits before gs-done.
  case AMDGPU::S_SENDMSG:
  case AMDGPU::S_SENDMSGHALT: {
    if (ST.hasLegacyGeometry() &&
        ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_PreGFX11_) ==
         AMDGPU::SendMsg::ID_GS_DONE_PreGFX11)) {
      Wait.LoadCnt = 0;
    }
    break;
  }
  default:
    break;
  }

  // Export & GDS instructions do not read the EXEC mask until after the
  // export is granted (which can occur well after the instruction is issued).
  if (MI.modifiesRegister(AMDGPU::EXEC, &TRI)) {
    // Export and GDS are tracked individually; either may trigger a waitcnt
    // for EXEC.
    if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
        ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
        ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
        ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {
      Wait.ExpCnt = 0;
    }
  }

  // Wait for any pending GDS instruction to complete before any "Always GDS"
  // instruction.
  if (TII.isAlwaysGDS(Opc) && ScoreBrackets.hasPendingGDS())
    addWait(Wait, DS_CNT, ScoreBrackets.getPendingGDSWait());

  if (MI.isCall()) {
    CallInsts.insert(&MI);
    // The called function will insert a wait on everything in its prolog,
    // so we only need to worry about outstanding SMEM reads of the call
    // address and return address registers.
    Wait = AMDGPU::Waitcnt();

    const MachineOperand &CallAddrOp = TII.getCalleeOperand(MI);
    if (CallAddrOp.isReg()) {
      ScoreBrackets.determineWaitForPhysReg(
          SmemAccessCounter, CallAddrOp.getReg().asMCReg(), Wait);

      if (const auto *RtnAddrOp =
              TII.getNamedOperand(MI, AMDGPU::OpName::dst)) {
        ScoreBrackets.determineWaitForPhysReg(
            SmemAccessCounter, RtnAddrOp->getReg().asMCReg(), Wait);
      }
    }
  } else if (Opc == AMDGPU::S_BARRIER_WAIT) {
    ScoreBrackets.tryClearSCCWriteEvent(&MI);
  } else {
    // FIXME: Should not be relying on memoperands.
    // Look at the source operands of every instruction to see if any of them
    // results from a previous memory operation that affects its current
    // usage; if so, a wait is needed. For destination operands: a load def
    // needs a wait for WAW order, and a def used by a recent export/store
    // needs an exp_cnt wait for WAR order.
    for (const MachineMemOperand *Memop : MI.memoperands()) {
      const Value *Ptr = Memop->getValue();
      if (Memop->isStore()) {
        if (auto It = SLoadAddresses.find(Ptr); It != SLoadAddresses.end()) {
          addWait(Wait, SmemAccessCounter, 0);
          if (PDT.dominates(MI.getParent(), It->second))
            SLoadAddresses.erase(It);
        }
      }
      unsigned AS = Memop->getAddrSpace();
      if (AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS::FLAT_ADDRESS)
        continue;
      // No need to wait before a load from VMEM to LDS.
      if (TII.mayWriteLDSThroughDMA(MI))
        continue;

      unsigned TID = LDSDMA_BEGIN;
      // If the pointer and AA info are known, only wait on the LDS DMA slots
      // that may alias; otherwise wait on the common slot conservatively.
      if (Ptr && Memop->getAAInfo()) {
        const auto &LDSDMAStores = ScoreBrackets.getLDSDMAStores();
        for (unsigned I = 0, E = LDSDMAStores.size(); I != E; ++I) {
          if (MI.mayAlias(AA, *LDSDMAStores[I], true)) {
            if ((I + 1) >= NUM_LDSDMA) {
              // Slot overflow: fall back to the common slot.
              ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, TID, Wait);
              break;
            }
            ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, TID + I + 1, Wait);
          }
        }
      } else {
        ScoreBrackets.determineWaitForLDSDMA(LOAD_CNT, TID, Wait);
      }
      if (Memop->isStore()) {
        ScoreBrackets.determineWaitForLDSDMA(EXP_CNT, TID, Wait);
      }
    }

    // Loop over use and def operands.
    for (const MachineOperand &Op : MI.operands()) {
      if (!Op.isReg())
        continue;

      // If the instruction does not read the tied source, skip the operand.
      if (Op.isTied() && Op.isUse() && TII.doesNotReadTiedSource(MI))
        continue;

      MCPhysReg Reg = Op.getReg().asMCReg();
      const bool IsVGPR = TRI.isVectorRegister(MRI, Op.getReg());

      if (IsVGPR) {
        // Implicit VGPR defs and uses are never a part of the memory
        // instruction's description; they usually account for
        // super-register liveness.
        if (Op.isImplicit() && MI.mayLoadOrStore())
          continue;

        // RAW always needs an s_waitcnt. WAW needs an s_waitcnt unless the
        // previous write and this write are the same type of VMEM
        // instruction, in which case they are guaranteed (on some
        // architectures) to write their results in order. Also check
        // instructions where Point Sample Acceleration may apply.
        if (Op.isUse() || !updateVMCntOnly(MI) ||
            ScoreBrackets.hasOtherPendingVmemTypes(Reg, getVmemType(MI)) ||
            ScoreBrackets.hasPointSamplePendingVmemTypes(MI, Reg) ||
            !ST.hasVmemWriteVgprInOrder()) {
          ScoreBrackets.determineWaitForPhysReg(LOAD_CNT, Reg, Wait);
          ScoreBrackets.determineWaitForPhysReg(SAMPLE_CNT, Reg, Wait);
          ScoreBrackets.determineWaitForPhysReg(BVH_CNT, Reg, Wait);
          ScoreBrackets.clearVgprVmemTypes(Reg);
        }

        if (Op.isDef() || ScoreBrackets.hasPendingEvent(EXP_LDS_ACCESS)) {
          ScoreBrackets.determineWaitForPhysReg(EXP_CNT, Reg, Wait);
        }
        ScoreBrackets.determineWaitForPhysReg(DS_CNT, Reg, Wait);
      } else if (Op.getReg() == AMDGPU::SCC) {
        ScoreBrackets.determineWaitForPhysReg(KM_CNT, Reg, Wait);
      } else {
        ScoreBrackets.determineWaitForPhysReg(SmemAccessCounter, Reg, Wait);
      }

      if (ST.hasWaitXcnt() && Op.isDef())
        ScoreBrackets.determineWaitForPhysReg(X_CNT, Reg, Wait);
    }
  }

  // The subtarget may have an implicit S_WAITCNT 0 before barriers. If it
  // does not, we need the subtarget to be able to back off barriers in case
  // outstanding memory operations may raise exceptions; otherwise insert an
  // explicit S_WAITCNT 0 here.
  if (Opc == AMDGPU::S_BARRIER && !ST.hasAutoWaitcntBeforeBarrier() &&
      !ST.hasBackOffBarrier()) {
    Wait = Wait.combined(WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/true));
  }

  // Work around the hardware VCCZ bug: a VCCZ read with a pending SMEM
  // access needs the scalar counter drained first.
  if ((Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
      ST.hasReadVCCZBug() &&
      ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
    Wait.DsCnt = 0;
  }

  // Verify that the wait is actually needed.
  ScoreBrackets.simplifyWaitcnt(Wait);

  // Apply the now-final xcnt wait to the brackets eagerly; the surrounding
  // refinement logic was elided in the listing.
  ScoreBrackets.applyWaitcnt(Wait, X_CNT);

  if (ForceEmitZeroFlag)
    Wait = WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false);

  for (InstCounterType T : inst_counter_types(MaxCounter)) {
    if (!ForceEmitWaitcnt[T])
      continue;
    addWait(Wait, T, 0);
  }

  if (FlushFlags.FlushVmCnt) {
    if (ScoreBrackets.hasPendingEvent(LOAD_CNT))
      Wait.LoadCnt = 0;
    if (ScoreBrackets.hasPendingEvent(SAMPLE_CNT))
      Wait.SampleCnt = 0;
    if (ScoreBrackets.hasPendingEvent(BVH_CNT))
      Wait.BvhCnt = 0;
  }
  if (FlushFlags.FlushDsCnt && ScoreBrackets.hasPendingEvent(DS_CNT))
    Wait.DsCnt = 0;

  return generateWaitcnt(Wait, MI.getIterator(), *MI.getParent(),
                         ScoreBrackets, OldWaitcntInstr);
}

bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt Wait,
                                       MachineBasicBlock::instr_iterator It,
                                       MachineBasicBlock &Block,
                                       WaitcntBrackets &ScoreBrackets,
                                       MachineInstr *OldWaitcntInstr) {
  bool Modified = false;

  if (OldWaitcntInstr)
    // Try to merge the required wait with preexisting waitcnt instructions,
    // erasing redundant ones.
    Modified =
        WCG->applyPreexistingWaitcnt(ScoreBrackets, *OldWaitcntInstr, Wait, It);

  // ExpCnt can be merged into the following VINTERP instruction.
  if (Wait.ExpCnt != ~0u && It != Block.instr_end() &&
      SIInstrInfo::isVINTERP(*It)) {
    MachineOperand *WaitExp =
        TII.getNamedOperand(*It, AMDGPU::OpName::waitexp);
    if (Wait.ExpCnt < WaitExp->getImm()) {
      WaitExp->setImm(Wait.ExpCnt);
      Modified = true;
    }
    Wait.ExpCnt = ~0u;

    LLVM_DEBUG(dbgs() << "generateWaitcnt\n"
                      << "Update Instr: " << *It);
  }

  if (WCG->createNewWaitcnt(Block, It, Wait, ScoreBrackets))
    Modified = true;

  ScoreBrackets.applyWaitcnt(Wait);

  return Modified;
}
std::optional<WaitEventType>
SIInsertWaitcnts::getExpertSchedulingEventType(
    const MachineInstr &Inst) const {
  if (TII.isVALU(Inst)) {
    if (TII.isXDL(Inst))
      return VGPR_XDL_WRITE;
    if (TII.isTRANS(Inst))
      return VGPR_TRANS_WRITE;
    // The remaining VALU writes split into DP-MACC and core/side-MACC
    // classes; the exact predicate was elided in the listing:
    //   return VGPR_DPMACC_WRITE;
    return VGPR_CSMACC_WRITE;
  }

  // Memory operations that read their VGPR sources asynchronously.
  if (TII.isFLAT(Inst))
    return VGPR_FLAT_READ;
  if (TII.isDS(Inst))
    return VGPR_LDS_READ;
  if (TII.isVMEM(Inst) || TII.isVIMAGE(Inst) || TII.isVSAMPLE(Inst))
    return VGPR_VMEM_READ;

  return {};
}

bool SIInsertWaitcnts::isVmemAccess(const MachineInstr &MI) const {
  return (TII.isFLAT(MI) && TII.mayAccessVMEMThroughFlat(MI)) ||
         SIInstrInfo::isVMEM(MI);
}

// Return true if the next real instruction executed after \p It is an
// S_ENDPGM, looking through meta instructions and into the fallthrough
// block if needed.
bool SIInsertWaitcnts::isNextENDPGM(MachineBasicBlock::instr_iterator It,
                                    MachineBasicBlock *Block) const {
  auto BlockEnd = Block->getParent()->end();
  auto BlockIter = Block->getIterator();

  while (true) {
    if (It.isEnd()) {
      if (++BlockIter != BlockEnd) {
        It = BlockIter->instr_begin();
        continue;
      }
      return false;
    }

    if (!It->isMetaInstruction())
      break;

    ++It;
  }

  return It->getOpcode() == AMDGPU::S_ENDPGM;
}
// Insert a wait that is required to be placed after \p Inst (e.g. for
// precise-memory mode or "always GDS" instructions).
bool SIInsertWaitcnts::insertForcedWaitAfter(MachineInstr &Inst,
                                             MachineBasicBlock &Block,
                                             WaitcntBrackets &ScoreBrackets) {
  AMDGPU::Waitcnt Wait;
  bool NeedsEndPGMCheck = false;

  if (ST.isPreciseMemoryEnabled() &&
      (TII.isVMEM(Inst) || TII.isFLAT(Inst) || TII.isSMRD(Inst) ||
       TII.isDS(Inst)))
    Wait = WCG->getAllZeroWaitcnt(Inst.mayStore() &&
                                  !SIInstrInfo::isAtomicRet(Inst));

  if (TII.isAlwaysGDS(Inst.getOpcode())) {
    Wait.DsCnt = 0;
    NeedsEndPGMCheck = true;
  }

  ScoreBrackets.simplifyWaitcnt(Wait);

  auto SuccessorIt = std::next(Inst.getIterator());
  bool Result = generateWaitcnt(Wait, SuccessorIt, Block, ScoreBrackets,
                                /*OldWaitcntInstr=*/nullptr);

  if (Result && NeedsEndPGMCheck && isNextENDPGM(SuccessorIt, &Block)) {
    BuildMI(Block, SuccessorIt, Inst.getDebugLoc(), TII.get(AMDGPU::S_NOP))
        .addImm(0);
  }

  return Result;
}
WaitEventSet SIInsertWaitcnts::getEventsFor(const MachineInstr &Inst) const {
  WaitEventSet Events;

  if (const auto ET = getExpertSchedulingEventType(Inst))
    Events.insert(*ET);

  if (TII.isDS(Inst) && TII.usesLGKM_CNT(Inst)) {
    if (TII.isAlwaysGDS(Inst.getOpcode()) ||
        TII.hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
      Events.insert(GDS_ACCESS);
      Events.insert(GDS_GPR_LOCK);
    } else {
      Events.insert(LDS_ACCESS);
    }
  } else if (TII.isFLAT(Inst)) {
    if (isGFX12CacheInvOrWBInst(Inst.getOpcode())) {
      Events.insert(getVmemWaitEventType(Inst));
      return Events;
    }

    if (TII.mayAccessVMEMThroughFlat(Inst)) {
      if (ST.hasWaitXcnt())
        Events.insert(VMEM_GROUP);
      Events.insert(getVmemWaitEventType(Inst));
    }
    if (TII.mayAccessLDSThroughFlat(Inst))
      Events.insert(LDS_ACCESS);
  } else if (SIInstrInfo::isVMEM(Inst) &&
             (!AMDGPU::getMUBUFIsBufferInv(Inst.getOpcode()) ||
              Inst.getOpcode() == AMDGPU::BUFFER_WBL2)) {
    if (ST.hasWaitXcnt())
      Events.insert(VMEM_GROUP);
    Events.insert(getVmemWaitEventType(Inst));
    if (ST.vmemWriteNeedsExpWaitcnt() &&
        (Inst.mayStore() || SIInstrInfo::isAtomicRet(Inst)))
      Events.insert(VMW_GPR_LOCK);
  } else if (TII.isSMRD(Inst)) {
    if (ST.hasWaitXcnt())
      Events.insert(SMEM_GROUP);
    Events.insert(SMEM_ACCESS);
  } else if (SIInstrInfo::isLDSDIR(Inst)) {
    Events.insert(EXP_LDS_ACCESS);
  } else if (SIInstrInfo::isEXP(Inst)) {
    unsigned Imm = TII.getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
    if (Imm >= AMDGPU::Exp::ET_PARAM0 && Imm <= AMDGPU::Exp::ET_PARAM31)
      Events.insert(EXP_PARAM_ACCESS);
    else if (Imm >= AMDGPU::Exp::ET_POS0 && Imm <= AMDGPU::Exp::ET_POS_LAST)
      Events.insert(EXP_POS_ACCESS);
    else
      Events.insert(EXP_GPR_LOCK);
  } else if (Inst.getOpcode() == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM) {
    Events.insert(SCC_WRITE);
  } else {
    switch (Inst.getOpcode()) {
    case AMDGPU::S_SENDMSG:
    case AMDGPU::S_SENDMSG_RTN_B32:
    case AMDGPU::S_SENDMSG_RTN_B64:
    case AMDGPU::S_SENDMSGHALT:
      Events.insert(SQ_MESSAGE);
      break;
    case AMDGPU::S_MEMTIME:
    case AMDGPU::S_MEMREALTIME:
    case AMDGPU::S_GET_BARRIER_STATE_M0:
    case AMDGPU::S_GET_BARRIER_STATE_IMM:
      Events.insert(SMEM_ACCESS);
      break;
    }
  }

  return Events;
}
void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
                                               WaitcntBrackets *ScoreBrackets) {
  // If this is a memory access instruction, update the upper-bound of the
  // appropriate counter's bracket and the destination operand scores.
  WaitEventSet InstEvents = getEventsFor(Inst);
  for (WaitEventType E : wait_events()) {
    if (InstEvents.contains(E))
      ScoreBrackets->updateByEvent(E, Inst);
  }

  if (TII.isDS(Inst) && TII.usesLGKM_CNT(Inst)) {
    if (TII.isAlwaysGDS(Inst.getOpcode()) ||
        TII.hasModifiersSet(Inst, AMDGPU::OpName::gds))
      ScoreBrackets->setPendingGDS();
  } else if (TII.isFLAT(Inst)) {
    // A FLAT instruction that touches both LDS and VMEM counts against both
    // counters, which may complete in either order.
    if (TII.mayAccessVMEMThroughFlat(Inst) &&
        TII.mayAccessLDSThroughFlat(Inst))
      ScoreBrackets->setPendingFlat();
  } else if (isAsyncLdsDmaWrite(Inst)) {
    ScoreBrackets->updateByEvent(ASYNC_ACCESS, Inst);
  } else if (Inst.isCall()) {
    // A call acts as a wait on everything.
    ScoreBrackets->applyWaitcnt(WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false));
    ScoreBrackets->setStateOnFunctionEntryOrReturn();
  } else if (TII.isVINTERP(Inst)) {
    int64_t Imm = TII.getNamedOperand(Inst, AMDGPU::OpName::waitexp)->getImm();
    ScoreBrackets->applyWaitcnt(EXP_CNT, Imm);
  }
}
bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
                                 unsigned OtherScore) {
  unsigned MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
  unsigned OtherShifted =
      OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
  Score = std::max(MyShifted, OtherShifted);
  return OtherShifted > MyShifted;
}
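// Worked example (hedged, values invented): suppose this block has LB = 2,
// UB = 5 and the other has LB = 0, UB = 4. Then NewUB = 2 + max(5-2, 4-0)
// = 6, MyShift = 6 - 5 = 1 and OtherShift = 6 - 4 = 2. A local score of 4
// becomes 5, a score of 3 from the other block also becomes 5, and any
// score at or below its old LB collapses to 0 (already waited on).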
bool WaitcntBrackets::mergeAsyncMarks(
    ArrayRef<MergeInfo> MergeInfos,
    const SmallVectorImpl<CounterValueArray> &OtherMarks) {
  bool StrictDom = false;

  if (AsyncMarks.empty() && OtherMarks.empty())
    return StrictDom;

  // Align both mark lists to a common size before merging element-wise.
  auto MaxSize = (unsigned)std::max(AsyncMarks.size(), OtherMarks.size());
  MaxSize = std::min(MaxSize, MaxAsyncMarks);

  if (AsyncMarks.size() > MaxSize)
    AsyncMarks.erase(AsyncMarks.begin(),
                     AsyncMarks.begin() + (AsyncMarks.size() - MaxSize));

  // Pad the front with zero marks, which merge as "nothing pending".
  constexpr CounterValueArray ZeroMark{};
  AsyncMarks.insert(AsyncMarks.begin(), MaxSize - AsyncMarks.size(), ZeroMark);

  LLVM_DEBUG({
    dbgs() << "Before merge:\n";
    for (const auto &Mark : AsyncMarks) {
      // ... (per-mark dump elided in the listing)
    }
    dbgs() << "Other marks:\n";
    for (const auto &Mark : OtherMarks) {
      // ...
    }
  });

  // Merge the newest marks pair-wise from the back of both lists.
  unsigned OtherSize = OtherMarks.size();
  unsigned OurSize = AsyncMarks.size();
  unsigned MergeCount = std::min(OtherSize, OurSize);
  for (unsigned Idx = 1; Idx <= MergeCount; ++Idx)
    for (auto T : inst_counter_types(Context->MaxCounter))
      StrictDom |= mergeScore(MergeInfos[T], AsyncMarks[OurSize - Idx][T],
                              OtherMarks[OtherSize - Idx][T]);

  LLVM_DEBUG({
    dbgs() << "After merge:\n";
    for (const auto &Mark : AsyncMarks) {
      // ...
    }
  });

  return StrictDom;
}
bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
  bool StrictDom = false;

  // Create entries for registers only the other bracket knows about, so the
  // per-entry merge below visits them.
  for (auto K : Other.VMem.keys())
    VMem.try_emplace(K);
  for (auto K : Other.SGPRs.keys())
    SGPRs.try_emplace(K);

  MergeInfo MergeInfos[NUM_INST_CNTS];
  for (auto T : inst_counter_types(Context->MaxCounter)) {
    // Merge the event flags for this counter.
    const WaitEventSet &EventsForT = Context->getWaitEvents(T);
    const WaitEventSet OldEvents = PendingEvents & EventsForT;
    const WaitEventSet OtherEvents = Other.PendingEvents & EventsForT;
    if (!OldEvents.contains(OtherEvents))
      StrictDom = true;
    PendingEvents |= OtherEvents;

    // Merge the scores for this counter.
    const unsigned MyPending = ScoreUBs[T] - ScoreLBs[T];
    const unsigned OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
    const unsigned NewUB = ScoreLBs[T] + std::max(MyPending, OtherPending);
    if (NewUB < ScoreLBs[T])
      report_fatal_error("waitcnt score overflow");

    MergeInfo &M = MergeInfos[T];
    M.OldLB = ScoreLBs[T];
    M.OtherLB = Other.ScoreLBs[T];
    M.MyShift = NewUB - ScoreUBs[T];
    M.OtherShift = NewUB - Other.ScoreUBs[T];

    ScoreUBs[T] = NewUB;

    if (T == LOAD_CNT)
      StrictDom |= mergeScore(M, LastFlatLoadCnt, Other.LastFlatLoadCnt);

    if (T == DS_CNT) {
      StrictDom |= mergeScore(M, LastFlatDsCnt, Other.LastFlatDsCnt);
      StrictDom |= mergeScore(M, LastGDS, Other.LastGDS);
    }

    if (T == KM_CNT) {
      StrictDom |= mergeScore(M, SCCScore, Other.SCCScore);
      if (Other.hasPendingEvent(SCC_WRITE)) {
        if (!OldEvents.contains(SCC_WRITE)) {
          PendingSCCWrite = Other.PendingSCCWrite;
        } else if (PendingSCCWrite != Other.PendingSCCWrite) {
          PendingSCCWrite = nullptr;
        }
      }
    }

    for (auto &[RegID, Info] : VMem)
      StrictDom |= mergeScore(M, Info.Scores[T], Other.getVMemScore(RegID, T));

    if (isSmemCounter(T)) {
      for (auto &[RegID, Info] : SGPRs) {
        auto It = Other.SGPRs.find(RegID);
        unsigned OtherScore =
            (It != Other.SGPRs.end()) ? It->second.get(T) : 0;
        StrictDom |= mergeScore(M, Info.get(T), OtherScore);
      }
    }
  }

  for (auto &[TID, Info] : VMem) {
    if (auto It = Other.VMem.find(TID); It != Other.VMem.end()) {
      unsigned char NewVmemTypes = Info.VMEMTypes | It->second.VMEMTypes;
      StrictDom |= NewVmemTypes != Info.VMEMTypes;
      Info.VMEMTypes = NewVmemTypes;
    }
  }

  StrictDom |= mergeAsyncMarks(MergeInfos, Other.AsyncMarks);
  for (auto T : inst_counter_types(Context->MaxCounter))
    StrictDom |= mergeScore(MergeInfos[T], AsyncScore[T], Other.AsyncScore[T]);

  purgeEmptyTrackingData();

  return StrictDom;
}
static bool isWaitInstr(MachineInstr &Inst) {
  unsigned Opcode = SIInstrInfo::getNonSoftWaitcntOpcode(Inst.getOpcode());
  return Opcode == AMDGPU::S_WAITCNT ||
         (Opcode == AMDGPU::S_WAITCNT_VSCNT && Inst.getOperand(0).isReg() &&
          Inst.getOperand(0).getReg() == AMDGPU::SGPR_NULL) ||
         Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT ||
         Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT ||
         Opcode == AMDGPU::S_WAITCNT_lds_direct ||
         Opcode == AMDGPU::WAIT_ASYNCMARK ||
         counterTypeForInstr(Opcode).has_value();
}

void SIInsertWaitcnts::setSchedulingMode(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator It,
                                         bool ExpertMode) const {
  // Write the scheduling-mode field of the MODE register: 2 selects expert
  // scheduling, 0 restores the default. (The hwreg encoding operand was
  // elided in the listing.)
  BuildMI(MBB, It, DebugLoc(), TII.get(AMDGPU::S_SETREG_IMM32_B32))
      .addImm(ExpertMode ? 2 : 0);
}
// Encapsulates the hardware workarounds needed when reading the VCCZ status
// bit.
class VCCZWorkaround {
  const WaitcntBrackets &ScoreBrackets;
  const GCNSubtarget &ST;
  const SIInstrInfo &TII;
  const SIRegisterInfo &TRI;
  bool VCCZCorruptionBug = false;
  bool VCCZNotUpdatedByPartialWrites = false;

  // Whether VCCZ may be stale and must be recomputed before it is read.
  bool MustRecomputeVCCZ = true;

public:
  VCCZWorkaround(const WaitcntBrackets &ScoreBrackets, const GCNSubtarget &ST,
                 const SIInstrInfo &TII, const SIRegisterInfo &TRI)
      : ScoreBrackets(ScoreBrackets), ST(ST), TII(TII), TRI(TRI) {
    VCCZCorruptionBug = ST.hasReadVCCZBug();
    VCCZNotUpdatedByPartialWrites = !ST.partialVCCWritesUpdateVCCZ();
  }

  // If \p MI reads VCCZ while it may be stale, rewrite VCC to itself to
  // force the hardware to refresh VCCZ. Returns true if code was changed.
  bool tryRecomputeVCCZ(MachineInstr &MI) {
    // Fast path: neither workaround applies to this subtarget.
    if (!VCCZCorruptionBug && !VCCZNotUpdatedByPartialWrites)
      return false;

    // VCCZ can be corrupted while an SMEM read keeps LGKM_CNT busy.
    MustRecomputeVCCZ |= VCCZCorruptionBug && TII.isSMRD(MI);

    // Partial (32-bit) writes to VCC do not update VCCZ on some subtargets.
    std::optional<bool> PartiallyWritesToVCCOpt;
    auto PartiallyWritesToVCC = [](MachineInstr &MI) {
      return MI.definesRegister(AMDGPU::VCC_LO, nullptr) ||
             MI.definesRegister(AMDGPU::VCC_HI, nullptr);
    };
    if (VCCZNotUpdatedByPartialWrites) {
      PartiallyWritesToVCCOpt = PartiallyWritesToVCC(MI);
      MustRecomputeVCCZ |= *PartiallyWritesToVCCOpt;
    }

    if (!MustRecomputeVCCZ)
      return false;

    if (!ScoreBrackets.hasPendingEvent(SMEM_ACCESS) || !VCCZCorruptionBug) {
      // A full write to VCC brings VCCZ up to date again.
      if (!PartiallyWritesToVCCOpt)
        PartiallyWritesToVCCOpt = PartiallyWritesToVCC(MI);
      bool FullyWritesToVCC = !*PartiallyWritesToVCCOpt &&
                              MI.definesRegister(AMDGPU::VCC, nullptr);

      bool UpdatesVCCZ = FullyWritesToVCC || (!VCCZNotUpdatedByPartialWrites &&
                                              *PartiallyWritesToVCCOpt);
      if (UpdatesVCCZ)
        MustRecomputeVCCZ = false;
    }

    if (!MustRecomputeVCCZ || !MI.readsRegister(AMDGPU::VCCZ, &TRI))
      return false;

    // Restore VCCZ by writing VCC to itself.
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
            TII.get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
            ST.isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC)
        .addReg(ST.isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC);
    MustRecomputeVCCZ = false;
    return true;
  }
};
// Generate s_waitcnt instructions where needed within a block.
bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
                                            MachineBasicBlock &Block,
                                            WaitcntBrackets &ScoreBrackets) {
  bool Modified = false;

  LLVM_DEBUG({
    dbgs() << "*** Begin Block: ";
    Block.printName(dbgs());
    ScoreBrackets.dump();
  });

  VCCZWorkaround VCCZW(ScoreBrackets, ST, TII, TRI);

  // Track pre-existing waitcnts that precede the instruction being
  // processed.
  MachineInstr *OldWaitcntInstr = nullptr;

  for (MachineBasicBlock::instr_iterator Iter = Block.instr_begin(),
                                         E = Block.instr_end();
       Iter != E; ++Iter) {
    MachineInstr &Inst = *Iter;

    // Remember pre-existing wait instructions so they can be combined with
    // the waits this pass computes, rather than emitted blindly.
    if (isWaitInstr(Inst) ||
        (IsExpertMode && Inst.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR)) {
      if (!OldWaitcntInstr)
        OldWaitcntInstr = &Inst;
      continue;
    }

    PreheaderFlushFlags FlushFlags;
    if (Block.getFirstTerminator() == Inst)
      FlushFlags = isPreheaderToFlush(Block, ScoreBrackets);

    // Generate an s_waitcnt instruction to be placed before Inst, if needed.
    Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr,
                                          FlushFlags);
    OldWaitcntInstr = nullptr;

    if (Inst.getOpcode() == AMDGPU::ASYNCMARK) {
      // Record the mark; the wait it requests is materialized when the
      // matching WAIT_ASYNCMARK is processed.
      ScoreBrackets.recordAsyncMark(Inst);
      continue;
    }

    if (TII.isSMRD(Inst)) {
      for (const MachineMemOperand *Memop : Inst.memoperands()) {
        // No need to handle invariant loads when avoiding WAR conflicts, as
        // there cannot be a vector store to the same memory location.
        if (!Memop->isInvariant()) {
          const Value *Ptr = Memop->getValue();
          SLoadAddresses.insert(std::pair(Ptr, Inst.getParent()));
        }
      }
    }

    updateEventWaitcntAfter(Inst, &ScoreBrackets);

    Modified |= insertForcedWaitAfter(Inst, Block, ScoreBrackets);

    LLVM_DEBUG({
      Inst.print(dbgs());
      ScoreBrackets.dump();
    });

    Modified |= VCCZW.tryRecomputeVCCZ(Inst);
  }

  // Flush the counters at the end of the block if this is a preheader that
  // requested a flush.
  AMDGPU::Waitcnt Wait;
  if (Block.getFirstTerminator() == Block.end()) {
    PreheaderFlushFlags FlushFlags = isPreheaderToFlush(Block, ScoreBrackets);
    if (FlushFlags.FlushVmCnt) {
      if (ScoreBrackets.hasPendingEvent(LOAD_CNT))
        Wait.LoadCnt = 0;
      if (ScoreBrackets.hasPendingEvent(SAMPLE_CNT))
        Wait.SampleCnt = 0;
      if (ScoreBrackets.hasPendingEvent(BVH_CNT))
        Wait.BvhCnt = 0;
    }
    if (FlushFlags.FlushDsCnt && ScoreBrackets.hasPendingEvent(DS_CNT))
      Wait.DsCnt = 0;
  }

  // Combine or remove any redundant waitcnts at the end of the block.
  Modified |= generateWaitcnt(Wait, Block.instr_end(), Block, ScoreBrackets,
                              OldWaitcntInstr);

  LLVM_DEBUG({
    dbgs() << "*** End Block: ";
    Block.printName(dbgs());
    ScoreBrackets.dump();
  });

  return Modified;
}
// Remove S_WAIT_XCNT_soft instructions that sit between back-to-back atomic
// RMW operations, where the later soft wait makes the earlier one redundant.
bool SIInsertWaitcnts::removeRedundantSoftXcnts(MachineBasicBlock &Block) {
  if (Block.size() <= 1)
    return false;

  bool Modified = false;
  MachineInstr *LastAtomicWithSoftXcnt = nullptr;

  for (MachineInstr &MI : make_early_inc_range(drop_begin(Block))) {
    bool IsLDS = SIInstrInfo::isDS(MI);
    // A plain load or store in between breaks the pattern.
    if (!IsLDS && (MI.mayLoad() ^ MI.mayStore()))
      LastAtomicWithSoftXcnt = nullptr;

    bool IsAtomicRMW = MI.mayLoad() && MI.mayStore();
    MachineInstr &PrevMI = *MI.getPrevNode();

    if (PrevMI.getOpcode() == AMDGPU::S_WAIT_XCNT_soft && IsAtomicRMW) {
      if (LastAtomicWithSoftXcnt) {
        PrevMI.eraseFromParent();
        Modified = true;
      }
      LastAtomicWithSoftXcnt = &MI;
    }
  }

  return Modified;
}

// Return the flush flags for the given machine basic block if it is a
// preheader of a loop in which we want to flush counters.
PreheaderFlushFlags
SIInsertWaitcnts::isPreheaderToFlush(MachineBasicBlock &MBB,
                                     const WaitcntBrackets &ScoreBrackets) {
  auto [Iterator, IsInserted] =
      PreheadersToFlush.try_emplace(&MBB, PreheaderFlushFlags());
  if (!IsInserted)
    return Iterator->second;

  MachineBasicBlock *Succ = MBB.getSingleSuccessor();
  if (!Succ)
    return PreheaderFlushFlags();

  MachineLoop *Loop = MLI.getLoopFor(Succ);
  if (!Loop)
    return PreheaderFlushFlags();

  if (Loop->getLoopPreheader() == &MBB) {
    Iterator->second = getPreheaderFlushFlags(Loop, ScoreBrackets);
    return Iterator->second;
  }

  return PreheaderFlushFlags();
}
bool SIInsertWaitcnts::isVMEMOrFlatVMEM(const MachineInstr &MI) const {
  if (SIInstrInfo::isFLAT(MI))
    return TII.mayAccessVMEMThroughFlat(MI);
  return SIInstrInfo::isVMEM(MI);
}

bool SIInsertWaitcnts::isDSRead(const MachineInstr &MI) const {
  return SIInstrInfo::isDS(MI) && MI.mayLoad();
}

// Return true if \p MI is a store that increments DSCNT. The exact predicate
// was elided in the listing; DS stores and LDS-DMA writes both qualify.
bool SIInsertWaitcnts::mayStoreIncrementingDSCNT(
    const MachineInstr &MI) const {
  return (SIInstrInfo::isDS(MI) && MI.mayStore()) ||
         TII.mayWriteLDSThroughDMA(MI);
}
// Determine which counters should be flushed in the preheader of \p ML,
// based on how VGPRs loaded or read outside the loop are used inside it.
PreheaderFlushFlags
SIInsertWaitcnts::getPreheaderFlushFlags(MachineLoop *ML,
                                         const WaitcntBrackets &Brackets) {
  PreheaderFlushFlags Flags;
  bool HasVMemLoad = false;
  bool HasVMemStore = false;
  bool UsesVgprVMEMLoadedOutside = false;
  bool UsesVgprDSReadOutside = false;
  bool VMemInvalidated = false;

  bool TrackSimpleDSOpt = ST.hasExtendedWaitCounts();
  DenseSet<MCRegUnit> VgprUse;
  DenseSet<MCRegUnit> VgprDefVMEM;
  DenseSet<MCRegUnit> VgprDefDS;

  // Track the position of the most recent DS read of each regunit, so we can
  // tell whether a later flush point still covers it.
  DenseMap<MCRegUnit, unsigned> LastDSReadPositionMap;
  unsigned DSReadPosition = 0;
  bool IsSingleBlock = ML->getNumBlocks() == 1;
  bool TrackDSFlushPoint = ST.hasExtendedWaitCounts() && IsSingleBlock;
  unsigned LastDSFlushPosition = 0;

  for (MachineBasicBlock *MBB : ML->blocks()) {
    for (MachineInstr &MI : *MBB) {
      if (isVMEMOrFlatVMEM(MI)) {
        HasVMemLoad |= MI.mayLoad();
        HasVMemStore |= MI.mayStore();
      }

      if (mayStoreIncrementingDSCNT(MI)) {
        // A DSCNT-incrementing store makes DS flush tracking unreliable.
        if (VMemInvalidated)
          return Flags;
        TrackSimpleDSOpt = false;
        TrackDSFlushPoint = false;
      }

      bool IsDSRead = isDSRead(MI);
      if (IsDSRead)
        ++DSReadPosition;

      auto updateDSReadFlushTracking = [&](MCRegUnit RU) {
        if (!TrackDSFlushPoint)
          return;
        if (auto It = LastDSReadPositionMap.find(RU);
            It != LastDSReadPositionMap.end()) {
          LastDSFlushPosition = std::max(LastDSFlushPosition, It->second);
        }
      };

      for (const MachineOperand &Op : MI.all_uses()) {
        if (Op.isDebug() || !TRI.isVectorRegister(MRI, Op.getReg()))
          continue;
        for (MCRegUnit RU : TRI.regunits(Op.getReg().asMCReg())) {
          if (VgprDefVMEM.contains(RU)) {
            VMemInvalidated = true;
            TrackSimpleDSOpt = false;
          }
          if (VMemInvalidated && !TrackSimpleDSOpt && !TrackDSFlushPoint)
            return Flags;
          VgprUse.insert(RU);
          updateDSReadFlushTracking(RU);

          // If the same VGPR was loaded by VMEM or read from LDS outside the
          // loop, flushing before entry lets the loop reuse the counter.
          VMEMID ID = toVMEMID(RU);
          if (Brackets.hasPendingVMEM(ID, LOAD_CNT))
            UsesVgprVMEMLoadedOutside = true;
          else if (Brackets.hasPendingVMEM(ID, DS_CNT))
            UsesVgprDSReadOutside = true;
        }
      }

      // VMEM loads inside the loop invalidate the VGPRs they redefine.
      if (isVMEMOrFlatVMEM(MI) && MI.mayLoad()) {
        for (const MachineOperand &Op : MI.all_defs()) {
          for (MCRegUnit RU : TRI.regunits(Op.getReg().asMCReg())) {
            if (VgprUse.contains(RU)) {
              VMemInvalidated = true;
              TrackSimpleDSOpt = false;
            }
            if (VMemInvalidated && !TrackSimpleDSOpt && !TrackDSFlushPoint)
              return Flags;
            VgprDefVMEM.insert(RU);
          }
        }
      }

      if (IsDSRead || TrackDSFlushPoint) {
        for (const MachineOperand &Op : MI.all_defs()) {
          if (!TRI.isVectorRegister(MRI, Op.getReg()))
            continue;
          for (MCRegUnit RU : TRI.regunits(Op.getReg().asMCReg())) {
            VgprDefDS.insert(RU);
            updateDSReadFlushTracking(RU);
            if (TrackDSFlushPoint)
              LastDSReadPositionMap[RU] = DSReadPosition;
          }
        }
      }
    }
  }

  // Flush the VM counter if a VGPR loaded from VMEM outside the loop is used
  // inside it, and the loop contains VMEM accesses that keep the counter
  // busy.
  if (!VMemInvalidated && UsesVgprVMEMLoadedOutside &&
      ((!ST.hasVscnt() && HasVMemStore && !HasVMemLoad) ||
       (HasVMemLoad && ST.hasVmemWriteVgprInOrder())))
    Flags.FlushVmCnt = true;

  // The simple DS optimization applies when DS reads from outside the loop
  // feed the loop body and tracking was never invalidated.
  bool SimpleDSOpt = TrackSimpleDSOpt && UsesVgprDSReadOutside;

  // Otherwise, flush if there are DS reads that no later flush point covers.
  bool HasUnflushedDSReads = DSReadPosition > LastDSFlushPosition;
  bool DSFlushPointPrefetch =
      TrackDSFlushPoint && UsesVgprDSReadOutside && HasUnflushedDSReads;

  if (SimpleDSOpt || DSFlushPointPrefetch)
    Flags.FlushDsCnt = true;

  return Flags;
}
bool SIInsertWaitcntsLegacy::runOnMachineFunction(MachineFunction &MF) {
  auto &MLI = getAnalysis<MachineLoopInfoWrapperPass>().getLI();
  auto &PDT =
      getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
  AliasAnalysis *AA = nullptr;
  if (auto *AAR = getAnalysisIfAvailable<AAResultsWrapperPass>())
    AA = &AAR->getAAResults();

  return SIInsertWaitcnts(MLI, PDT, AA, MF).run();
}

PreservedAnalyses
SIInsertWaitcntsPass::run(MachineFunction &MF,
                          MachineFunctionAnalysisManager &MFAM) {
  auto &MLI = MFAM.getResult<MachineLoopAnalysis>(MF);
  auto &PDT = MFAM.getResult<MachinePostDominatorTreeAnalysis>(MF);
  AliasAnalysis *AA =
      &MFAM.getResult<FunctionAnalysisManagerMachineFunctionProxy>(MF)
           .getManager()
           .getResult<AAManager>(MF.getFunction());

  if (!SIInsertWaitcnts(MLI, PDT, AA, MF).run())
    return PreservedAnalyses::all();

  return getMachineFunctionPassPreservedAnalyses()
      .preserveSet<CFGAnalyses>()
      .preserve<AAManager>();
}
3645bool SIInsertWaitcnts::run() {
3653 if (ST.hasExtendedWaitCounts()) {
3654 IsExpertMode = ST.hasExpertSchedulingMode() &&
3662 WCG = std::make_unique<WaitcntGeneratorGFX12Plus>(MF, MaxCounter, Limits,
3671 SmemAccessCounter = getCounterFromEvent(SMEM_ACCESS);
3675 MachineBasicBlock &EntryBB = MF.
front();
3685 while (
I != EntryBB.
end() &&
I->isMetaInstruction())
3688 if (
ST.hasExtendedWaitCounts()) {
3696 if (!
ST.hasImageInsts() &&
3701 TII.get(instrsForExtendedCounterTypes[CT]))
3714 auto NonKernelInitialState = std::make_unique<WaitcntBrackets>(
this);
3715 NonKernelInitialState->setStateOnFunctionEntryOrReturn();
3716 BlockInfos[&EntryBB].Incoming = std::move(NonKernelInitialState);
3723 for (
auto *
MBB : ReversePostOrderTraversal<MachineFunction *>(&MF))
3726 std::unique_ptr<WaitcntBrackets> Brackets;
3731 for (
auto BII = BlockInfos.
begin(), BIE = BlockInfos.
end(); BII != BIE;
3733 MachineBasicBlock *
MBB = BII->first;
3734 BlockInfo &BI = BII->second;
3740 Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
3742 *Brackets = *BI.Incoming;
3745 Brackets = std::make_unique<WaitcntBrackets>(
this);
3750 Brackets->~WaitcntBrackets();
3751 new (Brackets.get()) WaitcntBrackets(
this);
3755 if (
ST.hasWaitXcnt())
3757 Modified |= insertWaitcntInBlock(MF, *
MBB, *Brackets);
    if (Brackets->hasPendingEvent()) {
      BlockInfo *MoveBracketsToSucc = nullptr;
      for (MachineBasicBlock *Succ : MBB->successors()) {
        auto *SuccBII = BlockInfos.find(Succ);
        BlockInfo &SuccBI = SuccBII->second;
        if (!SuccBI.Incoming) {
          SuccBI.Dirty = true;
          if (SuccBII <= BII) {
            // ... (request another pass over the block list)
          }
          if (!MoveBracketsToSucc) {
            MoveBracketsToSucc = &SuccBI;
          } else {
            SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
          }
        } else {
          LLVM_DEBUG(dbgs() << "Try to merge " /* ... */);
          if (SuccBI.Incoming->merge(*Brackets)) {
            SuccBI.Dirty = true;
            if (SuccBII <= BII) {
              // ...
            }
          }
        }
      }
      if (MoveBracketsToSucc)
        MoveBracketsToSucc->Incoming = std::move(Brackets);
    }
  }
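  // On subtargets with scalar stores, the scalar cache must be written back
  // before the wave terminates, or a following wave reusing the same scratch
  // memory could observe stale data.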
  if (ST.hasScalarStores()) {
    SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;
    bool HaveScalarStores = false;

    for (MachineBasicBlock &MBB : MF) {
      for (MachineInstr &MI : MBB) {
        if (!HaveScalarStores && TII.isScalarStore(MI))
          HaveScalarStores = true;

        if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
            MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
          EndPgmBlocks.push_back(&MBB);
      }
    }

    if (HaveScalarStores) {
      // Insert s_dcache_wb at wave termination points if there were any
      // scalar stores and the cache has not already been flushed.
      for (MachineBasicBlock *MBB : EndPgmBlocks) {
        bool SeenDCacheWB = false;
        for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
             I != E; ++I) {
          if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
            SeenDCacheWB = true;
          else if (TII.isScalarStore(*I))
            SeenDCacheWB = false;

          if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
               I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
              !SeenDCacheWB) {
            // ... (emit an S_DCACHE_WB before the terminator)
          }
        }
      }
    }
  }
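  // Expert scheduling mode is enabled for the function body but switched off
  // around calls and returns so that callees and callers run in the default
  // mode.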
  // ...
  while (I != EntryBB.end() && I->isMetaInstruction())
    ++I;
  setSchedulingMode(EntryBB, I, true);

  for (MachineInstr *MI : CallInsts) {
    MachineBasicBlock &MBB = *MI->getParent();
    setSchedulingMode(MBB, MI, false);
    setSchedulingMode(MBB, std::next(MI->getIterator()), true);
  }

  for (MachineInstr *MI : ReturnInsts)
    setSchedulingMode(*MI->getParent(), MI, false);
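  // Release the wave's VGPRs at each program end: with dynamic VGPRs via
  // "s_alloc_vgpr 0", otherwise on GFX11+ via the DEALLOC_VGPRS message
  // (preceded by an s_nop where the subtarget requires it).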
  if (MF.getInfo<SIMachineFunctionInfo>()->isDynamicVGPREnabled()) {
    for (auto [MI, _] : EndPgmInsts) {
      BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
              TII.get(AMDGPU::S_ALLOC_VGPR))
          .addImm(0);
    }
  } else if (!WCG->isOptNone() &&
             ST.getGeneration() >= AMDGPUSubtarget::GFX11 &&
             (MF.getFrameInfo().hasCalls() ||
              ST.getOccupancyWithNumVGPRs(
                  TRI.getNumUsedPhysRegs(MRI, AMDGPU::VGPR_32RegClass),
                  /* ... */) <
                  AMDGPU::IsaInfo::getMaxWavesPerEU(&ST))) {
    for (auto [MI, Flag] : EndPgmInsts) {
      // ...
      if (ST.requiresNopBeforeDeallocVGPRs()) {
        BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
                TII.get(AMDGPU::S_NOP))
            .addImm(0);
      }
      BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
              TII.get(AMDGPU::S_SENDMSG))
          .addImm(AMDGPU::SendMsg::ID_DEALLOC_VGPRS_GFX11Plus);
      // ...
    }
  }