#define DEBUG_TYPE "si-insert-waitcnts"

DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE "-forceexp",
              "Force emit s_waitcnt expcnt(0) instrs");
DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE "-forcelgkm",
              "Force emit s_waitcnt lgkmcnt(0) instrs");
DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE "-forcevm",
              "Force emit s_waitcnt vmcnt(0) instrs");

static cl::opt<bool> ForceEmitZeroFlag(
    "amdgpu-waitcnt-forcezero",
    cl::desc("Force all waitcnt instrs to be emitted as "
             "s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
    cl::init(false), cl::Hidden);

#define CNT_MASK(t) (1u << (t))

enum InstCounterType { VM_CNT = 0, LGKM_CNT, EXP_CNT, VS_CNT, NUM_INST_CNTS };

namespace llvm {
template <> struct enum_iteration_traits<InstCounterType> {
  static constexpr bool is_iterable = true;
};
} // namespace llvm

auto inst_counter_types() { return enum_seq(VM_CNT, NUM_INST_CNTS); }

using RegInterval = std::pair<int, int>;
struct HardwareLimits {
  unsigned VmcntMax;
  unsigned ExpcntMax;
  unsigned LgkmcntMax;
  unsigned VscntMax;
};

struct RegisterEncoding {
  unsigned VGPR0;
  unsigned VGPRL;
  unsigned SGPR0;
  unsigned SGPRL;
};

enum WaitEventType {
  VMEM_ACCESS,       // vector-memory read & write
  VMEM_READ_ACCESS,  // vector-memory read
  VMEM_WRITE_ACCESS, // vector-memory write
  LDS_ACCESS,        // lds read & write
  GDS_ACCESS,        // gds read & write
  SQ_MESSAGE,        // send message
  SMEM_ACCESS,       // scalar-memory read & write
  EXP_GPR_LOCK,      // export holding on its data src
  GDS_GPR_LOCK,      // GDS holding on its data and addr src
  EXP_POS_ACCESS,    // write to export position
  EXP_PARAM_ACCESS,  // write to export parameter
  VMW_GPR_LOCK,      // vector-memory write holding on its data src
  EXP_LDS_ACCESS,    // read by ldsdir counting as export
  NUM_WAIT_EVENTS,
};
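// Indexed by InstCounterType in the order VM_CNT, LGKM_CNT, EXP_CNT, VS_CNT:
// each entry is the set of WaitEventTypes that are retired by waiting on the
// corresponding counter.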
static const unsigned WaitEventMaskForInst[NUM_INST_CNTS] = {
    (1 << VMEM_ACCESS) | (1 << VMEM_READ_ACCESS),
    (1 << SMEM_ACCESS) | (1 << LDS_ACCESS) | (1 << GDS_ACCESS) |
        (1 << SQ_MESSAGE),
    (1 << EXP_GPR_LOCK) | (1 << GDS_GPR_LOCK) | (1 << VMW_GPR_LOCK) |
        (1 << EXP_PARAM_ACCESS) | (1 << EXP_POS_ACCESS) |
        (1 << EXP_LDS_ACCESS),
    (1 << VMEM_WRITE_ACCESS)};
enum RegisterMapping {
  SQ_MAX_PGM_VGPRS = 512, // Maximum programmable VGPRs across all targets.
  AGPR_OFFSET = 256,      // Maximum programmable ArchVGPRs across all targets.
  SQ_MAX_PGM_SGPRS = 256, // Maximum programmable SGPRs across all targets.
  NUM_EXTRA_VGPRS = 1,    // A reserved slot for DS.
  EXTRA_VGPR_LDS = 0,     // An artificial register to track LDS writes.
  NUM_ALL_VGPRS = SQ_MAX_PGM_VGPRS + NUM_EXTRA_VGPRS, // Where SGPR starts.
};
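// Enumerate different types of result-returning VMEM operations. Although
// s_waitcnt orders them all with a single vmcnt counter, in the absence of
// s_waitcnt only instructions of the same VmemType are guaranteed to write
// their results in order, so a waitcnt for an in-order WAW can be relaxed
// when every outstanding VMEM operation on that vgpr has the same type.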
enum VmemType {
  // BUF instructions and MIMG instructions without a sampler.
  VMEM_NOSAMPLER,
  // MIMG instructions with a sampler.
  VMEM_SAMPLER,
  // BVH instructions.
  VMEM_BVH
};

// Return true if the given opcode is a VMEM access that only updates VM_CNT.
static bool updateVMCntOnly(const MachineInstr &Inst) {
  return SIInstrInfo::isVMEM(Inst) || SIInstrInfo::isFLATGlobal(Inst) ||
         SIInstrInfo::isFLATScratch(Inst);
}

VmemType getVmemType(const MachineInstr &Inst) {
  assert(updateVMCntOnly(Inst));
  if (!SIInstrInfo::isMIMG(Inst))
    return VMEM_NOSAMPLER;
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Inst.getOpcode());
  const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo =
      AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
  return BaseInfo->BVH ? VMEM_BVH
                       : BaseInfo->Sampler ? VMEM_SAMPLER : VMEM_NOSAMPLER;
}
void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) {
  switch (T) {
  case VM_CNT:
    Wait.VmCnt = std::min(Wait.VmCnt, Count);
    break;
  case EXP_CNT:
    Wait.ExpCnt = std::min(Wait.ExpCnt, Count);
    break;
  case LGKM_CNT:
    Wait.LgkmCnt = std::min(Wait.LgkmCnt, Count);
    break;
  case VS_CNT:
    Wait.VsCnt = std::min(Wait.VsCnt, Count);
    break;
  default:
    llvm_unreachable("bad InstCounterType");
  }
}
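// WaitcntBrackets maintains, for every wait counter, a score bracket
// [ScoreLB, ScoreUB]: each outstanding event gets a monotonically increasing
// score, and every register remembers the score of the last event touching
// it. The counter value that must be waited on for a register is then the
// distance from its score to the bracket's upper bound.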
class WaitcntBrackets {
public:
  WaitcntBrackets(const GCNSubtarget *SubTarget, HardwareLimits Limits,
                  RegisterEncoding Encoding)
      : ST(SubTarget), Limits(Limits), Encoding(Encoding) {}

  unsigned getWaitCountMax(InstCounterType T) const {
    switch (T) {
    case VM_CNT:
      return Limits.VmcntMax;
    case LGKM_CNT:
      return Limits.LgkmcntMax;
    case EXP_CNT:
      return Limits.ExpcntMax;
    case VS_CNT:
      return Limits.VscntMax;
    default:
      break;
    }
    return 0;
  }
  unsigned getScoreLB(InstCounterType T) const {
    assert(T < NUM_INST_CNTS);
    return ScoreLBs[T];
  }

  unsigned getScoreUB(InstCounterType T) const {
    assert(T < NUM_INST_CNTS);
    return ScoreUBs[T];
  }

  unsigned getScoreRange(InstCounterType T) const {
    return getScoreUB(T) - getScoreLB(T);
  }
  // Mapping from event to counter.
  InstCounterType eventCounter(WaitEventType E) const {
    for (auto T : inst_counter_types()) {
      if (WaitEventMaskForInst[T] & (1 << E))
        return T;
    }
    llvm_unreachable("event type has no associated counter");
  }

  unsigned getRegScore(int GprNo, InstCounterType T) const {
    if (GprNo < NUM_ALL_VGPRS) {
      return VgprScores[T][GprNo];
    }
    assert(T == LGKM_CNT);
    return SgprScores[GprNo - NUM_ALL_VGPRS];
  }
  bool counterOutOfOrder(InstCounterType T) const;
  void simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const;
  void simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
  void determineWait(InstCounterType T, int RegNo, AMDGPU::Waitcnt &Wait) const;
  void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
  void applyWaitcnt(InstCounterType T, unsigned Count);
  unsigned hasPendingEvent() const { return PendingEvents; }
  unsigned hasPendingEvent(WaitEventType E) const {
    return PendingEvents & (1 << E);
  }
  unsigned hasPendingEvent(InstCounterType T) const {
    unsigned HasPending = PendingEvents & WaitEventMaskForInst[T];
    assert((HasPending != 0) == (getScoreRange(T) != 0));
    return HasPending;
  }

  bool hasMixedPendingEvents(InstCounterType T) const {
    unsigned Events = hasPendingEvent(T);
    // Return true if more than one bit is set in Events.
    return Events & (Events - 1);
  }
  bool hasPendingFlat() const {
    return ((LastFlat[LGKM_CNT] > ScoreLBs[LGKM_CNT] &&
             LastFlat[LGKM_CNT] <= ScoreUBs[LGKM_CNT]) ||
            (LastFlat[VM_CNT] > ScoreLBs[VM_CNT] &&
             LastFlat[VM_CNT] <= ScoreUBs[VM_CNT]));
  }

  void setPendingFlat() {
    LastFlat[VM_CNT] = ScoreUBs[VM_CNT];
    LastFlat[LGKM_CNT] = ScoreUBs[LGKM_CNT];
  }

  // Return true if there might be pending writes to the specified vgpr by VMEM
  // instructions with types different from V.
  bool hasOtherPendingVmemTypes(int GprNo, VmemType V) const {
    assert(GprNo < NUM_ALL_VGPRS);
    return VgprVmemTypes[GprNo] & ~(1 << V);
  }

  void clearVgprVmemTypes(int GprNo) {
    assert(GprNo < NUM_ALL_VGPRS);
    VgprVmemTypes[GprNo] = 0;
  }
  RegInterval getRegInterval(const MachineInstr *MI, const SIInstrInfo *TII,
                             const MachineRegisterInfo *MRI,
                             const SIRegisterInfo *TRI, unsigned OpNo) const;

  bool merge(const WaitcntBrackets &Other);

  void updateByEvent(const SIInstrInfo *TII, const SIRegisterInfo *TRI,
                     const MachineRegisterInfo *MRI, WaitEventType E,
                     MachineInstr &MI);

  void print(raw_ostream &);
  void dump() { print(dbgs()); }

private:
  struct MergeInfo {
    unsigned OldLB;
    unsigned OtherLB;
    unsigned MyShift;
    unsigned OtherShift;
  };
  static bool mergeScore(const MergeInfo &M, unsigned &Score,
                         unsigned OtherScore);

  void setScoreLB(InstCounterType T, unsigned Val) {
    assert(T < NUM_INST_CNTS);
    ScoreLBs[T] = Val;
  }

  void setScoreUB(InstCounterType T, unsigned Val) {
    assert(T < NUM_INST_CNTS);
    ScoreUBs[T] = Val;

    if (T != EXP_CNT)
      return;

    if (getScoreRange(EXP_CNT) > getWaitCountMax(EXP_CNT))
      ScoreLBs[EXP_CNT] = ScoreUBs[EXP_CNT] - getWaitCountMax(EXP_CNT);
  }

  void setRegScore(int GprNo, InstCounterType T, unsigned Val) {
    if (GprNo < NUM_ALL_VGPRS) {
      VgprUB = std::max(VgprUB, GprNo);
      VgprScores[T][GprNo] = Val;
    } else {
      assert(T == LGKM_CNT);
      SgprUB = std::max(SgprUB, GprNo - NUM_ALL_VGPRS);
      SgprScores[GprNo - NUM_ALL_VGPRS] = Val;
    }
  }

  void setExpScore(const MachineInstr *MI, const SIInstrInfo *TII,
                   const SIRegisterInfo *TRI, const MachineRegisterInfo *MRI,
                   unsigned OpNo, unsigned Val);
  const GCNSubtarget *ST = nullptr;
  HardwareLimits Limits = {};
  RegisterEncoding Encoding = {};
  unsigned ScoreLBs[NUM_INST_CNTS] = {0};
  unsigned ScoreUBs[NUM_INST_CNTS] = {0};
  unsigned PendingEvents = 0;
  // Remember the last flat memory operation.
  unsigned LastFlat[NUM_INST_CNTS] = {0};
  // wait_cnt scores for every vgpr.
  // Keep track of the VgprUB and SgprUB to make merge at join efficient.
  int VgprUB = -1;
  int SgprUB = -1;
  unsigned VgprScores[NUM_INST_CNTS][NUM_ALL_VGPRS] = {{0}};
  // Wait cnt scores for every sgpr, only lgkmcnt is relevant.
  unsigned SgprScores[SQ_MAX_PGM_SGPRS] = {0};
  // Bitmask of the VmemTypes of VMEM instructions that might have a pending
  // write to each vgpr.
  unsigned char VgprVmemTypes[NUM_ALL_VGPRS] = {0};
};
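// The pass itself: visits the machine basic blocks in reverse post order,
// carrying a WaitcntBrackets scoreboard across blocks, and inserts or
// tightens s_waitcnt instructions wherever an operand may still have an
// outstanding memory, LDS/GDS or export operation in flight.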
class SIInsertWaitcnts : public MachineFunctionPass {
private:
  const GCNSubtarget *ST = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  AMDGPU::IsaVersion IV;

  DenseSet<MachineInstr *> TrackedWaitcntSet;
  DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
  DenseMap<MachineBasicBlock *, bool> PreheadersToFlush;
  MachineLoopInfo *MLI;
  MachinePostDominatorTree *PDT;

  struct BlockInfo {
    MachineBasicBlock *MBB;
    std::unique_ptr<WaitcntBrackets> Incoming;
    bool Dirty = true;

    explicit BlockInfo(MachineBasicBlock *MBB) : MBB(MBB) {}
  };

  MapVector<MachineBasicBlock *, BlockInfo> BlockInfos;

  // ForceEmitZeroWaitcnts: force all waitcnts insts to be s_waitcnt 0
  // because of the amdgpu-waitcnt-forcezero flag.
  bool ForceEmitZeroWaitcnts;
  bool ForceEmitWaitcnt[NUM_INST_CNTS];

public:
  static char ID;

  SIInsertWaitcnts() : MachineFunctionPass(ID) {
    (void)ForceExpCounter;
    (void)ForceLgkmCounter;
    (void)ForceVMCounter;
  }

  bool shouldFlushVmCnt(MachineLoop *ML, WaitcntBrackets &Brackets);
  bool isPreheaderToFlush(MachineBasicBlock &MBB,
                          WaitcntBrackets &ScoreBrackets);
  bool isVMEMOrFlatVMEM(const MachineInstr &MI) const;
  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert wait instructions";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineLoopInfo>();
    AU.addRequired<MachinePostDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool isForceEmitWaitcnt() const {
    for (auto T : inst_counter_types())
      if (ForceEmitWaitcnt[T])
        return true;
    return false;
  }

  void setForceEmitWaitcnt() {
// For non-debug builds, ForceEmitWaitcnt has been initialized to false;
// for debug builds, get the debug counter info and adjust if need be.
#ifndef NDEBUG
    if (DebugCounter::isCounterSet(ForceExpCounter) &&
        DebugCounter::shouldExecute(ForceExpCounter)) {
      ForceEmitWaitcnt[EXP_CNT] = true;
    } else {
      ForceEmitWaitcnt[EXP_CNT] = false;
    }

    if (DebugCounter::isCounterSet(ForceLgkmCounter) &&
        DebugCounter::shouldExecute(ForceLgkmCounter)) {
      ForceEmitWaitcnt[LGKM_CNT] = true;
    } else {
      ForceEmitWaitcnt[LGKM_CNT] = false;
    }

    if (DebugCounter::isCounterSet(ForceVMCounter) &&
        DebugCounter::shouldExecute(ForceVMCounter)) {
      ForceEmitWaitcnt[VM_CNT] = true;
    } else {
      ForceEmitWaitcnt[VM_CNT] = false;
    }
#endif // NDEBUG
  }
  // Return the appropriate VMEM_*_ACCESS type for Inst, which must be a VMEM
  // or FLAT instruction.
  WaitEventType getVmemWaitEventType(const MachineInstr &Inst) const {
    assert(SIInstrInfo::isVMEM(Inst) || SIInstrInfo::isFLAT(Inst));
    if (!ST->hasVscnt())
      return VMEM_ACCESS;
    if (Inst.mayStore() && !SIInstrInfo::isAtomicRet(Inst))
      return VMEM_WRITE_ACCESS;
    return VMEM_READ_ACCESS;
  }

  AMDGPU::Waitcnt allZeroWaitcnt() const {
    return AMDGPU::Waitcnt::allZero(ST->hasVscnt());
  }

  bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const;
  bool mayAccessLDSThroughFlat(const MachineInstr &MI) const;
  bool generateWaitcntInstBefore(MachineInstr &MI,
                                 WaitcntBrackets &ScoreBrackets,
                                 MachineInstr *OldWaitcntInstr,
                                 bool FlushVmCnt);
  bool generateWaitcntBlockEnd(MachineBasicBlock &Block,
                               WaitcntBrackets &ScoreBrackets,
                               MachineInstr *OldWaitcntInstr);
  bool generateWaitcnt(AMDGPU::Waitcnt Wait,
                       MachineBasicBlock::instr_iterator It,
                       MachineBasicBlock &Block, WaitcntBrackets &ScoreBrackets,
                       MachineInstr *OldWaitcntInstr);
  void updateEventWaitcntAfter(MachineInstr &Inst,
                               WaitcntBrackets *ScoreBrackets);
  bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
                            WaitcntBrackets &ScoreBrackets);
  bool applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
                               MachineInstr &OldWaitcntInstr,
                               AMDGPU::Waitcnt &Wait,
                               MachineBasicBlock::instr_iterator It) const;
};
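// Map a register operand to the closed-open interval of scoreboard slots it
// occupies. VGPRs (and AGPRs, offset by AGPR_OFFSET) land in
// [0, NUM_ALL_VGPRS); SGPRs are placed after NUM_ALL_VGPRS; operands spanning
// several registers widen the interval by their size in 32-bit units.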
RegInterval WaitcntBrackets::getRegInterval(const MachineInstr *MI,
                                            const SIInstrInfo *TII,
                                            const MachineRegisterInfo *MRI,
                                            const SIRegisterInfo *TRI,
                                            unsigned OpNo) const {
  const MachineOperand &Op = MI->getOperand(OpNo);
  if (!TRI->isInAllocatableClass(Op.getReg()))
    return {-1, -1};

  RegInterval Result;

  unsigned Reg = TRI->getEncodingValue(AMDGPU::getMCReg(Op.getReg(), *ST));

  if (TRI->isVectorRegister(*MRI, Op.getReg())) {
    assert(Reg >= Encoding.VGPR0 && Reg <= Encoding.VGPRL);
    Result.first = Reg - Encoding.VGPR0;
    if (TRI->isAGPR(*MRI, Op.getReg()))
      Result.first += AGPR_OFFSET;
    assert(Result.first >= 0 && Result.first < SQ_MAX_PGM_VGPRS);
  } else if (TRI->isSGPRReg(*MRI, Op.getReg())) {
    assert(Reg >= Encoding.SGPR0 && Reg < SQ_MAX_PGM_SGPRS);
    Result.first = Reg - Encoding.SGPR0 + NUM_ALL_VGPRS;
    assert(Result.first >= NUM_ALL_VGPRS &&
           Result.first < SQ_MAX_PGM_SGPRS + NUM_ALL_VGPRS);
  } else {
    return {-1, -1};
  }

  const TargetRegisterClass *RC = TII->getOpRegClass(*MI, OpNo);
  unsigned Size = TRI->getRegSizeInBits(*RC);
  Result.second = Result.first + ((Size + 16) / 32);

  return Result;
}
void WaitcntBrackets::setExpScore(const MachineInstr *MI,
                                  const SIInstrInfo *TII,
                                  const SIRegisterInfo *TRI,
                                  const MachineRegisterInfo *MRI, unsigned OpNo,
                                  unsigned Val) {
  RegInterval Interval = getRegInterval(MI, TII, MRI, TRI, OpNo);
  assert(TRI->isVectorRegister(*MRI, MI->getOperand(OpNo).getReg()));
  for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
    setRegScore(RegNo, EXP_CNT, Val);
  }
}
// MUBUF and FLAT LDS DMA operations need a wait on vmcnt before LDS written
// can be accessed. A load from LDS to VMEM does not need a wait.
static bool mayWriteLDSThroughDMA(const MachineInstr &MI) {
  return SIInstrInfo::mayWriteLDSThroughDMA(MI) &&
         MI.getOpcode() != AMDGPU::BUFFER_STORE_LDS_DWORD;
}
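// Record that Inst raised the wait event E: advance the upper bound of the
// counter E maps to, remember the event as pending, and stamp the new score
// onto every register the event makes in-flight (destinations for loads,
// data/address sources for exports, GDS and stores).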
void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
                                    const SIRegisterInfo *TRI,
                                    const MachineRegisterInfo *MRI,
                                    WaitEventType E, MachineInstr &Inst) {
  InstCounterType T = eventCounter(E);
  unsigned CurrScore = getScoreUB(T) + 1;
  if (CurrScore == 0)
    report_fatal_error("InsertWaitcnt score wraparound");
  // PendingEvents and ScoreUB need to be updated regardless of whether this
  // event changes the score of a register or not: e.g. vm_cnt for a
  // buffer-store, or lgkm_cnt for a send-message.
  PendingEvents |= 1 << E;
  setScoreUB(T, CurrScore);

  if (T == EXP_CNT) {
    // Put score on the source vgprs. If this is a store, just use those
    // specific register(s).
    if (TII->isDS(Inst) && (Inst.mayStore() || Inst.mayLoad())) {
      // All GDS operations must protect their address register (same as
      // export).
      int AddrOpIdx =
          AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::addr);
      if (AddrOpIdx != -1) {
        setExpScore(&Inst, TII, TRI, MRI, AddrOpIdx, CurrScore);
      }

      if (Inst.mayStore()) {
        if (AMDGPU::hasNamedOperand(Inst.getOpcode(), AMDGPU::OpName::data0)) {
          setExpScore(&Inst, TII, TRI, MRI,
                      AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                                 AMDGPU::OpName::data0),
                      CurrScore);
        }
        if (AMDGPU::hasNamedOperand(Inst.getOpcode(), AMDGPU::OpName::data1)) {
          setExpScore(&Inst, TII, TRI, MRI,
                      AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                                 AMDGPU::OpName::data1),
                      CurrScore);
        }
      } else if (SIInstrInfo::isAtomicRet(Inst) &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_INIT &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_V &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_BR &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_P &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_BARRIER &&
                 Inst.getOpcode() != AMDGPU::DS_APPEND &&
                 Inst.getOpcode() != AMDGPU::DS_CONSUME &&
                 Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
        for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
          const MachineOperand &Op = Inst.getOperand(I);
          if (Op.isReg() && !Op.isDef() &&
              TRI->isVectorRegister(*MRI, Op.getReg())) {
            setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
          }
        }
      }
    } else if (TII->isFLAT(Inst)) {
      if (Inst.mayStore() || SIInstrInfo::isAtomicRet(Inst)) {
        setExpScore(&Inst, TII, TRI, MRI,
                    AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                               AMDGPU::OpName::data),
                    CurrScore);
      }
    } else if (TII->isMIMG(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      } else if (SIInstrInfo::isAtomicRet(Inst)) {
        setExpScore(&Inst, TII, TRI, MRI,
                    AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                               AMDGPU::OpName::data),
                    CurrScore);
      }
    } else if (TII->isMTBUF(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      }
    } else if (TII->isMUBUF(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      } else if (SIInstrInfo::isAtomicRet(Inst)) {
        setExpScore(&Inst, TII, TRI, MRI,
                    AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                               AMDGPU::OpName::data),
                    CurrScore);
      }
    } else if (TII->isLDSDIR(Inst)) {
      // LDSDIR instructions attach the score to the destination.
      setExpScore(&Inst, TII, TRI, MRI,
                  AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                             AMDGPU::OpName::vdst),
                  CurrScore);
    } else {
      if (TII->isEXP(Inst)) {
        // For export the destination registers are really temps that
        // can be used as the actual source after export patching, so
        // we need to treat them like sources and set the EXP_CNT score.
        for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
          MachineOperand &DefMO = Inst.getOperand(I);
          if (DefMO.isReg() && DefMO.isDef() &&
              TRI->isVGPR(*MRI, DefMO.getReg())) {
            setRegScore(
                TRI->getEncodingValue(AMDGPU::getMCReg(DefMO.getReg(), *ST)),
                EXP_CNT, CurrScore);
          }
        }
      }
      for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = Inst.getOperand(I);
        if (Op.isReg() && !Op.isDef() &&
            TRI->isVectorRegister(*MRI, Op.getReg())) {
          setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
        }
      }
    }
#if 0 // TODO: check if this is handled by MUBUF code above.
  } else if (Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORD ||
             Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX2 ||
             Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX4) {
    MachineOperand *MO = TII->getNamedOperand(Inst, AMDGPU::OpName::data);
    unsigned OpNo; //TODO: find the OpNo for this operand;
    RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, OpNo);
    for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
      setRegScore(RegNo + NUM_ALL_VGPRS, t, CurrScore);
    }
#endif
  } else /* LGKM_CNT || VM_CNT || VS_CNT */ {
    // Match the score to the destination registers.
    for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
      const MachineOperand &Op = Inst.getOperand(I);
      if (!Op.isReg() || !Op.isDef())
        continue;
      RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, I);
      if (T == VM_CNT) {
        if (Interval.first >= NUM_ALL_VGPRS)
          continue;
        if (updateVMCntOnly(Inst)) {
          VmemType V = getVmemType(Inst);
          for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo)
            VgprVmemTypes[RegNo] |= 1 << V;
        }
      }
      for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
        setRegScore(RegNo, T, CurrScore);
      }
    }
    if (Inst.mayStore() && (TII->isDS(Inst) || mayWriteLDSThroughDMA(Inst))) {
      setRegScore(SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS, T, CurrScore);
    }
  }
}
void WaitcntBrackets::print(raw_ostream &OS) {
  OS << '\n';
  for (auto T : inst_counter_types()) {
    unsigned SR = getScoreRange(T);

    switch (T) {
    case VM_CNT:
      OS << "    VM_CNT(" << SR << "): ";
      break;
    case LGKM_CNT:
      OS << "    LGKM_CNT(" << SR << "): ";
      break;
    case EXP_CNT:
      OS << "    EXP_CNT(" << SR << "): ";
      break;
    case VS_CNT:
      OS << "    VS_CNT(" << SR << "): ";
      break;
    default:
      OS << "    UNKNOWN(" << SR << "): ";
      break;
    }

    if (SR != 0) {
      // Print vgpr scores.
      unsigned LB = getScoreLB(T);

      for (int J = 0; J <= VgprUB; J++) {
        unsigned RegScore = getRegScore(J, T);
        if (RegScore <= LB)
          continue;
        unsigned RelScore = RegScore - LB - 1;
        if (J < SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS) {
          OS << RelScore << ":v" << J << " ";
        } else {
          OS << RelScore << ":ds ";
        }
      }
      // Also need to print sgpr scores for lgkm_cnt.
      if (T == LGKM_CNT) {
        for (int J = 0; J <= SgprUB; J++) {
          unsigned RegScore = getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT);
          if (RegScore <= LB)
            continue;
          unsigned RelScore = RegScore - LB - 1;
          OS << RelScore << ":s" << J << " ";
        }
      }
    }
    OS << '\n';
  }
  OS << '\n';
}
void WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {
  simplifyWaitcnt(VM_CNT, Wait.VmCnt);
  simplifyWaitcnt(EXP_CNT, Wait.ExpCnt);
  simplifyWaitcnt(LGKM_CNT, Wait.LgkmCnt);
  simplifyWaitcnt(VS_CNT, Wait.VsCnt);
}

void WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
                                      unsigned &Count) const {
  // The number of outstanding events for this type, T, can be calculated
  // as (UB - LB). If the current Count is greater than or equal to the number
  // of outstanding events, then the wait for this counter is redundant.
  if (Count >= getScoreRange(T))
    Count = ~0u;
}
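// Compute the wait on counter T needed before register RegNo can safely be
// read or overwritten, and fold it into Wait. A counter that can complete
// out of order can only be waited to zero; otherwise the exact distance
// between the register's score and the counter's upper bound is enough.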
void WaitcntBrackets::determineWait(InstCounterType T, int RegNo,
                                    AMDGPU::Waitcnt &Wait) const {
  unsigned ScoreToWait = getRegScore(RegNo, T);

  // If the score of src_operand falls within the bracket, we need an
  // s_waitcnt instruction.
  const unsigned LB = getScoreLB(T);
  const unsigned UB = getScoreUB(T);
  if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
    if ((T == VM_CNT || T == LGKM_CNT) && hasPendingFlat() &&
        !ST->hasFlatLgkmVMemCountInOrder()) {
      // If there is a pending FLAT operation, and this is a VMem or LGKM
      // waitcnt and the target can report early completion, then we need
      // to force a waitcnt 0.
      addWait(Wait, T, 0);
    } else if (counterOutOfOrder(T)) {
      // Counter can get decremented out-of-order when there
      // are multiple types event in the bracket. Also emit an s_wait counter
      // with a conservative value of 0 for the counter.
      addWait(Wait, T, 0);
    } else {
      // If a counter has been maxed out avoid overflow by waiting for
      // MAX(CounterType) - 1 instead.
      unsigned NeededWait = std::min(UB - ScoreToWait, getWaitCountMax(T) - 1);
      addWait(Wait, T, NeededWait);
    }
  }
}
void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {
  applyWaitcnt(VM_CNT, Wait.VmCnt);
  applyWaitcnt(EXP_CNT, Wait.ExpCnt);
  applyWaitcnt(LGKM_CNT, Wait.LgkmCnt);
  applyWaitcnt(VS_CNT, Wait.VsCnt);
}

void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
  const unsigned UB = getScoreUB(T);
  if (Count >= UB)
    return;
  if (Count != 0) {
    if (counterOutOfOrder(T))
      return;
    setScoreLB(T, std::max(getScoreLB(T), UB - Count));
  } else {
    setScoreLB(T, UB);
    PendingEvents &= ~WaitEventMaskForInst[T];
  }
}

// Where there are multiple types of event in the bracket of a counter,
// the decrement may go out of order.
bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const {
  // Scalar memory read always can go out of order.
  if (T == LGKM_CNT && hasPendingEvent(SMEM_ACCESS))
    return true;
  return hasMixedPendingEvents(T);
}
char SIInsertWaitcnts::ID = 0;

INITIALIZE_PASS_BEGIN(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_END(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
                    false)

char &llvm::SIInsertWaitcntsID = SIInsertWaitcnts::ID;

FunctionPass *llvm::createSIInsertWaitcntsPass() {
  return new SIInsertWaitcnts();
}

static bool updateOperandIfDifferent(MachineInstr &MI, uint16_t OpName,
                                     unsigned NewEnc) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);
  assert(OpIdx >= 0);

  MachineOperand &MO = MI.getOperand(OpIdx);

  if (NewEnc == MO.getImm())
    return false;

  MO.setImm(NewEnc);
  return true;
}
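// Fold the wait that is about to be emitted into any s_waitcnt /
// s_waitcnt_vscnt instructions that already exist at the insertion point,
// tightening their immediates in place and erasing duplicates, so that at
// most one waitcnt of each kind remains.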
bool SIInsertWaitcnts::applyPreexistingWaitcnt(
    WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,
    AMDGPU::Waitcnt &Wait, MachineBasicBlock::instr_iterator It) const {
  bool Modified = false;
  MachineInstr *WaitcntInstr = nullptr;
  MachineInstr *WaitcntVsCntInstr = nullptr;

  for (auto &II :
       make_early_inc_range(make_range(OldWaitcntInstr.getIterator(), It))) {
    if (II.isMetaInstruction())
      continue;

    if (II.getOpcode() == AMDGPU::S_WAITCNT) {
      // Conservatively update required wait if this waitcnt was added in an
      // earlier pass. In this case it will not exist in the tracked waitcnt
      // set.
      if (!TrackedWaitcntSet.count(&II)) {
        unsigned IEnc = II.getOperand(0).getImm();
        AMDGPU::Waitcnt OldWait = AMDGPU::decodeWaitcnt(IV, IEnc);
        Wait = Wait.combined(OldWait);
      }

      // Merge consecutive waitcnt of the same type by erasing multiples.
      if (!WaitcntInstr) {
        WaitcntInstr = &II;
      } else {
        II.eraseFromParent();
        Modified = true;
      }

    } else {
      assert(II.getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
      assert(II.getOperand(0).getReg() == AMDGPU::SGPR_NULL);
      if (!TrackedWaitcntSet.count(&II)) {
        unsigned OldVSCnt =
            TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
        Wait.VsCnt = std::min(Wait.VsCnt, OldVSCnt);
      }

      if (!WaitcntVsCntInstr) {
        WaitcntVsCntInstr = &II;
      } else {
        II.eraseFromParent();
        Modified = true;
      }
    }
  }

  // Updated encoding of merged waitcnt with the required wait.
  if (WaitcntInstr) {
    if (Wait.hasWaitExceptVsCnt()) {
      Modified |=
          updateOperandIfDifferent(*WaitcntInstr, AMDGPU::OpName::simm16,
                                   AMDGPU::encodeWaitcnt(IV, Wait));
      ScoreBrackets.applyWaitcnt(Wait);
      Wait.VmCnt = ~0u;
      Wait.LgkmCnt = ~0u;
      Wait.ExpCnt = ~0u;

      LLVM_DEBUG(It == OldWaitcntInstr.getParent()->instr_end()
                     ? dbgs() << "applyPreexistingWaitcnt\n"
                              << "New Instr at block end: " << *WaitcntInstr
                              << '\n'
                     : dbgs() << "applyPreexistingWaitcnt\n"
                              << "Old Instr: " << *It
                              << "New Instr: " << *WaitcntInstr << '\n');

    } else {
      WaitcntInstr->eraseFromParent();
      Modified = true;
    }
  }

  if (WaitcntVsCntInstr) {
    if (Wait.hasWaitVsCnt()) {
      assert(ST->hasVscnt());
      Modified |= updateOperandIfDifferent(*WaitcntVsCntInstr,
                                           AMDGPU::OpName::simm16, Wait.VsCnt);
      ScoreBrackets.applyWaitcnt(Wait);
      Wait.VsCnt = ~0u;

      LLVM_DEBUG(It == OldWaitcntInstr.getParent()->instr_end()
                     ? dbgs() << "applyPreexistingWaitcnt\n"
                              << "New Instr at block end: "
                              << *WaitcntVsCntInstr << '\n'
                     : dbgs() << "applyPreexistingWaitcnt\n"
                              << "Old Instr: " << *It
                              << "New Instr: " << *WaitcntVsCntInstr << '\n');
    } else {
      WaitcntVsCntInstr->eraseFromParent();
      Modified = true;
    }
  }

  return Modified;
}
static bool readsVCCZ(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
         !MI.getOperand(1).isUndef();
}

/// \returns true if the callee inserts an s_waitcnt 0 on function entry.
static bool callWaitsOnFunctionEntry(const MachineInstr &MI) {
  // Currently all conventions wait, but this may not always be the case.
  //
  // TODO: If IPRA is enabled, and the callee is isSafeForNoCSROpt, it may make
  // sense to omit the wait and do it in the caller.
  return true;
}

/// \returns true if the callee is expected to wait for any outstanding waits
/// before returning.
static bool callWaitsOnFunctionReturn(const MachineInstr &MI) { return true; }
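// Decide which of MI's operands still have outstanding events and therefore
// what s_waitcnt (if any) must be placed immediately before MI. This also
// covers the special cases that need a full wait: returns, barriers without
// hardware back-off, and writes to EXEC while exports are in flight.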
bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
                                                 WaitcntBrackets &ScoreBrackets,
                                                 MachineInstr *OldWaitcntInstr,
                                                 bool FlushVmCnt) {
  setForceEmitWaitcnt();

  if (MI.isMetaInstruction())
    return false;

  AMDGPU::Waitcnt Wait;

  // FIXME: This should have already been handled by the memory legalizer.
  // Removing this currently doesn't affect any lit tests, but we need to
  // verify that nothing was relying on this. The number of buffer invalidates
  // being handled here should not be expanded.
  if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 ||
      MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC ||
      MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL ||
      MI.getOpcode() == AMDGPU::BUFFER_GL0_INV ||
      MI.getOpcode() == AMDGPU::BUFFER_GL1_INV) {
    Wait.VmCnt = 0;
  }

  // All waits must be resolved at call return.
  // NOTE: this could be improved with knowledge of all call sites or
  //   with knowledge of the called routines.
  if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
      MI.getOpcode() == AMDGPU::SI_RETURN ||
      MI.getOpcode() == AMDGPU::S_SETPC_B64_return ||
      (MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) {
    Wait = Wait.combined(allZeroWaitcnt());
  }
  // Resolve vm waits before gs-done.
  else if ((MI.getOpcode() == AMDGPU::S_SENDMSG ||
            MI.getOpcode() == AMDGPU::S_SENDMSGHALT) &&
           ST->hasLegacyGeometry() &&
           ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_PreGFX11_) ==
            AMDGPU::SendMsg::ID_GS_DONE_PreGFX11)) {
    Wait.VmCnt = 0;
  }
#if 0 // TODO: the following blocks of logic when we have fence.
  else if (MI.getOpcode() == SC_FENCE) {
    const unsigned int group_size =
      context->shader_info->GetMaxThreadGroupSize();
    // group_size == 0 means thread group size is unknown at compile time
    const bool group_is_multi_wave =
      (group_size == 0 || group_size > target_info->GetWaveFrontSize());
    const bool fence_is_global = !((SCInstInternalMisc*)Inst)->IsGroupFence();

    for (unsigned int i = 0; i < Inst->NumSrcOperands(); i++) {
      SCRegType src_type = Inst->GetSrcType(i);
      switch (src_type) {
        case SCMEM_LDS:
          if (group_is_multi_wave ||
              context->OptFlagIsOn(OPT_R1100_LDSMEM_FENCE_CHICKEN_BIT)) {
            EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
                               ScoreBrackets->getScoreUB(LGKM_CNT));
            // LDS may have to wait for VM_CNT after buffer load to LDS
            if (target_info->HasBufferLoadToLDS()) {
              EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
                                 ScoreBrackets->getScoreUB(VM_CNT));
            }
          }
          break;

        case SCMEM_GDS:
          if (group_is_multi_wave || fence_is_global) {
            EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
              ScoreBrackets->getScoreUB(EXP_CNT));
            EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
              ScoreBrackets->getScoreUB(LGKM_CNT));
          }
          break;

        case SCMEM_UAV:
        case SCMEM_TFBUF:
        case SCMEM_RING:
        case SCMEM_SCATTER:
          if (group_is_multi_wave || fence_is_global) {
            EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
              ScoreBrackets->getScoreUB(EXP_CNT));
            EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
              ScoreBrackets->getScoreUB(VM_CNT));
          }
          break;

        case SCMEM_SCRATCH:
        default:
          break;
      }
    }
  }
#endif

  // Export & GDS instructions do not read the EXEC mask until after the export
  // is granted (which can occur well after the instruction is issued). The
  // shader program must flush all EXP operations on the export-count before
  // overwriting the EXEC mask.
  if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) {
    // Export and GDS are tracked individually, either may trigger a waitcnt
    // for EXEC.
    if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
        ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
        ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
        ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {
      Wait.ExpCnt = 0;
    }
  }

  if (MI.isCall() && callWaitsOnFunctionEntry(MI)) {
    // The function is going to insert a wait on everything in its prolog.
    // This still needs to be careful if the call target is a load (e.g. a GOT
    // load). We also need to check WAW dependency with saved PC.
    Wait = AMDGPU::Waitcnt();

    int CallAddrOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

    if (MI.getOperand(CallAddrOpIdx).isReg()) {
      RegInterval CallAddrOpInterval =
          ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, CallAddrOpIdx);

      for (int RegNo = CallAddrOpInterval.first;
           RegNo < CallAddrOpInterval.second; ++RegNo)
        ScoreBrackets.determineWait(LGKM_CNT, RegNo, Wait);

      int RtnAddrOpIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
      if (RtnAddrOpIdx != -1) {
        RegInterval RtnAddrOpInterval =
            ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, RtnAddrOpIdx);

        for (int RegNo = RtnAddrOpInterval.first;
             RegNo < RtnAddrOpInterval.second; ++RegNo)
          ScoreBrackets.determineWait(LGKM_CNT, RegNo, Wait);
      }
    }
  } else {
    // FIXME: Should not be relying on memoperands.
    // Look at the source operands of every instruction to see if any of them
    // results from a previous memory operation that affects its current usage;
    // if so, an s_waitcnt instruction needs to be emitted. Destinations are
    // handled for both WAW (wait on the pending load) and WAR (wait on a
    // recent export/store on exp_cnt).
    for (const MachineMemOperand *Memop : MI.memoperands()) {
      const Value *Ptr = Memop->getValue();
      if (Memop->isStore() && SLoadAddresses.count(Ptr)) {
        addWait(Wait, LGKM_CNT, 0);
        if (PDT->dominates(MI.getParent(), SLoadAddresses.find(Ptr)->second))
          SLoadAddresses.erase(Ptr);
      }
      unsigned AS = Memop->getAddrSpace();
      if (AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS::FLAT_ADDRESS)
        continue;
      // No need to wait before load from VMEM to LDS.
      if (mayWriteLDSThroughDMA(MI))
        continue;
      unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
      // VM_CNT is only relevant to vgpr or LDS.
      ScoreBrackets.determineWait(VM_CNT, RegNo, Wait);
      if (Memop->isStore()) {
        ScoreBrackets.determineWait(EXP_CNT, RegNo, Wait);
      }
    }

    // Loop over use and def operands.
    for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
      MachineOperand &Op = MI.getOperand(I);
      if (!Op.isReg())
        continue;

      // If the instruction does not read tied source, skip the operand.
      if (Op.isTied() && Op.isUse() && TII->doesNotReadTiedSource(MI))
        continue;

      RegInterval Interval = ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, I);

      const bool IsVGPR = TRI->isVectorRegister(*MRI, Op.getReg());
      for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
        if (IsVGPR) {
          // RAW always needs an s_waitcnt. WAW needs an s_waitcnt unless the
          // previous write and this write are the same type of VMEM
          // instruction, in which case they are guaranteed to write their
          // results in order anyway.
          if (Op.isUse() || !updateVMCntOnly(MI) ||
              ScoreBrackets.hasOtherPendingVmemTypes(RegNo, getVmemType(MI))) {
            ScoreBrackets.determineWait(VM_CNT, RegNo, Wait);
            ScoreBrackets.clearVgprVmemTypes(RegNo);
          }
          if (Op.isDef() || ScoreBrackets.hasPendingEvent(EXP_LDS_ACCESS)) {
            ScoreBrackets.determineWait(EXP_CNT, RegNo, Wait);
          }
        }
        ScoreBrackets.determineWait(LGKM_CNT, RegNo, Wait);
      }
    }
  }

  // The subtarget may have an implicit S_WAITCNT 0 before barriers. If it does
  // not, we need to ensure the subtarget is capable of backing off barrier
  // instructions in case there are any outstanding memory operations that may
  // cause an exception. Otherwise, insert an explicit S_WAITCNT 0 here.
  if (MI.getOpcode() == AMDGPU::S_BARRIER &&
      !ST->hasAutoWaitcntBeforeBarrier() && !ST->supportsBackOffBarrier()) {
    Wait = Wait.combined(allZeroWaitcnt());
  }

  // TODO: Remove this work-around, enable the assert for Bug 457939
  //       after fixing the scheduler. Also, the Shader Compiler code is
  //       independent of target.
  if (readsVCCZ(MI) && ST->hasReadVCCZBug()) {
    if (ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
      Wait.LgkmCnt = 0;
    }
  }

  // Verify that the wait is actually needed.
  ScoreBrackets.simplifyWaitcnt(Wait);

  if (ForceEmitZeroWaitcnts)
    Wait = allZeroWaitcnt();

  if (ForceEmitWaitcnt[VM_CNT])
    Wait.VmCnt = 0;
  if (ForceEmitWaitcnt[EXP_CNT])
    Wait.ExpCnt = 0;
  if (ForceEmitWaitcnt[LGKM_CNT])
    Wait.LgkmCnt = 0;
  if (ForceEmitWaitcnt[VS_CNT])
    Wait.VsCnt = 0;

  if (FlushVmCnt) {
    if (ScoreBrackets.hasPendingEvent(VM_CNT))
      Wait.VmCnt = 0;
  }

  return generateWaitcnt(Wait, MI.getIterator(), *MI.getParent(), ScoreBrackets,
                         OldWaitcntInstr);
}
// Add a waitcnt to flush the vmcnt counter at the end of the given block if
// needed.
bool SIInsertWaitcnts::generateWaitcntBlockEnd(MachineBasicBlock &Block,
                                               WaitcntBrackets &ScoreBrackets,
                                               MachineInstr *OldWaitcntInstr) {
  AMDGPU::Waitcnt Wait;

  if (!ScoreBrackets.hasPendingEvent(VM_CNT))
    return false;

  Wait.VmCnt = 0;

  return generateWaitcnt(Wait, Block.instr_end(), Block, ScoreBrackets,
                         OldWaitcntInstr);
}
bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt Wait,
                                       MachineBasicBlock::instr_iterator It,
                                       MachineBasicBlock &Block,
                                       WaitcntBrackets &ScoreBrackets,
                                       MachineInstr *OldWaitcntInstr) {
  bool Modified = false;
  const DebugLoc &DL = Block.findDebugLoc(It);

  if (OldWaitcntInstr)
    // Try to merge the required wait with preexisting waitcnt instructions.
    // Also erase redundant waitcnt.
    Modified =
        applyPreexistingWaitcnt(ScoreBrackets, *OldWaitcntInstr, Wait, It);
  else
    ScoreBrackets.applyWaitcnt(Wait);

  // ExpCnt can be merged into VINTERP.
  if (Wait.ExpCnt != ~0u && It != Block.instr_end() &&
      SIInstrInfo::isVINTERP(*It)) {
    MachineOperand *WaitExp =
        TII->getNamedOperand(*It, AMDGPU::OpName::waitexp);
    if (Wait.ExpCnt < WaitExp->getImm()) {
      WaitExp->setImm(Wait.ExpCnt);
      Modified = true;
    }
    Wait.ExpCnt = ~0u;

    LLVM_DEBUG(dbgs() << "generateWaitcnt\n"
                      << "Update Instr: " << *It);
  }

  // Build new waitcnt instructions unless no wait is needed or the old waitcnt
  // instruction was updated to handle the required wait.
  if (Wait.hasWaitExceptVsCnt()) {
    unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait);
    auto SWaitInst =
        BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT)).addImm(Enc);
    TrackedWaitcntSet.insert(SWaitInst);
    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
               if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
               dbgs() << "New Instr: " << *SWaitInst << '\n');
  }

  if (Wait.hasWaitVsCnt()) {
    assert(ST->hasVscnt());

    auto SWaitInst = BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT_VSCNT))
                         .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
                         .addImm(Wait.VsCnt);
    TrackedWaitcntSet.insert(SWaitInst);
    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
               if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
               dbgs() << "New Instr: " << *SWaitInst << '\n');
  }

  return Modified;
}
// This is a flat memory operation. Check to see if it has memory tokens other
// than LDS. Other address spaces supported by flat memory operations involve
// global memory.
bool SIInsertWaitcnts::mayAccessVMEMThroughFlat(const MachineInstr &MI) const {
  assert(TII->isFLAT(MI));

  // If there are no memory operands then conservatively assume the flat
  // operation may access VMEM.
  if (MI.memoperands_empty())
    return true;

  // See if any memory operand specifies an address space that involves VMEM.
  // Flat operations only support FLAT, LOCAL (LDS), or address spaces
  // involving VMEM such as GLOBAL, CONSTANT, PRIVATE (SCRATCH), etc. The
  // REGION (GDS) address space is not supported by flat operations, so simply
  // return true unless only the LDS address space is found.
  for (const MachineMemOperand *Memop : MI.memoperands()) {
    unsigned AS = Memop->getAddrSpace();
    assert(AS != AMDGPUAS::REGION_ADDRESS);
    if (AS != AMDGPUAS::LOCAL_ADDRESS)
      return true;
  }

  return false;
}

// This is a flat memory operation. Check to see if it has memory tokens for
// either LDS or FLAT.
bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const {
  assert(TII->isFLAT(MI));

  // Flat instructions such as SCRATCH and GLOBAL do not use the lgkm counter.
  if (!TII->usesLGKM_CNT(MI))
    return false;

  // If in tgsplit mode then there can be no use of LDS.
  if (ST->isTgSplitEnabled())
    return false;

  // If there are no memory operands then conservatively assume the flat
  // operation may access LDS.
  if (MI.memoperands_empty())
    return true;

  // See if any memory operand specifies an address space that involves LDS.
  for (const MachineMemOperand *Memop : MI.memoperands()) {
    unsigned AS = Memop->getAddrSpace();
    if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS)
      return true;
  }

  return false;
}
void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
                                               WaitcntBrackets *ScoreBrackets) {
  // Now look at the instruction opcode. If it is a memory access instruction,
  // update the upper-bound of the appropriate counter's bracket and the
  // destination operand scores.
  // TODO: Use the (TSFlags & SIInstrFlags::LGKM_CNT) property everywhere.
  if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
    if (TII->isAlwaysGDS(Inst.getOpcode()) ||
        TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
    } else {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
    }
  } else if (TII->isFLAT(Inst)) {
    assert(Inst.mayLoadOrStore());

    int FlatASCount = 0;

    if (mayAccessVMEMThroughFlat(Inst)) {
      ++FlatASCount;
      ScoreBrackets->updateByEvent(TII, TRI, MRI, getVmemWaitEventType(Inst),
                                   Inst);
    }

    if (mayAccessLDSThroughFlat(Inst)) {
      ++FlatASCount;
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
    }

    // A flat memory operation must access at least one address space.
    assert(FlatASCount);

    // This is a flat memory operation that accesses both VMEM and LDS, so note
    // it: it will require that both the VM and LGKM be flushed to zero if it
    // is pending when a VM or LGKM dependency occurs.
    if (FlatASCount > 1)
      ScoreBrackets->setPendingFlat();
  } else if (SIInstrInfo::isVMEM(Inst) &&
             !llvm::AMDGPU::getMUBUFIsBufferInv(Inst.getOpcode())) {
    ScoreBrackets->updateByEvent(TII, TRI, MRI, getVmemWaitEventType(Inst),
                                 Inst);

    if (ST->vmemWriteNeedsExpWaitcnt() &&
        (Inst.mayStore() || SIInstrInfo::isAtomicRet(Inst))) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
    }
  } else if (TII->isSMRD(Inst)) {
    ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
  } else if (Inst.isCall()) {
    if (callWaitsOnFunctionReturn(Inst)) {
      // Act as a wait on everything.
      ScoreBrackets->applyWaitcnt(allZeroWaitcnt());
    } else {
      // May need to wait for anything.
      ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt());
    }
  } else if (SIInstrInfo::isLDSDIR(Inst)) {
    ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_LDS_ACCESS, Inst);
  } else if (TII->isVINTERP(Inst)) {
    int64_t Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::waitexp)->getImm();
    ScoreBrackets->applyWaitcnt(EXP_CNT, Imm);
  } else if (SIInstrInfo::isEXP(Inst)) {
    unsigned Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
    if (Imm >= AMDGPU::Exp::ET_PARAM0 && Imm <= AMDGPU::Exp::ET_PARAM31)
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
    else if (Imm >= AMDGPU::Exp::ET_POS0 && Imm <= AMDGPU::Exp::ET_POS_LAST)
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
    else
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);
  } else {
    switch (Inst.getOpcode()) {
    case AMDGPU::S_SENDMSG:
    case AMDGPU::S_SENDMSG_RTN_B32:
    case AMDGPU::S_SENDMSG_RTN_B64:
    case AMDGPU::S_SENDMSGHALT:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
      break;
    case AMDGPU::S_MEMTIME:
    case AMDGPU::S_MEMREALTIME:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
      break;
    }
  }
}
bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
                                 unsigned OtherScore) {
  unsigned MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
  unsigned OtherShifted =
      OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
  Score = std::max(MyShifted, OtherShifted);
  return OtherShifted > MyShifted;
}
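// Merge the score brackets of the other (predecessor) state into this one at
// a control-flow join. Returns true if the merge strictly increased what
// this state must wait for, meaning the successor block has to be revisited.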
bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
  bool StrictDom = false;

  VgprUB = std::max(VgprUB, Other.VgprUB);
  SgprUB = std::max(SgprUB, Other.SgprUB);

  for (auto T : inst_counter_types()) {
    // Merge event flags for this counter.
    const unsigned OldEvents = PendingEvents & WaitEventMaskForInst[T];
    const unsigned OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T];
    if (OtherEvents & ~OldEvents)
      StrictDom = true;
    PendingEvents |= OtherEvents;

    // Merge scores for this counter.
    const unsigned MyPending = ScoreUBs[T] - ScoreLBs[T];
    const unsigned OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
    const unsigned NewUB = ScoreLBs[T] + std::max(MyPending, OtherPending);
    if (NewUB < ScoreLBs[T])
      report_fatal_error("waitcnt score overflow");

    MergeInfo M;
    M.OldLB = ScoreLBs[T];
    M.OtherLB = Other.ScoreLBs[T];
    M.MyShift = NewUB - ScoreUBs[T];
    M.OtherShift = NewUB - Other.ScoreUBs[T];

    ScoreUBs[T] = NewUB;

    StrictDom |= mergeScore(M, LastFlat[T], Other.LastFlat[T]);

    for (int J = 0; J <= VgprUB; J++)
      StrictDom |= mergeScore(M, VgprScores[T][J], Other.VgprScores[T][J]);

    if (T == LGKM_CNT) {
      for (int J = 0; J <= SgprUB; J++)
        StrictDom |= mergeScore(M, SgprScores[J], Other.SgprScores[J]);
    }
  }

  for (int J = 0; J <= VgprUB; J++) {
    unsigned char NewVmemTypes = VgprVmemTypes[J] | Other.VgprVmemTypes[J];
    StrictDom |= NewVmemTypes != VgprVmemTypes[J];
    VgprVmemTypes[J] = NewVmemTypes;
  }

  return StrictDom;
}
static bool isWaitInstr(MachineInstr &Inst) {
  return Inst.getOpcode() == AMDGPU::S_WAITCNT ||
         (Inst.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
          Inst.getOperand(0).isReg() &&
          Inst.getOperand(0).getReg() == AMDGPU::SGPR_NULL);
}
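// Walk one basic block in program order: fold or insert waitcnt instructions
// before instructions that need them, update the scoreboard after each
// instruction, and apply the vccz-related hardware workarounds.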
bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
                                            MachineBasicBlock &Block,
                                            WaitcntBrackets &ScoreBrackets) {
  bool Modified = false;

  LLVM_DEBUG({
    dbgs() << "*** Block" << Block.getNumber() << " ***";
    ScoreBrackets.dump();
  });

  // Track the correctness of vccz through this basic block. There are two
  // reasons why it might be incorrect; see ST->hasReadVCCZBug() and
  // ST->partialVCCWritesUpdateVCCZ().
  bool VCCZCorrect = true;
  if (ST->hasReadVCCZBug()) {
    // vccz could be incorrect at a basic block boundary if a predecessor wrote
    // to vcc and then issued an smem load.
    VCCZCorrect = false;
  } else if (!ST->partialVCCWritesUpdateVCCZ()) {
    // vccz could be incorrect at a basic block boundary if a predecessor wrote
    // to vcc_lo or vcc_hi.
    VCCZCorrect = false;
  }

  // Walk over the instructions.
  MachineInstr *OldWaitcntInstr = nullptr;

  for (MachineBasicBlock::instr_iterator Iter = Block.instr_begin(),
                                         E = Block.instr_end();
       Iter != E;) {
    MachineInstr &Inst = *Iter;

    // Track pre-existing waitcnts that were added in earlier iterations or by
    // the memory legalizer.
    if (isWaitInstr(Inst)) {
      if (!OldWaitcntInstr)
        OldWaitcntInstr = &Inst;
      ++Iter;
      continue;
    }

    bool FlushVmCnt = Block.getFirstTerminator() == Inst &&
                      isPreheaderToFlush(Block, ScoreBrackets);

    // Generate an s_waitcnt instruction to be placed before Inst, if needed.
    Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr,
                                          FlushVmCnt);
    OldWaitcntInstr = nullptr;

    // Restore vccz if it's not known to be correct already.
    bool RestoreVCCZ = !VCCZCorrect && readsVCCZ(Inst);

    // Don't examine operands unless we need to track vccz correctness.
    if (ST->hasReadVCCZBug() || !ST->partialVCCWritesUpdateVCCZ()) {
      if (Inst.definesRegister(AMDGPU::VCC_LO) ||
          Inst.definesRegister(AMDGPU::VCC_HI)) {
        // Up to gfx9, writes to vcc_lo and vcc_hi don't update vccz.
        if (!ST->partialVCCWritesUpdateVCCZ())
          VCCZCorrect = false;
      } else if (Inst.definesRegister(AMDGPU::VCC)) {
        // There is a hardware bug on CI/SI where SMRD instructions may corrupt
        // vccz bit: when we detect that an instruction may read from a corrupt
        // vccz bit, we need to (1) insert s_waitcnt lgkmcnt(0) to wait for all
        // outstanding SMRD operations to complete, and (2) restore the correct
        // value of vccz by writing the current value of vcc back to vcc.
        if (ST->hasReadVCCZBug() &&
            ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
          // Writes to vcc while there's an outstanding smem read may get
          // clobbered as soon as any read completes.
          VCCZCorrect = false;
        } else {
          // Writes to vcc will fix any incorrect value in vccz.
          VCCZCorrect = true;
        }
      }
    }

    if (TII->isSMRD(Inst)) {
      for (const MachineMemOperand *Memop : Inst.memoperands()) {
        // No need to handle invariant loads when avoiding WAR conflicts, as
        // there cannot be a vector store to the same memory location.
        if (!Memop->isInvariant()) {
          const Value *Ptr = Memop->getValue();
          SLoadAddresses.insert(std::pair(Ptr, Inst.getParent()));
        }
      }
      if (ST->hasReadVCCZBug()) {
        // This smem read could complete and clobber vccz at any time.
        VCCZCorrect = false;
      }
    }

    updateEventWaitcntAfter(Inst, &ScoreBrackets);

#if 0 // TODO: implement resource type check controlled by options with ub = LB.
    // If this instruction generates a S_SETVSKIP because it is an
    // indexed resource, and we are on Tahiti, then it will also force
    // an S_WAITCNT vmcnt(0)
    if (RequireCheckResourceType(Inst, context)) {
      // Force the score to as if an S_WAITCNT vmcnt(0) is emitted.
      ScoreBrackets->setScoreLB(VM_CNT,
      ScoreBrackets->getScoreUB(VM_CNT));
    }
#endif

    LLVM_DEBUG({
      Inst.print(dbgs());
      ScoreBrackets.dump();
    });

    // TODO: Remove this work-around after fixing the scheduler and enable the
    // assert above.
    if (RestoreVCCZ) {
      // Restore the vccz bit. Any time a value is written to vcc, the vcc
      // bit is updated, so we can restore the bit by reading the value of
      // vcc and then writing it back to the register.
      BuildMI(Block, Inst, Inst.getDebugLoc(),
              TII->get(ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
              TRI->getVCC())
          .addReg(TRI->getVCC());
      VCCZCorrect = true;
      Modified = true;
    }

    ++Iter;
  }

  // Flush the vmcnt counter here if the block is a loop preheader and the
  // loop requires it.
  if (Block.getFirstTerminator() == Block.end() &&
      isPreheaderToFlush(Block, ScoreBrackets))
    Modified |= generateWaitcntBlockEnd(Block, ScoreBrackets, OldWaitcntInstr);

  return Modified;
}
// Return true if the given machine basic block is a preheader of a loop in
// which we want to flush the vmcnt counter, and false otherwise.
bool SIInsertWaitcnts::isPreheaderToFlush(MachineBasicBlock &MBB,
                                          WaitcntBrackets &ScoreBrackets) {
  if (PreheadersToFlush.count(&MBB))
    return PreheadersToFlush[&MBB];

  auto UpdateCache = [&](bool val) {
    PreheadersToFlush[&MBB] = val;
    return val;
  };

  MachineBasicBlock *Succ = MBB.getSingleSuccessor();
  if (!Succ)
    return UpdateCache(false);

  MachineLoop *Loop = MLI->getLoopFor(Succ);
  if (!Loop)
    return UpdateCache(false);

  if (Loop->getLoopPreheader() == &MBB &&
      shouldFlushVmCnt(Loop, ScoreBrackets))
    return UpdateCache(true);

  return UpdateCache(false);
}
bool SIInsertWaitcnts::isVMEMOrFlatVMEM(const MachineInstr &MI) const {
  return SIInstrInfo::isVMEM(MI) ||
         (SIInstrInfo::isFLAT(MI) && mayAccessVMEMThroughFlat(MI));
}
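// Flushing vmcnt in a loop preheader pays off in two situations:
// 1. The loop contains vmem stores, no vmem loads, and uses a vgpr loaded
//    outside the loop (only on targets without a separate vscnt counter).
// 2. The loop contains vmem loads whose results are not consumed inside the
//    loop, and uses a vgpr loaded outside of it.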
bool SIInsertWaitcnts::shouldFlushVmCnt(MachineLoop *ML,
                                        WaitcntBrackets &Brackets) {
  bool HasVMemLoad = false;
  bool HasVMemStore = false;
  bool UsesVgprLoadedOutside = false;
  DenseSet<int> VgprUse;
  DenseSet<int> VgprDef;

  for (MachineBasicBlock *MBB : ML->blocks()) {
    for (MachineInstr &MI : *MBB) {
      if (isVMEMOrFlatVMEM(MI)) {
        if (MI.mayLoad())
          HasVMemLoad = true;
        if (MI.mayStore())
          HasVMemStore = true;
      }
      for (unsigned I = 0; I < MI.getNumOperands(); I++) {
        MachineOperand &Op = MI.getOperand(I);
        if (!Op.isReg() || !TRI->isVectorRegister(*MRI, Op.getReg()))
          continue;
        RegInterval Interval = Brackets.getRegInterval(&MI, TII, MRI, TRI, I);
        // Vgpr use.
        if (Op.isUse()) {
          for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
            // If we find a register that is loaded inside the loop, 1. and 2.
            // are invalidated and we can exit.
            if (VgprDef.contains(RegNo))
              return false;
            VgprUse.insert(RegNo);
            // If at least one of Op's registers is in the score brackets, the
            // value is likely loaded outside of the loop.
            if (Brackets.getRegScore(RegNo, VM_CNT) >
                Brackets.getScoreLB(VM_CNT)) {
              UsesVgprLoadedOutside = true;
              break;
            }
          }
        }
        // VMem load vgpr def.
        else if (isVMEMOrFlatVMEM(MI) && MI.mayLoad() && Op.isDef()) {
          for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
            // If we find a register that is loaded inside the loop, 1. and 2.
            // are invalidated and we can exit.
            if (VgprUse.contains(RegNo))
              return false;
            VgprDef.insert(RegNo);
          }
        }
      }
    }
  }
  if (!ST->hasVscnt() && HasVMemStore && !HasVMemLoad && UsesVgprLoadedOutside)
    return true;
  return HasVMemLoad && UsesVgprLoadedOutside;
}
bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();
  IV = AMDGPU::getIsaVersion(ST->getCPU());
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MLI = &getAnalysis<MachineLoopInfo>();
  PDT = &getAnalysis<MachinePostDominatorTree>();

  ForceEmitZeroWaitcnts = ForceEmitZeroFlag;
  for (auto T : inst_counter_types())
    ForceEmitWaitcnt[T] = false;

  HardwareLimits Limits = {};
  Limits.VmcntMax = AMDGPU::getVmcntBitMask(IV);
  Limits.ExpcntMax = AMDGPU::getExpcntBitMask(IV);
  Limits.LgkmcntMax = AMDGPU::getLgkmcntBitMask(IV);
  Limits.VscntMax = ST->hasVscnt() ? 63 : 0;

  unsigned NumVGPRsMax = ST->getAddressableNumVGPRs();
  unsigned NumSGPRsMax = ST->getAddressableNumSGPRs();
  assert(NumVGPRsMax <= SQ_MAX_PGM_VGPRS);
  assert(NumSGPRsMax <= SQ_MAX_PGM_SGPRS);

  RegisterEncoding Encoding = {};
  Encoding.VGPR0 = TRI->getEncodingValue(AMDGPU::VGPR0);
  Encoding.VGPRL = Encoding.VGPR0 + NumVGPRsMax - 1;
  Encoding.SGPR0 = TRI->getEncodingValue(AMDGPU::SGPR0);
  Encoding.SGPRL = Encoding.SGPR0 + NumSGPRsMax - 1;

  TrackedWaitcntSet.clear();
  BlockInfos.clear();
  bool Modified = false;

  if (!MFI->isEntryFunction()) {
    // Wait for any outstanding memory operations that the input registers may
    // depend on. We can't track them and it's better to do the wait after the
    // costly call sequence.

    // TODO: Could insert earlier and schedule more liberally with operations
    // that only use caller preserved registers.
    MachineBasicBlock &EntryBB = MF.front();
    MachineBasicBlock::iterator I = EntryBB.begin();
    for (MachineBasicBlock::iterator E = EntryBB.end();
         I != E && (I->isPHI() || I->isMetaInstruction()); ++I)
      ;
    BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT)).addImm(0);
    if (ST->hasVscnt())
      BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT_VSCNT))
          .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
          .addImm(0);

    Modified = true;
  }
  // Keep iterating over the blocks in reverse post order, inserting and
  // updating s_waitcnt where needed, until a fix point is reached. No new
  // instructions are required along the way.
  //
  // TODO: Do it in one pass.
  for (auto *MBB : ReversePostOrderTraversal<MachineFunction *>(&MF))
    BlockInfos.insert({MBB, BlockInfo(MBB)});

  std::unique_ptr<WaitcntBrackets> Brackets;
  bool Repeat;
  do {
    Repeat = false;

    for (auto BII = BlockInfos.begin(), BIE = BlockInfos.end(); BII != BIE;
         ++BII) {
      BlockInfo &BI = BII->second;
      if (!BI.Dirty)
        continue;

      if (BI.Incoming) {
        if (!Brackets)
          Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
        else
          *Brackets = *BI.Incoming;
      } else {
        if (!Brackets)
          Brackets = std::make_unique<WaitcntBrackets>(ST, Limits, Encoding);
        else
          *Brackets = WaitcntBrackets(ST, Limits, Encoding);
      }

      Modified |= insertWaitcntInBlock(MF, *BI.MBB, *Brackets);
      BI.Dirty = false;

      if (Brackets->hasPendingEvent()) {
        BlockInfo *MoveBracketsToSucc = nullptr;
        for (MachineBasicBlock *Succ : BI.MBB->successors()) {
          auto SuccBII = BlockInfos.find(Succ);
          BlockInfo &SuccBI = SuccBII->second;
          if (!SuccBI.Incoming) {
            SuccBI.Dirty = true;
            if (SuccBII <= BII)
              Repeat = true;
            if (!MoveBracketsToSucc) {
              MoveBracketsToSucc = &SuccBI;
            } else {
              SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
            }
          } else if (SuccBI.Incoming->merge(*Brackets)) {
            SuccBI.Dirty = true;
            if (SuccBII <= BII)
              Repeat = true;
          }
        }
        if (MoveBracketsToSucc)
          MoveBracketsToSucc->Incoming = std::move(Brackets);
      }
    }
  } while (Repeat);
  if (ST->hasScalarStores()) {
    SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;
    bool HaveScalarStores = false;

    for (MachineBasicBlock &MBB : MF) {
      for (MachineInstr &MI : MBB) {
        if (!HaveScalarStores && TII->isScalarStore(MI))
          HaveScalarStores = true;

        if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
            MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
          EndPgmBlocks.push_back(&MBB);
      }
    }

    if (HaveScalarStores) {
      // If scalar writes are used, the cache must be flushed or else the next
      // wave to reuse the same scratch memory can be clobbered.
      //
      // Insert s_dcache_wb at wave termination points if there were any scalar
      // stores, and only if the cache hasn't already been flushed. This could
      // be improved by looking across blocks for flushes in postdominating
      // blocks from the stores, but an explicitly requested flush is probably
      // very rare.
      for (MachineBasicBlock *MBB : EndPgmBlocks) {
        bool SeenDCacheWB = false;

        for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
             I != E; ++I) {
          if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
            SeenDCacheWB = true;
          else if (TII->isScalarStore(*I))
            SeenDCacheWB = false;

          // FIXME: It would be better to insert this before a waitcnt if any.
          if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
               I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
              !SeenDCacheWB) {
            Modified = true;
            BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
          }
        }
      }
    }
  }

  return Modified;
}