#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "si-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenInstrInfo.inc"

#define GET_D16ImageDimIntrinsics_IMPL
#define GET_ImageDimIntrinsicTable_IMPL
#define GET_RsrcIntrinsics_IMPL
#include "AMDGPUGenSearchableTables.inc"
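// Note: the GET_* macros above select which TableGen-generated definitions
// the .inc files expand to: GET_INSTRINFO_CTOR_DTOR pulls in the generated
// constructor/destructor for the InstrInfo class, and the *_IMPL macros
// instantiate the searchable intrinsic/resource tables used below.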
    cl::desc("Restrict range of branch instructions (DEBUG)"));

    "amdgpu-fix-16-bit-physreg-copies",
    cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"),
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
  return !MI.memoperands_empty() &&
         all_of(MI.memoperands(), [](const MachineMemOperand *MMO) {
           return MMO->isLoad() && MMO->isInvariant();
         });
  if (!MI.hasImplicitDef() &&
      MI.getNumImplicitOperands() == MI.getDesc().implicit_uses().size() &&
      !MI.mayRaiseFPException())
bool SIInstrInfo::resultDependsOnExec(const MachineInstr &MI) const {
  if (MI.isCompare()) {
    switch (Use.getOpcode()) {
    case AMDGPU::S_AND_SAVEEXEC_B32:
    case AMDGPU::S_AND_SAVEEXEC_B64:

    case AMDGPU::S_AND_B32:
    case AMDGPU::S_AND_B64:
      if (!Use.readsRegister(AMDGPU::EXEC, nullptr))
  switch (MI.getOpcode()) {
  case AMDGPU::V_READFIRSTLANE_B32:

  if (MI.getOpcode() == AMDGPU::SI_IF_BREAK)
  for (auto Op : MI.uses()) {
    if (Op.isReg() && Op.getReg().isVirtual() &&
        RI.isSGPRClass(MRI.getRegClass(Op.getReg()))) {

  if (FromCycle == nullptr)

  while (FromCycle && !FromCycle->contains(ToCycle)) {
                                            int64_t &Offset1) const {

  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())

  if (!get(Opc0).getNumDefs() || !get(Opc1).getNumDefs())

    int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
    if (Offset0Idx == -1 || Offset1Idx == -1)

    Offset0Idx -= get(Opc0).NumDefs;
    Offset1Idx -= get(Opc1).NumDefs;
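    // getNamedOperandIdx() indexes MachineInstr operand lists, which include
    // the def operands; SDNode operand lists do not, so the def count is
    // subtracted before using these indices on the load nodes.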
  if (!Load0Offset || !Load1Offset)

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)

    OffIdx0 -= get(Opc0).NumDefs;
    OffIdx1 -= get(Opc1).NumDefs;
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:

  OffsetIsScalable = false;
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);

        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
      if (Opc == AMDGPU::DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64)

      unsigned Offset0 = Offset0Op->getImm() & 0xff;
      unsigned Offset1 = Offset1Op->getImm() & 0xff;
      if (Offset0 + 1 != Offset1)

      int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);

      Offset = EltSize * Offset0;

      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (DataOpIdx == -1) {
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
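      // For the DS_*2 forms, the two 8-bit offsets are in element-size units;
      // only an adjacent pair (Offset0 + 1 == Offset1) is reported as one
      // contiguous access starting at EltSize * Offset0.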
    if (BaseOp && !BaseOp->isFI())

      if (SOffset->isReg())

    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);

    AMDGPU::OpName RsrcOpName =
        isMIMG(LdSt) ? AMDGPU::OpName::srsrc : AMDGPU::OpName::rsrc;
    int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, RsrcOpName);

    int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
    if (VAddr0Idx >= 0) {

      for (int I = VAddr0Idx; I < SRsrcIdx; ++I)

    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);

    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);

    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))

  if (MO1->getAddrSpace() != MO2->getAddrSpace())

  const auto *Base1 = MO1->getValue();
  const auto *Base2 = MO2->getValue();
  if (!Base1 || !Base2)

  return Base1 == Base2;
                                      int64_t Offset1, bool OffsetIsScalable1,
                                      int64_t Offset2, bool OffsetIsScalable2,
                                      unsigned ClusterSize,
                                      unsigned NumBytes) const {

  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {

  const unsigned LoadSize = NumBytes / ClusterSize;
  const unsigned NumDWords = ((LoadSize + 3) / 4) * ClusterSize;
  return NumDWords <= MaxMemoryClusterDWords;
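// Illustrative example: clustering 4 loads totalling 48 bytes gives
// LoadSize = 48 / 4 = 12 bytes per load; rounding each load up to dwords,
// (12 + 3) / 4 = 3, so NumDWords = 3 * 4 = 12 is what gets compared against
// MaxMemoryClusterDWords.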
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");

  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
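// Heuristic: only schedule the loads near each other when the group is small
// (at most 16 loads) and the first and last offsets are within 64 bytes.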
                              const char *Msg = "illegal VGPR to SGPR copy") {

  assert((TII.getSubtarget().hasMAIInsts() &&
          !TII.getSubtarget().hasGFX90AInsts()) &&
         "Expected GFX908 subtarget.");

          AMDGPU::AGPR_32RegClass.contains(SrcReg)) &&
         "Source register of the copy should be either an SGPR or an AGPR.");

         "Destination register of the copy should be an AGPR.");
  for (auto Def = MI, E = MBB.begin(); Def != E; ) {

    if (!Def->modifiesRegister(SrcReg, &RI))

    if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64 ||
        Def->getOperand(0).getReg() != SrcReg)

    bool SafeToPropagate = true;

    for (auto I = Def; I != MI && SafeToPropagate; ++I)
      if (I->modifiesRegister(DefOp.getReg(), &RI))
        SafeToPropagate = false;

    if (!SafeToPropagate)

    for (auto I = Def; I != MI; ++I)
      I->clearRegisterKills(DefOp.getReg(), &RI);
    if (ImpUseSuperReg) {
      Builder.addReg(ImpUseSuperReg,

  RS.enterBasicBlockEnd(MBB);
  RS.backward(std::next(MI));

  unsigned RegNo = (DestReg - AMDGPU::AGPR0) % 3;

  assert(MBB.getParent()->getRegInfo().isReserved(Tmp) &&
         "VGPR used for an intermediate copy should have been reserved.");

  Register Tmp2 = RS.scavengeRegisterBackwards(AMDGPU::VGPR_32RegClass, MI,
  unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32;
  if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) {
    TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32_e64;

  if (ImpUseSuperReg) {
    UseBuilder.addReg(ImpUseSuperReg,
  for (unsigned Idx = 0; Idx < BaseIndices.size(); ++Idx) {
    int16_t SubIdx = BaseIndices[Idx];
    Register DestSubReg = RI.getSubReg(DestReg, SubIdx);
    Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
    assert(DestSubReg && SrcSubReg && "Failed to find subregs!");
    unsigned Opcode = AMDGPU::S_MOV_B32;

    bool AlignedDest = ((DestSubReg - AMDGPU::SGPR0) % 2) == 0;
    bool AlignedSrc = ((SrcSubReg - AMDGPU::SGPR0) % 2) == 0;
    if (AlignedDest && AlignedSrc && (Idx + 1 < BaseIndices.size())) {

      DestSubReg = RI.getSubReg(DestReg, SubIdx);
      SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
      assert(DestSubReg && SrcSubReg && "Failed to find subregs!");
      Opcode = AMDGPU::S_MOV_B64;
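      // When both source and destination subregisters are even-aligned and a
      // following subregister remains, the adjacent pair is coalesced into a
      // single S_MOV_B64 instead of two S_MOV_B32s.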
  assert(FirstMI && LastMI);

    LastMI->addRegisterKilled(SrcReg, &RI);

                              Register SrcReg, bool KillSrc, bool RenamableDest,
                              bool RenamableSrc) const {
  unsigned Size = RI.getRegSizeInBits(*RC);
  unsigned SrcSize = RI.getRegSizeInBits(*SrcRC);

  if (((Size == 16) != (SrcSize == 16))) {

    assert(ST.useRealTrue16Insts());

    if (DestReg == SrcReg) {

    RC = RI.getPhysRegBaseClass(DestReg);
    Size = RI.getRegSizeInBits(*RC);
    SrcRC = RI.getPhysRegBaseClass(SrcReg);
    SrcSize = RI.getRegSizeInBits(*SrcRC);
  if (RC == &AMDGPU::VGPR_32RegClass) {
           AMDGPU::SReg_32RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_32RegClass.contains(SrcReg));
    unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ?
                       AMDGPU::V_ACCVGPR_READ_B32_e64 : AMDGPU::V_MOV_B32_e32;

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {

    if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
      if (DestReg == AMDGPU::VCC_LO) {

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (SrcReg == AMDGPU::SCC) {

    if (!AMDGPU::SReg_64_EncodableRegClass.contains(SrcReg)) {
      if (DestReg == AMDGPU::VCC) {

  if (DestReg == AMDGPU::SCC) {
    if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      assert(ST.hasScalarCompareEq64());
  if (RC == &AMDGPU::AGPR_32RegClass) {
    if (AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
        (ST.hasGFX90AInsts() && AMDGPU::SReg_32RegClass.contains(SrcReg))) {

    if (AMDGPU::AGPR_32RegClass.contains(SrcReg) && ST.hasGFX90AInsts()) {

    const bool Overlap = RI.regsOverlap(SrcReg, DestReg);

           AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_LO16RegClass.contains(SrcReg));

    bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
    bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
    bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);

    MCRegister NewDestReg = RI.get32BitRegister(DestReg);
    MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);
    if (IsAGPRDst || IsAGPRSrc) {
      if (!DstLow || !SrcLow) {
                          "Cannot use hi16 subreg with an AGPR!");

    if (ST.useRealTrue16Insts()) {

    if (AMDGPU::VGPR_16_Lo128RegClass.contains(DestReg) &&
        (IsSGPRSrc || AMDGPU::VGPR_16_Lo128RegClass.contains(SrcReg))) {

    if (IsSGPRSrc && !ST.hasSDWAScalar()) {
      if (!DstLow || !SrcLow) {
                          "Cannot use hi16 subreg on VI!");
  if (RC == RI.getVGPR64Class() && (SrcRC == RC || RI.isSGPRClass(SrcRC))) {
    if (ST.hasMovB64()) {

    if (ST.hasPkMovB32()) {

  const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
  if (RI.isSGPRClass(RC)) {
    if (!RI.isSGPRClass(SrcRC)) {

    const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg);

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isAGPRClass(RC)) {
    if (ST.hasGFX90AInsts() && RI.isAGPRClass(SrcRC))
      Opcode = AMDGPU::V_ACCVGPR_MOV_B32;
    else if (RI.hasVGPRs(SrcRC) ||
             (ST.hasGFX90AInsts() && RI.isSGPRClass(SrcRC)))
      Opcode = AMDGPU::V_ACCVGPR_WRITE_B32_e64;
    else
      Opcode = AMDGPU::INSTRUCTION_LIST_END;
  } else if (RI.hasVGPRs(RC) && RI.isAGPRClass(SrcRC)) {
    Opcode = AMDGPU::V_ACCVGPR_READ_B32_e64;
  } else if ((Size % 64 == 0) && RI.hasVGPRs(RC) &&
             (RI.isProperlyAlignedRC(*RC) &&
              (SrcRC == RC || RI.isSGPRClass(SrcRC)))) {
    if (ST.hasMovB64()) {
      Opcode = AMDGPU::V_MOV_B64_e32;
    } else if (ST.hasPkMovB32()) {
      Opcode = AMDGPU::V_PK_MOV_B32;
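  // Summary of the selection above: AGPR destinations use V_ACCVGPR_MOV_B32
  // (AGPR->AGPR on gfx90a+) or V_ACCVGPR_WRITE_B32_e64 (VGPR/SGPR->AGPR);
  // VGPR destinations with AGPR sources use V_ACCVGPR_READ_B32_e64; aligned
  // 64-bit-multiple VGPR copies prefer V_MOV_B64_e32 or V_PK_MOV_B32 when
  // available. INSTRUCTION_LIST_END marks copies that need an intermediate
  // VGPR and a register scavenger, handled below.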
  std::unique_ptr<RegScavenger> RS;
  if (Opcode == AMDGPU::INSTRUCTION_LIST_END)
    RS = std::make_unique<RegScavenger>();

  const bool Overlap = RI.regsOverlap(SrcReg, DestReg);
  const bool CanKillSuperReg = KillSrc && !Overlap;

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];
    Register DestSubReg = RI.getSubReg(DestReg, SubIdx);
    Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
    assert(DestSubReg && SrcSubReg && "Failed to find subregs!");

    bool IsFirstSubreg = Idx == 0;
    bool UseKill = CanKillSuperReg && Idx == SubIndices.size() - 1;

    if (Opcode == AMDGPU::INSTRUCTION_LIST_END) {
                         *RS, Overlap, ImpDefSuper, ImpUseSuper);
    } else if (Opcode == AMDGPU::V_PK_MOV_B32) {
  return &AMDGPU::VGPR_32RegClass;

  assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
    Register SReg = MRI.createVirtualRegister(BoolXExecRC);
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");
    case SIInstrInfo::SCC_TRUE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
    case SIInstrInfo::SCC_FALSE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
    case SIInstrInfo::VCCNZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
    case SIInstrInfo::VCCZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
    case SIInstrInfo::EXECNZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
    case SIInstrInfo::EXECZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());

  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());

  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
                                            int64_t &ImmVal) const {
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOVK_I32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B64_e32:
  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
  case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
  case AMDGPU::AV_MOV_B64_IMM_PSEUDO:
  case AMDGPU::S_MOV_B64_IMM_PSEUDO:
  case AMDGPU::V_MOV_B64_PSEUDO: {
    return MI.getOperand(0).getReg() == Reg;
  }
  case AMDGPU::S_BREV_B32:
  case AMDGPU::V_BFREV_B32_e32:
  case AMDGPU::V_BFREV_B32_e64: {
    return MI.getOperand(0).getReg() == Reg;
  }
  case AMDGPU::S_NOT_B32:
  case AMDGPU::V_NOT_B32_e32:
  case AMDGPU::V_NOT_B32_e64: {
    ImmVal = static_cast<int64_t>(~static_cast<int32_t>(Src0.getImm()));
    return MI.getOperand(0).getReg() == Reg;
  }
  if (RI.isAGPRClass(DstRC))
    return AMDGPU::COPY;
  if (RI.getRegSizeInBits(*DstRC) == 16) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::COPY : AMDGPU::V_MOV_B16_t16_e64;
  }
  if (RI.getRegSizeInBits(*DstRC) == 32)
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC))
    return AMDGPU::S_MOV_B64;
  if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC))
    return AMDGPU::V_MOV_B64_PSEUDO;
  return AMDGPU::COPY;
                                                       bool IsIndirectSrc) const {
  if (IsIndirectSrc) {
    if (VecSize <= 32)
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1);
    if (VecSize <= 64)
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2);
    if (VecSize <= 96)
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3);
    if (VecSize <= 128)
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4);
    if (VecSize <= 160)
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5);
    if (VecSize <= 256)
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8);
    if (VecSize <= 288)
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V9);
    if (VecSize <= 320)
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V10);
    if (VecSize <= 352)
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V11);
    if (VecSize <= 384)
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V12);
    if (VecSize <= 512)
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16);
    if (VecSize <= 1024)
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32);
  }

  if (VecSize <= 32)
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1);
  if (VecSize <= 64)
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2);
  if (VecSize <= 96)
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3);
  if (VecSize <= 128)
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4);
  if (VecSize <= 160)
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5);
  if (VecSize <= 256)
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8);
  if (VecSize <= 288)
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V9);
  if (VecSize <= 320)
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V10);
  if (VecSize <= 352)
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V11);
  if (VecSize <= 384)
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V12);
  if (VecSize <= 512)
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16);
  if (VecSize <= 1024)
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32);
  if (VecSize <= 32)
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1;
  if (VecSize <= 64)
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2;
  if (VecSize <= 96)
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3;
  if (VecSize <= 128)
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4;
  if (VecSize <= 160)
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5;
  if (VecSize <= 256)
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8;
  if (VecSize <= 288)
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V9;
  if (VecSize <= 320)
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V10;
  if (VecSize <= 352)
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V11;
  if (VecSize <= 384)
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V12;
  if (VecSize <= 512)
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16;
  if (VecSize <= 1024)
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32;

  if (VecSize <= 32)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1;
  if (VecSize <= 64)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2;
  if (VecSize <= 96)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3;
  if (VecSize <= 128)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4;
  if (VecSize <= 160)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5;
  if (VecSize <= 256)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8;
  if (VecSize <= 288)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V9;
  if (VecSize <= 320)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V10;
  if (VecSize <= 352)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V11;
  if (VecSize <= 384)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V12;
  if (VecSize <= 512)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16;
  if (VecSize <= 1024)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32;

  if (VecSize <= 64)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1;
  if (VecSize <= 128)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2;
  if (VecSize <= 256)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4;
  if (VecSize <= 512)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8;
  if (VecSize <= 1024)
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16;
                                       bool IsSGPR) const {

  assert(EltSize == 32 && "invalid reg indexing elt size");
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_S96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_S160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_S192_SAVE;
  case 28:
    return AMDGPU::SI_SPILL_S224_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 36:
    return AMDGPU::SI_SPILL_S288_SAVE;
  case 40:
    return AMDGPU::SI_SPILL_S320_SAVE;
  case 44:
    return AMDGPU::SI_SPILL_S352_SAVE;
  case 48:
    return AMDGPU::SI_SPILL_S384_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_SAVE;
  case 2:
    return AMDGPU::SI_SPILL_V16_SAVE;
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_V160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_V192_SAVE;
  case 28:
    return AMDGPU::SI_SPILL_V224_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 36:
    return AMDGPU::SI_SPILL_V288_SAVE;
  case 40:
    return AMDGPU::SI_SPILL_V320_SAVE;
  case 44:
    return AMDGPU::SI_SPILL_V352_SAVE;
  case 48:
    return AMDGPU::SI_SPILL_V384_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_SAVE;
  case 4:
    return AMDGPU::SI_SPILL_AV32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_AV64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_AV96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_AV128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_AV160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_AV192_SAVE;
  case 28:
    return AMDGPU::SI_SPILL_AV224_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_AV256_SAVE;
  case 36:
    return AMDGPU::SI_SPILL_AV288_SAVE;
  case 40:
    return AMDGPU::SI_SPILL_AV320_SAVE;
  case 44:
    return AMDGPU::SI_SPILL_AV352_SAVE;
  case 48:
    return AMDGPU::SI_SPILL_AV384_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_AV512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_AV1024_SAVE;
                                           bool IsVectorSuperClass) {

  if (IsVectorSuperClass)
    return AMDGPU::SI_SPILL_WWM_AV32_SAVE;

  return AMDGPU::SI_SPILL_WWM_V32_SAVE;
  bool IsVectorSuperClass = RI.isVectorSuperClass(RC);

  if (ST.hasMAIInsts())

                             FrameInfo.getObjectAlign(FrameIndex));
  unsigned SpillSize = TRI->getSpillSize(*RC);

  if (RI.isSGPRClass(RC)) {
    assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");
    assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI &&
           SrcReg != AMDGPU::EXEC && "exec should not be spilled");

    if (SrcReg.isVirtual() && SpillSize == 4) {
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);

    if (RI.spillSGPRToVGPR())
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_S96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_S160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_S192_RESTORE;
  case 28:
    return AMDGPU::SI_SPILL_S224_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 36:
    return AMDGPU::SI_SPILL_S288_RESTORE;
  case 40:
    return AMDGPU::SI_SPILL_S320_RESTORE;
  case 44:
    return AMDGPU::SI_SPILL_S352_RESTORE;
  case 48:
    return AMDGPU::SI_SPILL_S384_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_RESTORE;
  case 2:
    return AMDGPU::SI_SPILL_V16_RESTORE;
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_V96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_V160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_V192_RESTORE;
  case 28:
    return AMDGPU::SI_SPILL_V224_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 36:
    return AMDGPU::SI_SPILL_V288_RESTORE;
  case 40:
    return AMDGPU::SI_SPILL_V320_RESTORE;
  case 44:
    return AMDGPU::SI_SPILL_V352_RESTORE;
  case 48:
    return AMDGPU::SI_SPILL_V384_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_RESTORE;
  case 4:
    return AMDGPU::SI_SPILL_AV32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_AV64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_AV96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_AV128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_AV160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_AV192_RESTORE;
  case 28:
    return AMDGPU::SI_SPILL_AV224_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_AV256_RESTORE;
  case 36:
    return AMDGPU::SI_SPILL_AV288_RESTORE;
  case 40:
    return AMDGPU::SI_SPILL_AV320_RESTORE;
  case 44:
    return AMDGPU::SI_SPILL_AV352_RESTORE;
  case 48:
    return AMDGPU::SI_SPILL_AV384_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_AV512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_AV1024_RESTORE;
                                              bool IsVectorSuperClass) {

  if (IsVectorSuperClass)
    return AMDGPU::SI_SPILL_WWM_AV32_RESTORE;

  return AMDGPU::SI_SPILL_WWM_V32_RESTORE;
  bool IsVectorSuperClass = RI.isVectorSuperClass(RC);

  if (ST.hasMAIInsts())

  assert(!RI.isAGPRClass(RC));

  unsigned SpillSize = TRI->getSpillSize(*RC);
                             FrameInfo.getObjectAlign(FrameIndex));

  if (RI.isSGPRClass(RC)) {
    assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into");
    assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI &&
           DestReg != AMDGPU::EXEC && "exec should not be spilled");

    if (DestReg.isVirtual() && SpillSize == 4) {
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);

    if (RI.spillSGPRToVGPR())
                             unsigned Quantity) const {
  unsigned MaxSNopCount = 1u << ST.getSNopBits();
  while (Quantity > 0) {
    unsigned Arg = std::min(Quantity, MaxSNopCount);
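    // S_NOP's immediate encodes "wait N + 1 cycles", so the elided loop body
    // would emit S_NOP with (Arg - 1) as the operand and subtract Arg from
    // Quantity (sketch of the missing lines).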
  auto *MF = MBB.getParent();

  assert(Info->isEntryFunction());

  if (MBB.succ_empty()) {
    bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
    if (HasNoTerminator) {
      if (Info->returnsVoid()) {
  constexpr unsigned DoorbellIDMask = 0x3ff;
  constexpr unsigned ECQueueWaveAbort = 0x400;

  if (!MBB.succ_empty() || std::next(MI.getIterator()) != MBB.end()) {
    ContBB = MBB.splitAt(MI, false);

  MBB.addSuccessor(TrapBB);

  Register DoorbellReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::TTMP2)
  Register DoorbellRegMasked =
      MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_AND_B32), DoorbellRegMasked)
  Register SetWaveAbortBit =
      MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_OR_B32), SetWaveAbortBit)
      .addUse(DoorbellRegMasked)
      .addImm(ECQueueWaveAbort);
  BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addUse(SetWaveAbortBit);

  BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::M0)
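  // The sequence above implements the queue-abort trap path: the doorbell ID
  // is masked with DoorbellIDMask, the ECQueueWaveAbort bit is OR'd in, and
  // the result is written to M0 for the sendmsg, with TTMP2 used to preserve
  // M0 around the sequence.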
  switch (MI.getOpcode()) {
    if (MI.isMetaInstruction())

    return MI.getOperand(0).getImm() + 1;
  switch (MI.getOpcode()) {

  case AMDGPU::S_MOV_B64_term:
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;
  case AMDGPU::S_MOV_B32_term:
    MI.setDesc(get(AMDGPU::S_MOV_B32));
    break;
  case AMDGPU::S_XOR_B64_term:
    MI.setDesc(get(AMDGPU::S_XOR_B64));
    break;
  case AMDGPU::S_XOR_B32_term:
    MI.setDesc(get(AMDGPU::S_XOR_B32));
    break;
  case AMDGPU::S_OR_B64_term:
    MI.setDesc(get(AMDGPU::S_OR_B64));
    break;
  case AMDGPU::S_OR_B32_term:
    MI.setDesc(get(AMDGPU::S_OR_B32));
    break;
  case AMDGPU::S_ANDN2_B64_term:
    MI.setDesc(get(AMDGPU::S_ANDN2_B64));
    break;
  case AMDGPU::S_ANDN2_B32_term:
    MI.setDesc(get(AMDGPU::S_ANDN2_B32));
    break;
  case AMDGPU::S_AND_B64_term:
    MI.setDesc(get(AMDGPU::S_AND_B64));
    break;
  case AMDGPU::S_AND_B32_term:
    MI.setDesc(get(AMDGPU::S_AND_B32));
    break;
  case AMDGPU::S_AND_SAVEEXEC_B64_term:
    MI.setDesc(get(AMDGPU::S_AND_SAVEEXEC_B64));
    break;
  case AMDGPU::S_AND_SAVEEXEC_B32_term:
    MI.setDesc(get(AMDGPU::S_AND_SAVEEXEC_B32));
    break;
  case AMDGPU::SI_SPILL_S32_TO_VGPR:
    MI.setDesc(get(AMDGPU::V_WRITELANE_B32));
    break;
  case AMDGPU::SI_RESTORE_S32_FROM_VGPR:
    MI.setDesc(get(AMDGPU::V_READLANE_B32));
    break;
  case AMDGPU::AV_MOV_B32_IMM_PSEUDO: {
    MI.setDesc(
        get(IsAGPR ? AMDGPU::V_ACCVGPR_WRITE_B32_e64 : AMDGPU::V_MOV_B32_e32));
    break;
  }
  case AMDGPU::AV_MOV_B64_IMM_PSEUDO: {
    int64_t Imm = MI.getOperand(1).getImm();

    Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    MI.eraseFromParent();
  case AMDGPU::V_MOV_B64_PSEUDO: {
    Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    if (ST.hasMovB64()) {
      MI.setDesc(get(AMDGPU::V_MOV_B64_e32));

    if (SrcOp.isImm()) {
      APInt Lo(32, Imm.getLoBits(32).getZExtValue());
      APInt Hi(32, Imm.getHiBits(32).getZExtValue());

      if (ST.hasPkMovB32() &&

    MI.eraseFromParent();
  }
  case AMDGPU::V_MOV_B64_DPP_PSEUDO: {

  case AMDGPU::S_MOV_B64_IMM_PSEUDO: {
    if (ST.has64BitLiterals()) {
      MI.setDesc(get(AMDGPU::S_MOV_B64));

      MI.setDesc(get(AMDGPU::S_MOV_B64));

    Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    APInt Lo(32, Imm.getLoBits(32).getZExtValue());
    APInt Hi(32, Imm.getHiBits(32).getZExtValue());

    MI.eraseFromParent();
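    // Without a native 64-bit move (and when a packed V_PK_MOV_B32 does not
    // apply), the 64-bit immediate is split into two 32-bit halves written to
    // sub0/sub1 separately; e.g. 0x1122334455667788 becomes Lo = 0x55667788
    // and Hi = 0x11223344.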
  case AMDGPU::V_SET_INACTIVE_B32: {
        .add(MI.getOperand(3))
        .add(MI.getOperand(4))
        .add(MI.getOperand(1))
        .add(MI.getOperand(2))
        .add(MI.getOperand(5));
    MI.eraseFromParent();
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V9:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V10:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V11:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V12:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V9:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V10:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V11:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V12:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16: {

    if (RI.hasVGPRs(EltRC)) {
      Opc = AMDGPU::V_MOVRELD_B32_e32;
    } else {
      Opc = RI.getRegSizeInBits(*EltRC) == 64 ? AMDGPU::S_MOVRELD_B64
                                              : AMDGPU::S_MOVRELD_B32;
    }

    bool IsUndef = MI.getOperand(1).isUndef();
    unsigned SubReg = MI.getOperand(3).getImm();
    assert(VecReg == MI.getOperand(1).getReg());

        .add(MI.getOperand(2))

    const int ImpDefIdx =
    const int ImpUseIdx = ImpDefIdx + 1;

    MI.eraseFromParent();
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V9:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V10:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V11:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V12:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32: {
    assert(ST.useVGPRIndexMode());

    bool IsUndef = MI.getOperand(1).isUndef();

    const MCInstrDesc &OpDesc = get(AMDGPU::V_MOV_B32_indirect_write);

        .add(MI.getOperand(2))

    const int ImpDefIdx =
    const int ImpUseIdx = ImpDefIdx + 1;

    MI.eraseFromParent();
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V9:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V10:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V11:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V12:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32: {
    assert(ST.useVGPRIndexMode());

    bool IsUndef = MI.getOperand(1).isUndef();

    MI.eraseFromParent();
  case AMDGPU::SI_PC_ADD_REL_OFFSET: {
    Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1);

    if (ST.hasGetPCZeroExtension()) {
      Bundler.append(
          BuildMI(MF, DL, get(AMDGPU::S_SEXT_I32_I16), RegHi).addReg(RegHi));

    Bundler.append(
        BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo).addReg(RegLo).add(OpLo));

    MI.eraseFromParent();
  }
  case AMDGPU::SI_PC_ADD_REL_OFFSET64: {

    Op.setOffset(Op.getOffset() + 4);

    Bundler.append(
        BuildMI(MF, DL, get(AMDGPU::S_ADD_U64), Reg).addReg(Reg).add(Op));

    MI.eraseFromParent();
  case AMDGPU::ENTER_STRICT_WWM: {

  case AMDGPU::ENTER_STRICT_WQM: {

    MI.eraseFromParent();
  }
  case AMDGPU::EXIT_STRICT_WWM:
  case AMDGPU::EXIT_STRICT_WQM: {

  case AMDGPU::SI_RETURN: {

    MI.eraseFromParent();
  }

  case AMDGPU::S_MUL_U64_U32_PSEUDO:
  case AMDGPU::S_MUL_I64_I32_PSEUDO:
    MI.setDesc(get(AMDGPU::S_MUL_U64));
    break;

  case AMDGPU::S_GETPC_B64_pseudo:
    MI.setDesc(get(AMDGPU::S_GETPC_B64));
    if (ST.hasGetPCZeroExtension()) {
      Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

  case AMDGPU::V_MAX_BF16_PSEUDO_e64:
    assert(ST.hasBF16PackedInsts());
    MI.setDesc(get(AMDGPU::V_PK_MAX_NUM_BF16));
  case AMDGPU::S_LOAD_DWORDX16_IMM:
  case AMDGPU::S_LOAD_DWORDX8_IMM: {

    for (auto &CandMO : I->operands()) {
      if (!CandMO.isReg() || CandMO.getReg() != RegToFind || CandMO.isDef())

    if (!UseMO || UseMO->getSubReg() == AMDGPU::NoSubRegister)

    unsigned SubregSize = RI.getSubRegIdxSize(UseMO->getSubReg());

    assert(MRI.use_nodbg_empty(DestReg) && "DestReg should have no users yet.");

    unsigned NewOpcode = -1;
    if (SubregSize == 256)
      NewOpcode = AMDGPU::S_LOAD_DWORDX8_IMM;
    else if (SubregSize == 128)
      NewOpcode = AMDGPU::S_LOAD_DWORDX4_IMM;

    MRI.setRegClass(DestReg, NewRC);

    UseMO->setSubReg(AMDGPU::NoSubRegister);

    MI->getOperand(0).setReg(DestReg);
    MI->getOperand(0).setSubReg(AMDGPU::NoSubRegister);

    OffsetMO->setImm(FinalOffset);

    MI->setMemRefs(*MF, NewMMOs);
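    // This case narrows a wide scalar load whose single user reads only a
    // 256- or 128-bit subregister: the load becomes S_LOAD_DWORDX8/X4, the
    // immediate offset is advanced to the used subrange, and the user's
    // subregister index is cleared.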
std::pair<MachineInstr*, MachineInstr*>
SIInstrInfo::expandMovDPP64(MachineInstr &MI) const {
  assert(MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);

  if (ST.hasMovB64() && ST.hasFeature(AMDGPU::FeatureDPALU_DPP) &&
    MI.setDesc(get(AMDGPU::V_MOV_B64_dpp));
    return std::pair(&MI, nullptr);

  for (auto Sub : {AMDGPU::sub0, AMDGPU::sub1}) {
    if (Dst.isPhysical()) {
      MovDPP.addDef(RI.getSubReg(Dst, Sub));

      auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    for (unsigned I = 1; I <= 2; ++I) {
      if (SrcOp.isImm()) {
        Imm.ashrInPlace(Part * 32);
        MovDPP.addImm(Imm.getLoBits(32).getZExtValue());

        if (Src.isPhysical())
          MovDPP.addReg(RI.getSubReg(Src, Sub));

      MovDPP.addImm(MO.getImm());

    Split[Part] = MovDPP;

  if (Dst.isVirtual())

  MI.eraseFromParent();
  return std::pair(Split[0], Split[1]);
std::optional<DestSourcePair>
SIInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  if (MI.getOpcode() == AMDGPU::WWM_COPY)

  return std::nullopt;
}

                                       AMDGPU::OpName Src0OpName,
                                       AMDGPU::OpName Src1OpName) const {
         "All commutable instructions have both src0 and src1 modifiers");

  int Src0ModsVal = Src0Mods->getImm();
  int Src1ModsVal = Src1Mods->getImm();

  Src1Mods->setImm(Src0ModsVal);
  Src0Mods->setImm(Src1ModsVal);
  bool IsKill = RegOp.isKill();
  bool IsUndef = RegOp.isUndef();
  bool IsDebug = RegOp.isDebug();

  if (NonRegOp.isImm())
  else if (NonRegOp.isFI())

  int64_t NonRegVal = NonRegOp1.getImm();

  NonRegOp2.setImm(NonRegVal);
                                        unsigned OpIdx1) const {
  unsigned Opc = MI.getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);

  if ((int)OpIdx0 == Src0Idx && !MO0.isReg() &&

  if ((int)OpIdx1 == Src0Idx && !MO1.isReg() &&

  if ((int)OpIdx1 != Src0Idx && MO0.isReg()) {

  if ((int)OpIdx0 != Src0Idx && MO1.isReg()) {
                                                  unsigned Src1Idx) const {
  assert(!NewMI && "this should never be used");

  unsigned Opc = MI.getOpcode();
  if (CommutedOpcode == -1)

  if (Src0Idx > Src1Idx)

  assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
             static_cast<int>(Src0Idx) &&
         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
             static_cast<int>(Src1Idx) &&
         "inconsistency with findCommutedOpIndices");

                       Src1, AMDGPU::OpName::src1_modifiers);
                   AMDGPU::OpName::src1_sel);
                                        unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {

                                        unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  if (!Desc.isCommutable())

  unsigned Opc = Desc.getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);

  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
                                        int64_t BrOffset) const {

  return MI.getOperand(0).getMBB();

  if (MI.getOpcode() == AMDGPU::SI_IF || MI.getOpcode() == AMDGPU::SI_ELSE ||
      MI.getOpcode() == AMDGPU::SI_LOOP)

         "new block should be inserted for expanding unconditional branch");
         "restore block should be inserted for restoring clobbered registers");
  if (ST.hasAddPC64Inst()) {
        MCCtx.createTempSymbol("offset", true);
        MCCtx.createTempSymbol("post_addpc", true);
    AddPC->setPostInstrSymbol(*MF, PostAddPCLabel);
    Offset->setVariableValue(OffsetExpr);

  assert(RS && "RegScavenger required for long branching");

  Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  const bool FlushSGPRWrites = (ST.isWave64() && ST.hasVALUMaskWriteHazard()) ||
                               ST.hasVALUReadSGPRHazard();
  auto ApplyHazardWorkarounds = [this, &MBB, &I, &DL, FlushSGPRWrites]() {
    if (FlushSGPRWrites)

  ApplyHazardWorkarounds();

      MCCtx.createTempSymbol("post_getpc", true);
      MCCtx.createTempSymbol("offset_lo", true);
      MCCtx.createTempSymbol("offset_hi", true);
      .addReg(PCReg, 0, AMDGPU::sub0)
      .addReg(PCReg, 0, AMDGPU::sub1)
  ApplyHazardWorkarounds();

  if (LongBranchReservedReg) {
    RS->enterBasicBlock(MBB);
    Scav = LongBranchReservedReg;
    RS->enterBasicBlockEnd(MBB);
    Scav = RS->scavengeRegisterBackwards(

    RS->setRegUsed(Scav);
    MRI.replaceRegWith(PCReg, Scav);
    MRI.clearVirtRegs();

    TRI->spillEmergencySGPR(GetPC, RestoreBB, AMDGPU::SGPR0_SGPR1, RS);
    MRI.replaceRegWith(PCReg, AMDGPU::SGPR0_SGPR1);
    MRI.clearVirtRegs();
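  // Long-branch expansion sketch: without a 64-bit add-to-PC instruction the
  // code materializes the PC with S_GETPC_B64 into PCReg, adds the
  // offset_lo/offset_hi distance to the target block, and jumps through
  // S_SETPC_B64; the address lives in the reserved long-branch SGPR pair or a
  // scavenged one, falling back to spilling SGPR0_SGPR1.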
unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
  switch (Cond) {
  case SIInstrInfo::SCC_TRUE:
    return AMDGPU::S_CBRANCH_SCC1;
  case SIInstrInfo::SCC_FALSE:
    return AMDGPU::S_CBRANCH_SCC0;
  case SIInstrInfo::VCCNZ:
    return AMDGPU::S_CBRANCH_VCCNZ;
  case SIInstrInfo::VCCZ:
    return AMDGPU::S_CBRANCH_VCCZ;
  case SIInstrInfo::EXECNZ:
    return AMDGPU::S_CBRANCH_EXECNZ;
  case SIInstrInfo::EXECZ:
    return AMDGPU::S_CBRANCH_EXECZ;
SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_CBRANCH_SCC0:
    return SCC_FALSE;
  case AMDGPU::S_CBRANCH_SCC1:
    return SCC_TRUE;
  case AMDGPU::S_CBRANCH_VCCNZ:
    return VCCNZ;
  case AMDGPU::S_CBRANCH_VCCZ:
    return VCCZ;
  case AMDGPU::S_CBRANCH_EXECNZ:
    return EXECNZ;
  case AMDGPU::S_CBRANCH_EXECZ:
    return EXECZ;
                                bool AllowModify) const {
  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    TBB = I->getOperand(0).getMBB();

  BranchPredicate Pred = getBranchPredicate(I->getOpcode());
  if (Pred == INVALID_BR)

  Cond.push_back(I->getOperand(1));

  if (I == MBB.end()) {

  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    FBB = I->getOperand(0).getMBB();
                                bool AllowModify) const {

  while (I != E && !I->isBranch() && !I->isReturn()) {
    switch (I->getOpcode()) {
    case AMDGPU::S_MOV_B64_term:
    case AMDGPU::S_XOR_B64_term:
    case AMDGPU::S_OR_B64_term:
    case AMDGPU::S_ANDN2_B64_term:
    case AMDGPU::S_AND_B64_term:
    case AMDGPU::S_AND_SAVEEXEC_B64_term:
    case AMDGPU::S_MOV_B32_term:
    case AMDGPU::S_XOR_B32_term:
    case AMDGPU::S_OR_B32_term:
    case AMDGPU::S_ANDN2_B32_term:
    case AMDGPU::S_AND_B32_term:
    case AMDGPU::S_AND_SAVEEXEC_B32_term:

    case AMDGPU::SI_ELSE:
    case AMDGPU::SI_KILL_I1_TERMINATOR:
    case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
                                   int *BytesRemoved) const {
  unsigned RemovedSize = 0;

    if (MI.isBranch() || MI.isReturn()) {
      MI.eraseFromParent();

  if (BytesRemoved)
    *BytesRemoved = RemovedSize;
                                   int *BytesAdded) const {
  if (!FBB && Cond.empty()) {
    if (BytesAdded)
      *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;

      = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));

  if (BytesAdded)
    *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;

  if (BytesAdded)
    *BytesAdded = ST.hasOffset3fBug() ? 16 : 8;

  if (Cond.size() != 2) {

  if (Cond[0].isImm()) {
                                   Register FalseReg, int &CondCycles,
                                   int &TrueCycles, int &FalseCycles) const {

    if (MRI.getRegClass(FalseReg) != RC)

    CondCycles = TrueCycles = FalseCycles = NumInsts;
    return RI.hasVGPRs(RC) && NumInsts <= 6;

    if (MRI.getRegClass(FalseReg) != RC)

    if (NumInsts % 2 == 0)

    CondCycles = TrueCycles = FalseCycles = NumInsts;
    return RI.isSGPRClass(RC);
  BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm());
  if (Pred == VCCZ || Pred == SCC_FALSE) {
    Pred = static_cast<BranchPredicate>(-Pred);

  unsigned DstSize = RI.getRegSizeInBits(*DstRC);

  if (DstSize == 32) {
    if (Pred == SCC_TRUE) {

  if (DstSize == 64 && Pred == SCC_TRUE) {

  static const int16_t Sub0_15[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
      AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
      AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
  };

  static const int16_t Sub0_15_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
      AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
      AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
  };

  unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;
  const int16_t *SubIndices = Sub0_15;
  int NElts = DstSize / 32;

  if (Pred == SCC_TRUE) {
      SelOp = AMDGPU::S_CSELECT_B32;
      EltRC = &AMDGPU::SGPR_32RegClass;

      SelOp = AMDGPU::S_CSELECT_B64;
      EltRC = &AMDGPU::SGPR_64RegClass;
      SubIndices = Sub0_15_64;

      MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg);

  for (int Idx = 0; Idx != NElts; ++Idx) {
    Register DstElt = MRI.createVirtualRegister(EltRC);

    unsigned SubIdx = SubIndices[Idx];

    if (SelOp == AMDGPU::V_CNDMASK_B32_e32) {
          .addReg(FalseReg, 0, SubIdx)
          .addReg(TrueReg, 0, SubIdx);
    } else {
          .addReg(TrueReg, 0, SubIdx)
          .addReg(FalseReg, 0, SubIdx);
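  // The operand order flips between the two arms because V_CNDMASK_B32
  // selects src1 when the condition bit is set (FalseReg belongs in src0),
  // while S_CSELECT returns src0 when SCC is set (TrueReg comes first).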
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B16_t16_e32:
  case AMDGPU::V_MOV_B16_t16_e64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_MOV_B64_e32:
  case AMDGPU::V_MOV_B64_e64:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B64_IMM_PSEUDO:
  case AMDGPU::WWM_COPY:
  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
  case AMDGPU::V_ACCVGPR_READ_B32_e64:
  case AMDGPU::V_ACCVGPR_MOV_B32:
  case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
  case AMDGPU::AV_MOV_B64_IMM_PSEUDO:

  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B16_t16_e32:
  case AMDGPU::V_MOV_B16_t16_e64:

  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_MOV_B64_e32:
  case AMDGPU::V_MOV_B64_e64:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B64_IMM_PSEUDO:
  case AMDGPU::WWM_COPY:
  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
  case AMDGPU::V_ACCVGPR_READ_B32_e64:
  case AMDGPU::V_ACCVGPR_MOV_B32:
  case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
  case AMDGPU::AV_MOV_B64_IMM_PSEUDO:
    AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers,
    AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::clamp,
    AMDGPU::OpName::omod, AMDGPU::OpName::op_sel};

  unsigned Opc = MI.getOpcode();
    int Idx = AMDGPU::getNamedOperandIdx(Opc, Name);
      MI.removeOperand(Idx);
                                                   unsigned SubRegIndex) {
  switch (SubRegIndex) {
  case AMDGPU::NoSubRegister:

  case AMDGPU::sub1_lo16:

  case AMDGPU::sub1_hi16:

  return std::nullopt;
  case AMDGPU::V_MAC_F16_e32:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_MAD_F16_e64:
    return AMDGPU::V_MADAK_F16;
  case AMDGPU::V_MAC_F32_e32:
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAD_F32_e64:
    return AMDGPU::V_MADAK_F32;
  case AMDGPU::V_FMAC_F32_e32:
  case AMDGPU::V_FMAC_F32_e64:
  case AMDGPU::V_FMA_F32_e64:
    return AMDGPU::V_FMAAK_F32;
  case AMDGPU::V_FMAC_F16_e32:
  case AMDGPU::V_FMAC_F16_e64:
  case AMDGPU::V_FMAC_F16_t16_e64:
  case AMDGPU::V_FMAC_F16_fake16_e64:
  case AMDGPU::V_FMA_F16_e64:
    return ST.hasTrue16BitInsts() ? ST.useRealTrue16Insts()
                                        ? AMDGPU::V_FMAAK_F16_t16
                                        : AMDGPU::V_FMAAK_F16_fake16
                                  : AMDGPU::V_FMAAK_F16;
  case AMDGPU::V_FMAC_F64_e32:
  case AMDGPU::V_FMAC_F64_e64:
  case AMDGPU::V_FMA_F64_e64:
    return AMDGPU::V_FMAAK_F64;
  case AMDGPU::V_MAC_F16_e32:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_MAD_F16_e64:
    return AMDGPU::V_MADMK_F16;
  case AMDGPU::V_MAC_F32_e32:
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAD_F32_e64:
    return AMDGPU::V_MADMK_F32;
  case AMDGPU::V_FMAC_F32_e32:
  case AMDGPU::V_FMAC_F32_e64:
  case AMDGPU::V_FMA_F32_e64:
    return AMDGPU::V_FMAMK_F32;
  case AMDGPU::V_FMAC_F16_e32:
  case AMDGPU::V_FMAC_F16_e64:
  case AMDGPU::V_FMAC_F16_t16_e64:
  case AMDGPU::V_FMAC_F16_fake16_e64:
  case AMDGPU::V_FMA_F16_e64:
    return ST.hasTrue16BitInsts() ? ST.useRealTrue16Insts()
                                        ? AMDGPU::V_FMAMK_F16_t16
                                        : AMDGPU::V_FMAMK_F16_fake16
                                  : AMDGPU::V_FMAMK_F16;
  case AMDGPU::V_FMAC_F64_e32:
  case AMDGPU::V_FMAC_F64_e64:
  case AMDGPU::V_FMA_F64_e64:
    return AMDGPU::V_FMAMK_F64;
  const bool HasMultipleUses = !MRI->hasOneNonDBGUse(Reg);

  assert(!DefMI.getOperand(0).getSubReg() && "Expected SSA form");

  if (Opc == AMDGPU::COPY) {
    assert(!UseMI.getOperand(0).getSubReg() && "Expected SSA form");

    if (HasMultipleUses) {
      unsigned ImmDefSize = RI.getRegSizeInBits(*MRI->getRegClass(Reg));

      if (UseSubReg != AMDGPU::NoSubRegister && ImmDefSize == 64)

      if (ImmDefSize == 32 &&

    bool Is16Bit = UseSubReg != AMDGPU::NoSubRegister &&
                   RI.getSubRegIdxSize(UseSubReg) == 16;

      if (RI.hasVGPRs(DstRC))

      if (DstReg.isVirtual() && UseSubReg != AMDGPU::lo16)

    unsigned NewOpc = AMDGPU::INSTRUCTION_LIST_END;

    for (unsigned MovOp :
         {AMDGPU::S_MOV_B32, AMDGPU::V_MOV_B32_e32, AMDGPU::S_MOV_B64,
          AMDGPU::V_MOV_B64_PSEUDO, AMDGPU::V_ACCVGPR_WRITE_B32_e64}) {

        MovDstRC = RI.getMatchingSuperRegClass(MovDstRC, DstRC, AMDGPU::lo16);

        if (MovDstPhysReg) {
          MovDstPhysReg =
              RI.getMatchingSuperReg(MovDstPhysReg, AMDGPU::lo16, MovDstRC);

      if (MovDstPhysReg) {
        if (!MovDstRC->contains(MovDstPhysReg))
      } else if (!MRI->constrainRegClass(DstReg, MovDstRC)) {

      if (!RI.opCanUseLiteralConstant(OpInfo.OperandType) &&

    if (NewOpc == AMDGPU::INSTRUCTION_LIST_END)

    UseMI.getOperand(0).setSubReg(AMDGPU::NoSubRegister);
    if (MovDstPhysReg)
      UseMI.getOperand(0).setReg(MovDstPhysReg);

    UseMI.setDesc(NewMCID);
    UseMI.getOperand(1).ChangeToImmediate(*SubRegImm);
    UseMI.addImplicitDefUseOperands(*MF);
  if (HasMultipleUses)

  if (Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 ||
      Opc == AMDGPU::V_MAD_F16_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
      Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 ||
      Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64 ||
      Opc == AMDGPU::V_FMAC_F16_t16_e64 ||
      Opc == AMDGPU::V_FMAC_F16_fake16_e64 || Opc == AMDGPU::V_FMA_F64_e64 ||
      Opc == AMDGPU::V_FMAC_F64_e64) {

    int Src0Idx = getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::src0);

    MachineOperand *RegSrc =
        Src1->isReg() && Src1->getReg() == Reg ? Src0 : Src1;
    if (!RegSrc->isReg())

    if (RI.isSGPRClass(MRI->getRegClass(RegSrc->getReg())) &&
        ST.getConstantBusLimit(Opc) < 2)

    if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))

    if (Def && Def->isMoveImmediate() &&

      if (NewOpc == AMDGPU::V_FMAMK_F16_t16 ||
          NewOpc == AMDGPU::V_FMAMK_F16_fake16)

      unsigned SrcSubReg = RegSrc->getSubReg();

      if (Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
          Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_t16_e64 ||
          Opc == AMDGPU::V_FMAC_F16_fake16_e64 ||
          Opc == AMDGPU::V_FMAC_F16_e64 || Opc == AMDGPU::V_FMAC_F64_e64)
        UseMI.untieRegOperand(
            AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));

      bool DeleteDef = MRI->use_nodbg_empty(Reg);
        DefMI.eraseFromParent();
  if (ST.getConstantBusLimit(Opc) < 2) {

    bool Src0Inlined = false;
    if (Src0->isReg()) {

      if (Def && Def->isMoveImmediate() &&

      } else if (ST.getConstantBusLimit(Opc) <= 1 &&

    if (Src1->isReg() && !Src0Inlined) {

      if (Def && Def->isMoveImmediate() &&
          MRI->hasOneNonDBGUse(Src1->getReg()) && commuteInstruction(UseMI))
      else if (RI.isSGPRReg(*MRI, Src1->getReg()))

      if (NewOpc == AMDGPU::V_FMAAK_F16_t16 ||
          NewOpc == AMDGPU::V_FMAAK_F16_fake16)

      if (Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
          Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_t16_e64 ||
          Opc == AMDGPU::V_FMAC_F16_fake16_e64 ||
          Opc == AMDGPU::V_FMAC_F16_e64 || Opc == AMDGPU::V_FMAC_F64_e64)
        UseMI.untieRegOperand(
            AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));

      const std::optional<int64_t> SubRegImm =

      bool DeleteDef = MRI->use_nodbg_empty(Reg);
        DefMI.eraseFromParent();
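  // Both folding paths above rewrite a MAC/FMA whose operand comes from a
  // materialized immediate: V_FMAMK_*/V_MADMK_* take the constant as the
  // multiplicand (d = v0 * K + v1), while V_FMAAK_*/V_MADAK_* take it as the
  // addend (d = v0 * v1 + K), so the form chosen depends on which source the
  // immediate feeds.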
  if (BaseOps1.size() != BaseOps2.size())

  for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) {
    if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I]))

  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
  LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
         LowOffset + (int)LowWidth.getValue() <= HighOffset;
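  // Two fixed-width accesses are disjoint when the lower one ends at or
  // before the higher one begins; e.g. [4, 4+8) does not overlap an access
  // at offset 12 because 4 + 8 <= 12.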
bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,

  int64_t Offset0, Offset1;
  bool Offset0IsScalable, Offset1IsScalable;

  LocationSize Width0 = MIa.memoperands().front()->getSize();
  LocationSize Width1 = MIb.memoperands().front()->getSize();

         "MIa must load from or modify a memory location");
         "MIb must load from or modify a memory location");

    return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return checkInstOffsetsDoNotOverlap(MIa, MIb);
  if (Reg.isPhysical())

  auto *Def = MRI.getUniqueVRegDef(Reg);

    Imm = Def->getOperand(1).getImm();
  unsigned NumOps = MI.getNumOperands();

    if (Op.isReg() && Op.isKill())
  case AMDGPU::V_MAC_F16_e32:
  case AMDGPU::V_MAC_F16_e64:
    return AMDGPU::V_MAD_F16_e64;
  case AMDGPU::V_MAC_F32_e32:
  case AMDGPU::V_MAC_F32_e64:
    return AMDGPU::V_MAD_F32_e64;
  case AMDGPU::V_MAC_LEGACY_F32_e32:
  case AMDGPU::V_MAC_LEGACY_F32_e64:
    return AMDGPU::V_MAD_LEGACY_F32_e64;
  case AMDGPU::V_FMAC_LEGACY_F32_e32:
  case AMDGPU::V_FMAC_LEGACY_F32_e64:
    return AMDGPU::V_FMA_LEGACY_F32_e64;
  case AMDGPU::V_FMAC_F16_e32:
  case AMDGPU::V_FMAC_F16_e64:
  case AMDGPU::V_FMAC_F16_t16_e64:
  case AMDGPU::V_FMAC_F16_fake16_e64:
    return ST.hasTrue16BitInsts() ? ST.useRealTrue16Insts()
                                        ? AMDGPU::V_FMA_F16_gfx9_t16_e64
                                        : AMDGPU::V_FMA_F16_gfx9_fake16_e64
                                  : AMDGPU::V_FMA_F16_gfx9_e64;
  case AMDGPU::V_FMAC_F32_e32:
  case AMDGPU::V_FMAC_F32_e64:
    return AMDGPU::V_FMA_F32_e64;
  case AMDGPU::V_FMAC_F64_e32:
  case AMDGPU::V_FMAC_F64_e64:
    return AMDGPU::V_FMA_F64_e64;
  unsigned Opc = MI.getOpcode();

  if (NewMFMAOpc != -1) {
    for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
      MIB.add(MI.getOperand(I));

  if (Def.isEarlyClobber() && Def.isReg() &&

    auto UpdateDefIndex = [&](LiveRange &LR) {
      auto *S = LR.find(OldIndex);
      if (S != LR.end() && S->start == OldIndex) {
        assert(S->valno && S->valno->def == OldIndex);
        S->start = NewIndex;
        S->valno->def = NewIndex;

    for (auto &SR : LI.subranges())

  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)

  assert(Opc != AMDGPU::V_FMAC_F16_t16_e32 &&
         Opc != AMDGPU::V_FMAC_F16_fake16_e32 &&
         "V_FMAC_F16_t16/fake16_e32 is not supported and not expected to be "
         "produced.");

  bool IsF64 = Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64;
  bool IsLegacy = Opc == AMDGPU::V_MAC_LEGACY_F32_e32 ||
                  Opc == AMDGPU::V_MAC_LEGACY_F32_e64 ||
                  Opc == AMDGPU::V_FMAC_LEGACY_F32_e32 ||
                  Opc == AMDGPU::V_FMAC_LEGACY_F32_e64;
  bool Src0Literal = false;
4099 bool Src0Literal =
false;
4104 case AMDGPU::V_MAC_F16_e64:
4105 case AMDGPU::V_FMAC_F16_e64:
4106 case AMDGPU::V_FMAC_F16_t16_e64:
4107 case AMDGPU::V_FMAC_F16_fake16_e64:
4108 case AMDGPU::V_MAC_F32_e64:
4109 case AMDGPU::V_MAC_LEGACY_F32_e64:
4110 case AMDGPU::V_FMAC_F32_e64:
4111 case AMDGPU::V_FMAC_LEGACY_F32_e64:
4112 case AMDGPU::V_FMAC_F64_e64:
4114 case AMDGPU::V_MAC_F16_e32:
4115 case AMDGPU::V_FMAC_F16_e32:
4116 case AMDGPU::V_MAC_F32_e32:
4117 case AMDGPU::V_MAC_LEGACY_F32_e32:
4118 case AMDGPU::V_FMAC_F32_e32:
4119 case AMDGPU::V_FMAC_LEGACY_F32_e32:
4120 case AMDGPU::V_FMAC_F64_e32: {
4121 int Src0Idx = AMDGPU::getNamedOperandIdx(
MI.getOpcode(),
4122 AMDGPU::OpName::src0);
  if (!Src0Mods && !Src1Mods && !Src2Mods && !Clamp && !Omod && !IsLegacy &&
      (!IsF64 || ST.hasFmaakFmamkF64Insts()) &&
      (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() ||
       !RI.isSGPRReg(MBB.getParent()->getRegInfo(), Src0->getReg()))) {

    const auto killDef = [&]() -> void {

      if (MRI.hasOneNonDBGUse(DefReg)) {

        DefMI->setDesc(get(AMDGPU::IMPLICIT_DEF));
        DefMI->getOperand(0).setIsDead(true);
        for (unsigned I = DefMI->getNumOperands() - 1; I != 0; --I)

      Register DummyReg = MRI.cloneVirtualRegister(DefReg);

      if (MIOp.isReg() && MIOp.getReg() == DefReg) {
        MIOp.setIsUndef(true);
        MIOp.setReg(DummyReg);

          MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0),

    if (Src0Literal && !ST.hasVOP3Literal())

  MIB.addImm(OpSel ? OpSel->getImm() : 0);
  switch (MI.getOpcode()) {
  case AMDGPU::S_SET_GPR_IDX_ON:
  case AMDGPU::S_SET_GPR_IDX_MODE:
  case AMDGPU::S_SET_GPR_IDX_OFF:

  if (MI.isTerminator() || MI.isPosition())

  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)

  if (MI.getOpcode() == AMDGPU::SCHED_BARRIER && MI.getOperand(0).getImm() == 0)

  return MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
         MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
         MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
         MI.getOpcode() == AMDGPU::S_SETPRIO ||
         MI.getOpcode() == AMDGPU::S_SETPRIO_INC_WG ||

  return Opcode == AMDGPU::DS_ORDERED_COUNT ||
         Opcode == AMDGPU::DS_ADD_GS_REG_RTN ||
         Opcode == AMDGPU::DS_SUB_GS_REG_RTN || isGWS(Opcode);
  if (MI.getMF()->getFunction().hasFnAttribute("amdgpu-no-flat-scratch-init"))

  if (MI.memoperands_empty())

    unsigned AS = Memop->getAddrSpace();
    if (AS == AMDGPUAS::FLAT_ADDRESS) {
      const MDNode *MD = Memop->getAAInfo().NoAliasAddrSpace;
      return !MD || !AMDGPU::hasValueInRangeLikeMetadata(
                        *MD, AMDGPUAS::PRIVATE_ADDRESS);

  if (MI.memoperands_empty())

    unsigned AS = Memop->getAddrSpace();

  if (ST.isTgSplitEnabled())

  if (MI.memoperands_empty())

    unsigned AS = Memop->getAddrSpace();
  unsigned Opcode = MI.getOpcode();

  if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT ||
      isEXP(Opcode) || Opcode == AMDGPU::DS_ORDERED_COUNT ||
      Opcode == AMDGPU::S_TRAP || Opcode == AMDGPU::S_WAIT_EVENT)

  if (MI.isCall() || MI.isInlineAsm())

  if (Opcode == AMDGPU::V_READFIRSTLANE_B32 ||
      Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32 ||
      Opcode == AMDGPU::SI_RESTORE_S32_FROM_VGPR ||
      Opcode == AMDGPU::SI_SPILL_S32_TO_VGPR)

  if (MI.isMetaInstruction())

  if (MI.isCopyLike()) {
    if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg()))

    return MI.readsRegister(AMDGPU::EXEC, &RI);

  return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI);
  switch (Imm.getBitWidth()) {
                                        ST.hasInv2PiInlineImm());
                                        ST.hasInv2PiInlineImm());
    return ST.has16BitInsts() &&
                                         ST.hasInv2PiInlineImm());

  APInt IntImm = Imm.bitcastToAPInt();

  bool HasInv2Pi = ST.hasInv2PiInlineImm();
    return ST.has16BitInsts() &&
    return ST.has16BitInsts() &&

  switch (OperandType) {
    int32_t Trunc = static_cast<int32_t>(Imm);

    int16_t Trunc = static_cast<int16_t>(Imm);
    return ST.has16BitInsts() &&

    int16_t Trunc = static_cast<int16_t>(Imm);
    return ST.has16BitInsts() &&
  if (!RI.opCanUseLiteralConstant(OpInfo.OperandType))

  return ST.hasVOP3Literal();

                                         int64_t ImmVal) const {

  if (isMAI(InstDesc) && ST.hasMFMAInlineLiteralBug() &&
      OpNo == (unsigned)AMDGPU::getNamedOperandIdx(InstDesc.getOpcode(),
                                                   AMDGPU::OpName::src2))

  return RI.opCanUseInlineConstant(OpInfo.OperandType);

         "unexpected imm-like operand kind");

  if (Opcode == AMDGPU::V_MUL_LEGACY_F32_e64 && ST.hasGFX90AInsts())

                                  AMDGPU::OpName OpName) const {
  return Mods && Mods->getImm();
  switch (MI.getOpcode()) {
  default:
    return false;
  case AMDGPU::V_ADDC_U32_e64:
  case AMDGPU::V_SUBB_U32_e64:
  case AMDGPU::V_SUBBREV_U32_e64: {

  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_LEGACY_F32_e64:
  case AMDGPU::V_FMAC_F16_e64:
  case AMDGPU::V_FMAC_F16_t16_e64:
  case AMDGPU::V_FMAC_F16_fake16_e64:
  case AMDGPU::V_FMAC_F32_e64:
  case AMDGPU::V_FMAC_F64_e64:
  case AMDGPU::V_FMAC_LEGACY_F32_e64:

  case AMDGPU::V_CNDMASK_B32_e64:

    if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||

      (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) {
                                           unsigned Op32) const {
      Inst32.add(MI.getOperand(I));

  int Idx = MI.getNumExplicitDefs();
    int OpTy = MI.getDesc().operands()[Idx++].OperandType;

  if (AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2) == -1) {
4840 if (Reg == AMDGPU::SGPR_NULL || Reg == AMDGPU::SGPR_NULL64)
4848 return Reg == AMDGPU::VCC || Reg == AMDGPU::VCC_LO || Reg == AMDGPU::M0;
4851 return AMDGPU::SReg_32RegClass.contains(Reg) ||
4852 AMDGPU::SReg_64RegClass.contains(Reg);
4858 return Reg.isVirtual() ? RI.isSGPRClass(MRI.getRegClass(Reg))
4870 return Reg.isVirtual() ? RI.isSGPRClass(MRI.getRegClass(Reg))
4880 switch (MO.getReg()) {
4882 case AMDGPU::VCC_LO:
4883 case AMDGPU::VCC_HI:
4885 case AMDGPU::FLAT_SCR:
4898 switch (MI.getOpcode()) {
4899 case AMDGPU::V_READLANE_B32:
4900 case AMDGPU::SI_RESTORE_S32_FROM_VGPR:
4901 case AMDGPU::V_WRITELANE_B32:
4902 case AMDGPU::SI_SPILL_S32_TO_VGPR:
4909 if (MI.isPreISelOpcode() ||
4910 SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
4925 if (SubReg.getReg().isPhysical())
4928 return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
4939 if (RI.isVectorRegister(MRI, SrcReg) && RI.isSGPRReg(MRI, DstReg)) {
4940 ErrInfo = "illegal copy from vector register to SGPR";
4958 if (!MRI.isSSA() && MI.isCopy())
4959 return verifyCopy(MI, MRI, ErrInfo);
4961 if (SIInstrInfo::isGenericOpcode(Opcode))
4964 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
4965 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
4966 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
4968 if (Src0Idx == -1) {
4970 Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0X);
4971 Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vsrc1X);
4972 Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0Y);
4973 Src3Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vsrc1Y);
4978 if (!Desc.isVariadic() &&
4979 Desc.getNumOperands() != MI.getNumExplicitOperands()) {
4980 ErrInfo = "Instruction has wrong number of operands.";
4984 if (MI.isInlineAsm()) {
4997 if (!Reg.isVirtual() && !RC->contains(Reg)) {
4998 ErrInfo = "inlineasm operand has incorrect register class.";
5006 if (isImage(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) {
5007 ErrInfo = "missing memory operand from image instruction.";
5012 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
5015 ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
5016 "all fp values to integers.";
5021 int16_t RegClass = getOpRegClassID(OpInfo);
5023 switch (OpInfo.OperandType) {
5025 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
5026 ErrInfo = "Illegal immediate value for operand.";
5060 ErrInfo = "Illegal immediate value for operand.";
5067 ErrInfo = "Expected inline constant for operand.";
5082 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
5083 ErrInfo = "Expected immediate, but got non-immediate";
5092 if (OpInfo.isGenericType())
5107 if (ST.needsAlignedVGPRs() && Opcode != AMDGPU::AV_MOV_B64_IMM_PSEUDO) {
5109 if (RI.hasVectorRegisters(RC) && MO.getSubReg()) {
5111 RI.getSubRegisterClass(RC, MO.getSubReg())) {
5112 RC = RI.getCompatibleSubRegClass(RC, SubRC, MO.getSubReg());
5119 if (!RC || !RI.isProperlyAlignedRC(*RC)) {
5120 ErrInfo = "Subtarget requires even aligned vector registers";
5125 if (RegClass != -1) {
5126 if (Reg.isVirtual())
5131 ErrInfo = "Operand has incorrect register class.";
5139 if (!ST.hasSDWA()) {
5140 ErrInfo = "SDWA is not supported on this target";
5144 for (auto Op : {AMDGPU::OpName::src0_sel, AMDGPU::OpName::src1_sel,
5145 AMDGPU::OpName::dst_sel}) {
5149 int64_t Imm = MO->getImm();
5151 ErrInfo = "Invalid SDWA selection";
5156 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
5158 for (int OpIdx : {DstIdx, Src0Idx, Src1Idx, Src2Idx}) {
5163 if (!ST.hasSDWAScalar()) {
5165 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) {
5166 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI";
5173 "Only reg allowed as operands in SDWA instructions on GFX9+";
5179 if (!ST.hasSDWAOmod()) {
5182 if (OMod != nullptr &&
5184 ErrInfo = "OMod not allowed in SDWA instructions on VI";
5189 if (Opcode == AMDGPU::V_CVT_F32_FP8_sdwa ||
5190 Opcode == AMDGPU::V_CVT_F32_BF8_sdwa ||
5191 Opcode == AMDGPU::V_CVT_PK_F32_FP8_sdwa ||
5192 Opcode == AMDGPU::V_CVT_PK_F32_BF8_sdwa) {
5195 unsigned Mods = Src0ModsMO->getImm();
5198 ErrInfo = "sext, abs and neg are not allowed on this instruction";
5204 if (isVOPC(BasicOpcode)) {
5205 if (!ST.hasSDWASdst() && DstIdx != -1) {
5208 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) {
5209 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI";
5212 } else if (!ST.hasSDWAOutModsVOPC()) {
5215 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) {
5216 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI";
5222 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) {
5223 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI";
5230 if (DstUnused && DstUnused->isImm() &&
5233 if (!Dst.isReg() || !Dst.isTied()) {
5234 ErrInfo = "Dst register should have tied register";
5239 MI.getOperand(MI.findTiedOperandIdx(DstIdx));
5242 "Dst register should be tied to implicit use of preserved register";
5246 ErrInfo = "Dst register should use same physical register as preserved";
5253 if (isImage(Opcode) && !MI.mayStore()) {
5265 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
5273 AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
5277 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
5278 if (RegCount > DstSize) {
5279 ErrInfo = "Image instruction returns too many registers for dst "
5288 if (isVALU(MI) && Desc.getOpcode() != AMDGPU::V_WRITELANE_B32) {
5289 unsigned ConstantBusCount = 0;
5290 bool UsesLiteral = false;
5293 int ImmIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm);
5297 LiteralVal = &MI.getOperand(ImmIdx);
5306 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx, Src3Idx}) {
5317 } else if (!MO.isFI()) {
5324 ErrInfo = "VOP2/VOP3 instruction uses more than one literal";
5334 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) {
5335 return !RI.regsOverlap(SGPRUsed, SGPR);
5344 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) &&
5345 Opcode != AMDGPU::V_WRITELANE_B32) {
5346 ErrInfo = "VOP* instruction violates constant bus restriction";
5350 if (isVOP3(MI) && UsesLiteral && !ST.hasVOP3Literal()) {
5351 ErrInfo = "VOP3 instruction uses literal";
5358 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) {
5359 unsigned SGPRCount = 0;
5362 for (int OpIdx : {Src0Idx, Src1Idx}) {
5370 if (MO.getReg() != SGPRUsed)
5375 if (SGPRCount > ST.getConstantBusLimit(Opcode)) {
5376 ErrInfo = "WRITELANE instruction violates constant bus restriction";
5383 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32_e64 ||
5384 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64_e64) {
5391 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
5401 ErrInfo = "ABS not allowed in VOP3B instructions";
5414 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants";
5421 if (Desc.isBranch()) {
5423 ErrInfo = "invalid branch target for SOPK instruction";
5430 ErrInfo = "invalid immediate for SOPK instruction";
5435 ErrInfo = "invalid immediate for SOPK instruction";
5442 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
5443 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
5444 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
5445 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
5446 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
5447 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
5449 const unsigned StaticNumOps =
5450 Desc.getNumOperands() + Desc.implicit_uses().size();
5451 const unsigned NumImplicitOps = IsDst ? 2 : 1;
5456 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
5457 ErrInfo = "missing implicit register operands";
5463 if (!Dst->isUse()) {
5464 ErrInfo = "v_movreld_b32 vdst should be a use operand";
5469 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
5470 UseOpIdx != StaticNumOps + 1) {
5471 ErrInfo = "movrel implicit operands should be tied";
5478 = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
5480 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
5481 ErrInfo = "src0 should be subreg of implicit vector use";
5489 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
5490 ErrInfo = "VALU instruction does not implicitly read exec mask";
5496 if (MI.mayStore() &&
5501 if (Soff && Soff->getReg() != AMDGPU::M0) {
5502 ErrInfo = "scalar stores must use m0 as offset register";
5508 if (isFLAT(MI) && !ST.hasFlatInstOffsets()) {
5510 if (Offset->getImm() != 0) {
5511 ErrInfo = "subtarget does not support offsets in flat instructions";
5516 if (isDS(MI) && !ST.hasGDS()) {
5518 if (GDSOp && GDSOp->getImm() != 0) {
5519 ErrInfo = "GDS is not supported on this subtarget";
5527 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode,
5528 AMDGPU::OpName::vaddr0);
5529 AMDGPU::OpName RSrcOpName =
5530 isMIMG(MI) ? AMDGPU::OpName::srsrc : AMDGPU::OpName::rsrc;
5531 int RsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, RSrcOpName);
5539 ErrInfo = "dim is out of range";
5544 if (ST.hasR128A16()) {
5546 IsA16 = R128A16->getImm() != 0;
5547 } else if (ST.hasA16()) {
5549 IsA16 = A16->getImm() != 0;
5552 bool IsNSA = RsrcIdx - VAddr0Idx > 1;
5554 unsigned AddrWords =
5557 unsigned VAddrWords;
5559 VAddrWords = RsrcIdx - VAddr0Idx;
5560 if (ST.hasPartialNSAEncoding() &&
5562 unsigned LastVAddrIdx = RsrcIdx - 1;
5563 VAddrWords += getOpSize(MI, LastVAddrIdx) / 4 - 1;
5571 if (VAddrWords != AddrWords) {
5573 << " but got " << VAddrWords << "\n");
5574 ErrInfo = "bad vaddr size";
5584 unsigned DC = DppCt->getImm();
5585 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 ||
5586 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST ||
5587 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) ||
5588 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) ||
5589 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) ||
5590 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) ||
5591 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) {
5592 ErrInfo = "Invalid dpp_ctrl value";
5595 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 &&
5597 ErrInfo = "Invalid dpp_ctrl value: "
5598 "wavefront shifts are not supported on GFX10+";
5601 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 &&
5603 ErrInfo = "Invalid dpp_ctrl value: "
5604 "broadcasts are not supported on GFX10+";
5607 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST &&
5609 if (DC >= DppCtrl::ROW_NEWBCAST_FIRST &&
5610 DC <= DppCtrl::ROW_NEWBCAST_LAST &&
5611 !ST.hasGFX90AInsts()) {
5612 ErrInfo = "Invalid dpp_ctrl value: "
5613 "row_newbroadcast/row_share is not supported before "
5617 if (DC > DppCtrl::ROW_NEWBCAST_LAST || !ST.hasGFX90AInsts()) {
5618 ErrInfo = "Invalid dpp_ctrl value: "
5619 "row_share and row_xmask are not supported before GFX10";
5624 if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO &&
5627 ErrInfo = "Invalid dpp_ctrl value: "
5628 "DP ALU dpp only support row_newbcast";
5635 AMDGPU::OpName DataName =
5636 isDS(Opcode) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata;
5642 if (ST.hasGFX90AInsts()) {
5643 if (Dst && Data && !Dst->isTied() && !Data->isTied() &&
5644 (RI.isAGPR(MRI, Dst->getReg()) != RI.isAGPR(MRI, Data->getReg()))) {
5645 ErrInfo = "Invalid register class: "
5646 "vdata and vdst should be both VGPR or AGPR";
5649 if (Data && Data2 &&
5651 ErrInfo = "Invalid register class: "
5652 "both data operands should be VGPR or AGPR";
5656 if ((Dst && RI.isAGPR(MRI, Dst->getReg())) ||
5658 (Data2 && RI.isAGPR(MRI, Data2->getReg()))) {
5659 ErrInfo = "Invalid register class: "
5660 "agpr loads and stores not supported on this GPU";
5666 if (ST.needsAlignedVGPRs()) {
5667 const auto isAlignedReg = [&MI, &MRI, this](AMDGPU::OpName OpName) -> bool {
5672 if (Reg.isPhysical())
5673 return !(RI.getHWRegIndex(Reg) & 1);
5675 return RI.getRegSizeInBits(RC) > 32 && RI.isProperlyAlignedRC(RC) &&
5676 !(RI.getChannelFromSubReg(Op->getSubReg()) & 1);
5679 if (Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_SEMA_BR ||
5680 Opcode == AMDGPU::DS_GWS_BARRIER) {
5682 if (!isAlignedReg(AMDGPU::OpName::data0)) {
5683 ErrInfo = "Subtarget requires even aligned vector registers "
5684 "for DS_GWS instructions";
5690 if (!isAlignedReg(AMDGPU::OpName::vaddr)) {
5691 ErrInfo = "Subtarget requires even aligned vector registers "
5692 "for vaddr operand of image instructions";
5698 if (Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && !ST.hasGFX90AInsts()) {
5700 if (Src->isReg() && RI.isSGPRReg(MRI, Src->getReg())) {
5701 ErrInfo = "Invalid register class: "
5702 "v_accvgpr_write with an SGPR is not supported on this GPU";
5707 if (Desc.getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS) {
5710 ErrInfo = "pseudo expects only physical SGPRs";
5717 if (!ST.hasScaleOffset()) {
5718 ErrInfo = "Subtarget does not support offset scaling";
5722 ErrInfo = "Instruction does not support offset scaling";
5731 for (unsigned I = 0; I < 3; ++I) {
5744 switch (MI.getOpcode()) {
5745 default: return AMDGPU::INSTRUCTION_LIST_END;
5746 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
5747 case AMDGPU::COPY: return AMDGPU::COPY;
5748 case AMDGPU::PHI: return AMDGPU::PHI;
5749 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
5750 case AMDGPU::WQM: return AMDGPU::WQM;
5751 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM;
5752 case AMDGPU::STRICT_WWM: return AMDGPU::STRICT_WWM;
5753 case AMDGPU::STRICT_WQM: return AMDGPU::STRICT_WQM;
5754 case AMDGPU::S_MOV_B32: {
5756 return MI.getOperand(1).isReg() ||
5757 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ?
5758 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
5760 case AMDGPU::S_ADD_I32:
5761 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32;
5762 case AMDGPU::S_ADDC_U32:
5763 return AMDGPU::V_ADDC_U32_e32;
5764 case AMDGPU::S_SUB_I32:
5765 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32;
5768 case AMDGPU::S_ADD_U32:
5769 return AMDGPU::V_ADD_CO_U32_e32;
5770 case AMDGPU::S_SUB_U32:
5771 return AMDGPU::V_SUB_CO_U32_e32;
5772 case AMDGPU::S_ADD_U64_PSEUDO:
5773 return AMDGPU::V_ADD_U64_PSEUDO;
5774 case AMDGPU::S_SUB_U64_PSEUDO:
5775 return AMDGPU::V_SUB_U64_PSEUDO;
5776 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
5777 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32_e64;
5778 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32_e64;
5779 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32_e64;
5780 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
5781 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
5782 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
5783 case AMDGPU::S_XNOR_B32:
5784 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END;
5785 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
5786 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
5787 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
5788 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
5789 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
5790 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64_e64;
5791 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
5792 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64_e64;
5793 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
5794 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64_e64;
5795 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32_e64;
5796 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32_e64;
5797 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32_e64;
5798 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32_e64;
5799 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
5800 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
5801 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
5802 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
5803 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e64;
5804 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e64;
5805 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e64;
5806 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e64;
5807 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e64;
5808 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e64;
5809 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e64;
5810 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e64;
5811 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e64;
5812 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e64;
5813 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e64;
5814 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e64;
5815 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e64;
5816 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e64;
5817 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
5818 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
5819 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
5820 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
5821 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
5822 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
5823 case AMDGPU::S_CVT_F32_I32: return AMDGPU::V_CVT_F32_I32_e64;
5824 case AMDGPU::S_CVT_F32_U32: return AMDGPU::V_CVT_F32_U32_e64;
5825 case AMDGPU::S_CVT_I32_F32: return AMDGPU::V_CVT_I32_F32_e64;
5826 case AMDGPU::S_CVT_U32_F32: return AMDGPU::V_CVT_U32_F32_e64;
5827 case AMDGPU::S_CVT_F32_F16:
5828 case AMDGPU::S_CVT_HI_F32_F16:
5829 return ST.useRealTrue16Insts() ? AMDGPU::V_CVT_F32_F16_t16_e64
5830 : AMDGPU::V_CVT_F32_F16_fake16_e64;
5831 case AMDGPU::S_CVT_F16_F32:
5832 return ST.useRealTrue16Insts() ? AMDGPU::V_CVT_F16_F32_t16_e64
5833 : AMDGPU::V_CVT_F16_F32_fake16_e64;
5834 case AMDGPU::S_CEIL_F32: return AMDGPU::V_CEIL_F32_e64;
5835 case AMDGPU::S_FLOOR_F32: return AMDGPU::V_FLOOR_F32_e64;
5836 case AMDGPU::S_TRUNC_F32: return AMDGPU::V_TRUNC_F32_e64;
5837 case AMDGPU::S_RNDNE_F32: return AMDGPU::V_RNDNE_F32_e64;
5838 case AMDGPU::S_CEIL_F16:
5839 return ST.useRealTrue16Insts() ? AMDGPU::V_CEIL_F16_t16_e64
5840 : AMDGPU::V_CEIL_F16_fake16_e64;
5841 case AMDGPU::S_FLOOR_F16:
5842 return ST.useRealTrue16Insts() ? AMDGPU::V_FLOOR_F16_t16_e64
5843 : AMDGPU::V_FLOOR_F16_fake16_e64;
5844 case AMDGPU::S_TRUNC_F16:
5845 return ST.useRealTrue16Insts() ? AMDGPU::V_TRUNC_F16_t16_e64
5846 : AMDGPU::V_TRUNC_F16_fake16_e64;
5847 case AMDGPU::S_RNDNE_F16:
5848 return ST.useRealTrue16Insts() ? AMDGPU::V_RNDNE_F16_t16_e64
5849 : AMDGPU::V_RNDNE_F16_fake16_e64;
5850 case AMDGPU::S_ADD_F32: return AMDGPU::V_ADD_F32_e64;
5851 case AMDGPU::S_SUB_F32: return AMDGPU::V_SUB_F32_e64;
5852 case AMDGPU::S_MIN_F32: return AMDGPU::V_MIN_F32_e64;
5853 case AMDGPU::S_MAX_F32: return AMDGPU::V_MAX_F32_e64;
5854 case AMDGPU::S_MINIMUM_F32: return AMDGPU::V_MINIMUM_F32_e64;
5855 case AMDGPU::S_MAXIMUM_F32: return AMDGPU::V_MAXIMUM_F32_e64;
5856 case AMDGPU::S_MUL_F32: return AMDGPU::V_MUL_F32_e64;
5857 case AMDGPU::S_ADD_F16:
5858 return ST.useRealTrue16Insts() ? AMDGPU::V_ADD_F16_t16_e64
5859 : AMDGPU::V_ADD_F16_fake16_e64;
5860 case AMDGPU::S_SUB_F16:
5861 return ST.useRealTrue16Insts() ? AMDGPU::V_SUB_F16_t16_e64
5862 : AMDGPU::V_SUB_F16_fake16_e64;
5863 case AMDGPU::S_MIN_F16:
5864 return ST.useRealTrue16Insts() ? AMDGPU::V_MIN_F16_t16_e64
5865 : AMDGPU::V_MIN_F16_fake16_e64;
5866 case AMDGPU::S_MAX_F16:
5867 return ST.useRealTrue16Insts() ? AMDGPU::V_MAX_F16_t16_e64
5868 : AMDGPU::V_MAX_F16_fake16_e64;
5869 case AMDGPU::S_MINIMUM_F16:
5870 return ST.useRealTrue16Insts() ? AMDGPU::V_MINIMUM_F16_t16_e64
5871 : AMDGPU::V_MINIMUM_F16_fake16_e64;
5872 case AMDGPU::S_MAXIMUM_F16:
5873 return ST.useRealTrue16Insts() ? AMDGPU::V_MAXIMUM_F16_t16_e64
5874 : AMDGPU::V_MAXIMUM_F16_fake16_e64;
5875 case AMDGPU::S_MUL_F16:
5876 return ST.useRealTrue16Insts() ? AMDGPU::V_MUL_F16_t16_e64
5877 : AMDGPU::V_MUL_F16_fake16_e64;
5878 case AMDGPU::S_CVT_PK_RTZ_F16_F32: return AMDGPU::V_CVT_PKRTZ_F16_F32_e64;
5879 case AMDGPU::S_FMAC_F32: return AMDGPU::V_FMAC_F32_e64;
5880 case AMDGPU::S_FMAC_F16:
5881 return ST.useRealTrue16Insts() ? AMDGPU::V_FMAC_F16_t16_e64
5882 : AMDGPU::V_FMAC_F16_fake16_e64;
5883 case AMDGPU::S_FMAMK_F32: return AMDGPU::V_FMAMK_F32;
5884 case AMDGPU::S_FMAAK_F32: return AMDGPU::V_FMAAK_F32;
5885 case AMDGPU::S_CMP_LT_F32: return AMDGPU::V_CMP_LT_F32_e64;
5886 case AMDGPU::S_CMP_EQ_F32: return AMDGPU::V_CMP_EQ_F32_e64;
5887 case AMDGPU::S_CMP_LE_F32: return AMDGPU::V_CMP_LE_F32_e64;
5888 case AMDGPU::S_CMP_GT_F32: return AMDGPU::V_CMP_GT_F32_e64;
5889 case AMDGPU::S_CMP_LG_F32: return AMDGPU::V_CMP_LG_F32_e64;
5890 case AMDGPU::S_CMP_GE_F32: return AMDGPU::V_CMP_GE_F32_e64;
5891 case AMDGPU::S_CMP_O_F32: return AMDGPU::V_CMP_O_F32_e64;
5892 case AMDGPU::S_CMP_U_F32: return AMDGPU::V_CMP_U_F32_e64;
5893 case AMDGPU::S_CMP_NGE_F32: return AMDGPU::V_CMP_NGE_F32_e64;
5894 case AMDGPU::S_CMP_NLG_F32: return AMDGPU::V_CMP_NLG_F32_e64;
5895 case AMDGPU::S_CMP_NGT_F32: return AMDGPU::V_CMP_NGT_F32_e64;
5896 case AMDGPU::S_CMP_NLE_F32: return AMDGPU::V_CMP_NLE_F32_e64;
5897 case AMDGPU::S_CMP_NEQ_F32: return AMDGPU::V_CMP_NEQ_F32_e64;
5898 case AMDGPU::S_CMP_NLT_F32: return AMDGPU::V_CMP_NLT_F32_e64;
5899 case AMDGPU::S_CMP_LT_F16:
5900 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_LT_F16_t16_e64
5901 : AMDGPU::V_CMP_LT_F16_fake16_e64;
5902 case AMDGPU::S_CMP_EQ_F16:
5903 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_EQ_F16_t16_e64
5904 : AMDGPU::V_CMP_EQ_F16_fake16_e64;
5905 case AMDGPU::S_CMP_LE_F16:
5906 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_LE_F16_t16_e64
5907 : AMDGPU::V_CMP_LE_F16_fake16_e64;
5908 case AMDGPU::S_CMP_GT_F16:
5909 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_GT_F16_t16_e64
5910 : AMDGPU::V_CMP_GT_F16_fake16_e64;
5911 case AMDGPU::S_CMP_LG_F16:
5912 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_LG_F16_t16_e64
5913 : AMDGPU::V_CMP_LG_F16_fake16_e64;
5914 case AMDGPU::S_CMP_GE_F16:
5915 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_GE_F16_t16_e64
5916 : AMDGPU::V_CMP_GE_F16_fake16_e64;
5917 case AMDGPU::S_CMP_O_F16:
5918 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_O_F16_t16_e64
5919 : AMDGPU::V_CMP_O_F16_fake16_e64;
5920 case AMDGPU::S_CMP_U_F16:
5921 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_U_F16_t16_e64
5922 : AMDGPU::V_CMP_U_F16_fake16_e64;
5923 case AMDGPU::S_CMP_NGE_F16:
5924 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NGE_F16_t16_e64
5925 : AMDGPU::V_CMP_NGE_F16_fake16_e64;
5926 case AMDGPU::S_CMP_NLG_F16:
5927 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NLG_F16_t16_e64
5928 : AMDGPU::V_CMP_NLG_F16_fake16_e64;
5929 case AMDGPU::S_CMP_NGT_F16:
5930 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NGT_F16_t16_e64
5931 : AMDGPU::V_CMP_NGT_F16_fake16_e64;
5932 case AMDGPU::S_CMP_NLE_F16:
5933 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NLE_F16_t16_e64
5934 : AMDGPU::V_CMP_NLE_F16_fake16_e64;
5935 case AMDGPU::S_CMP_NEQ_F16:
5936 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NEQ_F16_t16_e64
5937 : AMDGPU::V_CMP_NEQ_F16_fake16_e64;
5938 case AMDGPU::S_CMP_NLT_F16:
5939 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NLT_F16_t16_e64
5940 : AMDGPU::V_CMP_NLT_F16_fake16_e64;
5941 case AMDGPU::V_S_EXP_F32_e64: return AMDGPU::V_EXP_F32_e64;
5942 case AMDGPU::V_S_EXP_F16_e64:
5943 return ST.useRealTrue16Insts() ? AMDGPU::V_EXP_F16_t16_e64
5944 : AMDGPU::V_EXP_F16_fake16_e64;
5945 case AMDGPU::V_S_LOG_F32_e64: return AMDGPU::V_LOG_F32_e64;
5946 case AMDGPU::V_S_LOG_F16_e64:
5947 return ST.useRealTrue16Insts() ? AMDGPU::V_LOG_F16_t16_e64
5948 : AMDGPU::V_LOG_F16_fake16_e64;
5949 case AMDGPU::V_S_RCP_F32_e64: return AMDGPU::V_RCP_F32_e64;
5950 case AMDGPU::V_S_RCP_F16_e64:
5951 return ST.useRealTrue16Insts() ? AMDGPU::V_RCP_F16_t16_e64
5952 : AMDGPU::V_RCP_F16_fake16_e64;
5953 case AMDGPU::V_S_RSQ_F32_e64: return AMDGPU::V_RSQ_F32_e64;
5954 case AMDGPU::V_S_RSQ_F16_e64:
5955 return ST.useRealTrue16Insts() ? AMDGPU::V_RSQ_F16_t16_e64
5956 : AMDGPU::V_RSQ_F16_fake16_e64;
5957 case AMDGPU::V_S_SQRT_F32_e64: return AMDGPU::V_SQRT_F32_e64;
5958 case AMDGPU::V_S_SQRT_F16_e64:
5959 return ST.useRealTrue16Insts() ? AMDGPU::V_SQRT_F16_t16_e64
5960 : AMDGPU::V_SQRT_F16_fake16_e64;
5963 "Unexpected scalar opcode without corresponding vector one!");
6012 "Not a whole wave func");
6015 if (MI.getOpcode() == AMDGPU::SI_WHOLE_WAVE_FUNC_SETUP ||
6016 MI.getOpcode() == AMDGPU::G_AMDGPU_WHOLE_WAVE_FUNC_SETUP)
6031 int16_t RegClass = getOpRegClassID(OpInfo);
6032 return RI.getRegClass(RegClass);
6036 unsigned OpNo) const {
6038 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
6039 Desc.operands()[OpNo].RegClass == -1) {
6042 if (Reg.isVirtual()) {
6044 MI.getParent()->getParent()->getRegInfo();
6045 return MRI.getRegClass(Reg);
6047 return RI.getPhysRegBaseClass(Reg);
6050 return RI.getRegClass(getOpRegClassID(Desc.operands()[OpNo]));
6058 unsigned RCID = getOpRegClassID(get(MI.getOpcode()).operands()[OpIdx]);
6060 unsigned Size = RI.getRegSizeInBits(*RC);
6061 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO
6062 : Size == 16 ? AMDGPU::V_MOV_B16_t16_e64
6063 : AMDGPU::V_MOV_B32_e32;
6065 Opcode = AMDGPU::COPY;
6066 else if (RI.isSGPRClass(RC))
6067 Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
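// --- Illustrative sketch (not part of the listing above) ---
// The opcode selection at 6061-6067 in plain form: pick a move instruction
// from the destination class and its size. The condition guarding the COPY
// branch is elided in the listing; treating it as the AGPR-class check is an
// assumption.
#include <string>

namespace sketch {
std::string pickMovOpcode(unsigned SizeInBits, bool IsSGPR, bool IsAGPR) {
  if (IsAGPR)
    return "COPY"; // AGPR moves are left as generic copies
  if (IsSGPR)
    return SizeInBits == 64 ? "S_MOV_B64" : "S_MOV_B32";
  if (SizeInBits == 64)
    return "V_MOV_B64_PSEUDO";
  if (SizeInBits == 16)
    return "V_MOV_B16_t16_e64";
  return "V_MOV_B32_e32";
}
} // namespace sketch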
6081 return RI.getSubReg(SuperReg.getReg(), SubIdx);
6087 unsigned NewSubIdx = RI.composeSubRegIndices(SuperReg.getSubReg(), SubIdx);
6098 if (SubIdx == AMDGPU::sub0)
6100 if (SubIdx == AMDGPU::sub1)
6112 void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
6128 if (Reg.isPhysical())
6138 return RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()) != nullptr;
6141 return RI.getCommonSubClass(DRC, RC) != nullptr;
6148 unsigned Opc = MI.getOpcode();
6154 constexpr const AMDGPU::OpName OpNames[] = {
6155 AMDGPU::OpName::src0, AMDGPU::OpName::src1, AMDGPU::OpName::src2};
6158 int SrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[I]);
6159 if (static_cast<unsigned>(SrcIdx) == OpIdx &&
6169 bool IsAGPR = RI.isAGPR(MRI, MO.getReg());
6170 if (IsAGPR && !ST.hasMAIInsts())
6172 if (IsAGPR && (!ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) &&
6176 const int VDstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
6177 const int DataIdx = AMDGPU::getNamedOperandIdx(
6178 Opc, isDS(Opc) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata);
6179 if ((int)OpIdx == VDstIdx && DataIdx != -1 &&
6180 MI.getOperand(DataIdx).isReg() &&
6181 RI.isAGPR(MRI, MI.getOperand(DataIdx).getReg()) != IsAGPR)
6183 if ((int)OpIdx == DataIdx) {
6184 if (VDstIdx != -1 &&
6185 RI.isAGPR(MRI, MI.getOperand(VDstIdx).getReg()) != IsAGPR)
6188 const int Data1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
6189 if (Data1Idx != -1 && MI.getOperand(Data1Idx).isReg() &&
6190 RI.isAGPR(MRI, MI.getOperand(Data1Idx).getReg()) != IsAGPR)
6195 if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && !ST.hasGFX90AInsts() &&
6196 (int)OpIdx == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) &&
6216 constexpr const unsigned NumOps = 3;
6217 constexpr const AMDGPU::OpName OpNames[NumOps * 2] = {
6218 AMDGPU::OpName::src0, AMDGPU::OpName::src1,
6219 AMDGPU::OpName::src2, AMDGPU::OpName::src0_modifiers,
6220 AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src2_modifiers};
6225 int SrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[SrcN]);
6228 MO = &MI.getOperand(SrcIdx);
6235 AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[NumOps + SrcN]);
6239 unsigned Mods = MI.getOperand(ModsIdx).getImm();
6243 return !OpSel && !OpSelHi;
6252 int64_t RegClass = getOpRegClassID(OpInfo);
6254 RegClass != -1 ? RI.getRegClass(RegClass) : nullptr;
6263 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode());
6264 int LiteralLimit = !isVOP3(MI) || ST.hasVOP3Literal() ? 1 : 0;
6268 if (!LiteralLimit--)
6278 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
6286 if (--ConstantBusLimit <= 0)
6298 if (!LiteralLimit--)
6300 if (--ConstantBusLimit <= 0)
6306 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
6310 if (!Op.isReg() && !Op.isFI() && !Op.isRegMask() &&
6312 !Op.isIdenticalTo(*MO))
6322 } else if (IsInlineConst && ST.hasNoF16PseudoScalarTransInlineConstants() &&
6336 bool Is64BitOp = Is64BitFPOp ||
6343 (!ST.has64BitLiterals() || InstDesc.getSize() != 4))
6352 if (!Is64BitFPOp && (int32_t)Imm < 0 &&
6370 bool IsGFX950Only = ST.hasGFX950Insts();
6371 bool IsGFX940Only = ST.hasGFX940Insts();
6373 if (!IsGFX950Only && !IsGFX940Only)
6391 unsigned Opcode = MI.getOpcode();
6393 case AMDGPU::V_CVT_PK_BF8_F32_e64:
6394 case AMDGPU::V_CVT_PK_FP8_F32_e64:
6395 case AMDGPU::V_MQSAD_PK_U16_U8_e64:
6396 case AMDGPU::V_MQSAD_U32_U8_e64:
6397 case AMDGPU::V_PK_ADD_F16:
6398 case AMDGPU::V_PK_ADD_F32:
6399 case AMDGPU::V_PK_ADD_I16:
6400 case AMDGPU::V_PK_ADD_U16:
6401 case AMDGPU::V_PK_ASHRREV_I16:
6402 case AMDGPU::V_PK_FMA_F16:
6403 case AMDGPU::V_PK_FMA_F32:
6404 case AMDGPU::V_PK_FMAC_F16_e32:
6405 case AMDGPU::V_PK_FMAC_F16_e64:
6406 case AMDGPU::V_PK_LSHLREV_B16:
6407 case AMDGPU::V_PK_LSHRREV_B16:
6408 case AMDGPU::V_PK_MAD_I16:
6409 case AMDGPU::V_PK_MAD_U16:
6410 case AMDGPU::V_PK_MAX_F16:
6411 case AMDGPU::V_PK_MAX_I16:
6412 case AMDGPU::V_PK_MAX_U16:
6413 case AMDGPU::V_PK_MIN_F16:
6414 case AMDGPU::V_PK_MIN_I16:
6415 case AMDGPU::V_PK_MIN_U16:
6416 case AMDGPU::V_PK_MOV_B32:
6417 case AMDGPU::V_PK_MUL_F16:
6418 case AMDGPU::V_PK_MUL_F32:
6419 case AMDGPU::V_PK_MUL_LO_U16:
6420 case AMDGPU::V_PK_SUB_I16:
6421 case AMDGPU::V_PK_SUB_U16:
6422 case AMDGPU::V_QSAD_PK_U16_U8_e64:
6431 unsigned Opc = MI.getOpcode();
6434 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
6437 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
6443 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 && Src0.isReg() &&
6450 if (Opc == AMDGPU::V_WRITELANE_B32) {
6453 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6459 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6469 if (Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F16_e32) {
6470 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
6471 if (!RI.isVGPR(MRI, MI.getOperand(Src2Idx).getReg()))
6483 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() &&
6485 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6497 if (HasImplicitSGPR || !MI.isCommutable()) {
6514 if (CommutedOpc == -1) {
6519 MI.setDesc(get(CommutedOpc));
6523 bool Src0Kill = Src0.isKill();
6527 else if (Src1.isReg()) {
6542 unsigned Opc = MI.getOpcode();
6545 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
6546 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
6547 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
6550 if (Opc == AMDGPU::V_PERMLANE16_B32_e64 ||
6551 Opc == AMDGPU::V_PERMLANEX16_B32_e64 ||
6552 Opc == AMDGPU::V_PERMLANE_BCAST_B32_e64 ||
6553 Opc == AMDGPU::V_PERMLANE_UP_B32_e64 ||
6554 Opc == AMDGPU::V_PERMLANE_DOWN_B32_e64 ||
6555 Opc == AMDGPU::V_PERMLANE_XOR_B32_e64 ||
6556 Opc == AMDGPU::V_PERMLANE_IDX_GEN_B32_e64) {
6560 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
6561 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6566 if (VOP3Idx[2] != -1) {
6568 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) {
6569 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6578 int ConstantBusLimit = ST.getConstantBusLimit(Opc);
6579 int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
6581 Register SGPRReg = findUsedSGPR(MI, VOP3Idx);
6583 SGPRsUsed.insert(SGPRReg);
6587 for (int Idx : VOP3Idx) {
6596 if (LiteralLimit > 0 && ConstantBusLimit > 0) {
6608 if (!RI.isSGPRClass(RI.getRegClassForReg(MRI, MO.getReg())))
6615 if (ConstantBusLimit > 0) {
6627 if ((Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) &&
6628 !RI.isVGPR(MRI, MI.getOperand(VOP3Idx[2]).getReg()))
6634 for (unsigned I = 0; I < 3; ++I) {
6647 SRC = RI.getCommonSubClass(SRC, DstRC);
6650 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
6652 if (RI.hasAGPRs(VRC)) {
6653 VRC = RI.getEquivalentVGPRClass(VRC);
6654 Register NewSrcReg = MRI.createVirtualRegister(VRC);
6656 get(TargetOpcode::COPY), NewSrcReg)
6663 get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
6669 for (unsigned i = 0; i < SubRegs; ++i) {
6670 Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
6672 get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
6673 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
6679 get(AMDGPU::REG_SEQUENCE), DstReg);
6680 for (unsigned i = 0; i < SubRegs; ++i) {
6682 MIB.addImm(RI.getSubRegFromChannel(i));
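// --- Illustrative sketch (not part of the listing above) ---
// The loop at 6669-6682 copies a wide vector value into SGPRs one 32-bit
// channel at a time with V_READFIRSTLANE_B32, then reassembles the pieces
// with REG_SEQUENCE. Modeled here for the uniform-value case over a
// lane-major array: readfirstlane simply takes each channel from the first
// (active) lane.
#include <cstdint>
#include <vector>

namespace sketch {
std::vector<uint32_t>
readFirstLane(const std::vector<std::vector<uint32_t>> &VGPR, // [lane][chan]
              unsigned SubRegs) {
  std::vector<uint32_t> SGPR(SubRegs);
  for (unsigned i = 0; i < SubRegs; ++i)
    SGPR[i] = VGPR[0][i]; // V_READFIRSTLANE_B32 per 32-bit channel
  return SGPR;
}
} // namespace sketch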
6695 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
6697 SBase->setReg(SGPR);
6700 if (SOff && !RI.isSGPRReg(MRI, SOff->getReg())) {
6708 int OldSAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr);
6709 if (OldSAddrIdx < 0)
6725 int NewVAddrIdx = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vaddr);
6726 if (NewVAddrIdx < 0)
6729 int OldVAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
6733 if (OldVAddrIdx >= 0) {
6735 VAddrDef = MRI.getUniqueVRegDef(VAddr.getReg());
6747 if (OldVAddrIdx == NewVAddrIdx) {
6750 MRI.removeRegOperandFromUseList(&NewVAddr);
6751 MRI.moveOperands(&NewVAddr, &SAddr, 1);
6755 MRI.removeRegOperandFromUseList(&NewVAddr);
6756 MRI.addRegOperandToUseList(&NewVAddr);
6758 assert(OldSAddrIdx == NewVAddrIdx);
6760 if (OldVAddrIdx >= 0) {
6761 int NewVDstIn = AMDGPU::getNamedOperandIdx(NewOpc,
6762 AMDGPU::OpName::vdst_in);
6766 if (NewVDstIn != -1) {
6767 int OldVDstIn = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in);
6773 if (NewVDstIn != -1) {
6774 int NewVDst = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst);
6795 if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg())))
6815 unsigned OpSubReg = Op.getSubReg();
6818 RI.getRegClassForReg(MRI, OpReg), OpSubReg);
6824 Register DstReg = MRI.createVirtualRegister(DstRC);
6834 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass)
6837 bool ImpDef = Def->isImplicitDef();
6838 while (!ImpDef && Def && Def->isCopy()) {
6839 if (Def->getOperand(1).getReg().isPhysical())
6841 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg());
6842 ImpDef = Def && Def->isImplicitDef();
6844 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) &&
6863 const auto *BoolXExecRC = TRI->getWaveMaskRegClass();
6869 unsigned RegSize = TRI->getRegSizeInBits(ScalarOp->getReg(), MRI);
6870 unsigned NumSubRegs = RegSize / 32;
6871 Register VScalarOp = ScalarOp->getReg();
6873 if (NumSubRegs == 1) {
6874 Register CurReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6876 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurReg)
6879 Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC);
6881 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U32_e64), NewCondReg)
6887 CondReg = NewCondReg;
6889 Register AndReg = MRI.createVirtualRegister(BoolXExecRC);
6897 ScalarOp->setReg(CurReg);
6898 ScalarOp->setIsKill();
6902 assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 &&
6903 "Unhandled register size");
6905 for (unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) {
6907 MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6909 MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6912 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo)
6913 .addReg(VScalarOp, VScalarOpUndef, TRI->getSubRegFromChannel(Idx));
6916 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi)
6917 .addReg(VScalarOp, VScalarOpUndef,
6918 TRI->getSubRegFromChannel(Idx + 1));
6924 Register CurReg = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass);
6925 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), CurReg)
6931 Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC);
6932 auto Cmp = BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64),
6935 if (NumSubRegs <= 2)
6936 Cmp.addReg(VScalarOp);
6938 Cmp.addReg(VScalarOp, VScalarOpUndef,
6939 TRI->getSubRegFromChannel(Idx, 2));
6943 CondReg = NewCondReg;
6945 Register AndReg = MRI.createVirtualRegister(BoolXExecRC);
6953 const auto *SScalarOpRC =
6954 TRI->getEquivalentSGPRClass(MRI.getRegClass(VScalarOp));
6955 Register SScalarOp = MRI.createVirtualRegister(SScalarOpRC);
6959 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SScalarOp);
6960 unsigned Channel = 0;
6961 for (Register Piece : ReadlanePieces) {
6962 Merge.addReg(Piece).addImm(TRI->getSubRegFromChannel(Channel++));
6966 ScalarOp->setReg(SScalarOp);
6967 ScalarOp->setIsKill();
6971 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
6972 MRI.setSimpleHint(SaveExec, CondReg);
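// --- Illustrative sketch (not part of the listing above) ---
// The control shape of the waterfall loop being assembled above: each trip
// reads the scalar value held by the first active lane, masks exec down to
// the lanes that agree with it, issues the operation, and retires those
// lanes until exec is empty. Simulated here for a 32-bit operand; the
// returned values are the uniform candidates the loop would materialize.
#include <bit>
#include <cstdint>
#include <vector>

namespace sketch {
std::vector<uint32_t> waterfallValues(const std::vector<uint32_t> &PerLane) {
  std::vector<uint32_t> Trips;
  uint64_t Exec = PerLane.size() >= 64 ? ~0ull
                                       : ((1ull << PerLane.size()) - 1);
  while (Exec) {
    unsigned First = std::countr_zero(Exec); // first active lane
    uint32_t S = PerLane[First];             // v_readfirstlane_b32
    for (unsigned L = 0; L < PerLane.size(); ++L)
      if (((Exec >> L) & 1) && PerLane[L] == S)
        Exec &= ~(1ull << L); // matching lanes execute this trip, then retire
    Trips.push_back(S);       // one operation issued per trip
  }
  return Trips;
}
} // namespace sketch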
7003 if (!Begin.isValid())
7005 if (!End.isValid()) {
7011 const auto *BoolXExecRC = TRI->getWaveMaskRegClass();
7019 MBB.computeRegisterLiveness(TRI, AMDGPU::SCC, MI,
7020 std::numeric_limits<unsigned>::max()) !=
7023 SaveSCCReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
7029 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
7038 for (auto I = Begin; I != AfterMI; I++) {
7039 for (auto &MO : I->all_uses())
7040 MRI.clearKillFlags(MO.getReg());
7065 MBB.addSuccessor(LoopBB);
7075 for (auto &Succ : RemainderBB->successors()) {
7099static std::tuple<unsigned, unsigned>
7107 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
7108 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
7111 Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
7112 Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
7113 Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
7114 Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
7115 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
7132 .addImm(AMDGPU::sub0_sub1)
7138 return std::tuple(RsrcPtr, NewSRsrc);
7175 if (MI.getOpcode() == AMDGPU::PHI) {
7177 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
7178 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
7181 MRI.getRegClass(MI.getOperand(i).getReg());
7182 if (RI.hasVectorRegisters(OpRC)) {
7196 VRC = &AMDGPU::VReg_1RegClass;
7199 ? RI.getEquivalentAGPRClass(SRC)
7200 : RI.getEquivalentVGPRClass(SRC);
7203 ? RI.getEquivalentAGPRClass(VRC)
7204 : RI.getEquivalentVGPRClass(VRC);
7212 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
7214 if (!Op.isReg() || !Op.getReg().isVirtual())
7230 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
7233 if (RI.hasVGPRs(DstRC)) {
7237 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
7239 if (!Op.isReg() || !Op.getReg().isVirtual())
7257 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
7262 if (DstRC != Src0RC) {
7271 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) {
7273 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
7279 if (MI.getOpcode() == AMDGPU::S_BITREPLICATE_B64_B32 ||
7280 MI.getOpcode() == AMDGPU::S_QUADMASK_B32 ||
7281 MI.getOpcode() == AMDGPU::S_QUADMASK_B64 ||
7282 MI.getOpcode() == AMDGPU::S_WQM_B32 ||
7283 MI.getOpcode() == AMDGPU::S_WQM_B64 ||
7284 MI.getOpcode() == AMDGPU::S_INVERSE_BALLOT_U32 ||
7285 MI.getOpcode() == AMDGPU::S_INVERSE_BALLOT_U64) {
7287 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
7300 ? AMDGPU::OpName::rsrc
7301 : AMDGPU::OpName::srsrc;
7303 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg())))
7306 AMDGPU::OpName SampOpName =
7307 isMIMG(MI) ? AMDGPU::OpName::ssamp : AMDGPU::OpName::samp;
7309 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg())))
7316 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
7318 if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) {
7322 unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
7323 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
7328 while (Start->getOpcode() != FrameSetupOpcode)
7331 while (End->getOpcode() != FrameDestroyOpcode)
7335 while (End != MBB.end() && End->isCopy() && End->getOperand(1).isReg() &&
7336 MI.definesRegister(End->getOperand(1).getReg(), nullptr))
7344 if (MI.getOpcode() == AMDGPU::S_SLEEP_VAR) {
7346 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
7348 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
7358 if (MI.getOpcode() == AMDGPU::TENSOR_LOAD_TO_LDS ||
7359 MI.getOpcode() == AMDGPU::TENSOR_LOAD_TO_LDS_D2 ||
7360 MI.getOpcode() == AMDGPU::TENSOR_STORE_FROM_LDS ||
7361 MI.getOpcode() == AMDGPU::TENSOR_STORE_FROM_LDS_D2) {
7363 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
7370 bool isSoffsetLegal = true;
7372 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::soffset);
7373 if (SoffsetIdx != -1) {
7376 !RI.isSGPRClass(MRI.getRegClass(Soffset->getReg()))) {
7377 isSoffsetLegal = false;
7381 bool isRsrcLegal = true;
7383 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
7384 if (RsrcIdx != -1) {
7387 isRsrcLegal = false;
7391 if (isRsrcLegal && isSoffsetLegal)
7415 Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
7416 Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
7417 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
7419 const auto *BoolXExecRC = RI.getWaveMaskRegClass();
7420 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
7421 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
7423 unsigned RsrcPtr, NewSRsrc;
7430 .addReg(RsrcPtr, 0, AMDGPU::sub0)
7437 .addReg(RsrcPtr, 0, AMDGPU::sub1)
7451 } else if (!VAddr && ST.hasAddr64()) {
7455 "FIXME: Need to emit flat atomics here");
7457 unsigned RsrcPtr, NewSRsrc;
7460 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
7483 MIB.addImm(CPol->getImm());
7488 MIB.addImm(TFE->getImm());
7508 MI.removeFromParent();
7513 .addReg(RsrcPtr, 0, AMDGPU::sub0)
7515 .addReg(RsrcPtr, 0, AMDGPU::sub1)
7519 if (!isSoffsetLegal) {
7531 if (!isSoffsetLegal) {
7543 AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::srsrc);
7544 if (RsrcIdx != -1) {
7545 DeferredList.insert(MI);
7550 return DeferredList.contains(MI);
7560 if (!ST.useRealTrue16Insts())
7563 unsigned Opcode = MI.getOpcode();
7567 OpIdx >= get(Opcode).getNumOperands() ||
7568 get(Opcode).operands()[OpIdx].RegClass == -1)
7572 if (!Op.isReg() || !Op.getReg().isVirtual())
7576 if (!RI.isVGPRClass(CurrRC))
7579 int16_t RCID = getOpRegClassID(get(Opcode).operands()[OpIdx]);
7581 if (RI.getMatchingSuperRegClass(CurrRC, ExpectedRC, AMDGPU::lo16)) {
7582 Op.setSubReg(AMDGPU::lo16);
7583 } else if (RI.getMatchingSuperRegClass(ExpectedRC, CurrRC, AMDGPU::lo16)) {
7585 Register NewDstReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
7586 Register Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_16RegClass);
7593 Op.setReg(NewDstReg);
7605 while (!Worklist.empty()) {
7619 "Deferred MachineInstr are not supposed to re-populate worklist");
7637 case AMDGPU::S_ADD_I32:
7638 case AMDGPU::S_SUB_I32: {
7642 std::tie(Changed, CreatedBBTmp) = moveScalarAddSub(Worklist, Inst, MDT);
7650 case AMDGPU::S_MUL_U64:
7651 if (ST.hasVectorMulU64()) {
7652 NewOpcode = AMDGPU::V_MUL_U64_e64;
7656 splitScalarSMulU64(Worklist, Inst, MDT);
7660 case AMDGPU::S_MUL_U64_U32_PSEUDO:
7661 case AMDGPU::S_MUL_I64_I32_PSEUDO:
7664 splitScalarSMulPseudo(Worklist, Inst, MDT);
7668 case AMDGPU::S_AND_B64:
7669 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT);
7673 case AMDGPU::S_OR_B64:
7674 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT);
7678 case AMDGPU::S_XOR_B64:
7679 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT);
7683 case AMDGPU::S_NAND_B64:
7684 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT);
7688 case AMDGPU::S_NOR_B64:
7689 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT);
7693 case AMDGPU::S_XNOR_B64:
7694 if (ST.hasDLInsts())
7695 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
7697 splitScalar64BitXnor(Worklist, Inst, MDT);
7701 case AMDGPU::S_ANDN2_B64:
7702 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT);
7706 case AMDGPU::S_ORN2_B64:
7707 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT);
7711 case AMDGPU::S_BREV_B64:
7712 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_BREV_B32, true);
7716 case AMDGPU::S_NOT_B64:
7717 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
7721 case AMDGPU::S_BCNT1_I32_B64:
7722 splitScalar64BitBCNT(Worklist, Inst);
7726 case AMDGPU::S_BFE_I64:
7727 splitScalar64BitBFE(Worklist, Inst);
7731 case AMDGPU::S_FLBIT_I32_B64:
7732 splitScalar64BitCountOp(Worklist, Inst, AMDGPU::V_FFBH_U32_e32);
7735 case AMDGPU::S_FF1_I32_B64:
7736 splitScalar64BitCountOp(Worklist, Inst, AMDGPU::V_FFBL_B32_e32);
7740 case AMDGPU::S_LSHL_B32:
7741 if (ST.hasOnlyRevVALUShifts()) {
7742 NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
7746 case AMDGPU::S_ASHR_I32:
7747 if (ST.hasOnlyRevVALUShifts()) {
7748 NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
7752 case AMDGPU::S_LSHR_B32:
7753 if (ST.hasOnlyRevVALUShifts()) {
7754 NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
7758 case AMDGPU::S_LSHL_B64:
7759 if (ST.hasOnlyRevVALUShifts()) {
7761 ? AMDGPU::V_LSHLREV_B64_pseudo_e64
7762 : AMDGPU::V_LSHLREV_B64_e64;
7766 case AMDGPU::S_ASHR_I64:
7767 if (ST.hasOnlyRevVALUShifts()) {
7768 NewOpcode = AMDGPU::V_ASHRREV_I64_e64;
7772 case AMDGPU::S_LSHR_B64:
7773 if (ST.hasOnlyRevVALUShifts()) {
7774 NewOpcode = AMDGPU::V_LSHRREV_B64_e64;
7779 case AMDGPU::S_ABS_I32:
7780 lowerScalarAbs(Worklist, Inst);
7784 case AMDGPU::S_CBRANCH_SCC0:
7785 case AMDGPU::S_CBRANCH_SCC1: {
7788 bool IsSCC = CondReg == AMDGPU::SCC;
7796 case AMDGPU::S_BFE_U64:
7797 case AMDGPU::S_BFM_B64:
7800 case AMDGPU::S_PACK_LL_B32_B16:
7801 case AMDGPU::S_PACK_LH_B32_B16:
7802 case AMDGPU::S_PACK_HL_B32_B16:
7803 case AMDGPU::S_PACK_HH_B32_B16:
7804 movePackToVALU(Worklist, MRI, Inst);
7808 case AMDGPU::S_XNOR_B32:
7809 lowerScalarXnor(Worklist, Inst);
7813 case AMDGPU::S_NAND_B32:
7814 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32);
7818 case AMDGPU::S_NOR_B32:
7819 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32);
7823 case AMDGPU::S_ANDN2_B32:
7824 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32);
7828 case AMDGPU::S_ORN2_B32:
7829 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32);
7837 case AMDGPU::S_ADD_CO_PSEUDO:
7838 case AMDGPU::S_SUB_CO_PSEUDO: {
7839 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
7840 ? AMDGPU::V_ADDC_U32_e64
7841 : AMDGPU::V_SUBB_U32_e64;
7842 const auto *CarryRC = RI.getWaveMaskRegClass();
7845 if (!MRI.constrainRegClass(CarryInReg, CarryRC)) {
7846 Register NewCarryReg = MRI.createVirtualRegister(CarryRC);
7853 Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass(
7864 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist);
7868 case AMDGPU::S_UADDO_PSEUDO:
7869 case AMDGPU::S_USUBO_PSEUDO: {
7876 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO)
7877 ? AMDGPU::V_ADD_CO_U32_e64
7878 : AMDGPU::V_SUB_CO_U32_e64;
7880 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg()));
7881 Register DestReg = MRI.createVirtualRegister(NewRC);
7889 MRI.replaceRegWith(Dest0.getReg(), DestReg);
7896 case AMDGPU::S_CSELECT_B32:
7897 case AMDGPU::S_CSELECT_B64:
7898 lowerSelect(Worklist, Inst, MDT);
7901 case AMDGPU::S_CMP_EQ_I32:
7902 case AMDGPU::S_CMP_LG_I32:
7903 case AMDGPU::S_CMP_GT_I32:
7904 case AMDGPU::S_CMP_GE_I32:
7905 case AMDGPU::S_CMP_LT_I32:
7906 case AMDGPU::S_CMP_LE_I32:
7907 case AMDGPU::S_CMP_EQ_U32:
7908 case AMDGPU::S_CMP_LG_U32:
7909 case AMDGPU::S_CMP_GT_U32:
7910 case AMDGPU::S_CMP_GE_U32:
7911 case AMDGPU::S_CMP_LT_U32:
7912 case AMDGPU::S_CMP_LE_U32:
7913 case AMDGPU::S_CMP_EQ_U64:
7914 case AMDGPU::S_CMP_LG_U64:
7915 case AMDGPU::S_CMP_LT_F32:
7916 case AMDGPU::S_CMP_EQ_F32:
7917 case AMDGPU::S_CMP_LE_F32:
7918 case AMDGPU::S_CMP_GT_F32:
7919 case AMDGPU::S_CMP_LG_F32:
7920 case AMDGPU::S_CMP_GE_F32:
7921 case AMDGPU::S_CMP_O_F32:
7922 case AMDGPU::S_CMP_U_F32:
7923 case AMDGPU::S_CMP_NGE_F32:
7924 case AMDGPU::S_CMP_NLG_F32:
7925 case AMDGPU::S_CMP_NGT_F32:
7926 case AMDGPU::S_CMP_NLE_F32:
7927 case AMDGPU::S_CMP_NEQ_F32:
7928 case AMDGPU::S_CMP_NLT_F32: {
7929 Register CondReg = MRI.createVirtualRegister(RI.getWaveMaskRegClass());
7933 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::src0_modifiers) >=
7947 addSCCDefUsersToVALUWorklist(SCCOp, Inst, Worklist, CondReg);
7951 case AMDGPU::S_CMP_LT_F16:
7952 case AMDGPU::S_CMP_EQ_F16:
7953 case AMDGPU::S_CMP_LE_F16:
7954 case AMDGPU::S_CMP_GT_F16:
7955 case AMDGPU::S_CMP_LG_F16:
7956 case AMDGPU::S_CMP_GE_F16:
7957 case AMDGPU::S_CMP_O_F16:
7958 case AMDGPU::S_CMP_U_F16:
7959 case AMDGPU::S_CMP_NGE_F16:
7960 case AMDGPU::S_CMP_NLG_F16:
7961 case AMDGPU::S_CMP_NGT_F16:
7962 case AMDGPU::S_CMP_NLE_F16:
7963 case AMDGPU::S_CMP_NEQ_F16:
7964 case AMDGPU::S_CMP_NLT_F16: {
7965 Register CondReg = MRI.createVirtualRegister(RI.getWaveMaskRegClass());
7987 addSCCDefUsersToVALUWorklist(SCCOp, Inst, Worklist, CondReg);
7991 case AMDGPU::S_CVT_HI_F32_F16: {
7993 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
7994 Register NewDst = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
7995 if (ST.useRealTrue16Insts()) {
8000 .addReg(TmpReg, 0, AMDGPU::hi16)
8016 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
8020 case AMDGPU::S_MINIMUM_F32:
8021 case AMDGPU::S_MAXIMUM_F32: {
8023 Register NewDst = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8034 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
8038 case AMDGPU::S_MINIMUM_F16:
8039 case AMDGPU::S_MAXIMUM_F16: {
8041 Register NewDst = MRI.createVirtualRegister(ST.useRealTrue16Insts()
8042 ? &AMDGPU::VGPR_16RegClass
8043 : &AMDGPU::VGPR_32RegClass);
8055 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
8059 case AMDGPU::V_S_EXP_F16_e64:
8060 case AMDGPU::V_S_LOG_F16_e64:
8061 case AMDGPU::V_S_RCP_F16_e64:
8062 case AMDGPU::V_S_RSQ_F16_e64:
8063 case AMDGPU::V_S_SQRT_F16_e64: {
8065 Register NewDst = MRI.createVirtualRegister(ST.useRealTrue16Insts()
8066 ? &AMDGPU::VGPR_16RegClass
8067 : &AMDGPU::VGPR_32RegClass);
8079 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
8093 if (NewOpcode == Opcode) {
8101 Register NewDst = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
8103 get(AMDGPU::V_READFIRSTLANE_B32), NewDst)
8121 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
8123 MRI.replaceRegWith(DstReg, NewDstReg);
8124 MRI.clearKillFlags(NewDstReg);
8138 if (ST.useRealTrue16Insts() && Inst.isCopy() &&
8142 if (RI.getMatchingSuperRegClass(NewDstRC, SrcRegRC, AMDGPU::lo16)) {
8143 Register NewDstReg = MRI.createVirtualRegister(NewDstRC);
8144 Register Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_16RegClass);
8146 get(AMDGPU::IMPLICIT_DEF), Undef);
8148 get(AMDGPU::REG_SEQUENCE), NewDstReg)
8154 MRI.replaceRegWith(DstReg, NewDstReg);
8155 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8157 } else if (RI.getMatchingSuperRegClass(SrcRegRC, NewDstRC,
8160 Register NewDstReg = MRI.createVirtualRegister(NewDstRC);
8161 MRI.replaceRegWith(DstReg, NewDstReg);
8162 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8167 Register NewDstReg = MRI.createVirtualRegister(NewDstRC);
8168 MRI.replaceRegWith(DstReg, NewDstReg);
8170 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8180 if (AMDGPU::getNamedOperandIdx(NewOpcode,
8181 AMDGPU::OpName::src0_modifiers) >= 0)
8185 NewInstr->addOperand(Src);
8188 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
8191 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
8193 NewInstr.addImm(Size);
8194 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
8198 } else if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
8203 "Scalar BFE is only implemented for constant width and offset");
8211 if (AMDGPU::getNamedOperandIdx(NewOpcode,
8212 AMDGPU::OpName::src1_modifiers) >= 0)
8214 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::src1) >= 0)
8216 if (AMDGPU::getNamedOperandIdx(NewOpcode,
8217 AMDGPU::OpName::src2_modifiers) >= 0)
8219 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::src2) >= 0)
8221 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::clamp) >= 0)
8223 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::omod) >= 0)
8225 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::op_sel) >= 0)
8231 NewInstr->addOperand(Op);
8238 if (Op.getReg() == AMDGPU::SCC) {
8240 if (Op.isDef() && !Op.isDead())
8241 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
8243 addSCCDefsToVALUWorklist(NewInstr, Worklist);
8248 if (NewInstr->getOperand(0).isReg() && NewInstr->getOperand(0).isDef()) {
8249 Register DstReg = NewInstr->getOperand(0).getReg();
8254 NewDstReg = MRI.createVirtualRegister(NewDstRC);
8255 MRI.replaceRegWith(DstReg, NewDstReg);
8264 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8268std::pair<bool, MachineBasicBlock *>
8280 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8283 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32);
8285 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ?
8286 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64;
8294 MRI.replaceRegWith(OldDstReg, ResultReg);
8297 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
8298 return std::pair(true, NewBB);
8301 return std::pair(false, nullptr);
8318 bool IsSCC = (CondReg == AMDGPU::SCC);
8326 MRI.replaceRegWith(Dest.getReg(), CondReg);
8332 const TargetRegisterClass *TC = RI.getWaveMaskRegClass();
8333 NewCondReg = MRI.createVirtualRegister(TC);
8337 bool CopyFound = false;
8338 for (MachineInstr &CandI :
8341 if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, &RI, false, false) !=
8343 if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) {
8345 .addReg(CandI.getOperand(1).getReg());
8357 ST.isWave64() ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
8365 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest.getReg())));
8366 MachineInstr *NewInst;
8367 if (Inst.getOpcode() == AMDGPU::S_CSELECT_B32) {
8368 NewInst = BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), NewDestReg)
8381 MRI.replaceRegWith(Dest.getReg(), NewDestReg);
8383 addUsersToMoveToVALUWorklist(NewDestReg, MRI, Worklist);
8395 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8396 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8398 unsigned SubOp = ST.hasAddNoCarry() ?
8399 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_CO_U32_e32;
8409 MRI.replaceRegWith(Dest.getReg(), ResultReg);
8410 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
8424 if (ST.hasDLInsts()) {
8425 Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8433 MRI.replaceRegWith(Dest.getReg(), NewDest);
8434 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
8440 bool Src0IsSGPR = Src0.isReg() &&
8441 RI.isSGPRClass(MRI.getRegClass(Src0.getReg()));
8442 bool Src1IsSGPR = Src1.isReg() &&
8443 RI.isSGPRClass(MRI.getRegClass(Src1.getReg()));
8445 Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
8446 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
8456 } else if (Src1IsSGPR) {
8470 MRI.replaceRegWith(Dest.getReg(), NewDest);
8474 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
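// --- Illustrative sketch (not part of the listing above) ---
// The identity behind the xnor lowering above: without a native xnor,
// x ^~ y can be computed as not(xor(x, y)) or as xor(not(x), y). The code
// picks the variant that lets the cheap scalar NOT land on whichever operand
// is already in an SGPR; both forms agree on every input.
#include <cassert>
#include <cstdint>

namespace sketch {
uint32_t xnorViaNotXor(uint32_t X, uint32_t Y) { return ~(X ^ Y); }
uint32_t xnorViaXorNot(uint32_t X, uint32_t Y) { return (~X) ^ Y; }

void checkXnorForms() {
  for (uint32_t X : {0u, 1u, 0xDEADBEEFu})
    for (uint32_t Y : {0u, 0xFFFFFFFFu, 0x12345678u})
      assert(xnorViaNotXor(X, Y) == xnorViaXorNot(X, Y));
}
} // namespace sketch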
8480 unsigned Opcode) const {
8490 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
8491 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
8503 MRI.replaceRegWith(Dest.getReg(), NewDest);
8504 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
8509 unsigned Opcode) const {
8519 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
8520 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
8532 MRI.replaceRegWith(Dest.getReg(), NewDest);
8533 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
      MRI.getRegClass(Src0.getReg()) : &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC =
      RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
  const TargetRegisterClass *NewDestSubRC =
      RI.getSubRegisterClass(NewDestRC, AMDGPU::sub0);

  Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &LoHalf =
      *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);

  Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &HiHalf =
      *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1);

  Register FullDestReg = MRI.createVirtualRegister(NewDestRC);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  Worklist.insert(&LoHalf);
  Worklist.insert(&HiHalf);

  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
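// A minimal standalone sketch (ours, not LLVM API) of what the expansion
// above computes: the 32-bit opcode runs once per half and the halves are
// recombined, here using bitwise NOT as the per-half operation:
#include <cstdint>

uint64_t not64ViaHalves(uint64_t X) {
  uint32_t Lo = ~static_cast<uint32_t>(X);        // LoHalf: op on sub0
  uint32_t Hi = ~static_cast<uint32_t>(X >> 32);  // HiHalf: op on sub1
  return (static_cast<uint64_t>(Hi) << 32) | Lo;  // REG_SEQUENCE of halves
}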
  Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
  Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
  const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
  const TargetRegisterClass *Src0SubRC =
      RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
  if (RI.isSGPRClass(Src0SubRC))
    Src0SubRC = RI.getEquivalentVGPRClass(Src0SubRC);
  const TargetRegisterClass *Src1SubRC =
      RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);
  if (RI.isSGPRClass(Src1SubRC))
    Src1SubRC = RI.getEquivalentVGPRClass(Src1SubRC);

  MachineOperand Op0L = ...;   // sub0 of Src0
  MachineOperand Op1L = ...;   // sub0 of Src1
  MachineOperand Op0H = ...;   // sub1 of Src0
  MachineOperand Op1H = ...;   // sub1 of Src1

  Register Op1L_Op0H_Reg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *Op1L_Op0H = ...;

  Register Op1H_Op0L_Reg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *Op1H_Op0L = ...;

  Register CarryReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *Carry = ...;

  MachineInstr *LoHalf = ...;

  Register AddReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *HiHalf = ...;

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
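// The register names above (Op1L_Op0H, Op1H_Op0L, Carry, AddReg) map onto
// the classic 64x64->64 multiply built from 32-bit multiplies. A standalone
// C++ sketch of the arithmetic (helper name is ours):
#include <cstdint>

uint64_t mul64ViaHalves(uint64_t A, uint64_t B) {
  uint32_t AL = static_cast<uint32_t>(A), AH = static_cast<uint32_t>(A >> 32);
  uint32_t BL = static_cast<uint32_t>(B), BH = static_cast<uint32_t>(B >> 32);
  uint32_t Lo = AL * BL;                                             // V_MUL_LO
  uint32_t Carry =
      static_cast<uint32_t>((static_cast<uint64_t>(AL) * BL) >> 32); // V_MUL_HI
  uint32_t Hi = AL * BH + AH * BL + Carry;  // cross terms + carry, mod 2^32
  return (static_cast<uint64_t>(Hi) << 32) | Lo;
}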
  Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
  Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
  const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
  const TargetRegisterClass *Src0SubRC =
      RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
  if (RI.isSGPRClass(Src0SubRC))
    Src0SubRC = RI.getEquivalentVGPRClass(Src0SubRC);
  const TargetRegisterClass *Src1SubRC =
      RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);
  if (RI.isSGPRClass(Src1SubRC))
    Src1SubRC = RI.getEquivalentVGPRClass(Src1SubRC);

  MachineOperand Op0L = ...;   // sub0 of Src0
  MachineOperand Op1L = ...;   // sub0 of Src1

  unsigned NewOpc = Opc == AMDGPU::S_MUL_U64_U32_PSEUDO
                        ? AMDGPU::V_MUL_HI_U32_e64
                        : AMDGPU::V_MUL_HI_I32_e64;
  MachineInstr *HiHalf = ...;
  MachineInstr *LoHalf = ...;

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
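// S_MUL_U64_U32 / S_MUL_I64_I32 multiply two 32-bit sources into a full
// 64-bit product, so the VALU form needs only V_MUL_LO for sub0 and
// V_MUL_HI_U32 / V_MUL_HI_I32 for sub1. Sketch of the unsigned case:
#include <cstdint>

uint64_t mulU64FromU32(uint32_t A, uint32_t B) {
  uint32_t Lo = A * B;                                             // V_MUL_LO_U32
  uint32_t Hi =
      static_cast<uint32_t>((static_cast<uint64_t>(A) * B) >> 32); // V_MUL_HI_U32
  return (static_cast<uint64_t>(Hi) << 32) | Lo;
}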
  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
      MRI.getRegClass(Src0.getReg()) : &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC =
      RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1RC = Src1.isReg() ?
      MRI.getRegClass(Src1.getReg()) : &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src1SubRC =
      RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);
  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
  const TargetRegisterClass *NewDestSubRC =
      RI.getSubRegisterClass(NewDestRC, AMDGPU::sub0);

  Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)

  Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)

  Register FullDestReg = MRI.createVirtualRegister(NewDestRC);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  Worklist.insert(&LoHalf);
  Worklist.insert(&HiHalf);

  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());

  Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  MachineOperand *Op0;
  MachineOperand *Op1;

  Register NewDest = MRI.createVirtualRegister(DestRC);

  MRI.replaceRegWith(Dest.getReg(), NewDest);
  const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
  const TargetRegisterClass *SrcRC = Src.isReg() ?
      MRI.getRegClass(Src.getReg()) : &AMDGPU::SGPR_32RegClass;

  Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  const TargetRegisterClass *SrcSubRC =
      RI.getSubRegisterClass(SrcRC, AMDGPU::sub0);

  MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub0, SrcSubRC);
  MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub1, SrcSubRC);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);

  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
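// V_BCNT_U32_B32 counts the set bits of one 32-bit source and adds an
// accumulator operand, so a 64-bit popcount chains two of them:
// MidReg = bcnt(lo, 0); ResultReg = bcnt(hi, MidReg). Equivalent sketch
// (std::popcount is C++20):
#include <bit>
#include <cstdint>

uint32_t popcount64ViaBcnt(uint64_t X) {
  uint32_t Mid = std::popcount(static_cast<uint32_t>(X));      // bcnt(lo, 0)
  return std::popcount(static_cast<uint32_t>(X >> 32)) + Mid;  // bcnt(hi, Mid)
}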
  assert(... && Offset == 0 && "Not implemented");

  Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
  Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

      .addReg(Src.getReg(), 0, AMDGPU::sub0);

      .addReg(Src.getReg(), 0, AMDGPU::sub0)

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
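// This path widens a value whose payload already fills the low 32 bits: the
// high half is the low half shifted arithmetically by 31, i.e. a broadcast
// of the sign bit. Sketch:
#include <cstdint>

uint64_t sext32To64(uint32_t Lo) {
  uint32_t Hi =
      static_cast<uint32_t>(static_cast<int32_t>(Lo) >> 31);  // ashr lo, 31
  return (static_cast<uint64_t>(Hi) << 32) | Lo;              // REG_SEQUENCE
}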
  const MCInstrDesc &InstDesc = get(Opcode);

  bool IsCtlz = Opcode == AMDGPU::V_FFBH_U32_e32;
  unsigned OpcodeAdd =
      ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32;

  const TargetRegisterClass *SrcRC =
      Src.isReg() ? MRI.getRegClass(Src.getReg()) : &AMDGPU::SGPR_32RegClass;
  const TargetRegisterClass *SrcSubRC =
      RI.getSubRegisterClass(SrcRC, AMDGPU::sub0);

  MachineOperand SrcRegSub0 = ...;   // sub0 of Src
  MachineOperand SrcRegSub1 = ...;   // sub1 of Src

  Register MidReg1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register MidReg2 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register MidReg3 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register MidReg4 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

      .addReg(IsCtlz ? MidReg1 : MidReg2)

      .addReg(IsCtlz ? MidReg2 : MidReg1);

  MRI.replaceRegWith(Dest.getReg(), MidReg4);

  addUsersToMoveToVALUWorklist(MidReg4, MRI, Worklist);
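// 64-bit count-leading/trailing-zeros from the 32-bit FFBH/FFBL ops: the
// 32-bit op returns ~0u for a zero input, so the unsigned minimum of
// "count of one half" and "count of the other half + 32" is the 64-bit
// answer. Sketch of the ctlz direction (input assumed nonzero, matching the
// *_zero_undef semantics):
#include <algorithm>
#include <cstdint>

uint32_t ffbh32(uint32_t V) {  // V_FFBH_U32 semantics
  if (V == 0)
    return ~0u;
  uint32_t N = 0;
  while (!(V & 0x80000000u)) {
    V <<= 1;
    ++N;
  }
  return N;
}

uint32_t ctlz64(uint64_t X) {
  uint32_t FromHi = ffbh32(static_cast<uint32_t>(X >> 32));
  uint32_t FromLo = ffbh32(static_cast<uint32_t>(X)) + 32;  // V_ADD 32
  return std::min(FromHi, FromLo);                          // V_MIN_U32
}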
void SIInstrInfo::addUsersToMoveToVALUWorklist(

    MachineInstr &UseMI = *MO.getParent();

    switch (UseMI.getOpcode()) {
    case AMDGPU::SOFT_WQM:
    case AMDGPU::STRICT_WWM:
    case AMDGPU::STRICT_WQM:
    case AMDGPU::REG_SEQUENCE:
    case AMDGPU::INSERT_SUBREG:
      OpNo = MO.getOperandNo();

    MRI.constrainRegClass(DstReg, OpRC);

    if (!RI.hasVectorRegisters(OpRC))
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  case AMDGPU::S_PACK_LL_B32_B16: {
    Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  case AMDGPU::S_PACK_LH_B32_B16: {
    Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  case AMDGPU::S_PACK_HL_B32_B16: {
    Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  case AMDGPU::S_PACK_HH_B32_B16: {
    Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
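// The S_PACK_* family packs two 16-bit halves into one 32-bit register; the
// VALU expansion synthesizes each variant from shifts and masks. Bit-level
// sketch of all four variants:
#include <cstdint>

uint32_t packLL(uint32_t A, uint32_t B) { return (A & 0xffff) | (B << 16); }
uint32_t packLH(uint32_t A, uint32_t B) { return (A & 0xffff) | (B & 0xffff0000u); }
uint32_t packHL(uint32_t A, uint32_t B) { return (A >> 16) | (B << 16); }
uint32_t packHH(uint32_t A, uint32_t B) { return (A >> 16) | (B & 0xffff0000u); }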
  assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() &&
         !Op.isDead() && Op.getParent() == &SCCDefInst);
  SmallVector<MachineInstr *, 4> CopyToDelete;

  for (MachineInstr &MI :
    int SCCIdx = MI.findRegisterUseOperandIdx(AMDGPU::SCC, &RI, false);

      MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
      Register DestReg = MI.getOperand(0).getReg();

      MRI.replaceRegWith(DestReg, NewCond);

      MI.getOperand(SCCIdx).setReg(NewCond);

    if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, &RI, false, false) != -1)

  for (auto &Copy : CopyToDelete)
    Copy->eraseFromParent();
void SIInstrInfo::addSCCDefsToVALUWorklist(MachineInstr *SCCUseInst,

  for (MachineInstr &MI :
    if (MI.modifiesRegister(AMDGPU::VCC, &RI))
    if (MI.definesRegister(AMDGPU::SCC, &RI)) {
  const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);

  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::INSERT_SUBREG:
  case AMDGPU::SOFT_WQM:
  case AMDGPU::STRICT_WWM:
  case AMDGPU::STRICT_WQM: {
    if (RI.isAGPRClass(SrcRC)) {
      if (RI.isAGPRClass(NewDstRC))
      case AMDGPU::REG_SEQUENCE:
      case AMDGPU::INSERT_SUBREG:
        NewDstRC = RI.getEquivalentAGPRClass(NewDstRC);
      NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);

    if (RI.isVGPRClass(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass)

    NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
                                   int OpIndices[3]) const {
  const MCInstrDesc &Desc = MI.getDesc();

  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  for (unsigned i = 0; i < 3; ++i) {
    int Idx = OpIndices[i];

    const MachineOperand &MO = MI.getOperand(Idx);

    const TargetRegisterClass *OpRC =
        RI.getRegClass(getOpRegClassID(Desc.operands()[Idx]));
    bool IsRequiredSGPR = RI.isSGPRClass(OpRC);

    const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
    if (RI.isSGPRClass(RegRC))

  if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
    SGPRReg = UsedSGPRs[0];

  if (!SGPRReg && UsedSGPRs[1]) {
    if (UsedSGPRs[1] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[1];
                                                AMDGPU::OpName OperandName) const {
  if (OperandName == AMDGPU::OpName::NUM_OPERAND_NAMES)

  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);

  return &MI.getOperand(Idx);
  if (ST.isAmdHsaOS()) {
    RsrcDataFormat |= (1ULL << 56);

    RsrcDataFormat |= (2ULL << 59);

  return RsrcDataFormat;

  uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize(true)) - 1;

  uint64_t IndexStride = ST.isWave64() ? 3 : 2;

  Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
  unsigned Opc = MI.getOpcode();

  return get(Opc).mayLoad() &&

                                          int &FrameIndex) const {

  if (!Addr || !Addr->isFI())

                                          int &FrameIndex) const {

                                          int &FrameIndex) const {

                                          int &FrameIndex) const {
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
  unsigned Opc = MI.getOpcode();

  unsigned DescSize = Desc.getSize();

  unsigned Size = DescSize;

  if (MI.isBranch() && ST.hasOffset3fBug())

  bool HasLiteral = false;
  unsigned LiteralSize = 4;
  for (int I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) {

    if (ST.has64BitLiterals()) {
      switch (OpInfo.OperandType) {

  return HasLiteral ? DescSize + LiteralSize : DescSize;

  int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);

  int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
  return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4);
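// Size accounting in the two paths above, as plain arithmetic: an
// instruction carrying a literal costs its encoded size plus the literal
// (4 bytes, or 8 when a 64-bit literal operand is in play), and a MIMG
// instruction costs its 8-byte base encoding plus one 4-byte group per four
// extra address registers. Sketch:
unsigned sizeWithLiteral(unsigned DescSize, bool HasLiteral, bool Lit64) {
  return HasLiteral ? DescSize + (Lit64 ? 8u : 4u) : DescSize;
}

unsigned mimgSize(int RSrcIdx, int VAddr0Idx) {
  return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4);  // same formula as above
}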
  case TargetOpcode::BUNDLE:

  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR: {

    const char *AsmStr = MI.getOperand(0).getSymbolName();

    if (MI.isMetaInstruction())

  const auto *D16Info = AMDGPU::getT16D16Helper(Opc);

  unsigned LoInstOpcode = D16Info->LoOp;

  DescSize = Desc.getSize();

  if (Opc == AMDGPU::V_FMA_MIX_F16_t16 || Opc == AMDGPU::V_FMA_MIX_BF16_t16) {

    DescSize = Desc.getSize();

  if (MI.memoperands_empty())

  static const std::pair<int, const char *> TargetIndices[] = {
std::pair<unsigned, unsigned>

  static const std::pair<unsigned, const char *> TargetFlags[] = {

  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =

    return AMDGPU::WWM_COPY;

  return AMDGPU::COPY;
  bool IsNullOrVectorRegister = true;

    IsNullOrVectorRegister = !RI.isSGPRClass(RI.getRegClassForReg(MRI, Reg));

  return IsNullOrVectorRegister &&
         (Opcode == AMDGPU::IMPLICIT_DEF &&
          (!MI.isTerminator() && Opcode != AMDGPU::COPY &&
           MI.modifiesRegister(AMDGPU::EXEC, &RI)));
  if (ST.hasAddNoCarry())

  Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC());
  MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC());

  if (ST.hasAddNoCarry())

  Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC)
                             : RS.scavengeRegisterBackwards(
                                   *RI.getBoolRC(), I, false,
  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
  case AMDGPU::SI_KILL_I1_TERMINATOR:

  case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
    return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR);
  case AMDGPU::SI_KILL_I1_PSEUDO:
    return get(AMDGPU::SI_KILL_I1_TERMINATOR);

  const unsigned OffsetBits =
  return (1 << OffsetBits) - 1;
  if (MI.isInlineAsm())

  for (auto &Op : MI.implicit_operands()) {
    if (Op.isReg() && Op.getReg() == AMDGPU::VCC)
      Op.setReg(AMDGPU::VCC_LO);

  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase);

  const int16_t RCID = getOpRegClassID(MI.getDesc().operands()[Idx]);
  return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass);
  if (Imm <= MaxImm + 64) {

    Overflow = Imm - MaxImm;
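// splitMUBUFOffset (above) keeps as much of the offset as fits in the MUBUF
// immediate field and moves the rest into the SOffset operand. A simplified
// sketch of that split (the real code also keeps the immediate suitably
// aligned; names are ours):
#include <cstdint>

void splitMubufOffsetSketch(uint32_t Imm, uint32_t MaxImm, uint32_t &SOffset,
                            uint32_t &ImmOffset) {
  if (Imm <= MaxImm) {
    SOffset = 0;
    ImmOffset = Imm;
  } else {
    SOffset = Imm - MaxImm;  // overflow carried by the SOffset operand
    ImmOffset = MaxImm;
  }
}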
  if (ST.hasRestrictedSOffset())

  if (!ST.hasFlatInstOffsets())

  if (ST.hasNegativeUnalignedScratchOffsetBug() &&
std::pair<int64_t, int64_t>

  int64_t RemainderOffset = COffsetVal;
  int64_t ImmField = 0;

  if (AllowNegative) {

    int64_t D = 1LL << NumBits;
    RemainderOffset = (COffsetVal / D) * D;
    ImmField = COffsetVal - RemainderOffset;

    if (ST.hasNegativeUnalignedScratchOffsetBug() &&
        (ImmField % 4) != 0) {

      RemainderOffset += ImmField % 4;
      ImmField -= ImmField % 4;
    }
  } else if (COffsetVal >= 0) {

    RemainderOffset = COffsetVal - ImmField;

  assert(RemainderOffset + ImmField == COffsetVal);
  return {ImmField, RemainderOffset};
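// The invariant asserted above is ImmField + RemainderOffset == COffsetVal,
// with ImmField small enough for the instruction's offset field. Standalone
// sketch of the signed (AllowNegative) split:
#include <cassert>
#include <cstdint>
#include <utility>

std::pair<int64_t, int64_t> splitOffsetSketch(int64_t Off, unsigned NumBits) {
  int64_t D = 1LL << NumBits;         // size of the encodable window
  int64_t Remainder = (Off / D) * D;  // truncates toward zero
  int64_t Imm = Off - Remainder;      // |Imm| < D, sign matches Off
  assert(Imm + Remainder == Off);
  return {Imm, Remainder};
}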
  if (ST.hasNegativeScratchOffsetBug() &&

  switch (ST.getGeneration()) {
  case AMDGPU::V_MOVRELS_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELS_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELD_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELD_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELSD_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10:

#define GENERATE_RENAMED_GFX9_CASES(OPCODE)                                    \
  case OPCODE##_dpp:                                                           \
  case OPCODE##_e32:                                                           \
  case OPCODE##_e64:                                                           \
  case OPCODE##_e64_dpp:                                                       \

  case AMDGPU::V_DIV_FIXUP_F16_gfx9_e64:
  case AMDGPU::V_DIV_FIXUP_F16_gfx9_fake16_e64:
  case AMDGPU::V_FMA_F16_gfx9_e64:
  case AMDGPU::V_FMA_F16_gfx9_fake16_e64:
  case AMDGPU::V_INTERP_P2_F16:
  case AMDGPU::V_MAD_F16_e64:
  case AMDGPU::V_MAD_U16_e64:
  case AMDGPU::V_MAD_I16_e64:
  switch (ST.getGeneration()) {

  if (isMAI(Opcode)) {

  if (MCOp == (uint16_t)-1 && ST.hasGFX1250Insts())

  if (ST.hasGFX90AInsts()) {

    if (ST.hasGFX940Insts())
  for (unsigned I = 0, E = (MI.getNumOperands() - 1) / 2; I < E; ++I)
    if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
      auto &RegOp = MI.getOperand(1 + 2 * I);

  switch (MI.getOpcode()) {

  case AMDGPU::REG_SEQUENCE:

  case AMDGPU::INSERT_SUBREG:
    if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
  if (!P.Reg.isVirtual())

  auto *DefInst = MRI.getVRegDef(RSR.Reg);
  while (auto *MI = DefInst) {

    switch (MI->getOpcode()) {

    case AMDGPU::V_MOV_B32_e32: {
      auto &Op1 = MI->getOperand(1);

      DefInst = MRI.getVRegDef(RSR.Reg);

      DefInst = MRI.getVRegDef(RSR.Reg);
  assert(MRI.isSSA() && "Must be run on SSA");

  auto *TRI = MRI.getTargetRegisterInfo();
  auto *DefBB = DefMI.getParent();

  if (UseMI.getParent() != DefBB)

  const int MaxInstScan = 20;

  auto E = UseMI.getIterator();
  for (auto I = std::next(DefMI.getIterator()); I != E; ++I) {
    if (I->isDebugInstr())

    if (++NumInst > MaxInstScan)

    if (I->modifiesRegister(AMDGPU::EXEC, TRI))
  assert(MRI.isSSA() && "Must be run on SSA");

  auto *TRI = MRI.getTargetRegisterInfo();
  auto *DefBB = DefMI.getParent();

  const int MaxUseScan = 10;

  for (auto &Use : MRI.use_nodbg_operands(VReg)) {
    auto &UseInst = *Use.getParent();

    if (UseInst.getParent() != DefBB || UseInst.isPHI())

    if (++NumUse > MaxUseScan)

  const int MaxInstScan = 20;

  for (auto I = std::next(DefMI.getIterator());; ++I) {

    if (I->isDebugInstr())

    if (++NumInst > MaxInstScan)

      if (Reg == VReg && --NumUse == 0)
    } else if (TRI->regsOverlap(Reg, AMDGPU::EXEC))
  auto Cur = MBB.begin();
  if (Cur != MBB.end())

      if (!Cur->isPHI() && Cur->readsRegister(Dst, nullptr))

  } while (Cur != MBB.end() && Cur != LastPHIIt);

  if (InsPt != MBB.end() &&
      (InsPt->getOpcode() == AMDGPU::SI_IF ||
       InsPt->getOpcode() == AMDGPU::SI_ELSE ||
       InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) &&
      InsPt->definesRegister(Src, nullptr)) {

        .addReg(Src, 0, SrcSubReg)
  if (isFullCopyInstr(MI)) {
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();

      MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);

      MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass);
                                           unsigned *PredCost) const {
  if (MI.isBundle()) {

    unsigned Lat = 0, Count = 0;
    for (++I; I != E && I->isBundledWithPred(); ++I) {

      Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I));

    return Lat + Count - 1;

  return SchedModel.computeInstrLatency(&MI);
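// Bundle latency above models one issue slot per bundled instruction: the
// result is the maximum member latency plus (Count - 1) extra issue cycles.
// Sketch:
#include <algorithm>
#include <vector>

unsigned bundleLatencySketch(const std::vector<unsigned> &MemberLatencies) {
  unsigned Lat = 0, Count = 0;
  for (unsigned L : MemberLatencies) {
    ++Count;
    Lat = std::max(Lat, L);
  }
  return Count ? Lat + Count - 1 : 0;
}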
  unsigned Opcode = MI.getOpcode();

                        : MI.getOperand(1).getReg();
    LLT DstTy = MRI.getType(Dst);
    LLT SrcTy = MRI.getType(Src);

    unsigned SrcAS = SrcTy.getAddressSpace();

    ST.hasGloballyAddressableScratch()

  if (Opcode == TargetOpcode::G_ADDRSPACE_CAST)
    return HandleAddrSpaceCast(MI);

    auto IID = GI->getIntrinsicID();

    case Intrinsic::amdgcn_addrspacecast_nonnull:
      return HandleAddrSpaceCast(MI);
    case Intrinsic::amdgcn_if:
    case Intrinsic::amdgcn_else:

  if (Opcode == AMDGPU::G_LOAD || Opcode == AMDGPU::G_ZEXTLOAD ||
      Opcode == AMDGPU::G_SEXTLOAD) {
    if (MI.memoperands_empty())

      return mmo->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
             mmo->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS;

  if (SIInstrInfo::isGenericAtomicRMWOpcode(Opcode) ||
      Opcode == AMDGPU::G_ATOMIC_CMPXCHG ||
      Opcode == AMDGPU::G_ATOMIC_CMPXCHG_WITH_SUCCESS ||
  unsigned opcode = MI.getOpcode();
  if (opcode == AMDGPU::V_READLANE_B32 ||
      opcode == AMDGPU::V_READFIRSTLANE_B32 ||
      opcode == AMDGPU::SI_RESTORE_S32_FROM_VGPR)

  if (isCopyInstr(MI)) {

        RI.getPhysRegBaseClass(srcOp.getReg());

  if (MI.isPreISelOpcode())

    if (MI.memoperands_empty())

      return mmo->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
             mmo->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS;
  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {

    if (!SrcOp.isReg())

    if (!Reg || !SrcOp.readsReg())

    if (RegBank && RegBank->getID() != AMDGPU::SGPRRegBankID)

      F, "ds_ordered_count unsupported for this calling conv"));
                                 Register &SrcReg2, int64_t &CmpMask,
                                 int64_t &CmpValue) const {
  if (!MI.getOperand(0).isReg() || MI.getOperand(0).getSubReg())

  switch (MI.getOpcode()) {

  case AMDGPU::S_CMP_EQ_U32:
  case AMDGPU::S_CMP_EQ_I32:
  case AMDGPU::S_CMP_LG_U32:
  case AMDGPU::S_CMP_LG_I32:
  case AMDGPU::S_CMP_LT_U32:
  case AMDGPU::S_CMP_LT_I32:
  case AMDGPU::S_CMP_GT_U32:
  case AMDGPU::S_CMP_GT_I32:
  case AMDGPU::S_CMP_LE_U32:
  case AMDGPU::S_CMP_LE_I32:
  case AMDGPU::S_CMP_GE_U32:
  case AMDGPU::S_CMP_GE_I32:
  case AMDGPU::S_CMP_EQ_U64:
  case AMDGPU::S_CMP_LG_U64:
    SrcReg = MI.getOperand(0).getReg();
    if (MI.getOperand(1).isReg()) {
      if (MI.getOperand(1).getSubReg())

      SrcReg2 = MI.getOperand(1).getReg();

    } else if (MI.getOperand(1).isImm()) {

      CmpValue = MI.getOperand(1).getImm();

  case AMDGPU::S_CMPK_EQ_U32:
  case AMDGPU::S_CMPK_EQ_I32:
  case AMDGPU::S_CMPK_LG_U32:
  case AMDGPU::S_CMPK_LG_I32:
  case AMDGPU::S_CMPK_LT_U32:
  case AMDGPU::S_CMPK_LT_I32:
  case AMDGPU::S_CMPK_GT_U32:
  case AMDGPU::S_CMPK_GT_I32:
  case AMDGPU::S_CMPK_LE_U32:
  case AMDGPU::S_CMPK_LE_I32:
  case AMDGPU::S_CMPK_GE_U32:
  case AMDGPU::S_CMPK_GE_I32:
    SrcReg = MI.getOperand(0).getReg();

    CmpValue = MI.getOperand(1).getImm();
                                       Register SrcReg2, int64_t CmpMask,

  const auto optimizeCmpAnd = [&CmpInstr, SrcReg, CmpValue, MRI,
                               this](int64_t ExpectedValue, unsigned SrcSize,
                                     bool IsReversible, bool IsSigned) -> bool {

    if (!Def || Def->getParent() != CmpInstr.getParent())

    if (Def->getOpcode() != AMDGPU::S_AND_B32 &&
        Def->getOpcode() != AMDGPU::S_AND_B64)

    const auto isMask = [&Mask, SrcSize](const MachineOperand *MO) -> bool {

      SrcOp = &Def->getOperand(2);
    else if (isMask(&Def->getOperand(2)))
      SrcOp = &Def->getOperand(1);

    if (IsSigned && BitNo == SrcSize - 1)

    ExpectedValue <<= BitNo;

    bool IsReversedCC = false;
    if (CmpValue != ExpectedValue) {

      IsReversedCC = CmpValue == (ExpectedValue ^ Mask);

    Register DefReg = Def->getOperand(0).getReg();
    if (IsReversedCC && !MRI->hasOneNonDBGUse(DefReg))

    for (auto I = std::next(Def->getIterator()), E = CmpInstr.getIterator();
      if (I->modifiesRegister(AMDGPU::SCC, &RI) ||
          I->killsRegister(AMDGPU::SCC, &RI))

        Def->findRegisterDefOperand(AMDGPU::SCC, nullptr);

    if (!MRI->use_nodbg_empty(DefReg)) {

    unsigned NewOpc = (SrcSize == 32) ? IsReversedCC ? AMDGPU::S_BITCMP0_B32
                                                     : AMDGPU::S_BITCMP1_B32
                                      : IsReversedCC ? AMDGPU::S_BITCMP0_B64
                                                     : AMDGPU::S_BITCMP1_B64;

    Def->eraseFromParent();
  case AMDGPU::S_CMP_EQ_U32:
  case AMDGPU::S_CMP_EQ_I32:
  case AMDGPU::S_CMPK_EQ_U32:
  case AMDGPU::S_CMPK_EQ_I32:
    return optimizeCmpAnd(1, 32, true, false);
  case AMDGPU::S_CMP_GE_U32:
  case AMDGPU::S_CMPK_GE_U32:
    return optimizeCmpAnd(1, 32, false, false);
  case AMDGPU::S_CMP_GE_I32:
  case AMDGPU::S_CMPK_GE_I32:
    return optimizeCmpAnd(1, 32, false, true);
  case AMDGPU::S_CMP_EQ_U64:
    return optimizeCmpAnd(1, 64, true, false);
  case AMDGPU::S_CMP_LG_U32:
  case AMDGPU::S_CMP_LG_I32:
  case AMDGPU::S_CMPK_LG_U32:
  case AMDGPU::S_CMPK_LG_I32:
    return optimizeCmpAnd(0, 32, true, false);
  case AMDGPU::S_CMP_GT_U32:
  case AMDGPU::S_CMPK_GT_U32:
    return optimizeCmpAnd(0, 32, false, false);
  case AMDGPU::S_CMP_GT_I32:
  case AMDGPU::S_CMPK_GT_I32:
    return optimizeCmpAnd(0, 32, false, true);
  case AMDGPU::S_CMP_LG_U64:
    return optimizeCmpAnd(0, 64, true, false);
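// The rewrite behind optimizeCmpAnd: when the compared value is an AND with
// a single-bit mask, "s_cmp (x & (1 << n)), k" collapses to a bit test that
// S_BITCMP0/S_BITCMP1 perform directly on SCC (the reversed opcode handles
// the inverted condition). The underlying identity as a checkable C++ sketch:
#include <cstdint>

bool cmpAndMatchesBitTest(uint32_t X, unsigned N) {
  bool ViaCmp = (X & (1u << N)) == (1u << N);  // s_and_b32 + s_cmp_eq
  bool ViaBitTest = (X >> N) & 1u;             // s_bitcmp1_b32
  return ViaCmp == ViaBitTest;                 // holds for all X, N < 32
}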
                                           AMDGPU::OpName OpName) const {
  if (!ST.needsAlignedVGPRs())

  int OpNo = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);

  bool IsAGPR = RI.isAGPR(MRI, DataReg);

      IsAGPR ? &AMDGPU::AGPR_32RegClass : &AMDGPU::VGPR_32RegClass);

      MRI.createVirtualRegister(IsAGPR ? &AMDGPU::AReg_64_Align2RegClass
                                       : &AMDGPU::VReg_64_Align2RegClass);

      .addReg(DataReg, 0, Op.getSubReg())

  Op.setSubReg(AMDGPU::sub0);
  unsigned Opcode = MI.getOpcode();

  if (Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_e64 ||
      Opcode == AMDGPU::V_ACCVGPR_READ_B32_e64)

  if (!ST.hasGFX940Insts())
Interface definition for SIInstrInfo.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
const unsigned CSelectOpc
static const LaneMaskConstants & get(const GCNSubtarget &ST)
const unsigned XorTermOpc
const unsigned OrSaveExecOpc
const unsigned AndSaveExecOpc
Class for arbitrary precision integers.
int64_t getSExtValue() const
Get sign extended value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
uint64_t getZExtValue() const
Diagnostic information for unsupported feature in backend.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasAddNoCarry() const
CycleT * getCycle(const BlockT *Block) const
Find the innermost cycle containing a given block.
void getExitingBlocks(SmallVectorImpl< BlockT * > &TmpStorage) const
Return all blocks of this cycle that have successor outside of this cycle.
bool contains(const BlockT *Block) const
Return whether Block is contained in the cycle.
const GenericCycle * getParentCycle() const
Itinerary data supplied by a subtarget to be used by a target.
constexpr unsigned getAddressSpace() const
This is an important class for using LLVM in a threaded context.
LiveInterval - This class represents the liveness of a register, or stack slot.
bool hasInterval(Register Reg) const
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
LiveInterval & getInterval(Register Reg)
LLVM_ABI bool shrinkToUses(LiveInterval *li, SmallVectorImpl< MachineInstr * > *dead=nullptr)
After removing some uses of a register, shrink its live range to just the remaining uses.
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
This class represents the liveness of a register, stack slot, etc.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
LLVM_ABI VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
static const MCBinaryExpr * createAnd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
static const MCBinaryExpr * createAShr(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot b...
ArrayRef< MCPhysReg > implicit_uses() const
Return a list of registers that are potentially read by any instance of this machine instruction.
unsigned getOpcode() const
Return the opcode number for this descriptor.
This holds information about one operand of a machine instruction, indicating the register class for ...
uint8_t OperandType
Information about the type of the operand.
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Wrapper class representing physical registers. Should be passed by value.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
LLVM_ABI void setVariableValue(const MCExpr *Value)
Helper class for constructing bundles of MachineInstrs.
MachineBasicBlock::instr_iterator begin() const
Return an iterator to the first bundled instruction.
MIBundleBuilder & append(MachineInstr *MI)
Insert MI into MBB by appending it to the instructions in the bundle.
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
@ LQR_Dead
Register is known to be fully dead.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void push_back(MachineBasicBlock *MBB)
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
LLVM_ABI void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
mop_range implicit_operands()
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mop_range explicit_operands()
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
bool isMoveImmediate(QueryType Type=IgnoreBundle) const
Return true if this instruction is a move immediate (including conditional moves) instruction.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
LLVM_ABI void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just after the instruction itself.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
LLVM_ABI int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
LLVM_ABI unsigned getOperandNo() const
Returns the index of this operand in the instruction that it belongs to.
const GlobalValue * getGlobal() const
void setImplicit(bool Val=true)
LLVM_ABI void ChangeToFrameIndex(int Idx, unsigned TargetFlags=0)
Replace this operand with a frame index.
void setImm(int64_t immVal)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsDead(bool Val=true)
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
LLVM_ABI void ChangeToGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
ChangeToGA - Replace this operand with a new global address operand.
void setIsKill(bool Val=true)
LLVM_ABI void ChangeToRegister(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isDebug=false)
ChangeToRegister - Replace this operand with a new register operand of the specified value.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
void setOffset(int64_t Offset)
unsigned getTargetFlags() const
static MachineOperand CreateImm(int64_t Val)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isTargetIndex() const
isTargetIndex - Tests if this is a MO_TargetIndex operand.
void setTargetFlags(unsigned F)
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isLegalMUBUFImmOffset(unsigned Imm) const
bool isInlineConstant(const APInt &Imm) const
void legalizeOperandsVOP3(MachineRegisterInfo &MRI, MachineInstr &MI) const
Fix operands in MI to satisfy constant bus requirements.
static bool isDS(const MachineInstr &MI)
MachineBasicBlock * legalizeOperands(MachineInstr &MI, MachineDominatorTree *MDT=nullptr) const
Legalize all operands in this instruction.
bool areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1, int64_t &Offset0, int64_t &Offset1) const override
unsigned getLiveRangeSplitOpcode(Register Reg, const MachineFunction &MF) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &LdSt, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const final
Register isSGPRStackAccess(const MachineInstr &MI, int &FrameIndex) const
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
static bool isNeverUniform(const MachineInstr &MI)
unsigned getOpSize(uint16_t Opcode, unsigned OpNo) const
Return the size in bytes of the operand OpNo on the given.
bool isXDLWMMA(const MachineInstr &MI) const
bool isBasicBlockPrologue(const MachineInstr &MI, Register Reg=Register()) const override
uint64_t getDefaultRsrcDataFormat() const
static bool isSOPP(const MachineInstr &MI)
InstructionUniformity getGenericInstructionUniformity(const MachineInstr &MI) const
bool isIGLP(unsigned Opcode) const
static bool isFLATScratch(const MachineInstr &MI)
const MCInstrDesc & getIndirectRegWriteMovRelPseudo(unsigned VecSize, unsigned EltSize, bool IsSGPR) const
MachineInstrBuilder getAddNoCarry(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg) const
Return a partially built integer add instruction without carry.
bool mayAccessFlatAddressSpace(const MachineInstr &MI) const
bool shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, int64_t Offset0, int64_t Offset1, unsigned NumLoads) const override
bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset, Align Alignment=Align(4)) const
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
void moveToVALU(SIInstrWorklist &Worklist, MachineDominatorTree *MDT) const
Replace the instructions opcode with the equivalent VALU opcode.
static bool isSMRD(const MachineInstr &MI)
void restoreExec(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register Reg, SlotIndexes *Indexes=nullptr) const
bool usesConstantBus(const MachineRegisterInfo &MRI, const MachineOperand &MO, const MCOperandInfo &OpInfo) const
Returns true if this operand uses the constant bus.
static unsigned getMaxMUBUFImmOffset(const GCNSubtarget &ST)
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
static unsigned getFoldableCopySrcIdx(const MachineInstr &MI)
bool mayAccessScratchThroughFlat(const MachineInstr &MI) const
void legalizeOperandsFLAT(MachineRegisterInfo &MRI, MachineInstr &MI) const
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
static std::optional< int64_t > extractSubregFromImm(int64_t ImmVal, unsigned SubRegIndex)
Return the extracted immediate value in a subregister use from a constant materialized in a super reg...
Register isStackAccess(const MachineInstr &MI, int &FrameIndex) const
static bool isMTBUF(const MachineInstr &MI)
const MCInstrDesc & getIndirectGPRIDXPseudo(unsigned VecSize, bool IsIndirectSrc) const
void insertReturn(MachineBasicBlock &MBB) const
static bool isDGEMM(unsigned Opcode)
static bool isEXP(const MachineInstr &MI)
static bool isSALU(const MachineInstr &MI)
void legalizeGenericOperand(MachineBasicBlock &InsertMBB, MachineBasicBlock::iterator I, const TargetRegisterClass *DstRC, MachineOperand &Op, MachineRegisterInfo &MRI, const DebugLoc &DL) const
MachineInstr * buildShrunkInst(MachineInstr &MI, unsigned NewOpcode) const
unsigned getInstBundleSize(const MachineInstr &MI) const
static bool isVOP2(const MachineInstr &MI)
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
static bool isSDWA(const MachineInstr &MI)
const MCInstrDesc & getKillTerminatorFromPseudo(unsigned Opcode) const
void insertNoops(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Quantity) const override
static bool isGather4(const MachineInstr &MI)
MachineInstr * getWholeWaveFunctionSetup(MachineFunction &MF) const
bool isLegalVSrcOperand(const MachineRegisterInfo &MRI, const MCOperandInfo &OpInfo, const MachineOperand &MO) const
Check if MO would be a valid operand for the given operand definition OpInfo.
static bool isDOT(const MachineInstr &MI)
MachineInstr * createPHISourceCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const override
bool hasModifiers(unsigned Opcode) const
Return true if this instruction has any modifiers.
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
static bool isSWMMAC(const MachineInstr &MI)
ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *II, const ScheduleDAGMI *DAG) const override
bool isHighLatencyDef(int Opc) const override
void legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const
Legalize the OpIndex operand of this instruction by inserting a MOV.
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
static bool isVOPC(const MachineInstr &MI)
void removeModOperands(MachineInstr &MI) const
std::pair< int64_t, int64_t > splitFlatOffset(int64_t COffsetVal, unsigned AddrSpace, uint64_t FlatVariant) const
Split COffsetVal into {immediate offset field, remainder offset} values.
unsigned getVectorRegSpillRestoreOpcode(Register Reg, const TargetRegisterClass *RC, unsigned Size, const SIMachineFunctionInfo &MFI) const
bool isXDL(const MachineInstr &MI) const
static bool isVIMAGE(const MachineInstr &MI)
void enforceOperandRCAlignment(MachineInstr &MI, AMDGPU::OpName OpName) const
static bool isSOP2(const MachineInstr &MI)
static bool isGWS(const MachineInstr &MI)
bool isLegalAV64PseudoImm(uint64_t Imm) const
Check if this immediate value can be used for AV_MOV_B64_IMM_PSEUDO.
bool isNeverCoissue(MachineInstr &MI) const
bool hasModifiersSet(const MachineInstr &MI, AMDGPU::OpName OpName) const
const TargetRegisterClass * getPreferredSelectRegClass(unsigned Size) const
bool isLegalToSwap(const MachineInstr &MI, unsigned fromIdx, unsigned toIdx) const
static bool isFLATGlobal(const MachineInstr &MI)
bool isGlobalMemoryObject(const MachineInstr *MI) const override
static bool isVSAMPLE(const MachineInstr &MI)
bool isBufferSMRD(const MachineInstr &MI) const
static bool isKillTerminator(unsigned Opcode)
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx0, unsigned &SrcOpIdx1) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
void insertScratchExecCopy(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register Reg, bool IsSCCLive, SlotIndexes *Indexes=nullptr) const
bool hasVALU32BitEncoding(unsigned Opcode) const
Return true if this 64-bit VALU instruction has a 32-bit encoding.
unsigned getMovOpcode(const TargetRegisterClass *DstRC) const
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const override
unsigned buildExtractSubReg(MachineBasicBlock::iterator MI, MachineRegisterInfo &MRI, const MachineOperand &SuperReg, const TargetRegisterClass *SuperRC, unsigned SubIdx, const TargetRegisterClass *SubRC) const
void legalizeOperandsVOP2(MachineRegisterInfo &MRI, MachineInstr &MI) const
Legalize operands in MI by either commuting it or inserting a copy of src1.
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const final
static bool isTRANS(const MachineInstr &MI)
static bool isImage(const MachineInstr &MI)
static bool isSOPK(const MachineInstr &MI)
const TargetRegisterClass * getOpRegClass(const MachineInstr &MI, unsigned OpNo) const
Return the correct register class for OpNo.
MachineBasicBlock * insertSimulatedTrap(MachineRegisterInfo &MRI, MachineBasicBlock &MBB, MachineInstr &MI, const DebugLoc &DL) const
Build instructions that simulate the behavior of a s_trap 2 instructions for hardware (namely,...
static unsigned getNonSoftWaitcntOpcode(unsigned Opcode)
static unsigned getDSShaderTypeValue(const MachineFunction &MF)
static bool isFoldableCopy(const MachineInstr &MI)
bool mayAccessLDSThroughFlat(const MachineInstr &MI) const
bool isIgnorableUse(const MachineOperand &MO) const override
static bool isMUBUF(const MachineInstr &MI)
bool expandPostRAPseudo(MachineInstr &MI) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
const TargetRegisterClass * getRegClass(const MCInstrDesc &TID, unsigned OpNum, const TargetRegisterInfo *TRI) const override
InstructionUniformity getInstructionUniformity(const MachineInstr &MI) const override final
static bool isSegmentSpecificFLAT(const MachineInstr &MI)
bool isReMaterializableImpl(const MachineInstr &MI) const override
static bool isVOP3(const MCInstrDesc &Desc)
bool physRegUsesConstantBus(const MachineOperand &Reg) const
static bool isF16PseudoScalarTrans(unsigned Opcode)
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const
static bool isDPP(const MachineInstr &MI)
bool analyzeBranchImpl(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const
static bool isMFMA(const MachineInstr &MI)
bool isLowLatencyInstruction(const MachineInstr &MI) const
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies a value from one register to ano...
bool isAlwaysGDS(uint16_t Opcode) const
static bool isMAI(const MCInstrDesc &Desc)
static bool usesLGKM_CNT(const MachineInstr &MI)
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void legalizeOperandsVALUt16(MachineInstr &Inst, MachineRegisterInfo &MRI) const
Fix operands in Inst when lowering 16-bit SALU instructions to VALU.
void moveToVALUImpl(SIInstrWorklist &Worklist, MachineDominatorTree *MDT, MachineInstr &Inst) const
bool isImmOperandLegal(const MCInstrDesc &InstDesc, unsigned OpNo, const MachineOperand &MO) const
bool canShrink(const MachineInstr &MI, const MachineRegisterInfo &MRI) const
bool isAsmOnlyOpcode(int MCOp) const
Check if this instruction should only be used by the assembler.
static bool isVGPRSpill(const MachineInstr &MI)
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
This is used by the post-RA scheduler (SchedulePostRAList.cpp).
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool isLegalFLATOffset(int64_t Offset, unsigned AddrSpace, uint64_t FlatVariant) const
Returns whether Offset is legal for the subtarget as the offset to a FLAT encoded instruction with the giv...
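A minimal usage sketch, not from this file: the helper name below is invented, and it assumes the common FLAT-global case with the SIInstrFlags::FlatGlobal variant from SIDefines.h.

// Hypothetical helper: can Offset be encoded directly in a FLAT global
// instruction on this subtarget? (Requires the AMDGPU target headers,
// e.g. SIInstrInfo.h, on the include path.)
static bool canFoldGlobalOffset(const llvm::SIInstrInfo &TII, int64_t Offset) {
  return TII.isLegalFLATOffset(Offset, llvm::AMDGPUAS::GLOBAL_ADDRESS,
                               llvm::SIInstrFlags::FlatGlobal);
}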
static bool isWWMRegSpillOpcode(uint16_t Opcode)
unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
int64_t getNamedImmOperand(const MachineInstr &MI, AMDGPU::OpName OperandName) const
Get the required immediate operand.
ArrayRef< std::pair< int, const char * > > getSerializableTargetIndices() const override
bool regUsesConstantBus(const MachineOperand &Reg, const MachineRegisterInfo &MRI) const
static bool isMIMG(const MachineInstr &MI)
MachineOperand buildExtractSubRegOrImm(MachineBasicBlock::iterator MI, MachineRegisterInfo &MRI, const MachineOperand &SuperReg, const TargetRegisterClass *SuperRC, unsigned SubIdx, const TargetRegisterClass *SubRC) const
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isLegalRegOperand(const MachineRegisterInfo &MRI, const MCOperandInfo &OpInfo, const MachineOperand &MO) const
Check if MO (a register operand) is a legal register for the given operand description or operand ind...
bool allowNegativeFlatOffset(uint64_t FlatVariant) const
Returns true if negative offsets are allowed for the given FlatVariant.
static unsigned getNumWaitStates(const MachineInstr &MI)
Return the number of wait states that result from executing this instruction.
unsigned getVectorRegSpillSaveOpcode(Register Reg, const TargetRegisterClass *RC, unsigned Size, const SIMachineFunctionInfo &MFI) const
unsigned getVALUOp(const MachineInstr &MI) const
static bool modifiesModeRegister(const MachineInstr &MI)
Return true if the instruction modifies the mode register.
Register readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI, MachineRegisterInfo &MRI, const TargetRegisterClass *DstRC=nullptr) const
Copy a value from a VGPR (SrcReg) to an SGPR.
bool hasDivergentBranch(const MachineBasicBlock *MBB) const
Return whether the block terminates with a divergent branch.
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void fixImplicitOperands(MachineInstr &MI) const
bool moveFlatAddrToVGPR(MachineInstr &Inst) const
Change the SADDR form of a FLAT Inst to its VADDR form if the saddr operand was moved to a VGPR.
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool swapSourceModifiers(MachineInstr &MI, MachineOperand &Src0, AMDGPU::OpName Src0OpName, MachineOperand &Src1, AMDGPU::OpName Src1OpName) const
Register insertNE(MachineBasicBlock *MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register SrcReg, int Value) const
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
bool hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const
This function is used to determine if an instruction can be safely executed under EXEC = 0 without ha...
bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const override
static bool isAtomic(const MachineInstr &MI)
bool canInsertSelect(const MachineBasicBlock &MBB, ArrayRef< MachineOperand > Cond, Register DstReg, Register TrueReg, Register FalseReg, int &CondCycles, int &TrueCycles, int &FalseCycles) const override
bool isLiteralOperandLegal(const MCInstrDesc &InstDesc, const MCOperandInfo &OpInfo) const
static bool sopkIsZext(unsigned Opcode)
static bool isSGPRSpill(const MachineInstr &MI)
static bool isWMMA(const MachineInstr &MI)
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
bool mayReadEXEC(const MachineRegisterInfo &MRI, const MachineInstr &MI) const
Returns true if the instruction could potentially depend on the value of exec.
void legalizeOperandsSMRD(MachineRegisterInfo &MRI, MachineInstr &MI) const
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
void insertVectorSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
std::pair< MachineInstr *, MachineInstr * > expandMovDPP64(MachineInstr &MI) const
Register insertEQ(MachineBasicBlock *MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register SrcReg, int Value) const
static bool isSOPC(const MachineInstr &MI)
static bool isFLAT(const MachineInstr &MI)
static bool isVALU(const MachineInstr &MI)
bool isBarrier(unsigned Opcode) const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx0, unsigned OpIdx1) const override
int pseudoToMCOpcode(int Opcode) const
Return a target-specific opcode if Opcode is a pseudo instruction.
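For illustration (helper name invented): pseudoToMCOpcode returns -1 when the pseudo has no MC equivalent on the current subtarget, so a legality check reduces to:

// Hypothetical helper: does Opcode have a real hardware encoding here?
static bool hasMCEquivalent(const llvm::SIInstrInfo &TII, unsigned Opcode) {
  return TII.pseudoToMCOpcode(Opcode) != -1;
}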
const MCInstrDesc & getMCOpcodeFromPseudo(unsigned Opcode) const
Return the descriptor of the target-specific machine instruction that corresponds to the specified ps...
bool isLegalGFX12PlusPackedMathFP32Operand(const MachineRegisterInfo &MRI, const MachineInstr &MI, unsigned SrcN, const MachineOperand *MO=nullptr) const
Check if MO would be a legal operand for gfx12+ packed math FP32 instructions.
static bool usesVM_CNT(const MachineInstr &MI)
MachineInstr * createPHIDestinationCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, Register Dst) const override
static bool isFixedSize(const MachineInstr &MI)
bool isSafeToSink(MachineInstr &MI, MachineBasicBlock *SuccToSinkTo, MachineCycleInfo *CI) const override
LLVM_READONLY int commuteOpcode(unsigned Opc) const
uint64_t getScratchRsrcWords23() const
LLVM_READONLY MachineOperand * getNamedOperand(MachineInstr &MI, AMDGPU::OpName OperandName) const
Returns the operand named Op.
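A sketch of the named-operand API (the helper below is invented for illustration): getNamedOperand returns nullptr when the opcode has no such operand, so callers must check before dereferencing.

// Hypothetical helper: read an instruction's immediate "offset" operand,
// defaulting to 0 when the opcode has no such operand.
static int64_t getOffsetOrZero(const llvm::SIInstrInfo &TII,
                               llvm::MachineInstr &MI) {
  if (const llvm::MachineOperand *Off =
          TII.getNamedOperand(MI, llvm::AMDGPU::OpName::offset))
    return Off->getImm();
  return 0;
}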
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
bool isOperandLegal(const MachineInstr &MI, unsigned OpIdx, const MachineOperand *MO=nullptr) const
Check if MO would be a legal operand if it were the OpIdx operand for MI.
static bool isLDSDMA(const MachineInstr &MI)
static bool isVOP1(const MachineInstr &MI)
SIInstrInfo(const GCNSubtarget &ST)
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool hasAnyModifiersSet(const MachineInstr &MI) const
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
Register getLongBranchReservedReg() const
bool isWholeWaveFunction() const
Register getStackPtrOffsetReg() const
unsigned getMaxMemoryClusterDWords() const
void setHasSpilledVGPRs(bool Spill=true)
bool isWWMReg(Register Reg) const
bool checkFlag(Register Reg, uint8_t Flag) const
void setHasSpilledSGPRs(bool Spill=true)
static unsigned getSubRegFromChannel(unsigned Channel, unsigned NumRegs=1)
ArrayRef< int16_t > getRegSplitParts(const TargetRegisterClass *RC, unsigned EltSize) const
unsigned getHWRegIndex(MCRegister Reg) const
unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override
unsigned getChannelFromSubReg(unsigned SubReg) const
static bool isAGPRClass(const TargetRegisterClass *RC)
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
MachineFunction & MF
Machine function.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
SlotIndex - An opaque wrapper around machine indexes.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
SlotIndex insertMachineInstrInMaps(MachineInstr &MI, bool Late=false)
Insert the given machine instruction into the mapping.
Implements a dense probed hash-table based set with some number of buckets stored inline.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual MachineInstr * createPHIDestinationCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, Register Dst) const
During PHI elimination, lets the target make necessary checks and insert the copy to the PHI destinati...
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const
Re-issue the specified 'original' instruction at the specific location targeting a new destination re...
virtual MachineInstr * createPHISourceCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const
During PHI elimination, lets the target make necessary checks and insert the copy to the PHI destinati...
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool isGlobalMemoryObject(const MachineInstr *MI) const
Returns true if MI is an instruction we are unable to reason about (like a call or something with unm...
virtual bool expandPostRAPseudo(MachineInstr &MI) const
This function is called for all pseudo instructions that remain after register allocation.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
std::pair< iterator, bool > insert(const ValueT &V)
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
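To make the address-space enum concrete, a hedged sketch (predicate invented) of the classification pattern used by queries such as mayAccessLDSThroughFlat:

#include "llvm/CodeGen/MachineMemOperand.h"
// Hypothetical predicate: does this memory operand touch LDS (local) or
// GDS (region) memory? Assumes the AMDGPU target headers are available.
static bool isLocalOrRegionAccess(const llvm::MachineMemOperand &MMO) {
  unsigned AS = MMO.getAddrSpace();
  return AS == llvm::AMDGPUAS::LOCAL_ADDRESS ||
         AS == llvm::AMDGPUAS::REGION_ADDRESS;
}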
unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst)
bool isPackedFP32Inst(unsigned Opc)
bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi)
const uint64_t RSRC_DATA_FORMAT
LLVM_READONLY int getBasicFromSDWAOp(uint16_t Opcode)
LLVM_READONLY const MIMGInfo * getMIMGInfo(unsigned Opc)
bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi)
LLVM_READONLY int getVOPe32(uint16_t Opcode)
bool getWMMAIsXDL(unsigned Opc)
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc)
bool isInlinableLiteralV2I16(uint32_t Literal)
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
bool isInlinableLiteralV2BF16(uint32_t Literal)
LLVM_READONLY int getFlatScratchInstSVfromSS(uint16_t Opcode)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
bool isGFX12Plus(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2F16(uint32_t Literal)
LLVM_READONLY int getGlobalVaddrOp(uint16_t Opcode)
bool isValid32BitLiteral(uint64_t Val, bool IsFP64)
LLVM_READNONE bool isLegalDPALU_DPPControl(const MCSubtargetInfo &ST, unsigned DC)
bool getMAIIsGFX940XDL(unsigned Opc)
const uint64_t RSRC_ELEMENT_SIZE_SHIFT
LLVM_READONLY int getAddr64Inst(uint16_t Opcode)
bool isIntrinsicAlwaysUniform(unsigned IntrID)
LLVM_READONLY int getMFMAEarlyClobberOp(uint16_t Opcode)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
LLVM_READONLY const MIMGDimInfo * getMIMGDimInfoByEncoding(uint8_t DimEnc)
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
const uint64_t RSRC_TID_ENABLE
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
constexpr bool isSISrcOperand(const MCOperandInfo &OpInfo)
Is this an AMDGPU-specific source operand?
bool isGenericAtomic(unsigned Opc)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the inline constants intended for floating-point values.
LLVM_READONLY int getCommuteRev(uint16_t Opcode)
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
@ OPERAND_REG_INLINE_C_FP64
@ OPERAND_REG_INLINE_C_BF16
@ OPERAND_REG_INLINE_C_V2BF16
@ OPERAND_REG_IMM_V2INT16
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
@ OPERAND_REG_INLINE_C_INT64
@ OPERAND_REG_INLINE_C_INT16
Operands with register or inline constant.
@ OPERAND_REG_IMM_NOINLINE_V2FP16
@ OPERAND_REG_INLINE_C_V2FP16
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
@ OPERAND_REG_INLINE_AC_FP32
@ OPERAND_REG_IMM_V2INT32
@ OPERAND_REG_INLINE_C_FP32
@ OPERAND_REG_INLINE_C_INT32
@ OPERAND_REG_INLINE_C_V2INT16
@ OPERAND_INLINE_C_AV64_PSEUDO
@ OPERAND_REG_INLINE_AC_FP64
@ OPERAND_REG_INLINE_C_FP16
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
LLVM_READONLY int getCommuteOrig(uint16_t Opcode)
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
bool isGFX1250(const MCSubtargetInfo &STI)
int getMCOpcode(uint16_t Opcode, unsigned Gen)
bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode)
const uint64_t RSRC_INDEX_STRIDE_SHIFT
LLVM_READONLY const MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi)
LLVM_READNONE constexpr bool isGraphics(CallingConv::ID CC)
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable.
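A short sketch under stated assumptions (helper name invented; HasInv2Pi is true on subtargets that support the 1/(2*pi) inline constant): deciding whether a 64-bit immediate needs a literal slot.

#include "Utils/AMDGPUBaseInfo.h"
// Hypothetical helper: true if Imm cannot be encoded as an inline constant
// and would therefore consume a literal in the instruction encoding.
static bool needsLiteralSlot(int64_t Imm, bool HasInv2Pi) {
  return !llvm::AMDGPU::isInlinableLiteral64(Imm, HasInv2Pi);
}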
LLVM_READONLY int getIfAddr64Inst(uint16_t Opcode)
Check if Opcode is an Addr64 opcode.
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
Not(const Pred &P) -> Not< Pred >
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
LLVM_ABI void finalizeBundle(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
finalizeBundle - Finalize a machine instruction bundle which includes a sequence of instructions star...
TargetInstrInfo::RegSubRegPair getRegSubRegPair(const MachineOperand &O)
Create RegSubRegPair from a register MachineOperand.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
constexpr uint64_t maxUIntN(uint64_t N)
Gets the maximum value for a N-bit unsigned integer.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
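A minimal BuildMI sketch (context and helper invented for illustration): materializing a 32-bit immediate into a VGPR with V_MOV_B32_e32.

#include "llvm/CodeGen/MachineInstrBuilder.h"
// Hypothetical helper: emit "v_mov_b32 DstReg, Imm" before iterator I.
static void emitMovImm(llvm::MachineBasicBlock &MBB,
                       llvm::MachineBasicBlock::iterator I,
                       const llvm::DebugLoc &DL, const llvm::SIInstrInfo &TII,
                       llvm::Register DstReg, int32_t Imm) {
  llvm::BuildMI(MBB, I, DL, TII.get(llvm::AMDGPU::V_MOV_B32_e32), DstReg)
      .addImm(Imm);
}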
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
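Worked values for the integer-width helpers above (illustrative only):

#include "llvm/Support/MathExtras.h"
#include <cassert>
void widthExamples() {
  assert(llvm::maxUIntN(8) == 255); // largest 8-bit unsigned value
  assert(llvm::isInt<13>(-4096));   // signed 13-bit range is [-4096, 4095]
  assert(!llvm::isInt<13>(4096));   // one past the signed 13-bit maximum
}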
bool execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI, Register VReg, const MachineInstr &DefMI, const MachineInstr &UseMI)
Return false if EXEC is not changed between the def of VReg at DefMI and the use at UseMI.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
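A hedged usage sketch of enumerate (helper invented): pairing each operand with its index.

#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineInstr.h"
// Hypothetical helper: index of the first register operand, or -1 if none.
static int firstRegOperandIdx(const llvm::MachineInstr &MI) {
  for (auto [Idx, MO] : llvm::enumerate(MI.operands()))
    if (MO.isReg())
      return static_cast<int>(Idx);
  return -1;
}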
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value that is Skew mod Align.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant, stopping at the first 1.
TargetInstrInfo::RegSubRegPair getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg)
Return the SubReg component from REG_SEQUENCE.
static const MachineMemOperand::Flags MONoClobber
Mark the MMO of a uniform load if there are no potentially clobbering stores on any path from the sta...
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
auto reverse(ContainerTy &&C)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
MachineInstr * getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P, MachineRegisterInfo &MRI)
Return the defining instruction for a given reg:subreg pair, skipping copy-like instructions and subre...
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
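Worked example for the 64-bit split helpers Hi_32/Lo_32 (values illustrative), the pattern used when expanding a 64-bit move into two 32-bit moves:

#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
void splitExample() {
  uint64_t Imm = 0x123456789ABCDEF0ULL;
  assert(llvm::Lo_32(Imm) == 0x9ABCDEF0u); // low half
  assert(llvm::Hi_32(Imm) == 0x12345678u); // high half
}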
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
static const MachineMemOperand::Flags MOCooperative
Mark the MMO of cooperative load/store atomics.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
unsigned getUndefRegState(bool B)
@ Xor
Bitwise or logical XOR of integers.
@ Sub
Subtraction of integers.
unsigned getKillRegState(bool B)
bool isTargetSpecificOpcode(unsigned Opcode)
Check whether the given Opcode is a target-specific opcode.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned DefaultMemoryClusterDWordsLimit
constexpr unsigned BitWidth
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
static const MachineMemOperand::Flags MOLastUse
Mark the MMO of a load as the last use.
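A sketch (predicate invented) of testing the target-specific MMO flags listed in this index (MONoClobber, MOCooperative, MOLastUse), which are declared alongside SIInstrInfo:

#include "llvm/CodeGen/MachineMemOperand.h"
// Hypothetical predicate: is this a load whose MMO was marked as the
// last use of the loaded data?
static bool isLastUseLoad(const llvm::MachineMemOperand &MMO) {
  return MMO.isLoad() && (MMO.getFlags() & llvm::MOLastUse);
}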
constexpr T reverseBits(T Val)
Reverse the bits in Val.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
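Worked example combining maskTrailingOnes and SignExtend64, e.g. when decoding a signed offset field from an encoded instruction word (widths illustrative):

#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
void fieldExample() {
  uint64_t Field = llvm::maskTrailingOnes<uint64_t>(13); // 0x1FFF
  assert(llvm::SignExtend64(Field, 13) == -1);           // all-ones => -1
}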
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
InstructionUniformity
Enum describing how instructions behave with respect to uniformity and divergence,...
@ AlwaysUniform
The result values are always uniform.
@ NeverUniform
The result values can never be assumed to be uniform.
@ Default
The result values are uniform if and only if all operands are uniform.
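A hedged sketch (helper invented) of consuming the classification returned by the getInstructionUniformity override listed above:

#include "llvm/ADT/Uniformity.h"
#include "llvm/Support/ErrorHandling.h"
// Hypothetical helper: conservatively treat Default as possibly divergent,
// since Default is uniform only when all operands are uniform.
static bool mayBeDivergent(const llvm::SIInstrInfo &TII,
                           const llvm::MachineInstr &MI) {
  switch (TII.getInstructionUniformity(MI)) {
  case llvm::InstructionUniformity::AlwaysUniform:
    return false;
  case llvm::InstructionUniformity::NeverUniform:
    return true;
  case llvm::InstructionUniformity::Default:
    return true;
  }
  llvm_unreachable("covered switch");
}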
GenericCycleInfo< MachineSSAContext > MachineCycleInfo
MachineCycleInfo::CycleT MachineCycle
int popcount(T Value) noexcept
Count the number of set bits in a value.
bool execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI, Register VReg, const MachineInstr &DefMI)
Return false if EXEC is not changed between the def of VReg at DefMI and all its uses.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static LLVM_ABI Semantics SemanticsToEnum(const llvm::fltSemantics &Sem)
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
SparseBitVector AliveBlocks
AliveBlocks - Set of blocks in which this value is alive completely through.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Utility to store machine instructions worklist.
MachineInstr * top() const
bool isDeferred(MachineInstr *MI)
SetVector< MachineInstr * > & getDeferredList()
void insert(MachineInstr *MI)
A pair composed of a register and a sub-register index.