21#include "llvm/IR/IntrinsicsAMDGPU.h"
22#include "llvm/IR/IntrinsicsR600.h"
32#define GET_INSTRINFO_NAMED_OPS
33#define GET_INSTRMAP_INFO
34#include "AMDGPUGenInstrInfo.inc"
39 llvm::cl::desc(
"Set default AMDHSA Code Object Version (module flag "
40 "or asm directive still take priority if present)"));
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  unsigned Mask = getBitMask(Shift, Width);
  return ((Src << Shift) & Mask) | (Dst & ~Mask);
}

unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
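// Illustrative note (added, not part of the upstream source): these three
// helpers treat an unsigned word as a collection of bit-fields.  For example,
// with Shift = 4 and Width = 3:
//   getBitMask(4, 3)               == 0b0111'0000
//   packBits(5, 0, 4, 3)           == 0b0101'0000  // store 5 into the field
//   unpackBits(0b0101'0000, 4, 3)  == 5            // read it back
// Every waitcnt/depctr encode/decode routine below is built on top of them.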
unsigned getVmcntBitShiftLo(unsigned VersionMajor) {

unsigned getVmcntBitWidthLo(unsigned VersionMajor) {

unsigned getExpcntBitShift(unsigned VersionMajor) {

unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }

unsigned getLgkmcntBitShift(unsigned VersionMajor) {

unsigned getLgkmcntBitWidth(unsigned VersionMajor) {

unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }

unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
  return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
}

unsigned getLoadcntBitWidth(unsigned VersionMajor) {

unsigned getSamplecntBitWidth(unsigned VersionMajor) {

unsigned getBvhcntBitWidth(unsigned VersionMajor) {

unsigned getDscntBitWidth(unsigned VersionMajor) {

unsigned getDscntBitShift(unsigned VersionMajor) { return 0; }

unsigned getStorecntBitWidth(unsigned VersionMajor) {

unsigned getKmcntBitWidth(unsigned VersionMajor) {

unsigned getXcntBitWidth(unsigned VersionMajor, unsigned VersionMinor) {

unsigned getLoadcntStorecntBitShift(unsigned VersionMajor) {

inline unsigned getVaSdstBitWidth() { return 3; }
inline unsigned getVaSdstBitShift() { return 9; }

inline unsigned getVmVsrcBitWidth() { return 3; }
inline unsigned getVmVsrcBitShift() { return 2; }

inline unsigned getVaVdstBitWidth() { return 4; }
inline unsigned getVaVdstBitShift() { return 12; }

inline unsigned getVaVccBitWidth() { return 1; }
inline unsigned getVaVccBitShift() { return 1; }

inline unsigned getSaSdstBitWidth() { return 1; }
inline unsigned getSaSdstBitShift() { return 0; }

inline unsigned getVaSsrcBitWidth() { return 1; }
inline unsigned getVaSsrcBitShift() { return 8; }

inline unsigned getHoldCntWidth(unsigned VersionMajor, unsigned VersionMinor) {
  static constexpr const unsigned MinMajor = 10;
  static constexpr const unsigned MinMinor = 3;
  return std::tie(VersionMajor, VersionMinor) >= std::tie(MinMajor, MinMinor)

inline unsigned getHoldCntBitShift() { return 7; }
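// Layout summary (added for readability; derived from the shift/width
// constants above).  The depctr fields handled by the encode/decodeField*
// helpers further below occupy the following bits of the encoded operand:
//   [0]      sa_sdst   (width 1)
//   [1]      va_vcc    (width 1)
//   [4:2]    vm_vsrc   (width 3)
//   [7]      hold_cnt  (width per getHoldCntWidth; gfx10.3+ only)
//   [8]      va_ssrc   (width 1)
//   [11:9]   va_sdst   (width 3)
//   [15:12]  va_vdst   (width 4)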
          M.getModuleFlag("amdhsa_code_object_version"))) {
    return (unsigned)Ver->getZExtValue() / 100;

  switch (ABIVersion) {

  switch (CodeObjectVersion) {
                     Twine(CodeObjectVersion));

  switch (CodeObjectVersion) {

  switch (CodeObjectVersion) {

  switch (CodeObjectVersion) {

  switch (CodeObjectVersion) {

#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
#define GET_MIMGBiasMappingTable_IMPL
#define GET_MIMGOffsetMappingTable_IMPL
#define GET_MIMGG16MappingTable_IMPL
#define GET_MAIInstInfoTable_IMPL
#define GET_WMMAInstInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"
                  unsigned VDataDwords, unsigned VAddrDwords) {
      getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;

  return NewInfo ? NewInfo->Opcode : -1;

               bool IsG16Supported) {
    AddrWords += AddrComponents;

  if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)

#define GET_FP4FP8DstByteSelTable_DECL
#define GET_FP4FP8DstByteSelTable_IMPL
#define GET_DPMACCInstructionTable_DECL
#define GET_DPMACCInstructionTable_IMPL
#define GET_MTBUFInfoTable_DECL
#define GET_MTBUFInfoTable_IMPL
#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#define GET_SMInfoTable_DECL
#define GET_SMInfoTable_IMPL
#define GET_VOP1InfoTable_DECL
#define GET_VOP1InfoTable_IMPL
#define GET_VOP2InfoTable_DECL
#define GET_VOP2InfoTable_IMPL
#define GET_VOP3InfoTable_DECL
#define GET_VOP3InfoTable_IMPL
#define GET_VOPC64DPPTable_DECL
#define GET_VOPC64DPPTable_IMPL
#define GET_VOPC64DPP8Table_DECL
#define GET_VOPC64DPP8Table_IMPL
#define GET_VOPCAsmOnlyInfoTable_DECL
#define GET_VOPCAsmOnlyInfoTable_IMPL
#define GET_VOP3CAsmOnlyInfoTable_DECL
#define GET_VOP3CAsmOnlyInfoTable_IMPL
#define GET_VOPDComponentTable_DECL
#define GET_VOPDComponentTable_IMPL
#define GET_VOPDPairs_DECL
#define GET_VOPDPairs_IMPL
#define GET_VOPTrue16Table_DECL
#define GET_VOPTrue16Table_IMPL
#define GET_True16D16Table_IMPL
#define GET_WMMAOpcode2AddrMappingTable_DECL
#define GET_WMMAOpcode2AddrMappingTable_IMPL
#define GET_WMMAOpcode3AddrMappingTable_DECL
#define GET_WMMAOpcode3AddrMappingTable_IMPL
#define GET_getMFMA_F8F6F4_WithSize_DECL
#define GET_getMFMA_F8F6F4_WithSize_IMPL
#define GET_isMFMA_F8F6F4Table_IMPL
#define GET_isCvtScaleF32_F32F16ToF8F4Table_IMPL
#include "AMDGPUGenSearchableTables.inc"
  return Info ? Info->BaseOpcode : -1;

      getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;

  return Info ? Info->elements : 0;

  return Info && Info->has_vaddr;

  return Info && Info->has_srsrc;

  return Info && Info->has_soffset;

  return Info ? Info->BaseOpcode : -1;

      getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;

  return Info ? Info->elements : 0;

  return Info && Info->has_vaddr;

  return Info && Info->has_srsrc;

  return Info && Info->has_soffset;

  return Info && Info->IsBufferInv;

  return Info && Info->tfe;

  const SMInfo *Info = getSMEMOpcodeHelper(Opc);
  return Info && Info->IsBuffer;

  const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
  return !Info || Info->IsSingle;

  const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
  return !Info || Info->IsSingle;

  const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
  return !Info || Info->IsSingle;

  return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);

  return Info && Info->is_dgemm;

  return Info && Info->is_gfx940_xdl;

  return Info ? Info->is_wmma_xdl : false;

  switch (EncodingVal) {

                                unsigned F8F8Opcode) {
  return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);

                                unsigned F8F8Opcode) {
  return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);

  if (ST.hasFeature(AMDGPU::FeatureGFX13Insts))
  if (ST.hasFeature(AMDGPU::FeatureGFX1250Insts))
  if (ST.hasFeature(AMDGPU::FeatureGFX12Insts))
  if (ST.hasFeature(AMDGPU::FeatureGFX11_7Insts))
  if (ST.hasFeature(AMDGPU::FeatureGFX11Insts))

  Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
                                  EncodingFamily, VOPD3) != -1;
    CanBeVOPDX = Info->CanBeVOPDX;
                                  EncodingFamily, VOPD3) != -1;
  return {CanBeVOPDX, CanBeVOPDY};

  return {false, false};

  Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
  return Info ? Info->VOPDOp : ~0u;
  return Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
         Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
         Opc == AMDGPU::V_MAC_F32_e64_vi ||
         Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
         Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
         Opc == AMDGPU::V_MAC_F16_e64_vi ||
         Opc == AMDGPU::V_FMAC_F64_e64_gfx90a ||
         Opc == AMDGPU::V_FMAC_F64_e64_gfx12 ||
         Opc == AMDGPU::V_FMAC_F64_e64_gfx13 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx12 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx13 ||
         Opc == AMDGPU::V_FMAC_F32_e64_vi ||
         Opc == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_DX9_ZERO_F32_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F16_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx12 ||
         Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx12 ||
         Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx13 ||
         Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx13 ||
         Opc == AMDGPU::V_DOT2C_F32_F16_e64_vi ||
         Opc == AMDGPU::V_DOT2C_F32_BF16_e64_vi ||
         Opc == AMDGPU::V_DOT2C_I32_I16_e64_vi ||
         Opc == AMDGPU::V_DOT4C_I32_I8_e64_vi ||
         Opc == AMDGPU::V_DOT8C_I32_I4_e64_vi;
  return Opc == AMDGPU::V_PERMLANE16_B32_gfx10 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_gfx10 ||
         Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx11 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx11 ||
         Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx12 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx12 ||
         Opc == AMDGPU::V_PERMLANE16_VAR_B32_e64_gfx12 ||
         Opc == AMDGPU::V_PERMLANEX16_VAR_B32_e64_gfx12;

  return Opc == AMDGPU::V_CVT_F32_BF8_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp8_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp8_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_BF8_fake16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_FP8_fake16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_BF8_t16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_FP8_t16_e64_gfx12;

  return Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB_CLAMP_U32 ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_COND_SUB_U32 ||
         Opc == AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG;

  return Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_SADDR_gfx1250;

  return Opc == TENSOR_STORE_FROM_LDS_d2_gfx1250 ||
         Opc == TENSOR_STORE_FROM_LDS_d4_gfx1250;
  return Info && Info->IsTrue16;

  if (Info->HasFP8DstByteSel)
  if (Info->HasFP4DstByteSel)

  return Info && Info->IsDPMACCInstruction;

  return Info ? Info->Opcode3Addr : ~0u;

  return Info ? Info->Opcode2Addr : ~0u;

  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));

  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::V_XNOR_B32_e32:
int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily,
  bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(OpY) : 0;
  OpY = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : OpY;
      getVOPDInfoFromComponentOpcodes(OpX, OpY, EncodingFamily, VOPD3);
  return Info ? Info->Opcode : -1;

  const VOPDInfo *Info = getVOPDOpcodeHelper(VOPDOpcode);
  const auto *OpX = getVOPDBaseFromComponent(Info->OpX);
  const auto *OpY = getVOPDBaseFromComponent(Info->OpY);
  return {OpX->BaseVOP, OpY->BaseVOP};
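// Added note: a VOPD encoding pairs an X and a Y VALU component in a single
// instruction.  getVOPDFull() above maps a pair of component opcodes (plus
// the encoding family and the VOPD3 flag) to the fused opcode via the
// generated VOPDPairs table, returning -1 when no legal pairing exists,
// while the routine that follows recovers the two base VOP opcodes from a
// fused VOPD opcode.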
  HasSrc2Acc = TiedIdx != -1;

  if (Opcode == AMDGPU::V_CNDMASK_B32_e32 ||
      Opcode == AMDGPU::V_CNDMASK_B32_e64) {
          getNamedOperandIdx(Opcode, OpName::src0))) {
    NumVOPD3Mods = SrcOperandsNum;

  for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) {
      MandatoryLiteralIdx = CompOprIdx;

  return getNamedOperandIdx(Opcode, OpName::bitop3);

    std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
                          unsigned BanksMask) -> bool {
    if ((BaseX.id() & BanksMask) == (BaseY.id() & BanksMask))
        ((BaseX.id() + 1) & BanksMask) == (BaseY.id() & BanksMask))
        (BaseX.id() & BanksMask) == ((BaseY.id() + 1) & BanksMask))

    if (!OpXRegs[CompOprIdx] || !OpYRegs[CompOprIdx])
    if (MRI.regsOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx]))
    if (banksOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx], BanksMasks) &&
        OpXRegs[CompOprIdx] != OpYRegs[CompOprIdx]))

InstInfo::getRegIndices(unsigned CompIdx,
                        std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
  const auto &Comp = CompInfo[CompIdx];
  RegIndices[DST] = GetRegIdx(CompIdx, Comp.getIndexOfDstInMCOperands());
    unsigned CompSrcIdx = CompOprIdx - DST_NUM;
        Comp.hasRegSrcOperand(CompSrcIdx)
            ? GetRegIdx(CompIdx,
                        Comp.getIndexOfSrcInMCOperands(CompSrcIdx, VOPD3))

  const auto &OpXDesc = InstrInfo->get(OpX);
  const auto &OpYDesc = InstrInfo->get(OpY);
  if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
  if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))

  std::optional<bool> XnackRequested;
  std::optional<bool> SramEccRequested;

  for (const std::string &Feature : Features.getFeatures()) {
    if (Feature == "+xnack")
      XnackRequested = true;
    else if (Feature == "-xnack")
      XnackRequested = false;
    else if (Feature == "+sramecc")
      SramEccRequested = true;
    else if (Feature == "-sramecc")
      SramEccRequested = false;

  if (XnackRequested) {
    if (XnackSupported) {
      if (*XnackRequested) {
        errs() << "warning: xnack 'On' was requested for a processor that does "
                  "not support it!\n";
        errs() << "warning: xnack 'Off' was requested for a processor that "
                  "does not support it!\n";

  if (SramEccRequested) {
    if (SramEccSupported) {
      if (*SramEccRequested) {
        errs() << "warning: sramecc 'On' was requested for a processor that "
                  "does not support it!\n";
        errs() << "warning: sramecc 'Off' was requested for a processor that "
                  "does not support it!\n";

  TargetID.split(TargetIDSplit, ':');

  for (const auto &FeatureString : TargetIDSplit) {
    if (FeatureString.starts_with("xnack"))
    if (FeatureString.starts_with("sramecc"))

  const Triple &TargetTriple = STI.getTargetTriple();
            << '-' << TargetTriple.getOSName() << '-'

  std::string Processor;
  Processor = STI.getCPU().str();

  std::string Features;
    Features += ":sramecc-";
    Features += ":sramecc+";
    Features += ":xnack-";
    Features += ":xnack+";

  StreamRep << Processor << Features;
                                  unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);

  unsigned MaxBarriers = 16;
  return std::min(MaxWaves / N, MaxBarriers);

                                 unsigned FlatWorkGroupSize) {

                               unsigned FlatWorkGroupSize) {

    return Addressable ? AddressableNumSGPRs : 108;
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);

                            bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;

  return divideCeil(std::max(1u, NumRegs), Granule);

                               unsigned DynamicVGPRBlockSize,
                               std::optional<bool> EnableWavefrontSize32) {
  if (DynamicVGPRBlockSize != 0)
    return DynamicVGPRBlockSize;

  bool IsWave32 = EnableWavefrontSize32
                      ? *EnableWavefrontSize32
    return IsWave32 ? 24 : 12;
    return IsWave32 ? 16 : 8;
  return IsWave32 ? 8 : 4;

                                    std::optional<bool> EnableWavefrontSize32) {
  bool IsWave32 = EnableWavefrontSize32
                      ? *EnableWavefrontSize32
    return IsWave32 ? 16 : 8;
  return IsWave32 ? 8 : 4;

    return IsWave32 ? 1536 : 768;
  return IsWave32 ? 1024 : 512;

  if (Features.test(Feature1024AddressableVGPRs))
    return Features.test(FeatureWavefrontSize32) ? 1024 : 512;

                                      unsigned DynamicVGPRBlockSize) {
  if (Features.test(FeatureGFX90AInsts))
  if (DynamicVGPRBlockSize != 0)

                                  unsigned DynamicVGPRBlockSize) {

                                             unsigned TotalNumVGPRs) {
  if (NumVGPRs < Granule)
  unsigned RoundedRegs = alignTo(NumVGPRs, Granule);
  return std::min(std::max(TotalNumVGPRs / RoundedRegs, 1u), MaxWaves);
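// Worked example (added; the concrete numbers are hypothetical and depend on
// the subtarget queries above): with an allocation granule of 8, a total of
// 512 VGPRs per SIMD and a 16-wave limit, asking for 84 VGPRs rounds up to
// alignTo(84, 8) == 88, so the routine reports
//   min(max(512 / 88, 1), 16) == 5
// waves per EU for that VGPR demand.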
                        unsigned DynamicVGPRBlockSize) {
  if (WavesPerEU >= MaxWavesPerEU)

  unsigned AddrsableNumVGPRs =
  unsigned MaxNumVGPRs = alignDown(TotNumVGPRs / WavesPerEU, Granule);
  if (MaxNumVGPRs == alignDown(TotNumVGPRs / MaxWavesPerEU, Granule))
                             DynamicVGPRBlockSize);
  if (WavesPerEU < MinWavesPerEU)

  unsigned MaxNumVGPRsNext = alignDown(TotNumVGPRs / (WavesPerEU + 1), Granule);
  unsigned MinNumVGPRs = 1 + std::min(MaxNumVGPRs - Granule, MaxNumVGPRsNext);
  return std::min(MinNumVGPRs, AddrsableNumVGPRs);

                        unsigned DynamicVGPRBlockSize) {
  unsigned MaxNumVGPRs =
  unsigned AddressableNumVGPRs =
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);

                                 std::optional<bool> EnableWavefrontSize32) {

                                   unsigned DynamicVGPRBlockSize,
                                   std::optional<bool> EnableWavefrontSize32) {

  return C == 'v' || C == 's' || C == 'a';

  if (RegName.consume_front("[")) {
    unsigned NumRegs = End - Idx + 1;
    return {Kind, Idx, NumRegs};
  return {Kind, Idx, 1};

std::tuple<char, unsigned, unsigned>

std::pair<unsigned, unsigned>
                  std::pair<unsigned, unsigned> Default,
                  bool OnlyFirstRequired) {
  return {Attr->first, Attr->second.value_or(Default.second)};

std::optional<std::pair<unsigned, std::optional<unsigned>>>
                           bool OnlyFirstRequired) {
  if (!A.isStringAttribute())
    return std::nullopt;

  std::pair<unsigned, std::optional<unsigned>> Ints;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return std::nullopt;
  unsigned Second = 0;
  if (Strs.second.trim().getAsInteger(0, Second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return std::nullopt;
  Ints.second = Second;
                                  unsigned DefaultVal) {
  std::optional<SmallVector<unsigned>> R =

std::optional<SmallVector<unsigned>>
    return std::nullopt;
  if (!A.isStringAttribute()) {
    Ctx.emitError(Name + " is not a string attribute");
    return std::nullopt;

    std::pair<StringRef, StringRef> Strs = S.split(',');
    if (Strs.first.trim().getAsInteger(0, IntVal)) {
      Ctx.emitError("can't parse integer attribute " + Strs.first + " in " +
      return std::nullopt;

    Ctx.emitError("attribute " + Name +
                  " has incorrect number of integers; expected " +
    return std::nullopt;

  if (Low.ule(Val) && High.ugt(Val))
  if (Low.uge(Val) && High.ult(Val))

  if (Wait.LoadCnt != ~0u)
    OS << LS << "LoadCnt: " << Wait.LoadCnt;
  if (Wait.ExpCnt != ~0u)
    OS << LS << "ExpCnt: " << Wait.ExpCnt;
  if (Wait.DsCnt != ~0u)
    OS << LS << "DsCnt: " << Wait.DsCnt;
  if (Wait.StoreCnt != ~0u)
    OS << LS << "StoreCnt: " << Wait.StoreCnt;
  if (Wait.SampleCnt != ~0u)
    OS << LS << "SampleCnt: " << Wait.SampleCnt;
  if (Wait.BvhCnt != ~0u)
    OS << LS << "BvhCnt: " << Wait.BvhCnt;
  if (Wait.KmCnt != ~0u)
    OS << LS << "KmCnt: " << Wait.KmCnt;
  if (Wait.XCnt != ~0u)
    OS << LS << "XCnt: " << Wait.XCnt;
  return (1 << (getVmcntBitWidthLo(Version.Major) +
                getVmcntBitWidthHi(Version.Major))) -
         1;

  return (1 << getLoadcntBitWidth(Version.Major)) - 1;

  return (1 << getSamplecntBitWidth(Version.Major)) - 1;

  return (1 << getBvhcntBitWidth(Version.Major)) - 1;

  return (1 << getExpcntBitWidth(Version.Major)) - 1;

  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;

  return (1 << getDscntBitWidth(Version.Major)) - 1;

  return (1 << getKmcntBitWidth(Version.Major)) - 1;

  return (1 << getStorecntBitWidth(Version.Major)) - 1;

  bool HasExtendedWaitCounts = IV.Major >= 12;
  if (HasExtendedWaitCounts) {

  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
                               getExpcntBitWidth(Version.Major));
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
                                getLgkmcntBitWidth(Version.Major));
  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;

  unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
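// Added note, derived from the helpers above: on gfx9 and gfx10 the vmcnt
// field is split, with its low bits in the classic position and two extra
// high bits at bit 14 (getVmcntBitShiftHi/getVmcntBitWidthHi return 14 and 2
// for those targets).  The decode routine above therefore stitches the value
// back together as
//   VmcntLo | (VmcntHi << getVmcntBitWidthLo(Version.Major))
// and the corresponding encode routine below performs the reverse split.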
                    getExpcntBitWidth(Version.Major));

                    getLgkmcntBitWidth(Version.Major));

                   unsigned &Expcnt, unsigned &Lgkmcnt) {

                    getVmcntBitWidthLo(Version.Major));
  return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
                  getVmcntBitShiftHi(Version.Major),
                  getVmcntBitWidthHi(Version.Major));

  return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
                  getExpcntBitWidth(Version.Major));

  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
                  getLgkmcntBitWidth(Version.Major));

                   unsigned Expcnt, unsigned Lgkmcnt) {

  unsigned Dscnt = getBitMask(getDscntBitShift(Version.Major),
                              getDscntBitWidth(Version.Major));
    unsigned Storecnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
                                   getStorecntBitWidth(Version.Major));
    return Dscnt | Storecnt;
  unsigned Loadcnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
                                getLoadcntBitWidth(Version.Major));
  return Dscnt | Loadcnt;
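// Added note: the two returns above build the bit masks for the combined
// waitcnt encodings used with the extended (gfx12+) counters, where a dscnt
// field shares the immediate with either a storecnt or a loadcnt field placed
// at getLoadcntStorecntBitShift(); the decode helpers just below split such a
// combined immediate back into its load/store count and DS_CNT parts.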
                          getLoadcntStorecntBitShift(Version.Major),
                          getLoadcntBitWidth(Version.Major)));
  Decoded.set(DS_CNT, unpackBits(LoadcntDscnt, getDscntBitShift(Version.Major),
                                 getDscntBitWidth(Version.Major)));

                          getLoadcntStorecntBitShift(Version.Major),
                          getStorecntBitWidth(Version.Major)));
  Decoded.set(DS_CNT, unpackBits(StorecntDscnt, getDscntBitShift(Version.Major),
                                 getDscntBitWidth(Version.Major)));

  return packBits(Loadcnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
                  getLoadcntBitWidth(Version.Major));

                            unsigned Storecnt) {
  return packBits(Storecnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
                  getStorecntBitWidth(Version.Major));

                    getDscntBitWidth(Version.Major));

                             unsigned Storecnt, unsigned Dscnt) {
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (Op.isSupported(STI))
      Enc |= Op.encode(Op.Default);

                                          int Size, unsigned Code,
                                          bool &HasNonDefaultVal,
  unsigned UsedOprMask = 0;
  HasNonDefaultVal = false;
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (!Op.isSupported(STI))
    UsedOprMask |= Op.getMask();
    unsigned Val = Op.decode(Code);
    if (!Op.isValid(Val))
    HasNonDefaultVal |= (Val != Op.Default);
  return (Code & ~UsedOprMask) == 0;

                                unsigned Code, int &Idx, StringRef &Name,
                                unsigned &Val, bool &IsDefault,
  while (Idx < Size) {
    const auto &Op = Opr[Idx++];
    if (Op.isSupported(STI)) {
      Val = Op.decode(Code);
      IsDefault = (Val == Op.Default);

  if (InputVal < 0 || InputVal > Op.Max)
  return Op.encode(InputVal);

                              unsigned &UsedOprMask,
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (Op.Name == Name) {
      if (!Op.isSupported(STI)) {
      auto OprMask = Op.getMask();
      if (OprMask & UsedOprMask)
      UsedOprMask |= OprMask;

                                       HasNonDefaultVal, STI);

  return unpackBits(Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());

  return unpackBits(Encoded, getVaVdstBitShift(), getVaVdstBitWidth());

  return unpackBits(Encoded, getSaSdstBitShift(), getSaSdstBitWidth());

  return unpackBits(Encoded, getVaSdstBitShift(), getVaSdstBitWidth());

  return unpackBits(Encoded, getVaVccBitShift(), getVaVccBitWidth());

  return unpackBits(Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());

  return unpackBits(Encoded, getHoldCntBitShift(),

  return packBits(VmVsrc, Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());

  return packBits(VaVdst, Encoded, getVaVdstBitShift(), getVaVdstBitWidth());

  return packBits(SaSdst, Encoded, getSaSdstBitShift(), getSaSdstBitWidth());

  return packBits(VaSdst, Encoded, getVaSdstBitShift(), getVaSdstBitWidth());

  return packBits(VaVcc, Encoded, getVaVccBitShift(), getVaVccBitWidth());

  return packBits(VaSsrc, Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());

  return packBits(HoldCnt, Encoded, getHoldCntBitShift(),
    if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
      Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);

    if (Val.MaxIndex == 0 && Name == Val.Name)
    if (Val.MaxIndex > 0 && Name.starts_with(Val.Name)) {
      StringRef Suffix = Name.drop_front(Val.Name.size());
      if (Suffix.size() > 1 && Suffix[0] == '0')
      return Val.Tgt + Id;

namespace MTBUFFormat {

    if (Name == lookupTable[Id])

  return F.getFnAttributeAsParsedInteger("InitialPSInputAddr", 0);

  return F.getFnAttributeAsParsedInteger(
      "amdgpu-color-export",

  return F.getFnAttributeAsParsedInteger("amdgpu-depth-export", 0) != 0;

      F.getFnAttributeAsParsedInteger("amdgpu-dynamic-vgpr-block-size", 0);
  return STI.hasFeature(AMDGPU::FeatureSRAMECC);

  return STI.hasFeature(AMDGPU::FeatureMIMG_R128) &&

  return !STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) && !isCI(STI) &&

  return Version.Minor >= 3 ? 13 : 5;

  return HasSampler ? 4 : 5;

  return STI.hasFeature(AMDGPU::FeatureSouthernIslands);

  return STI.hasFeature(AMDGPU::FeatureSeaIslands);

  return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);

  return STI.hasFeature(AMDGPU::FeatureGCN3Encoding);

  return STI.hasFeature(AMDGPU::FeatureGFX10_AEncoding);

  return STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding);

  return STI.hasFeature(AMDGPU::FeatureGFX10_3Insts);

  return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);

  return STI.hasFeature(AMDGPU::FeatureGFX940Insts);

  return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);

  return STI.hasFeature(AMDGPU::FeatureMAIInsts);

  return STI.hasFeature(AMDGPU::FeatureVOPDInsts);

  return STI.hasFeature(AMDGPU::FeatureDPPSrc1SGPR);

  return STI.hasFeature(AMDGPU::FeatureKernargPreload);

                            int32_t ArgNumVGPR) {
  if (has90AInsts && ArgNumAGPR)
    return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
  return std::max(ArgNumVGPR, ArgNumAGPR);
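// Added note on the function above: when gfx90a-style instructions are
// present and AGPRs are actually used, the AGPRs are accounted for after the
// VGPRs, with the VGPR count first rounded up to a multiple of 4; otherwise
// the two counts are independent and only the larger one matters.  For
// example (hypothetical inputs), ArgNumVGPR = 6 and ArgNumAGPR = 4 gives
// alignTo(6, 4) + 4 == 12 with 90A instructions, and max(6, 4) == 6 without.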
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||

#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch (Reg.id()) { \
    CASE_CI_VI(FLAT_SCR) \
    CASE_CI_VI(FLAT_SCR_LO) \
    CASE_CI_VI(FLAT_SCR_HI) \
    CASE_VI_GFX9PLUS(TTMP0) \
    CASE_VI_GFX9PLUS(TTMP1) \
    CASE_VI_GFX9PLUS(TTMP2) \
    CASE_VI_GFX9PLUS(TTMP3) \
    CASE_VI_GFX9PLUS(TTMP4) \
    CASE_VI_GFX9PLUS(TTMP5) \
    CASE_VI_GFX9PLUS(TTMP6) \
    CASE_VI_GFX9PLUS(TTMP7) \
    CASE_VI_GFX9PLUS(TTMP8) \
    CASE_VI_GFX9PLUS(TTMP9) \
    CASE_VI_GFX9PLUS(TTMP10) \
    CASE_VI_GFX9PLUS(TTMP11) \
    CASE_VI_GFX9PLUS(TTMP12) \
    CASE_VI_GFX9PLUS(TTMP13) \
    CASE_VI_GFX9PLUS(TTMP14) \
    CASE_VI_GFX9PLUS(TTMP15) \
    CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
    CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
    CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
    CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
    CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
    CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
    CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
    CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
    CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
    CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
    CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
    CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
    CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
    CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
    CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
        TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
    CASE_GFXPRE11_GFX11PLUS(M0) \
    CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
    CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \

#define CASE_CI_VI(node) \
  assert(!isSI(STI)); \
    return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9PLUS(node) \
    return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;

#define CASE_GFXPRE11_GFX11PLUS(node) \
    return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;

#define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
    return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;

#undef CASE_VI_GFX9PLUS
#undef CASE_GFXPRE11_GFX11PLUS
#undef CASE_GFXPRE11_GFX11PLUS_TO

#define CASE_CI_VI(node) \

#define CASE_VI_GFX9PLUS(node) \
  case node##_gfx9plus: \

#define CASE_GFXPRE11_GFX11PLUS(node) \
  case node##_gfx11plus: \
  case node##_gfxpre11: \

#define CASE_GFXPRE11_GFX11PLUS_TO(node, result)
  case AMDGPU::SRC_SHARED_BASE_LO:
  case AMDGPU::SRC_SHARED_BASE:
  case AMDGPU::SRC_SHARED_LIMIT_LO:
  case AMDGPU::SRC_SHARED_LIMIT:
  case AMDGPU::SRC_PRIVATE_BASE_LO:
  case AMDGPU::SRC_PRIVATE_BASE:
  case AMDGPU::SRC_PRIVATE_LIMIT_LO:
  case AMDGPU::SRC_PRIVATE_LIMIT:
  case AMDGPU::SRC_FLAT_SCRATCH_BASE_LO:
  case AMDGPU::SRC_FLAT_SCRATCH_BASE_HI:
  case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
  case AMDGPU::SRC_VCCZ:
  case AMDGPU::SRC_EXECZ:
  case AMDGPU::SRC_SCC:
  case AMDGPU::SGPR_NULL:

#undef CASE_VI_GFX9PLUS
#undef CASE_GFXPRE11_GFX11PLUS
#undef CASE_GFXPRE11_GFX11PLUS_TO
  unsigned OpType = Desc.operands()[OpNo].OperandType;

  unsigned OpType = Desc.operands()[OpNo].OperandType;

  unsigned OpType = Desc.operands()[OpNo].OperandType;

  case AMDGPU::VGPR_16RegClassID:
  case AMDGPU::VGPR_16_Lo128RegClassID:
  case AMDGPU::SGPR_LO16RegClassID:
  case AMDGPU::AGPR_LO16RegClassID:

  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VGPR_32_Lo256RegClassID:
  case AMDGPU::VRegOrLds_32RegClassID:
  case AMDGPU::AGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::AV_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
  case AMDGPU::SRegOrLds_32RegClassID:

  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
  case AMDGPU::AReg_64RegClassID:
  case AMDGPU::SReg_64_XEXECRegClassID:
  case AMDGPU::VReg_64_Align2RegClassID:
  case AMDGPU::AReg_64_Align2RegClassID:
  case AMDGPU::AV_64RegClassID:
  case AMDGPU::AV_64_Align2RegClassID:
  case AMDGPU::VReg_64_Lo256_Align2RegClassID:
  case AMDGPU::VS_64_Lo256RegClassID:

  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::SReg_96RegClassID:
  case AMDGPU::VReg_96RegClassID:
  case AMDGPU::AReg_96RegClassID:
  case AMDGPU::VReg_96_Align2RegClassID:
  case AMDGPU::AReg_96_Align2RegClassID:
  case AMDGPU::AV_96RegClassID:
  case AMDGPU::AV_96_Align2RegClassID:
  case AMDGPU::VReg_96_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
  case AMDGPU::AReg_128RegClassID:
  case AMDGPU::VReg_128_Align2RegClassID:
  case AMDGPU::AReg_128_Align2RegClassID:
  case AMDGPU::AV_128RegClassID:
  case AMDGPU::AV_128_Align2RegClassID:
  case AMDGPU::SReg_128_XNULLRegClassID:
  case AMDGPU::VReg_128_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_160RegClassID:
  case AMDGPU::SReg_160RegClassID:
  case AMDGPU::VReg_160RegClassID:
  case AMDGPU::AReg_160RegClassID:
  case AMDGPU::VReg_160_Align2RegClassID:
  case AMDGPU::AReg_160_Align2RegClassID:
  case AMDGPU::AV_160RegClassID:
  case AMDGPU::AV_160_Align2RegClassID:
  case AMDGPU::VReg_160_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_192RegClassID:
  case AMDGPU::SReg_192RegClassID:
  case AMDGPU::VReg_192RegClassID:
  case AMDGPU::AReg_192RegClassID:
  case AMDGPU::VReg_192_Align2RegClassID:
  case AMDGPU::AReg_192_Align2RegClassID:
  case AMDGPU::AV_192RegClassID:
  case AMDGPU::AV_192_Align2RegClassID:
  case AMDGPU::VReg_192_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_224RegClassID:
  case AMDGPU::SReg_224RegClassID:
  case AMDGPU::VReg_224RegClassID:
  case AMDGPU::AReg_224RegClassID:
  case AMDGPU::VReg_224_Align2RegClassID:
  case AMDGPU::AReg_224_Align2RegClassID:
  case AMDGPU::AV_224RegClassID:
  case AMDGPU::AV_224_Align2RegClassID:
  case AMDGPU::VReg_224_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
  case AMDGPU::AReg_256RegClassID:
  case AMDGPU::VReg_256_Align2RegClassID:
  case AMDGPU::AReg_256_Align2RegClassID:
  case AMDGPU::AV_256RegClassID:
  case AMDGPU::AV_256_Align2RegClassID:
  case AMDGPU::SReg_256_XNULLRegClassID:
  case AMDGPU::VReg_256_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_288RegClassID:
  case AMDGPU::SReg_288RegClassID:
  case AMDGPU::VReg_288RegClassID:
  case AMDGPU::AReg_288RegClassID:
  case AMDGPU::VReg_288_Align2RegClassID:
  case AMDGPU::AReg_288_Align2RegClassID:
  case AMDGPU::AV_288RegClassID:
  case AMDGPU::AV_288_Align2RegClassID:
  case AMDGPU::VReg_288_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_320RegClassID:
  case AMDGPU::SReg_320RegClassID:
  case AMDGPU::VReg_320RegClassID:
  case AMDGPU::AReg_320RegClassID:
  case AMDGPU::VReg_320_Align2RegClassID:
  case AMDGPU::AReg_320_Align2RegClassID:
  case AMDGPU::AV_320RegClassID:
  case AMDGPU::AV_320_Align2RegClassID:
  case AMDGPU::VReg_320_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_352RegClassID:
  case AMDGPU::SReg_352RegClassID:
  case AMDGPU::VReg_352RegClassID:
  case AMDGPU::AReg_352RegClassID:
  case AMDGPU::VReg_352_Align2RegClassID:
  case AMDGPU::AReg_352_Align2RegClassID:
  case AMDGPU::AV_352RegClassID:
  case AMDGPU::AV_352_Align2RegClassID:
  case AMDGPU::VReg_352_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_384RegClassID:
  case AMDGPU::SReg_384RegClassID:
  case AMDGPU::VReg_384RegClassID:
  case AMDGPU::AReg_384RegClassID:
  case AMDGPU::VReg_384_Align2RegClassID:
  case AMDGPU::AReg_384_Align2RegClassID:
  case AMDGPU::AV_384RegClassID:
  case AMDGPU::AV_384_Align2RegClassID:
  case AMDGPU::VReg_384_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
  case AMDGPU::AReg_512RegClassID:
  case AMDGPU::VReg_512_Align2RegClassID:
  case AMDGPU::AReg_512_Align2RegClassID:
  case AMDGPU::AV_512RegClassID:
  case AMDGPU::AV_512_Align2RegClassID:
  case AMDGPU::VReg_512_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_1024RegClassID:
  case AMDGPU::SReg_1024RegClassID:
  case AMDGPU::VReg_1024RegClassID:
  case AMDGPU::AReg_1024RegClassID:
  case AMDGPU::VReg_1024_Align2RegClassID:
  case AMDGPU::AReg_1024_Align2RegClassID:
  case AMDGPU::AV_1024RegClassID:
  case AMDGPU::AV_1024_Align2RegClassID:
  case AMDGPU::VReg_1024_Lo256_Align2RegClassID:
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);

         (Val == 0x3e22f983 && HasInv2Pi);

  return Val == 0x3F00 ||

  return Val == 0x3C00 ||

    return 192 + std::abs(Signed);

  case 0x3800: return 240;
  case 0xB800: return 241;
  case 0x3C00: return 242;
  case 0xBC00: return 243;
  case 0x4000: return 244;
  case 0xC000: return 245;
  case 0x4400: return 246;
  case 0xC400: return 247;
  case 0x3118: return 248;

  case 0x3F000000: return 240;
  case 0xBF000000: return 241;
  case 0x3F800000: return 242;
  case 0xBF800000: return 243;
  case 0x40000000: return 244;
  case 0xC0000000: return 245;
  case 0x40800000: return 246;
  case 0xC0800000: return 247;
  case 0x3E22F983: return 248;

    return 192 + std::abs(Signed);

  case 0x3F00: return 240;
  case 0xBF00: return 241;
  case 0x3F80: return 242;
  case 0xBF80: return 243;
  case 0x4000: return 244;
  case 0xC000: return 245;
  case 0x4080: return 246;
  case 0xC080: return 247;
  case 0x3E22: return 248;

  return std::nullopt;

    return 192 + std::abs(Signed);

  return std::nullopt;
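// Added note, derived from the switches above: operand encodings 240-248 are
// the hardware's named inline floating-point constants.  In each of the
// fp16/fp32/bf16 variants they stand for, in order, 0.5, -0.5, 1.0, -1.0,
// 2.0, -2.0, 4.0, -4.0 and 1/(2*pi), while the "return 192 + std::abs(Signed)"
// lines map small negative integers to their inline encodings; any other
// value is not inlinable and the lookups return std::nullopt.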
  return Imm & 0xffff;

  return A->hasAttribute(Attribute::InReg) ||
         A->hasAttribute(Attribute::ByVal);

  return A->hasAttribute(Attribute::InReg);

                                      int64_t EncodedOffset) {

                                      int64_t EncodedOffset, bool IsBuffer) {
  if (IsBuffer && EncodedOffset < 0)

  return (ByteOffset & 3) == 0;

  return ByteOffset >> 2;

                                         int64_t ByteOffset, bool IsBuffer,
    return std::nullopt;
    return isInt<24>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
    return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
    return std::nullopt;
             ? std::optional<int64_t>(EncodedOffset)

                                                  int64_t ByteOffset) {
    return std::nullopt;
  return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)

  if (ST.getFeatureBits().test(FeatureFlatOffsetBits12))
  if (ST.getFeatureBits().test(FeatureFlatOffsetBits24))

struct SourceOfDivergence {
const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

const AlwaysUniform *lookupAlwaysUniform(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#define GET_UniformIntrinsics_IMPL
#define GET_Gfx9BufferFormat_IMPL
#define GET_Gfx10BufferFormat_IMPL
#define GET_Gfx11PlusBufferFormat_IMPL
#include "AMDGPUGenSearchableTables.inc"

  return lookupSourceOfDivergence(IntrID);

  return lookupAlwaysUniform(IntrID);

  return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(BitsPerComp,
                                                         NumComponents,
                                                         NumFormat)
             ? getGfx10BufferFormatInfo(BitsPerComp, NumComponents, NumFormat)
             : getGfx9BufferFormatInfo(BitsPerComp, NumComponents, NumFormat);

                           : getGfx9BufferFormatInfo(Format);
  const unsigned VGPRClasses[] = {
      AMDGPU::VGPR_16RegClassID,  AMDGPU::VGPR_32RegClassID,
      AMDGPU::VReg_64RegClassID,  AMDGPU::VReg_96RegClassID,
      AMDGPU::VReg_128RegClassID, AMDGPU::VReg_160RegClassID,
      AMDGPU::VReg_192RegClassID, AMDGPU::VReg_224RegClassID,
      AMDGPU::VReg_256RegClassID, AMDGPU::VReg_288RegClassID,
      AMDGPU::VReg_320RegClassID, AMDGPU::VReg_352RegClassID,
      AMDGPU::VReg_384RegClassID, AMDGPU::VReg_512RegClassID,
      AMDGPU::VReg_1024RegClassID};

  for (unsigned RCID : VGPRClasses) {

  if (RC->getID() == AMDGPU::VGPR_16RegClassID) {

static std::optional<unsigned>
convertSetRegImmToVgprMSBs(unsigned Imm, unsigned Simm16,
                           bool HasSetregVGPRMSBFixup) {
  constexpr unsigned VGPRMSBShift =
      (!HasSetregVGPRMSBFixup && (Offset + Size) <= VGPRMSBShift))
  if (!HasSetregVGPRMSBFixup)

                                          bool HasSetregVGPRMSBFixup) {
  assert(MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32);
                                    MI.getOperand(1).getImm(),
                                    HasSetregVGPRMSBFixup);

                                          bool HasSetregVGPRMSBFixup) {
  assert(MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32_gfx12);
                                    MI.getOperand(1).getImm(),
                                    HasSetregVGPRMSBFixup);
std::pair<const AMDGPU::OpName *, const AMDGPU::OpName *>
  static const AMDGPU::OpName VOPOps[4] = {
      AMDGPU::OpName::src0, AMDGPU::OpName::src1, AMDGPU::OpName::src2,
      AMDGPU::OpName::vdst};
  static const AMDGPU::OpName VDSOps[4] = {
      AMDGPU::OpName::addr, AMDGPU::OpName::data0, AMDGPU::OpName::data1,
      AMDGPU::OpName::vdst};
  static const AMDGPU::OpName FLATOps[4] = {
      AMDGPU::OpName::vaddr, AMDGPU::OpName::vdata,
      AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdst};
  static const AMDGPU::OpName BUFOps[4] = {
      AMDGPU::OpName::vaddr, AMDGPU::OpName::NUM_OPERAND_NAMES,
      AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdata};
  static const AMDGPU::OpName VIMGOps[4] = {
      AMDGPU::OpName::vaddr0, AMDGPU::OpName::vaddr1, AMDGPU::OpName::vaddr2,
      AMDGPU::OpName::vdata};

  static const AMDGPU::OpName VOPDOpsX[4] = {
      AMDGPU::OpName::src0X, AMDGPU::OpName::vsrc1X, AMDGPU::OpName::vsrc2X,
      AMDGPU::OpName::vdstX};
  static const AMDGPU::OpName VOPDOpsY[4] = {
      AMDGPU::OpName::src0Y, AMDGPU::OpName::vsrc1Y, AMDGPU::OpName::vsrc2Y,
      AMDGPU::OpName::vdstY};

  static const AMDGPU::OpName VOP2MADMKOps[4] = {
      AMDGPU::OpName::src0, AMDGPU::OpName::NUM_OPERAND_NAMES,
      AMDGPU::OpName::src1, AMDGPU::OpName::vdst};
  static const AMDGPU::OpName VOPDFMAMKOpsX[4] = {
      AMDGPU::OpName::src0X, AMDGPU::OpName::NUM_OPERAND_NAMES,
      AMDGPU::OpName::vsrc1X, AMDGPU::OpName::vdstX};
  static const AMDGPU::OpName VOPDFMAMKOpsY[4] = {
      AMDGPU::OpName::src0Y, AMDGPU::OpName::NUM_OPERAND_NAMES,
      AMDGPU::OpName::vsrc1Y, AMDGPU::OpName::vdstY};

  unsigned TSFlags = Desc.TSFlags;

  switch (Desc.getOpcode()) {
  case AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32:
  case AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32_gfx1250:
  case AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64:
  case AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64_gfx1250:

  case AMDGPU::V_FMAMK_F16:
  case AMDGPU::V_FMAMK_F16_t16:
  case AMDGPU::V_FMAMK_F16_t16_gfx12:
  case AMDGPU::V_FMAMK_F16_fake16:
  case AMDGPU::V_FMAMK_F16_fake16_gfx12:
  case AMDGPU::V_FMAMK_F32:
  case AMDGPU::V_FMAMK_F32_gfx12:
  case AMDGPU::V_FMAMK_F64:
  case AMDGPU::V_FMAMK_F64_gfx1250:
    return {VOP2MADMKOps, nullptr};

    return {VOPOps, nullptr};

    return {VDSOps, nullptr};

    return {FLATOps, nullptr};

    return {BUFOps, nullptr};

    return {VIMGOps, nullptr};

    return {(OpX == AMDGPU::V_FMAMK_F32) ? VOPDFMAMKOpsX : VOPDOpsX,
            (OpY == AMDGPU::V_FMAMK_F32) ? VOPDFMAMKOpsY : VOPDOpsY};

                    " these instructions are not expected on gfx1250");
  for (auto OpName : {OpName::vdst, OpName::src0, OpName::src1, OpName::src2}) {
    if (RegClass == AMDGPU::VReg_64RegClassID ||
        RegClass == AMDGPU::VReg_64_Align2RegClassID)

  case AMDGPU::V_MUL_LO_U32_e64:
  case AMDGPU::V_MUL_LO_U32_e64_dpp:
  case AMDGPU::V_MUL_LO_U32_e64_dpp_gfx1250:
  case AMDGPU::V_MUL_HI_U32_e64:
  case AMDGPU::V_MUL_HI_U32_e64_dpp:
  case AMDGPU::V_MUL_HI_U32_e64_dpp_gfx1250:
  case AMDGPU::V_MUL_HI_I32_e64:
  case AMDGPU::V_MUL_HI_I32_e64_dpp:
  case AMDGPU::V_MUL_HI_I32_e64_dpp_gfx1250:
  case AMDGPU::V_MAD_U32_e64:
  case AMDGPU::V_MAD_U32_e64_dpp:
  case AMDGPU::V_MAD_U32_e64_dpp_gfx1250:

  if (!ST.hasFeature(AMDGPU::FeatureDPALU_DPP))
  return ST.hasFeature(AMDGPU::FeatureGFX1250Insts);

  if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize32768))
  if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize65536))
  if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize163840))
  if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize327680))

  case AMDGPU::V_PK_ADD_F32:
  case AMDGPU::V_PK_ADD_F32_gfx12:
  case AMDGPU::V_PK_MUL_F32:
  case AMDGPU::V_PK_MUL_F32_gfx12:
  case AMDGPU::V_PK_FMA_F32:
  case AMDGPU::V_PK_FMA_F32_gfx12:

    OS << EncoNoCluster << ',' << EncoNoCluster << ',' << EncoNoCluster;
    return Buffer.c_str();

    OS << EncoVariableDims << ',' << EncoVariableDims << ','
       << EncoVariableDims;
    return Buffer.c_str();

  OS << Dims[0] << ',' << Dims[1] << ',' << Dims[2];
  return Buffer.c_str();

  std::optional<SmallVector<unsigned>> Attr =
  if (!Attr.has_value())
  A.Dims = {(*Attr)[0], (*Attr)[1], (*Attr)[2]};

    OS << "Unsupported";
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static llvm::cl::opt< unsigned > DefaultAMDHSACodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden, llvm::cl::init(llvm::AMDGPU::AMDHSA_COV6), llvm::cl::desc("Set default AMDHSA Code Object Version (module flag " "or asm directive still take priority if present)"))
Provides AMDGPU specific target descriptions.
MC layer struct for AMDGPUMCKernelCodeT, provides MCExpr functionality where required.
@ AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Register const TargetRegisterInfo * TRI
#define S_00B848_MEM_ORDERED(x)
#define S_00B848_WGP_MODE(x)
#define S_00B848_FWD_PROGRESS(x)
static const int BlockSize
static const uint32_t IV[8]
static ClusterDimsAttr get(const Function &F)
ClusterDimsAttr()=default
std::string to_string() const
const std::array< unsigned, 3 > & getDims() const
bool isSramEccSupported() const
void setTargetIDFromFeaturesString(StringRef FS)
TargetIDSetting getXnackSetting() const
void print(raw_ostream &OS) const
Write string representation to OS.
AMDGPUTargetID(const MCSubtargetInfo &STI)
bool isXnackSupported() const
void setTargetIDFromTargetIDStream(StringRef TargetID)
std::string toString() const
TargetIDSetting getSramEccSetting() const
unsigned getIndexInParsedOperands(unsigned CompOprIdx) const
unsigned getIndexOfDstInParsedOperands() const
unsigned getIndexOfSrcInParsedOperands(unsigned CompSrcIdx) const
int getBitOp3OperandIdx() const
unsigned getCompParsedSrcOperandsNum() const
std::optional< unsigned > getInvalidCompOperandIndex(std::function< MCRegister(unsigned, unsigned)> GetRegIdx, const MCRegisterInfo &MRI, bool SkipSrc=false, bool AllowSameVGPR=false, bool VOPD3=false) const
std::array< MCRegister, Component::MAX_OPR_NUM > RegIndices
Represents the counter values to wait for in an s_waitcnt instruction.
unsigned get(InstCounterType T) const
void set(InstCounterType T, unsigned Val)
This class represents an incoming formal argument to a Function.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
constexpr bool test(unsigned I) const
unsigned getAddressSpace() const
This is an important class for using LLVM in a threaded context.
A helper class to return the specified delimiter string after the first invocation of operator String...
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayStore() const
Return true if this instruction could possibly modify memory.
bool mayLoad() const
Return true if this instruction could possibly read memory.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
unsigned getOpcode() const
Return the opcode number for this descriptor.
Interface to description of machine instruction set.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
int16_t getOpRegClassID(const MCOperandInfo &OpInfo, unsigned HwModeId) const
Return the ID of the register class to use for OpInfo, for the active HwMode HwModeId.
This holds information about one operand of a machine instruction, indicating the register class for ...
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
bool regsOverlap(MCRegister RegA, MCRegister RegB) const
Returns true if the two registers are equal or alias each other.
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
Wrapper class representing physical registers. Should be passed by value.
constexpr unsigned id() const
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
const MDOperand & getOperand(unsigned I) const
unsigned getNumOperands() const
Return number of MDNode operands.
Representation of each machine instruction.
A Module instance is used to store all the information related to an LLVM module.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
A wrapper around a string literal that serves as a proxy for constructing global tables of StringRefs...
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
std::string str() const
str - Get the contents as an std::string.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr size_t size() const
size - Get the string size.
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Manages the enabling and disabling of subtarget specific features.
const std::vector< std::string > & getFeatures() const
Returns the vector of individual subtarget features.
Triple - Helper class for working with autoconf configuration names.
LLVM_ABI StringRef getVendorName() const
Get the vendor (second) component of the triple.
LLVM_ABI StringRef getOSName() const
Get the operating system (third) component of the triple.
OSType getOS() const
Get the parsed operating system type of this triple.
ArchType getArch() const
Get the parsed architecture type of this triple.
LLVM_ABI StringRef getEnvironmentName() const
Get the optional environment (fourth) component of the triple, or "" if empty.
bool isAMDGCN() const
Tests whether the target is AMDGCN.
LLVM_ABI StringRef getArchName() const
Get the architecture (first) component of the triple.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
unsigned decodeFieldVaVcc(unsigned Encoded)
unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc)
unsigned decodeFieldHoldCnt(unsigned Encoded, const IsaVersion &Version)
unsigned getVaVccBitMask()
bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt, const IsaVersion &Version)
unsigned getVmVsrcBitMask()
unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc)
unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst)
unsigned decodeFieldSaSdst(unsigned Encoded)
unsigned getHoldCntBitMask(const IsaVersion &Version)
unsigned decodeFieldVaSdst(unsigned Encoded)
unsigned getVaVdstBitMask()
unsigned getVaSsrcBitMask()
unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc)
unsigned getVaSdstBitMask()
unsigned decodeFieldVaSsrc(unsigned Encoded)
int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst)
const CustomOperandVal DepCtrInfo[]
bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
unsigned decodeFieldVaVdst(unsigned Encoded)
unsigned getSaSdstBitMask()
int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI)
unsigned decodeFieldVmVsrc(unsigned Encoded)
unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst)
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
static constexpr ExpTgt ExpTgtInfo[]
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
unsigned getTgtId(const StringRef Name)
@ ET_DUAL_SRC_BLEND_MAX_IDX
constexpr uint32_t VersionMinor
HSA metadata minor version.
constexpr uint32_t VersionMajor
HSA metadata major version.
@ COMPLETION_ACTION_OFFSET
@ MULTIGRID_SYNC_ARG_OFFSET
unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI, std::optional< bool > EnableWavefrontSize32)
@ FIXED_NUM_SGPRS_FOR_INIT_BUG
unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI)
unsigned getArchVGPRAllocGranule()
For subtargets with a unified VGPR file and mixed ArchVGPR/AGPR usage, returns the allocation granule...
unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getWavefrontSize(const MCSubtargetInfo *STI)
unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize)
unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI)
unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, bool FlatScrUsed, bool XNACKUsed)
unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI)
unsigned getLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getEUsPerCU(const MCSubtargetInfo *STI)
unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI)
unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
static TargetIDSetting getTargetIDSettingFromFeatureString(StringRef FeatureString)
unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, bool Addressable)
unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs)
unsigned getMinWavesPerEU(const MCSubtargetInfo *STI)
unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI)
unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, std::optional< bool > EnableWavefrontSize32)
unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves, AMDGPUSubtarget::Generation Gen)
static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs, unsigned Granule)
unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI)
unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI)
unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize)
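A minimal sketch of how the register-budget queries above might be combined to report occupancy for a candidate VGPR count. It assumes the functions live in the llvm::AMDGPU::IsaInfo namespace, that "Utils/AMDGPUBaseInfo.h" is the correct include, and that passing 0 for DynamicVGPRBlockSize means dynamic VGPRs are disabled; treat those as assumptions of the example.

```cpp
// Illustrative only: report addressable VGPRs and the waves/EU a given VGPR
// budget allows on the subtarget in hand.
#include "Utils/AMDGPUBaseInfo.h"        // assumed include path
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/raw_ostream.h"

void reportVGPRBudget(const llvm::MCSubtargetInfo *STI, unsigned NumVGPRs) {
  using namespace llvm::AMDGPU::IsaInfo; // assumed namespace
  unsigned Addressable =
      getAddressableNumVGPRs(STI, /*DynamicVGPRBlockSize=*/0);
  unsigned Waves =
      getNumWavesPerEUWithNumVGPRs(STI, NumVGPRs, /*DynamicVGPRBlockSize=*/0);
  llvm::errs() << "VGPRs requested: " << NumVGPRs << " / " << Addressable
               << ", waves per EU: " << Waves
               << " (max " << getMaxWavesPerEU(STI) << ")\n";
}
```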
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId)
@ ID_DEALLOC_VGPRS_GFX11Plus
bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, uint16_t &StreamId, const MCSubtargetInfo &STI)
bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, const MCSubtargetInfo &STI, bool Strict)
bool msgDoesNotUseM0(int64_t MsgId, const MCSubtargetInfo &STI)
Returns true if the message does not use the m0 operand.
StringRef getMsgOpName(int64_t MsgId, uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a sendmsg operation.
static uint64_t getMsgIdMask(const MCSubtargetInfo &STI)
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, bool Strict)
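A small round-trip sketch for the sendmsg helpers listed above. It assumes they live in the llvm::AMDGPU::SendMsg namespace, that "Utils/AMDGPUBaseInfo.h" is the right include, and that the MsgId/OpId/StreamId values have already been validated for the subtarget; none of that is asserted by the index itself.

```cpp
// Illustrative only: pack a sendmsg immediate and split it back into fields.
#include "Utils/AMDGPUBaseInfo.h"        // assumed include path
#include "llvm/MC/MCSubtargetInfo.h"
#include <cassert>
#include <cstdint>

uint64_t roundTripSendMsg(const llvm::MCSubtargetInfo &STI, uint64_t MsgId,
                          uint64_t OpId, uint64_t StreamId) {
  using namespace llvm::AMDGPU::SendMsg; // assumed namespace
  uint64_t Imm = encodeMsg(MsgId, OpId, StreamId);
  uint16_t M, O, S;
  decodeMsg(static_cast<unsigned>(Imm), M, O, S, STI);
  assert(M == MsgId && O == OpId && S == StreamId);
  return Imm;
}
```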
constexpr unsigned VOPD_VGPR_BANK_MASKS[]
constexpr unsigned COMPONENTS_NUM
constexpr unsigned VOPD3_VGPR_BANK_MASKS[]
bool isPackedFP32Inst(unsigned Opc)
bool isGCN3Encoding(const MCSubtargetInfo &STI)
bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi)
bool isGFX10_BEncoding(const MCSubtargetInfo &STI)
bool isInlineValue(MCRegister Reg)
bool isGFX10_GFX11(const MCSubtargetInfo &STI)
bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType)
bool isPKFMACF16InlineConstant(uint32_t Literal, bool IsGFX11Plus)
LLVM_READONLY const MIMGInfo * getMIMGInfo(unsigned Opc)
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from given Waitcnt for given isa Version, and writes decoded values...
bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi)
bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI)
Returns true if Reg is a scalar register.
uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset)
Convert ByteOffset to dwords if the subtarget uses dword SMRD immediate offsets.
static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Storecnt)
MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI)
If Reg is a pseudo register, return the correct hardware register for STI; otherwise return Reg.
static bool hasSMEMByteOffset(const MCSubtargetInfo &ST)
bool isVOPCAsmOnly(unsigned Opc)
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, unsigned VDataDwords, unsigned VAddrDwords)
bool getMTBUFHasSrsrc(unsigned Opc)
std::optional< int64_t > getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST, int64_t ByteOffset)
bool getWMMAIsXDL(unsigned Opc)
static std::optional< unsigned > convertSetRegImmToVgprMSBs(unsigned Imm, unsigned Simm16, bool HasSetregVGPRMSBFixup)
uint8_t wmmaScaleF8F6F4FormatToNumRegs(unsigned Fmt)
static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr, int Size, unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
bool isGFX10Before1030(const MCSubtargetInfo &STI)
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo)
Does this operand support only inlinable literals?
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc)
const int OPR_ID_UNSUPPORTED
bool shouldEmitConstantsToTextSection(const Triple &TT)
bool isInlinableLiteralV2I16(uint32_t Literal)
bool isDPMACCInstruction(unsigned Opc)
int getMTBUFElements(unsigned Opc)
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
static int encodeCustomOperandVal(const CustomOperandVal &Op, int64_t InputVal)
unsigned getTemporalHintType(const MCInstrDesc TID)
int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, int32_t ArgNumVGPR)
iota_range< InstCounterType > inst_counter_types(InstCounterType MaxCounter)
bool isGFX10(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2BF16(uint32_t Literal)
unsigned getMaxNumUserSGPRs(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV216(bool IsFloat, uint32_t Literal)
FPType getFPDstSelType(unsigned Opc)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
bool hasA16(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset, bool IsBuffer)
bool isGFX12Plus(const MCSubtargetInfo &STI)
unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler)
const MCRegisterClass * getVGPRPhysRegClass(MCRegister Reg, const MCRegisterInfo &MRI)
bool hasPackedD16(const MCSubtargetInfo &STI)
unsigned getStorecntBitMask(const IsaVersion &Version)
unsigned getLdsDwGranularity(const MCSubtargetInfo &ST)
bool isGFX940(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2F16(uint32_t Literal)
bool isHsaAbi(const MCSubtargetInfo &STI)
bool isGFX11(const MCSubtargetInfo &STI)
const int OPR_VAL_INVALID
bool getSMEMIsBuffer(unsigned Opc)
bool isGFX10_3_GFX11(const MCSubtargetInfo &STI)
bool isGFX13(const MCSubtargetInfo &STI)
bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val)
Checks if Val is inside MD, a !range-like metadata.
uint8_t mfmaScaleF8F6F4FormatToNumRegs(unsigned EncodingVal)
unsigned getVOPDOpcode(unsigned Opc, bool VOPD3)
bool isGroupSegment(const GlobalValue *GV)
LLVM_ABI IsaVersion getIsaVersion(StringRef GPU)
bool getMTBUFHasSoffset(unsigned Opc)
bool hasXNACK(const MCSubtargetInfo &STI)
bool isValid32BitLiteral(uint64_t Val, bool IsFP64)
static unsigned getCombinedCountBitMask(const IsaVersion &Version, bool IsStore)
CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3)
unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt)
Encodes Vmcnt, Expcnt and Lgkmcnt into Waitcnt for given isa Version.
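A minimal sketch of the legacy waitcnt pack/unpack round trip using encodeWaitcnt/decodeWaitcnt and getIsaVersion from the listing above. The "gfx900" target name and the include paths are example assumptions of this snippet.

```cpp
// Illustrative only: encode vmcnt(0) expcnt(0) lgkmcnt(0) and decode it back.
#include "Utils/AMDGPUBaseInfo.h"            // assumed include path
#include "llvm/TargetParser/TargetParser.h"  // assumed home of getIsaVersion
#include <cassert>

void waitcntRoundTrip() {
  using namespace llvm::AMDGPU;
  IsaVersion Isa = getIsaVersion("gfx900");   // example target
  unsigned Enc = encodeWaitcnt(Isa, /*Vmcnt=*/0, /*Expcnt=*/0, /*Lgkmcnt=*/0);
  unsigned Vm, Exp, Lgkm;
  decodeWaitcnt(Isa, Enc, Vm, Exp, Lgkm);
  assert(Vm == 0 && Exp == 0 && Lgkm == 0);
}
```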
bool isVOPC64DPP(unsigned Opc)
int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements)
bool getMAIIsGFX940XDL(unsigned Opc)
bool isSI(const MCSubtargetInfo &STI)
unsigned getDefaultAMDHSACodeObjectVersion()
bool isReadOnlySegment(const GlobalValue *GV)
bool isArgPassedInSGPR(const Argument *A)
bool isIntrinsicAlwaysUniform(unsigned IntrID)
int getMUBUFBaseOpcode(unsigned Opc)
unsigned getAMDHSACodeObjectVersion(const Module &M)
unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt)
unsigned getWaitcntBitMask(const IsaVersion &Version)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
bool getVOP3IsSingle(unsigned Opc)
bool isGFX9(const MCSubtargetInfo &STI)
bool isDPALU_DPP32BitOpc(unsigned Opc)
bool getVOP1IsSingle(unsigned Opc)
static bool isDwordAligned(uint64_t ByteOffset)
unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST)
bool isGFX10_AEncoding(const MCSubtargetInfo &STI)
bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a KImm operand?
bool getHasColorExport(const Function &F)
int getMTBUFBaseOpcode(unsigned Opc)
bool isGFX90A(const MCSubtargetInfo &STI)
unsigned getSamplecntBitMask(const IsaVersion &Version)
unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion)
std::tuple< char, unsigned, unsigned > parseAsmPhysRegName(StringRef RegName)
Returns a valid char code in the first entry if this is a valid physical register name, or 0 otherwise.
bool hasSRAMECC(const MCSubtargetInfo &STI)
bool getHasDepthExport(const Function &F)
bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI)
bool getMUBUFHasVAddr(unsigned Opc)
bool isTrue16Inst(unsigned Opc)
unsigned getVGPREncodingMSBs(MCRegister Reg, const MCRegisterInfo &MRI)
std::pair< unsigned, unsigned > getVOPDComponents(unsigned VOPDOpcode)
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
bool isGFX12(const MCSubtargetInfo &STI)
unsigned getInitialPSInputAddr(const Function &F)
unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Expcnt)
bool isAsyncStore(unsigned Opc)
unsigned getDynamicVGPRBlockSize(const Function &F)
unsigned getKmcntBitMask(const IsaVersion &Version)
MCRegister getVGPRWithMSBs(MCRegister Reg, unsigned MSBs, const MCRegisterInfo &MRI)
If Reg is a low VGPR, return a corresponding high VGPR with MSBs set.
unsigned getVmcntBitMask(const IsaVersion &Version)
bool isNotGFX10Plus(const MCSubtargetInfo &STI)
bool hasMAIInsts(const MCSubtargetInfo &STI)
unsigned getBitOp2(unsigned Opc)
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
unsigned getXcntBitMask(const IsaVersion &Version)
bool isGenericAtomic(unsigned Opc)
const MFMA_F8F6F4_Info * getWMMA_F8F6F4_WithFormatArgs(unsigned FmtA, unsigned FmtB, unsigned F8F8Opcode)
Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt)
bool isGFX8Plus(const MCSubtargetInfo &STI)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating point values.
unsigned getLgkmcntBitMask(const IsaVersion &Version)
bool getMUBUFTfe(unsigned Opc)
unsigned getBvhcntBitMask(const IsaVersion &Version)
bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST)
bool hasMIMG_R128(const MCSubtargetInfo &STI)
bool hasGFX10_3Insts(const MCSubtargetInfo &STI)
std::pair< const AMDGPU::OpName *, const AMDGPU::OpName * > getVGPRLoweringOperandTables(const MCInstrDesc &Desc)
bool hasG16(const MCSubtargetInfo &STI)
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements)
bool isGFX13Plus(const MCSubtargetInfo &STI)
unsigned getExpcntBitMask(const IsaVersion &Version)
bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI)
int32_t getMCOpcode(uint32_t Opcode, unsigned Gen)
bool getMUBUFHasSoffset(unsigned Opc)
bool isNotGFX11Plus(const MCSubtargetInfo &STI)
bool isGFX11Plus(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV2F16(uint32_t Literal)
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a floating-point operand?
std::tuple< char, unsigned, unsigned > parseAsmConstraintPhysReg(StringRef Constraint)
Returns a valid char code in the first entry if this is a valid physical register constraint, or 0 otherwise.
unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion)
static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr, int Size, const MCSubtargetInfo &STI)
static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Loadcnt)
bool isGFX10Plus(const MCSubtargetInfo &STI)
static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size, unsigned Code, int &Idx, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
static bool isValidRegPrefix(char C)
std::optional< int64_t > getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset, bool IsBuffer, bool HasSOffset)
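A sketch (under stated assumptions) of how getSMRDEncodedOffset might be used to decide whether a scalar-memory byte offset fits the subtarget's immediate field; the include path and wrapper name are hypothetical.

```cpp
// Illustrative only: returns true if ByteOffset can be encoded as an SMRD/SMEM
// immediate; *Enc would be the value to place in the offset field.
#include "Utils/AMDGPUBaseInfo.h"        // assumed include path
#include "llvm/MC/MCSubtargetInfo.h"
#include <optional>

bool canFoldSMEMOffset(const llvm::MCSubtargetInfo &ST, int64_t ByteOffset) {
  std::optional<int64_t> Enc = llvm::AMDGPU::getSMRDEncodedOffset(
      ST, ByteOffset, /*IsBuffer=*/false, /*HasSOffset=*/false);
  return Enc.has_value();
}
```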
bool isGlobalSegment(const GlobalValue *GV)
int64_t encode32BitLiteral(int64_t Imm, OperandType Type, bool IsLit)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
@ OPERAND_REG_INLINE_C_LAST
@ OPERAND_REG_INLINE_C_FP64
@ OPERAND_REG_INLINE_C_BF16
@ OPERAND_REG_INLINE_C_V2BF16
@ OPERAND_REG_IMM_V2INT16
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
@ OPERAND_REG_INLINE_AC_FIRST
@ OPERAND_REG_IMM_V2FP16_SPLAT
@ OPERAND_REG_IMM_NOINLINE_V2FP16
@ OPERAND_REG_INLINE_C_V2FP16
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
@ OPERAND_REG_INLINE_AC_FP32
@ OPERAND_REG_IMM_V2INT32
@ OPERAND_REG_INLINE_C_FIRST
@ OPERAND_REG_INLINE_C_FP32
@ OPERAND_REG_INLINE_AC_LAST
@ OPERAND_REG_INLINE_C_INT32
@ OPERAND_REG_INLINE_C_V2INT16
@ OPERAND_REG_INLINE_AC_FP64
@ OPERAND_REG_INLINE_C_FP16
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
std::optional< unsigned > getPKFMACF16InlineEncoding(uint32_t Literal, bool IsGFX11Plus)
raw_ostream & operator<<(raw_ostream &OS, const AMDGPU::Waitcnt &Wait)
void initDefaultAMDKernelCodeT(AMDGPUMCKernelCodeT &KernelCode, const MCSubtargetInfo *STI)
bool isNotGFX9Plus(const MCSubtargetInfo &STI)
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
bool hasGDS(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset)
bool isGFX9Plus(const MCSubtargetInfo &STI)
bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI)
const int OPR_ID_DUPLICATE
bool isVOPD(unsigned Opc)
VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY)
unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Vmcnt)
unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt)
bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc)
Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt)
std::optional< unsigned > getInlineEncodingV2I16(uint32_t Literal)
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
static unsigned encodeStorecntDscnt(const IsaVersion &Version, unsigned Storecnt, unsigned Dscnt)
bool isGFX1250(const MCSubtargetInfo &STI)
const MIMGBaseOpcodeInfo * getMIMGBaseOpcode(unsigned Opc)
bool isVI(const MCSubtargetInfo &STI)
bool isTensorStore(unsigned Opc)
bool getMUBUFIsBufferInv(unsigned Opc)
bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
std::optional< unsigned > getInlineEncodingV2BF16(uint32_t Literal)
static int encodeCustomOperand(const CustomOperandVal *Opr, int Size, const StringRef Name, int64_t InputVal, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned hasKernargPreload(const MCSubtargetInfo &STI)
bool supportsWGP(const MCSubtargetInfo &STI)
bool isCI(const MCSubtargetInfo &STI)
unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Lgkmcnt)
bool getVOP2IsSingle(unsigned Opc)
bool getMAIIsDGEMM(unsigned Opc)
Returns true if the MAI operation is a double-precision GEMM.
LLVM_READONLY const MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion)
SmallVector< unsigned > getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size, unsigned DefaultVal)
bool isGFX1250Plus(const MCSubtargetInfo &STI)
int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels)
bool isNotGFX12Plus(const MCSubtargetInfo &STI)
bool getMTBUFHasVAddr(unsigned Opc)
unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt)
uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion)
std::pair< unsigned, unsigned > getIntegerPairAttribute(const Function &F, StringRef Name, std::pair< unsigned, unsigned > Default, bool OnlyFirstRequired)
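A short sketch of reading a two-value function attribute through getIntegerPairAttribute. The attribute name "amdgpu-flat-work-group-size" and the fallback default {1, 1024} are example inputs chosen for illustration, not values taken from this index.

```cpp
// Illustrative only: fetch a (min, max) pair attribute with a fallback default.
#include "Utils/AMDGPUBaseInfo.h"        // assumed include path
#include "llvm/IR/Function.h"
#include <utility>

std::pair<unsigned, unsigned> getFlatWorkGroupSize(const llvm::Function &F) {
  return llvm::AMDGPU::getIntegerPairAttribute(
      F, "amdgpu-flat-work-group-size", {1u, 1024u},
      /*OnlyFirstRequired=*/false);
}
```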
unsigned getLoadcntBitMask(const IsaVersion &Version)
bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi)
bool hasVOPD(const MCSubtargetInfo &STI)
int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily, bool VOPD3)
static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Dscnt)
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable.
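A minimal check built on isInlinableLiteral64 from the listing above. The include paths are assumptions, and the exact set of inline-encodable values is defined by the helper itself, not by this sketch.

```cpp
// Illustrative only: does a double's bit pattern encode as an inline constant?
#include "Utils/AMDGPUBaseInfo.h"        // assumed include path
#include "llvm/ADT/bit.h"
#include <cstdint>

bool fitsInlineConstantF64(double V, bool HasInv2Pi) {
  int64_t Bits = llvm::bit_cast<int64_t>(V);
  return llvm::AMDGPU::isInlinableLiteral64(Bits, HasInv2Pi);
}
```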
const MFMA_F8F6F4_Info * getMFMA_F8F6F4_WithFormatArgs(unsigned CBSZ, unsigned BLGP, unsigned F8F8Opcode)
unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion)
bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI)
bool isGFX9_GFX10(const MCSubtargetInfo &STI)
int getMUBUFElements(unsigned Opc)
static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt, unsigned Dscnt)
const GcnBufferFormatInfo * getGcnBufferFormatInfo(uint8_t BitsPerComp, uint8_t NumComponents, uint8_t NumFormat, const MCSubtargetInfo &STI)
unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc)
bool isPermlane16(unsigned Opc)
bool getMUBUFHasSrsrc(unsigned Opc)
unsigned getDscntBitMask(const IsaVersion &Version)
bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_Gfx
Used for AMD graphics targets.
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
@ ELFABIVERSION_AMDGPU_HSA_V4
@ ELFABIVERSION_AMDGPU_HSA_V5
@ ELFABIVERSION_AMDGPU_HSA_V6
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
constexpr T rotr(T V, int R)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
auto enum_seq(EnumT Begin, EnumT End)
Iterate over an enum type from Begin up to - but not including - End.
testing::Matcher< const detail::ErrorHolder & > Failed()
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value that is congruent to Skew modulo Align.
std::string utostr(uint64_t X, bool isNeg=false)
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like a...
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
To bit_cast(const From &from) noexcept
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
constexpr int countr_zero_constexpr(T Val)
Count the number of 0s from the least significant bit to the most significant bit, stopping at the first 1.
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
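A small usage sketch for the generic support helpers referenced above (all in namespace llvm; the concrete values are arbitrary examples chosen for illustration).

```cpp
// Illustrative only: exercise a few of the MathExtras-style helpers.
#include "llvm/Support/MathExtras.h"
#include <cassert>

void supportHelpersDemo() {
  assert(llvm::divideCeil(10u, 4u) == 3u);             // integer ceil(10/4)
  assert(llvm::alignDown(37u, 16u) == 32u);            // round down to multiple
  assert(llvm::maskTrailingOnes<unsigned>(3) == 0x7u); // low 3 bits set
  assert(llvm::Hi_32(0x1234567800000000ULL) == 0x12345678u);
  assert(llvm::Lo_32(0x00000000DEADBEEFULL) == 0xDEADBEEFu);
  assert(llvm::isUInt<8>(255) && !llvm::isUInt<8>(256));
}
```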
@ AlwaysUniform
The result values are always uniform.
@ Default
The result values are uniform if and only if all operands are uniform.
AMD Kernel Code Object (amd_kernel_code_t).
uint16_t amd_machine_version_major
uint16_t amd_machine_kind
uint16_t amd_machine_version_stepping
uint8_t private_segment_alignment
int64_t kernel_code_entry_byte_offset
uint32_t amd_kernel_code_version_major
uint16_t amd_machine_version_minor
uint8_t group_segment_alignment
uint8_t kernarg_segment_alignment
uint32_t amd_kernel_code_version_minor
uint64_t compute_pgm_resource_registers
static std::tuple< typename Fields::ValueType... > decode(uint64_t Encoded)
Instruction set architecture version.