#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"
static llvm::cl::opt<unsigned> DefaultAMDHSACodeObjectVersion(
    "amdhsa-code-object-version", llvm::cl::Hidden,
    llvm::cl::init(llvm::AMDGPU::AMDHSA_COV6),
    llvm::cl::desc("Set default AMDHSA Code Object Version (module flag "
                   "or asm directive still take priority if present)"));
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  unsigned Mask = getBitMask(Shift, Width);
  return ((Src << Shift) & Mask) | (Dst & ~Mask);
}

unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
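// Illustrative sketch (not part of the original file): getBitMask/packBits/
// unpackBits round-trip a Width-bit field stored at bit position Shift.
// For example, packing the value 5 into the 3-bit field at shift 4 of 0xFFFF:
//   getBitMask(4, 3)          == 0x70
//   packBits(5, 0xFFFF, 4, 3) == 0xFFDF   // (5 << 4) | (0xFFFF & ~0x70)
//   unpackBits(0xFFDF, 4, 3)  == 5        // recovers the packed value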
unsigned getVmcntBitShiftLo(unsigned VersionMajor) {

unsigned getVmcntBitWidthLo(unsigned VersionMajor) {

unsigned getExpcntBitShift(unsigned VersionMajor) {

unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }

unsigned getLgkmcntBitShift(unsigned VersionMajor) {

unsigned getLgkmcntBitWidth(unsigned VersionMajor) {

unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }

unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
  return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
}

unsigned getLoadcntBitWidth(unsigned VersionMajor) {

unsigned getSamplecntBitWidth(unsigned VersionMajor) {

unsigned getBvhcntBitWidth(unsigned VersionMajor) {

unsigned getDscntBitWidth(unsigned VersionMajor) {

unsigned getDscntBitShift(unsigned VersionMajor) { return 0; }

unsigned getStorecntBitWidth(unsigned VersionMajor) {

unsigned getKmcntBitWidth(unsigned VersionMajor) {

unsigned getXcntBitWidth(unsigned VersionMajor, unsigned VersionMinor) {

unsigned getLoadcntStorecntBitShift(unsigned VersionMajor) {

inline unsigned getVaSdstBitWidth() { return 3; }
inline unsigned getVaSdstBitShift() { return 9; }
inline unsigned getVmVsrcBitWidth() { return 3; }
inline unsigned getVmVsrcBitShift() { return 2; }
inline unsigned getVaVdstBitWidth() { return 4; }
inline unsigned getVaVdstBitShift() { return 12; }
inline unsigned getVaVccBitWidth() { return 1; }
inline unsigned getVaVccBitShift() { return 1; }
inline unsigned getSaSdstBitWidth() { return 1; }
inline unsigned getSaSdstBitShift() { return 0; }
inline unsigned getVaSsrcBitWidth() { return 1; }
inline unsigned getVaSsrcBitShift() { return 8; }
inline unsigned getHoldCntWidth() { return 1; }
inline unsigned getHoldCntBitShift() { return 7; }
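// Illustrative summary (not part of the original file): taken together, the
// shifts and widths above describe the DEPCTR immediate layout consumed by
// the decodeField*/encodeField* helpers later in this file:
//   bit  0      sa_sdst
//   bit  1      va_vcc
//   bits 2-4    vm_vsrc
//   bit  7      hold_cnt
//   bit  8      va_ssrc
//   bits 9-11   va_sdst
//   bits 12-15  va_vdst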
          M.getModuleFlag("amdhsa_code_object_version"))) {
    return (unsigned)Ver->getZExtValue() / 100;
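// Illustrative sketch (not in the original file): the module flag stores
// 100 * the code object version, so dividing by 100 yields the major
// version. For example, IR carrying
//   !llvm.module.flags = !{!0}
//   !0 = !{i32 1, !"amdhsa_code_object_version", i32 500}
// would make getAMDHSACodeObjectVersion(M) return 5 (code object v5).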
  switch (ABIVersion) {

  switch (CodeObjectVersion) {
                       Twine(CodeObjectVersion));

  switch (CodeObjectVersion) {

  switch (CodeObjectVersion) {

  switch (CodeObjectVersion) {

  switch (CodeObjectVersion) {
#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
#define GET_MIMGBiasMappingTable_IMPL
#define GET_MIMGOffsetMappingTable_IMPL
#define GET_MIMGG16MappingTable_IMPL
#define GET_MAIInstInfoTable_IMPL
#define GET_WMMAInstInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"
                  unsigned VDataDwords, unsigned VAddrDwords) {
      getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, VDataDwords, VAddrDwords);

  return NewInfo ? NewInfo->Opcode : -1;

                                        bool IsG16Supported) {
  AddrWords += AddrComponents;

  if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
#define GET_FP4FP8DstByteSelTable_DECL
#define GET_FP4FP8DstByteSelTable_IMPL

#define GET_MTBUFInfoTable_DECL
#define GET_MTBUFInfoTable_IMPL
#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#define GET_SMInfoTable_DECL
#define GET_SMInfoTable_IMPL
#define GET_VOP1InfoTable_DECL
#define GET_VOP1InfoTable_IMPL
#define GET_VOP2InfoTable_DECL
#define GET_VOP2InfoTable_IMPL
#define GET_VOP3InfoTable_DECL
#define GET_VOP3InfoTable_IMPL
#define GET_VOPC64DPPTable_DECL
#define GET_VOPC64DPPTable_IMPL
#define GET_VOPC64DPP8Table_DECL
#define GET_VOPC64DPP8Table_IMPL
#define GET_VOPCAsmOnlyInfoTable_DECL
#define GET_VOPCAsmOnlyInfoTable_IMPL
#define GET_VOP3CAsmOnlyInfoTable_DECL
#define GET_VOP3CAsmOnlyInfoTable_IMPL
#define GET_VOPDComponentTable_DECL
#define GET_VOPDComponentTable_IMPL
#define GET_VOPDPairs_DECL
#define GET_VOPDPairs_IMPL
#define GET_VOPTrue16Table_DECL
#define GET_VOPTrue16Table_IMPL
#define GET_True16D16Table_IMPL
#define GET_WMMAOpcode2AddrMappingTable_DECL
#define GET_WMMAOpcode2AddrMappingTable_IMPL
#define GET_WMMAOpcode3AddrMappingTable_DECL
#define GET_WMMAOpcode3AddrMappingTable_IMPL
#define GET_getMFMA_F8F6F4_WithSize_DECL
#define GET_getMFMA_F8F6F4_WithSize_IMPL
#define GET_isMFMA_F8F6F4Table_IMPL
#define GET_isCvtScaleF32_F32F16ToF8F4Table_IMPL
#include "AMDGPUGenSearchableTables.inc"
  return Info ? Info->BaseOpcode : -1;

      getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);

  return Info ? Info->BaseOpcode : -1;

      getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);

  return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);
  return Info ? Info->is_wmma_xdl : false;

  switch (EncodingVal) {

                                      unsigned F8F8Opcode) {
  return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);

                                      unsigned F8F8Opcode) {
  return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);

  if (ST.hasFeature(AMDGPU::FeatureGFX1250Insts))
  if (ST.hasFeature(AMDGPU::FeatureGFX12Insts))
  if (ST.hasFeature(AMDGPU::FeatureGFX11Insts))

  Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;

                                  EncodingFamily, VOPD3) != -1;
  return {VOPD3 ? Info->CanBeVOPD3X : Info->CanBeVOPDX, CanBeVOPDY};

  return {false, false};

  Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
  return Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
         Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
         Opc == AMDGPU::V_MAC_F32_e64_vi ||
         Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
         Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
         Opc == AMDGPU::V_MAC_F16_e64_vi ||
         Opc == AMDGPU::V_FMAC_F64_e64_gfx90a ||
         Opc == AMDGPU::V_FMAC_F64_e64_gfx12 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx12 ||
         Opc == AMDGPU::V_FMAC_F32_e64_vi ||
         Opc == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_DX9_ZERO_F32_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F16_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx12 ||
         Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx12 ||
         Opc == AMDGPU::V_DOT2C_F32_F16_e64_vi ||
         Opc == AMDGPU::V_DOT2C_F32_BF16_e64_vi ||
         Opc == AMDGPU::V_DOT2C_I32_I16_e64_vi ||
         Opc == AMDGPU::V_DOT4C_I32_I8_e64_vi ||
         Opc == AMDGPU::V_DOT8C_I32_I4_e64_vi;

  return Opc == AMDGPU::V_PERMLANE16_B32_gfx10 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_gfx10 ||
         Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx11 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx11 ||
         Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx12 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx12 ||
         Opc == AMDGPU::V_PERMLANE16_VAR_B32_e64_gfx12 ||
         Opc == AMDGPU::V_PERMLANEX16_VAR_B32_e64_gfx12;

  return Opc == AMDGPU::V_CVT_F32_BF8_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp8_gfx12 ||
         Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp8_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_BF8_fake16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_FP8_fake16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_BF8_t16_e64_gfx12 ||
         Opc == AMDGPU::V_CVT_PK_F32_FP8_t16_e64_gfx12;

  return Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX ||
         Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP ||
         Opc == AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG;

  return Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_SADDR_gfx1250 ||
         Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_SADDR_gfx1250;

  return Opc == TENSOR_STORE_FROM_LDS_gfx1250 ||
         Opc == TENSOR_STORE_FROM_LDS_D2_gfx1250;
  if (Info->HasFP8DstByteSel)
  if (Info->HasFP4DstByteSel)

  return Info ? Info->Opcode3Addr : ~0u;

  return Info ? Info->Opcode2Addr : ~0u;

  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));

  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::V_XNOR_B32_e32:

int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily,
  bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(OpY) : 0;
  OpY = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : OpY;
      getVOPDInfoFromComponentOpcodes(OpX, OpY, EncodingFamily, VOPD3);

  const auto *OpX = getVOPDBaseFromComponent(Info->OpX);
  const auto *OpY = getVOPDBaseFromComponent(Info->OpY);
  return {OpX->BaseVOP, OpY->BaseVOP};

  HasSrc2Acc = TiedIdx != -1;

  if (Opcode == AMDGPU::V_CNDMASK_B32_e32 ||
      Opcode == AMDGPU::V_CNDMASK_B32_e64) {

          getNamedOperandIdx(Opcode, OpName::src0))) {
    NumVOPD3Mods = SrcOperandsNum;
  for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) {
      MandatoryLiteralIdx = CompOprIdx;

  return getNamedOperandIdx(Opcode, OpName::bitop3);

    std::function<MCRegister(unsigned, unsigned)> GetRegIdx,

                           unsigned BanksMask) -> bool {
    if ((BaseX.id() & BanksMask) == (BaseY.id() & BanksMask))
        ((BaseX.id() + 1) & BanksMask) == (BaseY.id() & BanksMask))
        (BaseX.id() & BanksMask) == ((BaseY.id() + 1) & BanksMask))

    if (!OpXRegs[CompOprIdx] || !OpYRegs[CompOprIdx])

    if (MRI.regsOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx]))

    if (banksOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx], BanksMasks) &&
        OpXRegs[CompOprIdx] != OpYRegs[CompOprIdx]))

InstInfo::getRegIndices(unsigned CompIdx,
    std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
  const auto &Comp = CompInfo[CompIdx];

  RegIndices[DST] = GetRegIdx(CompIdx, Comp.getIndexOfDstInMCOperands());

    unsigned CompSrcIdx = CompOprIdx - DST_NUM;
        Comp.hasRegSrcOperand(CompSrcIdx)
            Comp.getIndexOfSrcInMCOperands(CompSrcIdx, VOPD3))

  const auto &OpXDesc = InstrInfo->get(OpX);
  const auto &OpYDesc = InstrInfo->get(OpY);

  if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
  if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
  std::optional<bool> XnackRequested;
  std::optional<bool> SramEccRequested;

  for (const std::string &Feature : Features.getFeatures()) {
    if (Feature == "+xnack")
      XnackRequested = true;
    else if (Feature == "-xnack")
      XnackRequested = false;
    else if (Feature == "+sramecc")
      SramEccRequested = true;
    else if (Feature == "-sramecc")
      SramEccRequested = false;

  if (XnackRequested) {
    if (XnackSupported) {
      if (*XnackRequested) {
        errs() << "warning: xnack 'On' was requested for a processor that does "
                  "not support it!\n";
        errs() << "warning: xnack 'Off' was requested for a processor that "
                  "does not support it!\n";

  if (SramEccRequested) {
    if (SramEccSupported) {
      if (*SramEccRequested) {
        errs() << "warning: sramecc 'On' was requested for a processor that "
                  "does not support it!\n";
        errs() << "warning: sramecc 'Off' was requested for a processor that "
                  "does not support it!\n";

  TargetID.split(TargetIDSplit, ':');

  for (const auto &FeatureString : TargetIDSplit) {
    if (FeatureString.starts_with("xnack"))
    if (FeatureString.starts_with("sramecc"))

  std::string StringRep;

  auto TargetTriple = STI.getTargetTriple();

  StreamRep << TargetTriple.getArchName() << '-' << TargetTriple.getVendorName()
            << '-' << TargetTriple.getOSName() << '-'
            << TargetTriple.getEnvironmentName() << '-';

  std::string Processor;
  Processor = STI.getCPU().str();

  std::string Features;
    Features += ":sramecc-";
    Features += ":sramecc+";
    Features += ":xnack-";
    Features += ":xnack+";

  StreamRep << Processor << Features;
                                unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);

  unsigned MaxBarriers = 16;
  return std::min(MaxWaves / N, MaxBarriers);

                               unsigned FlatWorkGroupSize) {

                             unsigned FlatWorkGroupSize) {

    return Addressable ? AddressableNumSGPRs : 108;
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);

                           bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;

  return divideCeil(std::max(1u, NumRegs), Granule);

                               unsigned DynamicVGPRBlockSize,
                               std::optional<bool> EnableWavefrontSize32) {
  if (DynamicVGPRBlockSize != 0)
    return DynamicVGPRBlockSize;

  bool IsWave32 = EnableWavefrontSize32 ? *EnableWavefrontSize32
    return IsWave32 ? 24 : 12;
    return IsWave32 ? 16 : 8;
  return IsWave32 ? 8 : 4;

                                 std::optional<bool> EnableWavefrontSize32) {
  bool IsWave32 = EnableWavefrontSize32 ? *EnableWavefrontSize32
    return IsWave32 ? 16 : 8;
  return IsWave32 ? 8 : 4;

    return IsWave32 ? 1536 : 768;
  return IsWave32 ? 1024 : 512;

  if (Features.test(Feature1024AddressableVGPRs))
    return Features.test(FeatureWavefrontSize32) ? 1024 : 512;

                                  unsigned DynamicVGPRBlockSize) {
  if (Features.test(FeatureGFX90AInsts))
  if (DynamicVGPRBlockSize != 0)

                                unsigned DynamicVGPRBlockSize) {

                                unsigned TotalNumVGPRs) {
  if (NumVGPRs < Granule)
  unsigned RoundedRegs = alignTo(NumVGPRs, Granule);
  return std::min(std::max(TotalNumVGPRs / RoundedRegs, 1u), MaxWaves);
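// Worked example (hypothetical numbers, not taken from the original file):
// with an allocation granule of 4, 256 total VGPRs and a cap of 10 waves,
// a kernel using 70 VGPRs rounds up to alignTo(70, 4) == 72 registers, so
// the occupancy computed above is min(max(256 / 72, 1), 10) == 3 waves/EU.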
                          unsigned DynamicVGPRBlockSize) {
  if (WavesPerEU >= MaxWavesPerEU)

  unsigned AddrsableNumVGPRs =
  unsigned MaxNumVGPRs = alignDown(TotNumVGPRs / WavesPerEU, Granule);
  if (MaxNumVGPRs == alignDown(TotNumVGPRs / MaxWavesPerEU, Granule))
                                            DynamicVGPRBlockSize);
  if (WavesPerEU < MinWavesPerEU)

  unsigned MaxNumVGPRsNext = alignDown(TotNumVGPRs / (WavesPerEU + 1), Granule);
  unsigned MinNumVGPRs = 1 + std::min(MaxNumVGPRs - Granule, MaxNumVGPRsNext);
  return std::min(MinNumVGPRs, AddrsableNumVGPRs);

                          unsigned DynamicVGPRBlockSize) {
  unsigned MaxNumVGPRs =
  unsigned AddressableNumVGPRs =
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
                             std::optional<bool> EnableWavefrontSize32) {

                               unsigned DynamicVGPRBlockSize,
                               std::optional<bool> EnableWavefrontSize32) {

  return C == 'v' || C == 's' || C == 'a';

  if (RegName.consume_front("[")) {
    unsigned NumRegs = End - Idx + 1;
    return {Kind, Idx, NumRegs};

  return {Kind, Idx, 1};
std::tuple<char, unsigned, unsigned>

std::pair<unsigned, unsigned>
                        std::pair<unsigned, unsigned> Default,
                        bool OnlyFirstRequired) {
  return {Attr->first, Attr->second.value_or(Default.second)};

std::optional<std::pair<unsigned, std::optional<unsigned>>>
                        bool OnlyFirstRequired) {
  if (!A.isStringAttribute())
    return std::nullopt;

  std::pair<unsigned, std::optional<unsigned>> Ints;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return std::nullopt;

  unsigned Second = 0;
  if (Strs.second.trim().getAsInteger(0, Second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return std::nullopt;

    Ints.second = Second;
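// Illustrative sketch (not in the original file): getIntegerPairAttribute
// parses string attributes of the form "<first>,<second>". For example, a
// function carrying "amdgpu-flat-work-group-size"="64,256" yields the pair
// {64, 256}; with OnlyFirstRequired set, a value of just "64" parses and the
// wrapper above substitutes Default.second for the missing element.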
  std::optional<SmallVector<unsigned>> R =

std::optional<SmallVector<unsigned>>
    return std::nullopt;
  if (!A.isStringAttribute()) {
    Ctx.emitError(Name + " is not a string attribute");
    return std::nullopt;

    std::pair<StringRef, StringRef> Strs = S.split(',');
    if (Strs.first.trim().getAsInteger(0, IntVal)) {
      Ctx.emitError("can't parse integer attribute " + Strs.first + " in " +
      return std::nullopt;

    Ctx.emitError("attribute " + Name +
                  " has incorrect number of integers; expected " +
    return std::nullopt;

  if (Low.ule(Val) && High.ugt(Val))
  if (Low.uge(Val) && High.ult(Val))
  return (1 << (getVmcntBitWidthLo(Version.Major) +
                getVmcntBitWidthHi(Version.Major))) -
         1;

  return (1 << getLoadcntBitWidth(Version.Major)) - 1;

  return (1 << getSamplecntBitWidth(Version.Major)) - 1;

  return (1 << getBvhcntBitWidth(Version.Major)) - 1;

  return (1 << getExpcntBitWidth(Version.Major)) - 1;

  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;

  return (1 << getDscntBitWidth(Version.Major)) - 1;

  return (1 << getKmcntBitWidth(Version.Major)) - 1;

  return (1 << getStorecntBitWidth(Version.Major)) - 1;

  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
                               getExpcntBitWidth(Version.Major));
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
                                getLgkmcntBitWidth(Version.Major));
  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;

  unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);

                    getExpcntBitWidth(Version.Major));

                    getLgkmcntBitWidth(Version.Major));

                    unsigned &Expcnt, unsigned &Lgkmcnt) {

                  getVmcntBitWidthLo(Version.Major));
  return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
                  getVmcntBitShiftHi(Version.Major),
                  getVmcntBitWidthHi(Version.Major));
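// Illustrative sketch (assumption-labelled, not in the original file): on
// pre-GFX11 targets vmcnt is stored split, a low field at bit 0 plus, on
// GFX9/GFX10, a 2-bit high field at bits 14-15 (see getVmcntBitShiftHi/
// getVmcntBitWidthHi above). Assuming a 4-bit low field (its width is not
// shown in this excerpt), a count of 17 (0b1'0001) encodes as lo = 0b0001 and
// hi = 0b1, and decodeVmcnt reassembles VmcntLo | (VmcntHi << 4) == 17.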
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
                  getExpcntBitWidth(Version.Major));

  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
                  getLgkmcntBitWidth(Version.Major));

                        unsigned Expcnt, unsigned Lgkmcnt) {

  unsigned Dscnt = getBitMask(getDscntBitShift(Version.Major),
                              getDscntBitWidth(Version.Major));
    unsigned Storecnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
                                   getStorecntBitWidth(Version.Major));
    return Dscnt | Storecnt;
  unsigned Loadcnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
                                getLoadcntBitWidth(Version.Major));
  return Dscnt | Loadcnt;

      unpackBits(LoadcntDscnt, getLoadcntStorecntBitShift(Version.Major),
                 getLoadcntBitWidth(Version.Major));
  Decoded.DsCnt = unpackBits(LoadcntDscnt, getDscntBitShift(Version.Major),
                             getDscntBitWidth(Version.Major));

      unpackBits(StorecntDscnt, getLoadcntStorecntBitShift(Version.Major),
                 getStorecntBitWidth(Version.Major));
  Decoded.DsCnt = unpackBits(StorecntDscnt, getDscntBitShift(Version.Major),
                             getDscntBitWidth(Version.Major));

  return packBits(Loadcnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
                  getLoadcntBitWidth(Version.Major));

                                     unsigned Storecnt) {
  return packBits(Storecnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
                  getStorecntBitWidth(Version.Major));

                  getDscntBitWidth(Version.Major));

                                unsigned Storecnt, unsigned Dscnt) {
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (Op.isSupported(STI))
      Enc |= Op.encode(Op.Default);

                                          int Size, unsigned Code,
                                          bool &HasNonDefaultVal,
  unsigned UsedOprMask = 0;
  HasNonDefaultVal = false;
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (!Op.isSupported(STI))
    UsedOprMask |= Op.getMask();
    unsigned Val = Op.decode(Code);
    if (!Op.isValid(Val))
    HasNonDefaultVal |= (Val != Op.Default);
  return (Code & ~UsedOprMask) == 0;

                         unsigned Code, int &Idx, StringRef &Name,
                         unsigned &Val, bool &IsDefault,
  while (Idx < Size) {
    const auto &Op = Opr[Idx++];
    if (Op.isSupported(STI)) {
      Val = Op.decode(Code);
      IsDefault = (Val == Op.Default);

  if (InputVal < 0 || InputVal > Op.Max)
  return Op.encode(InputVal);

                             unsigned &UsedOprMask,
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (Op.Name == Name) {
      if (!Op.isSupported(STI)) {
      auto OprMask = Op.getMask();
      if (OprMask & UsedOprMask)
      UsedOprMask |= OprMask;

                                    HasNonDefaultVal, STI);

  return unpackBits(Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());

  return unpackBits(Encoded, getVaVdstBitShift(), getVaVdstBitWidth());

  return unpackBits(Encoded, getSaSdstBitShift(), getSaSdstBitWidth());

  return unpackBits(Encoded, getVaSdstBitShift(), getVaSdstBitWidth());

  return unpackBits(Encoded, getVaVccBitShift(), getVaVccBitWidth());

  return unpackBits(Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());

  return unpackBits(Encoded, getHoldCntBitShift(), getHoldCntWidth());

  return packBits(VmVsrc, Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());

  return packBits(VaVdst, Encoded, getVaVdstBitShift(), getVaVdstBitWidth());

  return packBits(SaSdst, Encoded, getSaSdstBitShift(), getSaSdstBitWidth());

  return packBits(VaSdst, Encoded, getVaSdstBitShift(), getVaSdstBitWidth());

  return packBits(VaVcc, Encoded, getVaVccBitShift(), getVaVccBitWidth());

  return packBits(VaSsrc, Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());

  return packBits(HoldCnt, Encoded, getHoldCntBitShift(), getHoldCntWidth());
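// Illustrative sketch (not in the original file): the field encoders and
// decoders above are just packBits/unpackBits over the shifts and widths
// defined earlier, so they round-trip. With vm_vsrc occupying bits 2-4:
//   encodeFieldVmVsrc(0xFFFF, 5) == 0xFFF7
//   decodeFieldVmVsrc(0xFFF7)    == 5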
    if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
      Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);

    if (Val.MaxIndex == 0 && Name == Val.Name)

    if (Val.MaxIndex > 0 && Name.starts_with(Val.Name)) {
      StringRef Suffix = Name.drop_front(Val.Name.size());

      if (Suffix.size() > 1 && Suffix[0] == '0')

      return Val.Tgt + Id;

namespace MTBUFFormat {

    if (Name == lookupTable[Id])

  return F.getFnAttributeAsParsedInteger("InitialPSInputAddr", 0);

  return F.getFnAttributeAsParsedInteger(
      "amdgpu-color-export",

  return F.getFnAttributeAsParsedInteger("amdgpu-depth-export", 0) != 0;

      F.getFnAttributeAsParsedInteger("amdgpu-dynamic-vgpr-block-size", 0);
  return STI.hasFeature(AMDGPU::FeatureSRAMECC);

  return STI.hasFeature(AMDGPU::FeatureMIMG_R128) &&

  return !STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) && !isCI(STI) &&

  return Version.Minor >= 3 ? 13 : 5;

  return HasSampler ? 4 : 5;

  return STI.hasFeature(AMDGPU::FeatureSouthernIslands);

  return STI.hasFeature(AMDGPU::FeatureSeaIslands);

  return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);

  return STI.hasFeature(AMDGPU::FeatureGCN3Encoding);

  return STI.hasFeature(AMDGPU::FeatureGFX10_AEncoding);

  return STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding);

  return STI.hasFeature(AMDGPU::FeatureGFX10_3Insts);

  return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);

  return STI.hasFeature(AMDGPU::FeatureGFX940Insts);

  return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);

  return STI.hasFeature(AMDGPU::FeatureMAIInsts);

  return STI.hasFeature(AMDGPU::FeatureDPPSrc1SGPR);

  return STI.hasFeature(AMDGPU::FeatureKernargPreload);

                             int32_t ArgNumVGPR) {
  if (has90AInsts && ArgNumAGPR)
    return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
  return std::max(ArgNumVGPR, ArgNumAGPR);

  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch (Reg.id()) { \
  CASE_CI_VI(FLAT_SCR) \
  CASE_CI_VI(FLAT_SCR_LO) \
  CASE_CI_VI(FLAT_SCR_HI) \
  CASE_VI_GFX9PLUS(TTMP0) \
  CASE_VI_GFX9PLUS(TTMP1) \
  CASE_VI_GFX9PLUS(TTMP2) \
  CASE_VI_GFX9PLUS(TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4) \
  CASE_VI_GFX9PLUS(TTMP5) \
  CASE_VI_GFX9PLUS(TTMP6) \
  CASE_VI_GFX9PLUS(TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8) \
  CASE_VI_GFX9PLUS(TTMP9) \
  CASE_VI_GFX9PLUS(TTMP10) \
  CASE_VI_GFX9PLUS(TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12) \
  CASE_VI_GFX9PLUS(TTMP13) \
  CASE_VI_GFX9PLUS(TTMP14) \
  CASE_VI_GFX9PLUS(TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
  CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
  CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
  CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
  CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
      TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_GFXPRE11_GFX11PLUS(M0) \
  CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
  CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \

#define CASE_CI_VI(node) \
  assert(!isSI(STI)); \
    return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9PLUS(node) \
    return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;

#define CASE_GFXPRE11_GFX11PLUS(node) \
    return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;

#define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
    return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;

#undef CASE_VI_GFX9PLUS
#undef CASE_GFXPRE11_GFX11PLUS
#undef CASE_GFXPRE11_GFX11PLUS_TO

#define CASE_CI_VI(node) \
#define CASE_VI_GFX9PLUS(node) \
  case node##_gfx9plus: \
#define CASE_GFXPRE11_GFX11PLUS(node) \
  case node##_gfx11plus: \
  case node##_gfxpre11: \
#define CASE_GFXPRE11_GFX11PLUS_TO(node, result)
  case AMDGPU::SRC_SHARED_BASE_LO:
  case AMDGPU::SRC_SHARED_BASE:
  case AMDGPU::SRC_SHARED_LIMIT_LO:
  case AMDGPU::SRC_SHARED_LIMIT:
  case AMDGPU::SRC_PRIVATE_BASE_LO:
  case AMDGPU::SRC_PRIVATE_BASE:
  case AMDGPU::SRC_PRIVATE_LIMIT_LO:
  case AMDGPU::SRC_PRIVATE_LIMIT:
  case AMDGPU::SRC_FLAT_SCRATCH_BASE_LO:
  case AMDGPU::SRC_FLAT_SCRATCH_BASE_HI:
  case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
  case AMDGPU::SRC_VCCZ:
  case AMDGPU::SRC_EXECZ:
  case AMDGPU::SRC_SCC:
  case AMDGPU::SGPR_NULL:

#undef CASE_VI_GFX9PLUS
#undef CASE_GFXPRE11_GFX11PLUS
#undef CASE_GFXPRE11_GFX11PLUS_TO
  unsigned OpType = Desc.operands()[OpNo].OperandType;

  unsigned OpType = Desc.operands()[OpNo].OperandType;

  unsigned OpType = Desc.operands()[OpNo].OperandType;
  case AMDGPU::VGPR_16RegClassID:
  case AMDGPU::VGPR_16_Lo128RegClassID:
  case AMDGPU::SGPR_LO16RegClassID:
  case AMDGPU::AGPR_LO16RegClassID:

  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VGPR_32_Lo256RegClassID:
  case AMDGPU::VRegOrLds_32RegClassID:
  case AMDGPU::AGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::AV_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
  case AMDGPU::SRegOrLds_32RegClassID:

  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
  case AMDGPU::AReg_64RegClassID:
  case AMDGPU::SReg_64_XEXECRegClassID:
  case AMDGPU::VReg_64_Align2RegClassID:
  case AMDGPU::AReg_64_Align2RegClassID:
  case AMDGPU::AV_64RegClassID:
  case AMDGPU::AV_64_Align2RegClassID:
  case AMDGPU::VReg_64_Lo256_Align2RegClassID:
  case AMDGPU::VS_64_Lo256RegClassID:

  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::SReg_96RegClassID:
  case AMDGPU::VReg_96RegClassID:
  case AMDGPU::AReg_96RegClassID:
  case AMDGPU::VReg_96_Align2RegClassID:
  case AMDGPU::AReg_96_Align2RegClassID:
  case AMDGPU::AV_96RegClassID:
  case AMDGPU::AV_96_Align2RegClassID:
  case AMDGPU::VReg_96_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
  case AMDGPU::AReg_128RegClassID:
  case AMDGPU::VReg_128_Align2RegClassID:
  case AMDGPU::AReg_128_Align2RegClassID:
  case AMDGPU::AV_128RegClassID:
  case AMDGPU::AV_128_Align2RegClassID:
  case AMDGPU::SReg_128_XNULLRegClassID:
  case AMDGPU::VReg_128_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_160RegClassID:
  case AMDGPU::SReg_160RegClassID:
  case AMDGPU::VReg_160RegClassID:
  case AMDGPU::AReg_160RegClassID:
  case AMDGPU::VReg_160_Align2RegClassID:
  case AMDGPU::AReg_160_Align2RegClassID:
  case AMDGPU::AV_160RegClassID:
  case AMDGPU::AV_160_Align2RegClassID:
  case AMDGPU::VReg_160_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_192RegClassID:
  case AMDGPU::SReg_192RegClassID:
  case AMDGPU::VReg_192RegClassID:
  case AMDGPU::AReg_192RegClassID:
  case AMDGPU::VReg_192_Align2RegClassID:
  case AMDGPU::AReg_192_Align2RegClassID:
  case AMDGPU::AV_192RegClassID:
  case AMDGPU::AV_192_Align2RegClassID:
  case AMDGPU::VReg_192_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_224RegClassID:
  case AMDGPU::SReg_224RegClassID:
  case AMDGPU::VReg_224RegClassID:
  case AMDGPU::AReg_224RegClassID:
  case AMDGPU::VReg_224_Align2RegClassID:
  case AMDGPU::AReg_224_Align2RegClassID:
  case AMDGPU::AV_224RegClassID:
  case AMDGPU::AV_224_Align2RegClassID:
  case AMDGPU::VReg_224_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
  case AMDGPU::AReg_256RegClassID:
  case AMDGPU::VReg_256_Align2RegClassID:
  case AMDGPU::AReg_256_Align2RegClassID:
  case AMDGPU::AV_256RegClassID:
  case AMDGPU::AV_256_Align2RegClassID:
  case AMDGPU::SReg_256_XNULLRegClassID:
  case AMDGPU::VReg_256_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_288RegClassID:
  case AMDGPU::SReg_288RegClassID:
  case AMDGPU::VReg_288RegClassID:
  case AMDGPU::AReg_288RegClassID:
  case AMDGPU::VReg_288_Align2RegClassID:
  case AMDGPU::AReg_288_Align2RegClassID:
  case AMDGPU::AV_288RegClassID:
  case AMDGPU::AV_288_Align2RegClassID:
  case AMDGPU::VReg_288_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_320RegClassID:
  case AMDGPU::SReg_320RegClassID:
  case AMDGPU::VReg_320RegClassID:
  case AMDGPU::AReg_320RegClassID:
  case AMDGPU::VReg_320_Align2RegClassID:
  case AMDGPU::AReg_320_Align2RegClassID:
  case AMDGPU::AV_320RegClassID:
  case AMDGPU::AV_320_Align2RegClassID:
  case AMDGPU::VReg_320_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_352RegClassID:
  case AMDGPU::SReg_352RegClassID:
  case AMDGPU::VReg_352RegClassID:
  case AMDGPU::AReg_352RegClassID:
  case AMDGPU::VReg_352_Align2RegClassID:
  case AMDGPU::AReg_352_Align2RegClassID:
  case AMDGPU::AV_352RegClassID:
  case AMDGPU::AV_352_Align2RegClassID:
  case AMDGPU::VReg_352_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_384RegClassID:
  case AMDGPU::SReg_384RegClassID:
  case AMDGPU::VReg_384RegClassID:
  case AMDGPU::AReg_384RegClassID:
  case AMDGPU::VReg_384_Align2RegClassID:
  case AMDGPU::AReg_384_Align2RegClassID:
  case AMDGPU::AV_384RegClassID:
  case AMDGPU::AV_384_Align2RegClassID:
  case AMDGPU::VReg_384_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
  case AMDGPU::AReg_512RegClassID:
  case AMDGPU::VReg_512_Align2RegClassID:
  case AMDGPU::AReg_512_Align2RegClassID:
  case AMDGPU::AV_512RegClassID:
  case AMDGPU::AV_512_Align2RegClassID:
  case AMDGPU::VReg_512_Lo256_Align2RegClassID:

  case AMDGPU::SGPR_1024RegClassID:
  case AMDGPU::SReg_1024RegClassID:
  case AMDGPU::VReg_1024RegClassID:
  case AMDGPU::AReg_1024RegClassID:
  case AMDGPU::VReg_1024_Align2RegClassID:
  case AMDGPU::AReg_1024_Align2RegClassID:
  case AMDGPU::AV_1024RegClassID:
  case AMDGPU::AV_1024_Align2RegClassID:
  case AMDGPU::VReg_1024_Lo256_Align2RegClassID:
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);

         (Val == 0x3e22f983 && HasInv2Pi);

  return Val == 0x3F00 ||

  return Val == 0x3C00 ||

  return 192 + std::abs(Signed);
  case 0x3800: return 240;
  case 0xB800: return 241;
  case 0x3C00: return 242;
  case 0xBC00: return 243;
  case 0x4000: return 244;
  case 0xC000: return 245;
  case 0x4400: return 246;
  case 0xC400: return 247;
  case 0x3118: return 248;

  case 0x3F000000: return 240;
  case 0xBF000000: return 241;
  case 0x3F800000: return 242;
  case 0xBF800000: return 243;
  case 0x40000000: return 244;
  case 0xC0000000: return 245;
  case 0x40800000: return 246;
  case 0xC0800000: return 247;
  case 0x3E22F983: return 248;

  return 192 + std::abs(Signed);
  case 0x3F00: return 240;
  case 0xBF00: return 241;
  case 0x3F80: return 242;
  case 0xBF80: return 243;
  case 0x4000: return 244;
  case 0xC000: return 245;
  case 0x4080: return 246;
  case 0xC080: return 247;
  case 0x3E22: return 248;

  return std::nullopt;
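// Illustrative summary (not in the original file): the switches above map the
// bit patterns of +/-0.5, +/-1.0, +/-2.0, +/-4.0 and 1/(2*pi) in the operand's
// format to the hardware inline-constant codes 240-247 and 248. For example,
// 1.0 is 0x3C00 in fp16, 0x3F800000 in fp32 and 0x3F80 in bf16, and all three
// map to 242; small negative integers take the "192 + std::abs(Signed)" path.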
  return Imm & 0xffff;

  return A->hasAttribute(Attribute::InReg) ||
         A->hasAttribute(Attribute::ByVal);

  return A->hasAttribute(Attribute::InReg);
                                int64_t EncodedOffset) {

                                      int64_t EncodedOffset, bool IsBuffer) {
  if (IsBuffer && EncodedOffset < 0)

  return (ByteOffset & 3) == 0;

  return ByteOffset >> 2;

                                        int64_t ByteOffset, bool IsBuffer,
    return std::nullopt;

    return isInt<24>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
    return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
    return std::nullopt;

             ? std::optional<int64_t>(EncodedOffset)

                                                     int64_t ByteOffset) {
    return std::nullopt;
  return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
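// Illustrative sketch (not in the original file): SMRD offsets must be
// dword-aligned ((ByteOffset & 3) == 0), and on subtargets that encode SMRD
// immediates in dword units convertSMRDOffsetUnits shifts right by 2, so a
// byte offset of 1024 becomes an encoded offset of 256, while a byte offset
// of 1026 fails the isDwordAligned check above.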
struct SourceOfDivergence {
const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

const AlwaysUniform *lookupAlwaysUniform(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#define GET_UniformIntrinsics_IMPL
#define GET_Gfx9BufferFormat_IMPL
#define GET_Gfx10BufferFormat_IMPL
#define GET_Gfx11PlusBufferFormat_IMPL
#include "AMDGPUGenSearchableTables.inc"

  return lookupSourceOfDivergence(IntrID);

  return lookupAlwaysUniform(IntrID);

  return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(
                                BitsPerComp, NumComponents, NumFormat)
             ? getGfx10BufferFormatInfo(BitsPerComp, NumComponents, NumFormat)
             : getGfx9BufferFormatInfo(BitsPerComp, NumComponents, NumFormat);

             : getGfx9BufferFormatInfo(Format);
  const unsigned VGPRClasses[] = {
      AMDGPU::VGPR_16RegClassID,  AMDGPU::VGPR_32RegClassID,
      AMDGPU::VReg_64RegClassID,  AMDGPU::VReg_96RegClassID,
      AMDGPU::VReg_128RegClassID, AMDGPU::VReg_160RegClassID,
      AMDGPU::VReg_192RegClassID, AMDGPU::VReg_224RegClassID,
      AMDGPU::VReg_256RegClassID, AMDGPU::VReg_288RegClassID,
      AMDGPU::VReg_320RegClassID, AMDGPU::VReg_352RegClassID,
      AMDGPU::VReg_384RegClassID, AMDGPU::VReg_512RegClassID,
      AMDGPU::VReg_1024RegClassID};

  for (unsigned RCID : VGPRClasses) {

  unsigned Enc = MRI.getEncodingValue(Reg);

  unsigned Enc = MRI.getEncodingValue(Reg);

  if (RC->getID() == AMDGPU::VGPR_16RegClassID) {
std::pair<const AMDGPU::OpName *, const AMDGPU::OpName *>
  static const AMDGPU::OpName VOPOps[4] = {
      AMDGPU::OpName::src0, AMDGPU::OpName::src1, AMDGPU::OpName::src2,
      AMDGPU::OpName::vdst};
  static const AMDGPU::OpName VDSOps[4] = {
      AMDGPU::OpName::addr, AMDGPU::OpName::data0, AMDGPU::OpName::data1,
      AMDGPU::OpName::vdst};
  static const AMDGPU::OpName FLATOps[4] = {
      AMDGPU::OpName::vaddr, AMDGPU::OpName::vdata,
      AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdst};
  static const AMDGPU::OpName BUFOps[4] = {
      AMDGPU::OpName::vaddr, AMDGPU::OpName::NUM_OPERAND_NAMES,
      AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdata};
  static const AMDGPU::OpName VIMGOps[4] = {
      AMDGPU::OpName::vaddr0, AMDGPU::OpName::vaddr1, AMDGPU::OpName::vaddr2,
      AMDGPU::OpName::vdata};

  static const AMDGPU::OpName VOPDOpsX[4] = {
      AMDGPU::OpName::src0X, AMDGPU::OpName::vsrc1X, AMDGPU::OpName::vsrc2X,
      AMDGPU::OpName::vdstX};
  static const AMDGPU::OpName VOPDOpsY[4] = {
      AMDGPU::OpName::src0Y, AMDGPU::OpName::vsrc1Y, AMDGPU::OpName::vsrc2Y,
      AMDGPU::OpName::vdstY};

  static const AMDGPU::OpName VOP2MADMKOps[4] = {
      AMDGPU::OpName::src0, AMDGPU::OpName::NUM_OPERAND_NAMES,
      AMDGPU::OpName::src1, AMDGPU::OpName::vdst};

  unsigned TSFlags = Desc.TSFlags;

  switch (Desc.getOpcode()) {
  case AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32:
  case AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32_gfx1250:
  case AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64:
  case AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64_gfx1250:
  case AMDGPU::V_FMAMK_F16:
  case AMDGPU::V_FMAMK_F16_t16:
  case AMDGPU::V_FMAMK_F16_t16_gfx12:
  case AMDGPU::V_FMAMK_F16_fake16:
  case AMDGPU::V_FMAMK_F16_fake16_gfx12:
  case AMDGPU::V_FMAMK_F32:
  case AMDGPU::V_FMAMK_F32_gfx12:
  case AMDGPU::V_FMAMK_F64:
  case AMDGPU::V_FMAMK_F64_gfx1250:
    return {VOP2MADMKOps, nullptr};

    return {VOPOps, nullptr};

    return {VDSOps, nullptr};

    return {FLATOps, nullptr};

    return {BUFOps, nullptr};

    return {VIMGOps, nullptr};

    return {VOPDOpsX, VOPDOpsY};

             " these instructions are not expected on gfx1250");
  for (auto OpName : {OpName::vdst, OpName::src0, OpName::src1, OpName::src2}) {

    if (RegClass == AMDGPU::VReg_64RegClassID ||
        RegClass == AMDGPU::VReg_64_Align2RegClassID)

  case AMDGPU::V_MUL_LO_U32_e64:
  case AMDGPU::V_MUL_LO_U32_e64_dpp:
  case AMDGPU::V_MUL_LO_U32_e64_dpp_gfx1250:
  case AMDGPU::V_MUL_HI_U32_e64:
  case AMDGPU::V_MUL_HI_U32_e64_dpp:
  case AMDGPU::V_MUL_HI_U32_e64_dpp_gfx1250:
  case AMDGPU::V_MUL_HI_I32_e64:
  case AMDGPU::V_MUL_HI_I32_e64_dpp:
  case AMDGPU::V_MUL_HI_I32_e64_dpp_gfx1250:
  case AMDGPU::V_MAD_U32_e64:
  case AMDGPU::V_MAD_U32_e64_dpp:
  case AMDGPU::V_MAD_U32_e64_dpp_gfx1250:

  if (!ST.hasFeature(AMDGPU::FeatureDPALU_DPP))

  return ST.hasFeature(AMDGPU::FeatureGFX1250Insts);

  if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize32768))
  if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize65536))
  if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize163840))
  if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize327680))

  case AMDGPU::V_PK_ADD_F32:
  case AMDGPU::V_PK_ADD_F32_gfx12:
  case AMDGPU::V_PK_MUL_F32:
  case AMDGPU::V_PK_MUL_F32_gfx12:
  case AMDGPU::V_PK_FMA_F32:
  case AMDGPU::V_PK_FMA_F32_gfx12:
    OS << EncoNoCluster << ',' << EncoNoCluster << ',' << EncoNoCluster;
    return Buffer.c_str();

    OS << EncoVariableDims << ',' << EncoVariableDims << ','
       << EncoVariableDims;
    return Buffer.c_str();

  OS << Dims[0] << ',' << Dims[1] << ',' << Dims[2];
  return Buffer.c_str();

  std::optional<SmallVector<unsigned>> Attr =

  if (!Attr.has_value())
  else if (all_of(*Attr, [](unsigned V) { return V == EncoNoCluster; }))
  else if (all_of(*Attr, [](unsigned V) { return V == EncoVariableDims; }))

  A.Dims = {(*Attr)[0], (*Attr)[1], (*Attr)[2]};

    OS << "Unsupported";
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static llvm::cl::opt< unsigned > DefaultAMDHSACodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden, llvm::cl::init(llvm::AMDGPU::AMDHSA_COV6), llvm::cl::desc("Set default AMDHSA Code Object Version (module flag " "or asm directive still take priority if present)"))
Provides AMDGPU specific target descriptions.
MC layer struct for AMDGPUMCKernelCodeT, provides MCExpr functionality where required.
@ AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Register const TargetRegisterInfo * TRI
#define S_00B848_MEM_ORDERED(x)
#define S_00B848_WGP_MODE(x)
#define S_00B848_FWD_PROGRESS(x)
unsigned unsigned DefaultVal
static const int BlockSize
static ClusterDimsAttr get(const Function &F)
ClusterDimsAttr()=default
std::string to_string() const
const std::array< unsigned, 3 > & getDims() const
bool isSramEccSupported() const
void setTargetIDFromFeaturesString(StringRef FS)
TargetIDSetting getXnackSetting() const
AMDGPUTargetID(const MCSubtargetInfo &STI)
bool isXnackSupported() const
void setTargetIDFromTargetIDStream(StringRef TargetID)
std::string toString() const
TargetIDSetting getSramEccSetting() const
unsigned getIndexInParsedOperands(unsigned CompOprIdx) const
unsigned getIndexOfDstInParsedOperands() const
unsigned getIndexOfSrcInParsedOperands(unsigned CompSrcIdx) const
int getBitOp3OperandIdx() const
unsigned getCompParsedSrcOperandsNum() const
std::optional< unsigned > getInvalidCompOperandIndex(std::function< MCRegister(unsigned, unsigned)> GetRegIdx, const MCRegisterInfo &MRI, bool SkipSrc=false, bool AllowSameVGPR=false, bool VOPD3=false) const
std::array< MCRegister, Component::MAX_OPR_NUM > RegIndices
This class represents an incoming formal argument to a Function.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
constexpr bool test(unsigned I) const
unsigned getAddressSpace() const
This is an important class for using LLVM in a threaded context.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayStore() const
Return true if this instruction could possibly modify memory.
bool mayLoad() const
Return true if this instruction could possibly read memory.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
unsigned getOpcode() const
Return the opcode number for this descriptor.
Interface to description of machine instruction set.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
int16_t getOpRegClassID(const MCOperandInfo &OpInfo, unsigned HwModeId) const
Return the ID of the register class to use for OpInfo, for the active HwMode HwModeId.
This holds information about one operand of a machine instruction, indicating the register class for ...
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
constexpr unsigned id() const
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
const MDOperand & getOperand(unsigned I) const
unsigned getNumOperands() const
Return number of MDNode operands.
A Module instance is used to store all the information related to an LLVM module.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
A wrapper around a string literal that serves as a proxy for constructing global tables of StringRefs...
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr size_t size() const
size - Get the string size.
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Manages the enabling and disabling of subtarget specific features.
const std::vector< std::string > & getFeatures() const
Returns the vector of individual subtarget features.
Triple - Helper class for working with autoconf configuration names.
OSType getOS() const
Get the parsed operating system type of this triple.
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isAMDGCN() const
Tests whether the target is AMDGCN.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
std::string & str()
Returns the string's reference.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
unsigned decodeFieldVaVcc(unsigned Encoded)
unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc)
unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt)
bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc)
unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst)
unsigned decodeFieldSaSdst(unsigned Encoded)
unsigned decodeFieldVaSdst(unsigned Encoded)
unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc)
unsigned decodeFieldVaSsrc(unsigned Encoded)
int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst)
const CustomOperandVal DepCtrInfo[]
bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
unsigned decodeFieldVaVdst(unsigned Encoded)
unsigned decodeFieldHoldCnt(unsigned Encoded)
int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI)
unsigned decodeFieldVmVsrc(unsigned Encoded)
unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst)
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
static constexpr ExpTgt ExpTgtInfo[]
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
unsigned getTgtId(const StringRef Name)
@ ET_DUAL_SRC_BLEND_MAX_IDX
constexpr uint32_t VersionMinor
HSA metadata minor version.
constexpr uint32_t VersionMajor
HSA metadata major version.
@ COMPLETION_ACTION_OFFSET
@ MULTIGRID_SYNC_ARG_OFFSET
unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI, std::optional< bool > EnableWavefrontSize32)
@ FIXED_NUM_SGPRS_FOR_INIT_BUG
unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI)
unsigned getArchVGPRAllocGranule()
For subtargets with a unified VGPR file and mixed ArchVGPR/AGPR usage, returns the allocation granule...
unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getWavefrontSize(const MCSubtargetInfo *STI)
unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize)
unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI)
unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, bool FlatScrUsed, bool XNACKUsed)
unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI)
unsigned getLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getEUsPerCU(const MCSubtargetInfo *STI)
unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI)
unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
static TargetIDSetting getTargetIDSettingFromFeatureString(StringRef FeatureString)
unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, bool Addressable)
unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs)
unsigned getMinWavesPerEU(const MCSubtargetInfo *STI)
unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI)
unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, std::optional< bool > EnableWavefrontSize32)
unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves, AMDGPUSubtarget::Generation Gen)
static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs, unsigned Granule)
unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI)
unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI)
unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize)
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId)
bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, uint16_t &StreamId, const MCSubtargetInfo &STI)
bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, const MCSubtargetInfo &STI, bool Strict)
StringRef getMsgOpName(int64_t MsgId, uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a sendmsg operation.
static uint64_t getMsgIdMask(const MCSubtargetInfo &STI)
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, bool Strict)
constexpr unsigned VOPD_VGPR_BANK_MASKS[]
constexpr unsigned COMPONENTS_NUM
constexpr unsigned VOPD3_VGPR_BANK_MASKS[]
bool isPackedFP32Inst(unsigned Opc)
bool isGCN3Encoding(const MCSubtargetInfo &STI)
bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi)
bool isGFX10_BEncoding(const MCSubtargetInfo &STI)
bool isInlineValue(MCRegister Reg)
bool isGFX10_GFX11(const MCSubtargetInfo &STI)
bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType)
LLVM_READONLY const MIMGInfo * getMIMGInfo(unsigned Opc)
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from given Waitcnt for given isa Version, and writes decoded values...
bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi)
bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI)
Is Reg - scalar register.
uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset)
Convert ByteOffset to dwords if the subtarget uses dword SMRD immediate offsets.
static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Storecnt)
MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI)
If Reg is a pseudo reg, return the correct hardware register given STI otherwise return Reg.
static bool hasSMEMByteOffset(const MCSubtargetInfo &ST)
bool isVOPCAsmOnly(unsigned Opc)
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, unsigned VDataDwords, unsigned VAddrDwords)
bool getMTBUFHasSrsrc(unsigned Opc)
std::optional< int64_t > getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST, int64_t ByteOffset)
bool getWMMAIsXDL(unsigned Opc)
uint8_t wmmaScaleF8F6F4FormatToNumRegs(unsigned Fmt)
static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr, int Size, unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
bool isGFX10Before1030(const MCSubtargetInfo &STI)
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo)
Does this operand support only inlinable literals?
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc)
const int OPR_ID_UNSUPPORTED
bool shouldEmitConstantsToTextSection(const Triple &TT)
bool isInlinableLiteralV2I16(uint32_t Literal)
int getMTBUFElements(unsigned Opc)
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
static int encodeCustomOperandVal(const CustomOperandVal &Op, int64_t InputVal)
unsigned getTemporalHintType(const MCInstrDesc TID)
int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, int32_t ArgNumVGPR)
bool isGFX10(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2BF16(uint32_t Literal)
unsigned getMaxNumUserSGPRs(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV216(bool IsFloat, uint32_t Literal)
FPType getFPDstSelType(unsigned Opc)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
bool hasA16(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset, bool IsBuffer)
bool isGFX12Plus(const MCSubtargetInfo &STI)
unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler)
const MCRegisterClass * getVGPRPhysRegClass(MCRegister Reg, const MCRegisterInfo &MRI)
bool hasPackedD16(const MCSubtargetInfo &STI)
unsigned getStorecntBitMask(const IsaVersion &Version)
unsigned getLdsDwGranularity(const MCSubtargetInfo &ST)
bool isGFX940(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2F16(uint32_t Literal)
bool isHsaAbi(const MCSubtargetInfo &STI)
bool isGFX11(const MCSubtargetInfo &STI)
const int OPR_VAL_INVALID
bool getSMEMIsBuffer(unsigned Opc)
bool isGFX10_3_GFX11(const MCSubtargetInfo &STI)
bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val)
Checks whether Val lies inside MD, a !range-like metadata node.
uint8_t mfmaScaleF8F6F4FormatToNumRegs(unsigned EncodingVal)
unsigned getVOPDOpcode(unsigned Opc, bool VOPD3)
bool isGroupSegment(const GlobalValue *GV)
LLVM_ABI IsaVersion getIsaVersion(StringRef GPU)
bool getMTBUFHasSoffset(unsigned Opc)
bool hasXNACK(const MCSubtargetInfo &STI)
bool isValid32BitLiteral(uint64_t Val, bool IsFP64)
static unsigned getCombinedCountBitMask(const IsaVersion &Version, bool IsStore)
CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3)
unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt)
Encodes Vmcnt, Expcnt and Lgkmcnt into Waitcnt for given isa Version.
bool isVOPC64DPP(unsigned Opc)
int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements)
bool getMAIIsGFX940XDL(unsigned Opc)
bool isSI(const MCSubtargetInfo &STI)
unsigned getDefaultAMDHSACodeObjectVersion()
bool isReadOnlySegment(const GlobalValue *GV)
bool isArgPassedInSGPR(const Argument *A)
bool isIntrinsicAlwaysUniform(unsigned IntrID)
int getMUBUFBaseOpcode(unsigned Opc)
unsigned getAMDHSACodeObjectVersion(const Module &M)
unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt)
unsigned getWaitcntBitMask(const IsaVersion &Version)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
bool getVOP3IsSingle(unsigned Opc)
bool isGFX9(const MCSubtargetInfo &STI)
bool isDPALU_DPP32BitOpc(unsigned Opc)
bool getVOP1IsSingle(unsigned Opc)
static bool isDwordAligned(uint64_t ByteOffset)
unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST)
bool isGFX10_AEncoding(const MCSubtargetInfo &STI)
bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a KImm operand?
bool getHasColorExport(const Function &F)
int getMTBUFBaseOpcode(unsigned Opc)
bool isGFX90A(const MCSubtargetInfo &STI)
unsigned getSamplecntBitMask(const IsaVersion &Version)
unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion)
std::tuple< char, unsigned, unsigned > parseAsmPhysRegName(StringRef RegName)
Returns a valid charcode or 0 in the first entry if this is a valid physical register name.
bool hasSRAMECC(const MCSubtargetInfo &STI)
bool getHasDepthExport(const Function &F)
bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI)
bool getMUBUFHasVAddr(unsigned Opc)
bool isTrue16Inst(unsigned Opc)
unsigned getVGPREncodingMSBs(MCRegister Reg, const MCRegisterInfo &MRI)
std::pair< unsigned, unsigned > getVOPDComponents(unsigned VOPDOpcode)
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
bool isGFX12(const MCSubtargetInfo &STI)
unsigned getInitialPSInputAddr(const Function &F)
unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Expcnt)
bool isAsyncStore(unsigned Opc)
unsigned getDynamicVGPRBlockSize(const Function &F)
unsigned getKmcntBitMask(const IsaVersion &Version)
MCRegister getVGPRWithMSBs(MCRegister Reg, unsigned MSBs, const MCRegisterInfo &MRI)
If Reg is a low VGPR, return the corresponding high VGPR with MSBs set.
unsigned getVmcntBitMask(const IsaVersion &Version)
bool isNotGFX10Plus(const MCSubtargetInfo &STI)
bool hasMAIInsts(const MCSubtargetInfo &STI)
unsigned getBitOp2(unsigned Opc)
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
unsigned getXcntBitMask(const IsaVersion &Version)
bool isGenericAtomic(unsigned Opc)
const MFMA_F8F6F4_Info * getWMMA_F8F6F4_WithFormatArgs(unsigned FmtA, unsigned FmtB, unsigned F8F8Opcode)
Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt)
bool isGFX8Plus(const MCSubtargetInfo &STI)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating-point operands.
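A minimal sketch of the rule this helper checks, assuming the usual AMDGPU inline-constant range of [-16, 64] for integers (looksInlinableInt is a stand-in for illustration, not the real helper):

#include <cstdint>

// Assumed range; the real check lives in isInlinableIntLiteral.
constexpr bool looksInlinableInt(int64_t Literal) {
  return Literal >= -16 && Literal <= 64;
}

static_assert(looksInlinableInt(64) && !looksInlinableInt(65));
static_assert(looksInlinableInt(-16) && !looksInlinableInt(-17));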
unsigned getLgkmcntBitMask(const IsaVersion &Version)
bool getMUBUFTfe(unsigned Opc)
unsigned getBvhcntBitMask(const IsaVersion &Version)
bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST)
bool hasMIMG_R128(const MCSubtargetInfo &STI)
bool hasGFX10_3Insts(const MCSubtargetInfo &STI)
std::pair< const AMDGPU::OpName *, const AMDGPU::OpName * > getVGPRLoweringOperandTables(const MCInstrDesc &Desc)
bool hasG16(const MCSubtargetInfo &STI)
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements)
unsigned getExpcntBitMask(const IsaVersion &Version)
bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI)
bool getMUBUFHasSoffset(unsigned Opc)
bool isNotGFX11Plus(const MCSubtargetInfo &STI)
bool isGFX11Plus(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV2F16(uint32_t Literal)
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a floating-point operand?
std::tuple< char, unsigned, unsigned > parseAsmConstraintPhysReg(StringRef Constraint)
Returns a valid charcode or 0 in the first entry if this is a valid physical register constraint.
unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion)
static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr, int Size, const MCSubtargetInfo &STI)
static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Loadcnt)
bool isGFX10Plus(const MCSubtargetInfo &STI)
static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size, unsigned Code, int &Idx, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
static bool isValidRegPrefix(char C)
std::optional< int64_t > getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset, bool IsBuffer, bool HasSOffset)
bool isGlobalSegment(const GlobalValue *GV)
int64_t encode32BitLiteral(int64_t Imm, OperandType Type, bool IsLit)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
@ OPERAND_REG_INLINE_C_LAST
@ OPERAND_REG_INLINE_C_FP64
@ OPERAND_REG_INLINE_C_BF16
@ OPERAND_REG_INLINE_C_V2BF16
@ OPERAND_REG_IMM_V2INT16
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
@ OPERAND_REG_INLINE_AC_FIRST
@ OPERAND_REG_IMM_NOINLINE_V2FP16
@ OPERAND_REG_INLINE_C_V2FP16
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
@ OPERAND_REG_INLINE_AC_FP32
@ OPERAND_REG_IMM_V2INT32
@ OPERAND_REG_INLINE_C_FIRST
@ OPERAND_REG_INLINE_C_FP32
@ OPERAND_REG_INLINE_AC_LAST
@ OPERAND_REG_INLINE_C_INT32
@ OPERAND_REG_INLINE_C_V2INT16
@ OPERAND_REG_INLINE_AC_FP64
@ OPERAND_REG_INLINE_C_FP16
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
void initDefaultAMDKernelCodeT(AMDGPUMCKernelCodeT &KernelCode, const MCSubtargetInfo *STI)
bool isNotGFX9Plus(const MCSubtargetInfo &STI)
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
bool hasGDS(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset)
bool isGFX9Plus(const MCSubtargetInfo &STI)
bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI)
const int OPR_ID_DUPLICATE
bool isVOPD(unsigned Opc)
VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY)
unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Vmcnt)
unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt)
bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc)
Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt)
std::optional< unsigned > getInlineEncodingV2I16(uint32_t Literal)
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
static unsigned encodeStorecntDscnt(const IsaVersion &Version, unsigned Storecnt, unsigned Dscnt)
bool isGFX1250(const MCSubtargetInfo &STI)
int getMCOpcode(uint16_t Opcode, unsigned Gen)
const MIMGBaseOpcodeInfo * getMIMGBaseOpcode(unsigned Opc)
bool isVI(const MCSubtargetInfo &STI)
bool isTensorStore(unsigned Opc)
bool getMUBUFIsBufferInv(unsigned Opc)
bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
std::optional< unsigned > getInlineEncodingV2BF16(uint32_t Literal)
static int encodeCustomOperand(const CustomOperandVal *Opr, int Size, const StringRef Name, int64_t InputVal, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned hasKernargPreload(const MCSubtargetInfo &STI)
bool supportsWGP(const MCSubtargetInfo &STI)
bool isCI(const MCSubtargetInfo &STI)
unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Lgkmcnt)
bool getVOP2IsSingle(unsigned Opc)
bool getMAIIsDGEMM(unsigned Opc)
Returns true if the MAI operation is a double-precision GEMM.
LLVM_READONLY const MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion)
SmallVector< unsigned > getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size, unsigned DefaultVal)
int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels)
bool isNotGFX12Plus(const MCSubtargetInfo &STI)
bool getMTBUFHasVAddr(unsigned Opc)
unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt)
uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion)
std::pair< unsigned, unsigned > getIntegerPairAttribute(const Function &F, StringRef Name, std::pair< unsigned, unsigned > Default, bool OnlyFirstRequired)
unsigned getLoadcntBitMask(const IsaVersion &Version)
bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi)
bool hasVOPD(const MCSubtargetInfo &STI)
int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily, bool VOPD3)
static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Dscnt)
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable.
const MFMA_F8F6F4_Info * getMFMA_F8F6F4_WithFormatArgs(unsigned CBSZ, unsigned BLGP, unsigned F8F8Opcode)
unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion)
bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI)
bool isGFX9_GFX10(const MCSubtargetInfo &STI)
int getMUBUFElements(unsigned Opc)
static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt, unsigned Dscnt)
const GcnBufferFormatInfo * getGcnBufferFormatInfo(uint8_t BitsPerComp, uint8_t NumComponents, uint8_t NumFormat, const MCSubtargetInfo &STI)
unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc)
bool isPermlane16(unsigned Opc)
bool getMUBUFHasSrsrc(unsigned Opc)
unsigned getDscntBitMask(const IsaVersion &Version)
bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
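A self-contained sketch of the Mask() idea (the Flags enum, its LargestValue member, and the mask helper below are all hypothetical; LLVM's own version lives in llvm/ADT/BitmaskEnum.h):

#include <cstdint>
#include <type_traits>

enum class Flags : uint8_t { A = 1, B = 2, C = 4, LargestValue = C };

// Fill every bit from bit 0 up to and including the high-order bit of
// the largest enumerator: 0b100 -> 0b111. Three shifts are enough for
// an 8-bit underlying type; wider types would need more.
template <typename E> constexpr std::underlying_type_t<E> mask() {
  std::underlying_type_t<E> M =
      static_cast<std::underlying_type_t<E>>(E::LargestValue);
  M |= M >> 1;
  M |= M >> 2;
  M |= M >> 4;
  return M;
}

static_assert(mask<Flags>() == 0b111);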
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tessellation and geometry are not in use, or otherwise copy shader if one is needed).
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_Gfx
Used for AMD graphics targets.
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
@ ELFABIVERSION_AMDGPU_HSA_V4
@ ELFABIVERSION_AMDGPU_HSA_V5
@ ELFABIVERSION_AMDGPU_HSA_V6
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
testing::Matcher< const detail::ErrorHolder & > Failed()
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer that is less than or equal to Value and is congruent to Skew modulo Align.
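Worked examples (the values are chosen here for illustration):

#include "llvm/Support/MathExtras.h"

static_assert(llvm::alignDown(13u, 8u) == 8u);      // largest multiple of 8 <= 13
static_assert(llvm::alignDown(13u, 8u, 3u) == 11u); // largest x <= 13 with x % 8 == 3
static_assert(llvm::alignDown(16u, 8u) == 16u);     // already aligned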
std::string utostr(uint64_t X, bool isNeg=false)
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
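Worked examples for the signed and unsigned width checks (values chosen for illustration):

#include "llvm/Support/MathExtras.h"

static_assert(llvm::isInt<8>(127) && !llvm::isInt<8>(128));   // signed byte range
static_assert(llvm::isUInt<8>(255) && !llvm::isUInt<8>(256)); // unsigned byte range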
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
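A worked example of splitting a 64-bit value into its halves (the constant is arbitrary):

#include "llvm/Support/MathExtras.h"

static_assert(llvm::Hi_32(0x1122334455667788ULL) == 0x11223344U);
static_assert(llvm::Lo_32(0x1122334455667788ULL) == 0x55667788U);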
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
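Worked examples (illustrative values):

#include "llvm/Support/MathExtras.h"

static_assert(llvm::divideCeil(7u, 4u) == 2u); // ceil(7/4)
static_assert(llvm::divideCeil(8u, 4u) == 2u); // exact division
static_assert(llvm::divideCeil(9u, 4u) == 3u);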
To bit_cast(const From &from) noexcept
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
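A brief usage sketch (alignToExamples is a hypothetical function; runtime asserts are used rather than static_asserts to avoid assuming this overload is constexpr):

#include "llvm/Support/Alignment.h"
#include <cassert>

void alignToExamples() {
  assert(llvm::alignTo(10, llvm::Align(8)) == 16); // round 10 up to 16
  assert(llvm::alignTo(16, llvm::Align(8)) == 16); // already a multiple
}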
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
@ AlwaysUniform
The result values are always uniform.
@ Default
The result values are uniform if and only if all operands are uniform.
AMD Kernel Code Object (amd_kernel_code_t).
uint16_t amd_machine_version_major
uint16_t amd_machine_kind
uint16_t amd_machine_version_stepping
uint8_t private_segment_alignment
int64_t kernel_code_entry_byte_offset
uint32_t amd_kernel_code_version_major
uint16_t amd_machine_version_minor
uint8_t group_segment_alignment
uint8_t kernarg_segment_alignment
uint32_t amd_kernel_code_version_minor
uint64_t compute_pgm_resource_registers
Instruction set architecture version.
Represents the counter values to wait for in an s_waitcnt instruction.