1//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AMDGPUBaseInfo.h"
10#include "AMDGPU.h"
11#include "AMDGPUAsmUtils.h"
12#include "AMDKernelCodeT.h"
13#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
14#include "llvm/ADT/StringExtras.h"
15#include "llvm/BinaryFormat/ELF.h"
17#include "llvm/IR/Attributes.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/Function.h"
20#include "llvm/IR/GlobalValue.h"
21#include "llvm/IR/IntrinsicsAMDGPU.h"
22#include "llvm/IR/IntrinsicsR600.h"
23#include "llvm/IR/LLVMContext.h"
24#include "llvm/IR/Metadata.h"
25#include "llvm/MC/MCInstrInfo.h"
26#include "llvm/MC/MCRegisterInfo.h"
27#include "llvm/MC/MCSubtargetInfo.h"
28#include "llvm/Support/AMDHSAKernelDescriptor.h"
29#include "llvm/Support/CommandLine.h"
30#include <optional>
31
32#define GET_INSTRINFO_NAMED_OPS
33#define GET_INSTRMAP_INFO
34#include "AMDGPUGenInstrInfo.inc"
35
36static llvm::cl::opt<unsigned> DefaultAMDHSACodeObjectVersion(
37 "amdhsa-code-object-version", llvm::cl::Hidden,
38 llvm::cl::init(llvm::AMDGPU::AMDHSA_COV5),
39 llvm::cl::desc("Set default AMDHSA Code Object Version (module flag "
40 "or asm directive still take priority if present)"));
41
42namespace {
43
44/// \returns Bit mask for given bit \p Shift and bit \p Width.
45unsigned getBitMask(unsigned Shift, unsigned Width) {
46 return ((1 << Width) - 1) << Shift;
47}
48
49/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
50///
51/// \returns Packed \p Dst.
52unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
53 unsigned Mask = getBitMask(Shift, Width);
54 return ((Src << Shift) & Mask) | (Dst & ~Mask);
55}
56
57/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
58///
59/// \returns Unpacked bits.
60unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
61 return (Src & getBitMask(Shift, Width)) >> Shift;
62}
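// Editorial example (not part of the original source): packing and then
// unpacking a 3-bit field at bit offset 4.
//   unsigned Enc = packBits(/*Src=*/5, /*Dst=*/0, /*Shift=*/4, /*Width=*/3);
//   // Enc == 0x50, since getBitMask(4, 3) == 0x70 and 5 << 4 == 0x50.
//   unsigned Val = unpackBits(Enc, /*Shift=*/4, /*Width=*/3); // Val == 5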
63
64/// \returns Vmcnt bit shift (lower bits).
65unsigned getVmcntBitShiftLo(unsigned VersionMajor) {
66 return VersionMajor >= 11 ? 10 : 0;
67}
68
69/// \returns Vmcnt bit width (lower bits).
70unsigned getVmcntBitWidthLo(unsigned VersionMajor) {
71 return VersionMajor >= 11 ? 6 : 4;
72}
73
74/// \returns Expcnt bit shift.
75unsigned getExpcntBitShift(unsigned VersionMajor) {
76 return VersionMajor >= 11 ? 0 : 4;
77}
78
79/// \returns Expcnt bit width.
80unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }
81
82/// \returns Lgkmcnt bit shift.
83unsigned getLgkmcntBitShift(unsigned VersionMajor) {
84 return VersionMajor >= 11 ? 4 : 8;
85}
86
87/// \returns Lgkmcnt bit width.
88unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
89 return VersionMajor >= 10 ? 6 : 4;
90}
91
92/// \returns Vmcnt bit shift (higher bits).
93unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }
94
95/// \returns Vmcnt bit width (higher bits).
96unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
97 return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
98}
99
100/// \returns Loadcnt bit width
101unsigned getLoadcntBitWidth(unsigned VersionMajor) {
102 return VersionMajor >= 12 ? 6 : 0;
103}
104
105/// \returns Samplecnt bit width.
106unsigned getSamplecntBitWidth(unsigned VersionMajor) {
107 return VersionMajor >= 12 ? 6 : 0;
108}
109
110/// \returns Bvhcnt bit width.
111unsigned getBvhcntBitWidth(unsigned VersionMajor) {
112 return VersionMajor >= 12 ? 3 : 0;
113}
114
115/// \returns Dscnt bit width.
116unsigned getDscntBitWidth(unsigned VersionMajor) {
117 return VersionMajor >= 12 ? 6 : 0;
118}
119
120/// \returns Dscnt bit shift in combined S_WAIT instructions.
121unsigned getDscntBitShift(unsigned VersionMajor) { return 0; }
122
123/// \returns Storecnt or Vscnt bit width, depending on VersionMajor.
124unsigned getStorecntBitWidth(unsigned VersionMajor) {
125 return VersionMajor >= 10 ? 6 : 0;
126}
127
128/// \returns Kmcnt bit width.
129unsigned getKmcntBitWidth(unsigned VersionMajor) {
130 return VersionMajor >= 12 ? 5 : 0;
131}
132
133/// \returns Xcnt bit width.
134unsigned getXcntBitWidth(unsigned VersionMajor, unsigned VersionMinor) {
135 return VersionMajor == 12 && VersionMinor == 5 ? 6 : 0;
136}
137
138/// \returns shift for Loadcnt/Storecnt in combined S_WAIT instructions.
139unsigned getLoadcntStorecntBitShift(unsigned VersionMajor) {
140 return VersionMajor >= 12 ? 8 : 0;
141}
142
143/// \returns VaSdst bit width
144inline unsigned getVaSdstBitWidth() { return 3; }
145
146/// \returns VaSdst bit shift
147inline unsigned getVaSdstBitShift() { return 9; }
148
149/// \returns VmVsrc bit width
150inline unsigned getVmVsrcBitWidth() { return 3; }
151
152/// \returns VmVsrc bit shift
153inline unsigned getVmVsrcBitShift() { return 2; }
154
155/// \returns VaVdst bit width
156inline unsigned getVaVdstBitWidth() { return 4; }
157
158/// \returns VaVdst bit shift
159inline unsigned getVaVdstBitShift() { return 12; }
160
161/// \returns VaVcc bit width
162inline unsigned getVaVccBitWidth() { return 1; }
163
164/// \returns VaVcc bit shift
165inline unsigned getVaVccBitShift() { return 1; }
166
167/// \returns SaSdst bit width
168inline unsigned getSaSdstBitWidth() { return 1; }
169
170/// \returns SaSdst bit shift
171inline unsigned getSaSdstBitShift() { return 0; }
172
173/// \returns VaSsrc width
174inline unsigned getVaSsrcBitWidth() { return 1; }
175
176/// \returns VaSsrc bit shift
177inline unsigned getVaSsrcBitShift() { return 8; }
178
179/// \returns HoldCnt bit width
180inline unsigned getHoldCntWidth() { return 1; }
181
182/// \returns HoldCnt bit shift
183inline unsigned getHoldCntBitShift() { return 7; }
184
185} // end anonymous namespace
186
187namespace llvm {
188
189namespace AMDGPU {
190
191/// \returns true if the target supports signed immediate offset for SMRD
192/// instructions.
193bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
194 return isGFX9Plus(ST);
195}
196
197/// \returns True if \p STI is AMDHSA.
198bool isHsaAbi(const MCSubtargetInfo &STI) {
199 return STI.getTargetTriple().getOS() == Triple::AMDHSA;
200}
201
202unsigned getAMDHSACodeObjectVersion(const Module &M) {
203 if (auto *Ver = mdconst::extract_or_null<ConstantInt>(
204 M.getModuleFlag("amdhsa_code_object_version"))) {
205 return (unsigned)Ver->getZExtValue() / 100;
206 }
207
208 return getDefaultAMDHSACodeObjectVersion();
209}
210
211unsigned getDefaultAMDHSACodeObjectVersion() {
212 return DefaultAMDHSACodeObjectVersion;
213}
214
215unsigned getAMDHSACodeObjectVersion(unsigned ABIVersion) {
216 switch (ABIVersion) {
217 case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
218 return 4;
219 case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
220 return 5;
221 case ELF::ELFABIVERSION_AMDGPU_HSA_V6:
222 return 6;
223 default:
224 return getDefaultAMDHSACodeObjectVersion();
225 }
226}
227
228uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion) {
229 if (T.getOS() != Triple::AMDHSA)
230 return 0;
231
232 switch (CodeObjectVersion) {
233 case 4:
234 return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
235 case 5:
236 return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
237 case 6:
238 return ELF::ELFABIVERSION_AMDGPU_HSA_V6;
239 default:
240 report_fatal_error("Unsupported AMDHSA Code Object Version " +
241 Twine(CodeObjectVersion));
242 }
243}
244
245unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion) {
246 switch (CodeObjectVersion) {
247 case AMDHSA_COV4:
248 return 48;
249 case AMDHSA_COV5:
250 case AMDHSA_COV6:
251 default:
252 return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET;
253 }
254}
255
256// FIXME: All such magic numbers about the ABI should be in a
257// central TD file.
258unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion) {
259 switch (CodeObjectVersion) {
260 case AMDHSA_COV4:
261 return 24;
262 case AMDHSA_COV5:
263 case AMDHSA_COV6:
264 default:
265 return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET;
266 }
267}
268
269unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion) {
270 switch (CodeObjectVersion) {
271 case AMDHSA_COV4:
272 return 32;
273 case AMDHSA_COV5:
274 case AMDHSA_COV6:
275 default:
276 return AMDGPU::ImplicitArg::DEFAULT_QUEUE_OFFSET;
277 }
278}
279
280unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion) {
281 switch (CodeObjectVersion) {
282 case AMDHSA_COV4:
283 return 40;
284 case AMDHSA_COV5:
285 case AMDHSA_COV6:
286 default:
287 return AMDGPU::ImplicitArg::COMPLETION_ACTION_OFFSET;
288 }
289}
290
291#define GET_MIMGBaseOpcodesTable_IMPL
292#define GET_MIMGDimInfoTable_IMPL
293#define GET_MIMGInfoTable_IMPL
294#define GET_MIMGLZMappingTable_IMPL
295#define GET_MIMGMIPMappingTable_IMPL
296#define GET_MIMGBiasMappingTable_IMPL
297#define GET_MIMGOffsetMappingTable_IMPL
298#define GET_MIMGG16MappingTable_IMPL
299#define GET_MAIInstInfoTable_IMPL
300#define GET_WMMAInstInfoTable_IMPL
301#include "AMDGPUGenSearchableTables.inc"
302
303int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
304 unsigned VDataDwords, unsigned VAddrDwords) {
305 const MIMGInfo *Info =
306 getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, VDataDwords, VAddrDwords);
307 return Info ? Info->Opcode : -1;
308}
309
310const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
311 const MIMGInfo *Info = getMIMGInfo(Opc);
312 return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
313}
314
315int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
316 const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
317 const MIMGInfo *NewInfo =
318 getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
319 NewChannels, OrigInfo->VAddrDwords);
320 return NewInfo ? NewInfo->Opcode : -1;
321}
322
323unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
324 const MIMGDimInfo *Dim, bool IsA16,
325 bool IsG16Supported) {
326 unsigned AddrWords = BaseOpcode->NumExtraArgs;
327 unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
328 (BaseOpcode->LodOrClampOrMip ? 1 : 0);
329 if (IsA16)
330 AddrWords += divideCeil(AddrComponents, 2);
331 else
332 AddrWords += AddrComponents;
333
334 // Note: For subtargets that support A16 but not G16, enabling A16 also
335 // enables 16 bit gradients.
336 // For subtargets that support both A16 (an operand bit) and G16 (a
337 // different instruction encoding), the two settings are independent.
338
339 if (BaseOpcode->Gradients) {
340 if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
341 // There are two gradients per coordinate, we pack them separately.
342 // For the 3d case,
343 // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
344 AddrWords += alignTo<2>(Dim->NumGradients / 2);
345 else
346 AddrWords += Dim->NumGradients;
347 }
348 return AddrWords;
349}
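// Editorial example (not from the original source): for a 2D sample with
// gradients (NumCoords == 2, NumGradients == 4, no extra args, no
// LOD/clamp/mip), IsA16 == false with G16 supported gives
// AddrWords == 2 + 4 == 6, while IsA16 == true on a subtarget without G16
// also packs the gradients:
// AddrWords == divideCeil(2, 2) + alignTo<2>(4 / 2) == 1 + 2 == 3.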
350
361
370
375
380
384
388
392
399
407
412
413#define GET_FP4FP8DstByteSelTable_DECL
414#define GET_FP4FP8DstByteSelTable_IMPL
415
420
426
427#define GET_MTBUFInfoTable_DECL
428#define GET_MTBUFInfoTable_IMPL
429#define GET_MUBUFInfoTable_DECL
430#define GET_MUBUFInfoTable_IMPL
431#define GET_SMInfoTable_DECL
432#define GET_SMInfoTable_IMPL
433#define GET_VOP1InfoTable_DECL
434#define GET_VOP1InfoTable_IMPL
435#define GET_VOP2InfoTable_DECL
436#define GET_VOP2InfoTable_IMPL
437#define GET_VOP3InfoTable_DECL
438#define GET_VOP3InfoTable_IMPL
439#define GET_VOPC64DPPTable_DECL
440#define GET_VOPC64DPPTable_IMPL
441#define GET_VOPC64DPP8Table_DECL
442#define GET_VOPC64DPP8Table_IMPL
443#define GET_VOPCAsmOnlyInfoTable_DECL
444#define GET_VOPCAsmOnlyInfoTable_IMPL
445#define GET_VOP3CAsmOnlyInfoTable_DECL
446#define GET_VOP3CAsmOnlyInfoTable_IMPL
447#define GET_VOPDComponentTable_DECL
448#define GET_VOPDComponentTable_IMPL
449#define GET_VOPDPairs_DECL
450#define GET_VOPDPairs_IMPL
451#define GET_VOPTrue16Table_DECL
452#define GET_VOPTrue16Table_IMPL
453#define GET_True16D16Table_IMPL
454#define GET_WMMAOpcode2AddrMappingTable_DECL
455#define GET_WMMAOpcode2AddrMappingTable_IMPL
456#define GET_WMMAOpcode3AddrMappingTable_DECL
457#define GET_WMMAOpcode3AddrMappingTable_IMPL
458#define GET_getMFMA_F8F6F4_WithSize_DECL
459#define GET_getMFMA_F8F6F4_WithSize_IMPL
460#define GET_isMFMA_F8F6F4Table_IMPL
461#define GET_isCvtScaleF32_F32F16ToF8F4Table_IMPL
462
463#include "AMDGPUGenSearchableTables.inc"
464
465int getMTBUFBaseOpcode(unsigned Opc) {
466 const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
467 return Info ? Info->BaseOpcode : -1;
468}
469
470int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
471 const MTBUFInfo *Info =
472 getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
473 return Info ? Info->Opcode : -1;
474}
475
476int getMTBUFElements(unsigned Opc) {
477 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
478 return Info ? Info->elements : 0;
479}
480
481bool getMTBUFHasVAddr(unsigned Opc) {
482 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
483 return Info && Info->has_vaddr;
484}
485
486bool getMTBUFHasSrsrc(unsigned Opc) {
487 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
488 return Info && Info->has_srsrc;
489}
490
491bool getMTBUFHasSoffset(unsigned Opc) {
492 const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
493 return Info && Info->has_soffset;
494}
495
496int getMUBUFBaseOpcode(unsigned Opc) {
497 const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
498 return Info ? Info->BaseOpcode : -1;
499}
500
501int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
502 const MUBUFInfo *Info =
503 getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
504 return Info ? Info->Opcode : -1;
505}
506
507int getMUBUFElements(unsigned Opc) {
508 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
509 return Info ? Info->elements : 0;
510}
511
512bool getMUBUFHasVAddr(unsigned Opc) {
513 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
514 return Info && Info->has_vaddr;
515}
516
517bool getMUBUFHasSrsrc(unsigned Opc) {
518 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
519 return Info && Info->has_srsrc;
520}
521
522bool getMUBUFHasSoffset(unsigned Opc) {
523 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
524 return Info && Info->has_soffset;
525}
526
527bool getMUBUFIsBufferInv(unsigned Opc) {
528 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
529 return Info && Info->IsBufferInv;
530}
531
532bool getMUBUFTfe(unsigned Opc) {
533 const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
534 return Info && Info->tfe;
535}
536
537bool getSMEMIsBuffer(unsigned Opc) {
538 const SMInfo *Info = getSMEMOpcodeHelper(Opc);
539 return Info && Info->IsBuffer;
540}
541
542bool getVOP1IsSingle(unsigned Opc) {
543 const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
544 return !Info || Info->IsSingle;
545}
546
547bool getVOP2IsSingle(unsigned Opc) {
548 const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
549 return !Info || Info->IsSingle;
550}
551
552bool getVOP3IsSingle(unsigned Opc) {
553 const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
554 return !Info || Info->IsSingle;
555}
556
557bool isVOPC64DPP(unsigned Opc) {
558 return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);
559}
560
561bool isVOPCAsmOnly(unsigned Opc) { return isVOPCAsmOnlyOpcodeHelper(Opc); }
562
563bool getMAIIsDGEMM(unsigned Opc) {
564 const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
565 return Info && Info->is_dgemm;
566}
567
568bool getMAIIsGFX940XDL(unsigned Opc) {
569 const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
570 return Info && Info->is_gfx940_xdl;
571}
572
573bool getWMMAIsXDL(unsigned Opc) {
574 const WMMAInstInfo *Info = getWMMAInstInfoHelper(Opc);
575 return Info ? Info->is_wmma_xdl : false;
576}
577
578uint8_t mfmaScaleF8F6F4FormatToNumRegs(unsigned EncodingVal) {
579 switch (EncodingVal) {
580 case MFMAScaleFormats::FP6_E2M3:
581 case MFMAScaleFormats::FP6_E3M2:
582 return 6;
583 case MFMAScaleFormats::FP4_E2M1:
584 return 4;
585 case MFMAScaleFormats::FP8_E4M3:
586 case MFMAScaleFormats::FP8_E5M2:
587 default:
588 return 8;
589 }
590
591 llvm_unreachable("covered switch over mfma scale formats");
592}
593
594const MFMA_F8F6F4_Info *getMFMA_F8F6F4_WithFormatArgs(unsigned CBSZ,
595 unsigned BLGP,
596 unsigned F8F8Opcode) {
597 uint8_t SrcANumRegs = mfmaScaleF8F6F4FormatToNumRegs(CBSZ);
598 uint8_t SrcBNumRegs = mfmaScaleF8F6F4FormatToNumRegs(BLGP);
599 return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);
600}
601
602uint8_t wmmaScaleF8F6F4FormatToNumRegs(unsigned Fmt) {
603 switch (Fmt) {
604 case WMMA::MATRIX_FMT_FP8:
605 case WMMA::MATRIX_FMT_BF8:
606 return 16;
607 case WMMA::MATRIX_FMT_FP6:
608 case WMMA::MATRIX_FMT_BF6:
609 return 12;
610 case WMMA::MATRIX_FMT_FP4:
611 return 8;
612 }
613
614 llvm_unreachable("covered switch over wmma scale formats");
615}
616
617const MFMA_F8F6F4_Info *getWMMA_F8F6F4_WithFormatArgs(unsigned FmtA,
618 unsigned FmtB,
619 unsigned F8F8Opcode) {
620 uint8_t SrcANumRegs = wmmaScaleF8F6F4FormatToNumRegs(FmtA);
621 uint8_t SrcBNumRegs = wmmaScaleF8F6F4FormatToNumRegs(FmtB);
622 return getMFMA_F8F6F4_InstWithNumRegs(SrcANumRegs, SrcBNumRegs, F8F8Opcode);
623}
624
625unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST) {
626 if (ST.hasFeature(AMDGPU::FeatureGFX1250Insts))
627 return SIEncodingFamily::GFX1250;
628 if (ST.hasFeature(AMDGPU::FeatureGFX12Insts))
629 return SIEncodingFamily::GFX12;
630 if (ST.hasFeature(AMDGPU::FeatureGFX11Insts))
631 return SIEncodingFamily::GFX11;
632 llvm_unreachable("Subtarget generation does not support VOPD!");
633}
634
635CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3) {
636 bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(Opc) : 0;
637 Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
638 const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
639 if (Info) {
640 // Check that Opc can be used as VOPDY for this encoding. V_MOV_B32 as a
641 // VOPDX is just a placeholder here; it is supported on all encodings.
642 // TODO: This can be optimized by creating tables of supported VOPDY
643 // opcodes per encoding.
644 unsigned VOPDMov = AMDGPU::getVOPDOpcode(AMDGPU::V_MOV_B32_e32, VOPD3);
645 bool CanBeVOPDY = getVOPDFull(VOPDMov, AMDGPU::getVOPDOpcode(Opc, VOPD3),
646 EncodingFamily, VOPD3) != -1;
647 return {VOPD3 ? Info->CanBeVOPD3X : Info->CanBeVOPDX, CanBeVOPDY};
648 }
649
650 return {false, false};
651}
652
653unsigned getVOPDOpcode(unsigned Opc, bool VOPD3) {
654 bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(Opc) : 0;
655 Opc = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : Opc;
656 const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
657 return Info ? Info->VOPDOp : ~0u;
658}
659
660bool isVOPD(unsigned Opc) {
661 return AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0X);
662}
663
664bool isMAC(unsigned Opc) {
665 return Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
666 Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
667 Opc == AMDGPU::V_MAC_F32_e64_vi ||
668 Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
669 Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
670 Opc == AMDGPU::V_MAC_F16_e64_vi ||
671 Opc == AMDGPU::V_FMAC_F64_e64_gfx90a ||
672 Opc == AMDGPU::V_FMAC_F64_e64_gfx12 ||
673 Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
674 Opc == AMDGPU::V_FMAC_F32_e64_gfx11 ||
675 Opc == AMDGPU::V_FMAC_F32_e64_gfx12 ||
676 Opc == AMDGPU::V_FMAC_F32_e64_vi ||
677 Opc == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
678 Opc == AMDGPU::V_FMAC_DX9_ZERO_F32_e64_gfx11 ||
679 Opc == AMDGPU::V_FMAC_F16_e64_gfx10 ||
680 Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx11 ||
681 Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx11 ||
682 Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx12 ||
683 Opc == AMDGPU::V_FMAC_F16_fake16_e64_gfx12 ||
684 Opc == AMDGPU::V_DOT2C_F32_F16_e64_vi ||
685 Opc == AMDGPU::V_DOT2C_F32_BF16_e64_vi ||
686 Opc == AMDGPU::V_DOT2C_I32_I16_e64_vi ||
687 Opc == AMDGPU::V_DOT4C_I32_I8_e64_vi ||
688 Opc == AMDGPU::V_DOT8C_I32_I4_e64_vi;
689}
690
691bool isPermlane16(unsigned Opc) {
692 return Opc == AMDGPU::V_PERMLANE16_B32_gfx10 ||
693 Opc == AMDGPU::V_PERMLANEX16_B32_gfx10 ||
694 Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx11 ||
695 Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx11 ||
696 Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx12 ||
697 Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx12 ||
698 Opc == AMDGPU::V_PERMLANE16_VAR_B32_e64_gfx12 ||
699 Opc == AMDGPU::V_PERMLANEX16_VAR_B32_e64_gfx12;
700}
701
702bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc) {
703 return Opc == AMDGPU::V_CVT_F32_BF8_e64_gfx12 ||
704 Opc == AMDGPU::V_CVT_F32_FP8_e64_gfx12 ||
705 Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp_gfx12 ||
706 Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp_gfx12 ||
707 Opc == AMDGPU::V_CVT_F32_BF8_e64_dpp8_gfx12 ||
708 Opc == AMDGPU::V_CVT_F32_FP8_e64_dpp8_gfx12 ||
709 Opc == AMDGPU::V_CVT_PK_F32_BF8_fake16_e64_gfx12 ||
710 Opc == AMDGPU::V_CVT_PK_F32_FP8_fake16_e64_gfx12 ||
711 Opc == AMDGPU::V_CVT_PK_F32_BF8_t16_e64_gfx12 ||
712 Opc == AMDGPU::V_CVT_PK_F32_FP8_t16_e64_gfx12;
713}
714
715bool isGenericAtomic(unsigned Opc) {
716 return Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP ||
717 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD ||
718 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB ||
719 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN ||
720 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN ||
721 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX ||
722 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX ||
723 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND ||
724 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR ||
725 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR ||
726 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC ||
727 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC ||
728 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD ||
729 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMIN ||
730 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FMAX ||
731 Opc == AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP ||
732 Opc == AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG;
733}
734
735bool isAsyncStore(unsigned Opc) {
736 return Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_gfx1250 ||
737 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_gfx1250 ||
738 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_gfx1250 ||
739 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_gfx1250 ||
740 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B8_SADDR_gfx1250 ||
741 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B32_SADDR_gfx1250 ||
742 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B64_SADDR_gfx1250 ||
743 Opc == GLOBAL_STORE_ASYNC_FROM_LDS_B128_SADDR_gfx1250;
744}
745
746bool isTensorStore(unsigned Opc) {
747 return Opc == TENSOR_STORE_FROM_LDS_gfx1250 ||
748 Opc == TENSOR_STORE_FROM_LDS_D2_gfx1250;
749}
750
751unsigned getTemporalHintType(const MCInstrDesc TID) {
752 if (TID.TSFlags & (SIInstrFlags::IsAtomicRet | SIInstrFlags::IsAtomicNoRet))
753 return CPol::TH_TYPE_ATOMIC;
754 unsigned Opc = TID.getOpcode();
755 // Async and tensor stores should have the temporal hint type TH_TYPE_STORE.
756 if (TID.mayStore() &&
757 (isAsyncStore(Opc) || isTensorStore(Opc) || !TID.mayLoad()))
758 return CPol::TH_TYPE_STORE;
759
760 // This will default to returning TH_TYPE_LOAD when neither MayStore nor
761 // MayLoad flag is present which is the case with instructions like
762 // image_get_resinfo.
763 return CPol::TH_TYPE_LOAD;
764}
765
766bool isTrue16Inst(unsigned Opc) {
767 const VOPTrue16Info *Info = getTrue16OpcodeHelper(Opc);
768 return Info && Info->IsTrue16;
769}
770
771FPType getFPDstSelType(unsigned Opc) {
772 const FP4FP8DstByteSelInfo *Info = getFP4FP8DstByteSelHelper(Opc);
773 if (!Info)
774 return FPType::None;
775 if (Info->HasFP8DstByteSel)
776 return FPType::FP8;
777 if (Info->HasFP4DstByteSel)
778 return FPType::FP4;
779
780 return FPType::None;
781}
782
783unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc) {
784 const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom2AddrOpcode(Opc);
785 return Info ? Info->Opcode3Addr : ~0u;
786}
787
788unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc) {
789 const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom3AddrOpcode(Opc);
790 return Info ? Info->Opcode2Addr : ~0u;
791}
792
793// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
794// header files, so we need to wrap it in a function that takes unsigned
795// instead.
796int getMCOpcode(uint16_t Opcode, unsigned Gen) {
797 return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
798}
799
800unsigned getBitOp2(unsigned Opc) {
801 switch (Opc) {
802 default:
803 return 0;
804 case AMDGPU::V_AND_B32_e32:
805 return 0x40;
806 case AMDGPU::V_OR_B32_e32:
807 return 0x54;
808 case AMDGPU::V_XOR_B32_e32:
809 return 0x14;
810 case AMDGPU::V_XNOR_B32_e32:
811 return 0x41;
812 }
813}
814
815int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily,
816 bool VOPD3) {
817 bool IsConvertibleToBitOp = VOPD3 ? getBitOp2(OpY) : 0;
818 OpY = IsConvertibleToBitOp ? (unsigned)AMDGPU::V_BITOP3_B32_e64 : OpY;
819 const VOPDInfo *Info =
820 getVOPDInfoFromComponentOpcodes(OpX, OpY, EncodingFamily, VOPD3);
821 return Info ? Info->Opcode : -1;
822}
823
824std::pair<unsigned, unsigned> getVOPDComponents(unsigned VOPDOpcode) {
825 const VOPDInfo *Info = getVOPDOpcodeHelper(VOPDOpcode);
826 assert(Info);
827 const auto *OpX = getVOPDBaseFromComponent(Info->OpX);
828 const auto *OpY = getVOPDBaseFromComponent(Info->OpY);
829 assert(OpX && OpY);
830 return {OpX->BaseVOP, OpY->BaseVOP};
831}
832
833namespace VOPD {
834
835ComponentProps::ComponentProps(const MCInstrDesc &OpDesc, bool VOP3Layout) {
837
840 auto TiedIdx = OpDesc.getOperandConstraint(Component::SRC2, MCOI::TIED_TO);
841 assert(TiedIdx == -1 || TiedIdx == Component::DST);
842 HasSrc2Acc = TiedIdx != -1;
843 Opcode = OpDesc.getOpcode();
844
845 IsVOP3 = VOP3Layout || (OpDesc.TSFlags & SIInstrFlags::VOP3);
846 SrcOperandsNum = AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2) ? 3
847 : AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::imm) ? 3
848 : AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src1) ? 2
849 : 1;
850 assert(SrcOperandsNum <= Component::MAX_SRC_NUM);
851
852 if (Opcode == AMDGPU::V_CNDMASK_B32_e32 ||
853 Opcode == AMDGPU::V_CNDMASK_B32_e64) {
854 // CNDMASK is an awkward exception, it has FP modifiers, but not FP
855 // operands.
856 NumVOPD3Mods = 2;
857 if (IsVOP3)
858 SrcOperandsNum = 3;
859 } else if (isSISrcFPOperand(OpDesc,
860 getNamedOperandIdx(Opcode, OpName::src0))) {
861 // All FP VOPD instructions have Neg modifiers for all operands except
862 // for tied src2.
863 NumVOPD3Mods = SrcOperandsNum;
864 if (HasSrc2Acc)
865 --NumVOPD3Mods;
866 }
867
868 if (OpDesc.TSFlags & SIInstrFlags::VOP3)
869 return;
870
871 auto OperandsNum = OpDesc.getNumOperands();
872 unsigned CompOprIdx;
873 for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) {
874 if (OpDesc.operands()[CompOprIdx].OperandType == AMDGPU::OPERAND_KIMM32) {
875 MandatoryLiteralIdx = CompOprIdx;
876 break;
877 }
878 }
879}
880
882 return getNamedOperandIdx(Opcode, OpName::bitop3);
883}
884
885unsigned ComponentInfo::getIndexInParsedOperands(unsigned CompOprIdx) const {
886 assert(CompOprIdx < Component::MAX_OPR_NUM);
887
888 if (CompOprIdx == Component::DST)
889 return getIndexOfDstInParsedOperands();
890
891 auto CompSrcIdx = CompOprIdx - Component::DST_NUM;
892 if (CompSrcIdx < getCompParsedSrcOperandsNum())
893 return getIndexOfSrcInParsedOperands(CompSrcIdx);
894
895 // The specified operand does not exist.
896 return 0;
897}
898
899std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
900 std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
901 const MCRegisterInfo &MRI, bool SkipSrc, bool AllowSameVGPR,
902 bool VOPD3) const {
903
904 auto OpXRegs = getRegIndices(ComponentIndex::X, GetRegIdx,
905 CompInfo[ComponentIndex::X].isVOP3());
906 auto OpYRegs = getRegIndices(ComponentIndex::Y, GetRegIdx,
907 CompInfo[ComponentIndex::Y].isVOP3());
908
909 const auto banksOverlap = [&MRI](MCRegister X, MCRegister Y,
910 unsigned BanksMask) -> bool {
911 MCRegister BaseX = MRI.getSubReg(X, AMDGPU::sub0);
912 MCRegister BaseY = MRI.getSubReg(Y, AMDGPU::sub0);
913 if (!BaseX)
914 BaseX = X;
915 if (!BaseY)
916 BaseY = Y;
917 if ((BaseX.id() & BanksMask) == (BaseY.id() & BanksMask))
918 return true;
919 if (BaseX != X /* This is 64-bit register */ &&
920 ((BaseX.id() + 1) & BanksMask) == (BaseY.id() & BanksMask))
921 return true;
922 if (BaseY != Y &&
923 (BaseX.id() & BanksMask) == ((BaseY.id() + 1) & BanksMask))
924 return true;
925
926 // If both are 64-bit, a bank conflict will already have been detected
927 // while checking the first subreg.
928 return false;
929 };
930
931 unsigned CompOprIdx;
932 for (CompOprIdx = 0; CompOprIdx < Component::MAX_OPR_NUM; ++CompOprIdx) {
933 unsigned BanksMasks = VOPD3 ? VOPD3_VGPR_BANK_MASKS[CompOprIdx]
934 : VOPD_VGPR_BANK_MASKS[CompOprIdx];
935 if (!OpXRegs[CompOprIdx] || !OpYRegs[CompOprIdx])
936 continue;
937
938 if (getVGPREncodingMSBs(OpXRegs[CompOprIdx], MRI) !=
939 getVGPREncodingMSBs(OpYRegs[CompOprIdx], MRI))
940 return CompOprIdx;
941
942 if (SkipSrc && CompOprIdx >= Component::DST_NUM)
943 continue;
944
945 if (CompOprIdx < Component::DST_NUM) {
946 // Even if we do not check vdst parity, vdst operands still shall not
947 // overlap.
948 if (MRI.regsOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx]))
949 return CompOprIdx;
950 if (VOPD3) // No need to check dst parity.
951 continue;
952 }
953
954 if (banksOverlap(OpXRegs[CompOprIdx], OpYRegs[CompOprIdx], BanksMasks) &&
955 (!AllowSameVGPR || CompOprIdx < Component::DST_NUM ||
956 OpXRegs[CompOprIdx] != OpYRegs[CompOprIdx]))
957 return CompOprIdx;
958 }
959
960 return {};
961}
962
963// Return an array of VGPR registers [DST,SRC0,SRC1,SRC2] used
964// by the specified component. If an operand is unused
965// or is not a VGPR, the corresponding value is 0.
966//
967// GetRegIdx(Component, MCOperandIdx) must return a VGPR register index
968// for the specified component and MC operand. The callback must return 0
969// if the operand is not a register or not a VGPR.
970InstInfo::RegIndices
971InstInfo::getRegIndices(unsigned CompIdx,
972 std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
973 bool VOPD3) const {
974 assert(CompIdx < COMPONENTS_NUM);
975
976 const auto &Comp = CompInfo[CompIdx];
977 RegIndices RegIndices;
978
979 RegIndices[DST] = GetRegIdx(CompIdx, Comp.getIndexOfDstInMCOperands());
980
981 for (unsigned CompOprIdx : {SRC0, SRC1, SRC2}) {
982 unsigned CompSrcIdx = CompOprIdx - DST_NUM;
983 RegIndices[CompOprIdx] =
984 Comp.hasRegSrcOperand(CompSrcIdx)
985 ? GetRegIdx(CompIdx,
986 Comp.getIndexOfSrcInMCOperands(CompSrcIdx, VOPD3))
987 : MCRegister();
988 }
989 return RegIndices;
990}
991
992} // namespace VOPD
993
994VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY) {
995 return VOPD::InstInfo(OpX, OpY);
996}
997
998VOPD::InstInfo getVOPDInstInfo(unsigned VOPDOpcode,
999 const MCInstrInfo *InstrInfo) {
1000 auto [OpX, OpY] = getVOPDComponents(VOPDOpcode);
1001 const auto &OpXDesc = InstrInfo->get(OpX);
1002 const auto &OpYDesc = InstrInfo->get(OpY);
1003 bool VOPD3 = InstrInfo->get(VOPDOpcode).TSFlags & SIInstrFlags::VOPD3;
1004 VOPD::ComponentInfo OpXInfo(OpXDesc, VOPD3);
1005 VOPD::ComponentInfo OpYInfo(OpYDesc, OpXInfo, VOPD3);
1006 return VOPD::InstInfo(OpXInfo, OpYInfo);
1007}
1008
1009namespace IsaInfo {
1010
1011AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI)
1012 : STI(STI), XnackSetting(TargetIDSetting::Any),
1013 SramEccSetting(TargetIDSetting::Any) {
1014 if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
1015 XnackSetting = TargetIDSetting::Unsupported;
1016 if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
1017 SramEccSetting = TargetIDSetting::Unsupported;
1018}
1019
1020void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) {
1021 // Check if xnack or sramecc is explicitly enabled or disabled. In the
1022 // absence of the target features we assume we must generate code that can run
1023 // in any environment.
1024 SubtargetFeatures Features(FS);
1025 std::optional<bool> XnackRequested;
1026 std::optional<bool> SramEccRequested;
1027
1028 for (const std::string &Feature : Features.getFeatures()) {
1029 if (Feature == "+xnack")
1030 XnackRequested = true;
1031 else if (Feature == "-xnack")
1032 XnackRequested = false;
1033 else if (Feature == "+sramecc")
1034 SramEccRequested = true;
1035 else if (Feature == "-sramecc")
1036 SramEccRequested = false;
1037 }
1038
1039 bool XnackSupported = isXnackSupported();
1040 bool SramEccSupported = isSramEccSupported();
1041
1042 if (XnackRequested) {
1043 if (XnackSupported) {
1044 XnackSetting =
1045 *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off;
1046 } else {
1047 // If a specific xnack setting was requested and this GPU does not support
1048 // xnack emit a warning. Setting will remain set to "Unsupported".
1049 if (*XnackRequested) {
1050 errs() << "warning: xnack 'On' was requested for a processor that does "
1051 "not support it!\n";
1052 } else {
1053 errs() << "warning: xnack 'Off' was requested for a processor that "
1054 "does not support it!\n";
1055 }
1056 }
1057 }
1058
1059 if (SramEccRequested) {
1060 if (SramEccSupported) {
1061 SramEccSetting =
1062 *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off;
1063 } else {
1064 // If a specific sramecc setting was requested and this GPU does not
1065 // support sramecc emit a warning. Setting will remain set to
1066 // "Unsupported".
1067 if (*SramEccRequested) {
1068 errs() << "warning: sramecc 'On' was requested for a processor that "
1069 "does not support it!\n";
1070 } else {
1071 errs() << "warning: sramecc 'Off' was requested for a processor that "
1072 "does not support it!\n";
1073 }
1074 }
1075 }
1076}
1077
1078static TargetIDSetting
1079getTargetIDSettingFromFeatureString(StringRef FeatureString) {
1080 if (FeatureString.ends_with("-"))
1081 return TargetIDSetting::Off;
1082 if (FeatureString.ends_with("+"))
1083 return TargetIDSetting::On;
1084
1085 llvm_unreachable("Malformed feature string");
1086}
1087
1088void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
1089 SmallVector<StringRef, 3> TargetIDSplit;
1090 TargetID.split(TargetIDSplit, ':');
1091
1092 for (const auto &FeatureString : TargetIDSplit) {
1093 if (FeatureString.starts_with("xnack"))
1094 XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
1095 if (FeatureString.starts_with("sramecc"))
1096 SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
1097 }
1098}
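// Editorial example (not from the original source): the target ID string
// "gfx90a:sramecc+:xnack-" sets SramEccSetting to On and XnackSetting to
// Off; features that do not appear in the string keep their prior setting.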
1099
1100std::string AMDGPUTargetID::toString() const {
1101 std::string StringRep;
1102 raw_string_ostream StreamRep(StringRep);
1103
1104 auto TargetTriple = STI.getTargetTriple();
1105 auto Version = getIsaVersion(STI.getCPU());
1106
1107 StreamRep << TargetTriple.getArchName() << '-' << TargetTriple.getVendorName()
1108 << '-' << TargetTriple.getOSName() << '-'
1109 << TargetTriple.getEnvironmentName() << '-';
1110
1111 std::string Processor;
1112 // TODO: The following else branch exists because we used various alias
1113 // names for GPUs up until GFX9 (e.g. 'fiji' is an alias for 'gfx803').
1114 // Remove it once all aliases are removed from GCNProcessors.td.
1115 if (Version.Major >= 9)
1116 Processor = STI.getCPU().str();
1117 else
1118 Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
1119 Twine(Version.Stepping))
1120 .str();
1121
1122 std::string Features;
1123 if (STI.getTargetTriple().getOS() == Triple::AMDHSA) {
1124 // sramecc.
1125 if (getSramEccSetting() == TargetIDSetting::Off)
1126 Features += ":sramecc-";
1127 else if (getSramEccSetting() == TargetIDSetting::On)
1128 Features += ":sramecc+";
1129 // xnack.
1130 if (getXnackSetting() == TargetIDSetting::Off)
1131 Features += ":xnack-";
1132 else if (getXnackSetting() == TargetIDSetting::On)
1133 Features += ":xnack+";
1134 }
1135
1136 StreamRep << Processor << Features;
1137
1138 return StringRep;
1139}
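// Editorial example (not from the original source): for an AMDHSA gfx90a
// target with sramecc enabled and xnack disabled, this produces a string of
// the form "amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack-".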
1140
1141unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
1142 if (STI->getFeatureBits().test(FeatureWavefrontSize16))
1143 return 16;
1144 if (STI->getFeatureBits().test(FeatureWavefrontSize32))
1145 return 32;
1146
1147 return 64;
1148}
1149
1150unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
1151 unsigned BytesPerCU = getAddressableLocalMemorySize(STI);
1152
1153 // "Per CU" really means "per whatever functional block the waves of a
1154 // workgroup must share". So the effective local memory size is doubled in
1155 // WGP mode on gfx10.
1156 if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
1157 BytesPerCU *= 2;
1158
1159 return BytesPerCU;
1160}
1161
1162unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI) {
1163 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize32768))
1164 return 32768;
1165 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize65536))
1166 return 65536;
1167 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize163840))
1168 return 163840;
1169 if (STI->getFeatureBits().test(FeatureAddressableLocalMemorySize327680))
1170 return 327680;
1171 return 32768;
1172}
1173
1174unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
1175 // "Per CU" really means "per whatever functional block the waves of a
1176 // workgroup must share".
1177
1178 // GFX12.5 only supports CU mode, which contains four SIMDs.
1179 if (isGFX1250(*STI)) {
1180 assert(STI->getFeatureBits().test(FeatureCuMode));
1181 return 4;
1182 }
1183
1184 // For gfx10 in CU mode the functional block is the CU, which contains
1185 // two SIMDs.
1186 if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode))
1187 return 2;
1188
1189 // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP
1190 // contains two CUs, so a total of four SIMDs.
1191 return 4;
1192}
1193
1194unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
1195 unsigned FlatWorkGroupSize) {
1196 assert(FlatWorkGroupSize != 0);
1197 if (!STI->getTargetTriple().isAMDGCN())
1198 return 8;
1199 unsigned MaxWaves = getMaxWavesPerEU(STI) * getEUsPerCU(STI);
1200 unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
1201 if (N == 1) {
1202 // Single-wave workgroups don't consume barrier resources.
1203 return MaxWaves;
1204 }
1205
1206 unsigned MaxBarriers = 16;
1207 if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
1208 MaxBarriers = 32;
1209
1210 return std::min(MaxWaves / N, MaxBarriers);
1211}
1212
1213unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) { return 1; }
1214
1215unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
1216 // FIXME: Need to take scratch memory into account.
1217 if (isGFX90A(*STI))
1218 return 8;
1219 if (!isGFX10Plus(*STI))
1220 return 10;
1221 return hasGFX10_3Insts(*STI) ? 16 : 20;
1222}
1223
1224unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
1225 unsigned FlatWorkGroupSize) {
1226 return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
1227 getEUsPerCU(STI));
1228}
1229
1230unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) { return 1; }
1231
1232unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
1233 // Some subtargets allow encoding 2048, but this isn't tested or supported.
1234 return 1024;
1235}
1236
1237unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
1238 unsigned FlatWorkGroupSize) {
1239 return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
1240}
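// Editorial example (not from the original source): with a wavefront size
// of 64, a flat workgroup size of 256 requires divideCeil(256, 64) == 4
// waves.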
1241
1242unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
1243 IsaVersion Version = getIsaVersion(STI->getCPU());
1244 if (Version.Major >= 10)
1245 return getAddressableNumSGPRs(STI);
1246 if (Version.Major >= 8)
1247 return 16;
1248 return 8;
1249}
1250
1251unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) { return 8; }
1252
1253unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
1254 IsaVersion Version = getIsaVersion(STI->getCPU());
1255 if (Version.Major >= 8)
1256 return 800;
1257 return 512;
1258}
1259
1260unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
1261 if (STI->getFeatureBits().test(FeatureSGPRInitBug))
1262 return FIXED_NUM_SGPRS_FOR_INIT_BUG;
1263
1264 IsaVersion Version = getIsaVersion(STI->getCPU());
1265 if (Version.Major >= 10)
1266 return 106;
1267 if (Version.Major >= 8)
1268 return 102;
1269 return 104;
1270}
1271
1272unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
1273 assert(WavesPerEU != 0);
1274
1275 IsaVersion Version = getIsaVersion(STI->getCPU());
1276 if (Version.Major >= 10)
1277 return 0;
1278
1279 if (WavesPerEU >= getMaxWavesPerEU(STI))
1280 return 0;
1281
1282 unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
1283 if (STI->getFeatureBits().test(FeatureTrapHandler))
1284 MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
1285 MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
1286 return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
1287}
1288
1289unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
1290 bool Addressable) {
1291 assert(WavesPerEU != 0);
1292
1293 unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
1294 IsaVersion Version = getIsaVersion(STI->getCPU());
1295 if (Version.Major >= 10)
1296 return Addressable ? AddressableNumSGPRs : 108;
1297 if (Version.Major >= 8 && !Addressable)
1298 AddressableNumSGPRs = 112;
1299 unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
1300 if (STI->getFeatureBits().test(FeatureTrapHandler))
1301 MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
1302 MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
1303 return std::min(MaxNumSGPRs, AddressableNumSGPRs);
1304}
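// Editorial example (not from the original source): on a GFX9 subtarget
// (800 total SGPRs, granule 16, no trap handler) at WavesPerEU == 10,
// 800 / 10 == 80 is already granule-aligned, so the result is
// min(80, 102) == 80 addressable SGPRs.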
1305
1306unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
1307 bool FlatScrUsed, bool XNACKUsed) {
1308 unsigned ExtraSGPRs = 0;
1309 if (VCCUsed)
1310 ExtraSGPRs = 2;
1311
1311
1312 IsaVersion Version = getIsaVersion(STI->getCPU());
1313 if (Version.Major >= 10)
1314 return ExtraSGPRs;
1315
1316 if (Version.Major < 8) {
1317 if (FlatScrUsed)
1318 ExtraSGPRs = 4;
1319 } else {
1320 if (XNACKUsed)
1321 ExtraSGPRs = 4;
1322
1323 if (FlatScrUsed ||
1324 STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
1325 ExtraSGPRs = 6;
1326 }
1327
1328 return ExtraSGPRs;
1329}
1330
1331unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
1332 bool FlatScrUsed) {
1333 return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
1334 STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
1335}
1336
1337static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs,
1338 unsigned Granule) {
1339 return divideCeil(std::max(1u, NumRegs), Granule);
1340}
1341
1342unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
1343 // SGPRBlocks is the actual number of SGPR blocks minus 1.
1344 return getGranulatedNumRegisterBlocks(NumSGPRs, getSGPREncodingGranule(STI)) -
1345 1;
1346}
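// Editorial example (not from the original source): 42 SGPRs with an
// encoding granule of 8 occupy divideCeil(42, 8) == 6 blocks, which is
// encoded as 6 - 1 == 5.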
1347
1348unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
1349 unsigned DynamicVGPRBlockSize,
1350 std::optional<bool> EnableWavefrontSize32) {
1351 if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1352 return 8;
1353
1354 if (DynamicVGPRBlockSize != 0)
1355 return DynamicVGPRBlockSize;
1356
1357 bool IsWave32 = EnableWavefrontSize32
1358 ? *EnableWavefrontSize32
1359 : STI->getFeatureBits().test(FeatureWavefrontSize32);
1360
1361 if (STI->getFeatureBits().test(Feature1_5xVGPRs))
1362 return IsWave32 ? 24 : 12;
1363
1364 if (hasGFX10_3Insts(*STI))
1365 return IsWave32 ? 16 : 8;
1366
1367 return IsWave32 ? 8 : 4;
1368}
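// Editorial example (not from the original source): a wave32 subtarget with
// GFX10.3 instructions and no dynamic VGPRs has a granule of 16, so a
// function using 100 VGPRs effectively allocates alignTo(100, 16) == 112.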
1369
1370unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
1371 std::optional<bool> EnableWavefrontSize32) {
1372 if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1373 return 8;
1374
1375 bool IsWave32 = EnableWavefrontSize32
1376 ? *EnableWavefrontSize32
1377 : STI->getFeatureBits().test(FeatureWavefrontSize32);
1378
1379 if (STI->getFeatureBits().test(Feature1024AddressableVGPRs))
1380 return IsWave32 ? 16 : 8;
1381
1382 return IsWave32 ? 8 : 4;
1383}
1384
1385unsigned getArchVGPRAllocGranule() { return 4; }
1386
1387unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
1388 if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1389 return 512;
1390 if (!isGFX10Plus(*STI))
1391 return 256;
1392 bool IsWave32 = STI->getFeatureBits().test(FeatureWavefrontSize32);
1393 if (STI->getFeatureBits().test(Feature1_5xVGPRs))
1394 return IsWave32 ? 1536 : 768;
1395 return IsWave32 ? 1024 : 512;
1396}
1397
1398unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI) {
1399 const auto &Features = STI->getFeatureBits();
1400 if (Features.test(Feature1024AddressableVGPRs))
1401 return Features.test(FeatureWavefrontSize32) ? 1024 : 512;
1402 return 256;
1403}
1404
1405unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI,
1406 unsigned DynamicVGPRBlockSize) {
1407 const auto &Features = STI->getFeatureBits();
1408 if (Features.test(FeatureGFX90AInsts))
1409 return 512;
1410
1411 if (DynamicVGPRBlockSize != 0)
1412 // On GFX12 we can allocate at most 8 blocks of VGPRs.
1413 return 8 * getVGPRAllocGranule(STI, DynamicVGPRBlockSize);
1414 return getAddressableNumArchVGPRs(STI);
1415}
1416
1417unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI,
1418 unsigned NumVGPRs,
1419 unsigned DynamicVGPRBlockSize) {
1420 return getNumWavesPerEUWithNumVGPRs(
1421 NumVGPRs, getVGPRAllocGranule(STI, DynamicVGPRBlockSize),
1422 getMaxWavesPerEU(STI), getTotalNumVGPRs(STI));
1423}
1424
1425unsigned getNumWavesPerEUWithNumVGPRs(unsigned NumVGPRs, unsigned Granule,
1426 unsigned MaxWaves,
1427 unsigned TotalNumVGPRs) {
1428 if (NumVGPRs < Granule)
1429 return MaxWaves;
1430 unsigned RoundedRegs = alignTo(NumVGPRs, Granule);
1431 return std::min(std::max(TotalNumVGPRs / RoundedRegs, 1u), MaxWaves);
1432}
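// Editorial example (not from the original source): NumVGPRs == 100 with
// Granule == 8 rounds up to 104, so with TotalNumVGPRs == 1024 the result
// is min(max(1024 / 104, 1), MaxWaves) == min(9, MaxWaves).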
1433
1434unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves,
1435 AMDGPUSubtarget::Generation Gen) {
1436 if (Gen >= AMDGPUSubtarget::GFX10)
1437 return MaxWaves;
1438
1439 if (Gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
1440 if (SGPRs <= 80)
1441 return 10;
1442 if (SGPRs <= 88)
1443 return 9;
1444 if (SGPRs <= 100)
1445 return 8;
1446 return 7;
1447 }
1448 if (SGPRs <= 48)
1449 return 10;
1450 if (SGPRs <= 56)
1451 return 9;
1452 if (SGPRs <= 64)
1453 return 8;
1454 if (SGPRs <= 72)
1455 return 7;
1456 if (SGPRs <= 80)
1457 return 6;
1458 return 5;
1459}
1460
1461unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
1462 unsigned DynamicVGPRBlockSize) {
1463 assert(WavesPerEU != 0);
1464
1465 unsigned MaxWavesPerEU = getMaxWavesPerEU(STI);
1466 if (WavesPerEU >= MaxWavesPerEU)
1467 return 0;
1468
1469 unsigned TotNumVGPRs = getTotalNumVGPRs(STI);
1470 unsigned AddressableNumVGPRs =
1471 getAddressableNumVGPRs(STI, DynamicVGPRBlockSize);
1472 unsigned Granule = getVGPRAllocGranule(STI, DynamicVGPRBlockSize);
1473 unsigned MaxNumVGPRs = alignDown(TotNumVGPRs / WavesPerEU, Granule);
1474
1475 if (MaxNumVGPRs == alignDown(TotNumVGPRs / MaxWavesPerEU, Granule))
1476 return 0;
1477
1478 unsigned MinWavesPerEU = getNumWavesPerEUWithNumVGPRs(
1479 STI, AddressableNumVGPRs, DynamicVGPRBlockSize);
1480 if (WavesPerEU < MinWavesPerEU)
1481 return getMinNumVGPRs(STI, MinWavesPerEU, DynamicVGPRBlockSize);
1482
1483 unsigned MaxNumVGPRsNext = alignDown(TotNumVGPRs / (WavesPerEU + 1), Granule);
1484 unsigned MinNumVGPRs = 1 + std::min(MaxNumVGPRs - Granule, MaxNumVGPRsNext);
1485 return std::min(MinNumVGPRs, AddressableNumVGPRs);
1486}
1487
1488unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
1489 unsigned DynamicVGPRBlockSize) {
1490 assert(WavesPerEU != 0);
1491
1492 unsigned MaxNumVGPRs =
1493 alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
1494 getVGPRAllocGranule(STI, DynamicVGPRBlockSize));
1495 unsigned AddressableNumVGPRs =
1496 getAddressableNumVGPRs(STI, DynamicVGPRBlockSize);
1497 return std::min(MaxNumVGPRs, AddressableNumVGPRs);
1498}
1499
1500unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
1501 std::optional<bool> EnableWavefrontSize32) {
1502 return getGranulatedNumRegisterBlocks(
1503 NumVGPRs, getVGPREncodingGranule(STI, EnableWavefrontSize32)) -
1504 1;
1505}
1506
1507unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI,
1508 unsigned NumVGPRs,
1509 unsigned DynamicVGPRBlockSize,
1510 std::optional<bool> EnableWavefrontSize32) {
1511 return getGranulatedNumRegisterBlocks(
1512 NumVGPRs,
1513 getVGPRAllocGranule(STI, DynamicVGPRBlockSize, EnableWavefrontSize32));
1514}
1515} // end namespace IsaInfo
1516
1518 const MCSubtargetInfo *STI) {
1519 IsaVersion Version = getIsaVersion(STI->getCPU());
1520 KernelCode.amd_kernel_code_version_major = 1;
1521 KernelCode.amd_kernel_code_version_minor = 2;
1522 KernelCode.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
1523 KernelCode.amd_machine_version_major = Version.Major;
1524 KernelCode.amd_machine_version_minor = Version.Minor;
1525 KernelCode.amd_machine_version_stepping = Version.Stepping;
1527 if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
1528 KernelCode.wavefront_size = 5;
1529 KernelCode.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
1530 } else {
1531 KernelCode.wavefront_size = 6;
1532 }
1533
1534 // If the code object does not support indirect functions, then the value must
1535 // be 0xffffffff.
1536 KernelCode.call_convention = -1;
1537
1538 // These alignment values are specified in powers of two, so alignment =
1539 // 2^n. The minimum alignment is 2^4 = 16.
1540 KernelCode.kernarg_segment_alignment = 4;
1541 KernelCode.group_segment_alignment = 4;
1542 KernelCode.private_segment_alignment = 4;
1543
1544 if (Version.Major >= 10) {
1545 KernelCode.compute_pgm_resource_registers |=
1546 S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
1547 S_00B848_MEM_ORDERED(1);
1548 }
1549}
1550
1551bool isGroupSegment(const GlobalValue *GV) {
1552 return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
1553}
1554
1555bool isGlobalSegment(const GlobalValue *GV) {
1556 return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
1557}
1558
1559bool isReadOnlySegment(const GlobalValue *GV) {
1560 unsigned AS = GV->getAddressSpace();
1561 return AS == AMDGPUAS::CONSTANT_ADDRESS ||
1562 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
1563}
1564
1565bool shouldEmitConstantsToTextSection(const Triple &TT) {
1566 return TT.getArch() == Triple::r600;
1567}
1568
1569static bool isValidRegPrefix(char C) {
1570 return C == 'v' || C == 's' || C == 'a';
1571}
1572
1573std::tuple<char, unsigned, unsigned> parseAsmPhysRegName(StringRef RegName) {
1574 char Kind = RegName.front();
1575 if (!isValidRegPrefix(Kind))
1576 return {};
1577
1578 RegName = RegName.drop_front();
1579 if (RegName.consume_front("[")) {
1580 unsigned Idx, End;
1581 bool Failed = RegName.consumeInteger(10, Idx);
1582 Failed |= !RegName.consume_front(":");
1583 Failed |= RegName.consumeInteger(10, End);
1584 Failed |= !RegName.consume_back("]");
1585 if (!Failed) {
1586 unsigned NumRegs = End - Idx + 1;
1587 if (NumRegs > 1)
1588 return {Kind, Idx, NumRegs};
1589 }
1590 } else {
1591 unsigned Idx;
1592 bool Failed = RegName.getAsInteger(10, Idx);
1593 if (!Failed)
1594 return {Kind, Idx, 1};
1595 }
1596
1597 return {};
1598}
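// Editorial examples (not from the original source):
//   parseAsmPhysRegName("v8")     returns {'v', 8, 1}
//   parseAsmPhysRegName("s[4:7]") returns {'s', 4, 4}
//   parseAsmPhysRegName("x0")     returns {} because 'x' is not a valid
//   register prefix.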
1599
1600std::tuple<char, unsigned, unsigned>
1601parseAsmConstraintPhysReg(StringRef Constraint) {
1602 StringRef RegName = Constraint;
1603 if (!RegName.consume_front("{") || !RegName.consume_back("}"))
1604 return {};
1605 return parseAsmPhysRegName(RegName);
1606}
1607
1608std::pair<unsigned, unsigned>
1609getIntegerPairAttribute(const Function &F, StringRef Name,
1610 std::pair<unsigned, unsigned> Default,
1611 bool OnlyFirstRequired) {
1612 if (auto Attr = getIntegerPairAttribute(F, Name, OnlyFirstRequired))
1613 return {Attr->first, Attr->second.value_or(Default.second)};
1614 return Default;
1615}
1616
1617std::optional<std::pair<unsigned, std::optional<unsigned>>>
1618getIntegerPairAttribute(const Function &F, StringRef Name,
1619 bool OnlyFirstRequired) {
1620 Attribute A = F.getFnAttribute(Name);
1621 if (!A.isStringAttribute())
1622 return std::nullopt;
1623
1624 LLVMContext &Ctx = F.getContext();
1625 std::pair<unsigned, std::optional<unsigned>> Ints;
1626 std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
1627 if (Strs.first.trim().getAsInteger(0, Ints.first)) {
1628 Ctx.emitError("can't parse first integer attribute " + Name);
1629 return std::nullopt;
1630 }
1631 unsigned Second = 0;
1632 if (Strs.second.trim().getAsInteger(0, Second)) {
1633 if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
1634 Ctx.emitError("can't parse second integer attribute " + Name);
1635 return std::nullopt;
1636 }
1637 } else {
1638 Ints.second = Second;
1639 }
1640
1641 return Ints;
1642}
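// Editorial example (not from the original source): a function attribute
// "amdgpu-flat-work-group-size"="1,256" parses to the pair {1, 256}; a
// value of "64" with OnlyFirstRequired set yields {64, std::nullopt}.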
1643
1644SmallVector<unsigned> getIntegerVecAttribute(const Function &F, StringRef Name,
1645 unsigned Size,
1646 unsigned DefaultVal) {
1647 std::optional<SmallVector<unsigned>> R =
1648 getIntegerVecAttribute(F, Name, Size);
1649 return R.has_value() ? *R : SmallVector<unsigned>(Size, DefaultVal);
1650}
1651
1652std::optional<SmallVector<unsigned>>
1653getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size) {
1654 assert(Size > 2);
1655 LLVMContext &Ctx = F.getContext();
1656
1657 Attribute A = F.getFnAttribute(Name);
1658 if (!A.isValid())
1659 return std::nullopt;
1660 if (!A.isStringAttribute()) {
1661 Ctx.emitError(Name + " is not a string attribute");
1662 return std::nullopt;
1663 }
1664
1665 SmallVector<unsigned> Vals(Size);
1666
1667 StringRef S = A.getValueAsString();
1668 unsigned i = 0;
1669 for (; !S.empty() && i < Size; i++) {
1670 std::pair<StringRef, StringRef> Strs = S.split(',');
1671 unsigned IntVal;
1672 if (Strs.first.trim().getAsInteger(0, IntVal)) {
1673 Ctx.emitError("can't parse integer attribute " + Strs.first + " in " +
1674 Name);
1675 return std::nullopt;
1676 }
1677 Vals[i] = IntVal;
1678 S = Strs.second;
1679 }
1680
1681 if (!S.empty() || i < Size) {
1682 Ctx.emitError("attribute " + Name +
1683 " has incorrect number of integers; expected " +
1684 Twine(Size));
1685 return std::nullopt;
1686 }
1687 return Vals;
1688}
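// Editorial example (not from the original source): with Size == 3, an
// attribute value of "1,2,3" parses to {1, 2, 3}, while "1,2" or "1,2,3,4"
// emits the "incorrect number of integers" error and returns std::nullopt.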
1689
1690bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val) {
1691 assert((MD.getNumOperands() % 2 == 0) && "invalid number of operands!");
1692 for (unsigned I = 0, E = MD.getNumOperands() / 2; I != E; ++I) {
1693 auto Low =
1694 mdconst::extract<ConstantInt>(MD.getOperand(2 * I + 0))->getValue();
1695 auto High =
1696 mdconst::extract<ConstantInt>(MD.getOperand(2 * I + 1))->getValue();
1697 // There are two types of [A; B) ranges:
1698 // A < B, e.g. [4; 5) which is a range that only includes 4.
1699 // A > B, e.g. [5; 4) which is a range that wraps around and includes
1700 // everything except 4.
1701 if (Low.ult(High)) {
1702 if (Low.ule(Val) && High.ugt(Val))
1703 return true;
1704 } else {
1705 if (Low.ule(Val) || High.ugt(Val))
1706 return true;
1707 }
1708 }
1709
1710 return false;
1711}
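// Editorial example (not from the original source): the operand pair (4, 5)
// describes the half-open range [4; 5), which matches only Val == 4, while
// (5, 4) is a wrapped range matching every value except 4.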
1712
1713unsigned getVmcntBitMask(const IsaVersion &Version) {
1714 return (1 << (getVmcntBitWidthLo(Version.Major) +
1715 getVmcntBitWidthHi(Version.Major))) -
1716 1;
1717}
1718
1719unsigned getLoadcntBitMask(const IsaVersion &Version) {
1720 return (1 << getLoadcntBitWidth(Version.Major)) - 1;
1721}
1722
1723unsigned getSamplecntBitMask(const IsaVersion &Version) {
1724 return (1 << getSamplecntBitWidth(Version.Major)) - 1;
1725}
1726
1727unsigned getBvhcntBitMask(const IsaVersion &Version) {
1728 return (1 << getBvhcntBitWidth(Version.Major)) - 1;
1729}
1730
1731unsigned getExpcntBitMask(const IsaVersion &Version) {
1732 return (1 << getExpcntBitWidth(Version.Major)) - 1;
1733}
1734
1735unsigned getLgkmcntBitMask(const IsaVersion &Version) {
1736 return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
1737}
1738
1739unsigned getDscntBitMask(const IsaVersion &Version) {
1740 return (1 << getDscntBitWidth(Version.Major)) - 1;
1741}
1742
1743unsigned getKmcntBitMask(const IsaVersion &Version) {
1744 return (1 << getKmcntBitWidth(Version.Major)) - 1;
1745}
1746
1747unsigned getXcntBitMask(const IsaVersion &Version) {
1748 return (1 << getXcntBitWidth(Version.Major, Version.Minor)) - 1;
1749}
1750
1751unsigned getStorecntBitMask(const IsaVersion &Version) {
1752 return (1 << getStorecntBitWidth(Version.Major)) - 1;
1753}
1754
1755unsigned getWaitcntBitMask(const IsaVersion &Version) {
1756 unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
1757 getVmcntBitWidthLo(Version.Major));
1758 unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
1759 getExpcntBitWidth(Version.Major));
1760 unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
1761 getLgkmcntBitWidth(Version.Major));
1762 unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
1763 getVmcntBitWidthHi(Version.Major));
1764 return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;
1765}
1766
1767unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1768 unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
1769 getVmcntBitWidthLo(Version.Major));
1770 unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
1771 getVmcntBitWidthHi(Version.Major));
1772 return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
1773}
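// Editorial example (not from the original source): on GFX9 the vmcnt field
// is split into 4 low bits at [3:0] and 2 high bits at [15:14]. A vmcnt of
// 0x2A therefore encodes as 0xA in bits [3:0] and 0x2 in bits [15:14], and
// decodeVmcnt() reassembles (0x2 << 4) | 0xA == 0x2A.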
1774
1775unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
1776 return unpackBits(Waitcnt, getExpcntBitShift(Version.Major),
1777 getExpcntBitWidth(Version.Major));
1778}
1779
1780unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1781 return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major),
1782 getLgkmcntBitWidth(Version.Major));
1783}
1784
1785void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt,
1786 unsigned &Expcnt, unsigned &Lgkmcnt) {
1787 Vmcnt = decodeVmcnt(Version, Waitcnt);
1788 Expcnt = decodeExpcnt(Version, Waitcnt);
1789 Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
1790}
1791
1792Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
1793 Waitcnt Decoded;
1794 Decoded.LoadCnt = decodeVmcnt(Version, Encoded);
1795 Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
1796 Decoded.DsCnt = decodeLgkmcnt(Version, Encoded);
1797 return Decoded;
1798}
1799
1800unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
1801 unsigned Vmcnt) {
1802 Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major),
1803 getVmcntBitWidthLo(Version.Major));
1804 return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
1805 getVmcntBitShiftHi(Version.Major),
1806 getVmcntBitWidthHi(Version.Major));
1807}
1808
1809unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
1810 unsigned Expcnt) {
1811 return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
1812 getExpcntBitWidth(Version.Major));
1813}
1814
1815unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
1816 unsigned Lgkmcnt) {
1817 return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
1818 getLgkmcntBitWidth(Version.Major));
1819}
1820
1821unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt,
1822 unsigned Expcnt, unsigned Lgkmcnt) {
1823 unsigned Waitcnt = getWaitcntBitMask(Version);
1824 Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
1825 Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
1826 Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
1827 return Waitcnt;
1828}
1829
1830unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
1831 return encodeWaitcnt(Version, Decoded.LoadCnt, Decoded.ExpCnt, Decoded.DsCnt);
1832}
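// Editorial example (not from the original source), using the GFX9 layout
// (vmcnt at [3:0], expcnt at [6:4], lgkmcnt at [11:8]), where V9 is an
// IsaVersion with Major == 9:
//   encodeWaitcnt(V9, /*Vmcnt=*/1, /*Expcnt=*/2, /*Lgkmcnt=*/3)
//     == (3 << 8) | (2 << 4) | 1 == 0x321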
1833
1834static unsigned getCombinedCountBitMask(const IsaVersion &Version,
1835 bool IsStore) {
1836 unsigned Dscnt = getBitMask(getDscntBitShift(Version.Major),
1837 getDscntBitWidth(Version.Major));
1838 if (IsStore) {
1839 unsigned Storecnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
1840 getStorecntBitWidth(Version.Major));
1841 return Dscnt | Storecnt;
1842 }
1843 unsigned Loadcnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
1844 getLoadcntBitWidth(Version.Major));
1845 return Dscnt | Loadcnt;
1846}
1847
1848Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt) {
1849 Waitcnt Decoded;
1850 Decoded.LoadCnt =
1851 unpackBits(LoadcntDscnt, getLoadcntStorecntBitShift(Version.Major),
1852 getLoadcntBitWidth(Version.Major));
1853 Decoded.DsCnt = unpackBits(LoadcntDscnt, getDscntBitShift(Version.Major),
1854 getDscntBitWidth(Version.Major));
1855 return Decoded;
1856}
1857
1858Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt) {
1859 Waitcnt Decoded;
1860 Decoded.StoreCnt =
1861 unpackBits(StorecntDscnt, getLoadcntStorecntBitShift(Version.Major),
1862 getStorecntBitWidth(Version.Major));
1863 Decoded.DsCnt = unpackBits(StorecntDscnt, getDscntBitShift(Version.Major),
1864 getDscntBitWidth(Version.Major));
1865 return Decoded;
1866}
1867
1868static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt,
1869 unsigned Loadcnt) {
1870 return packBits(Loadcnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
1871 getLoadcntBitWidth(Version.Major));
1872}
1873
1874static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt,
1875 unsigned Storecnt) {
1876 return packBits(Storecnt, Waitcnt, getLoadcntStorecntBitShift(Version.Major),
1877 getStorecntBitWidth(Version.Major));
1878}
1879
1880static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt,
1881 unsigned Dscnt) {
1882 return packBits(Dscnt, Waitcnt, getDscntBitShift(Version.Major),
1883 getDscntBitWidth(Version.Major));
1884}
1885
1886static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt,
1887 unsigned Dscnt) {
1888 unsigned Waitcnt = getCombinedCountBitMask(Version, false);
1889 Waitcnt = encodeLoadcnt(Version, Waitcnt, Loadcnt);
1890 Waitcnt = encodeDscnt(Version, Waitcnt, Dscnt);
1891 return Waitcnt;
1892}
1893
1894unsigned encodeLoadcntDscnt(const IsaVersion &Version, const Waitcnt &Decoded) {
1895 return encodeLoadcntDscnt(Version, Decoded.LoadCnt, Decoded.DsCnt);
1896}
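// Editorial example (not from the original source): on GFX12 the combined
// encoding places loadcnt in bits [13:8] and dscnt in bits [5:0], so
// encodeLoadcntDscnt(V12, /*Loadcnt=*/2, /*Dscnt=*/1) == (2 << 8) | 1
// == 0x201, where V12 is an IsaVersion with Major == 12.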
1897
1898static unsigned encodeStorecntDscnt(const IsaVersion &Version,
1899 unsigned Storecnt, unsigned Dscnt) {
1900 unsigned Waitcnt = getCombinedCountBitMask(Version, true);
1901 Waitcnt = encodeStorecnt(Version, Waitcnt, Storecnt);
1902 Waitcnt = encodeDscnt(Version, Waitcnt, Dscnt);
1903 return Waitcnt;
1904}
1905
1906unsigned encodeStorecntDscnt(const IsaVersion &Version,
1907 const Waitcnt &Decoded) {
1908 return encodeStorecntDscnt(Version, Decoded.StoreCnt, Decoded.DsCnt);
1909}
1910
1911//===----------------------------------------------------------------------===//
1912// Custom Operand Values
1913//===----------------------------------------------------------------------===//
1914
1915static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr,
1916 int Size,
1917 const MCSubtargetInfo &STI) {
1918 unsigned Enc = 0;
1919 for (int Idx = 0; Idx < Size; ++Idx) {
1920 const auto &Op = Opr[Idx];
1921 if (Op.isSupported(STI))
1922 Enc |= Op.encode(Op.Default);
1923 }
1924 return Enc;
1925}
1926
1927static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr,
1928 int Size, unsigned Code,
1929 bool &HasNonDefaultVal,
1930 const MCSubtargetInfo &STI) {
1931 unsigned UsedOprMask = 0;
1932 HasNonDefaultVal = false;
1933 for (int Idx = 0; Idx < Size; ++Idx) {
1934 const auto &Op = Opr[Idx];
1935 if (!Op.isSupported(STI))
1936 continue;
1937 UsedOprMask |= Op.getMask();
1938 unsigned Val = Op.decode(Code);
1939 if (!Op.isValid(Val))
1940 return false;
1941 HasNonDefaultVal |= (Val != Op.Default);
1942 }
1943 return (Code & ~UsedOprMask) == 0;
1944}
1945
1946static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size,
1947 unsigned Code, int &Idx, StringRef &Name,
1948 unsigned &Val, bool &IsDefault,
1949 const MCSubtargetInfo &STI) {
1950 while (Idx < Size) {
1951 const auto &Op = Opr[Idx++];
1952 if (Op.isSupported(STI)) {
1953 Name = Op.Name;
1954 Val = Op.decode(Code);
1955 IsDefault = (Val == Op.Default);
1956 return true;
1957 }
1958 }
1959
1960 return false;
1961}
1962
1963static int encodeCustomOperandVal(const CustomOperandVal &Op,
1964 int64_t InputVal) {
1965 if (InputVal < 0 || InputVal > Op.Max)
1966 return OPR_VAL_INVALID;
1967 return Op.encode(InputVal);
1968}
1969
1970static int encodeCustomOperand(const CustomOperandVal *Opr, int Size,
1971 const StringRef Name, int64_t InputVal,
1972 unsigned &UsedOprMask,
1973 const MCSubtargetInfo &STI) {
1974 int InvalidId = OPR_ID_UNKNOWN;
1975 for (int Idx = 0; Idx < Size; ++Idx) {
1976 const auto &Op = Opr[Idx];
1977 if (Op.Name == Name) {
1978 if (!Op.isSupported(STI)) {
1979 InvalidId = OPR_ID_UNSUPPORTED;
1980 continue;
1981 }
1982 auto OprMask = Op.getMask();
1983 if (OprMask & UsedOprMask)
1984 return OPR_ID_DUPLICATE;
1985 UsedOprMask |= OprMask;
1986 return encodeCustomOperandVal(Op, InputVal);
1987 }
1988 }
1989 return InvalidId;
1990}
1991
1992//===----------------------------------------------------------------------===//
1993// DepCtr
1994//===----------------------------------------------------------------------===//
1995
1996namespace DepCtr {
1997
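// The all-default depctr encoding depends only on the subtarget, so it is
// computed once and cached in a function-local static.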
1998int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) {
1999 static int Default = -1;
2000 if (Default == -1)
2001 Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI);
2002 return Default;
2003}
2004
2005bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal,
2006 const MCSubtargetInfo &STI) {
2007 return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code,
2008 HasNonDefaultVal, STI);
2009}
2010
2011bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val,
2012 bool &IsDefault, const MCSubtargetInfo &STI) {
2013 return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val,
2014 IsDefault, STI);
2015}
2016
2017int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask,
2018 const MCSubtargetInfo &STI) {
2019 return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask,
2020 STI);
2021}
2022
2023unsigned decodeFieldVmVsrc(unsigned Encoded) {
2024 return unpackBits(Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
2025}
2026
2027unsigned decodeFieldVaVdst(unsigned Encoded) {
2028 return unpackBits(Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
2029}
2030
2031unsigned decodeFieldSaSdst(unsigned Encoded) {
2032 return unpackBits(Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
2033}
2034
2035unsigned decodeFieldVaSdst(unsigned Encoded) {
2036 return unpackBits(Encoded, getVaSdstBitShift(), getVaSdstBitWidth());
2037}
2038
2039unsigned decodeFieldVaVcc(unsigned Encoded) {
2040 return unpackBits(Encoded, getVaVccBitShift(), getVaVccBitWidth());
2041}
2042
2043unsigned decodeFieldVaSsrc(unsigned Encoded) {
2044 return unpackBits(Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());
2045}
2046
2047unsigned decodeFieldHoldCnt(unsigned Encoded) {
2048 return unpackBits(Encoded, getHoldCntBitShift(), getHoldCntWidth());
2049}
2050
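// Single-argument encoders start from 0xffff so that every field other than
// the one being written keeps its all-ones default.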
2051unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc) {
2052 return packBits(VmVsrc, Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
2053}
2054
2055unsigned encodeFieldVmVsrc(unsigned VmVsrc) {
2056 return encodeFieldVmVsrc(0xffff, VmVsrc);
2057}
2058
2059unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst) {
2060 return packBits(VaVdst, Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
2061}
2062
2063unsigned encodeFieldVaVdst(unsigned VaVdst) {
2064 return encodeFieldVaVdst(0xffff, VaVdst);
2065}
2066
2067unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst) {
2068 return packBits(SaSdst, Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
2069}
2070
2071unsigned encodeFieldSaSdst(unsigned SaSdst) {
2072 return encodeFieldSaSdst(0xffff, SaSdst);
2073}
2074
2075unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst) {
2076 return packBits(VaSdst, Encoded, getVaSdstBitShift(), getVaSdstBitWidth());
2077}
2078
2079unsigned encodeFieldVaSdst(unsigned VaSdst) {
2080 return encodeFieldVaSdst(0xffff, VaSdst);
2081}
2082
2083unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc) {
2084 return packBits(VaVcc, Encoded, getVaVccBitShift(), getVaVccBitWidth());
2085}
2086
2087unsigned encodeFieldVaVcc(unsigned VaVcc) {
2088 return encodeFieldVaVcc(0xffff, VaVcc);
2089}
2090
2091unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc) {
2092 return packBits(VaSsrc, Encoded, getVaSsrcBitShift(), getVaSsrcBitWidth());
2093}
2094
2095unsigned encodeFieldVaSsrc(unsigned VaSsrc) {
2096 return encodeFieldVaSsrc(0xffff, VaSsrc);
2097}
2098
2099unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt) {
2100 return packBits(HoldCnt, Encoded, getHoldCntBitShift(), getHoldCntWidth());
2101}
2102
2103unsigned encodeFieldHoldCnt(unsigned HoldCnt) {
2104 return encodeFieldHoldCnt(0xffff, HoldCnt);
2105}
2106
2107} // namespace DepCtr
2108
2109//===----------------------------------------------------------------------===//
2110// exp tgt
2111//===----------------------------------------------------------------------===//
2112
2113namespace Exp {
2114
2115struct ExpTgt {
2116 StringLiteral Name;
2117 unsigned Tgt;
2118 unsigned MaxIndex;
2119};
2120
2121// clang-format off
2122static constexpr ExpTgt ExpTgtInfo[] = {
2123 {{"null"}, ET_NULL, ET_NULL_MAX_IDX},
2124 {{"mrtz"}, ET_MRTZ, ET_MRTZ_MAX_IDX},
2125 {{"prim"}, ET_PRIM, ET_PRIM_MAX_IDX},
2126 {{"mrt"}, ET_MRT0, ET_MRT_MAX_IDX},
2127 {{"pos"}, ET_POS0, ET_POS_MAX_IDX},
2128 {{"dual_src_blend"},ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX},
2129 {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX},
2130};
2131// clang-format on
2132
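// Maps an exp target id to its symbolic name. For ranged targets (mrt, pos,
// param, ...) Index receives the position within the range, otherwise -1.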
2133bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
2134 for (const ExpTgt &Val : ExpTgtInfo) {
2135 if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
2136 Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
2137 Name = Val.Name;
2138 return true;
2139 }
2140 }
2141 return false;
2142}
2143
2144unsigned getTgtId(const StringRef Name) {
2145
2146 for (const ExpTgt &Val : ExpTgtInfo) {
2147 if (Val.MaxIndex == 0 && Name == Val.Name)
2148 return Val.Tgt;
2149
2150 if (Val.MaxIndex > 0 && Name.starts_with(Val.Name)) {
2151 StringRef Suffix = Name.drop_front(Val.Name.size());
2152
2153 unsigned Id;
2154 if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex)
2155 return ET_INVALID;
2156
2157 // Disallow leading zeroes
2158 if (Suffix.size() > 1 && Suffix[0] == '0')
2159 return ET_INVALID;
2160
2161 return Val.Tgt + Id;
2162 }
2163 }
2164 return ET_INVALID;
2165}
2166
2167bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) {
2168 switch (Id) {
2169 case ET_NULL:
2170 return !isGFX11Plus(STI);
2171 case ET_POS4:
2172 case ET_PRIM:
2173 return isGFX10Plus(STI);
2174 case ET_DUAL_SRC_BLEND0:
2175 case ET_DUAL_SRC_BLEND1:
2176 return isGFX11Plus(STI);
2177 default:
2178 if (Id >= ET_PARAM0 && Id <= ET_PARAM31)
2179 return !isGFX11Plus(STI);
2180 return true;
2181 }
2182}
2183
2184} // namespace Exp
2185
2186//===----------------------------------------------------------------------===//
2187// MTBUF Format
2188//===----------------------------------------------------------------------===//
2189
2190namespace MTBUFFormat {
2191
2192int64_t getDfmt(const StringRef Name) {
2193 for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) {
2194 if (Name == DfmtSymbolic[Id])
2195 return Id;
2196 }
2197 return DFMT_UNDEF;
2198}
2199
2200StringRef getDfmtName(unsigned Id) {
2201 assert(Id <= DFMT_MAX);
2202 return DfmtSymbolic[Id];
2203}
2204
2205static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) {
2206 if (isSI(STI) || isCI(STI))
2207 return NfmtSymbolicSICI;
2208 if (isVI(STI) || isGFX9(STI))
2209 return NfmtSymbolicVI;
2210 return NfmtSymbolicGFX10;
2211}
2212
2213int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) {
2214 const auto *lookupTable = getNfmtLookupTable(STI);
2215 for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) {
2216 if (Name == lookupTable[Id])
2217 return Id;
2218 }
2219 return NFMT_UNDEF;
2220}
2221
2222StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) {
2223 assert(Id <= NFMT_MAX);
2224 return getNfmtLookupTable(STI)[Id];
2225}
2226
2227bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) {
2228 unsigned Dfmt;
2229 unsigned Nfmt;
2230 decodeDfmtNfmt(Id, Dfmt, Nfmt);
2231 return isValidNfmt(Nfmt, STI);
2232}
2233
2234bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) {
2235 return !getNfmtName(Id, STI).empty();
2236}
2237
2238int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) {
2239 return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT);
2240}
2241
2242void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) {
2243 Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK;
2244 Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK;
2245}
2246
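// GFX10+ replaces the separate dfmt/nfmt pair with a single unified format
// id; GFX10 and GFX11+ use different symbol tables.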
2247int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) {
2248 if (isGFX11Plus(STI)) {
2249 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
2250 if (Name == UfmtSymbolicGFX11[Id])
2251 return Id;
2252 }
2253 } else {
2254 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
2255 if (Name == UfmtSymbolicGFX10[Id])
2256 return Id;
2257 }
2258 }
2259 return UFMT_UNDEF;
2260}
2261
2262StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) {
2263 if (isValidUnifiedFormat(Id, STI))
2264 return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id];
2265 return "";
2266}
2267
2268bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) {
2269 return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST;
2270}
2271
2272int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt,
2273 const MCSubtargetInfo &STI) {
2274 int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt);
2275 if (isGFX11Plus(STI)) {
2276 for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
2277 if (Fmt == DfmtNfmt2UFmtGFX11[Id])
2278 return Id;
2279 }
2280 } else {
2281 for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
2282 if (Fmt == DfmtNfmt2UFmtGFX10[Id])
2283 return Id;
2284 }
2285 }
2286 return UFMT_UNDEF;
2287}
2288
2289bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) {
2290 return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX);
2291}
2292
2293unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) {
2294 if (isGFX10Plus(STI))
2295 return UFMT_DEFAULT;
2296 return DFMT_NFMT_DEFAULT;
2297}
2298
2299} // namespace MTBUFFormat
2300
2301//===----------------------------------------------------------------------===//
2302// SendMsg
2303//===----------------------------------------------------------------------===//
2304
2305namespace SendMsg {
2306
2307static uint64_t getMsgIdMask(const MCSubtargetInfo &STI) {
2308 return isGFX11Plus(STI) ? ID_MASK_GFX11Plus_ : ID_MASK_PreGFX11_;
2309}
2310
2311bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI) {
2312 return (MsgId & ~(getMsgIdMask(STI))) == 0;
2313}
2314
2315bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
2316 bool Strict) {
2317 assert(isValidMsgId(MsgId, STI));
2318
2319 if (!Strict)
2320 return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);
2321
2322 if (msgRequiresOp(MsgId, STI)) {
2323 if (MsgId == ID_GS_PreGFX11 && OpId == OP_GS_NOP)
2324 return false;
2325
2326 return !getMsgOpName(MsgId, OpId, STI).empty();
2327 }
2328
2329 return OpId == OP_NONE_;
2330}
2331
2332bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
2333 const MCSubtargetInfo &STI, bool Strict) {
2334 assert(isValidMsgOp(MsgId, OpId, STI, Strict));
2335
2336 if (!Strict)
2337 return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);
2338
2339 if (!isGFX11Plus(STI)) {
2340 switch (MsgId) {
2341 case ID_GS_PreGFX11:
2342 return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_;
2343 case ID_GS_DONE_PreGFX11:
2344 return (OpId == OP_GS_NOP)
2345 ? (StreamId == STREAM_ID_NONE_)
2346 : (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
2347 }
2348 }
2349 return StreamId == STREAM_ID_NONE_;
2350}
2351
2352bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) {
2353 return MsgId == ID_SYSMSG ||
2354 (!isGFX11Plus(STI) &&
2355 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11));
2356}
2357
2358bool msgSupportsStream(int64_t MsgId, int64_t OpId,
2359 const MCSubtargetInfo &STI) {
2360 return !isGFX11Plus(STI) &&
2361 (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) &&
2362 OpId != OP_GS_NOP;
2363}
2364
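// On GFX11+ a message carries only its id; the op and stream id fields exist
// only on earlier targets.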
2365void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
2366 uint16_t &StreamId, const MCSubtargetInfo &STI) {
2367 MsgId = Val & getMsgIdMask(STI);
2368 if (isGFX11Plus(STI)) {
2369 OpId = 0;
2370 StreamId = 0;
2371 } else {
2372 OpId = (Val & OP_MASK_) >> OP_SHIFT_;
2373 StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
2374 }
2375}
2376
2377uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId) {
2378 return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_);
2379}
2380
2381} // namespace SendMsg
2382
2383//===----------------------------------------------------------------------===//
2384//
2385//===----------------------------------------------------------------------===//
2386
2387unsigned getInitialPSInputAddr(const Function &F) {
2388 return F.getFnAttributeAsParsedInteger("InitialPSInputAddr", 0);
2389}
2390
2391bool getHasColorExport(const Function &F) {
2392 // As a safe default always respond as if PS has color exports.
2393 return F.getFnAttributeAsParsedInteger(
2394 "amdgpu-color-export",
2395 F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
2396}
2397
2398bool getHasDepthExport(const Function &F) {
2399 return F.getFnAttributeAsParsedInteger("amdgpu-depth-export", 0) != 0;
2400}
2401
2402unsigned getDynamicVGPRBlockSize(const Function &F) {
2403 unsigned BlockSize =
2404 F.getFnAttributeAsParsedInteger("amdgpu-dynamic-vgpr-block-size", 0);
2405
2406 if (BlockSize == 16 || BlockSize == 32)
2407 return BlockSize;
2408
2409 return 0;
2410}
2411
2412bool hasXNACK(const MCSubtargetInfo &STI) {
2413 return STI.hasFeature(AMDGPU::FeatureXNACK);
2414}
2415
2416bool hasSRAMECC(const MCSubtargetInfo &STI) {
2417 return STI.hasFeature(AMDGPU::FeatureSRAMECC);
2418}
2419
2420bool hasMIMG_R128(const MCSubtargetInfo &STI) {
2421 return STI.hasFeature(AMDGPU::FeatureMIMG_R128) &&
2422 !STI.hasFeature(AMDGPU::FeatureR128A16);
2423}
2424
2425bool hasA16(const MCSubtargetInfo &STI) {
2426 return STI.hasFeature(AMDGPU::FeatureA16);
2427}
2428
2429bool hasG16(const MCSubtargetInfo &STI) {
2430 return STI.hasFeature(AMDGPU::FeatureG16);
2431}
2432
2433bool hasPackedD16(const MCSubtargetInfo &STI) {
2434 return !STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) && !isCI(STI) &&
2435 !isSI(STI);
2436}
2437
2438bool hasGDS(const MCSubtargetInfo &STI) {
2439 return STI.hasFeature(AMDGPU::FeatureGDS);
2440}
2441
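// Upper bound on the number of address VGPRs in an NSA (non-sequential
// address) form image instruction for each generation; 0 means unsupported.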
2442unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler) {
2443 auto Version = getIsaVersion(STI.getCPU());
2444 if (Version.Major == 10)
2445 return Version.Minor >= 3 ? 13 : 5;
2446 if (Version.Major == 11)
2447 return 5;
2448 if (Version.Major >= 12)
2449 return HasSampler ? 4 : 5;
2450 return 0;
2451}
2452
2453unsigned getMaxNumUserSGPRs(const MCSubtargetInfo &STI) {
2454 if (isGFX1250(STI))
2455 return 32;
2456 return 16;
2457}
2458
2459bool isSI(const MCSubtargetInfo &STI) {
2460 return STI.hasFeature(AMDGPU::FeatureSouthernIslands);
2461}
2462
2463bool isCI(const MCSubtargetInfo &STI) {
2464 return STI.hasFeature(AMDGPU::FeatureSeaIslands);
2465}
2466
2467bool isVI(const MCSubtargetInfo &STI) {
2468 return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
2469}
2470
2471bool isGFX9(const MCSubtargetInfo &STI) {
2472 return STI.hasFeature(AMDGPU::FeatureGFX9);
2473}
2474
2475bool isGFX9_GFX10(const MCSubtargetInfo &STI) {
2476 return isGFX9(STI) || isGFX10(STI);
2477}
2478
2479bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI) {
2480 return isGFX9(STI) || isGFX10(STI) || isGFX11(STI);
2481}
2482
2483bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) {
2484 return isVI(STI) || isGFX9(STI) || isGFX10(STI);
2485}
2486
2487bool isGFX8Plus(const MCSubtargetInfo &STI) {
2488 return isVI(STI) || isGFX9Plus(STI);
2489}
2490
2491bool isGFX9Plus(const MCSubtargetInfo &STI) {
2492 return isGFX9(STI) || isGFX10Plus(STI);
2493}
2494
2495bool isNotGFX9Plus(const MCSubtargetInfo &STI) { return !isGFX9Plus(STI); }
2496
2497bool isGFX10(const MCSubtargetInfo &STI) {
2498 return STI.hasFeature(AMDGPU::FeatureGFX10);
2499}
2500
2501bool isGFX10_GFX11(const MCSubtargetInfo &STI) {
2502 return isGFX10(STI) || isGFX11(STI);
2503}
2504
2505bool isGFX10Plus(const MCSubtargetInfo &STI) {
2506 return isGFX10(STI) || isGFX11Plus(STI);
2507}
2508
2509bool isGFX11(const MCSubtargetInfo &STI) {
2510 return STI.hasFeature(AMDGPU::FeatureGFX11);
2511}
2512
2513bool isGFX11Plus(const MCSubtargetInfo &STI) {
2514 return isGFX11(STI) || isGFX12Plus(STI);
2515}
2516
2517bool isGFX12(const MCSubtargetInfo &STI) {
2518 return STI.getFeatureBits()[AMDGPU::FeatureGFX12];
2519}
2520
2521bool isGFX12Plus(const MCSubtargetInfo &STI) { return isGFX12(STI); }
2522
2523bool isNotGFX12Plus(const MCSubtargetInfo &STI) { return !isGFX12Plus(STI); }
2524
2525bool isGFX1250(const MCSubtargetInfo &STI) {
2526 return STI.getFeatureBits()[AMDGPU::FeatureGFX1250Insts];
2527}
2528
2530 if (isGFX1250(STI))
2531 return false;
2532 return isGFX10Plus(STI);
2533}
2534
2535bool isNotGFX11Plus(const MCSubtargetInfo &STI) { return !isGFX11Plus(STI); }
2536
2537bool isNotGFX10Plus(const MCSubtargetInfo &STI) {
2538 return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
2539}
2540
2541bool isGFX10Before1030(const MCSubtargetInfo &STI) {
2542 return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
2543}
2544
2545bool isGCN3Encoding(const MCSubtargetInfo &STI) {
2546 return STI.hasFeature(AMDGPU::FeatureGCN3Encoding);
2547}
2548
2549bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
2550 return STI.hasFeature(AMDGPU::FeatureGFX10_AEncoding);
2551}
2552
2553bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
2554 return STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding);
2555}
2556
2557bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
2558 return STI.hasFeature(AMDGPU::FeatureGFX10_3Insts);
2559}
2560
2561bool isGFX10_3_GFX11(const MCSubtargetInfo &STI) {
2562 return isGFX10_BEncoding(STI) && !isGFX12Plus(STI);
2563}
2564
2565bool isGFX90A(const MCSubtargetInfo &STI) {
2566 return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
2567}
2568
2569bool isGFX940(const MCSubtargetInfo &STI) {
2570 return STI.hasFeature(AMDGPU::FeatureGFX940Insts);
2571}
2572
2573bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
2574 return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
2575}
2576
2577bool hasMAIInsts(const MCSubtargetInfo &STI) {
2578 return STI.hasFeature(AMDGPU::FeatureMAIInsts);
2579}
2580
2581bool hasVOPD(const MCSubtargetInfo &STI) {
2582 return STI.hasFeature(AMDGPU::FeatureVOPD);
2583}
2584
2585bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI) {
2586 return STI.hasFeature(AMDGPU::FeatureDPPSrc1SGPR);
2587}
2588
2589bool hasKernargPreload(const MCSubtargetInfo &STI) {
2590 return STI.hasFeature(AMDGPU::FeatureKernargPreload);
2591}
2592
2593int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
2594 int32_t ArgNumVGPR) {
2595 if (has90AInsts && ArgNumAGPR)
2596 return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
2597 return std::max(ArgNumVGPR, ArgNumAGPR);
2598}
2599
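// A register counts as an SGPR if its first 32-bit subregister (or, failing
// that, the register itself) is in SReg_32, or if it is SCC.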
2600bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI) {
2601 const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
2602 const MCRegister FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
2603 return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
2604 Reg == AMDGPU::SCC;
2605}
2606
2607bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI) {
2608 return MRI.getEncodingValue(Reg) & AMDGPU::HWEncoding::IS_HI16;
2609}
2610
2611#define MAP_REG2REG \
2612 using namespace AMDGPU; \
2613 switch (Reg.id()) { \
2614 default: \
2615 return Reg; \
2616 CASE_CI_VI(FLAT_SCR) \
2617 CASE_CI_VI(FLAT_SCR_LO) \
2618 CASE_CI_VI(FLAT_SCR_HI) \
2619 CASE_VI_GFX9PLUS(TTMP0) \
2620 CASE_VI_GFX9PLUS(TTMP1) \
2621 CASE_VI_GFX9PLUS(TTMP2) \
2622 CASE_VI_GFX9PLUS(TTMP3) \
2623 CASE_VI_GFX9PLUS(TTMP4) \
2624 CASE_VI_GFX9PLUS(TTMP5) \
2625 CASE_VI_GFX9PLUS(TTMP6) \
2626 CASE_VI_GFX9PLUS(TTMP7) \
2627 CASE_VI_GFX9PLUS(TTMP8) \
2628 CASE_VI_GFX9PLUS(TTMP9) \
2629 CASE_VI_GFX9PLUS(TTMP10) \
2630 CASE_VI_GFX9PLUS(TTMP11) \
2631 CASE_VI_GFX9PLUS(TTMP12) \
2632 CASE_VI_GFX9PLUS(TTMP13) \
2633 CASE_VI_GFX9PLUS(TTMP14) \
2634 CASE_VI_GFX9PLUS(TTMP15) \
2635 CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
2636 CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
2637 CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
2638 CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
2639 CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
2640 CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
2641 CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
2642 CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
2643 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
2644 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
2645 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
2646 CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
2647 CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
2648 CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
2649 CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
2650 CASE_VI_GFX9PLUS( \
2651 TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
2652 CASE_GFXPRE11_GFX11PLUS(M0) \
2653 CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
2654 CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \
2655 }
2656
2657#define CASE_CI_VI(node) \
2658 assert(!isSI(STI)); \
2659 case node: \
2660 return isCI(STI) ? node##_ci : node##_vi;
2661
2662#define CASE_VI_GFX9PLUS(node) \
2663 case node: \
2664 return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;
2665
2666#define CASE_GFXPRE11_GFX11PLUS(node) \
2667 case node: \
2668 return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;
2669
2670#define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
2671 case node: \
2672 return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;
2673
2674MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI) {
2675 if (STI.getTargetTriple().getArch() == Triple::r600)
2676 return Reg;
2677 MAP_REG2REG
2678}
2679
2680#undef CASE_CI_VI
2681#undef CASE_VI_GFX9PLUS
2682#undef CASE_GFXPRE11_GFX11PLUS
2683#undef CASE_GFXPRE11_GFX11PLUS_TO
2684
2685#define CASE_CI_VI(node) \
2686 case node##_ci: \
2687 case node##_vi: \
2688 return node;
2689#define CASE_VI_GFX9PLUS(node) \
2690 case node##_vi: \
2691 case node##_gfx9plus: \
2692 return node;
2693#define CASE_GFXPRE11_GFX11PLUS(node) \
2694 case node##_gfx11plus: \
2695 case node##_gfxpre11: \
2696 return node;
2697#define CASE_GFXPRE11_GFX11PLUS_TO(node, result)
2698
2699MCRegister mc2PseudoReg(MCRegister Reg) { MAP_REG2REG }
2700
2701bool isInlineValue(MCRegister Reg) {
2702 switch (Reg.id()) {
2703 case AMDGPU::SRC_SHARED_BASE_LO:
2704 case AMDGPU::SRC_SHARED_BASE:
2705 case AMDGPU::SRC_SHARED_LIMIT_LO:
2706 case AMDGPU::SRC_SHARED_LIMIT:
2707 case AMDGPU::SRC_PRIVATE_BASE_LO:
2708 case AMDGPU::SRC_PRIVATE_BASE:
2709 case AMDGPU::SRC_PRIVATE_LIMIT_LO:
2710 case AMDGPU::SRC_PRIVATE_LIMIT:
2711 case AMDGPU::SRC_FLAT_SCRATCH_BASE_LO:
2712 case AMDGPU::SRC_FLAT_SCRATCH_BASE_HI:
2713 case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
2714 return true;
2715 case AMDGPU::SRC_VCCZ:
2716 case AMDGPU::SRC_EXECZ:
2717 case AMDGPU::SRC_SCC:
2718 return true;
2719 case AMDGPU::SGPR_NULL:
2720 return true;
2721 default:
2722 return false;
2723 }
2724}
2725
2726#undef CASE_CI_VI
2727#undef CASE_VI_GFX9PLUS
2728#undef CASE_GFXPRE11_GFX11PLUS
2729#undef CASE_GFXPRE11_GFX11PLUS_TO
2730#undef MAP_REG2REG
2731
2732bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2733 assert(OpNo < Desc.NumOperands);
2734 unsigned OpType = Desc.operands()[OpNo].OperandType;
2735 return OpType >= AMDGPU::OPERAND_KIMM_FIRST &&
2736 OpType <= AMDGPU::OPERAND_KIMM_LAST;
2737}
2738
2739bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2740 assert(OpNo < Desc.NumOperands);
2741 unsigned OpType = Desc.operands()[OpNo].OperandType;
2742 switch (OpType) {
2755 return true;
2756 default:
2757 return false;
2758 }
2759}
2760
2761bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2762 assert(OpNo < Desc.NumOperands);
2763 unsigned OpType = Desc.operands()[OpNo].OperandType;
2764 return (OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
2765 OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST) ||
2766 (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
2767 OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST);
2768}
2769
2770// Avoid using MCRegisterClass::getSize, since that function will go away
2771// (move from MC* level to Target* level). Return size in bits.
2772unsigned getRegBitWidth(unsigned RCID) {
2773 switch (RCID) {
2774 case AMDGPU::VGPR_16RegClassID:
2775 case AMDGPU::VGPR_16_Lo128RegClassID:
2776 case AMDGPU::SGPR_LO16RegClassID:
2777 case AMDGPU::AGPR_LO16RegClassID:
2778 return 16;
2779 case AMDGPU::SGPR_32RegClassID:
2780 case AMDGPU::VGPR_32RegClassID:
2781 case AMDGPU::VGPR_32_Lo256RegClassID:
2782 case AMDGPU::VRegOrLds_32RegClassID:
2783 case AMDGPU::AGPR_32RegClassID:
2784 case AMDGPU::VS_32RegClassID:
2785 case AMDGPU::AV_32RegClassID:
2786 case AMDGPU::SReg_32RegClassID:
2787 case AMDGPU::SReg_32_XM0RegClassID:
2788 case AMDGPU::SRegOrLds_32RegClassID:
2789 return 32;
2790 case AMDGPU::SGPR_64RegClassID:
2791 case AMDGPU::VS_64RegClassID:
2792 case AMDGPU::SReg_64RegClassID:
2793 case AMDGPU::VReg_64RegClassID:
2794 case AMDGPU::AReg_64RegClassID:
2795 case AMDGPU::SReg_64_XEXECRegClassID:
2796 case AMDGPU::VReg_64_Align2RegClassID:
2797 case AMDGPU::AReg_64_Align2RegClassID:
2798 case AMDGPU::AV_64RegClassID:
2799 case AMDGPU::AV_64_Align2RegClassID:
2800 case AMDGPU::VReg_64_Lo256_Align2RegClassID:
2801 case AMDGPU::VS_64_Lo256RegClassID:
2802 return 64;
2803 case AMDGPU::SGPR_96RegClassID:
2804 case AMDGPU::SReg_96RegClassID:
2805 case AMDGPU::VReg_96RegClassID:
2806 case AMDGPU::AReg_96RegClassID:
2807 case AMDGPU::VReg_96_Align2RegClassID:
2808 case AMDGPU::AReg_96_Align2RegClassID:
2809 case AMDGPU::AV_96RegClassID:
2810 case AMDGPU::AV_96_Align2RegClassID:
2811 case AMDGPU::VReg_96_Lo256_Align2RegClassID:
2812 return 96;
2813 case AMDGPU::SGPR_128RegClassID:
2814 case AMDGPU::SReg_128RegClassID:
2815 case AMDGPU::VReg_128RegClassID:
2816 case AMDGPU::AReg_128RegClassID:
2817 case AMDGPU::VReg_128_Align2RegClassID:
2818 case AMDGPU::AReg_128_Align2RegClassID:
2819 case AMDGPU::AV_128RegClassID:
2820 case AMDGPU::AV_128_Align2RegClassID:
2821 case AMDGPU::SReg_128_XNULLRegClassID:
2822 case AMDGPU::VReg_128_Lo256_Align2RegClassID:
2823 return 128;
2824 case AMDGPU::SGPR_160RegClassID:
2825 case AMDGPU::SReg_160RegClassID:
2826 case AMDGPU::VReg_160RegClassID:
2827 case AMDGPU::AReg_160RegClassID:
2828 case AMDGPU::VReg_160_Align2RegClassID:
2829 case AMDGPU::AReg_160_Align2RegClassID:
2830 case AMDGPU::AV_160RegClassID:
2831 case AMDGPU::AV_160_Align2RegClassID:
2832 case AMDGPU::VReg_160_Lo256_Align2RegClassID:
2833 return 160;
2834 case AMDGPU::SGPR_192RegClassID:
2835 case AMDGPU::SReg_192RegClassID:
2836 case AMDGPU::VReg_192RegClassID:
2837 case AMDGPU::AReg_192RegClassID:
2838 case AMDGPU::VReg_192_Align2RegClassID:
2839 case AMDGPU::AReg_192_Align2RegClassID:
2840 case AMDGPU::AV_192RegClassID:
2841 case AMDGPU::AV_192_Align2RegClassID:
2842 case AMDGPU::VReg_192_Lo256_Align2RegClassID:
2843 return 192;
2844 case AMDGPU::SGPR_224RegClassID:
2845 case AMDGPU::SReg_224RegClassID:
2846 case AMDGPU::VReg_224RegClassID:
2847 case AMDGPU::AReg_224RegClassID:
2848 case AMDGPU::VReg_224_Align2RegClassID:
2849 case AMDGPU::AReg_224_Align2RegClassID:
2850 case AMDGPU::AV_224RegClassID:
2851 case AMDGPU::AV_224_Align2RegClassID:
2852 case AMDGPU::VReg_224_Lo256_Align2RegClassID:
2853 return 224;
2854 case AMDGPU::SGPR_256RegClassID:
2855 case AMDGPU::SReg_256RegClassID:
2856 case AMDGPU::VReg_256RegClassID:
2857 case AMDGPU::AReg_256RegClassID:
2858 case AMDGPU::VReg_256_Align2RegClassID:
2859 case AMDGPU::AReg_256_Align2RegClassID:
2860 case AMDGPU::AV_256RegClassID:
2861 case AMDGPU::AV_256_Align2RegClassID:
2862 case AMDGPU::SReg_256_XNULLRegClassID:
2863 case AMDGPU::VReg_256_Lo256_Align2RegClassID:
2864 return 256;
2865 case AMDGPU::SGPR_288RegClassID:
2866 case AMDGPU::SReg_288RegClassID:
2867 case AMDGPU::VReg_288RegClassID:
2868 case AMDGPU::AReg_288RegClassID:
2869 case AMDGPU::VReg_288_Align2RegClassID:
2870 case AMDGPU::AReg_288_Align2RegClassID:
2871 case AMDGPU::AV_288RegClassID:
2872 case AMDGPU::AV_288_Align2RegClassID:
2873 case AMDGPU::VReg_288_Lo256_Align2RegClassID:
2874 return 288;
2875 case AMDGPU::SGPR_320RegClassID:
2876 case AMDGPU::SReg_320RegClassID:
2877 case AMDGPU::VReg_320RegClassID:
2878 case AMDGPU::AReg_320RegClassID:
2879 case AMDGPU::VReg_320_Align2RegClassID:
2880 case AMDGPU::AReg_320_Align2RegClassID:
2881 case AMDGPU::AV_320RegClassID:
2882 case AMDGPU::AV_320_Align2RegClassID:
2883 case AMDGPU::VReg_320_Lo256_Align2RegClassID:
2884 return 320;
2885 case AMDGPU::SGPR_352RegClassID:
2886 case AMDGPU::SReg_352RegClassID:
2887 case AMDGPU::VReg_352RegClassID:
2888 case AMDGPU::AReg_352RegClassID:
2889 case AMDGPU::VReg_352_Align2RegClassID:
2890 case AMDGPU::AReg_352_Align2RegClassID:
2891 case AMDGPU::AV_352RegClassID:
2892 case AMDGPU::AV_352_Align2RegClassID:
2893 case AMDGPU::VReg_352_Lo256_Align2RegClassID:
2894 return 352;
2895 case AMDGPU::SGPR_384RegClassID:
2896 case AMDGPU::SReg_384RegClassID:
2897 case AMDGPU::VReg_384RegClassID:
2898 case AMDGPU::AReg_384RegClassID:
2899 case AMDGPU::VReg_384_Align2RegClassID:
2900 case AMDGPU::AReg_384_Align2RegClassID:
2901 case AMDGPU::AV_384RegClassID:
2902 case AMDGPU::AV_384_Align2RegClassID:
2903 case AMDGPU::VReg_384_Lo256_Align2RegClassID:
2904 return 384;
2905 case AMDGPU::SGPR_512RegClassID:
2906 case AMDGPU::SReg_512RegClassID:
2907 case AMDGPU::VReg_512RegClassID:
2908 case AMDGPU::AReg_512RegClassID:
2909 case AMDGPU::VReg_512_Align2RegClassID:
2910 case AMDGPU::AReg_512_Align2RegClassID:
2911 case AMDGPU::AV_512RegClassID:
2912 case AMDGPU::AV_512_Align2RegClassID:
2913 case AMDGPU::VReg_512_Lo256_Align2RegClassID:
2914 return 512;
2915 case AMDGPU::SGPR_1024RegClassID:
2916 case AMDGPU::SReg_1024RegClassID:
2917 case AMDGPU::VReg_1024RegClassID:
2918 case AMDGPU::AReg_1024RegClassID:
2919 case AMDGPU::VReg_1024_Align2RegClassID:
2920 case AMDGPU::AReg_1024_Align2RegClassID:
2921 case AMDGPU::AV_1024RegClassID:
2922 case AMDGPU::AV_1024_Align2RegClassID:
2923 case AMDGPU::VReg_1024_Lo256_Align2RegClassID:
2924 return 1024;
2925 default:
2926 llvm_unreachable("Unexpected register class");
2927 }
2928}
2929
2930unsigned getRegBitWidth(const MCRegisterClass &RC) {
2931 return getRegBitWidth(RC.getID());
2932}
2933
2934bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
2935 if (isInlinableIntLiteral(Literal))
2936 return true;
2937
2938 uint64_t Val = static_cast<uint64_t>(Literal);
2939 return (Val == llvm::bit_cast<uint64_t>(0.0)) ||
2940 (Val == llvm::bit_cast<uint64_t>(1.0)) ||
2941 (Val == llvm::bit_cast<uint64_t>(-1.0)) ||
2942 (Val == llvm::bit_cast<uint64_t>(0.5)) ||
2943 (Val == llvm::bit_cast<uint64_t>(-0.5)) ||
2944 (Val == llvm::bit_cast<uint64_t>(2.0)) ||
2945 (Val == llvm::bit_cast<uint64_t>(-2.0)) ||
2946 (Val == llvm::bit_cast<uint64_t>(4.0)) ||
2947 (Val == llvm::bit_cast<uint64_t>(-4.0)) ||
2948 (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
2949}
2950
2951bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
2952 if (isInlinableIntLiteral(Literal))
2953 return true;
2954
2955 // The actual type of the operand does not seem to matter as long
2956 // as the bits match one of the inline immediate values. For example:
2957 //
2958 // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
2959 // so it is a legal inline immediate.
2960 //
2961 // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
2962 // floating-point, so it is a legal inline immediate.
2963
2964 uint32_t Val = static_cast<uint32_t>(Literal);
2965 return (Val == llvm::bit_cast<uint32_t>(0.0f)) ||
2966 (Val == llvm::bit_cast<uint32_t>(1.0f)) ||
2967 (Val == llvm::bit_cast<uint32_t>(-1.0f)) ||
2968 (Val == llvm::bit_cast<uint32_t>(0.5f)) ||
2969 (Val == llvm::bit_cast<uint32_t>(-0.5f)) ||
2970 (Val == llvm::bit_cast<uint32_t>(2.0f)) ||
2971 (Val == llvm::bit_cast<uint32_t>(-2.0f)) ||
2972 (Val == llvm::bit_cast<uint32_t>(4.0f)) ||
2973 (Val == llvm::bit_cast<uint32_t>(-4.0f)) ||
2974 (Val == 0x3e22f983 && HasInv2Pi);
2975}
2976
2977bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi) {
2978 if (!HasInv2Pi)
2979 return false;
2980 if (isInlinableIntLiteral(Literal))
2981 return true;
2982 uint16_t Val = static_cast<uint16_t>(Literal);
2983 return Val == 0x3F00 || // 0.5
2984 Val == 0xBF00 || // -0.5
2985 Val == 0x3F80 || // 1.0
2986 Val == 0xBF80 || // -1.0
2987 Val == 0x4000 || // 2.0
2988 Val == 0xC000 || // -2.0
2989 Val == 0x4080 || // 4.0
2990 Val == 0xC080 || // -4.0
2991 Val == 0x3E22; // 1.0 / (2.0 * pi)
2992}
2993
2994bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi) {
2995 return isInlinableLiteral32(Literal, HasInv2Pi);
2996}
2997
2998bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi) {
2999 if (!HasInv2Pi)
3000 return false;
3001 if (isInlinableIntLiteral(Literal))
3002 return true;
3003 uint16_t Val = static_cast<uint16_t>(Literal);
3004 return Val == 0x3C00 || // 1.0
3005 Val == 0xBC00 || // -1.0
3006 Val == 0x3800 || // 0.5
3007 Val == 0xB800 || // -0.5
3008 Val == 0x4000 || // 2.0
3009 Val == 0xC000 || // -2.0
3010 Val == 0x4400 || // 4.0
3011 Val == 0xC400 || // -4.0
3012 Val == 0x3118; // 1/2pi
3013}
3014
3015std::optional<unsigned> getInlineEncodingV216(bool IsFloat, uint32_t Literal) {
3016 // Unfortunately, the Instruction Set Architecture Reference Guide is
3017 // misleading about how the inline operands work for (packed) 16-bit
3018 // instructions. In a nutshell, the actual HW behavior is:
3019 //
3020 // - integer encodings (-16 .. 64) are always produced as sign-extended
3021 // 32-bit values
3022 // - float encodings are produced as:
3023 // - for F16 instructions: corresponding half-precision float values in
3024 // the LSBs, 0 in the MSBs
3025 // - for UI16 instructions: corresponding single-precision float value
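 //
 // Worked example: for an F16 instruction the literal 0x3C00 (1.0) encodes
 // as 242 below, -5 encodes as 192 + 5 = 197, and 7 encodes as 128 + 7 = 135.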
3026 int32_t Signed = static_cast<int32_t>(Literal);
3027 if (Signed >= 0 && Signed <= 64)
3028 return 128 + Signed;
3029
3030 if (Signed >= -16 && Signed <= -1)
3031 return 192 + std::abs(Signed);
3032
3033 if (IsFloat) {
3034 // clang-format off
3035 switch (Literal) {
3036 case 0x3800: return 240; // 0.5
3037 case 0xB800: return 241; // -0.5
3038 case 0x3C00: return 242; // 1.0
3039 case 0xBC00: return 243; // -1.0
3040 case 0x4000: return 244; // 2.0
3041 case 0xC000: return 245; // -2.0
3042 case 0x4400: return 246; // 4.0
3043 case 0xC400: return 247; // -4.0
3044 case 0x3118: return 248; // 1.0 / (2.0 * pi)
3045 default: break;
3046 }
3047 // clang-format on
3048 } else {
3049 // clang-format off
3050 switch (Literal) {
3051 case 0x3F000000: return 240; // 0.5
3052 case 0xBF000000: return 241; // -0.5
3053 case 0x3F800000: return 242; // 1.0
3054 case 0xBF800000: return 243; // -1.0
3055 case 0x40000000: return 244; // 2.0
3056 case 0xC0000000: return 245; // -2.0
3057 case 0x40800000: return 246; // 4.0
3058 case 0xC0800000: return 247; // -4.0
3059 case 0x3E22F983: return 248; // 1.0 / (2.0 * pi)
3060 default: break;
3061 }
3062 // clang-format on
3063 }
3064
3065 return {};
3066}
3067
3068// Encoding of the literal as an inline constant for a V_PK_*_IU16 instruction
3069// or nullopt.
3070std::optional<unsigned> getInlineEncodingV2I16(uint32_t Literal) {
3071 return getInlineEncodingV216(false, Literal);
3072}
3073
3074// Encoding of the literal as an inline constant for a V_PK_*_BF16 instruction
3075// or nullopt.
3076std::optional<unsigned> getInlineEncodingV2BF16(uint32_t Literal) {
3077 int32_t Signed = static_cast<int32_t>(Literal);
3078 if (Signed >= 0 && Signed <= 64)
3079 return 128 + Signed;
3080
3081 if (Signed >= -16 && Signed <= -1)
3082 return 192 + std::abs(Signed);
3083
3084 // clang-format off
3085 switch (Literal) {
3086 case 0x3F00: return 240; // 0.5
3087 case 0xBF00: return 241; // -0.5
3088 case 0x3F80: return 242; // 1.0
3089 case 0xBF80: return 243; // -1.0
3090 case 0x4000: return 244; // 2.0
3091 case 0xC000: return 245; // -2.0
3092 case 0x4080: return 246; // 4.0
3093 case 0xC080: return 247; // -4.0
3094 case 0x3E22: return 248; // 1.0 / (2.0 * pi)
3095 default: break;
3096 }
3097 // clang-format on
3098
3099 return std::nullopt;
3100}
3101
3102// Encoding of the literal as an inline constant for a V_PK_*_F16 instruction
3103// or nullopt.
3104std::optional<unsigned> getInlineEncodingV2F16(uint32_t Literal) {
3105 return getInlineEncodingV216(true, Literal);
3106}
3107
3108// Whether the given literal can be inlined for a V_PK_* instruction.
3109bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType) {
3110 switch (OpType) {
3113 return getInlineEncodingV216(false, Literal).has_value();
3116 return getInlineEncodingV216(true, Literal).has_value();
3121 return false;
3122 default:
3123 llvm_unreachable("bad packed operand type");
3124 }
3125}
3126
3127// Whether the given literal can be inlined for a V_PK_*_IU16 instruction.
3128bool isInlinableLiteralV2I16(uint32_t Literal) {
3129 return getInlineEncodingV2I16(Literal).has_value();
3130}
3131
3132// Whether the given literal can be inlined for a V_PK_*_BF16 instruction.
3133bool isInlinableLiteralV2BF16(uint32_t Literal) {
3134 return getInlineEncodingV2BF16(Literal).has_value();
3135}
3136
3137// Whether the given literal can be inlined for a V_PK_*_F16 instruction.
3138bool isInlinableLiteralV2F16(uint32_t Literal) {
3139 return getInlineEncodingV2F16(Literal).has_value();
3140}
3141
3142bool isValid32BitLiteral(uint64_t Val, bool IsFP64) {
3143 if (IsFP64)
3144 return !Lo_32(Val);
3145
3146 return isUInt<32>(Val) || isInt<32>(Val);
3147}
3148
3149int64_t encode32BitLiteral(int64_t Imm, OperandType Type, bool IsLit) {
3150 switch (Type) {
3151 default:
3152 break;
3157 return Imm & 0xffff;
3170 return Lo_32(Imm);
3172 return IsLit ? Imm : Hi_32(Imm);
3173 }
3174 return Imm;
3175}
3176
3177bool isArgPassedInSGPR(const Argument *A) {
3178 const Function *F = A->getParent();
3179
3180 // Arguments to compute shaders are never a source of divergence.
3181 CallingConv::ID CC = F->getCallingConv();
3182 switch (CC) {
3183 case CallingConv::AMDGPU_KERNEL:
3184 case CallingConv::SPIR_KERNEL:
3185 return true;
3186 case CallingConv::AMDGPU_VS:
3187 case CallingConv::AMDGPU_LS:
3188 case CallingConv::AMDGPU_HS:
3189 case CallingConv::AMDGPU_ES:
3190 case CallingConv::AMDGPU_GS:
3191 case CallingConv::AMDGPU_PS:
3192 case CallingConv::AMDGPU_CS:
3193 case CallingConv::AMDGPU_Gfx:
3194 case CallingConv::AMDGPU_CS_Chain:
3195 case CallingConv::AMDGPU_CS_ChainPreserve:
3196 // For non-compute shaders, SGPR inputs are marked with either inreg or
3197 // byval. Everything else is in VGPRs.
3198 return A->hasAttribute(Attribute::InReg) ||
3199 A->hasAttribute(Attribute::ByVal);
3200 default:
3201 // TODO: treat i1 as divergent?
3202 return A->hasAttribute(Attribute::InReg);
3203 }
3204}
3205
3206bool isArgPassedInSGPR(const CallBase *CB, unsigned ArgNo) {
3207 // Arguments to compute shaders are never a source of divergence.
3208 CallingConv::ID CC = CB->getCallingConv();
3209 switch (CC) {
3210 case CallingConv::AMDGPU_KERNEL:
3211 case CallingConv::SPIR_KERNEL:
3212 return true;
3213 case CallingConv::AMDGPU_VS:
3214 case CallingConv::AMDGPU_LS:
3215 case CallingConv::AMDGPU_HS:
3216 case CallingConv::AMDGPU_ES:
3217 case CallingConv::AMDGPU_GS:
3218 case CallingConv::AMDGPU_PS:
3219 case CallingConv::AMDGPU_CS:
3220 case CallingConv::AMDGPU_Gfx:
3221 case CallingConv::AMDGPU_CS_Chain:
3222 case CallingConv::AMDGPU_CS_ChainPreserve:
3223 // For non-compute shaders, SGPR inputs are marked with either inreg or
3224 // byval. Everything else is in VGPRs.
3225 return CB->paramHasAttr(ArgNo, Attribute::InReg) ||
3226 CB->paramHasAttr(ArgNo, Attribute::ByVal);
3227 default:
3228 return CB->paramHasAttr(ArgNo, Attribute::InReg);
3229 }
3230}
3231
3232static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
3233 return isGCN3Encoding(ST) || isGFX10Plus(ST);
3234}
3235
3236bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
3237 int64_t EncodedOffset) {
3238 if (isGFX12Plus(ST))
3239 return isUInt<23>(EncodedOffset);
3240
3241 return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
3242 : isUInt<8>(EncodedOffset);
3243}
3244
3245bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
3246 int64_t EncodedOffset, bool IsBuffer) {
3247 if (isGFX12Plus(ST)) {
3248 if (IsBuffer && EncodedOffset < 0)
3249 return false;
3250 return isInt<24>(EncodedOffset);
3251 }
3252
3253 return !IsBuffer && hasSMRDSignedImmOffset(ST) && isInt<21>(EncodedOffset);
3254}
3255
3256static bool isDwordAligned(uint64_t ByteOffset) {
3257 return (ByteOffset & 3) == 0;
3258}
3259
3260uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
3261 uint64_t ByteOffset) {
3262 if (hasSMEMByteOffset(ST))
3263 return ByteOffset;
3264
3265 assert(isDwordAligned(ByteOffset));
3266 return ByteOffset >> 2;
3267}
3268
3269std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
3270 int64_t ByteOffset, bool IsBuffer,
3271 bool HasSOffset) {
3272 // For unbuffered smem loads, it is illegal for the Immediate Offset to be
3273 // negative if the resulting (Offset + (M0 or SOffset or zero)) is negative.
3274 // Handle case where SOffset is not present.
3275 if (!IsBuffer && !HasSOffset && ByteOffset < 0 && hasSMRDSignedImmOffset(ST))
3276 return std::nullopt;
3277
3278 if (isGFX12Plus(ST)) // 24 bit signed offsets
3279 return isInt<24>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
3280 : std::nullopt;
3281
3282 // The signed version is always a byte offset.
3283 if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
3284 assert(hasSMEMByteOffset(ST));
3285 return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
3286 : std::nullopt;
3287 }
3288
3289 if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
3290 return std::nullopt;
3291
3292 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
3293 return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
3294 ? std::optional<int64_t>(EncodedOffset)
3295 : std::nullopt;
3296}
3297
3298std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
3299 int64_t ByteOffset) {
3300 if (!isCI(ST) || !isDwordAligned(ByteOffset))
3301 return std::nullopt;
3302
3303 int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
3304 return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
3305 : std::nullopt;
3306}
3307
3308unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST) {
3309 if (AMDGPU::isGFX10(ST))
3310 return 12;
3311
3312 if (AMDGPU::isGFX12(ST))
3313 return 24;
3314 return 13;
3315}
3316
3317namespace {
3318
3319struct SourceOfDivergence {
3320 unsigned Intr;
3321};
3322const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);
3323
3324struct AlwaysUniform {
3325 unsigned Intr;
3326};
3327const AlwaysUniform *lookupAlwaysUniform(unsigned Intr);
3328
3329#define GET_SourcesOfDivergence_IMPL
3330#define GET_UniformIntrinsics_IMPL
3331#define GET_Gfx9BufferFormat_IMPL
3332#define GET_Gfx10BufferFormat_IMPL
3333#define GET_Gfx11PlusBufferFormat_IMPL
3334
3335#include "AMDGPUGenSearchableTables.inc"
3336
3337} // end anonymous namespace
3338
3339bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
3340 return lookupSourceOfDivergence(IntrID);
3341}
3342
3343bool isIntrinsicAlwaysUniform(unsigned IntrID) {
3344 return lookupAlwaysUniform(IntrID);
3345}
3346
3347const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
3348 uint8_t NumComponents,
3349 uint8_t NumFormat,
3350 const MCSubtargetInfo &STI) {
3351 return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(
3352 BitsPerComp, NumComponents, NumFormat)
3353 : isGFX10(STI)
3354 ? getGfx10BufferFormatInfo(BitsPerComp, NumComponents, NumFormat)
3355 : getGfx9BufferFormatInfo(BitsPerComp, NumComponents, NumFormat);
3356}
3357
3358const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
3359 const MCSubtargetInfo &STI) {
3360 return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format)
3361 : isGFX10(STI) ? getGfx10BufferFormatInfo(Format)
3362 : getGfx9BufferFormatInfo(Format);
3363}
3364
3366 const MCRegisterInfo &MRI) {
3367 const unsigned VGPRClasses[] = {
3368 AMDGPU::VGPR_16RegClassID, AMDGPU::VGPR_32RegClassID,
3369 AMDGPU::VReg_64RegClassID, AMDGPU::VReg_96RegClassID,
3370 AMDGPU::VReg_128RegClassID, AMDGPU::VReg_160RegClassID,
3371 AMDGPU::VReg_192RegClassID, AMDGPU::VReg_224RegClassID,
3372 AMDGPU::VReg_256RegClassID, AMDGPU::VReg_288RegClassID,
3373 AMDGPU::VReg_320RegClassID, AMDGPU::VReg_352RegClassID,
3374 AMDGPU::VReg_384RegClassID, AMDGPU::VReg_512RegClassID,
3375 AMDGPU::VReg_1024RegClassID};
3376
3377 for (unsigned RCID : VGPRClasses) {
3378 const MCRegisterClass &RC = MRI.getRegClass(RCID);
3379 if (RC.contains(Reg))
3380 return &RC;
3381 }
3382
3383 return nullptr;
3384}
3385
3387 unsigned Enc = MRI.getEncodingValue(Reg);
3388 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
3389 return Idx >> 8;
3390}
3391
3393 const MCRegisterInfo &MRI) {
3394 unsigned Enc = MRI.getEncodingValue(Reg);
3395 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
3396 if (Idx >= 0x100)
3397 return MCRegister();
3398
3400 if (!RC)
3401 return MCRegister();
3402
3403 Idx |= MSBs << 8;
3404 if (RC->getID() == AMDGPU::VGPR_16RegClassID) {
3405 // This class has 2048 registers with interleaved lo16 and hi16.
3406 Idx *= 2;
3408 ++Idx;
3409 }
3410
3411 return RC->getRegister(Idx);
3412}
3413
3414std::pair<const AMDGPU::OpName *, const AMDGPU::OpName *>
3416 static const AMDGPU::OpName VOPOps[4] = {
3417 AMDGPU::OpName::src0, AMDGPU::OpName::src1, AMDGPU::OpName::src2,
3418 AMDGPU::OpName::vdst};
3419 static const AMDGPU::OpName VDSOps[4] = {
3420 AMDGPU::OpName::addr, AMDGPU::OpName::data0, AMDGPU::OpName::data1,
3421 AMDGPU::OpName::vdst};
3422 static const AMDGPU::OpName FLATOps[4] = {
3423 AMDGPU::OpName::vaddr, AMDGPU::OpName::vdata,
3424 AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdst};
3425 static const AMDGPU::OpName BUFOps[4] = {
3426 AMDGPU::OpName::vaddr, AMDGPU::OpName::NUM_OPERAND_NAMES,
3427 AMDGPU::OpName::NUM_OPERAND_NAMES, AMDGPU::OpName::vdata};
3428 static const AMDGPU::OpName VIMGOps[4] = {
3429 AMDGPU::OpName::vaddr0, AMDGPU::OpName::vaddr1, AMDGPU::OpName::vaddr2,
3430 AMDGPU::OpName::vdata};
3431
3432 // For VOPD instructions, the MSB of a Y component operand's VGPR address
3433 // is supposed to match that of the corresponding X operand; otherwise the
3434 // two halves shall not be combined into a VOPD instruction.
3435 static const AMDGPU::OpName VOPDOpsX[4] = {
3436 AMDGPU::OpName::src0X, AMDGPU::OpName::vsrc1X, AMDGPU::OpName::vsrc2X,
3437 AMDGPU::OpName::vdstX};
3438 static const AMDGPU::OpName VOPDOpsY[4] = {
3439 AMDGPU::OpName::src0Y, AMDGPU::OpName::vsrc1Y, AMDGPU::OpName::vsrc2Y,
3440 AMDGPU::OpName::vdstY};
3441
3442 // VOP2 MADMK instructions use src0, imm, src1 scheme.
3443 static const AMDGPU::OpName VOP2MADMKOps[4] = {
3444 AMDGPU::OpName::src0, AMDGPU::OpName::NUM_OPERAND_NAMES,
3445 AMDGPU::OpName::src1, AMDGPU::OpName::vdst};
3446
3447 unsigned TSFlags = Desc.TSFlags;
3448
3449 if (TSFlags &
3452 switch (Desc.getOpcode()) {
3453 // LD_SCALE operands ignore MSB.
3454 case AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32:
3455 case AMDGPU::V_WMMA_LD_SCALE_PAIRED_B32_gfx1250:
3456 case AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64:
3457 case AMDGPU::V_WMMA_LD_SCALE16_PAIRED_B64_gfx1250:
3458 return {};
3459 case AMDGPU::V_FMAMK_F16:
3460 case AMDGPU::V_FMAMK_F16_t16:
3461 case AMDGPU::V_FMAMK_F16_t16_gfx12:
3462 case AMDGPU::V_FMAMK_F16_fake16:
3463 case AMDGPU::V_FMAMK_F16_fake16_gfx12:
3464 case AMDGPU::V_FMAMK_F32:
3465 case AMDGPU::V_FMAMK_F32_gfx12:
3466 case AMDGPU::V_FMAMK_F64:
3467 case AMDGPU::V_FMAMK_F64_gfx1250:
3468 return {VOP2MADMKOps, nullptr};
3469 default:
3470 break;
3471 }
3472 return {VOPOps, nullptr};
3473 }
3474
3475 if (TSFlags & SIInstrFlags::DS)
3476 return {VDSOps, nullptr};
3477
3478 if (TSFlags & SIInstrFlags::FLAT)
3479 return {FLATOps, nullptr};
3480
3481 if (TSFlags & (SIInstrFlags::MUBUF | SIInstrFlags::MTBUF))
3482 return {BUFOps, nullptr};
3483
3484 if (TSFlags & SIInstrFlags::VIMAGE)
3485 return {VIMGOps, nullptr};
3486
3487 if (AMDGPU::isVOPD(Desc.getOpcode()))
3488 return {VOPDOpsX, VOPDOpsY};
3489
3490 assert(!(TSFlags & SIInstrFlags::MIMG));
3491
3492 if (TSFlags & (SIInstrFlags::VSAMPLE | SIInstrFlags::EXP))
3493 llvm_unreachable("Sample and export VGPR lowering is not implemented and"
3494 " these instructions are not expected on gfx1250");
3495
3496 return {};
3497}
3498
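// Scale-offset addressing applies only to non-buffer SMEM and to FLAT-family
// encodings in the modes that provide the needed address operands (SV/SVS
// for scratch, GVS for global).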
3499bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode) {
3500 uint64_t TSFlags = MII.get(Opcode).TSFlags;
3501
3502 if (TSFlags & SIInstrFlags::SMRD)
3503 return !getSMEMIsBuffer(Opcode);
3504 if (!(TSFlags & SIInstrFlags::FLAT))
3505 return false;
3506
3507 // Only SV and SVS modes are supported.
3508 if (TSFlags & SIInstrFlags::FlatScratch)
3509 return hasNamedOperand(Opcode, OpName::vaddr);
3510
3511 // Only GVS mode is supported.
3512 return hasNamedOperand(Opcode, OpName::vaddr) &&
3513 hasNamedOperand(Opcode, OpName::saddr);
3516}
3517
3518bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc, const MCInstrInfo &MII,
3519 const MCSubtargetInfo &ST) {
3520 for (auto OpName : {OpName::vdst, OpName::src0, OpName::src1, OpName::src2}) {
3521 int Idx = getNamedOperandIdx(OpDesc.getOpcode(), OpName);
3522 if (Idx == -1)
3523 continue;
3524
3525 const MCOperandInfo &OpInfo = OpDesc.operands()[Idx];
3526 int16_t RegClass = MII.getOpRegClassID(
3527 OpInfo, ST.getHwMode(MCSubtargetInfo::HwMode_RegInfo));
3528 if (RegClass == AMDGPU::VReg_64RegClassID ||
3529 RegClass == AMDGPU::VReg_64_Align2RegClassID)
3530 return true;
3531 }
3532
3533 return false;
3534}
3535
3536bool isDPALU_DPP32BitOpc(unsigned Opc) {
3537 switch (Opc) {
3538 case AMDGPU::V_MUL_LO_U32_e64:
3539 case AMDGPU::V_MUL_LO_U32_e64_dpp:
3540 case AMDGPU::V_MUL_LO_U32_e64_dpp_gfx1250:
3541 case AMDGPU::V_MUL_HI_U32_e64:
3542 case AMDGPU::V_MUL_HI_U32_e64_dpp:
3543 case AMDGPU::V_MUL_HI_U32_e64_dpp_gfx1250:
3544 case AMDGPU::V_MUL_HI_I32_e64:
3545 case AMDGPU::V_MUL_HI_I32_e64_dpp:
3546 case AMDGPU::V_MUL_HI_I32_e64_dpp_gfx1250:
3547 case AMDGPU::V_MAD_U32_e64:
3548 case AMDGPU::V_MAD_U32_e64_dpp:
3549 case AMDGPU::V_MAD_U32_e64_dpp_gfx1250:
3550 return true;
3551 default:
3552 return false;
3553 }
3554}
3555
3556bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII,
3557 const MCSubtargetInfo &ST) {
3558 if (!ST.hasFeature(AMDGPU::FeatureDPALU_DPP))
3559 return false;
3560
3561 if (isDPALU_DPP32BitOpc(OpDesc.getOpcode()))
3562 return ST.hasFeature(AMDGPU::FeatureGFX1250Insts);
3563
3564 return hasAny64BitVGPROperands(OpDesc, MII, ST);
3565}
3566
3568 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize32768))
3569 return 64;
3570 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize65536))
3571 return 128;
3572 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize163840))
3573 return 320;
3574 if (ST.getFeatureBits().test(FeatureAddressableLocalMemorySize327680))
3575 return 512;
3576 return 64; // In sync with getAddressableLocalMemorySize
3577}
3578
3579bool isPackedFP32Inst(unsigned Opc) {
3580 switch (Opc) {
3581 case AMDGPU::V_PK_ADD_F32:
3582 case AMDGPU::V_PK_ADD_F32_gfx12:
3583 case AMDGPU::V_PK_MUL_F32:
3584 case AMDGPU::V_PK_MUL_F32_gfx12:
3585 case AMDGPU::V_PK_FMA_F32:
3586 case AMDGPU::V_PK_FMA_F32_gfx12:
3587 return true;
3588 default:
3589 return false;
3590 }
3591}
3592
3593const std::array<unsigned, 3> &ClusterDimsAttr::getDims() const {
3594 assert(isFixedDims() && "expect kind to be FixedDims");
3595 return Dims;
3596}
3597
3598std::string ClusterDimsAttr::to_string() const {
3599 SmallString<10> Buffer;
3600 raw_svector_ostream OS(Buffer);
3601
3602 switch (getKind()) {
3603 case Kind::Unknown:
3604 return "";
3605 case Kind::NoCluster: {
3606 OS << EncoNoCluster << ',' << EncoNoCluster << ',' << EncoNoCluster;
3607 return Buffer.c_str();
3608 }
3609 case Kind::VariableDims: {
3610 OS << EncoVariableDims << ',' << EncoVariableDims << ','
3611 << EncoVariableDims;
3612 return Buffer.c_str();
3613 }
3614 case Kind::FixedDims: {
3615 OS << Dims[0] << ',' << Dims[1] << ',' << Dims[2];
3616 return Buffer.c_str();
3617 }
3618 }
3619 llvm_unreachable("Unknown ClusterDimsAttr kind");
3620}
3621
3622ClusterDimsAttr ClusterDimsAttr::get(const Function &F) {
3623 std::optional<SmallVector<unsigned>> Attr =
3624 getIntegerVecAttribute(F, "amdgpu-cluster-dims", /*Size=*/3);
3625 Kind AttrKind = Kind::FixedDims;
3626
3627 if (!Attr.has_value())
3628 AttrKind = Kind::Unknown;
3629 else if (all_of(*Attr, [](unsigned V) { return V == EncoNoCluster; }))
3630 AttrKind = Kind::NoCluster;
3631 else if (all_of(*Attr, [](unsigned V) { return V == EncoVariableDims; }))
3632 AttrKind = Kind::VariableDims;
3633
3634 ClusterDimsAttr A(AttrKind);
3635 if (AttrKind == Kind::FixedDims)
3636 A.Dims = {(*Attr)[0], (*Attr)[1], (*Attr)[2]};
3637
3638 return A;
3639}
3640
3641} // namespace AMDGPU
3642
3643raw_ostream &operator<<(raw_ostream &OS,
3644 const AMDGPU::IsaInfo::TargetIDSetting S) {
3645 switch (S) {
3646 case (AMDGPU::IsaInfo::TargetIDSetting::Unsupported):
3647 OS << "Unsupported";
3648 break;
3649 case (AMDGPU::IsaInfo::TargetIDSetting::Any):
3650 OS << "Any";
3651 break;
3652 case (AMDGPU::IsaInfo::TargetIDSetting::Off):
3653 OS << "Off";
3654 break;
3655 case (AMDGPU::IsaInfo::TargetIDSetting::On):
3656 OS << "On";
3657 break;
3658 }
3659 return OS;
3660}
3661
3662} // namespace llvm
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static llvm::cl::opt< unsigned > DefaultAMDHSACodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden, llvm::cl::init(llvm::AMDGPU::AMDHSA_COV6), llvm::cl::desc("Set default AMDHSA Code Object Version (module flag " "or asm directive still take priority if present)"))
#define MAP_REG2REG
Provides AMDGPU specific target descriptions.
MC layer struct for AMDGPUMCKernelCodeT, provides MCExpr functionality where required.
@ AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define RegName(no)
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Register const TargetRegisterInfo * TRI
This file contains the declarations for metadata subclasses.
#define T
uint64_t High
if(PassOpts->AAPipeline)
#define S_00B848_MEM_ORDERED(x)
Definition SIDefines.h:1236
#define S_00B848_WGP_MODE(x)
Definition SIDefines.h:1233
#define S_00B848_FWD_PROGRESS(x)
Definition SIDefines.h:1239
unsigned unsigned DefaultVal
This file contains some functions that are useful when dealing with strings.
static const int BlockSize
Definition TarWriter.cpp:33
static ClusterDimsAttr get(const Function &F)
const std::array< unsigned, 3 > & getDims() const
TargetIDSetting getXnackSetting() const
AMDGPUTargetID(const MCSubtargetInfo &STI)
void setTargetIDFromTargetIDStream(StringRef TargetID)
TargetIDSetting getSramEccSetting() const
unsigned getIndexInParsedOperands(unsigned CompOprIdx) const
unsigned getIndexOfSrcInParsedOperands(unsigned CompSrcIdx) const
std::optional< unsigned > getInvalidCompOperandIndex(std::function< MCRegister(unsigned, unsigned)> GetRegIdx, const MCRegisterInfo &MRI, bool SkipSrc=false, bool AllowSameVGPR=false, bool VOPD3=false) const
std::array< MCRegister, Component::MAX_OPR_NUM > RegIndices
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
constexpr bool test(unsigned I) const
unsigned getAddressSpace() const
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayStore() const
Return true if this instruction could possibly modify memory.
bool mayLoad() const
Return true if this instruction could possibly read memory.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
unsigned getOpcode() const
Return the opcode number for this descriptor.
Interface to description of machine instruction set.
Definition MCInstrInfo.h:27
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
int16_t getOpRegClassID(const MCOperandInfo &OpInfo, unsigned HwModeId) const
Return the ID of the register class to use for OpInfo, for the active HwMode HwModeId.
Definition MCInstrInfo.h:80
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:87
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
constexpr unsigned id() const
Definition MCRegister.h:82
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
StringRef getCPU() const
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
const char * c_str()
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
A wrapper around a string literal that serves as a proxy for constructing global tables of StringRefs...
Definition StringRef.h:854
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:702
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition StringRef.h:273
Manages the enabling and disabling of subtarget specific features.
const std::vector< std::string > & getFeatures() const
Returns the vector of individual subtarget features.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
OSType getOS() const
Get the parsed operating system type of this triple.
Definition Triple.h:422
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:413
bool isAMDGCN() const
Tests whether the target is AMDGCN.
Definition Triple.h:927
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
std::string & str()
Returns the string's reference.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
unsigned decodeFieldVaVcc(unsigned Encoded)
unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc)
unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt)
bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc)
unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst)
unsigned decodeFieldSaSdst(unsigned Encoded)
unsigned decodeFieldVaSdst(unsigned Encoded)
unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc)
unsigned decodeFieldVaSsrc(unsigned Encoded)
int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst)
const CustomOperandVal DepCtrInfo[]
bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
unsigned decodeFieldVaVdst(unsigned Encoded)
unsigned decodeFieldHoldCnt(unsigned Encoded)
int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI)
unsigned decodeFieldVmVsrc(unsigned Encoded)
unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst)
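The field encoders and decoders above all follow the same read-modify-write pattern over a packed depctr immediate. A hedged sketch (the starting encoding would normally come from getDefaultDepCtrEncoding(STI); 0 below is only a placeholder, and the declarations live in the target-internal AMDGPUBaseInfo.h):

  #include <cassert>

  unsigned clearVaVdst(unsigned Encoded) {
    using namespace llvm::AMDGPU::DepCtr;
    // Set va_vdst to 0 so the instruction waits on all outstanding
    // VALU writes; every other depctr field is left untouched.
    unsigned Updated = encodeFieldVaVdst(Encoded, 0);
    assert(decodeFieldVaVdst(Updated) == 0 && "round trip is lossless");
    return Updated;
  }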
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
static constexpr ExpTgt ExpTgtInfo[]
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
unsigned getTgtId(const StringRef Name)
constexpr uint32_t VersionMinor
HSA metadata minor version.
constexpr uint32_t VersionMajor
HSA metadata major version.
unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI, std::optional< bool > EnableWavefrontSize32)
unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI)
unsigned getArchVGPRAllocGranule()
For subtargets with a unified VGPR file and mixed ArchVGPR/AGPR usage, returns the allocation granule for ArchVGPRs.
unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getWavefrontSize(const MCSubtargetInfo *STI)
unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize)
unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI)
unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, bool FlatScrUsed, bool XNACKUsed)
unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI)
unsigned getLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getEUsPerCU(const MCSubtargetInfo *STI)
unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI)
unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
static TargetIDSetting getTargetIDSettingFromFeatureString(StringRef FeatureString)
unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI)
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, bool Addressable)
unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs)
unsigned getMinWavesPerEU(const MCSubtargetInfo *STI)
unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI)
unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, unsigned DynamicVGPRBlockSize)
unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize, std::optional< bool > EnableWavefrontSize32)
unsigned getEncodedNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, std::optional< bool > EnableWavefrontSize32)
unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves, AMDGPUSubtarget::Generation Gen)
static unsigned getGranulatedNumRegisterBlocks(unsigned NumRegs, unsigned Granule)
unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI)
unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI)
unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize)
StringLiteral const UfmtSymbolicGFX11[]
bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI)
unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI)
StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI)
unsigned const DfmtNfmt2UFmtGFX10[]
StringLiteral const DfmtSymbolic[]
static StringLiteral const * getNfmtLookupTable(const MCSubtargetInfo &STI)
bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI)
StringLiteral const NfmtSymbolicGFX10[]
bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI)
int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt, const MCSubtargetInfo &STI)
StringRef getDfmtName(unsigned Id)
int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt)
int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI)
bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI)
StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI)
unsigned const DfmtNfmt2UFmtGFX11[]
StringLiteral const NfmtSymbolicVI[]
StringLiteral const NfmtSymbolicSICI[]
int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI)
int64_t getDfmt(const StringRef Name)
StringLiteral const UfmtSymbolicGFX10[]
void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt)
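A minimal round-trip sketch of the dfmt/nfmt helpers above; the numeric format values are placeholders, and real callers would validate them with isValidDfmtNfmt first:

  void formatRoundTrip() {
    using namespace llvm::AMDGPU::MTBUFFormat; // assumed namespace for these helpers
    int64_t Packed = encodeDfmtNfmt(/*Dfmt=*/4, /*Nfmt=*/7);
    unsigned Dfmt = 0, Nfmt = 0;
    decodeDfmtNfmt(static_cast<unsigned>(Packed), Dfmt, Nfmt);
    // Dfmt == 4 and Nfmt == 7 again after decoding.
  }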
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId)
bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, uint16_t &StreamId, const MCSubtargetInfo &STI)
bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, const MCSubtargetInfo &STI, bool Strict)
StringRef getMsgOpName(int64_t MsgId, uint64_t Encoding, const MCSubtargetInfo &STI)
Map from an encoding to the symbolic name for a sendmsg operation.
static uint64_t getMsgIdMask(const MCSubtargetInfo &STI)
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI)
bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, bool Strict)
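A hedged sketch of building a sendmsg immediate with encodeMsg; the IDs below are placeholders rather than real message encodings, and production code would use the named constants from SIDefines.h and check isValidMsgId/isValidMsgOp/isValidMsgStream first:

  #include <cstdint>

  uint64_t buildSendMsgImm() {
    using namespace llvm::AMDGPU::SendMsg; // assumed namespace for these helpers
    return encodeMsg(/*MsgId=*/2, /*OpId=*/1, /*StreamId=*/0);
  }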
constexpr unsigned VOPD_VGPR_BANK_MASKS[]
constexpr unsigned COMPONENTS_NUM
constexpr unsigned VOPD3_VGPR_BANK_MASKS[]
bool isPackedFP32Inst(unsigned Opc)
bool isGCN3Encoding(const MCSubtargetInfo &STI)
bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi)
bool isGFX10_BEncoding(const MCSubtargetInfo &STI)
bool isInlineValue(MCRegister Reg)
bool isGFX10_GFX11(const MCSubtargetInfo &STI)
bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType)
LLVM_READONLY const MIMGInfo * getMIMGInfo(unsigned Opc)
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from given Waitcnt for given isa Version, and writes decoded values into Vmcnt, Expcnt and Lgkmcnt respectively.
bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi)
bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI)
Returns true if Reg is a scalar register.
uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset)
Convert ByteOffset to dwords if the subtarget uses dword SMRD immediate offsets.
static unsigned encodeStorecnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Storecnt)
MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI)
If Reg is a pseudo register, return the correct hardware register for the given STI; otherwise return Reg.
static bool hasSMEMByteOffset(const MCSubtargetInfo &ST)
bool isVOPCAsmOnly(unsigned Opc)
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, unsigned VDataDwords, unsigned VAddrDwords)
bool getMTBUFHasSrsrc(unsigned Opc)
std::optional< int64_t > getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST, int64_t ByteOffset)
bool getWMMAIsXDL(unsigned Opc)
uint8_t wmmaScaleF8F6F4FormatToNumRegs(unsigned Fmt)
static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr, int Size, unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
bool isGFX10Before1030(const MCSubtargetInfo &STI)
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo)
Does this operand support only inlinable literals?
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc)
const int OPR_ID_UNSUPPORTED
bool shouldEmitConstantsToTextSection(const Triple &TT)
bool isInlinableLiteralV2I16(uint32_t Literal)
int getMTBUFElements(unsigned Opc)
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
static int encodeCustomOperandVal(const CustomOperandVal &Op, int64_t InputVal)
unsigned getTemporalHintType(const MCInstrDesc TID)
int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, int32_t ArgNumVGPR)
bool isGFX10(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2BF16(uint32_t Literal)
unsigned getMaxNumUserSGPRs(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV216(bool IsFloat, uint32_t Literal)
FPType getFPDstSelType(unsigned Opc)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
bool hasA16(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset, bool IsBuffer)
bool isGFX12Plus(const MCSubtargetInfo &STI)
unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler)
const MCRegisterClass * getVGPRPhysRegClass(MCRegister Reg, const MCRegisterInfo &MRI)
bool hasPackedD16(const MCSubtargetInfo &STI)
unsigned getStorecntBitMask(const IsaVersion &Version)
unsigned getLdsDwGranularity(const MCSubtargetInfo &ST)
bool isGFX940(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2F16(uint32_t Literal)
bool isHsaAbi(const MCSubtargetInfo &STI)
bool isGFX11(const MCSubtargetInfo &STI)
const int OPR_VAL_INVALID
bool getSMEMIsBuffer(unsigned Opc)
bool isGFX10_3_GFX11(const MCSubtargetInfo &STI)
bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val)
Checks if Val is inside MD, a !range-like metadata.
uint8_t mfmaScaleF8F6F4FormatToNumRegs(unsigned EncodingVal)
unsigned getVOPDOpcode(unsigned Opc, bool VOPD3)
bool isGroupSegment(const GlobalValue *GV)
LLVM_ABI IsaVersion getIsaVersion(StringRef GPU)
bool getMTBUFHasSoffset(unsigned Opc)
bool hasXNACK(const MCSubtargetInfo &STI)
bool isValid32BitLiteral(uint64_t Val, bool IsFP64)
static unsigned getCombinedCountBitMask(const IsaVersion &Version, bool IsStore)
CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3)
unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt)
Encodes Vmcnt, Expcnt and Lgkmcnt into Waitcnt for given isa Version.
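A minimal sketch showing that encodeWaitcnt and the decodeWaitcnt helper listed earlier are inverses, assuming a gfx900 target so the pre-GFX10 field widths from the top of this file apply (4-bit lgkmcnt, 3-bit expcnt):

  #include <cassert>

  void waitcntRoundTrip() {
    using namespace llvm::AMDGPU;
    IsaVersion V = getIsaVersion("gfx900");
    unsigned Enc = encodeWaitcnt(V, /*Vmcnt=*/0, /*Expcnt=*/7, /*Lgkmcnt=*/15);
    unsigned Vmcnt = 0, Expcnt = 0, Lgkmcnt = 0;
    decodeWaitcnt(V, Enc, Vmcnt, Expcnt, Lgkmcnt);
    assert(Vmcnt == 0 && Expcnt == 7 && Lgkmcnt == 15 && "lossless round trip");
  }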
bool isVOPC64DPP(unsigned Opc)
int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements)
bool getMAIIsGFX940XDL(unsigned Opc)
bool isSI(const MCSubtargetInfo &STI)
unsigned getDefaultAMDHSACodeObjectVersion()
bool isReadOnlySegment(const GlobalValue *GV)
bool isArgPassedInSGPR(const Argument *A)
bool isIntrinsicAlwaysUniform(unsigned IntrID)
int getMUBUFBaseOpcode(unsigned Opc)
unsigned getAMDHSACodeObjectVersion(const Module &M)
unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt)
unsigned getWaitcntBitMask(const IsaVersion &Version)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
bool getVOP3IsSingle(unsigned Opc)
bool isGFX9(const MCSubtargetInfo &STI)
bool isDPALU_DPP32BitOpc(unsigned Opc)
bool getVOP1IsSingle(unsigned Opc)
static bool isDwordAligned(uint64_t ByteOffset)
unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST)
bool isGFX10_AEncoding(const MCSubtargetInfo &STI)
bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a KImm operand?
bool getHasColorExport(const Function &F)
int getMTBUFBaseOpcode(unsigned Opc)
bool isGFX90A(const MCSubtargetInfo &STI)
unsigned getSamplecntBitMask(const IsaVersion &Version)
unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion)
std::tuple< char, unsigned, unsigned > parseAsmPhysRegName(StringRef RegName)
Returns a tuple whose first entry is a valid character code if this is a valid physical register name, or 0 otherwise.
bool hasSRAMECC(const MCSubtargetInfo &STI)
bool getHasDepthExport(const Function &F)
bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI)
bool getMUBUFHasVAddr(unsigned Opc)
bool isTrue16Inst(unsigned Opc)
unsigned getVGPREncodingMSBs(MCRegister Reg, const MCRegisterInfo &MRI)
std::pair< unsigned, unsigned > getVOPDComponents(unsigned VOPDOpcode)
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
bool isGFX12(const MCSubtargetInfo &STI)
unsigned getInitialPSInputAddr(const Function &F)
unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Expcnt)
bool isAsyncStore(unsigned Opc)
unsigned getDynamicVGPRBlockSize(const Function &F)
unsigned getKmcntBitMask(const IsaVersion &Version)
MCRegister getVGPRWithMSBs(MCRegister Reg, unsigned MSBs, const MCRegisterInfo &MRI)
If Reg is a low VGPR return a corresponding high VGPR with MSBs set.
unsigned getVmcntBitMask(const IsaVersion &Version)
bool isNotGFX10Plus(const MCSubtargetInfo &STI)
bool hasMAIInsts(const MCSubtargetInfo &STI)
unsigned getBitOp2(unsigned Opc)
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
unsigned getXcntBitMask(const IsaVersion &Version)
bool isGenericAtomic(unsigned Opc)
const MFMA_F8F6F4_Info * getWMMA_F8F6F4_WithFormatArgs(unsigned FmtA, unsigned FmtB, unsigned F8F8Opcode)
Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt)
bool isGFX8Plus(const MCSubtargetInfo &STI)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, excluding the inline values intended for floating point?
unsigned getLgkmcntBitMask(const IsaVersion &Version)
bool getMUBUFTfe(unsigned Opc)
unsigned getBvhcntBitMask(const IsaVersion &Version)
bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST)
bool hasMIMG_R128(const MCSubtargetInfo &STI)
bool hasGFX10_3Insts(const MCSubtargetInfo &STI)
std::pair< const AMDGPU::OpName *, const AMDGPU::OpName * > getVGPRLoweringOperandTables(const MCInstrDesc &Desc)
bool hasG16(const MCSubtargetInfo &STI)
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements)
unsigned getExpcntBitMask(const IsaVersion &Version)
bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI)
bool getMUBUFHasSoffset(unsigned Opc)
bool isNotGFX11Plus(const MCSubtargetInfo &STI)
bool isGFX11Plus(const MCSubtargetInfo &STI)
std::optional< unsigned > getInlineEncodingV2F16(uint32_t Literal)
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a floating-point operand?
std::tuple< char, unsigned, unsigned > parseAsmConstraintPhysReg(StringRef Constraint)
Returns a tuple whose first entry is a valid character code if this is a valid physical register constraint, or 0 otherwise.
unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion)
static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr, int Size, const MCSubtargetInfo &STI)
static unsigned encodeLoadcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Loadcnt)
bool isGFX10Plus(const MCSubtargetInfo &STI)
static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size, unsigned Code, int &Idx, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
static bool isValidRegPrefix(char C)
std::optional< int64_t > getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset, bool IsBuffer, bool HasSOffset)
bool isGlobalSegment(const GlobalValue *GV)
int64_t encode32BitLiteral(int64_t Imm, OperandType Type, bool IsLit)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
Definition SIDefines.h:231
@ OPERAND_REG_INLINE_C_LAST
Definition SIDefines.h:254
@ OPERAND_REG_IMM_V2FP16
Definition SIDefines.h:209
@ OPERAND_REG_INLINE_C_FP64
Definition SIDefines.h:222
@ OPERAND_REG_INLINE_C_BF16
Definition SIDefines.h:219
@ OPERAND_REG_INLINE_C_V2BF16
Definition SIDefines.h:224
@ OPERAND_REG_IMM_V2INT16
Definition SIDefines.h:210
@ OPERAND_REG_IMM_BF16
Definition SIDefines.h:206
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
Definition SIDefines.h:201
@ OPERAND_REG_IMM_V2BF16
Definition SIDefines.h:208
@ OPERAND_REG_INLINE_AC_FIRST
Definition SIDefines.h:256
@ OPERAND_REG_IMM_FP16
Definition SIDefines.h:207
@ OPERAND_REG_IMM_NOINLINE_V2FP16
Definition SIDefines.h:211
@ OPERAND_REG_IMM_FP64
Definition SIDefines.h:205
@ OPERAND_REG_INLINE_C_V2FP16
Definition SIDefines.h:225
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
Definition SIDefines.h:236
@ OPERAND_REG_INLINE_AC_FP32
Definition SIDefines.h:237
@ OPERAND_REG_IMM_V2INT32
Definition SIDefines.h:212
@ OPERAND_REG_IMM_FP32
Definition SIDefines.h:204
@ OPERAND_REG_INLINE_C_FIRST
Definition SIDefines.h:253
@ OPERAND_REG_INLINE_C_FP32
Definition SIDefines.h:221
@ OPERAND_REG_INLINE_AC_LAST
Definition SIDefines.h:257
@ OPERAND_REG_INLINE_C_INT32
Definition SIDefines.h:217
@ OPERAND_REG_INLINE_C_V2INT16
Definition SIDefines.h:223
@ OPERAND_REG_IMM_V2FP32
Definition SIDefines.h:213
@ OPERAND_REG_INLINE_AC_FP64
Definition SIDefines.h:238
@ OPERAND_REG_INLINE_C_FP16
Definition SIDefines.h:220
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
Definition SIDefines.h:228
void initDefaultAMDKernelCodeT(AMDGPUMCKernelCodeT &KernelCode, const MCSubtargetInfo *STI)
bool isNotGFX9Plus(const MCSubtargetInfo &STI)
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
bool hasGDS(const MCSubtargetInfo &STI)
bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset)
bool isGFX9Plus(const MCSubtargetInfo &STI)
bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI)
const int OPR_ID_DUPLICATE
bool isVOPD(unsigned Opc)
VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY)
unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Vmcnt)
unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt)
bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc)
Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt)
std::optional< unsigned > getInlineEncodingV2I16(uint32_t Literal)
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
static unsigned encodeStorecntDscnt(const IsaVersion &Version, unsigned Storecnt, unsigned Dscnt)
bool isGFX1250(const MCSubtargetInfo &STI)
int getMCOpcode(uint16_t Opcode, unsigned Gen)
const MIMGBaseOpcodeInfo * getMIMGBaseOpcode(unsigned Opc)
bool isVI(const MCSubtargetInfo &STI)
bool isTensorStore(unsigned Opc)
bool getMUBUFIsBufferInv(unsigned Opc)
bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
std::optional< unsigned > getInlineEncodingV2BF16(uint32_t Literal)
static int encodeCustomOperand(const CustomOperandVal *Opr, int Size, const StringRef Name, int64_t InputVal, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
unsigned hasKernargPreload(const MCSubtargetInfo &STI)
bool supportsWGP(const MCSubtargetInfo &STI)
bool isMAC(unsigned Opc)
bool isCI(const MCSubtargetInfo &STI)
unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Lgkmcnt)
bool getVOP2IsSingle(unsigned Opc)
bool getMAIIsDGEMM(unsigned Opc)
Returns true if the MAI operation is a double-precision GEMM.
LLVM_READONLY const MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
const int OPR_ID_UNKNOWN
unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion)
SmallVector< unsigned > getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size, unsigned DefaultVal)
int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels)
bool isNotGFX12Plus(const MCSubtargetInfo &STI)
bool getMTBUFHasVAddr(unsigned Opc)
unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt)
uint8_t getELFABIVersion(const Triple &T, unsigned CodeObjectVersion)
std::pair< unsigned, unsigned > getIntegerPairAttribute(const Function &F, StringRef Name, std::pair< unsigned, unsigned > Default, bool OnlyFirstRequired)
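A sketch of getIntegerPairAttribute reading the "amdgpu-flat-work-group-size" IR attribute; the {1, 1024} fallback is illustrative only, not the backend's actual default:

  #include "llvm/IR/Function.h"
  #include <utility>

  std::pair<unsigned, unsigned> readFlatWGSize(const llvm::Function &F) {
    return llvm::AMDGPU::getIntegerPairAttribute(
        F, "amdgpu-flat-work-group-size", /*Default=*/{1, 1024},
        /*OnlyFirstRequired=*/false);
  }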
unsigned getLoadcntBitMask(const IsaVersion &Version)
bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi)
bool hasVOPD(const MCSubtargetInfo &STI)
int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily, bool VOPD3)
static unsigned encodeDscnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Dscnt)
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable.
const MFMA_F8F6F4_Info * getMFMA_F8F6F4_WithFormatArgs(unsigned CBSZ, unsigned BLGP, unsigned F8F8Opcode)
unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion)
bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI)
bool isGFX9_GFX10(const MCSubtargetInfo &STI)
int getMUBUFElements(unsigned Opc)
static unsigned encodeLoadcntDscnt(const IsaVersion &Version, unsigned Loadcnt, unsigned Dscnt)
const GcnBufferFormatInfo * getGcnBufferFormatInfo(uint8_t BitsPerComp, uint8_t NumComponents, uint8_t NumFormat, const MCSubtargetInfo &STI)
unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc)
bool isPermlane16(unsigned Opc)
bool getMUBUFHasSrsrc(unsigned Opc)
unsigned getDscntBitMask(const IsaVersion &Version)
bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tessellation and geometry are not in use, or otherwise copy shader if one is needed).
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_Gfx
Used for AMD graphics targets.
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ ELFABIVERSION_AMDGPU_HSA_V4
Definition ELF.h:384
@ ELFABIVERSION_AMDGPU_HSA_V5
Definition ELF.h:385
@ ELFABIVERSION_AMDGPU_HSA_V6
Definition ELF.h:386
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:682
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
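A small usage sketch of the range-based wrapper:

  #include "llvm/ADT/STLExtras.h"
  #include <vector>

  bool allEven(const std::vector<int> &Vals) {
    return llvm::all_of(Vals, [](int V) { return V % 2 == 0; });
  }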
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value that is congruent to Skew modulo Align.
Definition MathExtras.h:546
std::string utostr(uint64_t X, bool isNeg=false)
Op::Description Desc
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
Definition MathExtras.h:150
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
Definition MathExtras.h:155
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
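Since the arithmetic helpers above (isInt, isUInt, alignDown, divideCeil, Hi_32, Lo_32) are constexpr, their behavior can be checked entirely at compile time; a few illustrative cases:

  #include "llvm/Support/MathExtras.h"

  static_assert(llvm::isInt<8>(127) && !llvm::isInt<8>(128), "signed range");
  static_assert(llvm::isUInt<4>(15) && !llvm::isUInt<4>(16), "unsigned range");
  static_assert(llvm::alignDown(37, 8) == 32, "largest multiple of 8 <= 37");
  static_assert(llvm::divideCeil(37, 8) == 5, "ceil(37 / 8)");
  static_assert(llvm::Hi_32(0xAABBCCDD11223344ULL) == 0xAABBCCDDu, "high half");
  static_assert(llvm::Lo_32(0xAABBCCDD11223344ULL) == 0x11223344u, "low half");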
To bit_cast(const From &from) noexcept
Definition bit.h:90
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
@ AlwaysUniform
The result values are always uniform.
Definition Uniformity.h:23
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
#define N
AMD Kernel Code Object (amd_kernel_code_t).
Instruction set architecture version.
Represents the counter values to wait for in an s_waitcnt instruction.