1 //===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "AMDGPUBaseInfo.h"
10 #include "AMDGPU.h"
11 #include "AMDGPUAsmUtils.h"
12 #include "AMDKernelCodeT.h"
13 #include "GCNSubtarget.h"
14 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
15 #include "llvm/BinaryFormat/ELF.h"
16 #include "llvm/IR/Attributes.h"
17 #include "llvm/IR/Function.h"
18 #include "llvm/IR/GlobalValue.h"
19 #include "llvm/IR/IntrinsicsAMDGPU.h"
20 #include "llvm/IR/IntrinsicsR600.h"
21 #include "llvm/IR/LLVMContext.h"
26 
27 #define GET_INSTRINFO_NAMED_OPS
28 #define GET_INSTRMAP_INFO
29 #include "AMDGPUGenInstrInfo.inc"
30 
31 static llvm::cl::opt<unsigned>
32  AmdhsaCodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden,
33  llvm::cl::desc("AMDHSA Code Object Version"),
34  llvm::cl::init(4));
35 
36 // TODO-GFX11: Remove this when full 16-bit codegen is implemented.
37 static llvm::cl::opt<bool>
38  LimitTo128VGPRs("amdgpu-limit-to-128-vgprs", llvm::cl::Hidden,
39  llvm::cl::desc("Never use more than 128 VGPRs"));
40 
41 namespace {
42 
43 /// \returns Bit mask for given bit \p Shift and bit \p Width.
44 unsigned getBitMask(unsigned Shift, unsigned Width) {
45  return ((1 << Width) - 1) << Shift;
46 }
47 
48 /// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
49 ///
50 /// \returns Packed \p Dst.
51 unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
52  unsigned Mask = getBitMask(Shift, Width);
53  return ((Src << Shift) & Mask) | (Dst & ~Mask);
54 }
55 
56 /// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
57 ///
58 /// \returns Unpacked bits.
59 unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
60  return (Src & getBitMask(Shift, Width)) >> Shift;
61 }
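// Worked example (illustrative, not part of the original file): packing a
// 3-bit field at bit offset 4 with the helpers above and reading it back.
//   unsigned Enc = packBits(/*Src=*/5, /*Dst=*/0, /*Shift=*/4, /*Width=*/3); // 0x50
//   unsigned Val = unpackBits(Enc, /*Shift=*/4, /*Width=*/3);                // 5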
62 
63 /// \returns Vmcnt bit shift (lower bits).
64 unsigned getVmcntBitShiftLo(unsigned VersionMajor) {
65  return VersionMajor >= 11 ? 10 : 0;
66 }
67 
68 /// \returns Vmcnt bit width (lower bits).
69 unsigned getVmcntBitWidthLo(unsigned VersionMajor) {
70  return VersionMajor >= 11 ? 6 : 4;
71 }
72 
73 /// \returns Expcnt bit shift.
74 unsigned getExpcntBitShift(unsigned VersionMajor) {
75  return VersionMajor >= 11 ? 0 : 4;
76 }
77 
78 /// \returns Expcnt bit width.
79 unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }
80 
81 /// \returns Lgkmcnt bit shift.
82 unsigned getLgkmcntBitShift(unsigned VersionMajor) {
83  return VersionMajor >= 11 ? 4 : 8;
84 }
85 
86 /// \returns Lgkmcnt bit width.
87 unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
88  return VersionMajor >= 10 ? 6 : 4;
89 }
90 
91 /// \returns Vmcnt bit shift (higher bits).
92 unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }
93 
94 /// \returns Vmcnt bit width (higher bits).
95 unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
96  return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
97 }
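// Illustrative sketch of the resulting s_waitcnt layout on gfx9, assuming the
// shifts/widths above: vmcnt occupies bits [3:0] and [15:14], expcnt bits
// [6:4], lgkmcnt bits [11:8]. A vmcnt of 35 (0b100011) therefore splits into
// 0b0011 in the low field and 0b10 in the high field; decodeVmcnt() below
// reassembles it as VmcntLo | (VmcntHi << getVmcntBitWidthLo(9)) == 3 | (2 << 4) == 35.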
98 
99 } // end namespace anonymous
100 
101 namespace llvm {
102 
103 namespace AMDGPU {
104 
105 Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
106  if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA)
107  return None;
108 
109  switch (AmdhsaCodeObjectVersion) {
110  case 2:
111  return ELF::ELFABIVERSION_AMDGPU_HSA_V2;
112  case 3:
113  return ELF::ELFABIVERSION_AMDGPU_HSA_V3;
114  case 4:
115  return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
116  case 5:
117  return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
118  default:
119  report_fatal_error(Twine("Unsupported AMDHSA Code Object Version ") +
120  Twine(AmdhsaCodeObjectVersion));
121  }
122 }
123 
124 bool isHsaAbiVersion2(const MCSubtargetInfo *STI) {
125  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
126  return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2;
127  return false;
128 }
129 
130 bool isHsaAbiVersion3(const MCSubtargetInfo *STI) {
131  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
132  return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3;
133  return false;
134 }
135 
136 bool isHsaAbiVersion4(const MCSubtargetInfo *STI) {
137  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
138  return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4;
139  return false;
140 }
141 
142 bool isHsaAbiVersion5(const MCSubtargetInfo *STI) {
143  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
144  return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5;
145  return false;
146 }
147 
148 bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI) {
149  return isHsaAbiVersion3(STI) || isHsaAbiVersion4(STI) ||
150  isHsaAbiVersion5(STI);
151 }
152 
153 unsigned getAmdhsaCodeObjectVersion() {
154  return AmdhsaCodeObjectVersion;
155 }
156 
157 unsigned getMultigridSyncArgImplicitArgPosition() {
158  switch (AmdhsaCodeObjectVersion) {
159  case 2:
160  case 3:
161  case 4:
162  return 48;
163  case 5:
164  return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET;
165  default:
166  llvm_unreachable("Unexpected code object version");
167  return 0;
168  }
169 }
170 
171 
172 // FIXME: All such magic numbers about the ABI should be in a
173 // central TD file.
174 unsigned getHostcallImplicitArgPosition() {
175  switch (AmdhsaCodeObjectVersion) {
176  case 2:
177  case 3:
178  case 4:
179  return 24;
180  case 5:
181  return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET;
182  default:
183  llvm_unreachable("Unexpected code object version");
184  return 0;
185  }
186 }
187 
188 #define GET_MIMGBaseOpcodesTable_IMPL
189 #define GET_MIMGDimInfoTable_IMPL
190 #define GET_MIMGInfoTable_IMPL
191 #define GET_MIMGLZMappingTable_IMPL
192 #define GET_MIMGMIPMappingTable_IMPL
193 #define GET_MIMGBiasMappingTable_IMPL
194 #define GET_MIMGOffsetMappingTable_IMPL
195 #define GET_MIMGG16MappingTable_IMPL
196 #define GET_MAIInstInfoTable_IMPL
197 #include "AMDGPUGenSearchableTables.inc"
198 
199 int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
200  unsigned VDataDwords, unsigned VAddrDwords) {
201  const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
202  VDataDwords, VAddrDwords);
203  return Info ? Info->Opcode : -1;
204 }
205 
206 const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
207  const MIMGInfo *Info = getMIMGInfo(Opc);
208  return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
209 }
210 
211 int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
212  const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
213  const MIMGInfo *NewInfo =
214  getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
215  NewChannels, OrigInfo->VAddrDwords);
216  return NewInfo ? NewInfo->Opcode : -1;
217 }
218 
219 unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
220  const MIMGDimInfo *Dim, bool IsA16,
221  bool IsG16Supported) {
222  unsigned AddrWords = BaseOpcode->NumExtraArgs;
223  unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
224  (BaseOpcode->LodOrClampOrMip ? 1 : 0);
225  if (IsA16)
226  AddrWords += divideCeil(AddrComponents, 2);
227  else
228  AddrWords += AddrComponents;
229 
230  // Note: For subtargets that support A16 but not G16, enabling A16 also
231  // enables 16 bit gradients.
232  // For subtargets that support A16 (operand) and G16 (done with a different
233  // instruction encoding), they are independent.
234 
235  if (BaseOpcode->Gradients) {
236  if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
237  // There are two gradients per coordinate, we pack them separately.
238  // For the 3d case,
239  // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
240  AddrWords += alignTo<2>(Dim->NumGradients / 2);
241  else
242  AddrWords += Dim->NumGradients;
243  }
244  return AddrWords;
245 }
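// Worked example (illustrative): a basic 2D sample (Coordinates set, no
// gradients, no LOD/clamp/mip, NumExtraArgs == 0) needs 2 address components,
// so getAddrSizeMIMGOp returns 2 address words, or divideCeil(2, 2) == 1 when
// the coordinates are packed as 16-bit (IsA16). With gradients, the 2D case
// (NumGradients == 4) adds alignTo<2>(4 / 2) == 2 words when the gradients are
// packed (A16 without G16 support, or a G16 opcode), and 4 words otherwise.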
246 
247 struct MUBUFInfo {
248  uint16_t Opcode;
249  uint16_t BaseOpcode;
250  uint8_t elements;
251  bool has_vaddr;
252  bool has_srsrc;
253  bool has_soffset;
254  bool IsBufferInv;
255 };
256 
257 struct MTBUFInfo {
258  uint16_t Opcode;
259  uint16_t BaseOpcode;
260  uint8_t elements;
261  bool has_vaddr;
262  bool has_srsrc;
263  bool has_soffset;
264 };
265 
266 struct SMInfo {
267  uint16_t Opcode;
268  bool IsBuffer;
269 };
270 
271 struct VOPInfo {
272  uint16_t Opcode;
273  bool IsSingle;
274 };
275 
276 struct VOPC64DPPInfo {
277  uint16_t Opcode;
278 };
279 
280 #define GET_MTBUFInfoTable_DECL
281 #define GET_MTBUFInfoTable_IMPL
282 #define GET_MUBUFInfoTable_DECL
283 #define GET_MUBUFInfoTable_IMPL
284 #define GET_SMInfoTable_DECL
285 #define GET_SMInfoTable_IMPL
286 #define GET_VOP1InfoTable_DECL
287 #define GET_VOP1InfoTable_IMPL
288 #define GET_VOP2InfoTable_DECL
289 #define GET_VOP2InfoTable_IMPL
290 #define GET_VOP3InfoTable_DECL
291 #define GET_VOP3InfoTable_IMPL
292 #define GET_VOPC64DPPTable_DECL
293 #define GET_VOPC64DPPTable_IMPL
294 #define GET_VOPC64DPP8Table_DECL
295 #define GET_VOPC64DPP8Table_IMPL
296 #define GET_WMMAOpcode2AddrMappingTable_DECL
297 #define GET_WMMAOpcode2AddrMappingTable_IMPL
298 #define GET_WMMAOpcode3AddrMappingTable_DECL
299 #define GET_WMMAOpcode3AddrMappingTable_IMPL
300 #include "AMDGPUGenSearchableTables.inc"
301 
302 int getMTBUFBaseOpcode(unsigned Opc) {
303  const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
304  return Info ? Info->BaseOpcode : -1;
305 }
306 
307 int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
308  const MTBUFInfo *Info = getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
309  return Info ? Info->Opcode : -1;
310 }
311 
312 int getMTBUFElements(unsigned Opc) {
313  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
314  return Info ? Info->elements : 0;
315 }
316 
317 bool getMTBUFHasVAddr(unsigned Opc) {
318  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
319  return Info ? Info->has_vaddr : false;
320 }
321 
322 bool getMTBUFHasSrsrc(unsigned Opc) {
323  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
324  return Info ? Info->has_srsrc : false;
325 }
326 
327 bool getMTBUFHasSoffset(unsigned Opc) {
328  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
329  return Info ? Info->has_soffset : false;
330 }
331 
332 int getMUBUFBaseOpcode(unsigned Opc) {
333  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
334  return Info ? Info->BaseOpcode : -1;
335 }
336 
337 int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
338  const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
339  return Info ? Info->Opcode : -1;
340 }
341 
342 int getMUBUFElements(unsigned Opc) {
343  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
344  return Info ? Info->elements : 0;
345 }
346 
347 bool getMUBUFHasVAddr(unsigned Opc) {
348  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
349  return Info ? Info->has_vaddr : false;
350 }
351 
352 bool getMUBUFHasSrsrc(unsigned Opc) {
353  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
354  return Info ? Info->has_srsrc : false;
355 }
356 
357 bool getMUBUFHasSoffset(unsigned Opc) {
358  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
359  return Info ? Info->has_soffset : false;
360 }
361 
362 bool getMUBUFIsBufferInv(unsigned Opc) {
363  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
364  return Info ? Info->IsBufferInv : false;
365 }
366 
367 bool getSMEMIsBuffer(unsigned Opc) {
368  const SMInfo *Info = getSMEMOpcodeHelper(Opc);
369  return Info ? Info->IsBuffer : false;
370 }
371 
372 bool getVOP1IsSingle(unsigned Opc) {
373  const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
374  return Info ? Info->IsSingle : false;
375 }
376 
377 bool getVOP2IsSingle(unsigned Opc) {
378  const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
379  return Info ? Info->IsSingle : false;
380 }
381 
382 bool getVOP3IsSingle(unsigned Opc) {
383  const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
384  return Info ? Info->IsSingle : false;
385 }
386 
387 bool isVOPC64DPP(unsigned Opc) {
388  return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);
389 }
390 
391 bool getMAIIsDGEMM(unsigned Opc) {
392  const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
393  return Info ? Info->is_dgemm : false;
394 }
395 
396 bool getMAIIsGFX940XDL(unsigned Opc) {
397  const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
398  return Info ? Info->is_gfx940_xdl : false;
399 }
400 
401 unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc) {
402  const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom2AddrOpcode(Opc);
403  return Info ? Info->Opcode3Addr : ~0u;
404 }
405 
406 unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc) {
407  const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom3AddrOpcode(Opc);
408  return Info ? Info->Opcode2Addr : ~0u;
409 }
410 
411 // Wrapper for Tablegen'd function. enum Subtarget is not defined in any
412 // header files, so we need to wrap it in a function that takes unsigned
413 // instead.
414 int getMCOpcode(uint16_t Opcode, unsigned Gen) {
415  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
416 }
417 
418 namespace IsaInfo {
419 
420 AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI)
421  : STI(STI), XnackSetting(TargetIDSetting::Any),
422  SramEccSetting(TargetIDSetting::Any) {
423  if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
424  XnackSetting = TargetIDSetting::Unsupported;
425  if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
426  SramEccSetting = TargetIDSetting::Unsupported;
427 }
428 
429 void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) {
430  // Check if xnack or sramecc is explicitly enabled or disabled. In the
431  // absence of the target features we assume we must generate code that can run
432  // in any environment.
433  SubtargetFeatures Features(FS);
434  Optional<bool> XnackRequested;
435  Optional<bool> SramEccRequested;
436 
437  for (const std::string &Feature : Features.getFeatures()) {
438  if (Feature == "+xnack")
439  XnackRequested = true;
440  else if (Feature == "-xnack")
441  XnackRequested = false;
442  else if (Feature == "+sramecc")
443  SramEccRequested = true;
444  else if (Feature == "-sramecc")
445  SramEccRequested = false;
446  }
447 
448  bool XnackSupported = isXnackSupported();
449  bool SramEccSupported = isSramEccSupported();
450 
451  if (XnackRequested) {
452  if (XnackSupported) {
453  XnackSetting =
454  *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off;
455  } else {
456  // If a specific xnack setting was requested and this GPU does not support
457  // xnack emit a warning. Setting will remain set to "Unsupported".
458  if (*XnackRequested) {
459  errs() << "warning: xnack 'On' was requested for a processor that does "
460  "not support it!\n";
461  } else {
462  errs() << "warning: xnack 'Off' was requested for a processor that "
463  "does not support it!\n";
464  }
465  }
466  }
467 
468  if (SramEccRequested) {
469  if (SramEccSupported) {
470  SramEccSetting =
471  *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off;
472  } else {
473  // If a specific sramecc setting was requested and this GPU does not
474  // support sramecc emit a warning. Setting will remain set to
475  // "Unsupported".
476  if (*SramEccRequested) {
477  errs() << "warning: sramecc 'On' was requested for a processor that "
478  "does not support it!\n";
479  } else {
480  errs() << "warning: sramecc 'Off' was requested for a processor that "
481  "does not support it!\n";
482  }
483  }
484  }
485 }
486 
487 static TargetIDSetting
488 getTargetIDSettingFromFeatureString(StringRef FeatureString) {
489  if (FeatureString.endswith("-"))
490  return TargetIDSetting::Off;
491  if (FeatureString.endswith("+"))
492  return TargetIDSetting::On;
493 
494  llvm_unreachable("Malformed feature string");
495 }
496 
497 void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
498  SmallVector<StringRef, 3> TargetIDSplit;
499  TargetID.split(TargetIDSplit, ':');
500 
501  for (const auto &FeatureString : TargetIDSplit) {
502  if (FeatureString.startswith("xnack"))
503  XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
504  if (FeatureString.startswith("sramecc"))
505  SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
506  }
507 }
508 
509 std::string AMDGPUTargetID::toString() const {
510  std::string StringRep;
511  raw_string_ostream StreamRep(StringRep);
512 
513  auto TargetTriple = STI.getTargetTriple();
514  auto Version = getIsaVersion(STI.getCPU());
515 
516  StreamRep << TargetTriple.getArchName() << '-'
517  << TargetTriple.getVendorName() << '-'
518  << TargetTriple.getOSName() << '-'
519  << TargetTriple.getEnvironmentName() << '-';
520 
521  std::string Processor;
522  // TODO: Following else statement is present here because we used various
523  // alias names for GPUs up until GFX9 (e.g. 'fiji' is same as 'gfx803').
524  // Remove once all aliases are removed from GCNProcessors.td.
525  if (Version.Major >= 9)
526  Processor = STI.getCPU().str();
527  else
528  Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
529  Twine(Version.Stepping))
530  .str();
531 
532  std::string Features;
533  if (Optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
534  switch (*HsaAbiVersion) {
535  case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
536  // Code object V2 only supported specific processors and had fixed
537  // settings for the XNACK.
538  if (Processor == "gfx600") {
539  } else if (Processor == "gfx601") {
540  } else if (Processor == "gfx602") {
541  } else if (Processor == "gfx700") {
542  } else if (Processor == "gfx701") {
543  } else if (Processor == "gfx702") {
544  } else if (Processor == "gfx703") {
545  } else if (Processor == "gfx704") {
546  } else if (Processor == "gfx705") {
547  } else if (Processor == "gfx801") {
548  if (!isXnackOnOrAny())
549  report_fatal_error(
550  "AMD GPU code object V2 does not support processor " +
551  Twine(Processor) + " without XNACK");
552  } else if (Processor == "gfx802") {
553  } else if (Processor == "gfx803") {
554  } else if (Processor == "gfx805") {
555  } else if (Processor == "gfx810") {
556  if (!isXnackOnOrAny())
557  report_fatal_error(
558  "AMD GPU code object V2 does not support processor " +
559  Twine(Processor) + " without XNACK");
560  } else if (Processor == "gfx900") {
561  if (isXnackOnOrAny())
562  Processor = "gfx901";
563  } else if (Processor == "gfx902") {
564  if (isXnackOnOrAny())
565  Processor = "gfx903";
566  } else if (Processor == "gfx904") {
567  if (isXnackOnOrAny())
568  Processor = "gfx905";
569  } else if (Processor == "gfx906") {
570  if (isXnackOnOrAny())
571  Processor = "gfx907";
572  } else if (Processor == "gfx90c") {
573  if (isXnackOnOrAny())
574  report_fatal_error(
575  "AMD GPU code object V2 does not support processor " +
576  Twine(Processor) + " with XNACK being ON or ANY");
577  } else {
578  report_fatal_error(
579  "AMD GPU code object V2 does not support processor " +
580  Twine(Processor));
581  }
582  break;
583  case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
584  // xnack.
585  if (isXnackOnOrAny())
586  Features += "+xnack";
587  // In code object v2 and v3, "sramecc" feature was spelled with a
588  // hyphen ("sram-ecc").
589  if (isSramEccOnOrAny())
590  Features += "+sram-ecc";
591  break;
592  case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
593  case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
594  // sramecc.
595  if (getSramEccSetting() == TargetIDSetting::Off)
596  Features += ":sramecc-";
597  else if (getSramEccSetting() == TargetIDSetting::On)
598  Features += ":sramecc+";
599  // xnack.
600  if (getXnackSetting() == TargetIDSetting::Off)
601  Features += ":xnack-";
602  else if (getXnackSetting() == TargetIDSetting::On)
603  Features += ":xnack+";
604  break;
605  default:
606  break;
607  }
608  }
609 
610  StreamRep << Processor << Features;
611 
612  StreamRep.flush();
613  return StringRep;
614 }
615 
616 unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
617  if (STI->getFeatureBits().test(FeatureWavefrontSize16))
618  return 16;
619  if (STI->getFeatureBits().test(FeatureWavefrontSize32))
620  return 32;
621 
622  return 64;
623 }
624 
625 unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
626  if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
627  return 32768;
628  if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
629  return 65536;
630 
631  return 0;
632 }
633 
634 unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
635  // "Per CU" really means "per whatever functional block the waves of a
636  // workgroup must share". For gfx10 in CU mode this is the CU, which contains
637  // two SIMDs.
638  if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode))
639  return 2;
640  // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP contains
641  // two CUs, so a total of four SIMDs.
642  return 4;
643 }
644 
645 unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
646  unsigned FlatWorkGroupSize) {
647  assert(FlatWorkGroupSize != 0);
648  if (STI->getTargetTriple().getArch() != Triple::amdgcn)
649  return 8;
650  unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
651  if (N == 1)
652  return 40;
653  N = 40 / N;
654  return std::min(N, 16u);
655 }
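// Worked example (illustrative): for an amdgcn wave64 subtarget and a
// FlatWorkGroupSize of 256, getWavesPerWorkGroup returns divideCeil(256, 64)
// == 4, so the function above yields min(40 / 4, 16) == 10 workgroups per CU.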
656 
657 unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
658  return 1;
659 }
660 
661 unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
662  // FIXME: Need to take scratch memory into account.
663  if (isGFX90A(*STI))
664  return 8;
665  if (!isGFX10Plus(*STI))
666  return 10;
667  return hasGFX10_3Insts(*STI) ? 16 : 20;
668 }
669 
670 unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
671  unsigned FlatWorkGroupSize) {
672  return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
673  getEUsPerCU(STI));
674 }
675 
676 unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
677  return 1;
678 }
679 
680 unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
681  // Some subtargets allow encoding 2048, but this isn't tested or supported.
682  return 1024;
683 }
684 
685 unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
686  unsigned FlatWorkGroupSize) {
687  return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
688 }
689 
690 unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
691  IsaVersion Version = getIsaVersion(STI->getCPU());
692  if (Version.Major >= 10)
693  return getAddressableNumSGPRs(STI);
694  if (Version.Major >= 8)
695  return 16;
696  return 8;
697 }
698 
699 unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
700  return 8;
701 }
702 
703 unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
704  IsaVersion Version = getIsaVersion(STI->getCPU());
705  if (Version.Major >= 8)
706  return 800;
707  return 512;
708 }
709 
710 unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
711  if (STI->getFeatureBits().test(FeatureSGPRInitBug))
712  return FIXED_NUM_SGPRS_FOR_INIT_BUG;
713 
714  IsaVersion Version = getIsaVersion(STI->getCPU());
715  if (Version.Major >= 10)
716  return 106;
717  if (Version.Major >= 8)
718  return 102;
719  return 104;
720 }
721 
722 unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
723  assert(WavesPerEU != 0);
724 
725  IsaVersion Version = getIsaVersion(STI->getCPU());
726  if (Version.Major >= 10)
727  return 0;
728 
729  if (WavesPerEU >= getMaxWavesPerEU(STI))
730  return 0;
731 
732  unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
733  if (STI->getFeatureBits().test(FeatureTrapHandler))
734  MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
735  MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
736  return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
737 }
738 
739 unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
740  bool Addressable) {
741  assert(WavesPerEU != 0);
742 
743  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
744  IsaVersion Version = getIsaVersion(STI->getCPU());
745  if (Version.Major >= 10)
746  return Addressable ? AddressableNumSGPRs : 108;
747  if (Version.Major >= 8 && !Addressable)
748  AddressableNumSGPRs = 112;
749  unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
750  if (STI->getFeatureBits().test(FeatureTrapHandler))
751  MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
752  MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
753  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
754 }
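// Worked example (illustrative, assuming gfx9 with no trap handler): with
// WavesPerEU == 8, getTotalNumSGPRs(STI) == 800 and getSGPRAllocGranule(STI)
// == 16, so getMaxNumSGPRs returns alignDown(800 / 8, 16) == 96, which is
// already below the 102 addressable SGPRs used as the upper clamp.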
755 
756 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
757  bool FlatScrUsed, bool XNACKUsed) {
758  unsigned ExtraSGPRs = 0;
759  if (VCCUsed)
760  ExtraSGPRs = 2;
761 
762  IsaVersion Version = getIsaVersion(STI->getCPU());
763  if (Version.Major >= 10)
764  return ExtraSGPRs;
765 
766  if (Version.Major < 8) {
767  if (FlatScrUsed)
768  ExtraSGPRs = 4;
769  } else {
770  if (XNACKUsed)
771  ExtraSGPRs = 4;
772 
773  if (FlatScrUsed ||
774  STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
775  ExtraSGPRs = 6;
776  }
777 
778  return ExtraSGPRs;
779 }
780 
781 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
782  bool FlatScrUsed) {
783  return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
784  STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
785 }
786 
787 unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
788  NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI));
789  // SGPRBlocks is actual number of SGPR blocks minus 1.
790  return NumSGPRs / getSGPREncodingGranule(STI) - 1;
791 }
792 
793 unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
794  Optional<bool> EnableWavefrontSize32) {
795  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
796  return 8;
797 
798  bool IsWave32 = EnableWavefrontSize32 ?
799  *EnableWavefrontSize32 :
800  STI->getFeatureBits().test(FeatureWavefrontSize32);
801 
802  if (hasGFX10_3Insts(*STI))
803  return IsWave32 ? 16 : 8;
804 
805  return IsWave32 ? 8 : 4;
806 }
807 
808 unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
809  Optional<bool> EnableWavefrontSize32) {
810  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
811  return 8;
812 
813  bool IsWave32 = EnableWavefrontSize32 ?
814  *EnableWavefrontSize32 :
815  STI->getFeatureBits().test(FeatureWavefrontSize32);
816 
817  return IsWave32 ? 8 : 4;
818 }
819 
820 unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
821  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
822  return 512;
823  if (!isGFX10Plus(*STI))
824  return 256;
825  return STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1024 : 512;
826 }
827 
828 unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) {
829  if (LimitTo128VGPRs.getNumOccurrences() ? LimitTo128VGPRs
830  : isGFX11Plus(*STI)) {
831  // GFX11 changes the encoding of 16-bit operands in VOP1/2/C instructions
832  // such that values 128..255 no longer mean v128..v255, they mean
833  // v0.hi..v127.hi instead. Until the compiler understands this, it is not
834  // safe to use v128..v255.
835  // TODO-GFX11: Remove this when full 16-bit codegen is implemented.
836  return 128;
837  }
838  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
839  return 512;
840  return 256;
841 }
842 
843 unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
844  assert(WavesPerEU != 0);
845 
846  if (WavesPerEU >= getMaxWavesPerEU(STI))
847  return 0;
848  unsigned MinNumVGPRs =
849  alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1),
850  getVGPRAllocGranule(STI)) + 1;
851  return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI));
852 }
853 
854 unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
855  assert(WavesPerEU != 0);
856 
857  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
858  getVGPRAllocGranule(STI));
859  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
860  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
861 }
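// Worked example (illustrative, assuming a wave64 gfx9 subtarget): with
// WavesPerEU == 4, getTotalNumVGPRs(STI) == 256 and getVGPRAllocGranule(STI)
// == 4, so getMaxNumVGPRs returns alignDown(256 / 4, 4) == 64 VGPRs.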
862 
863 unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
864  Optional<bool> EnableWavefrontSize32) {
865  NumVGPRs = alignTo(std::max(1u, NumVGPRs),
866  getVGPREncodingGranule(STI, EnableWavefrontSize32));
867  // VGPRBlocks is actual number of VGPR blocks minus 1.
868  return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1;
869 }
870 
871 } // end namespace IsaInfo
872 
873 void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
874  const MCSubtargetInfo *STI) {
875  IsaVersion Version = getIsaVersion(STI->getCPU());
876 
877  memset(&Header, 0, sizeof(Header));
878 
879  Header.amd_kernel_code_version_major = 1;
880  Header.amd_kernel_code_version_minor = 2;
881  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
882  Header.amd_machine_version_major = Version.Major;
883  Header.amd_machine_version_minor = Version.Minor;
884  Header.amd_machine_version_stepping = Version.Stepping;
885  Header.kernel_code_entry_byte_offset = sizeof(Header);
886  Header.wavefront_size = 6;
887 
888  // If the code object does not support indirect functions, then the value must
889  // be 0xffffffff.
890  Header.call_convention = -1;
891 
892  // These alignment values are specified in powers of two, so alignment =
893  // 2^n. The minimum alignment is 2^4 = 16.
894  Header.kernarg_segment_alignment = 4;
895  Header.group_segment_alignment = 4;
896  Header.private_segment_alignment = 4;
897 
898  if (Version.Major >= 10) {
899  if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
900  Header.wavefront_size = 5;
901  Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
902  }
903  Header.compute_pgm_resource_registers |=
904  S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
905  S_00B848_MEM_ORDERED(1);
906  }
907 }
908 
909 amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
910  const MCSubtargetInfo *STI) {
911  IsaVersion Version = getIsaVersion(STI->getCPU());
912 
913  amdhsa::kernel_descriptor_t KD;
914  memset(&KD, 0, sizeof(KD));
915 
916  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
917  amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
918  amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
919  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
920  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
921  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
922  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
923  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
924  amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
925  if (Version.Major >= 10) {
926  AMDHSA_BITS_SET(KD.kernel_code_properties,
927  amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
928  STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1 : 0);
929  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
930  amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE,
931  STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1);
932  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
933  amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1);
934  }
935  if (AMDGPU::isGFX90A(*STI)) {
936  AMDHSA_BITS_SET(KD.compute_pgm_rsrc3,
937  amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT,
938  STI->getFeatureBits().test(FeatureTgSplit) ? 1 : 0);
939  }
940  return KD;
941 }
942 
943 bool isGroupSegment(const GlobalValue *GV) {
944  return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
945 }
946 
947 bool isGlobalSegment(const GlobalValue *GV) {
948  return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
949 }
950 
951 bool isReadOnlySegment(const GlobalValue *GV) {
952  unsigned AS = GV->getAddressSpace();
953  return AS == AMDGPUAS::CONSTANT_ADDRESS ||
954  AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
955 }
956 
957 bool shouldEmitConstantsToTextSection(const Triple &TT) {
958  return TT.getArch() == Triple::r600;
959 }
960 
961 int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
962  Attribute A = F.getFnAttribute(Name);
963  int Result = Default;
964 
965  if (A.isStringAttribute()) {
966  StringRef Str = A.getValueAsString();
967  if (Str.getAsInteger(0, Result)) {
968  LLVMContext &Ctx = F.getContext();
969  Ctx.emitError("can't parse integer attribute " + Name);
970  }
971  }
972 
973  return Result;
974 }
975 
976 std::pair<int, int> getIntegerPairAttribute(const Function &F,
977  StringRef Name,
978  std::pair<int, int> Default,
979  bool OnlyFirstRequired) {
980  Attribute A = F.getFnAttribute(Name);
981  if (!A.isStringAttribute())
982  return Default;
983 
984  LLVMContext &Ctx = F.getContext();
985  std::pair<int, int> Ints = Default;
986  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
987  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
988  Ctx.emitError("can't parse first integer attribute " + Name);
989  return Default;
990  }
991  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
992  if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
993  Ctx.emitError("can't parse second integer attribute " + Name);
994  return Default;
995  }
996  }
997 
998  return Ints;
999 }
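// Usage sketch (illustrative): parsing the "amdgpu-waves-per-eu" function
// attribute, whose second value is optional.
//   // "amdgpu-waves-per-eu"="4,8" yields {4, 8};
//   // "amdgpu-waves-per-eu"="4" keeps Default.second for the second value.
//   std::pair<int, int> WavesPerEU =
//       getIntegerPairAttribute(F, "amdgpu-waves-per-eu", {0, 0},
//                               /*OnlyFirstRequired=*/true);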
1000 
1001 unsigned getVmcntBitMask(const IsaVersion &Version) {
1002  return (1 << (getVmcntBitWidthLo(Version.Major) +
1003  getVmcntBitWidthHi(Version.Major))) -
1004  1;
1005 }
1006 
1007 unsigned getExpcntBitMask(const IsaVersion &Version) {
1008  return (1 << getExpcntBitWidth(Version.Major)) - 1;
1009 }
1010 
1011 unsigned getLgkmcntBitMask(const IsaVersion &Version) {
1012  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
1013 }
1014 
1015 unsigned getWaitcntBitMask(const IsaVersion &Version) {
1016  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
1017  getVmcntBitWidthLo(Version.Major));
1018  unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
1019  getExpcntBitWidth(Version.Major));
1020  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
1021  getLgkmcntBitWidth(Version.Major));
1022  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
1023  getVmcntBitWidthHi(Version.Major));
1024  return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;
1025 }
1026 
1027 unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1028  unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
1029  getVmcntBitWidthLo(Version.Major));
1030  unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
1031  getVmcntBitWidthHi(Version.Major));
1032  return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
1033 }
1034 
1035 unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
1036  return unpackBits(Waitcnt, getExpcntBitShift(Version.Major),
1037  getExpcntBitWidth(Version.Major));
1038 }
1039 
1040 unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1041  return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major),
1042  getLgkmcntBitWidth(Version.Major));
1043 }
1044 
1045 void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
1046  unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
1047  Vmcnt = decodeVmcnt(Version, Waitcnt);
1048  Expcnt = decodeExpcnt(Version, Waitcnt);
1049  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
1050 }
1051 
1052 Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
1053  Waitcnt Decoded;
1054  Decoded.VmCnt = decodeVmcnt(Version, Encoded);
1055  Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
1056  Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded);
1057  return Decoded;
1058 }
1059 
1060 unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
1061  unsigned Vmcnt) {
1062  Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major),
1063  getVmcntBitWidthLo(Version.Major));
1064  return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
1065  getVmcntBitShiftHi(Version.Major),
1066  getVmcntBitWidthHi(Version.Major));
1067 }
1068 
1069 unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
1070  unsigned Expcnt) {
1071  return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
1072  getExpcntBitWidth(Version.Major));
1073 }
1074 
1075 unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
1076  unsigned Lgkmcnt) {
1077  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
1078  getLgkmcntBitWidth(Version.Major));
1079 }
1080 
1081 unsigned encodeWaitcnt(const IsaVersion &Version,
1082  unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
1083  unsigned Waitcnt = getWaitcntBitMask(Version);
1084  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
1085  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
1086  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
1087  return Waitcnt;
1088 }
1089 
1090 unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
1091  return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt);
1092 }
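// Usage sketch (illustrative): building the immediate for
// "s_waitcnt vmcnt(0) lgkmcnt(0)" while leaving expcnt at its no-wait maximum.
//   IsaVersion IV = getIsaVersion(STI.getCPU());
//   unsigned Enc = encodeWaitcnt(IV, /*Vmcnt=*/0, getExpcntBitMask(IV),
//                                /*Lgkmcnt=*/0);
//   Waitcnt W = decodeWaitcnt(IV, Enc); // W.VmCnt == 0, W.LgkmCnt == 0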
1093 
1094 //===----------------------------------------------------------------------===//
1095 // Custom Operands.
1096 //
1097 // A table of custom operands shall describe "primary" operand names
1098 // first followed by aliases if any. It is not required but recommended
1099 // to arrange operands so that operand encoding match operand position
1100 // in the table. This will make disassembly a bit more efficient.
1101 // Unused slots in the table shall have an empty name.
1102 //
1103 //===----------------------------------------------------------------------===//
1104 
1105 template <class T>
1106 static bool isValidOpr(int Idx, const CustomOperand<T> OpInfo[], int OpInfoSize,
1107  T Context) {
1108  return 0 <= Idx && Idx < OpInfoSize && !OpInfo[Idx].Name.empty() &&
1109  (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context));
1110 }
1111 
1112 template <class T>
1113 static int getOprIdx(std::function<bool(const CustomOperand<T> &)> Test,
1114  const CustomOperand<T> OpInfo[], int OpInfoSize,
1115  T Context) {
1116  int InvalidIdx = OPR_ID_UNKNOWN;
1117  for (int Idx = 0; Idx < OpInfoSize; ++Idx) {
1118  if (Test(OpInfo[Idx])) {
1119  if (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context))
1120  return Idx;
1121  InvalidIdx = OPR_ID_UNSUPPORTED;
1122  }
1123  }
1124  return InvalidIdx;
1125 }
1126 
1127 template <class T>
1128 static int getOprIdx(const StringRef Name, const CustomOperand<T> OpInfo[],
1129  int OpInfoSize, T Context) {
1130  auto Test = [=](const CustomOperand<T> &Op) { return Op.Name == Name; };
1131  return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
1132 }
1133 
1134 template <class T>
1135 static int getOprIdx(int Id, const CustomOperand<T> OpInfo[], int OpInfoSize,
1136  T Context, bool QuickCheck = true) {
1137  auto Test = [=](const CustomOperand<T> &Op) {
1138  return Op.Encoding == Id && !Op.Name.empty();
1139  };
1140  // This is an optimization that should work in most cases.
1141  // As a side effect, it may cause selection of an alias
1142  // instead of a primary operand name in case of sparse tables.
1143  if (QuickCheck && isValidOpr<T>(Id, OpInfo, OpInfoSize, Context) &&
1144  OpInfo[Id].Encoding == Id) {
1145  return Id;
1146  }
1147  return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
1148 }
1149 
1150 //===----------------------------------------------------------------------===//
1151 // Custom Operand Values
1152 //===----------------------------------------------------------------------===//
1153 
1154 static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr,
1155  int Size,
1156  const MCSubtargetInfo &STI) {
1157  unsigned Enc = 0;
1158  for (int Idx = 0; Idx < Size; ++Idx) {
1159  const auto &Op = Opr[Idx];
1160  if (Op.isSupported(STI))
1161  Enc |= Op.encode(Op.Default);
1162  }
1163  return Enc;
1164 }
1165 
1166 static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr,
1167  int Size, unsigned Code,
1168  bool &HasNonDefaultVal,
1169  const MCSubtargetInfo &STI) {
1170  unsigned UsedOprMask = 0;
1171  HasNonDefaultVal = false;
1172  for (int Idx = 0; Idx < Size; ++Idx) {
1173  const auto &Op = Opr[Idx];
1174  if (!Op.isSupported(STI))
1175  continue;
1176  UsedOprMask |= Op.getMask();
1177  unsigned Val = Op.decode(Code);
1178  if (!Op.isValid(Val))
1179  return false;
1180  HasNonDefaultVal |= (Val != Op.Default);
1181  }
1182  return (Code & ~UsedOprMask) == 0;
1183 }
1184 
1185 static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size,
1186  unsigned Code, int &Idx, StringRef &Name,
1187  unsigned &Val, bool &IsDefault,
1188  const MCSubtargetInfo &STI) {
1189  while (Idx < Size) {
1190  const auto &Op = Opr[Idx++];
1191  if (Op.isSupported(STI)) {
1192  Name = Op.Name;
1193  Val = Op.decode(Code);
1194  IsDefault = (Val == Op.Default);
1195  return true;
1196  }
1197  }
1198 
1199  return false;
1200 }
1201 
1202 static int encodeCustomOperandVal(const CustomOperandVal &Op,
1203  int64_t InputVal) {
1204  if (InputVal < 0 || InputVal > Op.Max)
1205  return OPR_VAL_INVALID;
1206  return Op.encode(InputVal);
1207 }
1208 
1209 static int encodeCustomOperand(const CustomOperandVal *Opr, int Size,
1210  const StringRef Name, int64_t InputVal,
1211  unsigned &UsedOprMask,
1212  const MCSubtargetInfo &STI) {
1213  int InvalidId = OPR_ID_UNKNOWN;
1214  for (int Idx = 0; Idx < Size; ++Idx) {
1215  const auto &Op = Opr[Idx];
1216  if (Op.Name == Name) {
1217  if (!Op.isSupported(STI)) {
1218  InvalidId = OPR_ID_UNSUPPORTED;
1219  continue;
1220  }
1221  auto OprMask = Op.getMask();
1222  if (OprMask & UsedOprMask)
1223  return OPR_ID_DUPLICATE;
1224  UsedOprMask |= OprMask;
1225  return encodeCustomOperandVal(Op, InputVal);
1226  }
1227  }
1228  return InvalidId;
1229 }
1230 
1231 //===----------------------------------------------------------------------===//
1232 // DepCtr
1233 //===----------------------------------------------------------------------===//
1234 
1235 namespace DepCtr {
1236 
1237 int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) {
1238  static int Default = -1;
1239  if (Default == -1)
1240  Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI);
1241  return Default;
1242 }
1243 
1244 bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal,
1245  const MCSubtargetInfo &STI) {
1246  return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code,
1247  HasNonDefaultVal, STI);
1248 }
1249 
1250 bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val,
1251  bool &IsDefault, const MCSubtargetInfo &STI) {
1252  return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val,
1253  IsDefault, STI);
1254 }
1255 
1256 int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask,
1257  const MCSubtargetInfo &STI) {
1258  return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask,
1259  STI);
1260 }
1261 
1262 } // namespace DepCtr
1263 
1264 //===----------------------------------------------------------------------===//
1265 // hwreg
1266 //===----------------------------------------------------------------------===//
1267 
1268 namespace Hwreg {
1269 
1270 int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI) {
1271  int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Opr, OPR_SIZE, STI);
1272  return (Idx < 0) ? Idx : Opr[Idx].Encoding;
1273 }
1274 
1275 bool isValidHwreg(int64_t Id) {
1276  return 0 <= Id && isUInt<ID_WIDTH_>(Id);
1277 }
1278 
1279 bool isValidHwregOffset(int64_t Offset) {
1280  return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset);
1281 }
1282 
1283 bool isValidHwregWidth(int64_t Width) {
1284  return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1);
1285 }
1286 
1287 uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) {
1288  return (Id << ID_SHIFT_) |
1289  (Offset << OFFSET_SHIFT_) |
1290  ((Width - 1) << WIDTH_M1_SHIFT_);
1291 }
1292 
1293 StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
1294  int Idx = getOprIdx<const MCSubtargetInfo &>(Id, Opr, OPR_SIZE, STI);
1295  return (Idx < 0) ? "" : Opr[Idx].Name;
1296 }
1297 
1298 void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width) {
1299  Id = (Val & ID_MASK_) >> ID_SHIFT_;
1300  Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_;
1301  Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1;
1302 }
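// Usage sketch (illustrative): the encode/decode pair above is symmetric, so a
// field description survives a round trip. HwregId stands for a value returned
// by getHwregId() above.
//   uint64_t Enc = encodeHwreg(/*Id=*/HwregId, /*Offset=*/8, /*Width=*/16);
//   unsigned Id, Offset, Width;
//   decodeHwreg(Enc, Id, Offset, Width); // Id == HwregId, Offset == 8, Width == 16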
1303 
1304 } // namespace Hwreg
1305 
1306 //===----------------------------------------------------------------------===//
1307 // exp tgt
1308 //===----------------------------------------------------------------------===//
1309 
1310 namespace Exp {
1311 
1312 struct ExpTgt {
1313  StringLiteral Name;
1314  unsigned Tgt;
1315  unsigned MaxIndex;
1316 };
1317 
1318 static constexpr ExpTgt ExpTgtInfo[] = {
1319  {{"null"}, ET_NULL, ET_NULL_MAX_IDX},
1320  {{"mrtz"}, ET_MRTZ, ET_MRTZ_MAX_IDX},
1321  {{"prim"}, ET_PRIM, ET_PRIM_MAX_IDX},
1322  {{"mrt"}, ET_MRT0, ET_MRT_MAX_IDX},
1323  {{"pos"}, ET_POS0, ET_POS_MAX_IDX},
1324  {{"dual_src_blend"}, ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX},
1325  {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX},
1326 };
1327 
1328 bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
1329  for (const ExpTgt &Val : ExpTgtInfo) {
1330  if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
1331  Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
1332  Name = Val.Name;
1333  return true;
1334  }
1335  }
1336  return false;
1337 }
1338 
1339 unsigned getTgtId(const StringRef Name) {
1340 
1341  for (const ExpTgt &Val : ExpTgtInfo) {
1342  if (Val.MaxIndex == 0 && Name == Val.Name)
1343  return Val.Tgt;
1344 
1345  if (Val.MaxIndex > 0 && Name.startswith(Val.Name)) {
1346  StringRef Suffix = Name.drop_front(Val.Name.size());
1347 
1348  unsigned Id;
1349  if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex)
1350  return ET_INVALID;
1351 
1352  // Disable leading zeroes
1353  if (Suffix.size() > 1 && Suffix[0] == '0')
1354  return ET_INVALID;
1355 
1356  return Val.Tgt + Id;
1357  }
1358  }
1359  return ET_INVALID;
1360 }
1361 
1362 bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) {
1363  switch (Id) {
1364  case ET_NULL:
1365  return !isGFX11Plus(STI);
1366  case ET_POS4:
1367  case ET_PRIM:
1368  return isGFX10Plus(STI);
1369  case ET_DUAL_SRC_BLEND0:
1370  case ET_DUAL_SRC_BLEND1:
1371  return isGFX11Plus(STI);
1372  default:
1373  if (Id >= ET_PARAM0 && Id <= ET_PARAM31)
1374  return !isGFX11Plus(STI);
1375  return true;
1376  }
1377 }
1378 
1379 } // namespace Exp
1380 
1381 //===----------------------------------------------------------------------===//
1382 // MTBUF Format
1383 //===----------------------------------------------------------------------===//
1384 
1385 namespace MTBUFFormat {
1386 
1387 int64_t getDfmt(const StringRef Name) {
1388  for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) {
1389  if (Name == DfmtSymbolic[Id])
1390  return Id;
1391  }
1392  return DFMT_UNDEF;
1393 }
1394 
1395 StringRef getDfmtName(unsigned Id) {
1396  assert(Id <= DFMT_MAX);
1397  return DfmtSymbolic[Id];
1398 }
1399 
1400 static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) {
1401  if (isSI(STI) || isCI(STI))
1402  return NfmtSymbolicSICI;
1403  if (isVI(STI) || isGFX9(STI))
1404  return NfmtSymbolicVI;
1405  return NfmtSymbolicGFX10;
1406 }
1407 
1408 int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) {
1409  auto lookupTable = getNfmtLookupTable(STI);
1410  for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) {
1411  if (Name == lookupTable[Id])
1412  return Id;
1413  }
1414  return NFMT_UNDEF;
1415 }
1416 
1417 StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) {
1418  assert(Id <= NFMT_MAX);
1419  return getNfmtLookupTable(STI)[Id];
1420 }
1421 
1422 bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) {
1423  unsigned Dfmt;
1424  unsigned Nfmt;
1425  decodeDfmtNfmt(Id, Dfmt, Nfmt);
1426  return isValidNfmt(Nfmt, STI);
1427 }
1428 
1429 bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) {
1430  return !getNfmtName(Id, STI).empty();
1431 }
1432 
1433 int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) {
1434  return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT);
1435 }
1436 
1437 void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) {
1438  Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK;
1439  Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK;
1440 }
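// Usage sketch (illustrative): dfmt and nfmt occupy disjoint bit ranges, so
// encodeDfmtNfmt/decodeDfmtNfmt round-trip.
//   unsigned Dfmt = 0, Nfmt = 0;
//   decodeDfmtNfmt(encodeDfmtNfmt(/*Dfmt=*/4, /*Nfmt=*/2), Dfmt, Nfmt);
//   // Dfmt == 4, Nfmt == 2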
1441 
1442 int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) {
1443  if (isGFX11Plus(STI)) {
1444  for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
1445  if (Name == UfmtSymbolicGFX11[Id])
1446  return Id;
1447  }
1448  } else {
1449  for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
1450  if (Name == UfmtSymbolicGFX10[Id])
1451  return Id;
1452  }
1453  }
1454  return UFMT_UNDEF;
1455 }
1456 
1457 StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) {
1458  if (isValidUnifiedFormat(Id, STI))
1459  return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id];
1460  return "";
1461 }
1462 
1463 bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) {
1464  return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST;
1465 }
1466 
1467 int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt,
1468  const MCSubtargetInfo &STI) {
1469  int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt);
1470  if (isGFX11Plus(STI)) {
1471  for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
1472  if (Fmt == DfmtNfmt2UFmtGFX11[Id])
1473  return Id;
1474  }
1475  } else {
1476  for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
1477  if (Fmt == DfmtNfmt2UFmtGFX10[Id])
1478  return Id;
1479  }
1480  }
1481  return UFMT_UNDEF;
1482 }
1483 
1484 bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) {
1485  return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX);
1486 }
1487 
1488 unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) {
1489  if (isGFX10Plus(STI))
1490  return UFMT_DEFAULT;
1491  return DFMT_NFMT_DEFAULT;
1492 }
1493 
1494 } // namespace MTBUFFormat
1495 
1496 //===----------------------------------------------------------------------===//
1497 // SendMsg
1498 //===----------------------------------------------------------------------===//
1499 
1500 namespace SendMsg {
1501 
1502 static uint64_t getMsgIdMask(const MCSubtargetInfo &STI) {
1503  return isGFX11Plus(STI) ? ID_MASK_GFX11Plus_ : ID_MASK_PreGFX11_;
1504 }
1505 
1506 int64_t getMsgId(const StringRef Name, const MCSubtargetInfo &STI) {
1507  int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Msg, MSG_SIZE, STI);
1508  return (Idx < 0) ? Idx : Msg[Idx].Encoding;
1509 }
1510 
1511 bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI) {
1512  return (MsgId & ~(getMsgIdMask(STI))) == 0;
1513 }
1514 
1515 StringRef getMsgName(int64_t MsgId, const MCSubtargetInfo &STI) {
1516  int Idx = getOprIdx<const MCSubtargetInfo &>(MsgId, Msg, MSG_SIZE, STI);
1517  return (Idx < 0) ? "" : Msg[Idx].Name;
1518 }
1519 
1520 int64_t getMsgOpId(int64_t MsgId, const StringRef Name) {
1521  const char* const *S = (MsgId == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
1522  const int F = (MsgId == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
1523  const int L = (MsgId == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
1524  for (int i = F; i < L; ++i) {
1525  if (Name == S[i]) {
1526  return i;
1527  }
1528  }
1529  return OP_UNKNOWN_;
1530 }
1531 
1532 bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
1533  bool Strict) {
1534  assert(isValidMsgId(MsgId, STI));
1535 
1536  if (!Strict)
1537  return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);
1538 
1539  if (MsgId == ID_SYSMSG)
1540  return OP_SYS_FIRST_ <= OpId && OpId < OP_SYS_LAST_;
1541  if (!isGFX11Plus(STI)) {
1542  switch (MsgId) {
1543  case ID_GS_PreGFX11:
1544  return (OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_) && OpId != OP_GS_NOP;
1545  case ID_GS_DONE_PreGFX11:
1546  return OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_;
1547  }
1548  }
1549  return OpId == OP_NONE_;
1550 }
1551 
1552 StringRef getMsgOpName(int64_t MsgId, int64_t OpId,
1553  const MCSubtargetInfo &STI) {
1554  assert(msgRequiresOp(MsgId, STI));
1555  return (MsgId == ID_SYSMSG)? OpSysSymbolic[OpId] : OpGsSymbolic[OpId];
1556 }
1557 
1558 bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
1559  const MCSubtargetInfo &STI, bool Strict) {
1560  assert(isValidMsgOp(MsgId, OpId, STI, Strict));
1561 
1562  if (!Strict)
1563  return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);
1564 
1565  if (!isGFX11Plus(STI)) {
1566  switch (MsgId) {
1567  case ID_GS_PreGFX11:
1569  case ID_GS_DONE_PreGFX11:
1570  return (OpId == OP_GS_NOP) ?
1571  (StreamId == STREAM_ID_NONE_) :
1572  (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
1573  }
1574  }
1575  return StreamId == STREAM_ID_NONE_;
1576 }
1577 
1578 bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) {
1579  return MsgId == ID_SYSMSG ||
1580  (!isGFX11Plus(STI) &&
1581  (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11));
1582 }
1583 
1584 bool msgSupportsStream(int64_t MsgId, int64_t OpId,
1585  const MCSubtargetInfo &STI) {
1586  return !isGFX11Plus(STI) &&
1587  (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) &&
1588  OpId != OP_GS_NOP;
1589 }
1590 
1591 void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
1592  uint16_t &StreamId, const MCSubtargetInfo &STI) {
1593  MsgId = Val & getMsgIdMask(STI);
1594  if (isGFX11Plus(STI)) {
1595  OpId = 0;
1596  StreamId = 0;
1597  } else {
1598  OpId = (Val & OP_MASK_) >> OP_SHIFT_;
1599  StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
1600  }
1601 }
1602 
1603 uint64_t encodeMsg(uint64_t MsgId,
1604  uint64_t OpId,
1605  uint64_t StreamId) {
1606  return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_);
1607 }
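// Usage sketch (illustrative): encodeMsg/decodeMsg mirror each other on
// pre-GFX11 targets, where the op and stream fields are meaningful.
//   uint64_t Imm = encodeMsg(MsgId, OpId, StreamId);
//   uint16_t M, O, S;
//   decodeMsg(Imm, M, O, S, STI); // M == MsgId, O == OpId, S == StreamId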
1608 
1609 } // namespace SendMsg
1610 
1611 //===----------------------------------------------------------------------===//
1612 //
1613 //===----------------------------------------------------------------------===//
1614 
1615 unsigned getInitialPSInputAddr(const Function &F) {
1616  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
1617 }
1618 
1619 bool getHasColorExport(const Function &F) {
1620  // As a safe default always respond as if PS has color exports.
1621  return getIntegerAttribute(
1622  F, "amdgpu-color-export",
1623  F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
1624 }
1625 
1626 bool getHasDepthExport(const Function &F) {
1627  return getIntegerAttribute(F, "amdgpu-depth-export", 0) != 0;
1628 }
1629 
1630 bool isShader(CallingConv::ID cc) {
1631  switch(cc) {
1632  case CallingConv::AMDGPU_VS:
1633  case CallingConv::AMDGPU_LS:
1634  case CallingConv::AMDGPU_HS:
1635  case CallingConv::AMDGPU_ES:
1636  case CallingConv::AMDGPU_GS:
1637  case CallingConv::AMDGPU_PS:
1638  case CallingConv::AMDGPU_CS:
1639  return true;
1640  default:
1641  return false;
1642  }
1643 }
1644 
1645 bool isGraphics(CallingConv::ID cc) {
1646  return isShader(cc) || cc == CallingConv::AMDGPU_Gfx;
1647 }
1648 
1649 bool isCompute(CallingConv::ID cc) {
1650  return !isGraphics(cc) || cc == CallingConv::AMDGPU_CS;
1651 }
1652 
1653 bool isEntryFunctionCC(CallingConv::ID CC) {
1654  switch (CC) {
1655  case CallingConv::AMDGPU_KERNEL:
1656  case CallingConv::SPIR_KERNEL:
1657  case CallingConv::AMDGPU_VS:
1658  case CallingConv::AMDGPU_GS:
1659  case CallingConv::AMDGPU_PS:
1660  case CallingConv::AMDGPU_CS:
1661  case CallingConv::AMDGPU_ES:
1662  case CallingConv::AMDGPU_HS:
1663  case CallingConv::AMDGPU_LS:
1664  return true;
1665  default:
1666  return false;
1667  }
1668 }
1669 
1670 bool isModuleEntryFunctionCC(CallingConv::ID CC) {
1671  switch (CC) {
1672  case CallingConv::AMDGPU_Gfx:
1673  return true;
1674  default:
1675  return isEntryFunctionCC(CC);
1676  }
1677 }
1678 
1679 bool isKernelCC(const Function *Func) {
1680  return AMDGPU::isModuleEntryFunctionCC(Func->getCallingConv());
1681 }
1682 
1683 bool hasXNACK(const MCSubtargetInfo &STI) {
1684  return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
1685 }
1686 
1687 bool hasSRAMECC(const MCSubtargetInfo &STI) {
1688  return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC];
1689 }
1690 
1691 bool hasMIMG_R128(const MCSubtargetInfo &STI) {
1692  return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128] && !STI.getFeatureBits()[AMDGPU::FeatureR128A16];
1693 }
1694 
1695 bool hasGFX10A16(const MCSubtargetInfo &STI) {
1696  return STI.getFeatureBits()[AMDGPU::FeatureGFX10A16];
1697 }
1698 
1699 bool hasG16(const MCSubtargetInfo &STI) {
1700  return STI.getFeatureBits()[AMDGPU::FeatureG16];
1701 }
1702 
1703 bool hasPackedD16(const MCSubtargetInfo &STI) {
1704  return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem] && !isCI(STI) &&
1705  !isSI(STI);
1706 }
1707 
1708 bool isSI(const MCSubtargetInfo &STI) {
1709  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
1710 }
1711 
1712 bool isCI(const MCSubtargetInfo &STI) {
1713  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
1714 }
1715 
1716 bool isVI(const MCSubtargetInfo &STI) {
1717  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
1718 }
1719 
1720 bool isGFX9(const MCSubtargetInfo &STI) {
1721  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
1722 }
1723 
1724 bool isGFX9_GFX10(const MCSubtargetInfo &STI) {
1725  return isGFX9(STI) || isGFX10(STI);
1726 }
1727 
1728 bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) {
1729  return isVI(STI) || isGFX9(STI) || isGFX10(STI);
1730 }
1731 
1732 bool isGFX8Plus(const MCSubtargetInfo &STI) {
1733  return isVI(STI) || isGFX9Plus(STI);
1734 }
1735 
1736 bool isGFX9Plus(const MCSubtargetInfo &STI) {
1737  return isGFX9(STI) || isGFX10Plus(STI);
1738 }
1739 
1740 bool isGFX10(const MCSubtargetInfo &STI) {
1741  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
1742 }
1743 
1744 bool isGFX10Plus(const MCSubtargetInfo &STI) {
1745  return isGFX10(STI) || isGFX11Plus(STI);
1746 }
1747 
1748 bool isGFX11(const MCSubtargetInfo &STI) {
1749  return STI.getFeatureBits()[AMDGPU::FeatureGFX11];
1750 }
1751 
1752 bool isGFX11Plus(const MCSubtargetInfo &STI) {
1753  return isGFX11(STI);
1754 }
1755 
1756 bool isNotGFX11Plus(const MCSubtargetInfo &STI) {
1757  return !isGFX11Plus(STI);
1758 }
1759 
1760 bool isNotGFX10Plus(const MCSubtargetInfo &STI) {
1761  return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
1762 }
1763 
1764 bool isGFX10Before1030(const MCSubtargetInfo &STI) {
1765  return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
1766 }
1767 
1768 bool isGCN3Encoding(const MCSubtargetInfo &STI) {
1769  return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
1770 }
1771 
1772 bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
1773  return STI.getFeatureBits()[AMDGPU::FeatureGFX10_AEncoding];
1774 }
1775 
1776 bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
1777  return STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding];
1778 }
1779 
1780 bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
1781  return STI.getFeatureBits()[AMDGPU::FeatureGFX10_3Insts];
1782 }
1783 
1784 bool isGFX90A(const MCSubtargetInfo &STI) {
1785  return STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts];
1786 }
1787 
1788 bool isGFX940(const MCSubtargetInfo &STI) {
1789  return STI.getFeatureBits()[AMDGPU::FeatureGFX940Insts];
1790 }
1791 
1792 bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
1793  return STI.getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch];
1794 }
1795 
1796 bool hasMAIInsts(const MCSubtargetInfo &STI) {
1797  return STI.getFeatureBits()[AMDGPU::FeatureMAIInsts];
1798 }
1799 
1800 bool hasVOPD(const MCSubtargetInfo &STI) {
1801  return STI.getFeatureBits()[AMDGPU::FeatureVOPD];
1802 }
1803 
1804 int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
1805  int32_t ArgNumVGPR) {
1806  if (has90AInsts && ArgNumAGPR)
1807  return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
1808  return std::max(ArgNumVGPR, ArgNumAGPR);
1809 }
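// Worked example (illustrative): on gfx90a (has90AInsts == true) AGPRs are
// allocated after the VGPRs, which are rounded up to a multiple of 4, so
// getTotalNumVGPRs(true, /*ArgNumAGPR=*/6, /*ArgNumVGPR=*/10) == 12 + 6 == 18.
// Without gfx90a AGPR/VGPR unification it is simply max(10, 6) == 10.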
1810 
1811 bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
1812  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
1813  const unsigned FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
1814  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
1815  Reg == AMDGPU::SCC;
1816 }
1817 
1818 #define MAP_REG2REG \
1819  using namespace AMDGPU; \
1820  switch(Reg) { \
1821  default: return Reg; \
1822  CASE_CI_VI(FLAT_SCR) \
1823  CASE_CI_VI(FLAT_SCR_LO) \
1824  CASE_CI_VI(FLAT_SCR_HI) \
1825  CASE_VI_GFX9PLUS(TTMP0) \
1826  CASE_VI_GFX9PLUS(TTMP1) \
1827  CASE_VI_GFX9PLUS(TTMP2) \
1828  CASE_VI_GFX9PLUS(TTMP3) \
1829  CASE_VI_GFX9PLUS(TTMP4) \
1830  CASE_VI_GFX9PLUS(TTMP5) \
1831  CASE_VI_GFX9PLUS(TTMP6) \
1832  CASE_VI_GFX9PLUS(TTMP7) \
1833  CASE_VI_GFX9PLUS(TTMP8) \
1834  CASE_VI_GFX9PLUS(TTMP9) \
1835  CASE_VI_GFX9PLUS(TTMP10) \
1836  CASE_VI_GFX9PLUS(TTMP11) \
1837  CASE_VI_GFX9PLUS(TTMP12) \
1838  CASE_VI_GFX9PLUS(TTMP13) \
1839  CASE_VI_GFX9PLUS(TTMP14) \
1840  CASE_VI_GFX9PLUS(TTMP15) \
1841  CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
1842  CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
1843  CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
1844  CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
1845  CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
1846  CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
1847  CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
1848  CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
1849  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
1850  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
1851  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
1852  CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
1853  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
1854  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
1855  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
1856  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
1857  CASE_GFXPRE11_GFX11PLUS(M0) \
1858  CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
1859  CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \
1860  }
1861 
1862 #define CASE_CI_VI(node) \
1863  assert(!isSI(STI)); \
1864  case node: return isCI(STI) ? node##_ci : node##_vi;
1865 
1866 #define CASE_VI_GFX9PLUS(node) \
1867  case node: return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;
1868 
1869 #define CASE_GFXPRE11_GFX11PLUS(node) \
1870  case node: return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;
1871 
1872 #define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
1873  case node: return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;
1874 
1875 unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
1876  if (STI.getTargetTriple().getArch() == Triple::r600)
1877  return Reg;
1878  MAP_REG2REG
1879 }
1880 
1881 #undef CASE_CI_VI
1882 #undef CASE_VI_GFX9PLUS
1883 #undef CASE_GFXPRE11_GFX11PLUS
1884 #undef CASE_GFXPRE11_GFX11PLUS_TO
1885 
1886 #define CASE_CI_VI(node) case node##_ci: case node##_vi: return node;
1887 #define CASE_VI_GFX9PLUS(node) case node##_vi: case node##_gfx9plus: return node;
1888 #define CASE_GFXPRE11_GFX11PLUS(node) case node##_gfx11plus: case node##_gfxpre11: return node;
1889 #define CASE_GFXPRE11_GFX11PLUS_TO(node, result)
1890 
1891 unsigned mc2PseudoReg(unsigned Reg) {
1892  MAP_REG2REG
1893 }
1894 
1895 #undef CASE_CI_VI
1896 #undef CASE_VI_GFX9PLUS
1897 #undef CASE_GFXPRE11_GFX11PLUS
1898 #undef CASE_GFXPRE11_GFX11PLUS_TO
1899 #undef MAP_REG2REG
1900 
1901 bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
1902  assert(OpNo < Desc.NumOperands);
1903  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
1904  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
1905  OpType <= AMDGPU::OPERAND_SRC_LAST;
1906 }
1907 
1908 bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
1909  assert(OpNo < Desc.NumOperands);
1910  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
1911  switch (OpType) {
1931  return true;
1932  default:
1933  return false;
1934  }
1935 }
1936 
1937 bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
1938  assert(OpNo < Desc.NumOperands);
1939  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
1940  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
1941  OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
1942 }
1943 
1944 // Avoid using MCRegisterClass::getSize, since that function will go away
1945 // (move from MC* level to Target* level). Return size in bits.
1946 unsigned getRegBitWidth(unsigned RCID) {
1947  switch (RCID) {
1948  case AMDGPU::VGPR_LO16RegClassID:
1949  case AMDGPU::VGPR_HI16RegClassID:
1950  case AMDGPU::SGPR_LO16RegClassID:
1951  case AMDGPU::AGPR_LO16RegClassID:
1952  return 16;
1953  case AMDGPU::SGPR_32RegClassID:
1954  case AMDGPU::VGPR_32RegClassID:
1955  case AMDGPU::VRegOrLds_32RegClassID:
1956  case AMDGPU::AGPR_32RegClassID:
1957  case AMDGPU::VS_32RegClassID:
1958  case AMDGPU::AV_32RegClassID:
1959  case AMDGPU::SReg_32RegClassID:
1960  case AMDGPU::SReg_32_XM0RegClassID:
1961  case AMDGPU::SRegOrLds_32RegClassID:
1962  return 32;
1963  case AMDGPU::SGPR_64RegClassID:
1964  case AMDGPU::VS_64RegClassID:
1965  case AMDGPU::SReg_64RegClassID:
1966  case AMDGPU::VReg_64RegClassID:
1967  case AMDGPU::AReg_64RegClassID:
1968  case AMDGPU::SReg_64_XEXECRegClassID:
1969  case AMDGPU::VReg_64_Align2RegClassID:
1970  case AMDGPU::AReg_64_Align2RegClassID:
1971  case AMDGPU::AV_64RegClassID:
1972  case AMDGPU::AV_64_Align2RegClassID:
1973  return 64;
1974  case AMDGPU::SGPR_96RegClassID:
1975  case AMDGPU::SReg_96RegClassID:
1976  case AMDGPU::VReg_96RegClassID:
1977  case AMDGPU::AReg_96RegClassID:
1978  case AMDGPU::VReg_96_Align2RegClassID:
1979  case AMDGPU::AReg_96_Align2RegClassID:
1980  case AMDGPU::AV_96RegClassID:
1981  case AMDGPU::AV_96_Align2RegClassID:
1982  return 96;
1983  case AMDGPU::SGPR_128RegClassID:
1984  case AMDGPU::SReg_128RegClassID:
1985  case AMDGPU::VReg_128RegClassID:
1986  case AMDGPU::AReg_128RegClassID:
1987  case AMDGPU::VReg_128_Align2RegClassID:
1988  case AMDGPU::AReg_128_Align2RegClassID:
1989  case AMDGPU::AV_128RegClassID:
1990  case AMDGPU::AV_128_Align2RegClassID:
1991  return 128;
1992  case AMDGPU::SGPR_160RegClassID:
1993  case AMDGPU::SReg_160RegClassID:
1994  case AMDGPU::VReg_160RegClassID:
1995  case AMDGPU::AReg_160RegClassID:
1996  case AMDGPU::VReg_160_Align2RegClassID:
1997  case AMDGPU::AReg_160_Align2RegClassID:
1998  case AMDGPU::AV_160RegClassID:
1999  case AMDGPU::AV_160_Align2RegClassID:
2000  return 160;
2001  case AMDGPU::SGPR_192RegClassID:
2002  case AMDGPU::SReg_192RegClassID:
2003  case AMDGPU::VReg_192RegClassID:
2004  case AMDGPU::AReg_192RegClassID:
2005  case AMDGPU::VReg_192_Align2RegClassID:
2006  case AMDGPU::AReg_192_Align2RegClassID:
2007  case AMDGPU::AV_192RegClassID:
2008  case AMDGPU::AV_192_Align2RegClassID:
2009  return 192;
2010  case AMDGPU::SGPR_224RegClassID:
2011  case AMDGPU::SReg_224RegClassID:
2012  case AMDGPU::VReg_224RegClassID:
2013  case AMDGPU::AReg_224RegClassID:
2014  case AMDGPU::VReg_224_Align2RegClassID:
2015  case AMDGPU::AReg_224_Align2RegClassID:
2016  case AMDGPU::AV_224RegClassID:
2017  case AMDGPU::AV_224_Align2RegClassID:
2018  return 224;
2019  case AMDGPU::SGPR_256RegClassID:
2020  case AMDGPU::SReg_256RegClassID:
2021  case AMDGPU::VReg_256RegClassID:
2022  case AMDGPU::AReg_256RegClassID:
2023  case AMDGPU::VReg_256_Align2RegClassID:
2024  case AMDGPU::AReg_256_Align2RegClassID:
2025  case AMDGPU::AV_256RegClassID:
2026  case AMDGPU::AV_256_Align2RegClassID:
2027  return 256;
2028  case AMDGPU::SGPR_512RegClassID:
2029  case AMDGPU::SReg_512RegClassID:
2030  case AMDGPU::VReg_512RegClassID:
2031  case AMDGPU::AReg_512RegClassID:
2032  case AMDGPU::VReg_512_Align2RegClassID:
2033  case AMDGPU::AReg_512_Align2RegClassID:
2034  case AMDGPU::AV_512RegClassID:
2035  case AMDGPU::AV_512_Align2RegClassID:
2036  return 512;
2037  case AMDGPU::SGPR_1024RegClassID:
2038  case AMDGPU::SReg_1024RegClassID:
2039  case AMDGPU::VReg_1024RegClassID:
2040  case AMDGPU::AReg_1024RegClassID:
2041  case AMDGPU::VReg_1024_Align2RegClassID:
2042  case AMDGPU::AReg_1024_Align2RegClassID:
2043  case AMDGPU::AV_1024RegClassID:
2044  case AMDGPU::AV_1024_Align2RegClassID:
2045  return 1024;
2046  default:
2047  llvm_unreachable("Unexpected register class");
2048  }
2049 }
2050 
2051 unsigned getRegBitWidth(const MCRegisterClass &RC) {
2052  return getRegBitWidth(RC.getID());
2053 }
2054 
2055 unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
2056  unsigned OpNo) {
2057  assert(OpNo < Desc.NumOperands);
2058  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
2059  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
2060 }
2061 
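For example (editor's note, not part of the original file): an operand constrained to a 64-bit class such as VReg_64 reports getRegBitWidth(...) == 64, so getRegOperandSize returns 8 bytes for it.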
2062 bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
2063  if (isInlinableIntLiteral(Literal))
2064  return true;
2065 
2066  uint64_t Val = static_cast<uint64_t>(Literal);
2067  return (Val == DoubleToBits(0.0)) ||
2068  (Val == DoubleToBits(1.0)) ||
2069  (Val == DoubleToBits(-1.0)) ||
2070  (Val == DoubleToBits(0.5)) ||
2071  (Val == DoubleToBits(-0.5)) ||
2072  (Val == DoubleToBits(2.0)) ||
2073  (Val == DoubleToBits(-2.0)) ||
2074  (Val == DoubleToBits(4.0)) ||
2075  (Val == DoubleToBits(-4.0)) ||
2076  (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
2077 }
2078 
2079 bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
2080  if (isInlinableIntLiteral(Literal))
2081  return true;
2082 
2083  // The actual type of the operand does not seem to matter as long
2084  // as the bits match one of the inline immediate values. For example:
2085  //
2086  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
2087  // so it is a legal inline immediate.
2088  //
2089  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
2090  // floating-point, so it is a legal inline immediate.
2091 
2092  uint32_t Val = static_cast<uint32_t>(Literal);
2093  return (Val == FloatToBits(0.0f)) ||
2094  (Val == FloatToBits(1.0f)) ||
2095  (Val == FloatToBits(-1.0f)) ||
2096  (Val == FloatToBits(0.5f)) ||
2097  (Val == FloatToBits(-0.5f)) ||
2098  (Val == FloatToBits(2.0f)) ||
2099  (Val == FloatToBits(-2.0f)) ||
2100  (Val == FloatToBits(4.0f)) ||
2101  (Val == FloatToBits(-4.0f)) ||
2102  (Val == 0x3e22f983 && HasInv2Pi);
2103 }
2104 
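A minimal standalone sketch of the 32-bit bit-pattern test above (editor's addition; floatToBits here is a local stand-in for llvm::FloatToBits, not code from this file):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  static uint32_t floatToBits(float F) {
    uint32_t Bits;
    std::memcpy(&Bits, &F, sizeof(Bits));  // reinterpret the float's bits
    return Bits;
  }

  int main() {
    assert(floatToBits(1.0f) == 0x3f800000u);  // 1.0f is an inline immediate
    assert(floatToBits(-4.0f) == 0xc0800000u); // so is -4.0f
    // 0x3e22f983 (1/2pi) is accepted only when HasInv2Pi is true.
  }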
2105 bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
2106  if (!HasInv2Pi)
2107  return false;
2108 
2109  if (isInlinableIntLiteral(Literal))
2110  return true;
2111 
2112  uint16_t Val = static_cast<uint16_t>(Literal);
2113  return Val == 0x3C00 || // 1.0
2114  Val == 0xBC00 || // -1.0
2115  Val == 0x3800 || // 0.5
2116  Val == 0xB800 || // -0.5
2117  Val == 0x4000 || // 2.0
2118  Val == 0xC000 || // -2.0
2119  Val == 0x4400 || // 4.0
2120  Val == 0xC400 || // -4.0
2121  Val == 0x3118; // 1/2pi
2122 }
2123 
2124 bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
2125  assert(HasInv2Pi);
2126 
2127  if (isInt<16>(Literal) || isUInt<16>(Literal)) {
2128  int16_t Trunc = static_cast<int16_t>(Literal);
2129  return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
2130  }
2131  if (!(Literal & 0xffff))
2132  return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);
2133 
2134  int16_t Lo16 = static_cast<int16_t>(Literal);
2135  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2136  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
2137 }
2138 
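A packed example (editor's note, not part of the original file): 0x3C00 is 1.0 in half precision, so the 32-bit literal 0x3C003C00 (1.0 in both halves) passes isInlinableLiteralV216, and 0x00003C00 passes via the isUInt<16> path; 0x3C003800 (1.0 in the high half, 0.5 in the low half) fails because the low half is nonzero and the two halves differ.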
2139 bool isInlinableIntLiteralV216(int32_t Literal) {
2140  int16_t Lo16 = static_cast<int16_t>(Literal);
2141  if (isInt<16>(Literal) || isUInt<16>(Literal))
2142  return isInlinableIntLiteral(Lo16);
2143 
2144  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2145  if (!(Literal & 0xffff))
2146  return isInlinableIntLiteral(Hi16);
2147  return Lo16 == Hi16 && isInlinableIntLiteral(Lo16);
2148 }
2149 
2150 bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi) {
2151  assert(HasInv2Pi);
2152 
2153  int16_t Lo16 = static_cast<int16_t>(Literal);
2154  if (isInt<16>(Literal) || isUInt<16>(Literal))
2155  return true;
2156 
2157  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2158  if (!(Literal & 0xffff))
2159  return true;
2160  return Lo16 == Hi16;
2161 }
2162 
2163 bool isArgPassedInSGPR(const Argument *A) {
2164  const Function *F = A->getParent();
2165 
2166  // Arguments to compute shaders are never a source of divergence.
2167  CallingConv::ID CC = F->getCallingConv();
2168  switch (CC) {
 2169  case CallingConv::AMDGPU_KERNEL:
 2170  case CallingConv::SPIR_KERNEL:
 2171  return true;
 2172  case CallingConv::AMDGPU_VS:
 2173  case CallingConv::AMDGPU_LS:
 2174  case CallingConv::AMDGPU_HS:
 2175  case CallingConv::AMDGPU_ES:
 2176  case CallingConv::AMDGPU_GS:
 2177  case CallingConv::AMDGPU_PS:
 2178  case CallingConv::AMDGPU_CS:
 2179  case CallingConv::AMDGPU_Gfx:
2180  // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
2181  // Everything else is in VGPRs.
2182  return F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::InReg) ||
2183  F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::ByVal);
2184  default:
2185  // TODO: Should calls support inreg for SGPR inputs?
2186  return false;
2187  }
2188 }
2189 
2190 static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
2191  return isGCN3Encoding(ST) || isGFX10Plus(ST);
2192 }
2193 
 2194 static bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
 2195  return isGFX9Plus(ST);
2196 }
2197 
 2198 bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
 2199  int64_t EncodedOffset) {
2200  return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
2201  : isUInt<8>(EncodedOffset);
2202 }
2203 
 2204 bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
 2205  int64_t EncodedOffset,
2206  bool IsBuffer) {
2207  return !IsBuffer &&
 2208  hasSMRDSignedImmOffset(ST) &&
 2209  isInt<21>(EncodedOffset);
2210 }
2211 
2212 static bool isDwordAligned(uint64_t ByteOffset) {
2213  return (ByteOffset & 3) == 0;
2214 }
2215 
 2216 uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
 2217  uint64_t ByteOffset) {
2218  if (hasSMEMByteOffset(ST))
2219  return ByteOffset;
2220 
2221  assert(isDwordAligned(ByteOffset));
2222  return ByteOffset >> 2;
2223 }
2224 
 2225 Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
 2226  int64_t ByteOffset, bool IsBuffer) {
2227  // The signed version is always a byte offset.
2228  if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
 2229  assert(hasSMEMByteOffset(ST));
 2230  return isInt<20>(ByteOffset) ? Optional<int64_t>(ByteOffset) : None;
2231  }
2232 
2233  if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
2234  return None;
2235 
2236  int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
2237  return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
2238  ? Optional<int64_t>(EncodedOffset)
2239  : None;
2240 }
2241 
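Worked example (editor's addition): on SI, which has neither byte offsets nor signed offsets, a byte offset of 16 is dword-aligned and is encoded as 16 >> 2 = 4, which satisfies the isUInt<8> legality check, while a byte offset of 18 is rejected because it is not dword-aligned. On GFX9+, a non-buffer access takes the signed path and the byte offset is encoded directly, provided it fits the isInt<20> range.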
 2242 Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
 2243  int64_t ByteOffset) {
2244  if (!isCI(ST) || !isDwordAligned(ByteOffset))
2245  return None;
2246 
2247  int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
2248  return isUInt<32>(EncodedOffset) ? Optional<int64_t>(EncodedOffset) : None;
2249 }
2250 
 2251 unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST, bool Signed) {
 2252  // Address offset is 12-bit signed for GFX10, 13-bit for GFX9 and GFX11+.
2253  if (AMDGPU::isGFX10(ST))
2254  return Signed ? 12 : 11;
2255 
2256  return Signed ? 13 : 12;
2257 }
2258 
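For example (editor's note, not part of the original file): on GFX10 a signed flat offset has 12 bits, i.e. a range of [-2048, 2047], while GFX9 and GFX11+ allow 13 signed bits, i.e. [-4096, 4095].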
2259 // Given Imm, split it into the values to put into the SOffset and ImmOffset
2260 // fields in an MUBUF instruction. Return false if it is not possible (due to a
2261 // hardware bug needing a workaround).
2262 //
2263 // The required alignment ensures that individual address components remain
2264 // aligned if they are aligned to begin with. It also ensures that additional
2265 // offsets within the given alignment can be added to the resulting ImmOffset.
2266 bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
2267  const GCNSubtarget *Subtarget, Align Alignment) {
2268  const uint32_t MaxImm = alignDown(4095, Alignment.value());
2269  uint32_t Overflow = 0;
2270 
2271  if (Imm > MaxImm) {
2272  if (Imm <= MaxImm + 64) {
2273  // Use an SOffset inline constant for 4..64
2274  Overflow = Imm - MaxImm;
2275  Imm = MaxImm;
2276  } else {
2277  // Try to keep the same value in SOffset for adjacent loads, so that
2278  // the corresponding register contents can be re-used.
2279  //
2280  // Load values with all low-bits (except for alignment bits) set into
2281  // SOffset, so that a larger range of values can be covered using
2282  // s_movk_i32.
2283  //
2284  // Atomic operations fail to work correctly when individual address
2285  // components are unaligned, even if their sum is aligned.
2286  uint32_t High = (Imm + Alignment.value()) & ~4095;
2287  uint32_t Low = (Imm + Alignment.value()) & 4095;
2288  Imm = Low;
2289  Overflow = High - Alignment.value();
2290  }
2291  }
2292 
2293  // There is a hardware bug in SI and CI which prevents address clamping in
2294  // MUBUF instructions from working correctly with SOffsets. The immediate
2295  // offset is unaffected.
2296  if (Overflow > 0 &&
 2297  Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
 2298  return false;
2299 
2300  ImmOffset = Imm;
2301  SOffset = Overflow;
2302  return true;
2303 }
2304 
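Worked example (editor's addition): with Alignment = 4, MaxImm = alignDown(4095, 4) = 4092. For Imm = 4100, the small-overflow branch applies (4100 <= 4092 + 64), giving SOffset = 8 and ImmOffset = 4092. For Imm = 5000, the general branch computes High = (5000 + 4) & ~4095 = 4096 and Low = 5004 & 4095 = 908, giving ImmOffset = 908 and SOffset = 4096 - 4 = 4092. In both cases SOffset + ImmOffset reproduces the original Imm.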
 2305 SIModeRegisterDefaults::SIModeRegisterDefaults(const Function &F) {
 2306  *this = getDefaultForCallingConv(F.getCallingConv());
2307 
2308  StringRef IEEEAttr = F.getFnAttribute("amdgpu-ieee").getValueAsString();
2309  if (!IEEEAttr.empty())
2310  IEEE = IEEEAttr == "true";
2311 
2312  StringRef DX10ClampAttr
2313  = F.getFnAttribute("amdgpu-dx10-clamp").getValueAsString();
2314  if (!DX10ClampAttr.empty())
2315  DX10Clamp = DX10ClampAttr == "true";
2316 
2317  StringRef DenormF32Attr = F.getFnAttribute("denormal-fp-math-f32").getValueAsString();
2318  if (!DenormF32Attr.empty()) {
2319  DenormalMode DenormMode = parseDenormalFPAttribute(DenormF32Attr);
 2320  FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE;
 2321  FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
 2322  }
2323 
2324  StringRef DenormAttr = F.getFnAttribute("denormal-fp-math").getValueAsString();
2325  if (!DenormAttr.empty()) {
2326  DenormalMode DenormMode = parseDenormalFPAttribute(DenormAttr);
2327 
2328  if (DenormF32Attr.empty()) {
 2329  FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE;
 2330  FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
 2331  }
2332 
 2333  FP64FP16InputDenormals = DenormMode.Input == DenormalMode::IEEE;
 2334  FP64FP16OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
 2335  }
2336 }
2337 
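As a rough illustration of the attribute parsing used above (editor's sketch, assuming the parseDenormalFPAttribute API from llvm/ADT/FloatingPointMode.h; not part of this file):

  #include "llvm/ADT/FloatingPointMode.h"
  using namespace llvm;

  // "preserve-sign,preserve-sign" means f32 denormals are flushed for both
  // inputs and outputs, so the FP32*Denormals flags above would become false.
  DenormalMode Mode = parseDenormalFPAttribute("preserve-sign,preserve-sign");
  bool InputsKeepDenormals = Mode.Input == DenormalMode::IEEE;   // false here
  bool OutputsKeepDenormals = Mode.Output == DenormalMode::IEEE; // false here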
2338 namespace {
2339 
2340 struct SourceOfDivergence {
2341  unsigned Intr;
2342 };
2343 const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);
2344 
2345 #define GET_SourcesOfDivergence_IMPL
2346 #define GET_Gfx9BufferFormat_IMPL
2347 #define GET_Gfx10BufferFormat_IMPL
2348 #define GET_Gfx11PlusBufferFormat_IMPL
2349 #include "AMDGPUGenSearchableTables.inc"
2350 
2351 } // end anonymous namespace
2352 
2353 bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
2354  return lookupSourceOfDivergence(IntrID);
2355 }
2356 
2357 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
2358  uint8_t NumComponents,
2359  uint8_t NumFormat,
2360  const MCSubtargetInfo &STI) {
2361  return isGFX11Plus(STI)
2362  ? getGfx11PlusBufferFormatInfo(BitsPerComp, NumComponents,
2363  NumFormat)
2364  : isGFX10(STI) ? getGfx10BufferFormatInfo(BitsPerComp,
2365  NumComponents, NumFormat)
2366  : getGfx9BufferFormatInfo(BitsPerComp,
2367  NumComponents, NumFormat);
2368 }
2369 
 2370 const GcnBufferFormatInfo *getGcnBufferFormatInfo(unsigned Format,
 2371  const MCSubtargetInfo &STI) {
2372  return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format)
2373  : isGFX10(STI) ? getGfx10BufferFormatInfo(Format)
2374  : getGfx9BufferFormatInfo(Format);
2375 }
2376 
2377 } // namespace AMDGPU
2378 
 2379 raw_ostream &operator<<(raw_ostream &OS,
 2380  const AMDGPU::IsaInfo::TargetIDSetting S) {
 2381  switch (S) {
 2382  case AMDGPU::IsaInfo::TargetIDSetting::Unsupported:
 2383  OS << "Unsupported";
 2384  break;
 2385  case AMDGPU::IsaInfo::TargetIDSetting::Any:
 2386  OS << "Any";
 2387  break;
 2388  case AMDGPU::IsaInfo::TargetIDSetting::Off:
 2389  OS << "Off";
 2390  break;
 2391  case AMDGPU::IsaInfo::TargetIDSetting::On:
 2392  OS << "On";
2393  break;
2394  }
2395  return OS;
2396 }
2397 
2398 } // namespace llvm
llvm::AMDGPU::OPERAND_REG_INLINE_C_FP64
@ OPERAND_REG_INLINE_C_FP64
Definition: SIDefines.h:173
llvm::AMDGPU::MTBUFFormat::isValidUnifiedFormat
bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1463
llvm::AMDGPU::Hwreg::encodeHwreg
uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width)
Definition: AMDGPUBaseInfo.cpp:1287
llvm::AMDGPU::mapWMMA2AddrTo3AddrOpcode
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:401
i
i
Definition: README.txt:29
llvm::AMDGPU::MUBUFInfo::elements
uint8_t elements
Definition: AMDGPUBaseInfo.cpp:250
llvm::AMDGPU::Hwreg::OPR_SIZE
const int OPR_SIZE
Definition: AMDGPUAsmUtils.cpp:134
llvm::AMDGPU::OPR_ID_UNSUPPORTED
const int OPR_ID_UNSUPPORTED
Definition: AMDGPUAsmUtils.h:24
llvm::AMDGPU::getMUBUFIsBufferInv
bool getMUBUFIsBufferInv(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:362
llvm::alignTo
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:156
llvm::AMDGPU::getMCReg
unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI)
If Reg is a pseudo reg, return the correct hardware register given STI otherwise return Reg.
Definition: AMDGPUBaseInfo.cpp:1875
llvm::AMDGPU::isHsaAbiVersion3
bool isHsaAbiVersion3(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:130
llvm::AMDGPU::UfmtGFX10::UFMT_FIRST
@ UFMT_FIRST
Definition: SIDefines.h:631
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::AMDGPUTargetID
AMDGPUTargetID(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:420
llvm::AMDGPU::mc2PseudoReg
unsigned mc2PseudoReg(unsigned Reg)
Convert hardware register Reg to a pseudo register.
Definition: AMDGPUBaseInfo.cpp:1891
llvm::AMDGPU::VOPInfo::IsSingle
bool IsSingle
Definition: AMDGPUBaseInfo.cpp:273
Signed
@ Signed
Definition: NVPTXISelLowering.cpp:4637
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::AMDGPU::getMAIIsDGEMM
bool getMAIIsDGEMM(unsigned Opc)
Returns true if MAI operation is a double precision GEMM.
Definition: AMDGPUBaseInfo.cpp:391
llvm::AMDGPU::IsaInfo::getSGPRAllocGranule
unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:690
llvm::AMDGPU::getMUBUFHasSoffset
bool getMUBUFHasSoffset(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:357
llvm::LLVMContext::emitError
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
Definition: LLVMContext.cpp:266
llvm::AMDGPU::MTBUFFormat::NumFormat
NumFormat
Definition: SIDefines.h:497
llvm::CallingConv::AMDGPU_HS
@ AMDGPU_HS
Calling convention used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
Definition: CallingConv.h:223
llvm::AMDGPU::Hwreg::getHwreg
StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1293
llvm::AMDGPU::MTBUFFormat::getDefaultFormatEncoding
unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1488
llvm::AMDGPU::hasVOPD
bool hasVOPD(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1800
llvm::AMDGPU::MIMGBaseOpcodeInfo::Gradients
bool Gradients
Definition: AMDGPUBaseInfo.h:309
llvm::AMDGPU::HSAMD::Kernel::CodeProps::Key::NumSGPRs
constexpr char NumSGPRs[]
Key for Kernel::CodeProps::Metadata::mNumSGPRs.
Definition: AMDGPUMetadata.h:258
llvm::AMDGPU::Hwreg::getHwregId
int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1270
llvm::AMDGPU::SendMsg::msgRequiresOp
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1578
llvm::CallingConv::AMDGPU_VS
@ AMDGPU_VS
Calling convention used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (ve...
Definition: CallingConv.h:204
llvm::AMDGPU::MIMGBaseOpcodeInfo::LodOrClampOrMip
bool LodOrClampOrMip
Definition: AMDGPUBaseInfo.h:312
llvm::AMDGPU::MTBUFFormat::getDfmtName
StringRef getDfmtName(unsigned Id)
Definition: AMDGPUBaseInfo.cpp:1395
llvm::AMDGPU::SendMsg::encodeMsg
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId)
Definition: AMDGPUBaseInfo.cpp:1603
llvm::AMDGPU::isGFX11Plus
bool isGFX11Plus(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1752
llvm::AMDGPU::getIsaVersion
IsaVersion getIsaVersion(StringRef GPU)
Definition: TargetParser.cpp:193
llvm::AMDGPU::decodeLgkmcnt
unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt)
Definition: AMDGPUBaseInfo.cpp:1040
llvm::AMDGPU::Hwreg::ID_SHIFT_
@ ID_SHIFT_
Definition: SIDefines.h:411
llvm::StringRef::endswith
LLVM_NODISCARD bool endswith(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition: StringRef.h:301
llvm::AMDGPU::SendMsg::OpGsSymbolic
const char *const OpGsSymbolic[OP_GS_LAST_]
Definition: AMDGPUAsmUtils.cpp:77
llvm::Function
Definition: Function.h:60
llvm::AMDGPU::getMUBUFBaseOpcode
int getMUBUFBaseOpcode(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:332
llvm::Attribute
Definition: Attributes.h:65
llvm::AMDGPU::SIModeRegisterDefaults::FP32OutputDenormals
bool FP32OutputDenormals
Definition: AMDGPUBaseInfo.h:1012
llvm::AMDGPU::getSMRDEncodedOffset
Optional< int64_t > getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset, bool IsBuffer)
Definition: AMDGPUBaseInfo.cpp:2225
llvm::AMDGPU::MTBUFFormat::DFMT_MASK
@ DFMT_MASK
Definition: SIDefines.h:494
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::isSramEccOnOrAny
bool isSramEccOnOrAny() const
Definition: AMDGPUBaseInfo.h:150
llvm::AMDGPU::getMCOpcode
int getMCOpcode(uint16_t Opcode, unsigned Gen)
Definition: AMDGPUBaseInfo.cpp:414
llvm::AMDGPU::decodeVmcnt
unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt)
Definition: AMDGPUBaseInfo.cpp:1027
llvm::raw_string_ostream
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:632
llvm::AMDGPU::hasSRAMECC
bool hasSRAMECC(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1687
llvm::AMDGPU::SIModeRegisterDefaults::IEEE
bool IEEE
Floating point opcodes that support exception flag gathering quiet and propagate signaling NaN inputs...
Definition: AMDGPUBaseInfo.h:1003
llvm::AMDGPU::hasXNACK
bool hasXNACK(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1683
llvm::AMDGPU::IsaInfo::TargetIDSetting::Unsupported
@ Unsupported
llvm::AMDGPU::OPERAND_REG_IMM_V2FP16
@ OPERAND_REG_IMM_V2FP16
Definition: SIDefines.h:162
High
uint64_t High
Definition: NVVMIntrRange.cpp:61
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1185
llvm::AMDGPU::IsaInfo::TargetIDSetting::On
@ On
llvm::AMDGPU::isGFX10_BEncoding
bool isGFX10_BEncoding(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1776
llvm::Triple::amdgcn
@ amdgcn
Definition: Triple.h:74
llvm::AMDGPU::CustomOperand::Name
StringLiteral Name
Definition: AMDGPUAsmUtils.h:29
llvm::AMDGPU::isGFX10_AEncoding
bool isGFX10_AEncoding(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1772
amd_kernel_code_t::compute_pgm_resource_registers
uint64_t compute_pgm_resource_registers
Shader program settings for CS.
Definition: AMDKernelCodeT.h:558
llvm::AMDGPU::getVOP2IsSingle
bool getVOP2IsSingle(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:377
llvm::AMDGPU::OPR_VAL_INVALID
const int OPR_VAL_INVALID
Definition: AMDGPUAsmUtils.h:26
llvm::AMDGPU::MIMGDimInfo
Definition: AMDGPUBaseInfo.h:324
llvm::X86Disassembler::Reg
Reg
All possible values of the reg field in the ModR/M byte.
Definition: X86DisassemblerDecoder.h:462
llvm::AMDGPU::hasArchitectedFlatScratch
bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1792
llvm::AMDGPU::MTBUFFormat::encodeDfmtNfmt
int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt)
Definition: AMDGPUBaseInfo.cpp:1433
llvm::AMDGPU::getHsaAbiVersion
Optional< uint8_t > getHsaAbiVersion(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:105
llvm::AMDGPU::MTBUFFormat::NFMT_UNDEF
@ NFMT_UNDEF
Definition: SIDefines.h:511
llvm::AMDGPUSubtarget::SEA_ISLANDS
@ SEA_ISLANDS
Definition: AMDGPUSubtarget.h:38
llvm::AMDGPU::Exp::ET_NULL
@ ET_NULL
Definition: SIDefines.h:862
llvm::AMDGPU::SendMsg::STREAM_ID_MASK_
@ STREAM_ID_MASK_
Definition: SIDefines.h:378
llvm::Triple
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
llvm::AMDGPU::SendMsg::OpSysSymbolic
const char *const OpSysSymbolic[OP_SYS_LAST_]
Definition: AMDGPUAsmUtils.cpp:69
llvm::AMDGPU::getSMRDEncodedLiteralOffset32
Optional< int64_t > getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST, int64_t ByteOffset)
Definition: AMDGPUBaseInfo.cpp:2242
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:139
llvm::AMDGPU::getAmdhsaCodeObjectVersion
unsigned getAmdhsaCodeObjectVersion()
Definition: AMDGPUBaseInfo.cpp:153
llvm::AMDGPU::MTBUFInfo::has_vaddr
bool has_vaddr
Definition: AMDGPUBaseInfo.cpp:261
llvm::AMDGPU::SendMsg::OP_GS_LAST_
@ OP_GS_LAST_
Definition: SIDefines.h:360
llvm::AMDGPU::MTBUFFormat::isValidNfmt
bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1429
llvm::AMDGPU::DepCtr::encodeDepCtr
int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1256
llvm::AMDGPU::isGFX11
bool isGFX11(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1748
Shift
bool Shift
Definition: README.txt:468
llvm::AMDGPU::Hwreg::ID_MASK_
@ ID_MASK_
Definition: SIDefines.h:413
llvm::AMDGPU::Exp::ET_POS0
@ ET_POS0
Definition: SIDefines.h:863
llvm::AMDGPU::CustomOperand::Encoding
int Encoding
Definition: AMDGPUAsmUtils.h:30
llvm::AMDGPU::MTBUFFormat::DFMT_NFMT_MAX
@ DFMT_NFMT_MAX
Definition: SIDefines.h:526
llvm::FloatToBits
uint32_t FloatToBits(float Float)
This function takes a float and returns the bit equivalent 32-bit integer.
Definition: MathExtras.h:690
llvm::AMDGPU::IsaInfo::getNumExtraSGPRs
unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed, bool FlatScrUsed, bool XNACKUsed)
Definition: AMDGPUBaseInfo.cpp:756
llvm::AMDGPU::isGFX10
bool isGFX10(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1740
llvm::AMDGPU::IsaInfo::getTotalNumVGPRs
unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:820
LimitTo128VGPRs
static llvm::cl::opt< bool > LimitTo128VGPRs("amdgpu-limit-to-128-vgprs", llvm::cl::Hidden, llvm::cl::desc("Never use more than 128 VGPRs"))
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::setTargetIDFromTargetIDStream
void setTargetIDFromTargetIDStream(StringRef TargetID)
Definition: AMDGPUBaseInfo.cpp:497
llvm::AMDGPU::IsaInfo::getMinWavesPerEU
unsigned getMinWavesPerEU(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:657
llvm::Optional< uint8_t >
llvm::AMDGPU::SIModeRegisterDefaults::getDefaultForCallingConv
static SIModeRegisterDefaults getDefaultForCallingConv(CallingConv::ID CC)
Definition: AMDGPUBaseInfo.h:1029
llvm::AMDGPU::MUBUFInfo::has_soffset
bool has_soffset
Definition: AMDGPUBaseInfo.cpp:253
llvm::MCRegisterClass::contains
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
Definition: MCRegisterInfo.h:68
llvm::AMDGPU::SIModeRegisterDefaults::FP32InputDenormals
bool FP32InputDenormals
If this is set, neither input or output denormals are flushed for most f32 instructions.
Definition: AMDGPUBaseInfo.h:1011
llvm::AMDGPU::IsaInfo::TargetIDSetting::Any
@ Any
llvm::GCNSubtarget
Definition: GCNSubtarget.h:31
AMDGPUAsmUtils.h
llvm::max
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:337
llvm::errs
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
Definition: raw_ostream.cpp:893
llvm::AMDGPU::getVmcntBitMask
unsigned getVmcntBitMask(const IsaVersion &Version)
Definition: AMDGPUBaseInfo.cpp:1001
llvm::AMDGPU::isGlobalSegment
bool isGlobalSegment(const GlobalValue *GV)
Definition: AMDGPUBaseInfo.cpp:947
llvm::SPIRV::Dim
Dim
Definition: SPIRVBaseInfo.h:279
llvm::AMDGPU::hasGFX10_3Insts
bool hasGFX10_3Insts(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1780
TargetParser.h
llvm::AMDGPU::IsaInfo::getMaxNumVGPRs
unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
Definition: AMDGPUBaseInfo.cpp:854
llvm::AMDGPU::Exp::ET_PARAM0
@ ET_PARAM0
Definition: SIDefines.h:870
llvm::AMDGPU::getWaitcntBitMask
unsigned getWaitcntBitMask(const IsaVersion &Version)
Definition: AMDGPUBaseInfo.cpp:1015
llvm::AMDGPU::isIntrinsicSourceOfDivergence
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
Definition: AMDGPUBaseInfo.cpp:2353
llvm::amdhsa::kernel_descriptor_t::compute_pgm_rsrc2
uint32_t compute_pgm_rsrc2
Definition: AMDHSAKernelDescriptor.h:178
llvm::AMDGPU::SendMsg::MSG_SIZE
const int MSG_SIZE
Definition: AMDGPUAsmUtils.cpp:65
llvm::CallingConv::AMDGPU_Gfx
@ AMDGPU_Gfx
Calling convention used for AMD graphics targets.
Definition: CallingConv.h:250
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1628
llvm::AMDGPU::getMTBUFOpcode
int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements)
Definition: AMDGPUBaseInfo.cpp:307
llvm::AMDGPU::MTBUFFormat::getNfmtLookupTable
static const StringLiteral * getNfmtLookupTable(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1400
AmdhsaCodeObjectVersion
static llvm::cl::opt< unsigned > AmdhsaCodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden, llvm::cl::desc("AMDHSA Code Object Version"), llvm::cl::init(4))
llvm::AMDGPU::MUBUFInfo::Opcode
uint16_t Opcode
Definition: AMDGPUBaseInfo.cpp:248
llvm::AMDGPU::MIMGInfo::VAddrDwords
uint8_t VAddrDwords
Definition: AMDGPUBaseInfo.h:405
llvm::AMDGPU::IsaInfo::getMaxWorkGroupsPerCU
unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
Definition: AMDGPUBaseInfo.cpp:645
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::AMDGPU::MTBUFFormat::isValidDfmtNfmt
bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1422
llvm::AMDGPU::SendMsg::ID_MASK_PreGFX11_
@ ID_MASK_PreGFX11_
Definition: SIDefines.h:344
llvm::AMDGPU::DepCtr::decodeDepCtr
bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1250
AMDHSAKernelDescriptor.h
llvm::AMDGPU::HSAMD::V3::VersionMajor
constexpr uint32_t VersionMajor
HSA metadata major version.
Definition: AMDGPUMetadata.h:459
llvm::AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG
@ FIXED_NUM_SGPRS_FOR_INIT_BUG
Definition: AMDGPUBaseInfo.h:94
llvm::MCRegisterClass
MCRegisterClass - Base class of TargetRegisterClass.
Definition: MCRegisterInfo.h:31
llvm::AMDGPUAS::CONSTANT_ADDRESS_32BIT
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
Definition: AMDGPU.h:369
llvm::AMDGPU::getDefaultCustomOperandEncoding
static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr, int Size, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1154
llvm::AMDGPU::IsaInfo::TargetIDSetting
TargetIDSetting
Definition: AMDGPUBaseInfo.h:98
llvm::parseDenormalFPAttribute
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
Definition: FloatingPointMode.h:176
Context
LLVMContext & Context
Definition: NVVMIntrRange.cpp:66
llvm::AMDGPU::Hwreg::decodeHwreg
void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width)
Definition: AMDGPUBaseInfo.cpp:1298
llvm::AMDGPU::IsaVersion
Instruction set architecture version.
Definition: TargetParser.h:113
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::AMDGPU::IsaInfo::getSGPREncodingGranule
unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:699
CommandLine.h
llvm::AMDGPU::MTBUFFormat::NfmtSymbolicVI
const StringLiteral NfmtSymbolicVI[]
Definition: AMDGPUAsmUtils.cpp:182
llvm::AMDGPU::isGFX90A
bool isGFX90A(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1784
llvm::AMDGPU::OPR_ID_DUPLICATE
const int OPR_ID_DUPLICATE
Definition: AMDGPUAsmUtils.h:25
llvm::AMDGPU::SendMsg::OP_GS_FIRST_
@ OP_GS_FIRST_
Definition: SIDefines.h:361
llvm::AMDGPU::OPERAND_REG_IMM_FP32
@ OPERAND_REG_IMM_FP32
Definition: SIDefines.h:157
llvm::AMDGPU::SendMsg::msgSupportsStream
bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1584
llvm::AMDGPU::MTBUFFormat::UfmtSymbolicGFX11
const StringLiteral UfmtSymbolicGFX11[]
Definition: AMDGPUAsmUtils.cpp:381
llvm::AMDGPU::MUBUFInfo::has_srsrc
bool has_srsrc
Definition: AMDGPUBaseInfo.cpp:252
llvm::StringLiteral
A wrapper around a string literal that serves as a proxy for constructing global tables of StringRefs...
Definition: StringRef.h:914
llvm::AMDGPU::getMTBUFHasSrsrc
bool getMTBUFHasSrsrc(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:322
GlobalValue.h
ELF.h
llvm::DenormalMode::Input
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
Definition: FloatingPointMode.h:92
llvm::AMDGPU::isShader
bool isShader(CallingConv::ID cc)
Definition: AMDGPUBaseInfo.cpp:1630
llvm::PGSOQueryType::Test
@ Test
llvm::AMDGPU::IsaInfo::getWavesPerWorkGroup
unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
Definition: AMDGPUBaseInfo.cpp:685
llvm::ARM::InvalidIdx
@ InvalidIdx
Definition: ARMRegisterBankInfo.cpp:69
llvm::AMDGPU::MTBUFInfo::has_soffset
bool has_soffset
Definition: AMDGPUBaseInfo.cpp:263
amd_kernel_code_t::amd_kernel_code_version_major
uint32_t amd_kernel_code_version_major
Definition: AMDKernelCodeT.h:527
llvm::AMDGPU::Exp::ET_INVALID
@ ET_INVALID
Definition: SIDefines.h:881
llvm::AMDGPU::Exp::ET_MRTZ_MAX_IDX
@ ET_MRTZ_MAX_IDX
Definition: SIDefines.h:874
GCNSubtarget.h
llvm::AMDGPU::hasSMEMByteOffset
static bool hasSMEMByteOffset(const MCSubtargetInfo &ST)
Definition: AMDGPUBaseInfo.cpp:2190
f
Itanium Name Demangler i e convert the string _Z1fv into f()". You can also use the CRTP base ManglingParser to perform some simple analysis on the mangled name
llvm::AMDGPU::getRegOperandSize
unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc, unsigned OpNo)
Get size of register operand.
Definition: AMDGPUBaseInfo.cpp:2055
llvm::AMDGPU::SendMsg::OP_SYS_LAST_
@ OP_SYS_LAST_
Definition: SIDefines.h:367
llvm::AMDGPU::getMIMGBaseOpcode
const MIMGBaseOpcodeInfo * getMIMGBaseOpcode(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:206
llvm::StringRef::split
LLVM_NODISCARD std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:753
llvm::AMDGPU::MIMGBaseOpcodeInfo
Definition: AMDGPUBaseInfo.h:300
llvm::AMDGPU::isInlinableIntLiteralV216
bool isInlinableIntLiteralV216(int32_t Literal)
Definition: AMDGPUBaseInfo.cpp:2139
Intr
unsigned Intr
Definition: AMDGPUBaseInfo.cpp:2341
llvm::AMDGPU::OPERAND_REG_INLINE_AC_FP16
@ OPERAND_REG_INLINE_AC_FP16
Definition: SIDefines.h:186
llvm::AMDGPU::MTBUFFormat::DfmtSymbolic
const StringLiteral DfmtSymbolic[]
Definition: AMDGPUAsmUtils.cpp:141
llvm::AMDGPU::getMUBUFOpcode
int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements)
Definition: AMDGPUBaseInfo.cpp:337
llvm::MCSubtargetInfo::getTargetTriple
const Triple & getTargetTriple() const
Definition: MCSubtargetInfo.h:108
llvm::AMDGPU::getSMEMIsBuffer
bool getSMEMIsBuffer(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:367
llvm::AMDGPU::isGFX940
bool isGFX940(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1788
llvm::AMDGPU::IsaInfo::getMaxNumSGPRs
unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU, bool Addressable)
Definition: AMDGPUBaseInfo.cpp:739
llvm::AMDGPU::isSGPR
bool isSGPR(unsigned Reg, const MCRegisterInfo *TRI)
Is Reg - scalar register.
Definition: AMDGPUBaseInfo.cpp:1811
llvm::AMDGPU::hasMAIInsts
bool hasMAIInsts(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1796
llvm::AMDGPU::VOPC64DPPInfo
Definition: AMDGPUBaseInfo.cpp:276
llvm::AMDGPU::IsaInfo::getTargetIDSettingFromFeatureString
static TargetIDSetting getTargetIDSettingFromFeatureString(StringRef FeatureString)
Definition: AMDGPUBaseInfo.cpp:488
llvm::AMDGPU::SendMsg::STREAM_ID_LAST_
@ STREAM_ID_LAST_
Definition: SIDefines.h:374
llvm::SubtargetFeatures::getFeatures
const std::vector< std::string > & getFeatures() const
Returns the vector of individual subtarget features.
Definition: SubtargetFeature.h:196
llvm::DoubleToBits
uint64_t DoubleToBits(double Double)
This function takes a double and returns the bit equivalent 64-bit integer.
Definition: MathExtras.h:680
llvm::SubtargetFeatures
Manages the enabling and disabling of subtarget specific features.
Definition: SubtargetFeature.h:183
llvm::AMDGPU::SendMsg::ID_SYSMSG
@ ID_SYSMSG
Definition: SIDefines.h:335
llvm::AMDGPU::Hwreg::Id
Id
Definition: SIDefines.h:385
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::isXnackOnOrAny
bool isXnackOnOrAny() const
Definition: AMDGPUBaseInfo.h:121
llvm::AMDGPU::decodeExpcnt
unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt)
Definition: AMDGPUBaseInfo.cpp:1035
llvm::AMDGPU::getMAIIsGFX940XDL
bool getMAIIsGFX940XDL(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:396
llvm::ELF::ELFABIVERSION_AMDGPU_HSA_V5
@ ELFABIVERSION_AMDGPU_HSA_V5
Definition: ELF.h:377
llvm::CallingConv::AMDGPU_ES
@ AMDGPU_ES
Calling convention used for AMDPAL shader stage before geometry shader if geometry is in use.
Definition: CallingConv.h:236
llvm::CallingConv::AMDGPU_GS
@ AMDGPU_GS
Calling convention used for Mesa/AMDPAL geometry shaders.
Definition: CallingConv.h:207
llvm::AMDGPU::MTBUFInfo::BaseOpcode
uint16_t BaseOpcode
Definition: AMDGPUBaseInfo.cpp:259
llvm::dwarf::Index
Index
Definition: Dwarf.h:472
llvm::alignDown
uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the largest uint64_t less than or equal to Value and is Skew mod Align.
Definition: MathExtras.h:787
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:197
S_00B848_MEM_ORDERED
#define S_00B848_MEM_ORDERED(x)
Definition: SIDefines.h:1011
llvm::AMDGPU::hasGFX10A16
bool hasGFX10A16(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1695
MCSubtargetInfo.h
llvm::MCSubtargetInfo::getFeatureBits
const FeatureBitset & getFeatureBits() const
Definition: MCSubtargetInfo.h:112
llvm::AMDGPU::Exp::ET_PRIM
@ ET_PRIM
Definition: SIDefines.h:867
AMDGPU
Definition: AMDGPUReplaceLDSUseWithPointer.cpp:114
llvm::AMDGPUAS::LOCAL_ADDRESS
@ LOCAL_ADDRESS
Address space for local memory.
Definition: AMDGPU.h:366
llvm::AMDGPU::encodeCustomOperand
static int encodeCustomOperand(const CustomOperandVal *Opr, int Size, const StringRef Name, int64_t InputVal, unsigned &UsedOprMask, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1209
llvm::AMDGPU::SendMsg::OP_GS_NOP
@ OP_GS_NOP
Definition: SIDefines.h:356
llvm::AMDGPU::OPERAND_REG_IMM_FP64
@ OPERAND_REG_IMM_FP64
Definition: SIDefines.h:158
llvm::AMDGPU::getMTBUFBaseOpcode
int getMTBUFBaseOpcode(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:302
llvm::AMDGPU::getInitialPSInputAddr
unsigned getInitialPSInputAddr(const Function &F)
Definition: AMDGPUBaseInfo.cpp:1615
llvm::AMDGPU::SendMsg::OP_SHIFT_
@ OP_SHIFT_
Definition: SIDefines.h:350
llvm::Triple::r600
@ r600
Definition: Triple.h:73
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:143
llvm::AMDGPU::Exp::ExpTgt::Name
StringLiteral Name
Definition: AMDGPUBaseInfo.cpp:1313
llvm::AMDGPU::IsaInfo::TRAP_NUM_SGPRS
@ TRAP_NUM_SGPRS
Definition: AMDGPUBaseInfo.h:95
llvm::raw_ostream
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:54
llvm::AMDGPU::getMTBUFHasSoffset
bool getMTBUFHasSoffset(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:327
llvm::raw_ostream::flush
void flush()
Definition: raw_ostream.h:187
llvm::operator<<
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:230
llvm::AMDGPU::MTBUFFormat::DFMT_UNDEF
@ DFMT_UNDEF
Definition: SIDefines.h:490
llvm::cl::Option::getNumOccurrences
int getNumOccurrences() const
Definition: CommandLine.h:395
llvm::ThreadPriority::Low
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
llvm::AMDGPU::decodeWaitcnt
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from given Waitcnt for given isa Version, and writes decoded values...
Definition: AMDGPUBaseInfo.cpp:1045
amd_kernel_code_t::wavefront_size
uint8_t wavefront_size
Wavefront size expressed as a power of two.
Definition: AMDKernelCodeT.h:643
llvm::AMDGPU::MTBUFInfo
Definition: AMDGPUBaseInfo.cpp:257
llvm::AMDGPU::OPERAND_REG_IMM_V2FP32
@ OPERAND_REG_IMM_V2FP32
Definition: SIDefines.h:165
llvm::AMDGPU::IsaInfo::getMinFlatWorkGroupSize
unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:676
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::getSramEccSetting
TargetIDSetting getSramEccSetting() const
Definition: AMDGPUBaseInfo.h:164
llvm::amdhsa::kernel_descriptor_t::kernel_code_properties
uint16_t kernel_code_properties
Definition: AMDHSAKernelDescriptor.h:179
llvm::IndexedInstrProf::Version
const uint64_t Version
Definition: InstrProf.h:1027
llvm::AMDGPU::getMUBUFHasSrsrc
bool getMUBUFHasSrsrc(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:352
Info
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
llvm::AMDGPU::convertSMRDOffsetUnits
uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset)
Convert ByteOffset to dwords if the subtarget uses dword SMRD immediate offsets.
Definition: AMDGPUBaseInfo.cpp:2216
llvm::AMDGPU::IsaInfo::getEUsPerCU
unsigned getEUsPerCU(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:634
llvm::AMDGPU::MIMGInfo::MIMGEncoding
uint8_t MIMGEncoding
Definition: AMDGPUBaseInfo.h:403
llvm::MCOperandInfo::RegClass
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Definition: MCInstrDesc.h:90
llvm::AMDGPU::MTBUFInfo::elements
uint8_t elements
Definition: AMDGPUBaseInfo.cpp:260
llvm::AMDGPU::IsaInfo::getLocalMemorySize
unsigned getLocalMemorySize(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:625
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::AMDGPU::getMIMGOpcode
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding, unsigned VDataDwords, unsigned VAddrDwords)
Definition: AMDGPUBaseInfo.cpp:199
llvm::Triple::getArch
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:345
llvm::StringRef::str
LLVM_NODISCARD std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:249
llvm::AMDGPU::isCI
bool isCI(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1712
llvm::StringRef::getAsInteger
std::enable_if_t< std::numeric_limits< T >::is_signed, bool > getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:514
llvm::None
const NoneType None
Definition: None.h:24
llvm::AMDGPU::SendMsg::StreamId
StreamId
Definition: SIDefines.h:371
llvm::CallingConv::ID
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
llvm::AMDGPU::VOPC64DPPInfo::Opcode
uint16_t Opcode
Definition: AMDGPUBaseInfo.cpp:277
llvm::AMDGPU::isGFX10Plus
bool isGFX10Plus(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1744
llvm::AMDGPU::isSymbolicCustomOperandEncoding
static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr, int Size, unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1166
llvm::amdhsa::kernel_descriptor_t::compute_pgm_rsrc3
uint32_t compute_pgm_rsrc3
Definition: AMDHSAKernelDescriptor.h:176
llvm::AMDGPU::Exp::ET_DUAL_SRC_BLEND0
@ ET_DUAL_SRC_BLEND0
Definition: SIDefines.h:868
llvm::AMDGPU::isEntryFunctionCC
bool isEntryFunctionCC(CallingConv::ID CC)
Definition: AMDGPUBaseInfo.cpp:1653
amd_kernel_code_t::amd_machine_version_minor
uint16_t amd_machine_version_minor
Definition: AMDKernelCodeT.h:531
llvm::AMDGPU::MTBUFFormat::isValidFormatEncoding
bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1484
llvm::AMDGPU::isHsaAbiVersion2
bool isHsaAbiVersion2(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:124
llvm::AMDGPU::hasPackedD16
bool hasPackedD16(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1703
llvm::AMDGPU::MTBUFFormat::NFMT_MIN
@ NFMT_MIN
Definition: SIDefines.h:508
llvm::AMDGPU::SMInfo::IsBuffer
bool IsBuffer
Definition: AMDGPUBaseInfo.cpp:268
llvm::AMDGPU::Hwreg::OFFSET_MASK_
@ OFFSET_MASK_
Definition: SIDefines.h:420
llvm::AMDGPU::Hwreg::isValidHwregWidth
bool isValidHwregWidth(int64_t Width)
Definition: AMDGPUBaseInfo.cpp:1283
llvm::AMDGPU::DepCtr::getDefaultDepCtrEncoding
int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1237
llvm::AMDGPU::shouldEmitConstantsToTextSection
bool shouldEmitConstantsToTextSection(const Triple &TT)
Definition: AMDGPUBaseInfo.cpp:957
llvm::AMDGPU::SendMsg::getMsgId
int64_t getMsgId(const StringRef Name, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1506
llvm::AMDGPU::Hwreg::WIDTH_M1_SHIFT_
@ WIDTH_M1_SHIFT_
Definition: SIDefines.h:430
llvm::AMDGPU::DepCtr::DepCtrInfo
const CustomOperandVal DepCtrInfo[]
Definition: AMDGPUAsmUtils.cpp:18
llvm::Triple::AMDHSA
@ AMDHSA
Definition: Triple.h:207
llvm::AMDGPU::Hwreg::isValidHwreg
bool isValidHwreg(int64_t Id)
Definition: AMDGPUBaseInfo.cpp:1275
llvm::AMDGPU::UfmtGFX11::UFMT_FIRST
@ UFMT_FIRST
Definition: SIDefines.h:717
llvm::AMDGPU::isVI
bool isVI(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1716
llvm::AMDGPU::MUBUFInfo::BaseOpcode
uint16_t BaseOpcode
Definition: AMDGPUBaseInfo.cpp:249
llvm::AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED
@ OPERAND_REG_IMM_FP16_DEFERRED
Definition: SIDefines.h:160
llvm::AMDGPU::isInlinableLiteralV216
bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi)
Definition: AMDGPUBaseInfo.cpp:2124
llvm::AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET
@ HOSTCALL_PTR_OFFSET
Definition: SIDefines.h:899
llvm::cl::opt
Definition: CommandLine.h:1392
llvm::AMDGPU::Exp::ET_PARAM_MAX_IDX
@ ET_PARAM_MAX_IDX
Definition: SIDefines.h:879
llvm::AMDGPU::getRegBitWidth
unsigned getRegBitWidth(unsigned RCID)
Get the size in bits of a register from the register class RC.
Definition: AMDGPUBaseInfo.cpp:1946
llvm::AMDGPU::MTBUFFormat::NFMT_MAX
@ NFMT_MAX
Definition: SIDefines.h:509
llvm::AMDGPU::getExpcntBitMask
unsigned getExpcntBitMask(const IsaVersion &Version)
Definition: AMDGPUBaseInfo.cpp:1007
llvm::MCInstrDesc::NumOperands
unsigned short NumOperands
Definition: MCInstrDesc.h:200
llvm::GlobalValue
Definition: GlobalValue.h:44
llvm::AMDGPU::getMultigridSyncArgImplicitArgPosition
unsigned getMultigridSyncArgImplicitArgPosition()
Definition: AMDGPUBaseInfo.cpp:157
llvm::divideCeil
uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition: MathExtras.h:776
llvm::AMDGPU::isHsaAbiVersion4
bool isHsaAbiVersion4(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:136
llvm::AMDGPU::IsaInfo::getMinNumSGPRs
unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
Definition: AMDGPUBaseInfo.cpp:722
llvm::AMDGPU::isInlinableIntLiteral
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating point values.
Definition: AMDGPUBaseInfo.h:923
llvm::AMDGPU::hasG16
bool hasG16(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1699
AMDGPUMCTargetDesc.h
llvm::AMDGPU::Hwreg::Offset
Offset
Definition: SIDefines.h:416
llvm::isUInt< 16 >
constexpr bool isUInt< 16 >(uint64_t x)
Definition: MathExtras.h:408
llvm::StringRef::empty
constexpr LLVM_NODISCARD bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:153
llvm::AMDGPU::SMInfo::Opcode
uint16_t Opcode
Definition: AMDGPUBaseInfo.cpp:267
uint64_t
llvm::AMDGPU::MTBUFFormat::NFMT_MASK
@ NFMT_MASK
Definition: SIDefines.h:515
llvm::Triple::getOS
OSType getOS() const
Get the parsed operating system type of this triple.
Definition: Triple.h:354
llvm::AMDGPU::isVOPC64DPP
bool isVOPC64DPP(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:387
llvm::ARM_MB::ST
@ ST
Definition: ARMBaseInfo.h:73
llvm::AMDGPU::OPERAND_REG_INLINE_C_FP32
@ OPERAND_REG_INLINE_C_FP32
Definition: SIDefines.h:172
llvm::AMDGPU::isGFX9
bool isGFX9(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1720
llvm::AMDGPU::MTBUFFormat::DFMT_SHIFT
@ DFMT_SHIFT
Definition: SIDefines.h:493
llvm::AMDGPU::SendMsg::ID_MASK_GFX11Plus_
@ ID_MASK_GFX11Plus_
Definition: SIDefines.h:345
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
llvm::MCOperandInfo::OperandType
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:96
llvm::AMDGPU::initDefaultAMDKernelCodeT
void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header, const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:873
llvm::AMDGPU::getIntegerAttribute
int getIntegerAttribute(const Function &F, StringRef Name, int Default)
Definition: AMDGPUBaseInfo.cpp:961
llvm::AMDGPU::Exp::ET_DUAL_SRC_BLEND1
@ ET_DUAL_SRC_BLEND1
Definition: SIDefines.h:869
llvm::AMDGPU::VOPInfo::Opcode
uint16_t Opcode
Definition: AMDGPUBaseInfo.cpp:272
llvm::AMDGPU::OPERAND_SRC_FIRST
@ OPERAND_SRC_FIRST
Definition: SIDefines.h:203
amd_kernel_code_t::call_convention
int32_t call_convention
Definition: AMDKernelCodeT.h:645
llvm::AMDGPUAS::CONSTANT_ADDRESS
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
Definition: AMDGPU.h:365
llvm::MCSubtargetInfo::getCPU
StringRef getCPU() const
Definition: MCSubtargetInfo.h:109
llvm::AMDGPU::MUBUFInfo::IsBufferInv
bool IsBufferInv
Definition: AMDGPUBaseInfo.cpp:254
llvm::AMDGPU::OPR_ID_UNKNOWN
const int OPR_ID_UNKNOWN
Definition: AMDGPUAsmUtils.h:23
llvm::DenormalMode
Represent subnormal handling kind for floating point instruction inputs and outputs.
Definition: FloatingPointMode.h:69
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
llvm::AMDGPU::getGcnBufferFormatInfo
const GcnBufferFormatInfo * getGcnBufferFormatInfo(uint8_t BitsPerComp, uint8_t NumComponents, uint8_t NumFormat, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:2357
llvm::AMDGPU::SendMsg::OP_UNKNOWN_
@ OP_UNKNOWN_
Definition: SIDefines.h:349
llvm::AMDGPU::IsaInfo::getWavefrontSize
unsigned getWavefrontSize(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:616
llvm::ELF::ELFABIVERSION_AMDGPU_HSA_V2
@ ELFABIVERSION_AMDGPU_HSA_V2
Definition: ELF.h:374
llvm::isUInt< 32 >
constexpr bool isUInt< 32 >(uint64_t x)
Definition: MathExtras.h:411
llvm::AMDGPU::isSISrcInlinableOperand
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo)
Does this operand support only inlinable literals?
Definition: AMDGPUBaseInfo.cpp:1937
llvm::AMDGPU::Hwreg::isValidHwregOffset
bool isValidHwregOffset(int64_t Offset)
Definition: AMDGPUBaseInfo.cpp:1279
llvm::AMDGPU::SIModeRegisterDefaults::DX10Clamp
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise,...
Definition: AMDGPUBaseInfo.h:1007
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::AMDGPU::encodeVmcnt
unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Vmcnt)
Definition: AMDGPUBaseInfo.cpp:1060
llvm::X86AS::FS
@ FS
Definition: X86.h:192
llvm::isUInt< 8 >
constexpr bool isUInt< 8 >(uint64_t x)
Definition: MathExtras.h:405
llvm::AMDGPU::getAddrSizeMIMGOp
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
Definition: AMDGPUBaseInfo.cpp:219
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::getXnackSetting
TargetIDSetting getXnackSetting() const
Definition: AMDGPUBaseInfo.h:135
llvm::AMDGPU::SIModeRegisterDefaults::FP64FP16InputDenormals
bool FP64FP16InputDenormals
If this is set, neither input or output denormals are flushed for both f64 and f16/v2f16 instructions...
Definition: AMDGPUBaseInfo.h:1016
amd_kernel_code_t::amd_machine_version_stepping
uint16_t amd_machine_version_stepping
Definition: AMDKernelCodeT.h:532
llvm::AMDGPU::isGFX9_GFX10
bool isGFX9_GFX10(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1724
amd_kernel_code_t::group_segment_alignment
uint8_t group_segment_alignment
Definition: AMDKernelCodeT.h:635
llvm::MCInstrDesc::OpInfo
const MCOperandInfo * OpInfo
Definition: MCInstrDesc.h:208
function
print Print MemDeps of function
Definition: MemDepPrinter.cpp:82
llvm::AMDGPU::Waitcnt::LgkmCnt
unsigned LgkmCnt
Definition: AMDGPUBaseInfo.h:534
llvm::AMDGPU::Exp::isSupportedTgtId
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1362
llvm::AMDGPU::IsaInfo::getTotalNumSGPRs
unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:703
llvm::AMDGPU::getMIMGInfo
const LLVM_READONLY MIMGInfo * getMIMGInfo(unsigned Opc)
llvm::AMDGPU::isGFX10Before1030
bool isGFX10Before1030(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1764
llvm::AMDGPU::VOPInfo
Definition: AMDGPUBaseInfo.cpp:271
llvm::AMDGPU::OPERAND_REG_INLINE_C_V2INT16
@ OPERAND_REG_INLINE_C_V2INT16
Definition: SIDefines.h:174
llvm::AMDGPU::Exp::ET_MRT0
@ ET_MRT0
Definition: SIDefines.h:859
llvm::AMDGPU::IsaInfo::getAddressableNumSGPRs
unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:710
llvm::AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED
@ OPERAND_REG_IMM_FP32_DEFERRED
Definition: SIDefines.h:161
llvm::DenormalMode::IEEE
@ IEEE
IEEE-754 denormal numbers preserved.
Definition: FloatingPointMode.h:76
llvm::AMDGPU::MTBUFFormat::convertDfmtNfmt2Ufmt
int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1467
llvm::AMDGPU::IsaInfo::getNumSGPRBlocks
unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs)
Definition: AMDGPUBaseInfo.cpp:787
amd_kernel_code_t::kernarg_segment_alignment
uint8_t kernarg_segment_alignment
The maximum byte alignment of variables used by the kernel in the specified memory segment.
Definition: AMDKernelCodeT.h:634
amd_kernel_code_t::amd_kernel_code_version_minor
uint32_t amd_kernel_code_version_minor
Definition: AMDKernelCodeT.h:528
llvm::AMDGPU::Exp::ExpTgtInfo
static constexpr ExpTgt ExpTgtInfo[]
Definition: AMDGPUBaseInfo.cpp:1318
llvm::AMDGPU::OPERAND_REG_INLINE_AC_FP32
@ OPERAND_REG_INLINE_AC_FP32
Definition: SIDefines.h:187
llvm::AMDGPU::IsaInfo::TargetIDSetting::Off
@ Off
llvm::AMDGPU::MTBUFFormat::DFMT_MIN
@ DFMT_MIN
Definition: SIDefines.h:487
llvm::AMDGPU::MIMGInfo::BaseOpcode
uint16_t BaseOpcode
Definition: AMDGPUBaseInfo.h:402
llvm::AMDGPU::CustomOperand
Definition: AMDGPUAsmUtils.h:28
llvm::AMDGPU::isInlinableLiteral16
bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi)
Definition: AMDGPUBaseInfo.cpp:2105
llvm::AMDGPU::getMIMGBaseOpcodeInfo
const LLVM_READONLY MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
llvm::AMDGPU::CPol::SCC
@ SCC
Definition: SIDefines.h:307
llvm::AMDGPU::MTBUFFormat::NfmtSymbolicGFX10
const StringLiteral NfmtSymbolicGFX10[]
Definition: AMDGPUAsmUtils.cpp:160
llvm::AMDGPU::isDwordAligned
static bool isDwordAligned(uint64_t ByteOffset)
Definition: AMDGPUBaseInfo.cpp:2212
llvm::AMDGPU::Exp::ExpTgt::Tgt
unsigned Tgt
Definition: AMDGPUBaseInfo.cpp:1314
llvm::AMDGPU::isHsaAbiVersion5
bool isHsaAbiVersion5(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:142
llvm::AMDGPU::DepCtr::DEP_CTR_SIZE
const int DEP_CTR_SIZE
Definition: AMDGPUAsmUtils.cpp:30
llvm::AMDGPU::hasSMRDSignedImmOffset
static bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST)
Definition: AMDGPUBaseInfo.cpp:2194
llvm::AMDGPU::Exp::ET_MRTZ
@ ET_MRTZ
Definition: SIDefines.h:861
llvm::AMDGPU::MTBUFFormat::UFMT_UNDEF
@ UFMT_UNDEF
Definition: SIDefines.h:531
llvm::AMDGPU::Exp::ExpTgt::MaxIndex
unsigned MaxIndex
Definition: AMDGPUBaseInfo.cpp:1315
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
llvm::AMDGPU::WMMAOpcodeMappingInfo
Definition: AMDGPUBaseInfo.h:371
llvm::AMDGPU::isNotGFX10Plus
bool isNotGFX10Plus(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1760
llvm::AMDGPU::SendMsg::isValidMsgId
bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1511
llvm::CallingConv::AMDGPU_PS
@ AMDGPU_PS
Calling convention used for Mesa/AMDPAL pixel shaders.
Definition: CallingConv.h:210
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:137
llvm::AMDGPU::DepCtr::isSymbolicDepCtrEncoding
bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1244
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
llvm::AMDGPUAS::GLOBAL_ADDRESS
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
Definition: AMDGPU.h:362
AMDGPU.h
MAP_REG2REG
#define MAP_REG2REG
Definition: AMDGPUBaseInfo.cpp:1818
llvm::AMDGPU::getVOP3IsSingle
bool getVOP3IsSingle(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:382
llvm::AMDGPU::Hwreg::OFFSET_SHIFT_
@ OFFSET_SHIFT_
Definition: SIDefines.h:418
llvm::AMDGPU::isModuleEntryFunctionCC
bool isModuleEntryFunctionCC(CallingConv::ID CC)
Definition: AMDGPUBaseInfo.cpp:1670
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::AMDGPU::isCompute
bool isCompute(CallingConv::ID cc)
Definition: AMDGPUBaseInfo.cpp:1649
llvm::AMDGPU::IsaInfo::getVGPREncodingGranule
unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI, Optional< bool > EnableWavefrontSize32)
Definition: AMDGPUBaseInfo.cpp:808
llvm::MCRegisterClass::getID
unsigned getID() const
getID() - Return the register class ID number.
Definition: MCRegisterInfo.h:48
uint32_t
llvm::AMDGPU::IsaInfo::getNumVGPRBlocks
unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs, Optional< bool > EnableWavefrontSize32)
Definition: AMDGPUBaseInfo.cpp:863
llvm::AMDGPU::MIMGBaseOpcodeInfo::Coordinates
bool Coordinates
Definition: AMDGPUBaseInfo.h:311
llvm::AMDGPU::isLegalSMRDEncodedUnsignedOffset
bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset)
Definition: AMDGPUBaseInfo.cpp:2198
llvm::AMDGPU::isSISrcOperand
bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo)
Can this operand also contain immediate values?
Definition: AMDGPUBaseInfo.cpp:1901
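A minimal usage sketch (an assumption, not code from this file): scanning an instruction description for operands that may also encode immediates. It assumes compilation inside the AMDGPU backend where AMDGPUBaseInfo.h is visible; MCInstrDesc::getNumOperands is the standard MC accessor.
#include "AMDGPUBaseInfo.h"
#include "llvm/MC/MCInstrDesc.h"

// Count the operands of Desc that can carry an SI source immediate.
static unsigned countSrcOperands(const llvm::MCInstrDesc &Desc) {
  unsigned Count = 0;
  for (unsigned I = 0, E = Desc.getNumOperands(); I != E; ++I)
    if (llvm::AMDGPU::isSISrcOperand(Desc, I))
      ++Count;
  return Count;
}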
llvm::AMDGPU::isGraphics
bool isGraphics(CallingConv::ID cc)
Definition: AMDGPUBaseInfo.cpp:1645
amd_kernel_code_t
AMD Kernel Code Object (amd_kernel_code_t).
Definition: AMDKernelCodeT.h:526
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::isXnackSupported
bool isXnackSupported() const
Definition: AMDGPUBaseInfo.h:116
llvm::AMDGPU::IsaInfo::getMaxFlatWorkGroupSize
unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:680
llvm::AMDGPU::Exp::getTgtName
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
Definition: AMDGPUBaseInfo.cpp:1328
llvm::AMDGPU::Waitcnt::VmCnt
unsigned VmCnt
Definition: AMDGPUBaseInfo.h:532
llvm::AMDGPU::getMTBUFHasVAddr
bool getMTBUFHasVAddr(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:317
llvm::MCRegisterInfo
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Definition: MCRegisterInfo.h:135
AMDHSA_BITS_SET
#define AMDHSA_BITS_SET(DST, MSK, VAL)
Definition: AMDHSAKernelDescriptor.h:42
llvm::amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE
@ FLOAT_DENORM_MODE_FLUSH_NONE
Definition: AMDHSAKernelDescriptor.h:63
amd_kernel_code_t::kernel_code_entry_byte_offset
int64_t kernel_code_entry_byte_offset
Byte offset (possibly negative) from start of amd_kernel_code_t object to kernel's entry point instru...
Definition: AMDKernelCodeT.h:544
llvm::AMDGPU::MTBUFFormat::UfmtSymbolicGFX10
const StringLiteral UfmtSymbolicGFX10[]
Definition: AMDGPUAsmUtils.cpp:193
llvm::AMDGPU::GcnBufferFormatInfo
Definition: AMDGPUBaseInfo.h:66
llvm::AMDGPU::isGFX8_GFX9_GFX10
bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1728
llvm::AMDGPU::MTBUFFormat::getUnifiedFormat
int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1442
llvm::AMDGPU::isGFX9Plus
bool isGFX9Plus(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1736
llvm::AMDGPU::getDefaultAmdhsaKernelDescriptor
amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:909
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::AMDGPU::MTBUFFormat::UFMT_DEFAULT
@ UFMT_DEFAULT
Definition: SIDefines.h:532
llvm::AMDGPU::SendMsg::Msg
const CustomOperand< const MCSubtargetInfo & > Msg[]
Definition: AMDGPUAsmUtils.cpp:39
amd_kernel_code_t::amd_machine_kind
uint16_t amd_machine_kind
Definition: AMDKernelCodeT.h:529
llvm::AMDGPU::SIModeRegisterDefaults::FP64FP16OutputDenormals
bool FP64FP16OutputDenormals
Definition: AMDGPUBaseInfo.h:1017
llvm::AMDGPU::isGroupSegment
bool isGroupSegment(const GlobalValue *GV)
Definition: AMDGPUBaseInfo.cpp:943
llvm::AMDGPU::SendMsg::STREAM_ID_FIRST_
@ STREAM_ID_FIRST_
Definition: SIDefines.h:375
llvm::AMDGPU::SMInfo
Definition: AMDGPUBaseInfo.cpp:266
llvm::CallingConv::AMDGPU_KERNEL
@ AMDGPU_KERNEL
Calling convention for AMDGPU code object kernels.
Definition: CallingConv.h:216
llvm::AMDGPU::encodeWaitcnt
unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt)
Encodes Vmcnt, Expcnt and Lgkmcnt into Waitcnt for the given ISA Version.
Definition: AMDGPUBaseInfo.cpp:1081
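A minimal usage sketch (not taken from this file), assuming compilation inside the AMDGPU backend where AMDGPUBaseInfo.h is visible: the three counter fields are packed for the current subtarget's ISA version. getIsaVersion, getExpcntBitMask and getLgkmcntBitMask are companion helpers declared in the same header.
#include "AMDGPUBaseInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"

// Build an s_waitcnt immediate that waits for all VMEM operations (Vmcnt = 0)
// while leaving the EXP and LGKM counters at their "no wait" maxima.
static unsigned waitForVmem(const llvm::MCSubtargetInfo &STI) {
  llvm::AMDGPU::IsaVersion Version = llvm::AMDGPU::getIsaVersion(STI.getCPU());
  unsigned Expcnt = llvm::AMDGPU::getExpcntBitMask(Version);
  unsigned Lgkmcnt = llvm::AMDGPU::getLgkmcntBitMask(Version);
  return llvm::AMDGPU::encodeWaitcnt(Version, /*Vmcnt=*/0, Expcnt, Lgkmcnt);
}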
llvm::AMDGPU::SendMsg::isValidMsgOp
bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI, bool Strict)
Definition: AMDGPUBaseInfo.cpp:1532
llvm::AMDGPU::SendMsg::OP_SYS_FIRST_
@ OP_SYS_FIRST_
Definition: SIDefines.h:368
Attributes.h
llvm::isInt< 16 >
constexpr bool isInt< 16 >(int64_t x)
Definition: MathExtras.h:370
llvm::StringRef::size
constexpr LLVM_NODISCARD size_t size() const
size - Get the string size.
Definition: StringRef.h:157
llvm::AMDGPU::isSI
bool isSI(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1708
llvm::ELF::ELFABIVERSION_AMDGPU_HSA_V4
@ ELFABIVERSION_AMDGPU_HSA_V4
Definition: ELF.h:376
llvm::AMDGPU::OPERAND_REG_INLINE_AC_FP64
@ OPERAND_REG_INLINE_AC_FP64
Definition: SIDefines.h:188
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::toString
std::string toString() const
Definition: AMDGPUBaseInfo.cpp:509
amd_kernel_code_t::amd_machine_version_major
uint16_t amd_machine_version_major
Definition: AMDKernelCodeT.h:530
llvm::Twine
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:83
llvm::Any
Definition: Any.h:28
llvm::AMDGPU::OPERAND_SRC_LAST
@ OPERAND_SRC_LAST
Definition: SIDefines.h:204
llvm::AMDGPU::isArgPassedInSGPR
bool isArgPassedInSGPR(const Argument *A)
Definition: AMDGPUBaseInfo.cpp:2163
llvm::AMDGPU::UfmtGFX11::UFMT_LAST
@ UFMT_LAST
Definition: SIDefines.h:718
llvm::AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET
@ MULTIGRID_SYNC_ARG_OFFSET
Definition: SIDefines.h:900
llvm::AMDGPU::Waitcnt
Represents the counter values to wait for in an s_waitcnt instruction.
Definition: AMDGPUBaseInfo.h:531
llvm::AMDGPU::Exp::ET_DUAL_SRC_BLEND_MAX_IDX
@ ET_DUAL_SRC_BLEND_MAX_IDX
Definition: SIDefines.h:878
llvm::AMDGPU::Hwreg::Opr
const CustomOperand< const MCSubtargetInfo & > Opr[]
Definition: AMDGPUAsmUtils.cpp:90
llvm::GraphProgram::Name
Name
Definition: GraphWriter.h:50
llvm::AMDGPU::SendMsg::decodeMsg
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId, uint16_t &StreamId, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1591
amd_kernel_code_t::private_segment_alignment
uint8_t private_segment_alignment
Definition: AMDKernelCodeT.h:636
llvm::AMDGPU::SIModeRegisterDefaults::SIModeRegisterDefaults
SIModeRegisterDefaults()
Definition: AMDGPUBaseInfo.h:1019
llvm::AMDGPU::decodeCustomOperand
static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size, unsigned Code, int &Idx, StringRef &Name, unsigned &Val, bool &IsDefault, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1185
llvm::AMDGPU::getMUBUFHasVAddr
bool getMUBUFHasVAddr(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:347
llvm::AMDGPU::isInlinableLiteral64
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable.
Definition: AMDGPUBaseInfo.cpp:2062
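A minimal sketch (an assumption, not from this file): testing whether a 64-bit floating-point constant can be encoded as an inline constant rather than a separate literal. DoubleToBits comes from llvm/Support/MathExtras.h; HasInv2Pi indicates whether the subtarget also accepts 1/(2*pi) as an inline value.
#include "AMDGPUBaseInfo.h"
#include "llvm/Support/MathExtras.h"

// 0.5 is one of the hardware inline constants, so this is expected to
// return true regardless of HasInv2Pi.
static bool isHalfInlinable() {
  int64_t Bits = static_cast<int64_t>(llvm::DoubleToBits(0.5));
  return llvm::AMDGPU::isInlinableLiteral64(Bits, /*HasInv2Pi=*/true);
}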
llvm::AMDGPU::OPERAND_REG_IMM_V2INT16
@ OPERAND_REG_IMM_V2INT16
Definition: SIDefines.h:163
uint16_t
llvm::AMDGPU::getMUBUFElements
int getMUBUFElements(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:342
llvm::AMDGPU::encodeCustomOperandVal
static int encodeCustomOperandVal(const CustomOperandVal &Op, int64_t InputVal)
Definition: AMDGPUBaseInfo.cpp:1202
llvm::AMDGPU::MTBUFFormat::DFMT_NFMT_DEFAULT
@ DFMT_NFMT_DEFAULT
Definition: SIDefines.h:520
llvm::AMDGPU::SendMsg::getMsgOpId
int64_t getMsgOpId(int64_t MsgId, const StringRef Name)
Definition: AMDGPUBaseInfo.cpp:1520
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:348
llvm::AMDGPU::Exp::ET_POS4
@ ET_POS4
Definition: SIDefines.h:865
llvm::FPOpFusion::Strict
@ Strict
Definition: TargetOptions.h:39
amd_kernel_code_t::code_properties
uint32_t code_properties
Code properties.
Definition: AMDKernelCodeT.h:562
llvm::AMDGPU::isFoldableLiteralV216
bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi)
Definition: AMDGPUBaseInfo.cpp:2150
llvm::AMDGPU::OPERAND_REG_INLINE_C_V2FP16
@ OPERAND_REG_INLINE_C_V2FP16
Definition: SIDefines.h:175
llvm::AMDGPU::CustomOperand::Cond
bool(* Cond)(T Context)
Definition: AMDGPUAsmUtils.h:31
llvm::AMDGPU::MIMGInfo::Opcode
uint16_t Opcode
Definition: AMDGPUBaseInfo.h:401
Function.h
llvm::AMDGPU::MUBUFInfo::has_vaddr
bool has_vaddr
Definition: AMDGPUBaseInfo.cpp:251
llvm::AMDGPU::OPERAND_REG_INLINE_C_FIRST
@ OPERAND_REG_INLINE_C_FIRST
Definition: SIDefines.h:197
llvm::amdhsa::kernel_descriptor_t
Definition: AMDHSAKernelDescriptor.h:169
llvm::AMDGPU::getVOP1IsSingle
bool getVOP1IsSingle(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:372
llvm::AMDGPU::splitMUBUFOffset
bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset, const GCNSubtarget *Subtarget, Align Alignment)
Definition: AMDGPUBaseInfo.cpp:2266
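A minimal sketch (an assumption, not code from this file) of splitting a byte offset that may exceed the MUBUF immediate field into an SGPR-held part plus an in-range immediate; the caller is assumed to hold a valid GCNSubtarget, and the Align(4) argument mirrors the dword-aligned case.
#include "AMDGPUBaseInfo.h"
#include "GCNSubtarget.h"
#include "llvm/Support/Alignment.h"
#include <cstdint>

static bool trySplit(const llvm::GCNSubtarget &ST, uint32_t ByteOffset,
                     uint32_t &SOffset, uint32_t &ImmOffset) {
  // On success, SOffset + ImmOffset re-compose ByteOffset, with ImmOffset
  // small enough for the instruction's immediate field.
  return llvm::AMDGPU::splitMUBUFOffset(ByteOffset, SOffset, ImmOffset, &ST,
                                        llvm::Align(4));
}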
llvm::AMDGPU::SendMsg::STREAM_ID_NONE_
@ STREAM_ID_NONE_
Definition: SIDefines.h:372
llvm::AMDGPU::MIMGInfo
Definition: AMDGPUBaseInfo.h:400
llvm::AMDGPU::MTBUFFormat::decodeDfmtNfmt
void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt)
Definition: AMDGPUBaseInfo.cpp:1437
llvm::AMDGPU::Exp::getTgtId
unsigned getTgtId(const StringRef Name)
Definition: AMDGPUBaseInfo.cpp:1339
llvm::AMDGPU::mapWMMA3AddrTo2AddrOpcode
unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:406
llvm::AMDGPU::MTBUFFormat::NfmtSymbolicSICI
const StringLiteral NfmtSymbolicSICI[]
Definition: AMDGPUAsmUtils.cpp:171
llvm::AMDGPU::MTBUFFormat::NFMT_SHIFT
@ NFMT_SHIFT
Definition: SIDefines.h:514
llvm::AMDGPU::getTotalNumVGPRs
int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, int32_t ArgNumVGPR)
Definition: AMDGPUBaseInfo.cpp:1804
llvm::AMDGPU::OPERAND_REG_INLINE_C_V2FP32
@ OPERAND_REG_INLINE_C_V2FP32
Definition: SIDefines.h:177
llvm::AMDGPU::IsaInfo::getAddressableNumVGPRs
unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:828
llvm::AMDGPU::SendMsg::isValidMsgStream
bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId, const MCSubtargetInfo &STI, bool Strict)
Definition: AMDGPUBaseInfo.cpp:1558
llvm::AMDGPU::MTBUFFormat::getNfmtName
StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1417
AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32
@ AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32
Definition: AMDKernelCodeT.h:127
llvm::amdhsa::kernel_descriptor_t::compute_pgm_rsrc1
uint32_t compute_pgm_rsrc1
Definition: AMDHSAKernelDescriptor.h:177
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:439
llvm::AMDGPU::SendMsg::STREAM_ID_SHIFT_
@ STREAM_ID_SHIFT_
Definition: SIDefines.h:376
llvm::AMDGPU::getLgkmcntBitMask
unsigned getLgkmcntBitMask(const IsaVersion &Version)
Definition: AMDGPUBaseInfo.cpp:1011
llvm::GlobalValue::getAddressSpace
unsigned getAddressSpace() const
Definition: Globals.cpp:121
llvm::GCNSubtarget::getGeneration
Generation getGeneration() const
Definition: GCNSubtarget.h:263
llvm::RISCVMatInt::Imm
@ Imm
Definition: RISCVMatInt.h:23
llvm::AMDGPU::Hwreg::WIDTH_M1_MASK_
@ WIDTH_M1_MASK_
Definition: SIDefines.h:432
llvm::AMDGPU::MIMGBaseOpcodeInfo::NumExtraArgs
uint8_t NumExtraArgs
Definition: AMDGPUBaseInfo.h:308
llvm::AMDGPU::isInlinableLiteral32
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
Definition: AMDGPUBaseInfo.cpp:2079
llvm::AMDGPU::isSISrcFPOperand
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this a floating-point operand?
Definition: AMDGPUBaseInfo.cpp:1908
llvm::AMDGPU::isGFX8Plus
bool isGFX8Plus(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1732
llvm::FeatureBitset::test
constexpr bool test(unsigned I) const
Definition: SubtargetFeature.h:90
llvm::AMDGPU::isNotGFX11Plus
bool isNotGFX11Plus(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1756
llvm::AMDGPU::encodeExpcnt
unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Expcnt)
Definition: AMDGPUBaseInfo.cpp:1069
llvm::AMDGPU::Exp::ET_POS_MAX_IDX
@ ET_POS_MAX_IDX
Definition: SIDefines.h:877
llvm::AMDGPU::isGCN3Encoding
bool isGCN3Encoding(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1768
llvm::AMDGPU::MTBUFFormat::getNfmt
int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1408
llvm::AMDGPU::Exp::ExpTgt
Definition: AMDGPUBaseInfo.cpp:1312
llvm::AMDGPU::isValidOpr
static bool isValidOpr(int Idx, const CustomOperand< T > OpInfo[], int OpInfoSize, T Context)
Definition: AMDGPUBaseInfo.cpp:1106
llvm::AMDGPU::SendMsg::getMsgOpName
StringRef getMsgOpName(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1552
llvm::AMDGPU::Exp::ET_PRIM_MAX_IDX
@ ET_PRIM_MAX_IDX
Definition: SIDefines.h:875
llvm::AMDGPU::getNumFlatOffsetBits
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST, bool Signed)
For the FLAT segment, the offset must be positive; the MSB is ignored and forced to zero.
Definition: AMDGPUBaseInfo.cpp:2251
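A minimal sketch (an assumption, not from this file) of using the returned bit count to range-check a candidate FLAT immediate offset; isIntN/isUIntN are the MathExtras helpers.
#include "AMDGPUBaseInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/MathExtras.h"

// Returns true if Offset fits the FLAT offset field on this subtarget.
static bool fitsFlatOffset(const llvm::MCSubtargetInfo &ST, int64_t Offset,
                           bool Signed) {
  unsigned NumBits = llvm::AMDGPU::getNumFlatOffsetBits(ST, Signed);
  return Signed ? llvm::isIntN(NumBits, Offset)
                : llvm::isUIntN(NumBits, Offset);
}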
llvm::AMDGPU::SendMsg::ID_GS_DONE_PreGFX11
@ ID_GS_DONE_PreGFX11
Definition: SIDefines.h:322
N
#define N
AMDKernelCodeT.h
llvm::CallingConv::AMDGPU_LS
@ AMDGPU_LS
Calling convention used for AMDPAL vertex shader if tessellation is in use.
Definition: CallingConv.h:231
llvm::AMDGPU::IsaInfo::getWavesPerEUForWorkGroup
unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI, unsigned FlatWorkGroupSize)
Definition: AMDGPUBaseInfo.cpp:670
llvm::AMDGPU::IsaInfo::getMaxWavesPerEU
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:661
llvm::AMDGPU::isHsaAbiVersion3AndAbove
bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI)
Definition: AMDGPUBaseInfo.cpp:148
llvm::AMDGPU::MTBUFFormat::DfmtNfmt2UFmtGFX10
const unsigned DfmtNfmt2UFmtGFX10[]
Definition: AMDGPUAsmUtils.cpp:287
llvm::AMDGPU::IsaInfo::getMinNumVGPRs
unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU)
Definition: AMDGPUBaseInfo.cpp:843
llvm::AMDGPU::MTBUFFormat::UFMT_MAX
@ UFMT_MAX
Definition: SIDefines.h:530
llvm::AMDGPU::hasMIMG_R128
bool hasMIMG_R128(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1691
llvm::AMDGPU::MIMGBaseOpcodeInfo::G16
bool G16
Definition: AMDGPUBaseInfo.h:310
llvm::CallingConv::SPIR_KERNEL
@ SPIR_KERNEL
SPIR_KERNEL - Calling convention for SPIR kernel functions.
Definition: CallingConv.h:152
llvm::AMDGPU::Exp::ET_NULL_MAX_IDX
@ ET_NULL_MAX_IDX
Definition: SIDefines.h:873
llvm::DenormalMode::Output
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
Definition: FloatingPointMode.h:87
llvm::AMDGPU::OPERAND_REG_INLINE_C_FP16
@ OPERAND_REG_INLINE_C_FP16
Definition: SIDefines.h:171
llvm::CallingConv::AMDGPU_CS
@ AMDGPU_CS
Calling convention used for Mesa/AMDPAL compute shaders.
Definition: CallingConv.h:213
llvm::AMDGPU::HSAMD::Kernel::CodeProps::Key::NumVGPRs
constexpr char NumVGPRs[]
Key for Kernel::CodeProps::Metadata::mNumVGPRs.
Definition: AMDGPUMetadata.h:260
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::setTargetIDFromFeaturesString
void setTargetIDFromFeaturesString(StringRef FS)
Definition: AMDGPUBaseInfo.cpp:429
llvm::AMDGPU::isReadOnlySegment
bool isReadOnlySegment(const GlobalValue *GV)
Definition: AMDGPUBaseInfo.cpp:951
llvm::AMDGPU::SendMsg::OP_NONE_
@ OP_NONE_
Definition: SIDefines.h:351
llvm::AMDGPU::isLegalSMRDEncodedSignedOffset
bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset, bool IsBuffer)
Definition: AMDGPUBaseInfo.cpp:2204
llvm::AMDGPU::OPERAND_REG_IMM_FP16
@ OPERAND_REG_IMM_FP16
Definition: SIDefines.h:159
llvm::AMDGPU::Exp::ET_PARAM31
@ ET_PARAM31
Definition: SIDefines.h:871
S_00B848_WGP_MODE
#define S_00B848_WGP_MODE(x)
Definition: SIDefines.h:1008
LLVMContext.h
llvm::AMDGPU::encodeLgkmcnt
unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned Lgkmcnt)
Definition: AMDGPUBaseInfo.cpp:1075
llvm::AMDGPU::MAIInstInfo
Definition: AMDGPUBaseInfo.h:74
llvm::cl::desc
Definition: CommandLine.h:405
llvm::AMDGPU::OPERAND_REG_INLINE_AC_V2INT16
@ OPERAND_REG_INLINE_AC_V2INT16
Definition: SIDefines.h:189
llvm::AMDGPU::MTBUFFormat::getDfmt
int64_t getDfmt(const StringRef Name)
Definition: AMDGPUBaseInfo.cpp:1387
llvm::AMDGPU::MTBUFFormat::DfmtNfmt2UFmtGFX11
const unsigned DfmtNfmt2UFmtGFX11[]
Definition: AMDGPUAsmUtils.cpp:461
llvm::AMDGPU::VGPRIndexMode::Id
Id
Definition: SIDefines.h:241
llvm::AMDGPU::CustomOperandVal
Definition: AMDGPUAsmUtils.h:34
llvm::AMDGPU::Waitcnt::ExpCnt
unsigned ExpCnt
Definition: AMDGPUBaseInfo.h:533
llvm::AMDGPU::getIntegerPairAttribute
std::pair< int, int > getIntegerPairAttribute(const Function &F, StringRef Name, std::pair< int, int > Default, bool OnlyFirstRequired)
Definition: AMDGPUBaseInfo.cpp:976
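A minimal sketch (an assumption, not code from this file) of reading a comma-separated "min,max" style function attribute with a fallback default; the attribute name "amdgpu-flat-work-group-size" and the {1, 1024} default are illustrative choices, not mandated by this entry.
#include "AMDGPUBaseInfo.h"
#include "llvm/IR/Function.h"
#include <utility>

// Returns the attribute's two integers, or {1, 1024} if the attribute is
// missing or malformed.
static std::pair<int, int> flatWorkGroupSize(const llvm::Function &F) {
  return llvm::AMDGPU::getIntegerPairAttribute(
      F, "amdgpu-flat-work-group-size", /*Default=*/{1, 1024},
      /*OnlyFirstRequired=*/false);
}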
llvm::AMDGPU::IsaInfo::AMDGPUTargetID::isSramEccSupported
bool isSramEccSupported() const
Definition: AMDGPUBaseInfo.h:145
llvm::AMDGPU::SendMsg::ID_GS_PreGFX11
@ ID_GS_PreGFX11
Definition: SIDefines.h:321
llvm::AMDGPU::MTBUFInfo::Opcode
uint16_t Opcode
Definition: AMDGPUBaseInfo.cpp:258
llvm::AMDGPU::isKernelCC
bool isKernelCC(const Function *Func)
Definition: AMDGPUBaseInfo.cpp:1679
llvm::AMDGPU::MTBUFInfo::has_srsrc
bool has_srsrc
Definition: AMDGPUBaseInfo.cpp:262
llvm::AMDGPU::getHostcallImplicitArgPosition
unsigned getHostcallImplicitArgPosition()
Definition: AMDGPUBaseInfo.cpp:174
llvm::AMDGPU::SendMsg::getMsgIdMask
static uint64_t getMsgIdMask(const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1502
llvm::AMDGPU::SendMsg::OP_MASK_
@ OP_MASK_
Definition: SIDefines.h:354
llvm::AMDGPU::MTBUFFormat::getUnifiedFormatName
StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1457
llvm::MCSubtargetInfo
Generic base class for all target subtargets.
Definition: MCSubtargetInfo.h:76
llvm::AMDGPU::MUBUFInfo
Definition: AMDGPUBaseInfo.cpp:247
llvm::AMDGPU::Exp::ET_MRT_MAX_IDX
@ ET_MRT_MAX_IDX
Definition: SIDefines.h:876
llvm::AMDGPU::getHasColorExport
bool getHasColorExport(const Function &F)
Definition: AMDGPUBaseInfo.cpp:1619
llvm::AMDGPU::getHasDepthExport
bool getHasDepthExport(const Function &F)
Definition: AMDGPUBaseInfo.cpp:1626
llvm::ELF::ELFABIVERSION_AMDGPU_HSA_V3
@ ELFABIVERSION_AMDGPU_HSA_V3
Definition: ELF.h:375
llvm::AMDGPU::IsaInfo::getVGPRAllocGranule
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, Optional< bool > EnableWavefrontSize32)
Definition: AMDGPUBaseInfo.cpp:793
llvm::AMDGPU::SendMsg::getMsgName
StringRef getMsgName(int64_t MsgId, const MCSubtargetInfo &STI)
Definition: AMDGPUBaseInfo.cpp:1515
llvm::AMDGPU::getMaskedMIMGOp
int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels)
Definition: AMDGPUBaseInfo.cpp:211
llvm::AMDGPU::getMTBUFElements
int getMTBUFElements(unsigned Opc)
Definition: AMDGPUBaseInfo.cpp:312
llvm::AMDGPU::OPERAND_REG_INLINE_C_LAST
@ OPERAND_REG_INLINE_C_LAST
Definition: SIDefines.h:198
llvm::AMDGPU::UfmtGFX10::UFMT_LAST
@ UFMT_LAST
Definition: SIDefines.h:632
AMDGPUBaseInfo.h
llvm::AMDGPU::OPERAND_REG_INLINE_AC_V2FP16
@ OPERAND_REG_INLINE_AC_V2FP16
Definition: SIDefines.h:190
llvm::AMDGPU::getOprIdx
static int getOprIdx(std::function< bool(const CustomOperand< T > &)> Test, const CustomOperand< T > OpInfo[], int OpInfoSize, T Context)
Definition: AMDGPUBaseInfo.cpp:1113
llvm::AMDGPU::MTBUFFormat::DFMT_MAX
@ DFMT_MAX
Definition: SIDefines.h:488