                                uint32_t &Stepping, bool Sramecc, bool Xnack) {
  if (Major == 9 && Minor == 0) {
  if (!HSAMetadataDoc.fromYAML(HSAMetadataString))
  OS << "\t.amdgcn_target \"" << getTargetID()->toString() << "\"\n";
  OS << "\t.hsa_code_object_version " << Twine(Major) << "," << Twine(Minor)
     << '\n';
  OS << "\t.hsa_code_object_isa " << Twine(Major) << "," << Twine(Minor) << ","
     << Twine(Stepping) << ",\"" << VendorName << "\",\"" << ArchName << "\"\n";
  OS << "\t.amd_kernel_code_t\n";
  OS << "\t.end_amd_kernel_code_t\n";
  OS << "\t.amdgpu_hsa_kernel " << SymbolName << '\n';
  OS << "\t.amdgpu_lds " << Symbol->getName() << ", " << Size << ", "
     << Alignment.value() << '\n';
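  // Illustrative only, with an assumed symbol name and sizes: a call such as
  // emitAMDGPULDS(Sym /* "lds.kernel.buf" */, /*Size=*/512, Align(16)) prints
  //   .amdgpu_lds lds.kernel.buf, 512, 16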
  OS << "\t.amd_amdgpu_isa \"" << getTargetID()->toString() << "\"\n";
  std::string HSAMetadataString;
  OS << HSAMetadataString << '\n';
  std::string HSAMetadataString;
  HSAMetadataDoc.toYAML(StrOS);
  OS << StrOS.str() << '\n';
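  // The YAML text is bracketed by the HSA metadata assembler directives. A
  // sketch of the emitted block (directive names from the V3 msgpack path;
  // metadata contents purely illustrative):
  //   .amdgpu_metadata
  //   amdhsa.kernels:
  //     - .name: add_kernel
  //       .symbol: add_kernel.kd
  //   .end_amdgpu_metadata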
  const uint32_t Encoded_s_code_end = 0xbf9f0000;
  const uint32_t Encoded_s_nop = 0xbf800000;
  uint32_t Encoded_pad = Encoded_s_code_end;
    Encoded_pad = Encoded_s_nop;
  OS << "\t.p2alignl " << Log2CacheLineSize << ", " << Encoded_pad << '\n';
  OS << "\t.fill " << (FillSize / 4) << ", 4, " << Encoded_pad << '\n';
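  // Shape of the resulting assembly (placeholders, not literal values): the
  // first directive pads the section to the cache-line boundary with the
  // chosen encoding, the second then fills FillSize bytes in 4-byte units.
  //   .p2alignl <Log2CacheLineSize>, <Encoded_pad>
  //   .fill <FillSize / 4>, 4, <Encoded_pad>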
    bool ReserveVCC, bool ReserveFlatScr, unsigned CodeObjectVersion) {
  OS << "\t.amdhsa_kernel " << KernelName << '\n';
#define PRINT_FIELD(STREAM, DIRECTIVE, KERNEL_DESC, MEMBER_NAME, FIELD_NAME)   \
  STREAM << "\t\t" << DIRECTIVE << " "                                         \
         << AMDHSA_BITS_GET(KERNEL_DESC.MEMBER_NAME, FIELD_NAME) << '\n';
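// For illustration only (directive and field chosen as an example): a call
// such as
//   PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_ptr", KD,
//               kernel_code_properties,
//               amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
// expands to a stream statement that extracts the packed bit-field from
// KD.kernel_code_properties via AMDHSA_BITS_GET and prints a line like
// (indented by two tabs)
//   .amdhsa_user_sgpr_dispatch_ptr 1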
  OS << "\t\t.amdhsa_private_segment_fixed_size "
     << KD.private_segment_fixed_size << '\n';
  OS << "\t\t.amdhsa_kernarg_size " << KD.kernarg_size << '\n';
              amdhsa::COMPUTE_PGM_RSRC2_USER_SGPR_COUNT);
      OS, ".amdhsa_user_sgpr_private_segment_buffer", KD,
      kernel_code_properties,
      amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_kernarg_segment_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_id", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_flat_scratch_init", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
    PRINT_FIELD(OS, ".amdhsa_user_sgpr_kernarg_preload_length ", KD,
                kernarg_preload, amdhsa::KERNARG_PRELOAD_SPEC_LENGTH);
    PRINT_FIELD(OS, ".amdhsa_user_sgpr_kernarg_preload_offset ", KD,
                kernarg_preload, amdhsa::KERNARG_PRELOAD_SPEC_OFFSET);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_private_segment_size", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);
  if (IVersion.Major >= 10)
                kernel_code_properties,
                amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
  PRINT_FIELD(OS, ".amdhsa_uses_dynamic_stack", KD, kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK);
                  ? ".amdhsa_enable_private_segment"
                  : ".amdhsa_system_sgpr_private_segment_wavefront_offset"),
              KD, compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_x", KD,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_y", KD,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_z", KD,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_info", KD,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
  PRINT_FIELD(OS, ".amdhsa_system_vgpr_workitem_id", KD,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);
  OS << "\t\t.amdhsa_next_free_vgpr " << NextVGPR << '\n';
  OS << "\t\t.amdhsa_next_free_sgpr " << NextSGPR << '\n';
  OS << "\t\t.amdhsa_accum_offset "
     << (AMDHSA_BITS_GET(KD.compute_pgm_rsrc3,
                         amdhsa::COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET) +
         1) * 4
     << '\n';
  OS << "\t\t.amdhsa_reserve_vcc " << ReserveVCC << '\n';
  OS << "\t\t.amdhsa_reserve_flat_scratch " << ReserveFlatScr << '\n';
  switch (CodeObjectVersion) {
    OS << "\t\t.amdhsa_reserve_xnack_mask " << getTargetID()->isXnackOnOrAny()
       << '\n';
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
  PRINT_FIELD(OS, ".amdhsa_float_round_mode_16_64", KD,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
  PRINT_FIELD(OS, ".amdhsa_float_denorm_mode_32", KD,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
  PRINT_FIELD(OS, ".amdhsa_float_denorm_mode_16_64", KD,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
              amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP);
              amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE);
  if (IVersion.Major >= 9)
              amdhsa::COMPUTE_PGM_RSRC1_FP16_OVFL);
              amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT);
  if (IVersion.Major >= 10) {
    PRINT_FIELD(OS, ".amdhsa_workgroup_processor_mode", KD,
                amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE);
                amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED);
                amdhsa::COMPUTE_PGM_RSRC1_FWD_PROGRESS);
    PRINT_FIELD(OS, ".amdhsa_shared_vgpr_count", KD, compute_pgm_rsrc3,
                amdhsa::COMPUTE_PGM_RSRC3_GFX10_PLUS_SHARED_VGPR_COUNT);
      OS, ".amdhsa_exception_fp_ieee_invalid_op", KD,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_denorm_src", KD,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
      OS, ".amdhsa_exception_fp_ieee_div_zero", KD,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_overflow", KD,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_underflow", KD,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_inexact", KD,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
  PRINT_FIELD(OS, ".amdhsa_exception_int_div_zero", KD,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
  OS << "\t.end_amdhsa_kernel\n";
void AMDGPUTargetELFStreamer::EmitNote(
  auto &Context = S.getContext();
  auto NameSZ = Name.size() + 1;
  unsigned NoteFlags = 0;
  S.emitValue(DescSZ, 4);
  S.emitInt32(NoteType);
  S.emitValueToAlignment(Align(4), 0, 1, 0);
  S.emitValueToAlignment(Align(4), 0, 1, 0);
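// Sketch of the ELF note record these emit calls lay out (standard SHT_NOTE
// layout; the lines not shown here are expected to emit the namesz word, the
// name string, and the descriptor bytes):
//   .word  NameSZ      // namesz, name length including the trailing NUL
//   .word  DescSZ      // descsz, provided by the caller as an MCExpr
//   .word  NoteType    // e.g. NT_AMD_HSA_CODE_OBJECT_VERSION
//   name bytes, padded to a 4-byte boundary
//   desc bytes, padded to a 4-byte boundary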
unsigned AMDGPUTargetELFStreamer::getEFlags() {
    return getEFlagsR600();
    return getEFlagsAMDGCN();

unsigned AMDGPUTargetELFStreamer::getEFlagsR600() {

unsigned AMDGPUTargetELFStreamer::getEFlagsAMDGCN() {
    return getEFlagsUnknownOS();
    return getEFlagsAMDHSA();
    return getEFlagsAMDPAL();
    return getEFlagsMesa3D();

unsigned AMDGPUTargetELFStreamer::getEFlagsUnknownOS() {
  return getEFlagsV3();

unsigned AMDGPUTargetELFStreamer::getEFlagsAMDHSA() {
  switch (*HsaAbiVer) {
    return getEFlagsV3();
    return getEFlagsV4();

unsigned AMDGPUTargetELFStreamer::getEFlagsAMDPAL() {
  return getEFlagsV3();

unsigned AMDGPUTargetELFStreamer::getEFlagsMesa3D() {
  return getEFlagsV3();

unsigned AMDGPUTargetELFStreamer::getEFlagsV3() {
  unsigned EFlagsV3 = 0;

unsigned AMDGPUTargetELFStreamer::getEFlagsV4() {
  unsigned EFlagsV4 = 0;
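// Minimal sketch of how a V4 flag word is assembled from the pieces this
// function consults (the specific GPU and feature settings below are assumed,
// not taken from the listing): the MACH field identifies the processor and
// the XNACK / SRAMECC settings are OR'ed in as separate bit-fields.
//   unsigned EFlags = ELF::EF_AMDGPU_MACH_AMDGCN_GFX90A;
//   EFlags |= ELF::EF_AMDGPU_FEATURE_XNACK_ON_V4;
//   EFlags |= ELF::EF_AMDGPU_FEATURE_SRAMECC_ON_V4;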
  unsigned DescSZ = sizeof(VendorNameSize) + sizeof(ArchNameSize) +
                    sizeof(Major) + sizeof(Minor) + sizeof(Stepping) +
                    VendorNameSize + ArchNameSize;

  OS.emitInt16(VendorNameSize);
  OS.emitInt16(ArchNameSize);
  OS.emitInt32(Stepping);
  OS.emitBytes(VendorName);
  OS.emitBytes(ArchName);
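  // The descriptor is thus a small fixed header (two 16-bit name lengths plus
  // the 32-bit Major/Minor/Stepping values, the first two presumably emitted
  // on lines not shown here) followed by the vendor and architecture name
  // strings; DescSZ above accounts for exactly those pieces.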
  OS.emitBytes(StringRef((const char *)&Header, sizeof(Header)));
  Symbol->setType(Type);

  MCSymbolELF *SymbolELF = cast<MCSymbolELF>(Symbol);
                       " redeclared as different type");
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  OS.emitLabel(DescBegin);
  OS.emitLabel(DescEnd);
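// A DescBegin/DescEnd label pair is the usual way to size a note descriptor
// whose length is not known up front: the size can be expressed as
//   MCBinaryExpr::createSub(MCSymbolRefExpr::create(DescEnd, Context),
//                           MCSymbolRefExpr::create(DescBegin, Context),
//                           Context)
// and passed to EmitNote as DescSZ. (Sketch of the pattern only; the exact
// expression built on the elided lines is not shown in this listing.)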
  std::string HSAMetadataString;
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  OS.emitLabel(DescBegin);
  OS.emitBytes(HSAMetadataString);
  OS.emitLabel(DescEnd);
  std::string HSAMetadataString;
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  OS.emitLabel(DescBegin);
  OS.emitBytes(HSAMetadataString);
  OS.emitLabel(DescEnd);
  const uint32_t Encoded_s_code_end = 0xbf9f0000;
  const uint32_t Encoded_s_nop = 0xbf800000;
  uint32_t Encoded_pad = Encoded_s_code_end;
    Encoded_pad = Encoded_s_nop;

  for (unsigned I = 0; I < FillSize; I += 4)
    OS.emitInt32(Encoded_pad);
    uint64_t NextSGPR, bool ReserveVCC, bool ReserveFlatScr,
    unsigned CodeObjectVersion) {

  MCSymbolELF *KernelDescriptorSymbol = cast<MCSymbolELF>(
  KernelDescriptorSymbol->setSize(
  Streamer.emitLabel(KernelDescriptorSymbol);
  for (uint8_t Res : KernelDescriptor.reserved0)
  for (uint8_t Res : KernelDescriptor.reserved1)
  for (uint8_t Res : KernelDescriptor.reserved3)
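// For orientation only (field list inferred from the kernel_descriptor_t
// members referenced elsewhere in this file): the blob written here carries
// group_segment_fixed_size, private_segment_fixed_size, kernarg_size,
// kernel_code_entry_byte_offset, compute_pgm_rsrc3, compute_pgm_rsrc1,
// compute_pgm_rsrc2, kernel_code_properties and kernarg_preload, with the
// reserved0/1/3 ranges handled byte-by-byte by the loops above.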