class AMDGPUMCCodeEmitter : public MCCodeEmitter {
  const MCRegisterInfo &MRI;
  const MCInstrInfo &MCII;

public:
  AMDGPUMCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI)
      : MRI(MRI), MCII(MCII) {}

  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void getMachineOpValueT16(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

  void getMachineOpValueT16Lo128(const MCInst &MI, unsigned OpNo, APInt &Op,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const;

  void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

private:
  uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
  void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
                               unsigned OpNo, APInt &Op,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  std::optional<uint64_t> getLitEncoding(const MCInstrDesc &Desc,
                                         const MCOperand &MO, unsigned OpNo,
                                         const MCSubtargetInfo &STI,
                                         bool HasMandatoryLiteral = false) const;

  void getBinaryCodeForInstr(const MCInst &MI,
                             SmallVectorImpl<MCFixup> &Fixups, APInt &Inst,
                             APInt &Scratch, const MCSubtargetInfo &STI) const;

  APInt postEncodeVOPCX(const MCInst &MI, APInt EncodedValue,
                        const MCSubtargetInfo &STI) const;
};

MCCodeEmitter *llvm::createAMDGPUMCCodeEmitter(const MCInstrInfo &MCII,
                                               MCContext &Ctx) {
  return new AMDGPUMCCodeEmitter(MCII, *Ctx.getRegisterInfo());
}

template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}
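// Worked examples: getIntInlineImmEncoding(1) == 129 and
// getIntInlineImmEncoding(-16) == 208; a result of 0 means "no inline form"
// and the caller falls back to a literal.
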
static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  // ...
  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;
  // ...
}

static uint32_t getLitBF16Encoding(uint16_t Val) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  switch (Val) {
  case 0x3F00: return 240; //  0.5
  case 0xBF00: return 241; // -0.5
  case 0x3F80: return 242; //  1.0
  case 0xBF80: return 243; // -1.0
  case 0x4000: return 244; //  2.0
  case 0xC000: return 245; // -2.0
  case 0x4080: return 246; //  4.0
  case 0xC080: return 247; // -4.0
  case 0x3E22: return 248; //  1.0 / (2.0 * pi)
  default: return 255;
  }
}
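// The bf16 patterns above are the fp32 constants truncated to their high
// 16 bits; e.g. 1.0 / (2.0 * pi) is 0x3E22F983 as fp32 and becomes 0x3E22.
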
static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  // ...
  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;
  // ...
}

static uint32_t getLit64Encoding(const MCInstrDesc &Desc, uint64_t Val,
                                 const MCSubtargetInfo &STI, bool IsFP) {
  // ...
  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  bool CanUse64BitLiterals =
      STI.hasFeature(AMDGPU::Feature64BitLiterals) &&
      /* ... */;
  // ...
  return CanUse64BitLiterals && Lo_32(Val) ? 254 : 255;
}
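// 255 requests a trailing 32-bit literal, while 254 selects the 64-bit
// literal form available with Feature64BitLiterals. The Lo_32 test reflects
// that an FP value with an all-zero low half still fits the 32-bit literal
// path, so only values with nonzero low bits need the lit64 form.
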
std::optional<uint64_t>
AMDGPUMCCodeEmitter::getLitEncoding(const MCInstrDesc &Desc,
                                    const MCOperand &MO, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    bool HasMandatoryLiteral) const {
  const MCOperandInfo &OpInfo = Desc.operands()[OpNo];
  int64_t Imm;
  if (MO.isExpr()) {
    if (!MO.getExpr()->evaluateAsAbsolute(Imm) ||
        AMDGPU::isLitExpr(MO.getExpr()))
      return 255;
    // ...
  }
  // ...
  if (STI.hasFeature(AMDGPU::Feature64BitLiterals) &&
      /* ... */) {
    // ...
  }
  auto Enc = /* ... dispatch on OpInfo.OperandType to the getLit*Encoding
                helpers ... */;
  return (HasMandatoryLiteral && Enc == 255) ? 254 : Enc;
}

uint64_t AMDGPUMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
  using namespace AMDGPU::VOP3PEncoding;
  // ...
}

void AMDGPUMCCodeEmitter::encodeInstruction(const MCInst &MI,
                                            SmallVectorImpl<char> &CB,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  int Opcode = MI.getOpcode();
  APInt Encoding, Scratch;
  getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  // Fold the implicit op_sel_hi bits into the encoding for VOP3P and the
  // accvgpr read/write MAI opcodes.
  if (((Desc.TSFlags & SIInstrFlags::VOP3P) ||
       Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
       Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) &&
      /* ... */)
    Encoding |= getImplicitOpSelHiEncoding(Opcode);
  for (unsigned i = 0; i < bytes; i++) {
    CB.push_back((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
  }
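  // The fixed-size part of the encoding is emitted least-significant byte
  // first, i.e. little-endian, one byte per iteration.
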
  // NSA encoding: MIMG addresses beyond vaddr0 are appended as extra bytes,
  // padded out to a dword boundary.
  if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
    int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vaddr0);
    int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
    unsigned NumPadding = (-NumExtraAddrs) & 3;

    for (unsigned i = 0; i < NumExtraAddrs; ++i) {
      getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
                        STI);
      CB.push_back((uint8_t)Encoding.getLimitedValue());
    }
    CB.append(NumPadding, 0);
  }
  // Anything longer than the base 4-byte (8-byte with FeatureVOP3Literal)
  // format has no room for a trailing literal dword.
  if ((bytes > 8 && STI.hasFeature(AMDGPU::FeatureVOP3Literal)) ||
      (bytes > 4 && !STI.hasFeature(AMDGPU::FeatureVOP3Literal)))
    return;
  // Check for additional literals.
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {
    // Only AMDGPU [SV]Src operands can carry a literal.
    if (!AMDGPU::isSISrcOperand(Desc.operands()[i]))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    auto Enc = getLitEncoding(Desc, Op, i, STI);
    if (!Enc || (*Enc != 255 && *Enc != 254))
      continue;

    // It is; materialize the value to emit.
    int64_t Imm = 0;
    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      // ...
    }
    // ... (write the literal bytes to CB)
  }
}
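// getLitEncoding returns 255 (or 254 for a 64-bit literal) exactly when the
// operand has no inline-constant form, so only those operands contribute a
// trailing literal here; everything else was already encoded inline.
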
void AMDGPUMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                            APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    addFixup(Fixups, 0, Expr, AMDGPU::fixup_si_sopp_br, /*PCRel=*/true);
    Op = APInt::getZero(96);
  } else {
    getMachineOpValue(MI, MO, Op, Fixups, STI);
  }
}
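// A branch target that is still symbolic at encode time becomes a 16-bit
// PC-relative fixup (fixup_si_sopp_br) and is resolved later by the
// assembler; only absolute operands are encoded directly.
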
void AMDGPUMCCodeEmitter::getSMEMOffsetEncoding(const MCInst &MI,
                                                unsigned OpNo, APInt &Op,
                                                SmallVectorImpl<MCFixup> &Fixups,
                                                const MCSubtargetInfo &STI) const {
  auto Offset = MI.getOperand(OpNo).getImm();
  // VI only supports 20-bit unsigned offsets.
  assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
  Op = Offset;
}

void AMDGPUMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                             APInt &Op,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    MCRegister Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    }
    Op = RegEnc;
    return;
  } else {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    auto Enc = getLitEncoding(Desc, MO, OpNo, STI);
    if (Enc && *Enc != 255) {
      Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
      return;
    }
  }

  llvm_unreachable("Unsupported operand kind");
}
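// The SDWA9 src field holds a VGPR number in its low bits; setting
// SRC_SGPR_MASK reinterprets those bits as an SGPR number or an
// inline-constant code instead.
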
void AMDGPUMCCodeEmitter::getSDWAVopcDstEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  MCRegister Reg = MO.getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  Op = RegEnc;
}

void AMDGPUMCCodeEmitter::getAVOperandEncoding(const MCInst &MI, unsigned OpNo,
                                               APInt &Op,
                                               SmallVectorImpl<MCFixup> &Fixups,
                                               const MCSubtargetInfo &STI) const {
  MCRegister Reg = MI.getOperand(OpNo).getReg();
  unsigned Enc = MRI.getEncodingValue(Reg);
  unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
  bool IsVGPROrAGPR = Enc & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;
  bool IsAGPR = /* ... */;
  Op = Idx | (IsVGPROrAGPR << 8) | (IsAGPR << 9);
}
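// Operands that may name either register file get a virtual ninth register
// bit: bit 8 marks the value as a VGPR/AGPR index and bit 9 selects the
// AGPR file over the VGPR file.
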
void AMDGPUMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                            const MCOperand &MO, APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPROrAGPR = Enc & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;
    Op = Idx | (IsVGPROrAGPR << 8);
    return;
  }
  unsigned OpNo = &MO - MI.begin();
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}

void AMDGPUMCCodeEmitter::getMachineOpValueT16(const MCInst &MI, unsigned OpNo,
                                               APInt &Op,
                                               SmallVectorImpl<MCFixup> &Fixups,
                                               const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPR = Enc & AMDGPU::HWEncoding::IS_VGPR;
    Op = Idx | (IsVGPR << 8);
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
  // The rest of this function encodes the op_sel bits for true16 operands:
  // when this operand is a *_modifiers operand, set the op_sel bit if the
  // matching register is a hi half.
  int SrcMOIdx = -1;
  if ((int)OpNo == AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                              AMDGPU::OpName::src0_modifiers)) {
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
    int VDstMOIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst);
    if (VDstMOIdx != -1) {
      auto DstReg = MI.getOperand(VDstMOIdx).getReg();
      if (AMDGPU::isHi16Reg(DstReg, MRI))
        Op |= SISrcMods::DST_OP_SEL;
    }
  } else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
                              MI.getOpcode(), AMDGPU::OpName::src1_modifiers))
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
  else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
                            MI.getOpcode(), AMDGPU::OpName::src2_modifiers))
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src2);
  if (SrcMOIdx == -1)
    return;

  const MCOperand &SrcMO = MI.getOperand(SrcMOIdx);
  if (!SrcMO.isReg())
    return;
  auto SrcReg = SrcMO.getReg();
  if (AMDGPU::isHi16Reg(SrcReg, MRI))
    Op |= SISrcMods::OP_SEL_0;
}

void AMDGPUMCCodeEmitter::getMachineOpValueT16Lo128(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    uint16_t Encoding = MRI.getEncodingValue(MO.getReg());
    unsigned RegIdx = Encoding & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsHi = Encoding & AMDGPU::HWEncoding::IS_HI16;
    bool IsVGPR = Encoding & AMDGPU::HWEncoding::IS_VGPR;
    assert((!IsVGPR || isUInt<7>(RegIdx)) && "VGPR0-VGPR127 expected!");
    Op = (IsVGPR ? 0x100 : 0) | (IsHi ? 0x80 : 0) | RegIdx;
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}
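// Lo128 true16 operands pack into nine bits: bits 0-6 hold the register
// index (so VGPRs are limited to v0-v127), bit 7 selects the hi half, and
// bit 8 distinguishes VGPRs from SGPRs.
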
void AMDGPUMCCodeEmitter::getMachineOpValueCommon(
    const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  bool isLikeImm = false;
  int64_t Val;

  if (MO.isImm()) {
    Val = MO.getImm();
    isLikeImm = true;
  } else if (MO.isExpr() && MO.getExpr()->evaluateAsAbsolute(Val)) {
    isLikeImm = true;
  } else if (MO.isExpr()) {
    // A relocatable expression: emit a data fixup of the operand's size and
    // let the assembler resolve it.
    bool PCRel = needsPCRel(MO.getExpr());
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    unsigned Size = AMDGPU::getOperandSize(Desc.operands()[OpNo]);
    addFixup(Fixups, 0, MO.getExpr(), MCFixup::getDataKindForSize(Size), PCRel);
    // ...
  }

  if (isLikeImm) {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    bool HasMandatoryLiteral =
        AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm);
    if (auto Enc = getLitEncoding(Desc, MO, OpNo, STI, HasMandatoryLiteral)) {
      Op = *Enc;
      return;
    }
  }

  llvm_unreachable("Encoding of this operand type is not supported yet.");
}

APInt AMDGPUMCCodeEmitter::postEncodeVOPCX(const MCInst &MI, APInt EncodedValue,
                                           const MCSubtargetInfo &STI) const {
  // ...
  [[maybe_unused]] const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  assert(Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC));
  EncodedValue |= MRI.getEncodingValue(AMDGPU::EXEC_LO) &
                  AMDGPU::HWEncoding::REG_IDX_MASK;
  return EncodedValue;
}
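// VOPCX (v_cmpx) opcodes write their compare result to EXEC implicitly, so
// the encoded destination field is patched here with EXEC_LO's register
// index to match what the hardware expects.
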
#include "AMDGPUGenMCCodeEmitter.inc"