#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"
static cl::opt<bool> DisableRegAllocHints(
    "riscv-disable-regalloc-hints", cl::Hidden, cl::init(false),
    cl::desc("Disable two address hints for register "
             "allocation"));
static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");
    return CSR_NoRegs_SaveList;
  // ...
    if (Subtarget.hasStdExtD())
      return CSR_XLEN_F64_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return Subtarget.hasStdExtE() ? CSR_XLEN_F32_Interrupt_RVE_SaveList
                                    : CSR_XLEN_F32_Interrupt_SaveList;
    return Subtarget.hasStdExtE() ? CSR_Interrupt_RVE_SaveList
                                  : CSR_Interrupt_SaveList;
  // ...
      Subtarget.hasVInstructions();

  switch (Subtarget.getTargetABI()) {
  // ...
    return CSR_ILP32E_LP64E_SaveList;
  // ...
      return CSR_ILP32_LP64_V_SaveList;
    return CSR_ILP32_LP64_SaveList;
  // ...
      return CSR_ILP32F_LP64F_V_SaveList;
    return CSR_ILP32F_LP64F_SaveList;
  // ...
      return CSR_ILP32D_LP64D_V_SaveList;
    return CSR_ILP32D_LP64D_SaveList;
  }
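// getReservedRegs(): beyond user-reserved and constant registers, this marks
// the RVE-unavailable GPRs (x16-x31), the vector configuration and fixed-point
// state registers (vtype, vxsat, vxrm), the floating-point environment
// registers (frm, fflags), and a few extension-specific registers as reserved.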
  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    // Mark any GPRs requested to be reserved as such.
    if (Subtarget.isRegisterReservedByUser(Reg))
      /* ... */;
    // Mark registers defined as constant in TableGen as reserved.
    if (isConstantPhysReg(Reg))
      /* ... */;
  }

  // ...
  markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);

  // With RVE only x0-x15 are usable as GPRs.
  if (Subtarget.hasStdExtE())
    for (MCPhysReg Reg = RISCV::X16; Reg <= RISCV::X31; Reg++)
      /* ... */;

  // Vector configuration and fixed-point state registers are not allocatable.
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);

  // Floating-point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  // ...
  markSuperRegs(Reserved, RISCV::VCIX_STATE);

  // ...
  if (Subtarget.hasStdExtE())
    /* ... */;
  markSuperRegs(Reserved, RISCV::X23);
  markSuperRegs(Reserved, RISCV::X27);

  // ...
  markSuperRegs(Reserved, RISCV::SSP);
  return CSR_NoRegs_RegMask;
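// adjustReg() materializes DestReg = SrcReg + Offset, where the StackOffset
// can carry both a fixed byte component and a scalable component expressed in
// multiples of the vector register size (VLENB).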
  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
    return;

  // ...
  bool KillSrcReg = false;

  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // ...
    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    assert(ScalableValue > 0 && "There is no need to get VLEN scaled value.");
    assert(ScalableValue % 8 == 0 &&
           "Reserve the stack by the multiple of one vector size.");
    assert(isInt<32>(ScalableValue / 8) &&
           "Expect the number of vector registers within 32-bits.");
    uint32_t NumOfVReg = ScalableValue / 8;
    // ...
    if (ScalableAdjOpc == RISCV::ADD && ST.hasStdExtZba() &&
        (NumOfVReg == 2 || NumOfVReg == 4 || NumOfVReg == 8)) {
      unsigned Opc = NumOfVReg == 2
                         ? RISCV::SH1ADD
                         : (NumOfVReg == 4 ? RISCV::SH2ADD : RISCV::SH3ADD);
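      // Note on the branch above: sh1add/sh2add/sh3add compute
      // rd = rs2 + (rs1 << 1/2/3), so for NumOfVReg of 2, 4 or 8 the multiply
      // by VLENB and the add of the base register fold into one Zba
      // instruction.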
      // ...
    } else {
      TII->mulImm(MF, MBB, II, DL, ScratchReg, NumOfVReg, Flag);
    }
    // ...
  }

  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)
    return;

  // ...
  if (isInt<12>(Val)) {
    // The whole offset fits a single ADDI.
    // ...
  }

  // ...
  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    // Two 12-bit-immediate adjustments; MaxPosAdjStep keeps the first step
    // compatible with the required alignment.
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
    // ...
  }

  // ...
  if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {
    // ...
    if (isShiftedInt<12, 3>(Val)) {
      // ...
    } else if (isShiftedInt<12, 2>(Val)) {
      // ...
    }
    // ...
    Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
    // ...
  }

  unsigned Opc = RISCV::ADD;
  // ...
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
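// lowerVSPILL(): a Zvlsseg register-group spill pseudo is expanded into NF
// whole-register stores (VS1R/VS2R/VS4R, depending on LMUL), advancing the
// base address by VLENB * LMUL between the parts.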
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");

  unsigned Opcode, SubRegIdx;
  // ...
    Opcode = RISCV::VS1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
  // ...
    Opcode = RISCV::VS2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
  // ...
    Opcode = RISCV::VS4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
  // ...
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // ...
    const int64_t VLENB = *VLEN / 8;
    int64_t Offset = VLENB * LMUL;
    // ...
    if (ShiftAmount != 0)
      /* ... */;
  // ...
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    // ...
        .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
    // ...
  }
  II->eraseFromParent();
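// lowerVRELOAD() mirrors lowerVSPILL(): the register-group reload pseudo is
// split into NF whole-register loads (VL1RE8/VL2RE8/VL4RE8), again stepping
// the base address by VLENB * LMUL.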
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");

  unsigned Opcode, SubRegIdx;
  // ...
    Opcode = RISCV::VL1RE8_V;
    SubRegIdx = RISCV::sub_vrm1_0;
  // ...
    Opcode = RISCV::VL2RE8_V;
    SubRegIdx = RISCV::sub_vrm2_0;
  // ...
    Opcode = RISCV::VL4RE8_V;
    SubRegIdx = RISCV::sub_vrm4_0;
  // ...
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // ...
    const int64_t VLENB = *VLEN / 8;
    int64_t Offset = VLENB * LMUL;
    // ...
    if (ShiftAmount != 0)
      /* ... */;
  // ...
  Register DestReg = II->getOperand(0).getReg();
  // ...
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    // ...
                TRI->getSubReg(DestReg, SubRegIdx + I))
    // ...
  }
  II->eraseFromParent();
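// eliminateFrameIndex(): the frame-index operand is rewritten to the frame
// register plus the computed StackOffset; fixed offsets that do not fit the
// instruction's 12-bit immediate are split between the immediate and a
// scratch register.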
                                            int SPAdj, unsigned FIOperandNum,
                                            // ...
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  // ...
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  // ...
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);

  // ...
  if (Offset.getScalable() &&
      ST.getRealMinVLen() == ST.getRealMaxVLen()) {
    // With an exactly known VLEN the scalable part of the offset can be
    // folded into the fixed part.
    int64_t FixedValue = Offset.getFixed();
    int64_t ScalableValue = Offset.getScalable();
    assert(ScalableValue % 8 == 0 &&
           "Scalable offset is not a multiple of a single vector size.");
    int64_t NumOfVReg = ScalableValue / 8;
    int64_t VLENB = ST.getRealMinVLen() / 8;
    // ...
  }

  if (!isInt<32>(Offset.getFixed())) {
    report_fatal_error(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  // ...
  int64_t Val = Offset.getFixed();
  int64_t Lo12 = SignExtend64<12>(Val);
  unsigned Opc = MI.getOpcode();
  if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
    // ...
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
  } else if ((Opc == RISCV::PREFETCH_I || Opc == RISCV::PREFETCH_R ||
              Opc == RISCV::PREFETCH_W) &&
             (Lo12 & 0b11111) != 0) {
    // Prefetch immediates must have their low 5 bits clear.
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
  } else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
              Opc == RISCV::PseudoRV32ZdinxSD) &&
             /* ... */) {
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
  }
  // ...
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
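  // Illustrative sketch (not part of this file): the split used above keeps
  // the sign-extended low 12 bits in the instruction's immediate and carries
  // the remainder in a register, e.g.
  //
  //   int64_t Lo12 = SignExtend64<12>(Val); // fits an I-type immediate
  //   int64_t Hi = Val - Lo12;              // materialized via movImm/LUI
  //
  // so that Hi + Lo12 == Val for any 32-bit Val.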
  // ...
  if (MI.getOpcode() == RISCV::ADDI)
    DestReg = MI.getOperand(0).getReg();
  // ...
    DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // ...
  MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef=*/false,
                                               /* ... */);
  // ...
  MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef=*/false,
                                               /* ... */);

  // If the rewrite left behind a no-op (addi rd, rd, 0), delete it.
  if (MI.getOpcode() == RISCV::ADDI &&
      MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getOperand(2).getImm() == 0) {
    MI.eraseFromParent();
    // ...
  }
  switch (MI.getOpcode()) {
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:
    // ...
  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1:
    // ...
  }
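  // The spill cases above are presumably dispatched to lowerVSPILL() and the
  // reload cases to lowerVRELOAD(); the call sites are elided in this excerpt.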
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  // ...
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // ...
  if (TFI->hasFP(MF) && !shouldRealignStack(MF)) {
    // Estimate the portion of the frame occupied by callee-saved registers so
    // the worst-case frame-pointer-relative offset can be bounded.
    unsigned CalleeSavedSize = 0;
    // ...
      if (Subtarget.isRegisterReservedByUser(Reg))
        continue;
      if (RISCV::GPRRegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::GPRRegClass);
      else if (RISCV::FPR64RegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::FPR64RegClass);
      else if (RISCV::FPR32RegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::FPR32RegClass);
    // ...
    int64_t MaxFPOffset = Offset - CalleeSavedSize;
    // ...
  }

  // ...
  int64_t MaxSPOffset = Offset + 128;
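// needsFrameBaseReg() bounds the worst-case offset reachable from the frame
// pointer (MaxFPOffset) and from the stack pointer (MaxSPOffset, with 128
// bytes of slack) to decide whether a virtual frame base register is
// worthwhile. The fragments that follow appear to be the remaining
// frame-index helpers (isFrameOffsetLegal, resolveFrameIndex,
// getFrameIndexInstrOffset, getFrameRegister).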
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }
  // ...

  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }
  // ...
  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, /*IsDef=*/false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
707 "The MI must be I or S format.");
708 assert(
MI->getOperand(
Idx).isFI() &&
"The Idx'th operand of MI is not a "
709 "FrameIndex operand");
710 return MI->getOperand(
Idx + 1).getImm();
715 return TFI->
hasFP(MF) ? RISCV::X8 : RISCV::X2;
  // getCallPreservedMask(): the RegMask selection mirrors getCalleeSavedRegs().
    return CSR_NoRegs_RegMask;
  switch (Subtarget.getTargetABI()) {
  // ...
    return CSR_ILP32E_LP64E_RegMask;
  // ...
      return CSR_ILP32_LP64_V_RegMask;
    return CSR_ILP32_LP64_RegMask;
  // ...
      return CSR_ILP32F_LP64F_V_RegMask;
    return CSR_ILP32F_LP64F_RegMask;
  // ...
      return CSR_ILP32D_LP64D_V_RegMask;
    return CSR_ILP32D_LP64D_RegMask;
  }
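// getLargestLegalSuperClass() maps the constrained vector register classes
// (VMV0 and the *NoV0 variants) back to the corresponding full
// VR/VRM2/VRM4/VRM8 classes.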
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRNoV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRM2NoV0RegClass)
    return &RISCV::VRM2RegClass;
  if (RC == &RISCV::VRM4NoV0RegClass)
    return &RISCV::VRM4RegClass;
  if (RC == &RISCV::VRM8NoV0RegClass)
    return &RISCV::VRM8RegClass;
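// getOffsetOpcodes() encodes the scalable part of a StackOffset as a DWARF
// expression: the value is expressed in multiples of the vector register size
// and read at runtime from the vlenb register via DW_OP_bregx.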
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");
  // ...
  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    // ...
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    // ...
  } else if (VLENBSized < 0) {
    // ...
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    // ...
  }
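// getRegAllocationHints() adds target hints on top of the base implementation:
// for instructions that have a compressed (RVC/Zcb) two-address form, the
// physical register already assigned to the other operand is suggested so the
// register allocator can keep the instruction compressible.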
      VirtReg, Order, Hints, MF, VRM, Matrix);
  // ...
    return BaseImplRetVal;

  // ...
  // The lambda whose tail follows is presumably tryAddHint (see the calls
  // further below); it records a candidate physical-register hint.
                                bool NeedGPRC) -> void {
    // ...
    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg)) &&
        !MO.getSubReg() && !VRRegMO.getSubReg()) {
      // ...
      TwoAddrHints.insert(PhysReg);
    }
  };

  // isCompressible() reports whether an instruction has a 16-bit form and
  // whether that form additionally restricts operands to the GPRC class.
  auto isCompressible = [&Subtarget](const MachineInstr &MI, bool &NeedGPRC) {
    // ...
    switch (MI.getOpcode()) {
    // ...
      if (!MI.getOperand(2).isImm())
        /* ... */;
      int64_t Imm = MI.getOperand(2).getImm();
      // ...
      return Subtarget.hasStdExtZcb() && Imm == 255;
    // ...
      return MI.getOperand(2).isImm() &&
             isInt<6>(MI.getOperand(2).getImm());
    // ...
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:
      // ...
      return Subtarget.hasStdExtZcb();
    // ...
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isReg() &&
             MI.getOperand(2).getReg() == RISCV::X0;
    // ...
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isImm() &&
             MI.getOperand(2).getImm() == -1;
    }
  };

  // (Presumably the tail of isCompressibleOpnd(): the operand only counts if
  // it maps to a GPRC physical register.)
  // ...
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
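  // The loop below walks every non-debug use/def of VirtReg. For a
  // compressible instruction it hints the register of the opposite operand
  // (operand 0, 1, or 2, depending on which operand VirtReg occupies), subject
  // to the GPRC constraint (x8-x15) when the compressed form requires it.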
  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    // ...
    unsigned OpIdx = MO.getOperandNo();
    // ...
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
            MI.getOpcode() == RISCV::ADD_UW ||
            isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 && (!NeedGPRC ||
                                MI.getNumExplicitOperands() < 3 ||
                                isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      }
    }
  }

  // ...
    if (TwoAddrHints.count(OrderReg))
      /* ... */;
  // ...
  return BaseImplRetVal;
// Declarations from the corresponding RISCVRegisterInfo header:

RISCVRegisterInfo(unsigned HwMode);

const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                     CallingConv::ID) const override;
const uint32_t *getNoPreservedMask() const override;
BitVector getReservedRegs(const MachineFunction &MF) const override;
bool isAsmClobberable(const MachineFunction &MF,
                      MCRegister PhysReg) const override;
unsigned getRegisterCostTableIndex(const MachineFunction &MF) const override;
void adjustReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator II,
               const DebugLoc &DL, Register DestReg, Register SrcReg,
               StackOffset Offset, MachineInstr::MIFlag Flag,
               MaybeAlign RequiredAlign) const;
void lowerVSPILL(MachineBasicBlock::iterator II) const;
void lowerVRELOAD(MachineBasicBlock::iterator II) const;
bool eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
                         unsigned FIOperandNum,
                         RegScavenger *RS = nullptr) const override;
bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override;
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override;
Register materializeFrameBaseRegister(MachineBasicBlock *MBB, int FrameIdx,
                                      int64_t Offset) const override;
void resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                       int64_t Offset) const override;
bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg,
                        int64_t Offset) const override;
int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
                                 int Idx) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
const TargetRegisterClass *
getLargestLegalSuperClass(const TargetRegisterClass *RC,
                          const MachineFunction &) const override;
void getOffsetOpcodes(const StackOffset &Offset,
                      SmallVectorImpl<uint64_t> &Ops) const override;
bool getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
                           SmallVectorImpl<MCPhysReg> &Hints,
                           const MachineFunction &MF, const VirtRegMap *VRM,
                           const LiveRegMatrix *Matrix) const override;