25#include "llvm/IR/IntrinsicsRISCV.h"
28#define DEBUG_TYPE "riscv-isel"
33#define GET_GLOBALISEL_PREDICATE_BITSET
34#include "RISCVGenGlobalISel.inc"
35#undef GET_GLOBALISEL_PREDICATE_BITSET
57 static constexpr unsigned MaxRecursionDepth = 6;
60 const unsigned Depth = 0)
const;
86 bool IsExternWeak =
false)
const;
94 unsigned &CurOp,
bool IsMasked,
95 bool IsStridedOrIndexed,
96 LLT *IndexVT =
nullptr)
const;
102 unsigned ShiftWidth)
const;
103 ComplexRendererFns selectShiftMaskXLen(
MachineOperand &Root)
const {
104 return selectShiftMask(Root, STI.
getXLen());
106 ComplexRendererFns selectShiftMask32(
MachineOperand &Root)
const {
107 return selectShiftMask(Root, 32);
111 ComplexRendererFns selectSExtBits(
MachineOperand &Root,
unsigned Bits)
const;
112 template <
unsigned Bits>
114 return selectSExtBits(Root, Bits);
117 ComplexRendererFns selectZExtBits(
MachineOperand &Root,
unsigned Bits)
const;
118 template <
unsigned Bits>
120 return selectZExtBits(Root, Bits);
123 ComplexRendererFns selectSHXADDOp(
MachineOperand &Root,
unsigned ShAmt)
const;
124 template <
unsigned ShAmt>
126 return selectSHXADDOp(Root, ShAmt);
130 unsigned ShAmt)
const;
131 template <
unsigned ShAmt>
132 ComplexRendererFns selectSHXADD_UWOp(
MachineOperand &Root)
const {
133 return selectSHXADD_UWOp(Root, ShAmt);
173#define GET_GLOBALISEL_PREDICATES_DECL
174#include "RISCVGenGlobalISel.inc"
175#undef GET_GLOBALISEL_PREDICATES_DECL
177#define GET_GLOBALISEL_TEMPORARIES_DECL
178#include "RISCVGenGlobalISel.inc"
179#undef GET_GLOBALISEL_TEMPORARIES_DECL
184#define GET_GLOBALISEL_IMPL
185#include "RISCVGenGlobalISel.inc"
186#undef GET_GLOBALISEL_IMPL
188RISCVInstructionSelector::RISCVInstructionSelector(
191 : STI(STI),
TII(*STI.getInstrInfo()),
TRI(*STI.getRegisterInfo()), RBI(RBI),
195#include
"RISCVGenGlobalISel.inc"
198#include
"RISCVGenGlobalISel.inc"
204bool RISCVInstructionSelector::hasAllNBitUsers(
const MachineInstr &
MI,
206 const unsigned Depth)
const {
208 assert((
MI.getOpcode() == TargetOpcode::G_ADD ||
209 MI.getOpcode() == TargetOpcode::G_SUB ||
210 MI.getOpcode() == TargetOpcode::G_MUL ||
211 MI.getOpcode() == TargetOpcode::G_SHL ||
212 MI.getOpcode() == TargetOpcode::G_LSHR ||
213 MI.getOpcode() == TargetOpcode::G_AND ||
214 MI.getOpcode() == TargetOpcode::G_OR ||
215 MI.getOpcode() == TargetOpcode::G_XOR ||
216 MI.getOpcode() == TargetOpcode::G_SEXT_INREG ||
Depth != 0) &&
217 "Unexpected opcode");
219 if (
Depth >= RISCVInstructionSelector::MaxRecursionDepth)
222 auto DestReg =
MI.getOperand(0).getReg();
224 assert(UserOp.getParent() &&
"UserOp must have a parent");
225 const MachineInstr &UserMI = *UserOp.getParent();
234 case RISCV::FCVT_D_W:
235 case RISCV::FCVT_S_W:
278InstructionSelector::ComplexRendererFns
279RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
280 unsigned ShiftWidth)
const {
284 using namespace llvm::MIPatternMatch;
290 ShAmtReg = ZExtSrcReg;
309 APInt ShMask(AndMask.
getBitWidth(), ShiftWidth - 1);
310 if (ShMask.isSubsetOf(AndMask)) {
311 ShAmtReg = AndSrcReg;
315 KnownBits Known = VT->getKnownBits(AndSrcReg);
316 if (ShMask.isSubsetOf(AndMask | Known.
Zero))
317 ShAmtReg = AndSrcReg;
324 if (Imm != 0 &&
Imm.urem(ShiftWidth) == 0)
329 if (Imm != 0 &&
Imm.urem(ShiftWidth) == 0) {
333 unsigned NegOpc = Subtarget->
is64Bit() ? RISCV::SUBW : RISCV::SUB;
334 return {{[=](MachineInstrBuilder &MIB) {
335 MachineIRBuilder(*MIB.getInstr())
336 .buildInstr(NegOpc, {ShAmtReg}, {
Register(RISCV::X0),
Reg});
337 MIB.addReg(ShAmtReg);
340 if (
Imm.urem(ShiftWidth) == ShiftWidth - 1) {
344 return {{[=](MachineInstrBuilder &MIB) {
345 MachineIRBuilder(*MIB.getInstr())
346 .buildInstr(RISCV::XORI, {ShAmtReg}, {
Reg})
348 MIB.addReg(ShAmtReg);
353 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
356InstructionSelector::ComplexRendererFns
357RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
358 unsigned Bits)
const {
362 MachineInstr *RootDef = MRI->
getVRegDef(RootReg);
364 if (RootDef->
getOpcode() == TargetOpcode::G_SEXT_INREG &&
367 {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->
getOperand(1)); }}};
371 if ((
Size - VT->computeNumSignBits(RootReg)) < Bits)
372 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
377InstructionSelector::ComplexRendererFns
378RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
379 unsigned Bits)
const {
387 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
392 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
396 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
401InstructionSelector::ComplexRendererFns
402RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
403 unsigned ShAmt)
const {
404 using namespace llvm::MIPatternMatch;
410 const unsigned XLen = STI.
getXLen();
429 if (
Mask.isShiftedMask()) {
430 unsigned Leading = XLen -
Mask.getActiveBits();
431 unsigned Trailing =
Mask.countr_zero();
434 if (*LeftShift && Leading == 0 && C2.
ult(Trailing) && Trailing == ShAmt) {
436 return {{[=](MachineInstrBuilder &MIB) {
437 MachineIRBuilder(*MIB.getInstr())
438 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
446 if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
448 return {{[=](MachineInstrBuilder &MIB) {
449 MachineIRBuilder(*MIB.getInstr())
450 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
451 .addImm(Leading + Trailing);
472 unsigned Leading = XLen -
Mask.getActiveBits();
473 unsigned Trailing =
Mask.countr_zero();
487 return {{[=](MachineInstrBuilder &MIB) {
488 MachineIRBuilder(*MIB.getInstr())
489 .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
499InstructionSelector::ComplexRendererFns
500RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
501 unsigned ShAmt)
const {
502 using namespace llvm::MIPatternMatch;
519 if (
Mask.isShiftedMask()) {
520 unsigned Leading =
Mask.countl_zero();
521 unsigned Trailing =
Mask.countr_zero();
522 if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
524 return {{[=](MachineInstrBuilder &MIB) {
525 MachineIRBuilder(*MIB.getInstr())
526 .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
537InstructionSelector::ComplexRendererFns
538RISCVInstructionSelector::renderVLOp(MachineOperand &Root)
const {
539 assert(Root.
isReg() &&
"Expected operand to be a Register");
542 if (RootDef->
getOpcode() == TargetOpcode::G_CONSTANT) {
544 if (
C->getValue().isAllOnes())
548 return {{[=](MachineInstrBuilder &MIB) {
553 uint64_t ZExtC =
C->getZExtValue();
554 return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
557 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.
getReg()); }}};
560InstructionSelector::ComplexRendererFns
561RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root)
const {
566 if (RootDef->
getOpcode() == TargetOpcode::G_FRAME_INDEX) {
568 [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->
getOperand(1)); },
569 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
573 if (isBaseWithConstantOffset(Root, *MRI)) {
581 if (LHSDef->
getOpcode() == TargetOpcode::G_FRAME_INDEX)
583 [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->
getOperand(1)); },
584 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
587 return {{[=](MachineInstrBuilder &MIB) { MIB.add(
LHS); },
588 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
594 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.
getReg()); },
595 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
604 case CmpInst::Predicate::ICMP_EQ:
606 case CmpInst::Predicate::ICMP_NE:
608 case CmpInst::Predicate::ICMP_ULT:
610 case CmpInst::Predicate::ICMP_SLT:
612 case CmpInst::Predicate::ICMP_UGE:
614 case CmpInst::Predicate::ICMP_SGE:
680 CC = getRISCVCCFromICmp(Pred);
687 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
692 return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
694 return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
696 return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
698 return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
706 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
710 return IsStore ? RISCV::SB : RISCV::LBU;
712 return IsStore ? RISCV::SH : RISCV::LH;
714 return IsStore ? RISCV::SW : RISCV::LW;
716 return IsStore ? RISCV::SD : RISCV::LD;
722void RISCVInstructionSelector::addVectorLoadStoreOperands(
723 MachineInstr &
I, SmallVectorImpl<Register> &SrcOps,
unsigned &CurOp,
724 bool IsMasked,
bool IsStridedOrIndexed, LLT *IndexVT)
const {
726 auto PtrReg =
I.getOperand(CurOp++).getReg();
730 if (IsStridedOrIndexed) {
731 auto StrideReg =
I.getOperand(CurOp++).getReg();
734 *IndexVT = MRI->
getType(StrideReg);
739 auto MaskReg =
I.getOperand(CurOp++).getReg();
744bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
745 MachineInstr &
I)
const {
752 case Intrinsic::riscv_vlm:
753 case Intrinsic::riscv_vle:
754 case Intrinsic::riscv_vle_mask:
755 case Intrinsic::riscv_vlse:
756 case Intrinsic::riscv_vlse_mask: {
757 bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
758 IntrinID == Intrinsic::riscv_vlse_mask;
759 bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
760 IntrinID == Intrinsic::riscv_vlse_mask;
761 LLT VT = MRI->
getType(
I.getOperand(0).getReg());
765 const Register DstReg =
I.getOperand(0).getReg();
768 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
773 if (HasPassthruOperand) {
774 auto PassthruReg =
I.getOperand(CurOp++).getReg();
780 addVectorLoadStoreOperands(
I, SrcOps, CurOp, IsMasked, IsStrided);
783 const RISCV::VLEPseudo *
P =
784 RISCV::getVLEPseudo(IsMasked, IsStrided,
false, Log2SEW,
785 static_cast<unsigned>(LMUL));
787 MachineInstrBuilder PseudoMI =
788 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(
P->Pseudo), DstReg);
793 auto VLOpFn = renderVLOp(
I.getOperand(CurOp++));
794 for (
auto &RenderFn : *VLOpFn)
803 Policy =
I.getOperand(CurOp++).getImm();
813 case Intrinsic::riscv_vloxei:
814 case Intrinsic::riscv_vloxei_mask:
815 case Intrinsic::riscv_vluxei:
816 case Intrinsic::riscv_vluxei_mask: {
817 bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
818 IntrinID == Intrinsic::riscv_vluxei_mask;
819 bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
820 IntrinID == Intrinsic::riscv_vloxei_mask;
821 LLT VT = MRI->
getType(
I.getOperand(0).getReg());
825 const Register DstReg =
I.getOperand(0).getReg();
828 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
833 if (HasPassthruOperand) {
834 auto PassthruReg =
I.getOperand(CurOp++).getReg();
841 addVectorLoadStoreOperands(
I, SrcOps, CurOp, IsMasked,
true, &IndexVT);
847 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
849 "values when XLEN=32");
851 const RISCV::VLX_VSXPseudo *
P = RISCV::getVLXPseudo(
852 IsMasked, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL),
853 static_cast<unsigned>(IndexLMUL));
855 MachineInstrBuilder PseudoMI =
856 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(
P->Pseudo), DstReg);
861 auto VLOpFn = renderVLOp(
I.getOperand(CurOp++));
862 for (
auto &RenderFn : *VLOpFn)
871 Policy =
I.getOperand(CurOp++).getImm();
881 case Intrinsic::riscv_vsm:
882 case Intrinsic::riscv_vse:
883 case Intrinsic::riscv_vse_mask:
884 case Intrinsic::riscv_vsse:
885 case Intrinsic::riscv_vsse_mask: {
886 bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
887 IntrinID == Intrinsic::riscv_vsse_mask;
888 bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
889 IntrinID == Intrinsic::riscv_vsse_mask;
890 LLT VT = MRI->
getType(
I.getOperand(1).getReg());
898 auto PassthruReg =
I.getOperand(CurOp++).getReg();
901 addVectorLoadStoreOperands(
I, SrcOps, CurOp, IsMasked, IsStrided);
904 const RISCV::VSEPseudo *
P = RISCV::getVSEPseudo(
905 IsMasked, IsStrided, Log2SEW,
static_cast<unsigned>(LMUL));
907 MachineInstrBuilder PseudoMI =
913 auto VLOpFn = renderVLOp(
I.getOperand(CurOp++));
914 for (
auto &RenderFn : *VLOpFn)
927 case Intrinsic::riscv_vsoxei:
928 case Intrinsic::riscv_vsoxei_mask:
929 case Intrinsic::riscv_vsuxei:
930 case Intrinsic::riscv_vsuxei_mask: {
931 bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
932 IntrinID == Intrinsic::riscv_vsuxei_mask;
933 bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
934 IntrinID == Intrinsic::riscv_vsoxei_mask;
935 LLT VT = MRI->
getType(
I.getOperand(1).getReg());
943 auto PassthruReg =
I.getOperand(CurOp++).getReg();
947 addVectorLoadStoreOperands(
I, SrcOps, CurOp, IsMasked,
true, &IndexVT);
953 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
955 "values when XLEN=32");
957 const RISCV::VLX_VSXPseudo *
P = RISCV::getVSXPseudo(
958 IsMasked, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL),
959 static_cast<unsigned>(IndexLMUL));
961 MachineInstrBuilder PseudoMI =
967 auto VLOpFn = renderVLOp(
I.getOperand(CurOp++));
968 for (
auto &RenderFn : *VLOpFn)
984bool RISCVInstructionSelector::selectIntrinsic(MachineInstr &
I)
const {
991 case Intrinsic::riscv_vsetvli:
992 case Intrinsic::riscv_vsetvlimax: {
994 bool VLMax = IntrinID == Intrinsic::riscv_vsetvlimax;
996 unsigned Offset = VLMax ? 2 : 3;
1004 Register DstReg =
I.getOperand(0).getReg();
1007 unsigned Opcode = RISCV::PseudoVSETVLI;
1011 Register AVLReg =
I.getOperand(2).getReg();
1013 uint64_t AVL = AVLConst->Value.getZExtValue();
1020 MachineInstr *AVLDef = MRI->
getVRegDef(AVLReg);
1021 if (AVLDef && AVLDef->
getOpcode() == TargetOpcode::G_CONSTANT) {
1023 if (
C->getValue().isAllOnes())
1030 Opcode = RISCV::PseudoVSETVLIX0;
1032 Register AVLReg =
I.getOperand(2).getReg();
1037 uint64_t AVL = AVLConst->Value.getZExtValue();
1039 MachineInstr *PseudoMI =
1041 TII.get(RISCV::PseudoVSETIVLI), DstReg)
1044 I.eraseFromParent();
1051 MachineInstr *PseudoMI =
1052 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode), DstReg)
1055 I.eraseFromParent();
1062bool RISCVInstructionSelector::selectExtractSubvector(MachineInstr &
MI)
const {
1063 assert(
MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);
1068 LLT DstTy = MRI->
getType(DstReg);
1069 LLT SrcTy = MRI->
getType(SrcReg);
1071 unsigned Idx =
static_cast<unsigned>(
MI.getOperand(2).
getImm());
1077 std::tie(SubRegIdx, Idx) =
1079 SrcMVT, DstMVT, Idx, &
TRI);
1085 const TargetRegisterClass *DstRC =
TRI.getRegClass(DstRegClassID);
1090 const TargetRegisterClass *SrcRC =
TRI.getRegClass(SrcRegClassID);
1094 BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
TII.get(TargetOpcode::COPY),
1096 .
addReg(SrcReg, {}, SubRegIdx);
1098 MI.eraseFromParent();
1102bool RISCVInstructionSelector::select(MachineInstr &
MI) {
1104 const unsigned Opc =
MI.getOpcode();
1106 if (!
MI.isPreISelOpcode() ||
Opc == TargetOpcode::G_PHI) {
1107 if (
Opc == TargetOpcode::PHI ||
Opc == TargetOpcode::G_PHI) {
1108 const Register DefReg =
MI.getOperand(0).getReg();
1109 const LLT DefTy = MRI->
getType(DefReg);
1114 const TargetRegisterClass *DefRC =
1123 DefRC =
TRI.getRegClassForTypeOnBank(DefTy, RB, STI.
is64Bit());
1130 MI.setDesc(
TII.get(TargetOpcode::PHI));
1141 if (selectImpl(
MI, *CoverageInfo))
1145 case TargetOpcode::G_ANYEXT:
1146 case TargetOpcode::G_PTRTOINT:
1147 case TargetOpcode::G_INTTOPTR:
1148 case TargetOpcode::G_TRUNC:
1149 case TargetOpcode::G_FREEZE:
1151 case TargetOpcode::G_CONSTANT: {
1153 int64_t
Imm =
MI.getOperand(1).getCImm()->getSExtValue();
1155 if (!materializeImm(DstReg, Imm,
MI))
1158 MI.eraseFromParent();
1161 case TargetOpcode::G_ZEXT:
1162 case TargetOpcode::G_SEXT: {
1163 bool IsSigned =
Opc != TargetOpcode::G_ZEXT;
1166 LLT SrcTy = MRI->
getType(SrcReg);
1173 RISCV::GPRBRegBankID &&
1174 "Unexpected ext regbank");
1177 if (IsSigned && SrcSize == 32) {
1178 MI.setDesc(
TII.get(RISCV::ADDIW));
1185 if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
1186 MI.setDesc(
TII.get(RISCV::ADD_UW));
1193 if (SrcSize == 16 &&
1194 (STI.hasStdExtZbb() || (!IsSigned && STI.hasStdExtZbkb()))) {
1195 MI.setDesc(
TII.get(IsSigned ? RISCV::SEXT_H
1196 : STI.isRV64() ? RISCV::ZEXT_H_RV64
1197 : RISCV::ZEXT_H_RV32));
1204 MachineInstr *ShiftLeft =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1205 TII.get(RISCV::SLLI), ShiftLeftReg)
1209 MachineInstr *ShiftRight =
1211 TII.get(IsSigned ? RISCV::SRAI : RISCV::SRLI), DstReg)
1215 MI.eraseFromParent();
1218 case TargetOpcode::G_FCONSTANT: {
1221 const APFloat &FPimm =
MI.getOperand(1).getFPImm()->getValueAPF();
1230 if (!materializeImm(GPRReg,
Imm.getSExtValue(),
MI))
1234 unsigned Opcode =
Size == 64 ? RISCV::FMV_D_X
1235 :
Size == 32 ? RISCV::FMV_W_X
1237 MachineInstr *FMV =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1238 TII.get(Opcode), DstReg)
1244 "Unexpected size or subtarget");
1248 MachineInstr *FCVT =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1249 TII.get(RISCV::FCVT_D_W), DstReg)
1254 MI.eraseFromParent();
1262 if (!materializeImm(GPRRegHigh,
Imm.extractBits(32, 32).getSExtValue(),
1265 if (!materializeImm(GPRRegLow,
Imm.trunc(32).getSExtValue(),
MI))
1267 MachineInstr *PairF64 =
1269 TII.get(RISCV::BuildPairF64Pseudo), DstReg)
1275 MI.eraseFromParent();
1278 case TargetOpcode::G_GLOBAL_VALUE: {
1279 auto *GV =
MI.getOperand(1).getGlobal();
1280 if (GV->isThreadLocal()) {
1285 return selectAddr(
MI, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
1287 case TargetOpcode::G_JUMP_TABLE:
1288 case TargetOpcode::G_CONSTANT_POOL:
1289 return selectAddr(
MI);
1290 case TargetOpcode::G_BRCOND: {
1295 MachineInstr *Bcc =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1299 .
addMBB(
MI.getOperand(1).getMBB());
1300 MI.eraseFromParent();
1304 case TargetOpcode::G_BRINDIRECT:
1305 MI.setDesc(
TII.get(RISCV::PseudoBRIND));
1309 case TargetOpcode::G_SELECT:
1310 return selectSelect(
MI);
1311 case TargetOpcode::G_FCMP:
1312 return selectFPCompare(
MI);
1313 case TargetOpcode::G_FENCE: {
1318 emitFence(FenceOrdering, FenceSSID,
MI);
1319 MI.eraseFromParent();
1322 case TargetOpcode::G_IMPLICIT_DEF:
1323 return selectImplicitDef(
MI);
1324 case TargetOpcode::G_UNMERGE_VALUES:
1326 case TargetOpcode::G_LOAD:
1327 case TargetOpcode::G_STORE: {
1331 LLT PtrTy = MRI->
getType(PtrReg);
1333 const RegisterBank &RB = *RBI.
getRegBank(ValReg, *MRI,
TRI);
1334 if (RB.
getID() != RISCV::GPRBRegBankID)
1338 const RegisterBank &PtrRB = *RBI.
getRegBank(PtrReg, *MRI,
TRI);
1341 "Load/Store pointer operand isn't a GPR");
1342 assert(PtrTy.
isPointer() &&
"Load/Store pointer operand isn't a pointer");
1359 if (NewOpc ==
MI.getOpcode())
1363 auto AddrModeFns = selectAddrRegImm(
MI.getOperand(1));
1368 MachineInstrBuilder NewInst =
1376 for (
auto &Fn : *AddrModeFns)
1378 MI.eraseFromParent();
1383 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1384 return selectIntrinsicWithSideEffects(
MI);
1385 case TargetOpcode::G_INTRINSIC:
1386 return selectIntrinsic(
MI);
1387 case TargetOpcode::G_EXTRACT_SUBVECTOR:
1388 return selectExtractSubvector(
MI);
1394bool RISCVInstructionSelector::selectUnmergeValues(MachineInstr &
MI)
const {
1395 assert(
MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
1397 if (!Subtarget->hasStdExtZfa())
1401 if (
MI.getNumOperands() != 3)
1406 if (!isRegInFprb(Src) || !isRegInGprb(
Lo) || !isRegInGprb(
Hi))
1409 MachineInstr *ExtractLo =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1410 TII.get(RISCV::FMV_X_W_FPR64),
Lo)
1414 MachineInstr *ExtractHi =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1415 TII.get(RISCV::FMVH_X_D),
Hi)
1419 MI.eraseFromParent();
1423bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &
Op) {
1428 MachineInstr &ParentMI = *
Op.getParent();
1431 MachineInstr *PtrToInt =
1433 TII.get(TargetOpcode::G_PTRTOINT), IntReg)
1436 return select(*PtrToInt);
1439void RISCVInstructionSelector::preISelLower(MachineInstr &
MI) {
1440 switch (
MI.getOpcode()) {
1441 case TargetOpcode::G_PTR_ADD: {
1445 replacePtrWithInt(
MI.getOperand(1));
1446 MI.setDesc(
TII.get(TargetOpcode::G_ADD));
1450 case TargetOpcode::G_PTRMASK: {
1453 replacePtrWithInt(
MI.getOperand(1));
1454 MI.setDesc(
TII.get(TargetOpcode::G_AND));
1461void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
1462 const MachineInstr &
MI,
1464 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1465 "Expected G_CONSTANT");
1466 int64_t CstVal =
MI.getOperand(1).getCImm()->getSExtValue();
1470void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
1471 const MachineInstr &
MI,
1473 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1474 "Expected G_CONSTANT");
1475 uint64_t CstVal =
MI.getOperand(1).getCImm()->getZExtValue();
1479void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
1480 const MachineInstr &
MI,
1482 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1483 "Expected G_CONSTANT");
1484 uint64_t CstVal =
MI.getOperand(1).getCImm()->getZExtValue();
1488void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
1489 const MachineInstr &
MI,
1491 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1492 "Expected G_CONSTANT");
1493 int64_t CstVal =
MI.getOperand(1).getCImm()->getSExtValue();
1497void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
1498 const MachineInstr &
MI,
1500 assert(
MI.getOpcode() == TargetOpcode::G_FRAME_INDEX &&
OpIdx == -1 &&
1501 "Expected G_FRAME_INDEX");
1502 MIB.
add(
MI.getOperand(1));
1505void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
1506 const MachineInstr &
MI,
1508 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1509 "Expected G_CONSTANT");
1510 uint64_t
C =
MI.getOperand(1).getCImm()->getZExtValue();
1514void RISCVInstructionSelector::renderXLenSubTrailingOnes(
1515 MachineInstrBuilder &MIB,
const MachineInstr &
MI,
int OpIdx)
const {
1516 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1517 "Expected G_CONSTANT");
1518 uint64_t
C =
MI.getOperand(1).getCImm()->getZExtValue();
1522void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
1523 const MachineInstr &
MI,
1525 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1526 "Expected G_CONSTANT");
1527 int64_t
Imm =
MI.getOperand(1).getCImm()->getSExtValue();
1528 int64_t Adj =
Imm < 0 ? -2048 : 2047;
1532void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
1533 const MachineInstr &
MI,
1535 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1536 "Expected G_CONSTANT");
1537 int64_t
Imm =
MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
1541bool RISCVInstructionSelector::isRegInGprb(
Register Reg)
const {
1545bool RISCVInstructionSelector::isRegInFprb(
Register Reg)
const {
1549bool RISCVInstructionSelector::selectCopy(MachineInstr &
MI)
const {
1550 MachineOperand Dst =
MI.getOperand(0);
1556 const TargetRegisterClass *DstRC =
1557 TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1560 "Register class not available for LLT, register bank combination");
1571 MI.setDesc(
TII.get(RISCV::COPY));
1575bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &
MI)
const {
1576 assert(
MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
1578 const Register DstReg =
MI.getOperand(0).getReg();
1579 const TargetRegisterClass *DstRC =
TRI.getRegClassForTypeOnBank(
1583 "Register class not available for LLT, register bank combination");
1589 MI.setDesc(
TII.get(TargetOpcode::IMPLICIT_DEF));
1593bool RISCVInstructionSelector::materializeImm(
Register DstReg, int64_t Imm,
1594 MachineInstr &
MI)
const {
1595 MachineBasicBlock &
MBB = *
MI.getParent();
1605 unsigned NumInsts = Seq.
size();
1608 for (
unsigned i = 0; i < NumInsts; i++) {
1612 const RISCVMatInt::Inst &
I = Seq[i];
1615 switch (
I.getOpndKind()) {
1647bool RISCVInstructionSelector::selectAddr(MachineInstr &
MI,
bool IsLocal,
1648 bool IsExternWeak)
const {
1649 assert((
MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1650 MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
1651 MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
1652 "Unexpected opcode");
1654 const MachineOperand &DispMO =
MI.getOperand(1);
1657 const LLT DefTy = MRI->
getType(DefReg);
1664 if (IsLocal && !Subtarget->allowTaggedGlobals()) {
1668 MI.setDesc(
TII.get(RISCV::PseudoLLA));
1677 MachineFunction &MF = *
MI.getParent()->getParent();
1685 TII.get(RISCV::PseudoLGA), DefReg)
1691 MI.eraseFromParent();
1698 "Unsupported code model for lowering",
MI);
1706 MachineInstr *AddrHi =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1707 TII.get(RISCV::LUI), AddrHiDest)
1713 TII.get(RISCV::ADDI), DefReg)
1719 MI.eraseFromParent();
1732 MachineFunction &MF = *
MI.getParent()->getParent();
1740 TII.get(RISCV::PseudoLGA), DefReg)
1746 MI.eraseFromParent();
1753 MI.setDesc(
TII.get(RISCV::PseudoLLA));
1761bool RISCVInstructionSelector::selectSelect(MachineInstr &
MI)
const {
1768 Register DstReg = SelectMI.getReg(0);
1770 unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
1773 Opc =
Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
1774 : RISCV::Select_FPR64_Using_CC_GPR;
1783 .
addReg(SelectMI.getTrueReg())
1784 .
addReg(SelectMI.getFalseReg());
1785 MI.eraseFromParent();
1797 return Size == 16 ? RISCV::FLT_H :
Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
1799 return Size == 16 ? RISCV::FLE_H :
Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
1801 return Size == 16 ? RISCV::FEQ_H :
Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
1814 assert(!isLegalFCmpPredicate(Pred) &&
"Predicate already legal?");
1817 if (isLegalFCmpPredicate(InvPred)) {
1825 if (isLegalFCmpPredicate(InvPred)) {
1830 if (isLegalFCmpPredicate(InvPred)) {
1842bool RISCVInstructionSelector::selectFPCompare(MachineInstr &
MI)
const {
1855 bool NeedInvert =
false;
1869 MachineInstr *Cmp1 =
1876 MachineInstr *Cmp2 =
1885 TII.get(RISCV::OR), TmpReg)
1904 MachineInstr *Cmp1 =
1911 MachineInstr *Cmp2 =
1918 TII.get(RISCV::AND), TmpReg)
1929 TII.get(RISCV::XORI), DstReg)
1935 MI.eraseFromParent();
1939void RISCVInstructionSelector::emitFence(
AtomicOrdering FenceOrdering,
1941 MachineInstr &
MI)
const {
1942 MachineBasicBlock &
MBB = *
MI.getParent();
1945 if (STI.hasStdExtZtso()) {
1948 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
1972 unsigned Pred, Succ;
1973 switch (FenceOrdering) {
1976 case AtomicOrdering::AcquireRelease:
1980 case AtomicOrdering::Acquire:
1985 case AtomicOrdering::Release:
1990 case AtomicOrdering::SequentiallyConsistent:
2000InstructionSelector *
2004 return new RISCVInstructionSelector(TM, Subtarget, RBI);
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
static bool hasAllWUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI)
static bool hasAllNBitUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI, unsigned OrigBits)
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
static StringRef getName(Value *V)
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS, CmpInst::Predicate &Pred, bool &NeedInvert)
static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC, Register &LHS, Register &RHS, MachineRegisterInfo &MRI)
const SmallVectorImpl< MachineOperand > & Cond
This file declares the targeting of the RegisterBankInfo class for RISC-V.
APInt bitcastToAPInt() const
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
This is an important base class in LLVM.
virtual void setupMF(MachineFunction &mf, GISelValueTracking *vt, CodeGenCoverage *covinfo=nullptr, ProfileSummaryInfo *psi=nullptr, BlockFrequencyInfo *bfi=nullptr)
Setup per-MF executor state.
Register getPointerReg() const
Get the source register of the pointer value.
MachineMemOperand & getMMO() const
Get the MachineMemOperand on this instruction.
LocationSize getMemSizeInBits() const
Returns the size in bits of the memory access.
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
constexpr unsigned getScalarSizeInBits() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr unsigned getAddressSpace() const
TypeSize getValue() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addDisp(const MachineOperand &Disp, int64_t off, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is found.
iterator_range< use_nodbg_iterator > use_nodbg_operands(Register Reg) const
const RegClassOrRegBank & getRegClassOrRegBank(Register Reg) const
Return the register bank or register class of Reg.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified register class.
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
LLVM_ABI void setRegBank(Register Reg, const RegisterBank &RegBank)
Set the register bank to RegBank for Reg.
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
Analysis providing profile information.
This class provides the information for the target register banks.
std::optional< unsigned > getRealVLen() const
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
static unsigned getRegClassIDForVecVT(MVT VT)
static RISCVVType::VLMUL getLMUL(MVT VT)
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
bool isPositionIndependent() const
CodeModel::Model getCodeModel() const
Returns the code model.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
operand_type_match m_Reg()
SpecificConstantMatch m_SpecificICst(const APInt &RequestedValue)
Matches a constant equal to RequestedValue.
operand_type_match m_Pred()
UnaryOp_match< SrcTy, TargetOpcode::G_ZEXT > m_GZExt(const SrcTy &Src)
ConstantMatch< APInt > m_ICst(APInt &Cst)
BinaryOp_match< LHS, RHS, TargetOpcode::G_ADD, true > m_GAdd(const LHS &L, const RHS &R)
OneNonDBGUse_match< SubPat > m_OneNonDBGUse(const SubPat &SP)
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_ICMP > m_GICmp(const Pred &P, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SUB > m_GSub(const LHS &L, const RHS &R)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SHL, false > m_GShl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_AND, true > m_GAnd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_LSHR, false > m_GLShr(const LHS &L, const RHS &R)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
static unsigned decodeVSEW(unsigned VSEW)
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul)
LLVM_ABI unsigned encodeVTYPE(VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic, bool AltFmt=false)
static constexpr int64_t VLMaxSentinel
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
@ System
Synchronized with respect to all concurrently executing threads.
This is an optimization pass for GlobalISel generic memory operations.
PointerUnion< const TargetRegisterClass *, const RegisterBank * > RegClassOrRegBank
Convenient type to represent either a register class or a register bank.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isStrongerThanMonotonic(AtomicOrdering AO)
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI void constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
LLVM_ABI MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
InstructionSelector * createRISCVInstructionSelector(const RISCVTargetMachine &TM, const RISCVSubtarget &Subtarget, const RISCVRegisterBankInfo &RBI)
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void reportGISelFailure(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector&lt;T, 0&gt;).
bool isa(const From &Val)
isa&lt;X&gt; - Return true if the parameter to the template is an instance of one of the template type arguments.
AtomicOrdering
Atomic ordering for LLVM's memory model.
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
@ Or
Bitwise or logical OR of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its APInt value and def register.
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.