#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-isel"

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
static constexpr unsigned MaxRecursionDepth = 6;

bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
                     const unsigned Depth = 0) const;

bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
                bool IsExternWeak = false) const;

void addVectorLoadStoreOperands(MachineInstr &I,
                                SmallVectorImpl<SrcOp> &SrcOps,
                                unsigned &CurOp, bool IsMasked,
                                bool IsStridedOrIndexed,
                                LLT *IndexVT = nullptr) const;

ComplexRendererFns selectShiftMask(MachineOperand &Root,
                                   unsigned ShiftWidth) const;
ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
  return selectShiftMask(Root, STI.getXLen());
}
ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
  return selectShiftMask(Root, 32);
}

ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
template <unsigned Bits>
ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
  return selectSExtBits(Root, Bits);
}

ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
template <unsigned Bits>
ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
  return selectZExtBits(Root, Bits);
}

ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
template <unsigned ShAmt>
ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
  return selectSHXADDOp(Root, ShAmt);
}

ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                     unsigned ShAmt) const;
template <unsigned ShAmt>
ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
  return selectSHXADD_UWOp(Root, ShAmt);
}
#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
                                               unsigned Bits,
                                               const unsigned Depth) const {
  assert((MI.getOpcode() == TargetOpcode::G_ADD ||
          MI.getOpcode() == TargetOpcode::G_SUB ||
          MI.getOpcode() == TargetOpcode::G_MUL ||
          MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_AND ||
          MI.getOpcode() == TargetOpcode::G_OR ||
          MI.getOpcode() == TargetOpcode::G_XOR ||
          MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
         "Unexpected opcode");

  if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
    return false;

  auto DestReg = MI.getOperand(0).getReg();
  for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
    assert(UserOp.getParent() && "UserOp must have a parent");
    const MachineInstr &UserMI = *UserOp.getParent();
    // ...
    switch (UserMI.getOpcode()) {
    // ...
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_S_W:
    // ...
    }
  }

  return true;
}
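// RISC-V shifts only read the low log2(ShiftWidth) bits of the shift
// amount, so an explicit (and shamt, ShiftWidth - 1) is redundant and can
// be folded away. Negated and inverted shift amounts can likewise be
// selected as a bare SUB/SUBW or XORI, since -y and ~y agree with the true
// amount modulo ShiftWidth. A hypothetical illustration (not from this
// file):
//   (G_SHL x, (G_AND y, 31))  ->  sllw x, y   when ShiftWidth == 32.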
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
                                          unsigned ShiftWidth) const {
  using namespace llvm::MIPatternMatch;

  // Peel off a zero-extend of the shift amount.
  // ...
    ShAmtReg = ZExtSrcReg;

  // An AND mask that covers the low log2(ShiftWidth) bits is redundant.
  // ...
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    // ...
      KnownBits Known = VT->getKnownBits(AndSrcReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
  // ...
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // ...
  // ...
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // A shift amount of (C - y) with C % ShiftWidth == 0 is equivalent
      // to -y, so emit a plain negate: SUB(W) x0, y.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // (C - y) with C % ShiftWidth == ShiftWidth - 1 is equivalent to ~y.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  // ...
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}
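// Matches an operand that is already sign-extended from Bits bits: either
// the source of a matching G_SEXT_INREG, or any register for which value
// tracking proves enough sign bits.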
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  // ...
  Register RootReg = Root.getReg();
  MachineInstr *RootDef = MRI->getVRegDef(RootReg);
  if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
      RootDef->getOperand(2).getImm() == Bits)
    return {
        {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}
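// Matches an operand that is already zero-extended from Bits bits, looking
// through an explicit mask or a G_ZEXT of a Bits-wide source.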
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  Register RootReg = Root.getReg();

  Register RegX;
  // ...
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
      MRI->getType(RegX).getScalarSizeInBits() == Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  // ...
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}
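// SHXADD (Zba) computes rd = (rs1 << ShAmt) + rs2 for ShAmt in {1,2,3}.
// This ComplexPattern matches shift/mask combinations that can be rewritten
// to feed the shifted operand. Hypothetical example (not from this file):
// for (add (shl x, 2), y), the operand (shl x, 2) is selected into SH2ADD.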
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  // ...
  const unsigned XLen = STI.getXLen();
  // ...
    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // (and (shl y, c2), mask) where mask has no leading zeros and
      // Trailing == ShAmt: shift right by Trailing - c2, then SHXADD.
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // (and (lshr y, c2), mask) where mask has c2 leading zeros and
      // Trailing == ShAmt: shift right by Leading + Trailing, then SHXADD.
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  // ...
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();
    // ...
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
        // ...
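// SHXADD.UW (Zba) zero-extends the low 32 bits of rs1 before the
// shift-and-add. This matches (shl (and x, mask), c2)-style operands whose
// mask/shift combination is equivalent to that zero-extend-and-shift.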
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  // ...
    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
          // ...
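// Renders the VL operand of a vector pseudo: constant AVLs become
// immediates (all-ones meaning VLMAX), anything else is passed through as a
// register.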
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
  assert(Root.isReg() && "Expected operand to be a Register");
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());

  if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
    auto C = RootDef->getOperand(1).getCImm();
    if (C->getValue().isAllOnes())
      // An all-ones AVL requests VLMAX; encode it as the sentinel
      // immediate recognized by the vsetvli insertion pass.
      return {{[=](MachineInstrBuilder &MIB) {
        MIB.addImm(RISCV::VLMaxSentinel);
      }}};
    // ...
    uint64_t ZExtC = C->getZExtValue();
    return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
}
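// Selects the (base register, simm12 offset) pair used by scalar loads and
// stores: frame indices and base-plus-constant sums are split into their
// components; everything else becomes (reg, 0).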
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  // ...
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, *MRI)) {
    // ...
    MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
    // ...
      // Fold a frame index plus in-range constant directly into the
      // frame-index operand.
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    // ...
  }

  // ...
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}
// Map an ICmp predicate onto the RISC-V branch condition code.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate Pred) {
  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

// ...
  CC = getRISCVCCFromICmp(Pred);
// ...

// Select the RISC-V Zalasr (acquire/release) opcode for the G_LOAD or
// G_STORE operation GenericOpc at the given access size.
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  case 8:
    return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
  case 16:
    return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
  case 32:
    return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
  case 64:
    return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
  default:
    return GenericOpc;
  }
}

// Select the plain reg+imm opcode for the G_LOAD or G_STORE operation
// GenericOpc at the given access size.
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  case 8:
    return IsStore ? RISCV::SB : RISCV::LBU;
  case 16:
    return IsStore ? RISCV::SH : RISCV::LH;
  case 32:
    return IsStore ? RISCV::SW : RISCV::LW;
  case 64:
    return IsStore ? RISCV::SD : RISCV::LD;
  default:
    return GenericOpc;
  }
}
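// Collects the common trailing operands of RVV load/store pseudos from the
// intrinsic call, in order: base pointer, optional stride/index, optional
// mask. CurOp is advanced past each operand that is consumed.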
void RISCVInstructionSelector::addVectorLoadStoreOperands(
    MachineInstr &I, SmallVectorImpl<SrcOp> &SrcOps, unsigned &CurOp,
    bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
  // Base pointer.
  auto PtrReg = I.getOperand(CurOp++).getReg();
  SrcOps.push_back(PtrReg);

  // Stride or index.
  if (IsStridedOrIndexed) {
    auto StrideReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(StrideReg);
    if (IndexVT)
      *IndexVT = MRI->getType(StrideReg);
  }

  // Mask.
  if (IsMasked) {
    auto MaskReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(MaskReg);
  }
}
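// Manual selection for the RVV load/store intrinsics: each case decodes the
// masked/strided/indexed variant, gathers the operands, looks up the
// matching pseudo in the generated tables, and rebuilds the instruction as
// that pseudo with VL/SEW/policy operands appended.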
bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
    MachineInstr &I, MachineIRBuilder &MIB) const {
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::riscv_vlm:
  case Intrinsic::riscv_vle:
  case Intrinsic::riscv_vle_mask:
  case Intrinsic::riscv_vlse:
  case Intrinsic::riscv_vlse_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
                    IntrinID == Intrinsic::riscv_vlse_mask;
    bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
                     IntrinID == Intrinsic::riscv_vlse_mask;
    LLT VT = MRI->getType(I.getOperand(0).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
    auto LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));

    const Register DstReg = I.getOperand(0).getReg();

    // Every load except vlm carries a passthru operand.
    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
    unsigned CurOp = 2;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    if (HasPassthruOperand) {
      auto PassthruReg = I.getOperand(CurOp++).getReg();
      SrcOps.push_back(PassthruReg);
    }
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

    const RISCV::VLEPseudo *P =
        RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF=*/false, Log2SEW,
                            static_cast<unsigned>(LMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Policy (only some variants carry an explicit policy operand).
    // ...
      Policy = I.getOperand(CurOp++).getImm();
    PseudoMI.addImm(Policy);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vloxei:
  case Intrinsic::riscv_vloxei_mask:
  case Intrinsic::riscv_vluxei:
  case Intrinsic::riscv_vluxei_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
                    IntrinID == Intrinsic::riscv_vluxei_mask;
    bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
                     IntrinID == Intrinsic::riscv_vloxei_mask;
    LLT VT = MRI->getType(I.getOperand(0).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
    auto LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));

    const Register DstReg = I.getOperand(0).getReg();

    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
    unsigned CurOp = 2;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    if (HasPassthruOperand) {
      auto PassthruReg = I.getOperand(CurOp++).getReg();
      SrcOps.push_back(PassthruReg);
    }

    LLT IndexVT;
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked,
                               /*IsStridedOrIndexed=*/true, &IndexVT);

    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
    auto IndexLMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      reportFatalUsageError("The V extension does not support EEW=64 for index "
                            "values when XLEN=32");
    }

    const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Policy (only some variants carry an explicit policy operand).
    // ...
      Policy = I.getOperand(CurOp++).getImm();
    PseudoMI.addImm(Policy);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vsm:
  case Intrinsic::riscv_vse:
  case Intrinsic::riscv_vse_mask:
  case Intrinsic::riscv_vsse:
  case Intrinsic::riscv_vsse_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
                    IntrinID == Intrinsic::riscv_vsse_mask;
    bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
                     IntrinID == Intrinsic::riscv_vsse_mask;
    LLT VT = MRI->getType(I.getOperand(1).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
    auto LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));

    // Stores have no result; operand 1 is the value to store.
    unsigned CurOp = 1;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    auto PassthruReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(PassthruReg);

    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

    const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
        IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vsoxei:
  case Intrinsic::riscv_vsoxei_mask:
  case Intrinsic::riscv_vsuxei:
  case Intrinsic::riscv_vsuxei_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
                    IntrinID == Intrinsic::riscv_vsuxei_mask;
    bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
                     IntrinID == Intrinsic::riscv_vsoxei_mask;
    LLT VT = MRI->getType(I.getOperand(1).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
    auto LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));

    // Stores have no result; operand 1 is the value to store.
    unsigned CurOp = 1;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    auto PassthruReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(PassthruReg);

    LLT IndexVT;
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked,
                               /*IsStridedOrIndexed=*/true, &IndexVT);

    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
    auto IndexLMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      reportFatalUsageError("The V extension does not support EEW=64 for index "
                            "values when XLEN=32");
    }

    const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  }
}
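// Intrinsics selected from plain G_INTRINSIC; currently the
// vsetvli/vsetvlimax family, which is lowered to the PseudoVSET* pseudos
// consumed by the vsetvli insertion pass.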
bool RISCVInstructionSelector::selectIntrinsic(MachineInstr &I,
                                               MachineIRBuilder &MIB) const {
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::riscv_vsetvli:
  case Intrinsic::riscv_vsetvlimax: {
    bool VLMax = IntrinID == Intrinsic::riscv_vsetvlimax;
    // vsetvlimax omits the AVL operand, so SEW/LMUL start one operand
    // earlier.
    unsigned Offset = VLMax ? 2 : 3;
    // ...
    Register DstReg = I.getOperand(0).getReg();
    // ...
    unsigned Opcode = RISCV::PseudoVSETVLI;

    // ...
      Register AVLReg = I.getOperand(2).getReg();
      // ...
        uint64_t AVL = AVLConst->Value.getZExtValue();
        // ...
      MachineInstr *AVLDef = MRI->getVRegDef(AVLReg);
      if (AVLDef && AVLDef->getOpcode() == TargetOpcode::G_CONSTANT) {
        // ...
        if (C->getValue().isAllOnes())
          // An all-ones AVL requests VLMAX; use the X0 form.
          Opcode = RISCV::PseudoVSETVLIX0;
      }
    // ...
      Register AVLReg = I.getOperand(2).getReg();
      // ...
        uint64_t AVL = AVLConst->Value.getZExtValue();
        // A constant AVL that fits the uimm5 field can use the immediate
        // form.
        auto PseudoMI = MIB.buildInstr(RISCV::PseudoVSETIVLI, {DstReg}, {})
        // ...
        I.eraseFromParent();
        // ...

    // ...
    MIB.buildInstr(Opcode, {DstReg}, {VLOperand}).addImm(VTypeI);
    I.eraseFromParent();
    return true;
  }
  // ...
  }
}
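// G_EXTRACT_SUBVECTOR is selected as a subregister copy: the extract index
// is translated into an LMUL-sized subregister index on the source vector
// register class.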
bool RISCVInstructionSelector::selectExtractSubvector(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();

  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);

  unsigned Idx = static_cast<unsigned>(MI.getOperand(2).getImm());
  // ...

  // Map the extract index onto a subregister index of the source vector.
  MVT SrcMVT = getMVTForLLT(SrcTy);
  MVT DstMVT = getMVTForLLT(DstTy);
  unsigned SubRegIdx;
  std::tie(SubRegIdx, Idx) =
      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
          SrcMVT, DstMVT, Idx, &TRI);
  // ...

  unsigned DstRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(DstMVT);
  const TargetRegisterClass *DstRC = TRI.getRegClass(DstRegClassID);
  // ...
  unsigned SrcRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(SrcMVT);
  const TargetRegisterClass *SrcRC = TRI.getRegClass(SrcRegClassID);
  // ...

  MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {})
      .addReg(SrcReg, {}, SubRegIdx);

  MI.eraseFromParent();
  return true;
}
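// Top-level entry point: runs preISelLower, lets the TableGen-erated
// selectImpl handle whatever it can, and falls back to the manual cases
// below.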
bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
      // ...
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
      // ...

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }
    // ...
    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    const Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    bool IsSigned = Opc != TargetOpcode::G_ZEXT;
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    LLT SrcTy = MRI->getType(SrcReg);
    unsigned SrcSize = SrcTy.getSizeInBits();
    // ...
           RISCV::GPRBRegBankID &&
           "Unexpected ext regbank");

    // For 32->64 sign extension use sext.w (ADDIW rd, rs, 0).
    if (IsSigned && SrcSize == 32) {
      MI.setDesc(TII.get(RISCV::ADDIW));
      MI.addOperand(MachineOperand::CreateImm(0));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // For 32->64 zero extension use zext.w (ADD_UW rd, rs, x0) when Zba is
    // available.
    if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
      MI.setDesc(TII.get(RISCV::ADD_UW));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // 16-bit extensions can use sext.h/zext.h from Zbb.
    if (SrcSize == 16 && STI.hasStdExtZbb()) {
      MI.setDesc(TII.get(IsSigned         ? RISCV::SEXT_H
                         : STI.isRV64()   ? RISCV::ZEXT_H_RV64
                                          : RISCV::ZEXT_H_RV32));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Zbkb's pack(w) zero-extends the low half.
    if (!IsSigned && SrcSize == 16 && STI.hasStdExtZbkb()) {
      MI.setDesc(TII.get(STI.is64Bit() ? RISCV::PACKW : RISCV::PACK));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Otherwise shift the value to the top of the register and shift back
    // down with SRAI (sext) or SRLI (zext).
    auto ShiftLeft =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {SrcReg})
            .addImm(STI.getXLen() - SrcSize);
    if (!ShiftLeft.constrainAllUses(TII, TRI, RBI))
      return false;
    auto ShiftRight = MIB.buildInstr(IsSigned ? RISCV::SRAI : RISCV::SRLI,
                                     {DstReg}, {ShiftLeft})
                          .addImm(STI.getXLen() - SrcSize);
    if (!ShiftRight.constrainAllUses(TII, TRI, RBI))
      return false;
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // ...
    const Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    // ...
    // Materialize the bit pattern in a GPR, then move it into the FPR.
    APInt Imm = FPimm.bitcastToAPInt();
    // ...
      GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
        return false;
      // ...
      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
      // ...
             "Unexpected size or subtarget");
      // ...
      MachineInstrBuilder FCVT =
      // ...
      MI.eraseFromParent();
      return true;
    // ...

    // On RV32, a 64-bit immediate is split into two GPRs and paired.
    Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                        MIB))
      return false;
    if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
      return false;
    MachineInstrBuilder PairF64 = MIB.buildInstr(
        RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
    // ...
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB);
  case TargetOpcode::G_BRCOND: {
    // Decompose the condition and emit the corresponding conditional
    // branch.
    // ...
        .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    // ...
    LLT PtrTy = MRI->getType(PtrReg);

    // ...
    if (RB.getID() != RISCV::GPRBRegBankID)
      return false;

    // ...
           "Load/Store pointer operand isn't a GPR");
    assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");

    // Pick the Zalasr (atomic) or plain reg+imm opcode for this access
    // size, then rebuild the instruction with the selected addressing mode.
    // ...
    if (NewOpc == MI.getOpcode())
      return false;

    // ...
    auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
    // ...
    auto NewInst = MIB.buildInstr(NewOpc, {}, {}, MI.getFlags());
    // ...
    for (auto &Fn : *AddrModeFns)
      Fn(NewInst);
    MI.eraseFromParent();

    return constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
  }
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWithSideEffects(MI, MIB);
  case TargetOpcode::G_INTRINSIC:
    return selectIntrinsic(MI, MIB);
  case TargetOpcode::G_EXTRACT_SUBVECTOR:
    return selectExtractSubvector(MI, MIB);
  default:
    return false;
  }
}
bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  if (!Subtarget->hasStdExtZfa())
    return false;

  // Split an FPR64 into two GPRs using Zfa's fmv.x.w / fmvh.x.d.
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;

  MachineInstr *ExtractLo = MIB.buildInstr(RISCV::FMV_X_W_FPR64, {Lo}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
    return false;

  MachineInstr *ExtractHi = MIB.buildInstr(RISCV::FMVH_X_D, {Hi}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
    return false;

  MI.eraseFromParent();
  return true;
}
bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB) {
  Register PtrReg = Op.getReg();
  assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}
void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    // Lower pointer arithmetic to integer arithmetic on the XLen type.
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI->setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI->setType(DstReg, sXLen);
    break;
  }
  }
}
void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
         "Expected G_FRAME_INDEX");
  MIB.add(MI.getOperand(1));
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

void RISCVInstructionSelector::renderXLenSubTrailingOnes(
    MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - llvm::countr_one(C));
}

void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
  int64_t Adj = Imm < 0 ? -2048 : 2047;
  MIB.addImm(Imm - Adj);
}

void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
  MIB.addImm(Imm);
}
const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;
    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;
    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;
    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}
bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
  // ...
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");
  // ...
  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");
  // ...
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}
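// Expands an arbitrary XLen immediate into the LUI/ADDI(W)/SLLI/...
// sequence computed by RISCVMatInt::generateInstSeq, creating a fresh
// virtual register for every intermediate step. For example (illustrative,
// not from this file), 0x12345678 on RV64 materializes roughly as
//   lui   tmp, 0x12345
//   addiw dst, tmp, 0x678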
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  // ...
  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    // ...
    switch (I.getOpndKind()) {
    // ...
                              {SrcReg, Register(RISCV::X0)});
    // ...
    }

    // ...
    SrcReg = TmpReg;
  }

  return true;
}
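// Lowers G_GLOBAL_VALUE / G_JUMP_TABLE / G_CONSTANT_POOL addresses. The
// strategy depends on the code model and on whether the symbol is DSO-local
// or extern-weak: PC-relative PseudoLLA, GOT-indirect loads where required,
// or an absolute LUI+ADDI pair.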
bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);
  // ...
  const LLT DefTy = MRI->getType(DefReg);

  // ...
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing: PseudoLLA expands to AUIPC+ADDI.
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Non-local symbols under PIC go through the GOT.
    MachineFunction &MF = *MI.getParent()->getParent();
    // ...
    MI.eraseFromParent();
    // ...

  // ...
      "Unsupported code model for lowering", MI);
  // ...

  // Small code model: absolute LUI+ADDI pair.
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
    // ...
    MI.eraseFromParent();
    // ...

  // An extern-weak symbol may resolve to null, so its address is loaded
  // from the GOT even without PIC.
    MachineFunction &MF = *MI.getParent()->getParent();
    // ...
    MI.eraseFromParent();
    // ...

  MI.setDesc(TII.get(RISCV::PseudoLLA));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}
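// G_SELECT becomes a Select_* pseudo (expanded into a branch sequence after
// selection); the condition is first decomposed into a RISCVCC condition
// code plus compare operands.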
bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB) const {
  GSelect &SelectMI = cast<GSelect>(MI);
  // ...
  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             // ... (condition code and compare operands)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}
// Return the RISC-V compare opcode implementing the given ordered
// predicate at the given FP size.
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  switch (Pred) {
  default:
    llvm_unreachable("Unsupported predicate");
  case CmpInst::FCMP_OLT:
    return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  }
}

static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  // Try inverting the predicate and marking the result for inversion.
  CmpInst::Predicate InvPred = CmpInst::getInversePredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    NeedInvert = true;
    return true;
  }

  // Try swapping the compare operands.
  InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  // Try both transformations together.
  InvPred = CmpInst::getInversePredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    NeedInvert = true;
    return true;
  }

  return false;
}
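// G_FCMP: RISC-V only provides FEQ/FLT/FLE, which quiet-compare and return
// 0 on unordered inputs. Other predicates are built by inverting or
// swapping (legalizeFCmpPredicate above), and the remaining cases by
// combining two compares: e.g. ONE = FLT(a,b) | FLT(b,a) and
// UNO = !(FEQ(a,a) & FEQ(b,b)).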
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  // ... (extract Pred, LHS, RHS, DstReg from the G_FCMP)
  unsigned Size = MRI->getType(LHS).getSizeInBits();
  // ...

  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try a single compare, inverting/swapping the predicate if that
  // makes it legal.
  // ...
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  // ...

  // FCMP_ONE / FCMP_UEQ: OR of two opposite-operand FLTs.
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  // ...

  // FCMP_ORD / FCMP_UNO: AND of the two self-compares FEQ(x, x).
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  // ...

  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}
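// Lowers G_FENCE. With Ztso only a seq_cst fence emits a real FENCE
// instruction; otherwise the pred/succ (iorw) bit sets are chosen from the
// atomic ordering, e.g. acquire -> fence r, rw and release -> fence rw, w.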
void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // Under Ztso all loads and stores are already ordered, so only a
    // seq_cst fence at system scope needs a real FENCE instruction.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      // ...
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to nothing.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // singlethread fences only synchronize with signal handlers on the same
  // thread, so a compiler barrier suffices.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Map the ordering onto the FENCE pred/succ (iorw) bit sets.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    // ...
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    // ...
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    // ...
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    // ...
  }
  // ...
}

InstructionSelector *
llvm::createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                                     const RISCVSubtarget &Subtarget,
                                     const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}