#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-isel"

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
  static constexpr unsigned MaxRecursionDepth = 6;

  bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
                       const unsigned Depth = 0) const;
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  void addVectorLoadStoreOperands(MachineInstr &I,
                                  SmallVectorImpl<SrcOp> &SrcOps,
                                  unsigned &CurOp, bool IsMasked,
                                  bool IsStridedOrIndexed,
                                  LLT *IndexVT = nullptr) const;
  ComplexRendererFns selectShiftMask(MachineOperand &Root,
                                     unsigned ShiftWidth) const;
  ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
    return selectShiftMask(Root, STI.getXLen());
  }
  ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
    return selectShiftMask(Root, 32);
  }
  ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
    return selectSExtBits(Root, Bits);
  }
  ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
    return selectZExtBits(Root, Bits);
  }
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }
#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
                                               unsigned Bits,
                                               const unsigned Depth) const {
  assert((MI.getOpcode() == TargetOpcode::G_ADD ||
          MI.getOpcode() == TargetOpcode::G_SUB ||
          MI.getOpcode() == TargetOpcode::G_MUL ||
          MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_AND ||
          MI.getOpcode() == TargetOpcode::G_OR ||
          MI.getOpcode() == TargetOpcode::G_XOR ||
          MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
         "Unexpected opcode");

  if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
    return false;

  auto DestReg = MI.getOperand(0).getReg();
  for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
    assert(UserOp.getParent() && "UserOp must have a parent");
    const MachineInstr &UserMI = *UserOp.getParent();

    switch (UserMI.getOpcode()) {
    // ...
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_S_W:
    // ...
    }
  }

  return true;
}
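
// ComplexPattern matcher for shift amounts: the shift instructions only read
// log2(ShiftWidth) bits of the amount, so redundant masks and extensions of
// the amount can be looked through, and add/sub-by-constant forms folded.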
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
                                          unsigned ShiftWidth) const {
  if (!Root.isReg())
    return std::nullopt;

  using namespace llvm::MIPatternMatch;
  // ...

  // Peek through a zero-extend of the shift amount.
    ShAmtReg = ZExtSrcReg;
  // ...

  // An AND of the shift amount is redundant if it only clears bits at or
  // above ShiftWidth.
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // The mask alone does not cover the low bits; check whether the
      // known-zero bits of the source make up the difference.
      KnownBits Known = VT->getKnownBits(AndSrcReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  // ...

  // Shift by X+N where N == 0 (mod ShiftWidth): shift by X alone.
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // ...
  // ...

    // Shift by N-X where N == 0 (mod ShiftWidth): shift by -X instead,
    // materialized as a subtraction from X0.
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    // Shift by N-X where N == -1 (mod ShiftWidth): shift by ~X instead,
    // materialized as XORI with -1.
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  // ...

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}
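
// Matches a value that is already sign-extended from Bits: either an explicit
// G_SEXT_INREG of that width, or a value with enough known sign bits.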
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  Register RootReg = Root.getReg();
  MachineInstr *RootDef = MRI->getVRegDef(RootReg);

  if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
      RootDef->getOperand(2).getImm() == Bits)
    return {
        {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}
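
// Matches a value whose bits above Bits are already zero: an explicit mask or
// zero-extend, or provably zero via known-bits analysis.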
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  Register RootReg = Root.getReg();

  Register RegX;
  // An AND with the low-Bits mask already performs the zero-extension.
  // ...
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
  // ...

  if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
      MRI->getType(RegX).getScalarSizeInBits() == Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  // ...
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}
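
// ComplexPattern for the shifted operand of Zba's SH1ADD/SH2ADD/SH3ADD: folds
// surrounding shift+mask combinations into a single SRLI/SRLIW feeding the
// sh*add.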
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  // ...
  const unsigned XLen = STI.getXLen();
  // ...

    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // ...
      // (shl (and y, mask), c2) with no leading bits cleared: a single SRLI
      // of y supplies the operand.
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }
      // (lshr (and y, mask), c2) where the mask clears exactly the bits the
      // shifts would: again a single SRLI of y.
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  // ...

    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();
    // ...
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            /* ...shift amount elided... */;
        MIB.addReg(DstReg);
      }}};
  // ...

  return std::nullopt;
}
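
// ComplexPattern for the shifted operand of SH1ADD.UW/SH2ADD.UW/SH3ADD.UW:
// recognizes (and (shl x, c2), mask) forms whose zero-extension is subsumed
// by the .uw instruction.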
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  // ...

    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      // Reduce the existing shift so the sh*add.uw supplies the remaining
      // ShAmt.
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  // ...

  return std::nullopt;
}
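
// Renders the VL operand of a vector pseudo: constant VLs become immediates
// (all-ones becomes the VLMAX sentinel); anything else stays a register.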
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
  assert(Root.isReg() && "Expected operand to be a Register");
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());

  if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
    auto C = RootDef->getOperand(1).getCImm();
    if (C->getValue().isAllOnes())
      // An all-ones constant means "use VLMAX"; emit the sentinel immediate
      // that the vsetvli insertion pass recognizes.
      return {{[=](MachineInstrBuilder &MIB) {
        MIB.addImm(RISCV::VLMaxSentinel);
      }}};

    uint64_t ZExtC = C->getZExtValue();
    return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
}
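
// ComplexPattern for reg+imm12 addressing: folds frame indices and
// base-plus-constant-offset chains into a (base, offset) pair, falling back
// to (reg, 0).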
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  // ...
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, *MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // ...
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}
// Maps an integer-compare predicate to the RISCVCC condition code used by
// conditional branches.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate Pred) {
  switch (Pred) {
  default:
    llvm_unreachable("Expected ICmp predicate");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

// ... (in getOperandsForBranch)
  CC = getRISCVCCFromICmp(Pred);
// ...
/// Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation
/// GenericOpc.
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  default:
    llvm_unreachable("Unexpected memory size");
  case 8:
    return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
  case 16:
    return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
  case 32:
    return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
  case 64:
    return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
  }
}

/// Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation
/// GenericOpc.
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  default:
    llvm_unreachable("Unexpected memory size");
  case 8:
    return IsStore ? RISCV::SB : RISCV::LBU;
  case 16:
    return IsStore ? RISCV::SH : RISCV::LH;
  case 32:
    return IsStore ? RISCV::SW : RISCV::LW;
  case 64:
    return IsStore ? RISCV::SD : RISCV::LD;
  }
}
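
// Collects the common trailing operands of the RVV load/store intrinsics --
// pointer, optional stride/index, optional mask -- into SrcOps, advancing
// CurOp past each one. The index operand's type is reported via IndexVT.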
void RISCVInstructionSelector::addVectorLoadStoreOperands(
    MachineInstr &I, SmallVectorImpl<SrcOp> &SrcOps, unsigned &CurOp,
    bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
  // Base pointer.
  auto PtrReg = I.getOperand(CurOp++).getReg();
  SrcOps.push_back(PtrReg);

  // Stride or index.
  if (IsStridedOrIndexed) {
    auto StrideReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(StrideReg);
    if (IndexVT)
      *IndexVT = MRI->getType(StrideReg);
  }

  // Mask.
  if (IsMasked) {
    auto MaskReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(MaskReg);
  }
}
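
// Manual selection of G_INTRINSIC_W_SIDE_EFFECTS: maps the RVV unit-stride,
// strided, and indexed load/store intrinsics onto their pseudos via the
// getVLEPseudo/getVLXPseudo/getVSEPseudo/getVSXPseudo tables.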
bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
    MachineInstr &I, MachineIRBuilder &MIB) const {
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();

  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::riscv_vlm:
  case Intrinsic::riscv_vle:
  case Intrinsic::riscv_vle_mask:
  case Intrinsic::riscv_vlse:
  case Intrinsic::riscv_vlse_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
                    IntrinID == Intrinsic::riscv_vlse_mask;
    bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
                     IntrinID == Intrinsic::riscv_vlse_mask;
    LLT VT = MRI->getType(I.getOperand(0).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Result vector.
    const Register DstReg = I.getOperand(0).getReg();

    // Sources. Mask loads (vlm) carry no passthru operand.
    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
    unsigned CurOp = 2;
    SmallVector<SrcOp, 4> SrcOps;

    if (HasPassthruOperand) {
      auto PassthruReg = I.getOperand(CurOp++).getReg();
      SrcOps.push_back(PassthruReg);
    }
    // ...
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    const RISCV::VLEPseudo *P =
        RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Policy (masked forms carry an explicit policy operand).
    // ...
      Policy = I.getOperand(CurOp++).getImm();
    PseudoMI.addImm(Policy);

    // Memref.
    PseudoMI.cloneMemRefs(I);
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vloxei:
  case Intrinsic::riscv_vloxei_mask:
  case Intrinsic::riscv_vluxei:
  case Intrinsic::riscv_vluxei_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
                    IntrinID == Intrinsic::riscv_vluxei_mask;
    bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
                     IntrinID == Intrinsic::riscv_vloxei_mask;
    LLT VT = MRI->getType(I.getOperand(0).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Result vector.
    const Register DstReg = I.getOperand(0).getReg();

    // Sources.
    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
    unsigned CurOp = 2;
    SmallVector<SrcOp, 4> SrcOps;

    if (HasPassthruOperand) {
      auto PassthruReg = I.getOperand(CurOp++).getReg();
      SrcOps.push_back(PassthruReg);
    }

    LLT IndexVT;
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked,
                               /*IsStridedOrIndexed=*/true, &IndexVT);

    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      reportFatalUsageError("The V extension does not support EEW=64 for index "
                            "values when XLEN=32");
    }

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    RISCVVType::VLMUL IndexLMUL =
        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
    const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Policy (masked forms carry an explicit policy operand).
    // ...
      Policy = I.getOperand(CurOp++).getImm();
    PseudoMI.addImm(Policy);

    // Memref.
    PseudoMI.cloneMemRefs(I);
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vsm:
  case Intrinsic::riscv_vse:
  case Intrinsic::riscv_vse_mask:
  case Intrinsic::riscv_vsse:
  case Intrinsic::riscv_vsse_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
                    IntrinID == Intrinsic::riscv_vsse_mask;
    bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
                     IntrinID == Intrinsic::riscv_vsse_mask;
    LLT VT = MRI->getType(I.getOperand(1).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Sources.
    unsigned CurOp = 1;
    SmallVector<SrcOp, 4> SrcOps;

    // Store data operand (named PassthruReg to mirror the load cases).
    auto PassthruReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(PassthruReg);

    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
        IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Memref.
    PseudoMI.cloneMemRefs(I);
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vsoxei:
  case Intrinsic::riscv_vsoxei_mask:
  case Intrinsic::riscv_vsuxei:
  case Intrinsic::riscv_vsuxei_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
                    IntrinID == Intrinsic::riscv_vsuxei_mask;
    bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
                     IntrinID == Intrinsic::riscv_vsoxei_mask;
    LLT VT = MRI->getType(I.getOperand(1).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Sources.
    unsigned CurOp = 1;
    SmallVector<SrcOp, 4> SrcOps;

    // Store data operand.
    auto PassthruReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(PassthruReg);

    LLT IndexVT;
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked,
                               /*IsStridedOrIndexed=*/true, &IndexVT);

    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      reportFatalUsageError("The V extension does not support EEW=64 for index "
                            "values when XLEN=32");
    }

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    RISCVVType::VLMUL IndexLMUL =
        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
    const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Memref.
    PseudoMI.cloneMemRefs(I);
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  }
}
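
// Selects G_EXTRACT_SUBVECTOR as a subregister copy, reusing the SelectionDAG
// helper that decomposes the extraction index into a subregister index.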
bool RISCVInstructionSelector::selectExtractSubvector(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);
  // ...
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);
  // ...
  unsigned Idx = static_cast<unsigned>(MI.getOperand(2).getImm());
  // ...
  std::tie(SubRegIdx, Idx) =
      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
          SrcMVT, DstMVT, Idx, &TRI);
  // ...
  const TargetRegisterClass *DstRC = TRI.getRegClass(DstRegClassID);
  // ...
  const TargetRegisterClass *SrcRC = TRI.getRegClass(SrcRegClassID);
  // ...
  MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {})
      .addReg(SrcReg, 0, SubRegIdx);
  MI.eraseFromParent();
  return true;
}
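
// Top-level entry point: pre-lowers a few pointer opcodes, tries the
// TableGen-erated selectImpl, then falls back to the manual cases below.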
bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          dyn_cast_if_present<const TargetRegisterClass *>(RegClassOrBank);
      if (!DefRC) {
        if (!DefTy.isValid())
          return false;
        const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC)
          return false;
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }
    // ...
    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    bool IsSigned = Opc != TargetOpcode::G_ZEXT;

    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    LLT SrcTy = MRI->getType(SrcReg);
    unsigned SrcSize = SrcTy.getSizeInBits();
    // ...
    assert(/* ... */ RISCV::GPRBRegBankID &&
           "Unexpected ext regbank");

    // 32-to-64-bit sign extension has a dedicated W-form path.
    if (IsSigned && SrcSize == 32) {
      // ...
    }

    // zext.w is ADD.UW with X0 when Zba is available.
    if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
      MI.setDesc(TII.get(RISCV::ADD_UW));
      // ...
    }

    // sext.h / zext.h when Zbb is available.
    if (SrcSize == 16 && STI.hasStdExtZbb()) {
      MI.setDesc(TII.get(IsSigned ? RISCV::SEXT_H
                         : STI.isRV64() ? RISCV::ZEXT_H_RV64
                                        : RISCV::ZEXT_H_RV32));
      // ...
    }

    // 16-bit zero extension via Zbkb's pack instructions.
    if (!IsSigned && SrcSize == 16 && STI.hasStdExtZbkb()) {
      // ...
    }

    // Fall back to a shift-left/shift-right pair.
    auto ShiftLeft =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {SrcReg})
            .addImm(STI.getXLen() - SrcSize);
    // ...
    auto ShiftRight = MIB.buildInstr(IsSigned ? RISCV::SRAI : RISCV::SRLI,
                                     {DstReg}, {ShiftLeft})
                          .addImm(STI.getXLen() - SrcSize);
    // ...
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use constant pool for complex constants.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg;
      if (FPimm.isPosZero()) {
        GPRReg = RISCV::X0;
      } else {
        GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        APInt Imm = FPimm.bitcastToAPInt();
        if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
          return false;
      }

      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      // f64 on RV32.
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");
      // +0.0 is built with an FCVT from X0 rather than a GPR pair (details
      // elided).
      // ...
        MachineInstrBuilder FCVT = /* ... */;
        // ...
        MI.eraseFromParent();
        return true;
      // ...

      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      APInt Imm = FPimm.bitcastToAPInt();
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      // ...
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB, MRI);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    GLoadStore &LdSt = cast<GLoadStore>(MI);
    const Register ValReg = LdSt.getReg(0);
    const Register PtrReg = LdSt.getPointerReg();
    LLT PtrTy = MRI->getType(PtrReg);

    const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);
    if (RB.getID() != RISCV::GPRBRegBankID)
      return false;
    // ...
    assert(/* ... */ "Load/Store pointer operand isn't a GPR");
    assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");

    // Choose NewOpc: the Zalasr form for atomic orderings, otherwise the
    // plain reg+imm form.
    // ...
    if (NewOpc == MI.getOpcode())
      return false;

    auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
    if (!AddrModeFns)
      return false;

    auto NewInst = MIB.buildInstr(NewOpc, {}, {}, MI.getFlags());
    if (Opc == TargetOpcode::G_STORE)
      NewInst.addUse(ValReg);
    else
      NewInst.addDef(ValReg);
    NewInst.cloneMemRefs(MI);
    for (auto &Fn : *AddrModeFns)
      Fn(NewInst);
    MI.eraseFromParent();

    return constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
  }
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWithSideEffects(MI, MIB);
  case TargetOpcode::G_EXTRACT_SUBVECTOR:
    return selectExtractSubvector(MI, MIB);
  default:
    return false;
  }
}
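
// Only one G_UNMERGE_VALUES form is selected here: splitting an FPR64 into
// two GPRs on RV32 with Zfa's FMV.X.W/FMVH.X.D pair.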
bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  if (!Subtarget->hasStdExtZfa())
    return false;

  // Split f64 into a pair of GPRs.
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;

  MachineInstr *ExtractLo = MIB.buildInstr(RISCV::FMV_X_W_FPR64, {Lo}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
    return false;

  MachineInstr *ExtractHi = MIB.buildInstr(RISCV::FMVH_X_D, {Hi}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
    return false;

  MI.eraseFromParent();
  return true;
}
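
// Replaces a pointer operand with an sXLen integer by inserting G_PTRTOINT,
// letting pointer arithmetic reuse the integer selection patterns.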
bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB) {
  Register PtrReg = Op.getReg();
  assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}
void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI->setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI->setType(DstReg, sXLen);
    break;
  }
  }
}
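
// Custom operand renderers referenced from TableGen patterns. Each takes the
// matched G_CONSTANT (or G_FRAME_INDEX) and renders the derived immediate the
// selected instruction needs.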
void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}
void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}
void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}
void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}
void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
         "Expected G_FRAME_INDEX");
  MIB.add(MI.getOperand(1));
}
void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}
void RISCVInstructionSelector::renderXLenSubTrailingOnes(
    MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - llvm::countr_one(C));
}
void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
  int64_t Adj = Imm < 0 ? -2048 : 2047;
  MIB.addImm(Imm - Adj);
}
void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
  MIB.addImm(Imm);
}
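
// Maps an LLT/register-bank pair to the register class used when constraining
// generic virtual registers (GPR, FPR16/32/64, or an LMUL-sized VR class).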
const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;
    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;
    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;
    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}
bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");
  // ...
}
bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");
  // ...
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}
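
// Materializes an XLEN immediate by expanding the RISCVMatInt sequence into
// individual instructions, threading the partial result through temporaries.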
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  // ...

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    // ...
    switch (I.getOpndKind()) {
    // ...
      // Instruction forms taking X0 as their second source operand.
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    // ...
    }
    // ...
    SrcReg = TmpReg;
  }

  return true;
}
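
// Lowers G_GLOBAL_VALUE / G_JUMP_TABLE / G_CONSTANT_POOL, choosing between
// PC-relative PseudoLLA, a load through the GOT, and absolute LUI-based
// addressing depending on PIC mode, code model, and linkage.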
bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  const Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  // ... (position-independent / tagged-globals path)
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Local symbols resolve with PC-relative addressing.
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Non-local (or tagged) symbols go through the GOT.
    MachineFunction &MF = *MI.getParent()->getParent();
    // ...
    MI.eraseFromParent();
    // ...

  switch (TM.getCodeModel()) {
  default:
    // ...
                       "Unsupported code model for lowering", MI);
    // ...
  case CodeModel::Small: {
    // Absolute addressing: LUI of the %hi part plus a low-part addend.
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);
    // ...
    MI.eraseFromParent();
    // ...
  }
  case CodeModel::Medium:
    // External-weak symbols may be missing, so their address must come from
    // the GOT.
    if (IsExternWeak) {
      MachineFunction &MF = *MI.getParent()->getParent();
      // ...
      MI.eraseFromParent();
      // ...
    }
    // ...
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }

  return false;
}
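
// Lowers G_SELECT to a Select_*_Using_CC_GPR pseudo, expanded into a branch
// sequence after selection.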
bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  RISCVCC::CondCode CC;
  getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);

  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             .addReg(LHS)
                             .addReg(RHS)
                             .addImm(CC)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}
// Convert an FCMP predicate to one of the directly supported F/D/Zfh compare
// instructions.
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
  switch (Pred) {
  default:
    llvm_unreachable("Unsupported predicate");
  case CmpInst::FCMP_OLT:
    return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  }
}

// Try to swap or invert the predicate into a form with a direct instruction,
// setting NeedInvert when the final result must be XORed with 1.
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}
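
// Lowers G_FCMP. Predicates with no direct FEQ/FLT/FLE instruction are
// swapped or inverted first; one/ueq and ord/uno are expanded into two
// compares combined with OR/AND, with a trailing XORI when inversion is
// needed.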
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI->getType(LHS).getSizeInBits();
  // ...

  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First check whether the predicate (possibly after swapping/inversion)
  // maps to a single compare instruction.
  // ...
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  // ...

    // FCMP_ONE / FCMP_UEQ: OR of the two ordered less-than compares.
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    // ...
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  // ...

    // FCMP_ORD / FCMP_UNO: AND of two self-equality checks (FEQ of a value
    // with itself is true exactly when it is not a NaN).
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    // ...
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  // ...

  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}
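
// Emits the fence for G_FENCE. Under Ztso only a seq_cst system fence needs a
// real FENCE; singlethread fences are compiler-only barriers; otherwise the
// ordering maps to FENCE pred/succ bits per Table A.6 of the ISA manual.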
void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      MIB.buildInstr(RISCV::FENCE, {}, {})
          .addImm(RISCVFenceField::R | RISCVFenceField::W)
          .addImm(RISCVFenceField::R | RISCVFenceField::W);
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to no instructions.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // singlethread fences only synchronize with signal handlers on the same
  // thread, so only a compiler barrier is needed.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  }
  MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}
InstructionSelector *
llvm::createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                                     const RISCVSubtarget &Subtarget,
                                     const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}