25#include "llvm/IR/IntrinsicsRISCV.h"
28#define DEBUG_TYPE "riscv-isel"
33#define GET_GLOBALISEL_PREDICATE_BITSET
34#include "RISCVGenGlobalISel.inc"
35#undef GET_GLOBALISEL_PREDICATE_BITSET
60 static constexpr unsigned MaxRecursionDepth = 6;
63 const unsigned Depth = 0)
const;
89 bool IsExternWeak =
false)
const;
97 unsigned &CurOp,
bool IsMasked,
98 bool IsStrided)
const;
103 unsigned ShiftWidth)
const;
104 ComplexRendererFns selectShiftMaskXLen(
MachineOperand &Root)
const {
105 return selectShiftMask(Root, STI.
getXLen());
107 ComplexRendererFns selectShiftMask32(
MachineOperand &Root)
const {
108 return selectShiftMask(Root, 32);
112 ComplexRendererFns selectSExtBits(
MachineOperand &Root,
unsigned Bits)
const;
113 template <
unsigned Bits>
115 return selectSExtBits(Root, Bits);
118 ComplexRendererFns selectZExtBits(
MachineOperand &Root,
unsigned Bits)
const;
119 template <
unsigned Bits>
121 return selectZExtBits(Root, Bits);
124 ComplexRendererFns selectSHXADDOp(
MachineOperand &Root,
unsigned ShAmt)
const;
125 template <
unsigned ShAmt>
127 return selectSHXADDOp(Root, ShAmt);
131 unsigned ShAmt)
const;
132 template <
unsigned ShAmt>
133 ComplexRendererFns selectSHXADD_UWOp(
MachineOperand &Root)
const {
134 return selectSHXADD_UWOp(Root, ShAmt);
174#define GET_GLOBALISEL_PREDICATES_DECL
175#include "RISCVGenGlobalISel.inc"
176#undef GET_GLOBALISEL_PREDICATES_DECL
178#define GET_GLOBALISEL_TEMPORARIES_DECL
179#include "RISCVGenGlobalISel.inc"
180#undef GET_GLOBALISEL_TEMPORARIES_DECL
185#define GET_GLOBALISEL_IMPL
186#include "RISCVGenGlobalISel.inc"
187#undef GET_GLOBALISEL_IMPL
189RISCVInstructionSelector::RISCVInstructionSelector(
192 : STI(STI),
TII(*STI.getInstrInfo()),
TRI(*STI.getRegisterInfo()), RBI(RBI),
196#include
"RISCVGenGlobalISel.inc"
199#include
"RISCVGenGlobalISel.inc"
205bool RISCVInstructionSelector::hasAllNBitUsers(
const MachineInstr &
MI,
207 const unsigned Depth)
const {
209 assert((
MI.getOpcode() == TargetOpcode::G_ADD ||
210 MI.getOpcode() == TargetOpcode::G_SUB ||
211 MI.getOpcode() == TargetOpcode::G_MUL ||
212 MI.getOpcode() == TargetOpcode::G_SHL ||
213 MI.getOpcode() == TargetOpcode::G_LSHR ||
214 MI.getOpcode() == TargetOpcode::G_AND ||
215 MI.getOpcode() == TargetOpcode::G_OR ||
216 MI.getOpcode() == TargetOpcode::G_XOR ||
217 MI.getOpcode() == TargetOpcode::G_SEXT_INREG ||
Depth != 0) &&
218 "Unexpected opcode");
220 if (
Depth >= RISCVInstructionSelector::MaxRecursionDepth)
223 auto DestReg =
MI.getOperand(0).getReg();
224 for (
auto &UserOp :
MRI->use_nodbg_operands(DestReg)) {
225 assert(UserOp.getParent() &&
"UserOp must have a parent");
226 const MachineInstr &UserMI = *UserOp.getParent();
235 case RISCV::FCVT_D_W:
236 case RISCV::FCVT_S_W:
279InstructionSelector::ComplexRendererFns
280RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
281 unsigned ShiftWidth)
const {
285 using namespace llvm::MIPatternMatch;
291 ShAmtReg = ZExtSrcReg;
310 APInt ShMask(AndMask.
getBitWidth(), ShiftWidth - 1);
311 if (ShMask.isSubsetOf(AndMask)) {
312 ShAmtReg = AndSrcReg;
316 KnownBits Known = VT->getKnownBits(AndSrcReg);
317 if (ShMask.isSubsetOf(AndMask | Known.
Zero))
318 ShAmtReg = AndSrcReg;
325 if (Imm != 0 &&
Imm.urem(ShiftWidth) == 0)
330 if (Imm != 0 &&
Imm.urem(ShiftWidth) == 0) {
333 ShAmtReg =
MRI->createVirtualRegister(&RISCV::GPRRegClass);
334 unsigned NegOpc = Subtarget->
is64Bit() ? RISCV::SUBW : RISCV::SUB;
335 return {{[=](MachineInstrBuilder &MIB) {
336 MachineIRBuilder(*MIB.getInstr())
337 .buildInstr(NegOpc, {ShAmtReg}, {
Register(RISCV::X0),
Reg});
338 MIB.addReg(ShAmtReg);
341 if (
Imm.urem(ShiftWidth) == ShiftWidth - 1) {
344 ShAmtReg =
MRI->createVirtualRegister(&RISCV::GPRRegClass);
345 return {{[=](MachineInstrBuilder &MIB) {
346 MachineIRBuilder(*MIB.getInstr())
347 .buildInstr(RISCV::XORI, {ShAmtReg}, {
Reg})
349 MIB.addReg(ShAmtReg);
354 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
357InstructionSelector::ComplexRendererFns
358RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
359 unsigned Bits)
const {
363 MachineInstr *RootDef =
MRI->getVRegDef(RootReg);
365 if (RootDef->
getOpcode() == TargetOpcode::G_SEXT_INREG &&
368 {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->
getOperand(1)); }}};
371 unsigned Size =
MRI->getType(RootReg).getScalarSizeInBits();
372 if ((
Size - VT->computeNumSignBits(RootReg)) < Bits)
373 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
378InstructionSelector::ComplexRendererFns
379RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
380 unsigned Bits)
const {
388 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
392 MRI->getType(RegX).getScalarSizeInBits() == Bits)
393 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
395 unsigned Size =
MRI->getType(RootReg).getScalarSizeInBits();
397 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
402InstructionSelector::ComplexRendererFns
403RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
404 unsigned ShAmt)
const {
405 using namespace llvm::MIPatternMatch;
411 const unsigned XLen = STI.
getXLen();
430 if (
Mask.isShiftedMask()) {
431 unsigned Leading = XLen -
Mask.getActiveBits();
432 unsigned Trailing =
Mask.countr_zero();
435 if (*LeftShift && Leading == 0 && C2.
ult(Trailing) && Trailing == ShAmt) {
436 Register DstReg =
MRI->createVirtualRegister(&RISCV::GPRRegClass);
437 return {{[=](MachineInstrBuilder &MIB) {
438 MachineIRBuilder(*MIB.getInstr())
439 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
447 if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
448 Register DstReg =
MRI->createVirtualRegister(&RISCV::GPRRegClass);
449 return {{[=](MachineInstrBuilder &MIB) {
450 MachineIRBuilder(*MIB.getInstr())
451 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
452 .addImm(Leading + Trailing);
473 unsigned Leading = XLen -
Mask.getActiveBits();
474 unsigned Trailing =
Mask.countr_zero();
487 Register DstReg =
MRI->createVirtualRegister(&RISCV::GPRRegClass);
488 return {{[=](MachineInstrBuilder &MIB) {
489 MachineIRBuilder(*MIB.getInstr())
490 .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
500InstructionSelector::ComplexRendererFns
501RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
502 unsigned ShAmt)
const {
503 using namespace llvm::MIPatternMatch;
520 if (
Mask.isShiftedMask()) {
521 unsigned Leading =
Mask.countl_zero();
522 unsigned Trailing =
Mask.countr_zero();
523 if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
524 Register DstReg =
MRI->createVirtualRegister(&RISCV::GPRRegClass);
525 return {{[=](MachineInstrBuilder &MIB) {
526 MachineIRBuilder(*MIB.getInstr())
527 .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
538InstructionSelector::ComplexRendererFns
539RISCVInstructionSelector::renderVLOp(MachineOperand &Root)
const {
540 assert(Root.
isReg() &&
"Expected operand to be a Register");
541 MachineInstr *RootDef =
MRI->getVRegDef(Root.
getReg());
543 if (RootDef->
getOpcode() == TargetOpcode::G_CONSTANT) {
545 if (
C->getValue().isAllOnes())
549 return {{[=](MachineInstrBuilder &MIB) {
554 uint64_t ZExtC =
C->getZExtValue();
555 return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
558 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.
getReg()); }}};
561InstructionSelector::ComplexRendererFns
562RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root)
const {
566 MachineInstr *RootDef =
MRI->getVRegDef(Root.
getReg());
567 if (RootDef->
getOpcode() == TargetOpcode::G_FRAME_INDEX) {
569 [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->
getOperand(1)); },
570 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
574 if (isBaseWithConstantOffset(Root, *
MRI)) {
577 MachineInstr *LHSDef =
MRI->getVRegDef(
LHS.getReg());
578 MachineInstr *RHSDef =
MRI->getVRegDef(
RHS.getReg());
582 if (LHSDef->
getOpcode() == TargetOpcode::G_FRAME_INDEX)
584 [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->
getOperand(1)); },
585 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
588 return {{[=](MachineInstrBuilder &MIB) { MIB.add(
LHS); },
589 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
595 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.
getReg()); },
596 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
605 case CmpInst::Predicate::ICMP_EQ:
607 case CmpInst::Predicate::ICMP_NE:
609 case CmpInst::Predicate::ICMP_ULT:
611 case CmpInst::Predicate::ICMP_SLT:
613 case CmpInst::Predicate::ICMP_UGE:
615 case CmpInst::Predicate::ICMP_SGE:
681 CC = getRISCVCCFromICmp(Pred);
688 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
693 return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
695 return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
697 return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
699 return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
707 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
711 return IsStore ? RISCV::SB : RISCV::LBU;
713 return IsStore ? RISCV::SH : RISCV::LH;
715 return IsStore ? RISCV::SW : RISCV::LW;
717 return IsStore ? RISCV::SD : RISCV::LD;
723void RISCVInstructionSelector::addVectorLoadStoreOperands(
724 MachineInstr &
I, SmallVectorImpl<SrcOp> &SrcOps,
unsigned &CurOp,
725 bool IsMasked,
bool IsStrided)
const {
727 auto PtrReg =
I.getOperand(CurOp++).getReg();
732 auto StrideReg =
I.getOperand(CurOp++).getReg();
738 auto MaskReg =
I.getOperand(CurOp++).getReg();
743bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
744 MachineInstr &
I, MachineIRBuilder &MIB)
const {
751 case Intrinsic::riscv_vlm:
752 case Intrinsic::riscv_vle:
753 case Intrinsic::riscv_vle_mask:
754 case Intrinsic::riscv_vlse:
755 case Intrinsic::riscv_vlse_mask: {
756 bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
757 IntrinID == Intrinsic::riscv_vlse_mask;
758 bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
759 IntrinID == Intrinsic::riscv_vlse_mask;
760 LLT VT =
MRI->getType(
I.getOperand(0).getReg());
764 const Register DstReg =
I.getOperand(0).getReg();
767 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
772 if (HasPassthruOperand) {
773 auto PassthruReg =
I.getOperand(CurOp++).getReg();
779 addVectorLoadStoreOperands(
I, SrcOps, CurOp, IsMasked, IsStrided);
782 const RISCV::VLEPseudo *
P =
783 RISCV::getVLEPseudo(IsMasked, IsStrided,
false, Log2SEW,
784 static_cast<unsigned>(LMUL));
786 auto PseudoMI = MIB.
buildInstr(
P->Pseudo, {DstReg}, SrcOps);
789 auto VLOpFn = renderVLOp(
I.getOperand(CurOp++));
790 for (
auto &RenderFn : *VLOpFn)
794 PseudoMI.addImm(Log2SEW);
799 Policy =
I.getOperand(CurOp++).getImm();
800 PseudoMI.addImm(Policy);
803 PseudoMI.cloneMemRefs(
I);
808 case Intrinsic::riscv_vsm:
809 case Intrinsic::riscv_vse:
810 case Intrinsic::riscv_vse_mask:
811 case Intrinsic::riscv_vsse:
812 case Intrinsic::riscv_vsse_mask: {
813 bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
814 IntrinID == Intrinsic::riscv_vsse_mask;
815 bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
816 IntrinID == Intrinsic::riscv_vsse_mask;
817 LLT VT =
MRI->getType(
I.getOperand(1).getReg());
825 auto PassthruReg =
I.getOperand(CurOp++).getReg();
828 addVectorLoadStoreOperands(
I, SrcOps, CurOp, IsMasked, IsStrided);
831 const RISCV::VSEPseudo *
P = RISCV::getVSEPseudo(
832 IsMasked, IsStrided, Log2SEW,
static_cast<unsigned>(LMUL));
834 auto PseudoMI = MIB.
buildInstr(
P->Pseudo, {}, SrcOps);
837 auto VLOpFn = renderVLOp(
I.getOperand(CurOp++));
838 for (
auto &RenderFn : *VLOpFn)
842 PseudoMI.addImm(Log2SEW);
845 PseudoMI.cloneMemRefs(
I);
853bool RISCVInstructionSelector::select(MachineInstr &
MI) {
854 MachineIRBuilder MIB(
MI);
856 preISelLower(
MI, MIB);
857 const unsigned Opc =
MI.getOpcode();
859 if (!
MI.isPreISelOpcode() ||
Opc == TargetOpcode::G_PHI) {
860 if (
Opc == TargetOpcode::PHI ||
Opc == TargetOpcode::G_PHI) {
861 const Register DefReg =
MI.getOperand(0).getReg();
862 const LLT DefTy =
MRI->getType(DefReg);
865 MRI->getRegClassOrRegBank(DefReg);
867 const TargetRegisterClass *DefRC =
876 DefRC = getRegClassForTypeOnBank(DefTy, RB);
883 MI.setDesc(
TII.get(TargetOpcode::PHI));
894 if (selectImpl(
MI, *CoverageInfo))
898 case TargetOpcode::G_ANYEXT:
899 case TargetOpcode::G_PTRTOINT:
900 case TargetOpcode::G_INTTOPTR:
901 case TargetOpcode::G_TRUNC:
902 case TargetOpcode::G_FREEZE:
904 case TargetOpcode::G_CONSTANT: {
906 int64_t
Imm =
MI.getOperand(1).getCImm()->getSExtValue();
908 if (!materializeImm(DstReg, Imm, MIB))
911 MI.eraseFromParent();
914 case TargetOpcode::G_ZEXT:
915 case TargetOpcode::G_SEXT: {
916 bool IsSigned =
Opc != TargetOpcode::G_ZEXT;
919 LLT SrcTy =
MRI->getType(SrcReg);
926 RISCV::GPRBRegBankID &&
927 "Unexpected ext regbank");
930 if (IsSigned && SrcSize == 32) {
931 MI.setDesc(
TII.get(RISCV::ADDIW));
937 if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
938 MI.setDesc(
TII.get(RISCV::ADD_UW));
944 if (SrcSize == 16 && STI.hasStdExtZbb()) {
945 MI.setDesc(
TII.get(IsSigned ? RISCV::SEXT_H
946 : STI.isRV64() ? RISCV::ZEXT_H_RV64
947 : RISCV::ZEXT_H_RV32));
952 if (!IsSigned && SrcSize == 16 && STI.hasStdExtZbkb()) {
953 MI.setDesc(
TII.get(STI.
is64Bit() ? RISCV::PACKW : RISCV::PACK));
960 MIB.
buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {SrcReg})
961 .addImm(STI.
getXLen() - SrcSize);
963 auto ShiftRight = MIB.
buildInstr(IsSigned ? RISCV::SRAI : RISCV::SRLI,
964 {DstReg}, {ShiftLeft})
965 .addImm(STI.
getXLen() - SrcSize);
967 MI.eraseFromParent();
970 case TargetOpcode::G_FCONSTANT: {
973 const APFloat &FPimm =
MI.getOperand(1).getFPImm()->getValueAPF();
974 unsigned Size =
MRI->getType(DstReg).getSizeInBits();
980 GPRReg =
MRI->createVirtualRegister(&RISCV::GPRRegClass);
982 if (!materializeImm(GPRReg,
Imm.getSExtValue(), MIB))
986 unsigned Opcode =
Size == 64 ? RISCV::FMV_D_X
987 :
Size == 32 ? RISCV::FMV_W_X
989 auto FMV = MIB.
buildInstr(Opcode, {DstReg}, {GPRReg});
990 if (!FMV.constrainAllUses(
TII,
TRI, RBI))
995 "Unexpected size or subtarget");
999 MachineInstrBuilder FCVT =
1005 MI.eraseFromParent();
1010 Register GPRRegHigh =
MRI->createVirtualRegister(&RISCV::GPRRegClass);
1011 Register GPRRegLow =
MRI->createVirtualRegister(&RISCV::GPRRegClass);
1013 if (!materializeImm(GPRRegHigh,
Imm.extractBits(32, 32).getSExtValue(),
1016 if (!materializeImm(GPRRegLow,
Imm.trunc(32).getSExtValue(), MIB))
1018 MachineInstrBuilder PairF64 = MIB.
buildInstr(
1019 RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
1024 MI.eraseFromParent();
1027 case TargetOpcode::G_GLOBAL_VALUE: {
1028 auto *GV =
MI.getOperand(1).getGlobal();
1029 if (GV->isThreadLocal()) {
1034 return selectAddr(
MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
1036 case TargetOpcode::G_JUMP_TABLE:
1037 case TargetOpcode::G_CONSTANT_POOL:
1038 return selectAddr(
MI, MIB,
MRI);
1039 case TargetOpcode::G_BRCOND: {
1045 .addMBB(
MI.getOperand(1).getMBB());
1046 MI.eraseFromParent();
1049 case TargetOpcode::G_BRINDIRECT:
1050 MI.setDesc(
TII.get(RISCV::PseudoBRIND));
1053 case TargetOpcode::G_SELECT:
1054 return selectSelect(
MI, MIB);
1055 case TargetOpcode::G_FCMP:
1056 return selectFPCompare(
MI, MIB);
1057 case TargetOpcode::G_FENCE: {
1062 emitFence(FenceOrdering, FenceSSID, MIB);
1063 MI.eraseFromParent();
1066 case TargetOpcode::G_IMPLICIT_DEF:
1067 return selectImplicitDef(
MI, MIB);
1068 case TargetOpcode::G_UNMERGE_VALUES:
1070 case TargetOpcode::G_LOAD:
1071 case TargetOpcode::G_STORE: {
1075 LLT PtrTy =
MRI->getType(PtrReg);
1078 if (RB.
getID() != RISCV::GPRBRegBankID)
1085 "Load/Store pointer operand isn't a GPR");
1086 assert(PtrTy.
isPointer() &&
"Load/Store pointer operand isn't a pointer");
1102 if (NewOpc ==
MI.getOpcode())
1106 auto AddrModeFns = selectAddrRegImm(
MI.getOperand(1));
1111 auto NewInst = MIB.
buildInstr(NewOpc, {}, {},
MI.getFlags());
1117 for (
auto &Fn : *AddrModeFns)
1119 MI.eraseFromParent();
1123 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1124 return selectIntrinsicWithSideEffects(
MI, MIB);
1130bool RISCVInstructionSelector::selectUnmergeValues(
1131 MachineInstr &
MI, MachineIRBuilder &MIB)
const {
1132 assert(
MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
1134 if (!Subtarget->hasStdExtZfa())
1138 if (
MI.getNumOperands() != 3)
1143 if (!isRegInFprb(Src) || !isRegInGprb(
Lo) || !isRegInGprb(
Hi))
1146 MachineInstr *ExtractLo = MIB.
buildInstr(RISCV::FMV_X_W_FPR64, {
Lo}, {Src});
1150 MachineInstr *ExtractHi = MIB.
buildInstr(RISCV::FMVH_X_D, {
Hi}, {Src});
1154 MI.eraseFromParent();
1158bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &
Op,
1159 MachineIRBuilder &MIB) {
1161 assert(
MRI->getType(PtrReg).isPointer() &&
"Operand is not a pointer!");
1165 MRI->setRegBank(PtrToInt.getReg(0), RBI.
getRegBank(RISCV::GPRBRegBankID));
1166 Op.setReg(PtrToInt.getReg(0));
1167 return select(*PtrToInt);
1170void RISCVInstructionSelector::preISelLower(MachineInstr &
MI,
1171 MachineIRBuilder &MIB) {
1172 switch (
MI.getOpcode()) {
1173 case TargetOpcode::G_PTR_ADD: {
1177 replacePtrWithInt(
MI.getOperand(1), MIB);
1178 MI.setDesc(
TII.get(TargetOpcode::G_ADD));
1179 MRI->setType(DstReg, sXLen);
1182 case TargetOpcode::G_PTRMASK: {
1185 replacePtrWithInt(
MI.getOperand(1), MIB);
1186 MI.setDesc(
TII.get(TargetOpcode::G_AND));
1187 MRI->setType(DstReg, sXLen);
1193void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
1194 const MachineInstr &
MI,
1196 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1197 "Expected G_CONSTANT");
1198 int64_t CstVal =
MI.getOperand(1).getCImm()->getSExtValue();
1202void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
1203 const MachineInstr &
MI,
1205 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1206 "Expected G_CONSTANT");
1207 uint64_t CstVal =
MI.getOperand(1).getCImm()->getZExtValue();
1211void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
1212 const MachineInstr &
MI,
1214 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1215 "Expected G_CONSTANT");
1216 uint64_t CstVal =
MI.getOperand(1).getCImm()->getZExtValue();
1220void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
1221 const MachineInstr &
MI,
1223 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1224 "Expected G_CONSTANT");
1225 int64_t CstVal =
MI.getOperand(1).getCImm()->getSExtValue();
1229void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
1230 const MachineInstr &
MI,
1232 assert(
MI.getOpcode() == TargetOpcode::G_FRAME_INDEX &&
OpIdx == -1 &&
1233 "Expected G_FRAME_INDEX");
1234 MIB.
add(
MI.getOperand(1));
1237void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
1238 const MachineInstr &
MI,
1240 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1241 "Expected G_CONSTANT");
1242 uint64_t
C =
MI.getOperand(1).getCImm()->getZExtValue();
1246void RISCVInstructionSelector::renderXLenSubTrailingOnes(
1247 MachineInstrBuilder &MIB,
const MachineInstr &
MI,
int OpIdx)
const {
1248 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1249 "Expected G_CONSTANT");
1250 uint64_t
C =
MI.getOperand(1).getCImm()->getZExtValue();
1254void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
1255 const MachineInstr &
MI,
1257 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1258 "Expected G_CONSTANT");
1259 int64_t
Imm =
MI.getOperand(1).getCImm()->getSExtValue();
1260 int64_t Adj =
Imm < 0 ? -2048 : 2047;
1264void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
1265 const MachineInstr &
MI,
1267 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1268 "Expected G_CONSTANT");
1269 int64_t
Imm =
MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
1273const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
1274 LLT Ty,
const RegisterBank &RB)
const {
1275 if (RB.
getID() == RISCV::GPRBRegBankID) {
1277 return &RISCV::GPRRegClass;
1280 if (RB.
getID() == RISCV::FPRBRegBankID) {
1282 return &RISCV::FPR16RegClass;
1284 return &RISCV::FPR32RegClass;
1286 return &RISCV::FPR64RegClass;
1289 if (RB.
getID() == RISCV::VRBRegBankID) {
1291 return &RISCV::VRRegClass;
1294 return &RISCV::VRM2RegClass;
1297 return &RISCV::VRM4RegClass;
1300 return &RISCV::VRM8RegClass;
1306bool RISCVInstructionSelector::isRegInGprb(
Register Reg)
const {
1310bool RISCVInstructionSelector::isRegInFprb(
Register Reg)
const {
1314bool RISCVInstructionSelector::selectCopy(MachineInstr &
MI)
const {
1320 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
1323 "Register class not available for LLT, register bank combination");
1334 MI.setDesc(
TII.get(RISCV::COPY));
1338bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &
MI,
1339 MachineIRBuilder &MIB)
const {
1340 assert(
MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
1342 const Register DstReg =
MI.getOperand(0).getReg();
1343 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
1347 "Register class not available for LLT, register bank combination");
1353 MI.setDesc(
TII.get(TargetOpcode::IMPLICIT_DEF));
1357bool RISCVInstructionSelector::materializeImm(
Register DstReg, int64_t Imm,
1358 MachineIRBuilder &MIB)
const {
1366 unsigned NumInsts = Seq.
size();
1369 for (
unsigned i = 0; i < NumInsts; i++) {
1371 ?
MRI->createVirtualRegister(&RISCV::GPRRegClass)
1373 const RISCVMatInt::Inst &
I = Seq[i];
1376 switch (
I.getOpndKind()) {
1385 {SrcReg, Register(RISCV::X0)});
1405bool RISCVInstructionSelector::selectAddr(MachineInstr &
MI,
1406 MachineIRBuilder &MIB,
bool IsLocal,
1407 bool IsExternWeak)
const {
1408 assert((
MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1409 MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
1410 MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
1411 "Unexpected opcode");
1413 const MachineOperand &DispMO =
MI.getOperand(1);
1416 const LLT DefTy =
MRI->getType(DefReg);
1423 if (IsLocal && !Subtarget->allowTaggedGlobals()) {
1427 MI.setDesc(
TII.get(RISCV::PseudoLLA));
1435 MachineFunction &MF = *
MI.getParent()->getParent();
1449 MI.eraseFromParent();
1456 "Unsupported code model for lowering",
MI);
1463 Register AddrHiDest =
MRI->createVirtualRegister(&RISCV::GPRRegClass);
1464 MachineInstr *AddrHi = MIB.
buildInstr(RISCV::LUI, {AddrHiDest}, {})
1476 MI.eraseFromParent();
1489 MachineFunction &MF = *
MI.getParent()->getParent();
1503 MI.eraseFromParent();
1510 MI.setDesc(
TII.get(RISCV::PseudoLLA));
1517bool RISCVInstructionSelector::selectSelect(MachineInstr &
MI,
1518 MachineIRBuilder &MIB)
const {
1525 Register DstReg = SelectMI.getReg(0);
1527 unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
1529 unsigned Size =
MRI->getType(DstReg).getSizeInBits();
1530 Opc =
Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
1531 : RISCV::Select_FPR64_Using_CC_GPR;
1539 .
addReg(SelectMI.getTrueReg())
1540 .
addReg(SelectMI.getFalseReg());
1541 MI.eraseFromParent();
1552 return Size == 16 ? RISCV::FLT_H :
Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
1554 return Size == 16 ? RISCV::FLE_H :
Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
1556 return Size == 16 ? RISCV::FEQ_H :
Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
1569 assert(!isLegalFCmpPredicate(Pred) &&
"Predicate already legal?");
1572 if (isLegalFCmpPredicate(InvPred)) {
1580 if (isLegalFCmpPredicate(InvPred)) {
1585 if (isLegalFCmpPredicate(InvPred)) {
1597bool RISCVInstructionSelector::selectFPCompare(MachineInstr &
MI,
1598 MachineIRBuilder &MIB)
const {
1606 unsigned Size =
MRI->getType(
LHS).getSizeInBits();
1611 bool NeedInvert =
false;
1615 TmpReg =
MRI->createVirtualRegister(&RISCV::GPRRegClass);
1617 if (!
Cmp.constrainAllUses(
TII,
TRI, RBI))
1623 {&RISCV::GPRRegClass}, {
LHS,
RHS});
1624 if (!Cmp1.constrainAllUses(
TII,
TRI, RBI))
1627 {&RISCV::GPRRegClass}, {
RHS,
LHS});
1628 if (!Cmp2.constrainAllUses(
TII,
TRI, RBI))
1631 TmpReg =
MRI->createVirtualRegister(&RISCV::GPRRegClass);
1633 MIB.
buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1634 if (!
Or.constrainAllUses(
TII,
TRI, RBI))
1641 {&RISCV::GPRRegClass}, {
LHS,
LHS});
1642 if (!Cmp1.constrainAllUses(
TII,
TRI, RBI))
1645 {&RISCV::GPRRegClass}, {
RHS,
RHS});
1646 if (!Cmp2.constrainAllUses(
TII,
TRI, RBI))
1649 TmpReg =
MRI->createVirtualRegister(&RISCV::GPRRegClass);
1651 MIB.
buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1652 if (!
And.constrainAllUses(
TII,
TRI, RBI))
1659 auto Xor = MIB.
buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
1660 if (!
Xor.constrainAllUses(
TII,
TRI, RBI))
1664 MI.eraseFromParent();
1668void RISCVInstructionSelector::emitFence(
AtomicOrdering FenceOrdering,
1670 MachineIRBuilder &MIB)
const {
1671 if (STI.hasStdExtZtso()) {
1674 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
1684 MIB.
buildInstr(TargetOpcode::MEMBARRIER, {}, {});
1692 MIB.
buildInstr(TargetOpcode::MEMBARRIER, {}, {});
1698 unsigned Pred, Succ;
1699 switch (FenceOrdering) {
1702 case AtomicOrdering::AcquireRelease:
1706 case AtomicOrdering::Acquire:
1711 case AtomicOrdering::Release:
1716 case AtomicOrdering::SequentiallyConsistent:
1726InstructionSelector *
1730 return new RISCVInstructionSelector(TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
static bool hasAllWUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI)
static bool hasAllNBitUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI, unsigned OrigBits)
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
static StringRef getName(Value *V)
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS, CmpInst::Predicate &Pred, bool &NeedInvert)
static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC, Register &LHS, Register &RHS, MachineRegisterInfo &MRI)
const SmallVectorImpl< MachineOperand > & Cond
This file declares the targeting of the RegisterBankInfo class for RISC-V.
APInt bitcastToAPInt() const
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
This is an important base class in LLVM.
virtual void setupMF(MachineFunction &mf, GISelValueTracking *vt, CodeGenCoverage *covinfo=nullptr, ProfileSummaryInfo *psi=nullptr, BlockFrequencyInfo *bfi=nullptr)
Setup per-MF executor state.
Register getPointerReg() const
Get the source register of the pointer value.
MachineMemOperand & getMMO() const
Get the MachineMemOperand on this instruction.
LocationSize getMemSizeInBits() const
Returns the size in bits of the memory access.
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
constexpr unsigned getScalarSizeInBits() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr unsigned getAddressSpace() const
TypeSize getValue() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Helper class to build MachineInstr.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildPtrToInt(const DstOp &Dst, const SrcOp &Src)
Build and insert a G_PTRTOINT instruction.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
const MachineOperand & getOperand(unsigned i) const
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Analysis providing profile information.
This class provides the information for the target register banks.
static RISCVVType::VLMUL getLMUL(MVT VT)
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
bool isPositionIndependent() const
CodeModel::Model getCodeModel() const
Returns the code model.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
operand_type_match m_Reg()
SpecificConstantMatch m_SpecificICst(const APInt &RequestedValue)
Matches a constant equal to RequestedValue.
operand_type_match m_Pred()
UnaryOp_match< SrcTy, TargetOpcode::G_ZEXT > m_GZExt(const SrcTy &Src)
ConstantMatch< APInt > m_ICst(APInt &Cst)
BinaryOp_match< LHS, RHS, TargetOpcode::G_ADD, true > m_GAdd(const LHS &L, const RHS &R)
OneNonDBGUse_match< SubPat > m_OneNonDBGUse(const SubPat &SP)
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_ICMP > m_GICmp(const Pred &P, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SUB > m_GSub(const LHS &L, const RHS &R)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SHL, false > m_GShl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_AND, true > m_GAnd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_LSHR, false > m_GLShr(const LHS &L, const RHS &R)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
static constexpr int64_t VLMaxSentinel
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
@ System
Synchronized with respect to all concurrently executing threads.
This is an optimization pass for GlobalISel generic memory operations.
PointerUnion< const TargetRegisterClass *, const RegisterBank * > RegClassOrRegBank
Convenient type to represent either a register class or a register bank.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isStrongerThanMonotonic(AtomicOrdering AO)
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
LLVM_ABI MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
InstructionSelector * createRISCVInstructionSelector(const RISCVTargetMachine &TM, const RISCVSubtarget &Subtarget, const RISCVRegisterBankInfo &RBI)
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
AtomicOrdering
Atomic ordering for LLVM's memory model.
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
@ Or
Bitwise or logical OR of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.