using namespace LegalityPredicates;
using namespace LegalizeMutations;
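// Body fragment of typeIsScalarFPArith(TypeIdx, ST): a scalar type qualifies
// for FP arithmetic only when the scalar FP extension matching its width is
// available (Zfh for 16-bit, F for 32-bit, D for 64-bit).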
  return Query.Types[TypeIdx].isScalar() &&
         ((ST.hasStdExtZfh() && Query.Types[TypeIdx].getSizeInBits() == 16) ||
          (ST.hasStdExtF() && Query.Types[TypeIdx].getSizeInBits() == 32) ||
          (ST.hasStdExtD() && Query.Types[TypeIdx].getSizeInBits() == 64));
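// Fragments of typeIsLegalIntOrFPVec, typeIsLegalBoolVec and typeIsLegalPtrVec:
// scalable vector types are legal only when the V extension is enabled, 64-bit
// elements additionally require hasVInstructionsI64(), and single-element
// (nxv1) vectors pass only under a further subtarget check that is elided here.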
                       std::initializer_list<LLT> IntOrFPVecTys,
// ...
  return ST.hasVInstructions() &&
         (Query.Types[TypeIdx].getScalarSizeInBits() != 64 ||
          ST.hasVInstructionsI64()) &&
         (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
          /* ... */
// ...
  return ST.hasVInstructions() &&
         (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
          /* ... */
// ...
                       std::initializer_list<LLT> PtrVecTys,
// ...
  return ST.hasVInstructions() &&
         (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
          /* ... */
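// RISCVLegalizerInfo constructor: caches XLen and the matching scalar LLT
// (sXLen), then declares the per-opcode legalization rules below.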
    : STI(ST), XLen(STI.getXLen()), sXLen(LLT::scalar(XLen)) {
// ...
  using namespace TargetOpcode;
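  // Scalable vector types used by the rule declarations: i1 mask vectors,
  // integer/FP element vectors from nxv1s8 up to nxv8s64, and pointer vectors.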
  auto BoolVecTys = {nxv1s1, nxv2s1, nxv4s1, nxv8s1, nxv16s1, nxv32s1, nxv64s1};

  auto IntOrFPVecTys = {nxv1s8,   nxv2s8,  nxv4s8,  nxv8s8,  nxv16s8, nxv32s8,
                        nxv64s8,  nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
                        nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
                        nxv1s64,  nxv2s64, nxv4s64, nxv8s64};

  auto PtrVecTys = {nxv1p0, nxv2p0, nxv4p0, nxv8p0};
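  // Integer ALU rules (fragment): the (elided) opcode list is legal at s32 and
  // sXLen, while the carry/overflow add/sub variants are lowered.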
      .legalFor({s32, sXLen})
// ...
      {G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();
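  // Shifts are legal for the listed (value, amount) pairings of s32 and sXLen,
  // with narrower values widened to the next power of two; extensions from s32
  // to sXLen are legal.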
  ShiftActions.legalFor({{s32, s32}, {s32, sXLen}, {sXLen, sXLen}})
      .widenScalarToNextPow2(0)
// ...
  ExtActions.legalFor({{sXLen, s32}});
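  // G_MERGE_VALUES / G_UNMERGE_VALUES: the "big" type index is 0 for merge and
  // 1 for unmerge. On RV32 with D one pairing is kept legal (predicate elided
  // below); otherwise both types are widened and clamped to sXLen.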
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
// ...
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
    if (XLen == 32 && ST.hasStdExtD()) {
      MergeUnmergeActions.legalIf(/* ... */);
    }
// ...
    MergeUnmergeActions.widenScalarToNextPow2(LitTyIdx, XLen)
        .widenScalarToNextPow2(BigTyIdx, XLen)
        .clampScalar(LitTyIdx, sXLen, sXLen)
        .clampScalar(BigTyIdx, sXLen, sXLen);
  }
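  // Rotates are legal with Zbb/Zbkb (s32 and sXLen values, sXLen amount) and
  // lowered otherwise.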
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb()) {
    RotateActions.legalFor({{s32, sXLen}, {sXLen, sXLen}});
// ...
  }
  RotateActions.lower();
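  // G_BSWAP is legal at sXLen with Zbb/Zbkb; otherwise it is clamped to at most
  // sXLen and lowered.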
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb())
    BSWAPActions.legalFor({sXLen}).clampScalar(0, sXLen, sXLen);
  else
    BSWAPActions.maxScalar(0, sXLen).lower();
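  // Count-zeros rules: legal with Zbb for matching s32/sXLen operand and result
  // types; the *_ZERO_UNDEF variants are lowered.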
  auto &CountZerosUndefActions =
      /* ... */;
  if (ST.hasStdExtZbb()) {
    CountZerosActions.legalFor({{s32, s32}, {sXLen, sXLen}})
        .clampScalar(0, s32, sXLen)
// ...
  }
  CountZerosUndefActions.lower();
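  // G_CTPOP is legal with Zbb for matching s32/sXLen types; otherwise it is
  // clamped to at most sXLen and lowered.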
  if (ST.hasStdExtZbb()) {
    CTPOPActions.legalFor({{s32, s32}, {sXLen, sXLen}})
        .clampScalar(0, s32, sXLen)
        .widenScalarToNextPow2(0)
        .scalarSameSizeAs(1, 0);
  } else {
    CTPOPActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
  }
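  // G_CONSTANT: s32 and pointer constants are legal, s64 constants go through
  // custom legalization (see the G_CONSTANT case in legalizeCustom below,
  // behind an elided subtarget check), and scalars are clamped to s32..sXLen.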
  ConstantActions.legalFor({s32, p0});
// ...
  ConstantActions.customFor({s64});
  ConstantActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen);
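  // G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER and G_FREEZE are legal for s32,
  // sXLen and pointers; the next (elided) rule set is legal for an sXLen result
  // over sXLen or pointer operands.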
      {G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER, G_FREEZE})
      .legalFor({s32, sXLen, p0})
// ...
      .legalFor({{sXLen, sXLen}, {sXLen, p0}})
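  // G_SELECT: legal for s32 and pointer values with an sXLen condition; s64
  // values are also legal when XLen == 64 or D is present, and the value type
  // is clamped accordingly.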
  auto &SelectActions =
      /* ... */
          .legalFor({{s32, sXLen}, {p0, sXLen}})
// ...
  if (XLen == 64 || ST.hasStdExtD())
    SelectActions.legalFor({{s64, sXLen}});
// ...
      .clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)
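  // Scalar loads and stores are legal for the common scalar widths and pointer
  // values; with the V extension, element-aligned scalable-vector loads and
  // stores of the listed types are legal as well.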
  auto &LoadStoreActions =
      /* ... */
      .legalForTypesWithMemDesc({{s32, p0, s8, 8},
// ...
                                 {p0, p0, sXLen, XLen}});
  if (ST.hasVInstructions())
// ...
        {nxv4s8, p0, nxv4s8, 8},
        {nxv8s8, p0, nxv8s8, 8},
        {nxv16s8, p0, nxv16s8, 8},
        {nxv32s8, p0, nxv32s8, 8},
        {nxv64s8, p0, nxv64s8, 8},
        {nxv2s16, p0, nxv2s16, 16},
        {nxv4s16, p0, nxv4s16, 16},
        {nxv8s16, p0, nxv8s16, 16},
        {nxv16s16, p0, nxv16s16, 16},
        {nxv32s16, p0, nxv32s16, 16},
        {nxv2s32, p0, nxv2s32, 32},
        {nxv4s32, p0, nxv4s32, 32},
        {nxv8s32, p0, nxv8s32, 32},
        {nxv16s32, p0, nxv16s32, 32}});
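  // Extending loads of s8/s16 into s32 are legal; the fragments below add s64
  // and 64-bit-element scalable-vector cases behind further subtarget checks
  // (D, ELEN == 64, 64-bit vector element support).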
  auto &ExtLoadActions =
      /* ... */
      .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}});
// ...
                                {s64, p0, s64, 64}});
// ...
        {{s64, p0, s8, 8}, {s64, p0, s16, 16}, {s64, p0, s32, 32}});
  } else if (ST.hasStdExtD()) {
// ...
  if (ST.hasVInstructions() && ST.getELen() == 64)
// ...
        {nxv1s16, p0, nxv1s16, 16},
        {nxv1s32, p0, nxv1s32, 32}});
// ...
  if (ST.hasVInstructionsI64())
// ...
        {nxv2s64, p0, nxv2s64, 64},
        {nxv4s64, p0, nxv4s64, 64},
        {nxv8s64, p0, nxv8s64, 64}});
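  // Only scattered fragments survive for the next few rule sets: scalars are
  // clamped to sXLen or widened to the next power of two, and the multiply
  // rules are guarded by Zmmul.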
      .clampScalar(0, sXLen, sXLen);
// ...
      .clampScalar(1, sXLen, sXLen);
// ...
      .widenScalarToNextPow2(0)
// ...
  if (ST.hasStdExtZmmul()) {
// ...
        .widenScalarToNextPow2(0)
// ...
        .widenScalarToNextPow2(0)
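  // Division and remainder: with M the rules are legal at s32/sXLen, libcalled
  // for double-XLen types and clamped into that range; the second (fragmentary)
  // chain libcalls sXLen and double-XLen types instead.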
  if (ST.hasStdExtM()) {
// ...
        .legalFor({s32, sXLen})
        .libcallFor({sDoubleXLen})
        .clampScalar(0, s32, sDoubleXLen)
// ...
        .libcallFor({sXLen, sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
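  // G_ABS is custom-legalized when Zbb is available (lowered to max(x, -x) in
  // legalizeCustom); integer min/max are legal at sXLen with Zbb and lowered
  // otherwise.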
  if (ST.hasStdExtZbb())
    AbsActions.customFor({s32, sXLen}).minScalar(0, sXLen);
// ...
  auto &MinMaxActions =
      /* ... */;
  if (ST.hasStdExtZbb())
    MinMaxActions.legalFor({sXLen}).minScalar(0, sXLen);
  MinMaxActions.lower();
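  // Scalar floating-point rules (fragments): the opcode list includes G_FABS,
  // G_FSQRT, G_FMAXNUM and G_FMINNUM, and the conversion predicates below
  // accept only what the available Zfh / F / D extensions can express (the
  // second type check of each clause is elided).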
                               G_FABS, G_FSQRT, G_FMAXNUM, G_FMINNUM})
// ...
        return (ST.hasStdExtD() && typeIs(0, s32)(Query) &&
                /* ... */
               (ST.hasStdExtZfh() && typeIs(0, s16)(Query) &&
                /* ... */
               (ST.hasStdExtZfh() && ST.hasStdExtD() && typeIs(0, s16)(Query) &&
                /* ... */
// ...
        return (ST.hasStdExtD() && typeIs(0, s64)(Query) &&
                /* ... */
               (ST.hasStdExtZfh() && typeIs(0, s32)(Query) &&
                /* ... */
               (ST.hasStdExtZfh() && ST.hasStdExtD() && typeIs(0, s64)(Query) &&
                /* ... */
// ...
      .libcallFor({s32, s64});
// ...
  if (ST.hasVInstructionsF64() && ST.hasStdExtD())
// ...
  else if (ST.hasVInstructionsI64())
// ...
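// legalizeIntrinsic: handles Intrinsic::vacopy by expanding it into a load of
// the source va_list pointer followed by a store to the destination va_list.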
  switch (IntrinsicID) {
// ...
  case Intrinsic::vacopy: {
// ...
    LLT PtrTy = MRI.getType(DstLst);
// ...
    auto Tmp = MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), *LoadMMO);
// ...
    MIRBuilder.buildStore(Tmp, DstLst, *StoreMMO);
// ...
    MI.eraseFromParent();
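// legalizeShlAshrLshr: when the shift amount is a small constant, it is rebuilt
// as a wider constant and the amount operand is replaced so the imported
// selection patterns can match the immediate form.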
bool RISCVLegalizerInfo::legalizeShlAshrLshr(
// ...
  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR ||
         MI.getOpcode() == TargetOpcode::G_SHL);
// ...
  uint64_t Amount = VRegAndVal->Value.getZExtValue();
// ...
  MI.getOperand(2).setReg(ExtCst.getReg(0));
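// legalizeVAStart: stores the address of the varargs frame index through the
// pointer operand of G_VASTART, reusing the instruction's memory operand.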
  assert(MI.getOpcode() == TargetOpcode::G_VASTART);
// ...
  MIRBuilder.buildStore(FINAddr, MI.getOperand(0).getReg(),
                        *MI.memoperands()[0]);
  MI.eraseFromParent();
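// shouldBeInConstantPool: decides whether a large immediate is cheaper to load
// from the constant pool than to materialize with an instruction sequence
// (generateInstSeq / generateTwoRegInstSeq), taking opt-for-size into account.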
bool RISCVLegalizerInfo::shouldBeInConstantPool(APInt APImm,
                                                bool ShouldOptForSize) const {
// ...
  if (ShouldOptForSize)
// ...
  unsigned ShiftAmt, AddOpc;
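// legalizeVScale: expands G_VSCALE in terms of VLENB (the vector length in
// bytes, read via RISCV::G_READ_VLENB). Power-of-two multipliers become a shift
// of VLENB, multiples of eight avoid the shift by scaling the multiplier down,
// and the general case combines a shift with a multiply.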
  uint64_t Val = MI.getOperand(1).getCImm()->getZExtValue();
// ...
      auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
// ...
    } else if (Log2 > 3) {
      auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
// ...
      MIB.buildInstr(RISCV::G_READ_VLENB, {Dst}, {});
// ...
  } else if ((Val % 8) == 0) {
// ...
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
// ...
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
// ...
  MI.eraseFromParent();
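// legalizeExt: custom extension of boolean vectors. The result is built as a
// select between splatted constants, with "true" mapping to -1 for G_SEXT and
// to 1 for G_ZEXT / G_ANYEXT.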
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_SEXT ||
         Opc == TargetOpcode::G_ANYEXT);
// ...
  LLT DstTy = MRI.getType(Dst);
  int64_t ExtTrueVal = Opc == TargetOpcode::G_SEXT ? -1 : 1;
// ...
  MI.eraseFromParent();
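// legalizeLoadStore: handles scalable-vector accesses that are not sufficiently
// aligned. If the target reports the access as allowed for its alignment it is
// kept; otherwise a 16/32/64-bit-element access is rewritten as an equivalent
// byte-element vector access.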
         "Machine instructions must be Load/Store.");
// ...
  LLT DataTy = MRI.getType(DstReg);
// ...
  if (!MI.hasOneMemOperand())
// ...
  if (TLI->allowsMemoryAccessForAlignment(Ctx, DL, VT, *MMO))
// ...
  assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
         "Unexpected unaligned RVV load type");
// ...
  unsigned NumElements =
      /* ... */;
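// Vector helpers: buildAllOnesMask emits RISCV::G_VMSET_VL for the mask type of
// the given vector, and buildDefaultVLOps pairs such a mask with the vector
// length operand.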
  return MIB.buildInstr(RISCV::G_VMSET_VL, {MaskTy}, {VL});
// ...
static std::pair<MachineInstrBuilder, Register>
// ...
  LLT VecTy = Dst.getLLTTy(MRI);
  return MIB.buildInstr(RISCV::G_SPLAT_VECTOR_SPLIT_I64_VL, {Dst},
                        {Passthru, Lo, Hi, VL});
// ...
                                  Unmerge.getReg(1), VL, MIB, MRI);
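// legalizeSplatVector: all-ones and all-zero splats of boolean vectors become
// G_VMSET_VL and G_VMCLR_VL; other boolean splat values are zero-extended and
// turned back into a mask with a compare; s64 splat values on RV32 take the
// split-in-two path above.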
  assert(MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR);
// ...
  Register SplatVal = MI.getOperand(1).getReg();
// ...
  LLT VecTy = MRI.getType(Dst);
// ...
  if (XLenTy.getSizeInBits() == 32 &&
      /* ... */
// ...
    MI.eraseFromParent();
// ...
    MIB.buildInstr(RISCV::G_VMSET_VL, {Dst}, {VL});
    MI.eraseFromParent();
// ...
    MIB.buildInstr(RISCV::G_VMCLR_VL, {Dst}, {VL});
    MI.eraseFromParent();
// ...
  auto ZExtSplatVal = MIB.buildZExt(InterEltTy, SplatVal);
// ...
  MI.eraseFromParent();
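// legalizeCustom: dispatches the opcodes marked custom above. G_ABS is lowered
// to max(x, -x), oversized G_CONSTANTs may be moved to the constant pool when
// that is cheaper, a 32-bit G_SEXT_INREG is accepted as-is (it maps to sext.w),
// G_IS_FPCLASS becomes RISCV::G_FCLASS plus a mask test, and the remaining
// cases forward to the legalize* helpers defined earlier.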
  switch (MI.getOpcode()) {
// ...
  case TargetOpcode::G_ABS:
// ...
  case TargetOpcode::G_CONSTANT: {
// ...
    bool ShouldOptForSize = F.hasOptSize() || F.hasMinSize();
// ...
    if (!shouldBeInConstantPool(ConstVal->getValue(), ShouldOptForSize))
// ...
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return legalizeShlAshrLshr(MI, MIRBuilder, Observer);
  case TargetOpcode::G_SEXT_INREG: {
// ...
    int64_t SizeInBits = MI.getOperand(2).getImm();
    if (SizeInBits == 32)
// ...
  case TargetOpcode::G_IS_FPCLASS: {
    Register GISFPCLASS = MI.getOperand(0).getReg();
// ...
    auto GFClass = MIB.buildInstr(RISCV::G_FCLASS, {sXLen}, {Src});
    auto And = MIB.buildAnd(sXLen, GFClass, FClassMask);
// ...
    MI.eraseFromParent();
// ...
  case TargetOpcode::G_VASTART:
    return legalizeVAStart(MI, MIRBuilder);
  case TargetOpcode::G_VSCALE:
    return legalizeVScale(MI, MIRBuilder);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ANYEXT:
    return legalizeExt(MI, MIRBuilder);
  case TargetOpcode::G_SPLAT_VECTOR:
    return legalizeSplatVector(MI, MIRBuilder);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
    return legalizeLoadStore(MI, Helper, MIRBuilder);