#define DEBUG_TYPE "riscvtti"

    "riscv-v-register-bit-width-lmul",
    "The LMUL to use for getRegisterBitWidth queries. Affects LMUL used "
    "by autovectorized code. Fractional LMULs are not supported."),

    "Overrides result used for getMaximumVF query which is used "
    "exclusively by SLP vectorizer."),
  size_t NumInstr = OpCodes.size();
    return LMULCost * NumInstr;
  for (auto Op : OpCodes) {
    case RISCV::VRGATHER_VI:
    case RISCV::VRGATHER_VV:
    case RISCV::VSLIDEUP_VI:
    case RISCV::VSLIDEDOWN_VI:
    case RISCV::VSLIDEUP_VX:
    case RISCV::VSLIDEDOWN_VX:
    case RISCV::VREDMAX_VS:
    case RISCV::VREDMIN_VS:
    case RISCV::VREDMAXU_VS:
    case RISCV::VREDMINU_VS:
    case RISCV::VREDSUM_VS:
    case RISCV::VREDAND_VS:
    case RISCV::VREDOR_VS:
    case RISCV::VREDXOR_VS:
    case RISCV::VFREDMAX_VS:
    case RISCV::VFREDMIN_VS:
    case RISCV::VFREDUSUM_VS: {
    case RISCV::VFREDOSUM_VS: {
    case RISCV::VMANDN_MM:
    case RISCV::VMNAND_MM:
    case RISCV::VFIRST_M:

         "getIntImmCost can only estimate cost of materialising integers");
  auto *BO = dyn_cast<BinaryOperator>(Inst->getOperand(0));
  if (!BO || !BO->hasOneUse())

  if (BO->getOpcode() != Instruction::Shl)

  if (!isa<ConstantInt>(BO->getOperand(1)))

  unsigned ShAmt = cast<ConstantInt>(BO->getOperand(1))->getZExtValue();

  if (ShAmt == Trailing)
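
// getIntImmCostInst: cost of materializing an immediate operand of an
// instruction. Immediates that fold into the instruction encoding (12-bit
// simm for ALU ops, Zbb/Zba/Zbs-coverable AND masks, power-of-two or 2^N±1
// multipliers) are treated as free or nearly free.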
166 "getIntImmCost can only estimate cost of materialising integers");
174 bool Takes12BitImm =
false;
175 unsigned ImmArgIdx = ~0U;
178 case Instruction::GetElementPtr:
183 case Instruction::Store: {
188 if (
Idx == 1 || !Inst)
193 if (!getTLI()->allowsMemoryAccessForAlignment(
195 ST->getPointerAddressSpace(), ST->getAlign()))
201 case Instruction::Load:
204 case Instruction::And:
206 if (Imm == UINT64_C(0xffff) && ST->hasStdExtZbb())
209 if (Imm == UINT64_C(0xffffffff) && ST->hasStdExtZba())
212 if (ST->hasStdExtZbs() && (~Imm).isPowerOf2())
214 if (Inst &&
Idx == 1 && Imm.getBitWidth() <= ST->
getXLen() &&
217 Takes12BitImm =
true;
219 case Instruction::Add:
220 Takes12BitImm =
true;
222 case Instruction::Or:
223 case Instruction::Xor:
225 if (ST->hasStdExtZbs() && Imm.isPowerOf2())
227 Takes12BitImm =
true;
229 case Instruction::Mul:
231 if (Imm.isPowerOf2() || Imm.isNegatedPowerOf2())
234 if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2())
237 Takes12BitImm =
true;
239 case Instruction::Sub:
240 case Instruction::Shl:
241 case Instruction::LShr:
242 case Instruction::AShr:
243 Takes12BitImm =
true;
254 if (Imm.getSignificantBits() <= 64 &&
  return ST->hasStdExtZbb() || (ST->hasVendorXCVbitmanip() && !ST->is64Bit())

  switch (II->getIntrinsicID()) {
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_fmul:

  return cast<VectorType>(EVT(IndexVT).getTypeForEVT(C));
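
// getShuffleCost: model common fixed-length shuffle kinds directly in RVV
// terms -- deinterleave via vnsrl, single/two-source permutes via one or two
// vrgather.vv, sub-vector extract/insert via vslidedown/vslideup, and
// splat/select/splice/reverse special cases -- before falling back to the
// generic implementation.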
  if (isa<FixedVectorType>(Tp)) {
    if (Mask.size() >= 2 && LT.second.isFixedLengthVector()) {
      MVT EltTp = LT.second.getVectorElementType();

      if (Mask[0] == 0 || Mask[0] == 1) {
        if (equal(DeinterleaveMask, Mask))
          return LT.first * getRISCVInstructionCost(RISCV::VNSRL_WI,

      if (LT.second.isFixedLengthVector() && LT.first == 1 &&
          (LT.second.getScalarSizeInBits() != 8 ||
           LT.second.getVectorNumElements() <= 256)) {
               getRISCVInstructionCost(RISCV::VRGATHER_VV, LT.second, CostKind);

      if (LT.second.isFixedLengthVector() && LT.first == 1 &&
          (LT.second.getScalarSizeInBits() != 8 ||
           LT.second.getVectorNumElements() <= 256)) {
        return 2 * IndexCost +
               getRISCVInstructionCost({RISCV::VRGATHER_VV, RISCV::VRGATHER_VV},

    if (!Mask.empty() && LT.first.isValid() && LT.first != 1 &&
        LT.second.isFixedLengthVector() &&
        LT.second.getVectorElementType().getSizeInBits() ==
        LT.second.getVectorNumElements() <
            cast<FixedVectorType>(Tp)->getNumElements() &&
                    cast<FixedVectorType>(Tp)->getNumElements()) ==
            static_cast<unsigned>(*LT.first.getValue())) {
      unsigned NumRegs = *LT.first.getValue();
      unsigned VF = cast<FixedVectorType>(Tp)->getNumElements();
      for (unsigned I = 0; I < NumRegs; ++I) {
        bool IsSingleVector = true;
                  I == NumRegs - 1 ? Mask.size() % SubVF : SubVF),
                  SubMask.begin(), [&](int I) {
                    bool SingleSubVector = I / VF == 0;
                    IsSingleVector &= SingleSubVector;
                    return (SingleSubVector ? 0 : 1) * SubVF + I % VF;
                           SubVecTy, SubMask, CostKind, 0, nullptr);

        SubLT.second.isValid() && SubLT.second.isFixedLengthVector()) {
      if (MinVLen == MaxVLen &&
          SubLT.second.getScalarSizeInBits() * Index % MinVLen == 0 &&
          SubLT.second.getSizeInBits() <= MinVLen)

           getRISCVInstructionCost(RISCV::VSLIDEDOWN_VI, LT.second, CostKind);
           getRISCVInstructionCost(RISCV::VSLIDEUP_VI, LT.second, CostKind);
           (1 + getRISCVInstructionCost({RISCV::VMV_S_X, RISCV::VMERGE_VVM},
                               Instruction::InsertElement);
    if (LT.second.getScalarSizeInBits() == 1) {
             (1 + getRISCVInstructionCost({RISCV::VMV_V_X, RISCV::VMSNE_VI},
           (1 + getRISCVInstructionCost({RISCV::VMV_V_I, RISCV::VMERGE_VIM,
                                         RISCV::VMV_X_S, RISCV::VMV_V_X,
           getRISCVInstructionCost(RISCV::VMV_V_X, LT.second, CostKind);
           getRISCVInstructionCost(RISCV::VRGATHER_VI, LT.second, CostKind);

    unsigned Opcodes[2] = {RISCV::VSLIDEDOWN_VX, RISCV::VSLIDEUP_VX};
      Opcodes[0] = RISCV::VSLIDEDOWN_VI;
    else if (Index < 0 && Index > -32)
      Opcodes[1] = RISCV::VSLIDEUP_VI;
    return LT.first * getRISCVInstructionCost(Opcodes, LT.second, CostKind);

    if (LT.second.isFixedLengthVector())
      LenCost = isInt<5>(LT.second.getVectorNumElements() - 1) ? 0 : 1;
    unsigned Opcodes[] = {RISCV::VID_V, RISCV::VRSUB_VX, RISCV::VRGATHER_VV};
    if (LT.second.isFixedLengthVector() &&
        isInt<5>(LT.second.getVectorNumElements() - 1))
      Opcodes[1] = RISCV::VRSUB_VI;
        getRISCVInstructionCost(Opcodes, LT.second, CostKind);
    return LT.first * (LenCost + GatherCost + ExtendCost);
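
// getInterleavedMemoryOpCost: if a segment load/store is legal for this
// factor and element type, cost the access as a single legalized memory op;
// otherwise model it as a wide load/store plus the shuffles needed to
// (de)interleave the lanes.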
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (isa<ScalableVectorType>(VecTy) && Factor != 2)

  if (!UseMaskForCond && !UseMaskForGaps &&
      Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    auto *VTy = cast<VectorType>(VecTy);
    if (LT.second.isVector()) {
          VTy->getElementCount().divideCoefficientBy(Factor));
      if (VTy->getElementCount().isKnownMultipleOf(Factor) &&
              LT.second.getVectorElementCount());
        return LT.first + LegalMemCost;

  if (isa<ScalableVectorType>(VecTy))

  auto *FVTy = cast<FixedVectorType>(VecTy);
  unsigned VF = FVTy->getNumElements() / Factor;
  if (Opcode == Instruction::Load) {
    for (unsigned Index : Indices) {
                                   UseMaskForCond, UseMaskForGaps);

  assert(Opcode == Instruction::Store && "Opcode must be a store");
  return MemCost + ShuffleCost;
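
// Gather/scatter and strided accesses are modeled as one scalar memory
// operation per element: the element-type load/store cost multiplied by the
// estimated vector length of the data type.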
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
  if ((Opcode == Instruction::Load &&
      (Opcode == Instruction::Store &&

  auto &VTy = *cast<VectorType>(DataTy);
                      {TTI::OK_AnyValue, TTI::OP_None}, I);
  unsigned NumLoads = getEstimatedVLFor(&VTy);
  return NumLoads * MemOpCost;

    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
  if (((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
      (Opcode != Instruction::Load && Opcode != Instruction::Store))

  auto &VTy = *cast<VectorType>(DataTy);
                      {TTI::OK_AnyValue, TTI::OP_None}, I);
  unsigned NumLoads = getEstimatedVLFor(&VTy);
  return NumLoads * MemOpCost;
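
// Cost table for intrinsics whose cost depends only on the (legal) element
// type: rounding operations, bswap/bitreverse, ctpop/ctlz/cttz and their VP
// counterparts, keyed by intrinsic ID and MVT.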
    {Intrinsic::floor, MVT::f32, 9},
    {Intrinsic::floor, MVT::f64, 9},
    {Intrinsic::ceil, MVT::f32, 9},
    {Intrinsic::ceil, MVT::f64, 9},
    {Intrinsic::trunc, MVT::f32, 7},
    {Intrinsic::trunc, MVT::f64, 7},
    {Intrinsic::round, MVT::f32, 9},
    {Intrinsic::round, MVT::f64, 9},
    {Intrinsic::roundeven, MVT::f32, 9},
    {Intrinsic::roundeven, MVT::f64, 9},
    {Intrinsic::rint, MVT::f32, 7},
    {Intrinsic::rint, MVT::f64, 7},
    {Intrinsic::lrint, MVT::i32, 1},
    {Intrinsic::lrint, MVT::i64, 1},
    {Intrinsic::llrint, MVT::i64, 1},
    {Intrinsic::nearbyint, MVT::f32, 9},
    {Intrinsic::nearbyint, MVT::f64, 9},
    {Intrinsic::bswap, MVT::i16, 3},
    {Intrinsic::bswap, MVT::i32, 12},
    {Intrinsic::bswap, MVT::i64, 31},
    {Intrinsic::vp_bswap, MVT::i16, 3},
    {Intrinsic::vp_bswap, MVT::i32, 12},
    {Intrinsic::vp_bswap, MVT::i64, 31},
    {Intrinsic::vp_fshl, MVT::i8, 7},
    {Intrinsic::vp_fshl, MVT::i16, 7},
    {Intrinsic::vp_fshl, MVT::i32, 7},
    {Intrinsic::vp_fshl, MVT::i64, 7},
    {Intrinsic::vp_fshr, MVT::i8, 7},
    {Intrinsic::vp_fshr, MVT::i16, 7},
    {Intrinsic::vp_fshr, MVT::i32, 7},
    {Intrinsic::vp_fshr, MVT::i64, 7},
    {Intrinsic::bitreverse, MVT::i8, 17},
    {Intrinsic::bitreverse, MVT::i16, 24},
    {Intrinsic::bitreverse, MVT::i32, 33},
    {Intrinsic::bitreverse, MVT::i64, 52},
    {Intrinsic::vp_bitreverse, MVT::i8, 17},
    {Intrinsic::vp_bitreverse, MVT::i16, 24},
    {Intrinsic::vp_bitreverse, MVT::i32, 33},
    {Intrinsic::vp_bitreverse, MVT::i64, 52},
    {Intrinsic::ctpop, MVT::i8, 12},
    {Intrinsic::ctpop, MVT::i16, 19},
    {Intrinsic::ctpop, MVT::i32, 20},
    {Intrinsic::ctpop, MVT::i64, 21},
    {Intrinsic::vp_ctpop, MVT::i8, 12},
    {Intrinsic::vp_ctpop, MVT::i16, 19},
    {Intrinsic::vp_ctpop, MVT::i32, 20},
    {Intrinsic::vp_ctpop, MVT::i64, 21},
    {Intrinsic::vp_ctlz, MVT::i8, 19},
    {Intrinsic::vp_ctlz, MVT::i16, 28},
    {Intrinsic::vp_ctlz, MVT::i32, 31},
    {Intrinsic::vp_ctlz, MVT::i64, 35},
    {Intrinsic::vp_cttz, MVT::i8, 16},
    {Intrinsic::vp_cttz, MVT::i16, 23},
    {Intrinsic::vp_cttz, MVT::i32, 24},
    {Intrinsic::vp_cttz, MVT::i64, 25},
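
// VP intrinsics are first mapped to their functional (non-predicated) opcode
// so they can reuse the ordinary cost logic; getIntrinsicInstrCost then
// special-cases rounding, min/max, saturating arithmetic, ctpop/abs,
// get_active_lane_mask, stepvector and cttz_elts, and finally consults the
// table above for legal element types.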
#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)                                    \
  case Intrinsic::VPID:                                                        \
#include "llvm/IR/VPIntrinsics.def"
#undef HELPER_MAP_VPID_TO_VPSD

  switch (ICA.getID()) {
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
  case Intrinsic::round:
  case Intrinsic::roundeven: {
  case Intrinsic::umin:
  case Intrinsic::umax:
  case Intrinsic::smin:
  case Intrinsic::smax: {
    if (LT.second.isScalarInteger() && ST->hasStdExtZbb())
    switch (ICA.getID()) {
    case Intrinsic::umin:
      Op = RISCV::VMINU_VV;
    case Intrinsic::umax:
      Op = RISCV::VMAXU_VV;
    case Intrinsic::smin:
    case Intrinsic::smax:
    return LT.first * getRISCVInstructionCost(Op, LT.second, CostKind);
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::fabs:
  case Intrinsic::sqrt: {
  case Intrinsic::ctpop: {
  case Intrinsic::abs: {
  case Intrinsic::get_active_lane_mask: {
           getRISCVInstructionCost({RISCV::VSADDU_VX, RISCV::VMSLTU_VX},
  case Intrinsic::experimental_stepvector: {
      return getRISCVInstructionCost(RISCV::VID_V, LT.second, CostKind) +
             getRISCVInstructionCost(RISCV::VADD_VX, LT.second, CostKind);
    return 1 + (LT.first - 1);
  case Intrinsic::experimental_cttz_elts: {
        cast<ConstantInt>(ICA.getArgs()[1])->isZero())
  case Intrinsic::vp_rint: {
      return Cost * LT.first;
  case Intrinsic::vp_nearbyint: {
      return Cost * LT.first;
  case Intrinsic::vp_ceil:
  case Intrinsic::vp_floor:
  case Intrinsic::vp_round:
  case Intrinsic::vp_roundeven:
  case Intrinsic::vp_roundtozero: {
      return Cost * LT.first;
  case Intrinsic::vp_add:
  case Intrinsic::vp_and:
  case Intrinsic::vp_ashr:
  case Intrinsic::vp_lshr:
  case Intrinsic::vp_mul:
  case Intrinsic::vp_or:
  case Intrinsic::vp_sdiv:
  case Intrinsic::vp_shl:
  case Intrinsic::vp_srem:
  case Intrinsic::vp_sub:
  case Intrinsic::vp_udiv:
  case Intrinsic::vp_urem:
  case Intrinsic::vp_xor:
  case Intrinsic::vp_fadd:
  case Intrinsic::vp_fsub:
  case Intrinsic::vp_fmul:
  case Intrinsic::vp_fdiv:
  case Intrinsic::vp_frem: {
    std::optional<unsigned> FOp =

      LT.second.isVector()) {
    MVT EltTy = LT.second.getVectorElementType();
        ICA.getID(), EltTy))
      return LT.first * Entry->Cost;
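
// getCastInstrCost: vector casts are costed as RVV conversion sequences --
// vsext/vzext.vf2/4/8 for integer extends, vand+vmsne for truncation to i1,
// vmerge for extension from i1, and chains of widening/narrowing converts
// when the element sizes differ by more than one step.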
  bool IsVectorType = isa<VectorType>(Dst) && isa<VectorType>(Src);
      Dst->getScalarSizeInBits() > ST->getELen())

  if (!SrcLT.second.isVector() || !DstLT.second.isVector() ||
          SrcLT.second.getSizeInBits()) ||
          DstLT.second.getSizeInBits()))

  assert(ISD && "Invalid opcode");
  int PowDiff = (int)Log2_32(Dst->getScalarSizeInBits()) -
                (int)Log2_32(Src->getScalarSizeInBits());

    const unsigned SrcEltSize = Src->getScalarSizeInBits();
    if (SrcEltSize == 1) {
      return getRISCVInstructionCost({RISCV::VMV_V_I, RISCV::VMERGE_VIM},
    if ((PowDiff < 1) || (PowDiff > 3))
    unsigned SExtOp[] = {RISCV::VSEXT_VF2, RISCV::VSEXT_VF4, RISCV::VSEXT_VF8};
    unsigned ZExtOp[] = {RISCV::VZEXT_VF2, RISCV::VZEXT_VF4, RISCV::VZEXT_VF8};
    return getRISCVInstructionCost(Op, DstLT.second, CostKind);

    if (Dst->getScalarSizeInBits() == 1) {
      return getRISCVInstructionCost({RISCV::VAND_VI, RISCV::VMSNE_VI},

    unsigned SrcEltSize = Src->getScalarSizeInBits();
    unsigned DstEltSize = Dst->getScalarSizeInBits();
                          : RISCV::VFNCVT_F_F_W;
    for (; SrcEltSize != DstEltSize;) {
          (DstEltSize > SrcEltSize) ? DstEltSize >> 1 : DstEltSize << 1;

    if (Dst->getScalarSizeInBits() == 1)
    if (std::abs(PowDiff) <= 1)
    return std::abs(PowDiff);
    if (Src->getScalarSizeInBits() == 1)
    if (std::abs(PowDiff) <= 1)
unsigned RISCVTTIImpl::getEstimatedVLFor(VectorType *Ty) {
  if (isa<ScalableVectorType>(Ty)) {
  return cast<FixedVectorType>(Ty)->getNumElements();
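
// getMinMaxReductionCost: min/max reductions are a scalar insert, one
// vred{max,min}[u]/vfred{max,min} and a scalar read-back; fmaximum/fminimum
// additionally pay for NaN handling (vmfne + vcpop plus a merge), and types
// spanning several registers add one vector min/max per extra register.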
  if (IID == Intrinsic::umax || IID == Intrinsic::smin)

  if (IID == Intrinsic::maximum || IID == Intrinsic::minimum) {
    case Intrinsic::maximum:
        Opcodes = {RISCV::VFREDMAX_VS, RISCV::VFMV_F_S};
        Opcodes = {RISCV::VMFNE_VV, RISCV::VCPOP_M, RISCV::VFREDMAX_VS,
    case Intrinsic::minimum:
        Opcodes = {RISCV::VFREDMIN_VS, RISCV::VFMV_F_S};
        Opcodes = {RISCV::VMFNE_VV, RISCV::VCPOP_M, RISCV::VFREDMIN_VS,
    return ExtraCost + getRISCVInstructionCost(Opcodes, LT.second, CostKind);

  case Intrinsic::smax:
    SplitOp = RISCV::VMAX_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDMAX_VS, RISCV::VMV_X_S};
  case Intrinsic::smin:
    SplitOp = RISCV::VMIN_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDMIN_VS, RISCV::VMV_X_S};
  case Intrinsic::umax:
    SplitOp = RISCV::VMAXU_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDMAXU_VS, RISCV::VMV_X_S};
  case Intrinsic::umin:
    SplitOp = RISCV::VMINU_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDMINU_VS, RISCV::VMV_X_S};
  case Intrinsic::maxnum:
    SplitOp = RISCV::VFMAX_VV;
    Opcodes = {RISCV::VFMV_S_F, RISCV::VFREDMAX_VS, RISCV::VFMV_F_S};
  case Intrinsic::minnum:
    SplitOp = RISCV::VFMIN_VV;
    Opcodes = {RISCV::VFMV_S_F, RISCV::VFREDMIN_VS, RISCV::VFMV_F_S};

      (LT.first > 1) ? (LT.first - 1) *
                           getRISCVInstructionCost(SplitOp, LT.second, CostKind)
  return SplitCost + getRISCVInstructionCost(Opcodes, LT.second, CostKind);
    std::optional<FastMathFlags> FMF,
  assert(ISD && "Invalid opcode");

    Opcodes = {RISCV::VMNAND_MM, RISCV::VCPOP_M};
    return (LT.first - 1) +
           getRISCVInstructionCost(Opcodes, LT.second, CostKind) +
    Opcodes = {RISCV::VCPOP_M};
    return (LT.first - 1) +
           getRISCVInstructionCost(Opcodes, LT.second, CostKind) +
    for (unsigned i = 0; i < LT.first.getValue(); i++)
    return getRISCVInstructionCost(Opcodes, LT.second, CostKind);

    SplitOp = RISCV::VADD_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDSUM_VS, RISCV::VMV_X_S};
    SplitOp = RISCV::VOR_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDOR_VS, RISCV::VMV_X_S};
    SplitOp = RISCV::VXOR_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDXOR_VS, RISCV::VMV_X_S};
    SplitOp = RISCV::VAND_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDAND_VS, RISCV::VMV_X_S};
    SplitOp = RISCV::VFADD_VV;
    Opcodes = {RISCV::VFMV_S_F, RISCV::VFREDUSUM_VS, RISCV::VFMV_F_S};

      (LT.first > 1) ? (LT.first - 1) *
                           getRISCVInstructionCost(SplitOp, LT.second, CostKind)
  return SplitCost + getRISCVInstructionCost(Opcodes, LT.second, CostKind);
    unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy,
  if (Opcode != Instruction::Add && Opcode != Instruction::FAdd)
  return (LT.first - 1) +
  if (!isa<VectorType>(Ty))
  return getConstantPoolLoadCost(Ty, CostKind);

  if (VT == MVT::Other)
  if (Opcode == Instruction::Store && OpInfo.isConstant())
  if (Src->isVectorTy() && LT.second.isVector() &&
                    LT.second.getSizeInBits()))
  return Cost + BaseCost;
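
// getCmpSelInstrCost: vector selects are costed as vmerge (or mask logic for
// i1 elements), integer compares as a single mask-producing compare, and FP
// compares as vmflt/vmnand/vmor combinations covering the ordered, unordered
// and always-true/false predicates.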
  if (Opcode == Instruction::Select && ValTy->isVectorTy()) {
          getRISCVInstructionCost(
              {RISCV::VMANDN_MM, RISCV::VMAND_MM, RISCV::VMOR_MM},
          getRISCVInstructionCost(RISCV::VMERGE_VVM, LT.second, CostKind);

      MVT InterimVT = LT.second.changeVectorElementType(MVT::i8);
          getRISCVInstructionCost({RISCV::VMV_V_X, RISCV::VMSNE_VI},
          LT.first * getRISCVInstructionCost(
                         {RISCV::VMANDN_MM, RISCV::VMAND_MM, RISCV::VMOR_MM},

    return LT.first * getRISCVInstructionCost(
                          {RISCV::VMV_V_X, RISCV::VMSNE_VI, RISCV::VMERGE_VVM},

  if ((Opcode == Instruction::ICmp) && ValTy->isVectorTy() &&
           getRISCVInstructionCost(RISCV::VMSLT_VV, LT.second, CostKind);

  if ((Opcode == Instruction::FCmp) && ValTy->isVectorTy() &&
      return getRISCVInstructionCost(RISCV::VMXOR_MM, LT.second, CostKind);
      return LT.first * getRISCVInstructionCost(
                            {RISCV::VMFLT_VV, RISCV::VMFLT_VV, RISCV::VMOR_MM},
          getRISCVInstructionCost({RISCV::VMFLT_VV, RISCV::VMNAND_MM},
          getRISCVInstructionCost(RISCV::VMFLT_VV, LT.second, CostKind);

    return match(U, m_Select(m_Specific(I), m_Value(), m_Value())) &&
           U->getType()->isIntegerTy() &&
           !isa<ConstantData>(U->getOperand(1)) &&
           !isa<ConstantData>(U->getOperand(2));
    return Opcode == Instruction::PHI ? 0 : 1;

  if (Opcode != Instruction::ExtractElement &&
      Opcode != Instruction::InsertElement)

  if (!LT.second.isVector()) {
    auto *FixedVecTy = cast<FixedVectorType>(Val);
    Type *ElemTy = FixedVecTy->getElementType();
    auto NumElems = FixedVecTy->getNumElements();
    return Opcode == Instruction::ExtractElement
               ? StoreCost * NumElems + LoadCost
               : (StoreCost + LoadCost) * NumElems + StoreCost;

  if (LT.second.isScalableVector() && !LT.first.isValid())
        cast<VectorType>(Val)->getElementCount());
    if (Opcode == Instruction::ExtractElement) {
      return ExtendCost + ExtractCost;
    return ExtendCost + InsertCost + TruncCost;

  unsigned BaseCost = 1;
  unsigned SlideCost = Opcode == Instruction::InsertElement ? 2 : 1;
  if (LT.second.isFixedLengthVector()) {
    unsigned Width = LT.second.getVectorNumElements();
    else if (Opcode == Instruction::InsertElement)
    BaseCost = Opcode == Instruction::InsertElement ? 3 : 4;
  return BaseCost + SlideCost;
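
// getArithmeticInstrCost: add the cost of materializing constant operands,
// then charge one legalized RVV instruction (vadd/vsll/vmul/vdiv/vrem or the
// corresponding vfadd/vfmul/vfdiv/vfsgnjn form) per register split; anything
// unrecognized falls back to the base implementation.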
  if (!LT.second.isVector())

  auto getConstantMatCost =
        return getConstantPoolLoadCost(Ty, CostKind);

    ConstantMatCost += getConstantMatCost(0, Op1Info);
    ConstantMatCost += getConstantMatCost(1, Op2Info);

    Op = RISCV::VADD_VV;
    Op = RISCV::VSLL_VV;
    Op = RISCV::VMUL_VV;
    Op = RISCV::VDIV_VV;
    Op = RISCV::VREM_VV;
    Op = RISCV::VFADD_VV;
    Op = RISCV::VFMUL_VV;
    Op = RISCV::VFDIV_VV;
    Op = RISCV::VFSGNJN_VV;

  return ConstantMatCost + LT.first * InstrCost;
    const auto *GEP = dyn_cast<GetElementPtrInst>(V);
    if (Info.isSameBase() && V != Base) {
      if (GEP->hasAllConstantIndices())
      if (Info.isUnitStride() &&
              GEP->getType()->getPointerAddressSpace()))
                              {TTI::OK_AnyValue, TTI::OP_None},
                              {TTI::OK_AnyValue, TTI::OP_None},
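
// getUnrollingPreferences: allow more aggressive runtime unrolling only for
// small scalar inner loops -- few blocks and exits, no vector instructions
// and no calls -- similar to the heuristics used by other in-order targets.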
  if (ST->enableDefaultUnroll())
  if (L->getHeader()->getParent()->hasOptSize())

  L->getExitingBlocks(ExitingBlocks);
                    << "Blocks: " << L->getNumBlocks() << "\n"
                    << "Exit blocks: " << ExitingBlocks.size() << "\n");
  if (ExitingBlocks.size() > 2)
  if (L->getNumBlocks() > 4)

  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      if (I.getType()->isVectorTy())
      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {

  return std::max<unsigned>(1U, RegWidth.getFixedValue() / ElemWidth);
  auto *VTy = dyn_cast<VectorType>(DataTy);
  if (!VTy || VTy->isScalableTy())

      TM.getSubtargetImpl(*Caller)->getFeatureBits();
      TM.getSubtargetImpl(*Callee)->getFeatureBits();
  return (CallerBits & CalleeBits) == CalleeBits;
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
  bool Considerable = false;
  AllowPromotionWithoutCommonHeader = false;
  if (!isa<SExtInst>(&I))
  Type *ConsideredSExtType =
  if (I.getType() != ConsideredSExtType)
  for (const User *U : I.users()) {
      Considerable = true;
      if (GEPInst->getNumOperands() > 2) {
        AllowPromotionWithoutCommonHeader = true;
  return Considerable;