#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H
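// CRTP helper: cast this base implementation to the concrete target type so
// that cost queries dispatch to target-specific overrides where they exist.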
  const T *thisT() const { return static_cast<const T *>(this); }
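  // Scalarization-based shuffle overhead helpers. The fragments below come,
  // in order, from getBroadcastShuffleOverhead, getPermuteShuffleOverhead,
  // getExtractSubvectorOverhead and getInsertSubvectorOverhead, which model a
  // shuffle as a sequence of per-element extracts and inserts.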
    Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
    Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
    Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
    Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
           "Can only extract subvectors from vectors");
           (Index + NumSubElts) <=
           "SK_ExtractSubvector index out of range");
    for (int i = 0; i != NumSubElts; ++i) {
          thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                      CostKind, i + Index, nullptr, nullptr);
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
           "Can only insert subvectors into vectors");
           (Index + NumSubElts) <=
           "SK_InsertSubvector index out of range");
    for (int i = 0; i != NumSubElts; ++i) {
      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
          thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
                                      i + Index, nullptr, nullptr);
    return static_cast<const T *>(this)->getST();
    return static_cast<const T *>(this)->getTLI();
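  // getCommonMaskedMemoryOpCost: cost of a masked, gather/scatter or
  // expanding/compressing memory operation, built from the scalarized
  // per-lane memory ops plus packing and, for variable masks, branching
  // overhead.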
                                              bool IsGatherScatter,
    unsigned VF = VT->getNumElements();
        VF * thisT()->getMemoryOpCost(Opcode, VT->getElementType(), Alignment,
                            Opcode == Instruction::Store, CostKind);
          VF * (thisT()->getCFInstrCost(Instruction::Br, CostKind) +
                thisT()->getCFInstrCost(Instruction::PHI, CostKind));
    return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
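  // isSplatMask: returns true if the shuffle mask broadcasts a single source
  // element (ignoring out-of-range/undef lanes) and reports its index.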
  static bool isSplatMask(ArrayRef<int> Mask, unsigned NumSrcElts, int &Index) {
    bool IsCompared = false;
        return P.index() != Mask.size() - 1 || IsCompared;
      if (static_cast<unsigned>(P.value()) >= NumSrcElts * 2)
        SplatIdx = P.value();
        return P.index() != Mask.size() - 1;
      return SplatIdx == P.value();
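  // Cost of lowering a multi-result vector math intrinsic (modf, sincos,
  // sincospi) as a call to a vector library function, when one is available,
  // plus loads of any results the call returns through memory.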
  std::optional<InstructionCost> getMultipleResultIntrinsicVectorLibCallCost(
      std::optional<unsigned> CallRetElementIndex = {}) const {
    EVT VT = getTLI()->getValueType(DL, Ty);
    RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
    switch (ICA.getID()) {
    case Intrinsic::modf:
    case Intrinsic::sincospi:
    case Intrinsic::sincos:
    RTLIB::LibcallImpl LibcallImpl = getTLI()->getLibcallImpl(LC);
    if (LibcallImpl == RTLIB::Unsupported)
                                   VecTy, {}, CostKind, 0, nullptr, {});
      if (Idx == CallRetElementIndex)
      Cost += thisT()->getMemoryOpCost(
          Instruction::Load, VectorTy,
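  // Thin wrappers that answer TTI queries by forwarding to TargetLoweringBase
  // and TargetMachine: misaligned access support, inline compatibility,
  // address-space properties, legal immediates and addressing modes, indexed
  // load/store legality, truncate/hoist profitability, and type legality.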
                                      unsigned *Fast) const override {
    return getTLI()->allowsMisalignedMemoryAccesses(
                           const Function *Callee) const override {
    return (CallerBits & CalleeBits) == CalleeBits;
    return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
    return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
    return getTLI()->getTargetMachine().Options.ThreadModel ==
  std::pair<const Value *, unsigned>
    return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
                                          Value *NewV) const override {
    return getTLI()->isLegalAddImmediate(imm);
    return getTLI()->isLegalAddScalableImmediate(Imm);
    return getTLI()->isLegalICmpImmediate(imm);
                             bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
                             int64_t ScalableOffset = 0) const override {
    return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
    return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);
                            Type *ScalarValTy) const override {
    auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
      EVT VT = getTLI()->getValueType(DL, SrcTy);
      if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
          getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
      return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
    while (VF > 2 && IsSupportedByTarget(VF))
    EVT VT = getTLI()->getValueType(DL, Ty, true);
    return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
    EVT VT = getTLI()->getValueType(DL, Ty, true);
    return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
                                       unsigned AddrSpace) const override {
    return getTLI()->isTruncateFree(Ty1, Ty2);
    return getTLI()->isProfitableToHoist(I);
  bool useAA() const override { return getST()->useAA(); }
    EVT VT = getTLI()->getValueType(DL, Ty, true);
    return getTLI()->isTypeLegal(VT);
    EVT ETy = getTLI()->getValueType(DL, Ty);
    return getTLI()->getNumRegisters(Ty->getContext(), ETy);
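  // getEstimatedNumberOfCaseClusters: scans the switch's case values to
  // decide how the cases would cluster (bit tests, a jump table sized by the
  // case-value range, or individual compares).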
    unsigned N = SI.getNumCases();
    if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
    APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
    APInt MinCaseVal = MaxCaseVal;
    for (auto CI : SI.cases()) {
      const APInt &CaseVal = CI.getCaseValue()->getValue();
      if (CaseVal.sgt(MaxCaseVal))
        MaxCaseVal = CaseVal;
      if (CaseVal.slt(MinCaseVal))
        MinCaseVal = CaseVal;
    if (N <= DL.getIndexSizeInBits(0u)) {
      for (auto I : SI.cases()) {
    if (N < 2 || N < TLI->getMinimumJumpTableEntries())
        (MaxCaseVal - MinCaseVal)
            .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
      JumpTableSize = Range;
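  // Constant-hoisting preference and unrolling preferences: division and
  // remainder by a constant stay attached to their user, and the unroll limit
  // is derived from the subtarget's scheduling model when it provides a
  // loop micro-op buffer size.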
                                       const Function &Fn) const override {
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem: {
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
             << "advising against unrolling the loop because it "
  std::optional<Instruction *>
  std::optional<Value *>
                                   bool &KnownBitsComputed) const override {
          SimplifyAndSetOp) const override {
        IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
  std::optional<unsigned>
    return std::optional<unsigned>(
  std::optional<unsigned>
    std::optional<unsigned> TargetResult =
        getST()->getCacheAssociativity(static_cast<unsigned>(Level));
    return getST()->getCacheLineSize();
    return getST()->getPrefetchDistance();
                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches,
                                bool HasCall) const override {
    return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                         NumPrefetches, HasCall);
    return getST()->getMaxPrefetchIterationsAhead();
    return getST()->enableWritePrefetching();
    return getST()->shouldPrefetchAddressSpace(AS);
  std::optional<unsigned> getMaxVScale() const override { return std::nullopt; }
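  // getScalarizationOverhead: sums insert/extract element costs for the
  // demanded lanes of a vector, used to price scalarized operations.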
      VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract,
           (VL.empty() || VL.size() == Ty->getNumElements()) &&
           "Vector size mismatch");
    for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
      if (!DemandedElts[i])
      Value *InsertedVal = VL.empty() ? nullptr : VL[i];
        Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
        Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                         unsigned ScalarOpdIdx) const override {
                                              int OpdIdx) const override {
      int RetIdx) const override {
    return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
    for (Type *Ty : Tys) {
      if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
          !Ty->isPtrOrPtrVectorTy())
        filterConstantAndDuplicatedOperands(Args, Tys), CostKind);
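  // getArithmeticInstrCost: price an arithmetic instruction from its
  // legalized type; operations that legalize cleanly cost LT.first per step,
  // and a remainder that must be expanded is priced as div + mul + sub.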
    EVT MTy = getTLI()->getValueType(DL, Ty);
      if (MTy == LK.second)
      const Instruction *CxtI = nullptr) const override {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");
    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      return LT.first * OpCost;
    if (!TLI->isOperationExpand(ISD, LT.second)) {
      return LT.first * 2 * OpCost;
      unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
          DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
          thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
          thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
      return DivCost + MulCost + SubCost;
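  // improveShuffleKindFromMask: refine a generic shuffle kind by inspecting
  // the mask (splat, single-source permute, extract/insert-subvector
  // patterns) so a cheaper specialized cost can be used.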
    int NumDstElts = Mask.size();
    int NumSrcElts = SrcTy->getElementCount().getKnownMinValue();
      if (isSplatMask(Mask, NumSrcElts, Index))
          (Index + NumDstElts) <= NumSrcElts) {
      if (all_of(Mask, [NumSrcElts](int M) { return M < NumSrcElts; }))
              Mask, NumSrcElts, NumSubElts, Index)) {
        if (Index + NumSubElts > NumSrcElts)
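  // getShuffleCost: dispatch on the (possibly refined) shuffle kind to the
  // scalarization-based overhead helpers above.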
                                 const Instruction *CxtI = nullptr) const override {
      return getBroadcastShuffleOverhead(FVT, CostKind);
      return getPermuteShuffleOverhead(FVT, CostKind);
      return getExtractSubvectorOverhead(SrcTy, CostKind, Index,
      return getInsertSubvectorOverhead(DstTy, CostKind, Index,
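  // getCastInstrCost: casts the target makes free (no-op bitcasts and
  // address-space casts, free zext/sext/trunc) cost nothing; otherwise the
  // cost is derived from the legalized source and destination types, splitting
  // or scalarizing vectors when needed.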
    TypeSize SrcSize = SrcLT.second.getSizeInBits();
    TypeSize DstSize = DstLT.second.getSizeInBits();
    bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
    bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
    case Instruction::Trunc:
    case Instruction::BitCast:
      if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
    case Instruction::FPExt:
      if (I && getTLI()->isExtFree(I))
    case Instruction::ZExt:
      if (TLI->isZExtFree(SrcLT.second, DstLT.second))
    case Instruction::SExt:
      if (I && getTLI()->isExtFree(I))
      if (DstLT.first == SrcLT.first &&
    case Instruction::AddrSpaceCast:
                                   Dst->getPointerAddressSpace()))
    if (SrcLT.first == DstLT.first &&
    if (!SrcVTy && !DstVTy) {
    if (DstVTy && SrcVTy) {
      if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
        if (Opcode == Instruction::ZExt)
        if (Opcode == Instruction::SExt)
          return SrcLT.first * 2;
        return SrcLT.first * 1;
      if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isKnownEven() &&
          DstVTy->getElementCount().isKnownEven()) {
        const T *TTI = thisT();
            (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
            (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
            Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
    if (Opcode == Instruction::BitCast) {
    return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                       CostKind, Index, nullptr, nullptr) +
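  // getCmpSelInstrCost: compares and selects that legalize without expansion
  // cost LT.first; otherwise the vector case is scalarized using the scalar
  // compare/select cost.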
                                     const Instruction *I = nullptr) const override {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");
                                            Op1Info, Op2Info, I);
    assert(CondTy && "CondTy must exist");
        !TLI->isOperationExpand(ISD, LT.second)) {
      return LT.first * 1;
        Opcode, ValVTy->getScalarType(), CondTy->getScalarType(), VecPred,
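  // getVectorInstrCost overloads and getReplicationShuffleCost: the
  // instruction- and value-based forms funnel into the basic
  // (opcode, type, index) form; replication shuffles are priced as extracts
  // of the demanded source lanes plus inserts of the demanded result lanes.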
                                     unsigned Index, const Value *Op0,
                                     const Value *Op1) const override {
                                     unsigned Index, Value *Scalar,
                                     ArrayRef<std::tuple<Value *, User *, int>>
                                         ScalarUserAndIdx) const override {
    return thisT()->getVectorInstrCost(Opcode, Val, CostKind, Index, nullptr,
                                     unsigned Index) const override {
    Value *Op0 = nullptr;
    Value *Op1 = nullptr;
      Op0 = IE->getOperand(0);
      Op1 = IE->getOperand(1);
    return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
                                       unsigned Index) const override {
    unsigned NewIndex = -1;
             "Unexpected index from end of vector");
      NewIndex = FVTy->getNumElements() - 1 - Index;
    return thisT()->getVectorInstrCost(Opcode, Val, CostKind, NewIndex, nullptr,
                                            const APInt &DemandedDstElts,
           "Unexpected size of DemandedDstElts.");
    Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
    Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
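  // getMemoryOpCost and getInterleavedMemoryOpCost: an interleaved access is
  // priced as the wide load/store plus the scalarization traffic needed to
  // split or assemble the per-member subvectors, with extra masking cost when
  // a mask is required for conditional access or gaps.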
    assert(!Src->isVoidTy() && "Invalid type");
                              LT.second.getSizeInBits())) {
      if (Opcode == Instruction::Store)
                             Opcode == Instruction::Store, CostKind);
                                  bool UseMaskForCond = false,
                                  bool UseMaskForGaps = false) const override {
    unsigned NumElts = VT->getNumElements();
    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
    unsigned NumSubElts = NumElts / Factor;
    if (UseMaskForCond || UseMaskForGaps) {
      unsigned IID = Opcode == Instruction::Load ? Intrinsic::masked_load
                                                 : Intrinsic::masked_store;
      Cost = thisT()->getMemIntrinsicInstrCost(
    unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
    if (Cost.isValid() && VecTySize > VecTyLTSize) {
      unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
      unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
      BitVector UsedInsts(NumLegalInsts, false);
      for (unsigned Index : Indices)
        for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
          UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
           "Interleaved memory op has too many members");
    for (unsigned Index : Indices) {
      assert(Index < Factor && "Invalid index for interleaved memory op");
      for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
        DemandedLoadStoreElts.setBit(Index + Elm * Factor);
    if (Opcode == Instruction::Load) {
              SubVT, DemandedAllSubElts,
      Cost += Indices.size() * InsSubCost;
      Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
              SubVT, DemandedAllSubElts,
      Cost += ExtSubCost * Indices.size();
      Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
    if (!UseMaskForCond)
    Cost += thisT()->getReplicationShuffleCost(
        I8Type, Factor, NumSubElts,
        UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
    if (UseMaskForGaps) {
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
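  // getIntrinsicInstrCost: VP (vector-predicated) intrinsics are priced like
  // their non-VP counterparts by forwarding to the matching memory,
  // arithmetic, cast, compare or functional-intrinsic cost hook.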
    std::optional<unsigned> FOp =
      if (ICA.getID() == Intrinsic::vp_load) {
          Alignment = VPI->getPointerAlignment().valueOrOne();
          AS = PtrTy->getAddressSpace();
        return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
      if (ICA.getID() == Intrinsic::vp_store) {
          Alignment = VPI->getPointerAlignment().valueOrOne();
          AS = PtrTy->getAddressSpace();
        return thisT()->getMemoryOpCost(*FOp, ICA.getArgTypes()[0], Alignment,
          ICA.getID() == Intrinsic::vp_fneg) {
        return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
        return thisT()->getCastInstrCost(
        return thisT()->getCmpSelInstrCost(*FOp, ICA.getArgTypes()[0],
    if (ICA.getID() == Intrinsic::vp_load_ff) {
        Alignment = VPI->getPointerAlignment().valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(
    if (ICA.getID() == Intrinsic::vp_scatter) {
        Alignment = VPI->getPointerAlignment().valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(
                                        VarMask, Alignment, nullptr),
    if (ICA.getID() == Intrinsic::vp_gather) {
        Alignment = VPI->getPointerAlignment().valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(
                                        VarMask, Alignment, nullptr),
    if (ICA.getID() == Intrinsic::vp_select ||
        ICA.getID() == Intrinsic::vp_merge) {
    std::optional<Intrinsic::ID> FID =
    if (ICA.getID() == Intrinsic::experimental_vp_reverse)
      FID = Intrinsic::vector_reverse;
             "Expected VPIntrinsic to have Mask and Vector Length args and "
          *FID != Intrinsic::vector_reduce_fadd &&
          *FID != Intrinsic::vector_reduce_fmul) {
        return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
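    // Argument-aware costs for specific intrinsics: powi expansion by repeated
    // multiplication, masked/strided memory intrinsics forwarded to
    // getMemIntrinsicInstrCost, vector insert/extract/splice priced as
    // shuffles, reductions forwarded to the reduction cost hooks, and funnel
    // shifts expanded into shift/or/select sequences.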
    case Intrinsic::powi:
        bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
        if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
                                               ShouldOptForSize)) {
          unsigned ActiveBits = Exponent.getActiveBits();
          unsigned PopCount = Exponent.popcount();
              thisT()->getArithmeticInstrCost(Instruction::FMul, RetTy, CostKind);
          if (RHSC->isNegative())
            Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
    case Intrinsic::cttz:
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
    case Intrinsic::ctlz:
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
    case Intrinsic::memcpy:
      return thisT()->getMemcpyCost(ICA.getInst());
    case Intrinsic::masked_scatter: {
      const Value *Mask = Args[2];
      Align Alignment = I->getParamAlign(1).valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(
    case Intrinsic::masked_gather: {
      const Value *Mask = Args[1];
      Align Alignment = I->getParamAlign(0).valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(
                                        VarMask, Alignment, I),
    case Intrinsic::masked_compressstore: {
      const Value *Mask = Args[2];
      Align Alignment = I->getParamAlign(1).valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(
    case Intrinsic::masked_expandload: {
      const Value *Mask = Args[1];
      Align Alignment = I->getParamAlign(0).valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(
    case Intrinsic::experimental_vp_strided_store: {
      const Value *Ptr = Args[1];
      const Value *Mask = Args[3];
      const Value *EVL = Args[4];
          I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy));
      return thisT()->getMemIntrinsicInstrCost(
    case Intrinsic::experimental_vp_strided_load: {
      const Value *Ptr = Args[0];
      const Value *Mask = Args[2];
      const Value *EVL = Args[3];
          I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy));
      return thisT()->getMemIntrinsicInstrCost(
    case Intrinsic::stepvector: {
    case Intrinsic::vector_extract: {
    case Intrinsic::vector_insert: {
      return thisT()->getShuffleCost(
    case Intrinsic::vector_splice_left:
    case Intrinsic::vector_splice_right: {
      return thisT()->getShuffleCost(
          IID == Intrinsic::vector_splice_left ? Index : -Index,
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
    case Intrinsic::vector_reduce_fmaximum:
    case Intrinsic::vector_reduce_fminimum:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin: {
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul: {
          IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
    case Intrinsic::fshl:
    case Intrinsic::fshr: {
      const Value *X = Args[0];
      const Value *Y = Args[1];
      const Value *Z = Args[2];
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
          thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
        Cost += thisT()->getArithmeticInstrCost(
                             : BinaryOperator::URem,
            {TTI::OK_UniformConstantValue, TTI::OP_None});
            thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
            thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
    case Intrinsic::experimental_cttz_elts: {
      if (!getTLI()->shouldExpandCttzElements(ArgType))
      unsigned EltWidth = getTLI()->getBitWidthForCttzElements(
          thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);
          thisT()->getArithmeticInstrCost(Instruction::Sub, NewVecTy, CostKind);
      Cost += thisT()->getCastInstrCost(Instruction::SExt, NewVecTy,
          thisT()->getArithmeticInstrCost(Instruction::And, NewVecTy, CostKind);
                                         NewEltTy, NewVecTy, FMF, I, 1);
      Cost += thisT()->getTypeBasedIntrinsicInstrCost(ReducAttrs, CostKind);
          thisT()->getArithmeticInstrCost(Instruction::Sub, NewEltTy, CostKind);
    case Intrinsic::get_active_lane_mask:
    case Intrinsic::experimental_vector_match:
    case Intrinsic::experimental_vector_histogram_add:
    case Intrinsic::experimental_vector_histogram_uadd_sat:
    case Intrinsic::experimental_vector_histogram_umax:
    case Intrinsic::experimental_vector_histogram_umin:
      return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
    case Intrinsic::modf:
    case Intrinsic::sincos:
    case Intrinsic::sincospi: {
      std::optional<unsigned> CallRetElementIndex;
      if (ICA.getID() == Intrinsic::modf)
        CallRetElementIndex = 0;
      if (auto Cost = getMultipleResultIntrinsicVectorLibCallCost(
              ICA, CostKind, CallRetElementIndex))
    case Intrinsic::loop_dependence_war_mask:
    case Intrinsic::loop_dependence_raw_mask: {
                               PtrTy->getAddressSpace()));
      bool IsReadAfterWrite = IID == Intrinsic::loop_dependence_raw_mask;
          thisT()->getArithmeticInstrCost(Instruction::Sub, IntPtrTy, CostKind);
      if (IsReadAfterWrite) {
        Cost += thisT()->getIntrinsicInstrCost(AbsAttrs, CostKind);
      Cost += thisT()->getArithmeticInstrCost(Instruction::SDiv, IntPtrTy,
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CondTy,
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, IntPtrTy,
                                    {IntPtrTy, IntPtrTy}, FMF);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      ScalarizationCost = 0;
            filterConstantAndDuplicatedOperands(Args, ICA.getArgTypes()),
    return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
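  // getTypeBasedIntrinsicInstrCost: when only types are known, vector
  // intrinsics are priced either via a matching ISD node on the legalized
  // type or by fully scalarizing: ScalarCalls copies of the scalar intrinsic
  // plus the scalarization overhead computed above.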
      unsigned VecTyIndex = 0;
      if (IID == Intrinsic::vector_reduce_fadd ||
          IID == Intrinsic::vector_reduce_fmul)
      assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
          SkipScalarizationCost ? ScalarizationCostPassed : 0;
      unsigned ScalarCalls = 1;
      Type *ScalarRetTy = RetTy;
        if (!SkipScalarizationCost)
        ScalarCalls = std::max(ScalarCalls,
      for (Type *Ty : Tys) {
          if (!SkipScalarizationCost)
          ScalarCalls = std::max(ScalarCalls,
          Ty = Ty->getScalarType();
      if (ScalarCalls == 1)
          thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
      return ScalarCalls * ScalarCost + ScalarizationCost;
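    // Main type-based switch: intrinsics with a direct ISD mapping (sqrt,
    // fma, rounding, min/max, saturating and overflow arithmetic, ...) are
    // priced from their legalized type; the remaining cases are expanded into
    // explicit sequences of arithmetic, compare/select and cast costs.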
    case Intrinsic::sqrt:
    case Intrinsic::sin:
    case Intrinsic::cos:
    case Intrinsic::sincos:
    case Intrinsic::sincospi:
    case Intrinsic::modf:
    case Intrinsic::tan:
    case Intrinsic::asin:
    case Intrinsic::acos:
    case Intrinsic::atan:
    case Intrinsic::atan2:
    case Intrinsic::sinh:
    case Intrinsic::cosh:
    case Intrinsic::tanh:
    case Intrinsic::exp:
    case Intrinsic::exp2:
    case Intrinsic::exp10:
    case Intrinsic::log:
    case Intrinsic::log10:
    case Intrinsic::log2:
    case Intrinsic::ldexp:
    case Intrinsic::fabs:
    case Intrinsic::canonicalize:
    case Intrinsic::minnum:
    case Intrinsic::maxnum:
    case Intrinsic::minimum:
    case Intrinsic::maximum:
    case Intrinsic::minimumnum:
    case Intrinsic::maximumnum:
    case Intrinsic::copysign:
    case Intrinsic::floor:
    case Intrinsic::ceil:
    case Intrinsic::trunc:
    case Intrinsic::nearbyint:
    case Intrinsic::rint:
    case Intrinsic::lrint:
    case Intrinsic::llrint:
    case Intrinsic::round:
    case Intrinsic::roundeven:
    case Intrinsic::lround:
    case Intrinsic::llround:
    case Intrinsic::pow:
    case Intrinsic::fma:
    case Intrinsic::fmuladd:
    case Intrinsic::experimental_constrained_fmuladd:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::sideeffect:
    case Intrinsic::pseudoprobe:
    case Intrinsic::arithmetic_fence:
    case Intrinsic::masked_store: {
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMemIntrinsicInstrCost(
    case Intrinsic::masked_load: {
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMemIntrinsicInstrCost(
    case Intrinsic::experimental_vp_strided_store: {
      Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
      return thisT()->getMemIntrinsicInstrCost(
    case Intrinsic::experimental_vp_strided_load: {
      Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
      return thisT()->getMemIntrinsicInstrCost(
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
      return thisT()->getArithmeticReductionCost(
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul:
      return thisT()->getArithmeticReductionCost(
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
    case Intrinsic::vector_reduce_fmaximum:
    case Intrinsic::vector_reduce_fminimum:
    case Intrinsic::experimental_vector_match: {
      unsigned SearchSize = NeedleTy->getNumElements();
      EVT SearchVT = getTLI()->getValueType(DL, SearchTy);
      if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize))
      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, NeedleTy,
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SearchTy,
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SearchTy, RetTy,
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
          thisT()->getArithmeticInstrCost(BinaryOperator::And, RetTy, CostKind);
    case Intrinsic::vector_reverse:
    case Intrinsic::experimental_vector_histogram_add:
    case Intrinsic::experimental_vector_histogram_uadd_sat:
    case Intrinsic::experimental_vector_histogram_umax:
    case Intrinsic::experimental_vector_histogram_umin: {
      Align Alignment = thisT()->DL.getABITypeAlign(EltTy);
      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, PtrsTy,
      Cost += thisT()->getMemoryOpCost(Instruction::Load, EltTy, Alignment, 0,
      case Intrinsic::experimental_vector_histogram_add:
            thisT()->getArithmeticInstrCost(Instruction::Add, EltTy, CostKind);
      case Intrinsic::experimental_vector_histogram_uadd_sat: {
        Cost += thisT()->getIntrinsicInstrCost(UAddSat, CostKind);
      case Intrinsic::experimental_vector_histogram_umax: {
      case Intrinsic::experimental_vector_histogram_umin: {
      Cost += thisT()->getMemoryOpCost(Instruction::Store, EltTy, Alignment, 0,
    case Intrinsic::get_active_lane_mask: {
      EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
      EVT ArgVT = getTLI()->getValueType(DL, ArgTy, true);
      if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgVT))
          thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
    case Intrinsic::experimental_memset_pattern:
    case Intrinsic::abs:
    case Intrinsic::fshl:
    case Intrinsic::fshr:
    case Intrinsic::smax:
    case Intrinsic::smin:
    case Intrinsic::umax:
    case Intrinsic::umin:
    case Intrinsic::sadd_sat:
    case Intrinsic::ssub_sat:
    case Intrinsic::uadd_sat:
    case Intrinsic::usub_sat:
    case Intrinsic::smul_fix:
    case Intrinsic::umul_fix:
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::usub_with_overflow:
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow:
    case Intrinsic::fptosi_sat:
    case Intrinsic::fptoui_sat: {
      if (!SrcLT.first.isValid() || !RetLT.first.isValid())
    case Intrinsic::ctpop:
    case Intrinsic::ctlz:
    case Intrinsic::cttz:
    case Intrinsic::bswap:
    case Intrinsic::bitreverse:
    case Intrinsic::ucmp:
    case Intrinsic::scmp:
      Type *LegalizeTy = ST ? ST->getContainedType(0) : RetTy;
      if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
          return (LT.first * 2);
          return (LT.first * 1);
        return (LT.first * 2);
    case Intrinsic::fmuladd: {
      return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
             thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
    case Intrinsic::experimental_constrained_fmuladd: {
          Intrinsic::experimental_constrained_fmul, RetTy, Tys);
          Intrinsic::experimental_constrained_fadd, RetTy, Tys);
      return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
             thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
    case Intrinsic::smin:
    case Intrinsic::smax:
    case Intrinsic::umin:
    case Intrinsic::umax: {
      bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::ssub_with_overflow: {
      unsigned Opcode = IID == Intrinsic::sadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;
      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
          2 * thisT()->getCmpSelInstrCost(Instruction::ICmp, SumTy, OverflowTy,
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::usub_with_overflow: {
      unsigned Opcode = IID == Intrinsic::uadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;
      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      bool IsSigned = IID == Intrinsic::smul_with_overflow;
      unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
      Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
      Cost += thisT()->getArithmeticInstrCost(
      Cost += thisT()->getArithmeticInstrCost(
          Instruction::AShr, MulTy, CostKind,
      Cost += thisT()->getCmpSelInstrCost(
    case Intrinsic::sadd_sat:
    case Intrinsic::ssub_sat: {
                                ? Intrinsic::sadd_with_overflow
                                : Intrinsic::ssub_with_overflow;
                                      nullptr, ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
      Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
    case Intrinsic::uadd_sat:
    case Intrinsic::usub_sat: {
                                ? Intrinsic::uadd_with_overflow
                                : Intrinsic::usub_with_overflow;
                                      nullptr, ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
          thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
    case Intrinsic::smul_fix:
    case Intrinsic::umul_fix: {
          IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
      Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
      Cost += thisT()->getArithmeticInstrCost(
      Cost += thisT()->getArithmeticInstrCost(
      Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
    case Intrinsic::abs: {
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::Sub, RetTy, CostKind,
    case Intrinsic::fshl:
    case Intrinsic::fshr: {
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
          thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
          thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy, CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
      Cost += thisT()->getArithmeticInstrCost(
                           : BinaryOperator::URem,
          RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
          {TTI::OK_UniformConstantValue, TTI::OP_None});
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
    case Intrinsic::fptosi_sat:
    case Intrinsic::fptoui_sat: {
      Type *FromTy = Tys[0];
      bool IsSigned = IID == Intrinsic::fptosi_sat;
      Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
      Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
      Cost += thisT()->getCastInstrCost(
          IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
      Cost += thisT()->getCmpSelInstrCost(
      Cost += thisT()->getCmpSelInstrCost(
    case Intrinsic::ucmp:
    case Intrinsic::scmp: {
      Type *CmpTy = Tys[0];
          thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
          thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
        Cost += 2 * thisT()->getCmpSelInstrCost(
                        BinaryOperator::Select, RetTy, CondTy,
            2 * thisT()->getCastInstrCost(CastInst::ZExt, RetTy, CondTy,
        Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
    case Intrinsic::maximumnum:
    case Intrinsic::minimumnum: {
          thisT()->getIntrinsicInstrCost(FCanonicalizeAttrs, CostKind);
      return LT.first + FCanonicalizeCost * 2;
    if (!SkipScalarizationCost) {
      ScalarizationCost = 0;
      for (Type *RetVTy : RetVTys) {
      for (Type *Ty : Tys) {
        if (Ty->isVectorTy())
          Ty = Ty->getScalarType();
          thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      for (Type *Ty : Tys) {
        ScalarCalls = std::max(ScalarCalls,
      return ScalarCalls * ScalarCost + ScalarizationCost;
    return SingleCallCost;
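  // getMemIntrinsicInstrCost: masked, gather/scatter, strided and
  // expanding/compressing memory intrinsics all funnel into
  // getCommonMaskedMemoryOpCost with the appropriate load/store opcode.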
    unsigned Id = MICA.getID();
    case Intrinsic::experimental_vp_strided_load:
    case Intrinsic::experimental_vp_strided_store: {
      unsigned Opcode = Id == Intrinsic::experimental_vp_strided_load
                            : Instruction::Store;
      return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
    case Intrinsic::masked_scatter:
    case Intrinsic::masked_gather:
    case Intrinsic::vp_scatter:
    case Intrinsic::vp_gather: {
      unsigned Opcode = (MICA.getID() == Intrinsic::masked_gather ||
                         MICA.getID() == Intrinsic::vp_gather)
                            : Instruction::Store;
      return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
    case Intrinsic::vp_load:
    case Intrinsic::vp_store:
    case Intrinsic::masked_load:
    case Intrinsic::masked_store: {
          Id == Intrinsic::masked_load ? Instruction::Load : Instruction::Store;
      return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
    case Intrinsic::masked_compressstore:
    case Intrinsic::masked_expandload: {
      unsigned Opcode = MICA.getID() == Intrinsic::masked_expandload
                            : Instruction::Store;
      return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
    case Intrinsic::vp_load_ff:
    if (!LT.first.isValid())
        Tp && LT.second.isFixedLengthVector() &&
        return divideCeil(FTp->getNumElements(), SubTp->getNumElements());
    return LT.first.getValue();
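  // Reduction cost helpers: tree reductions are modeled as log2(NumElts)
  // rounds of shuffle + op (getTreeReductionCost, getMinMaxReductionCost),
  // with extended and multiply-accumulate reductions adding the extension and
  // multiply costs on top.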
    Type *ScalarTy = Ty->getElementType();
    if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
      return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
             thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
    unsigned LongVectorCount = 0;
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      ShuffleCost += thisT()->getShuffleCost(
      ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
    NumReduxLevels -= LongVectorCount;
        NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
    return ShuffleCost + ArithCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
    return ExtractCost + ArithCost;
                                         std::optional<FastMathFlags> FMF,
    assert(Ty && "Unknown reduction vector type");
    Type *ScalarTy = Ty->getElementType();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
    unsigned LongVectorCount = 0;
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      ShuffleCost += thisT()->getShuffleCost(
    NumReduxLevels -= LongVectorCount;
    return ShuffleCost + MinMaxCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
      VectorType *Ty, std::optional<FastMathFlags> FMF,
        FTy && IsUnsigned && Opcode == Instruction::Add &&
      return thisT()->getCastInstrCost(Instruction::BitCast, IntTy, FTy,
             thisT()->getIntrinsicInstrCost(ICA, CostKind);
        thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
        IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
    return RedCost + ExtCost;
    assert((RedOpcode == Instruction::Add || RedOpcode == Instruction::Sub) &&
           "The reduction opcode is expected to be Add or Sub.");
        RedOpcode, ExtTy, std::nullopt, CostKind);
        IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
        thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
    return RedCost + MulCost + 2 * ExtCost;
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static const Function * getCalledFunction(const Value *V)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static unsigned getNumElements(Type *Ty)
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
This file describes how to lower LLVM code to machine code.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool sgt(const APInt &RHS) const
Signed greater than comparison.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
size_t size() const
size - Get the array size.
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
InstructionCost getFPOpCost(Type *Ty) const override
bool preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const override
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false) const override
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1) const override
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override
Try to calculate op costs for min/max reduction operations.
bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty) const override
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const override
unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const override
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override
bool shouldBuildLookupTables() const override
InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}) const override
Estimate the overhead of scalarizing an instruction.
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override
bool isProfitableToHoist(Instruction *I) const override
unsigned getNumberOfParts(Type *Tp) const override
unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const override
InstructionCost getVectorInstrCost(const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override
bool useAA() const override
unsigned getPrefetchDistance() const override
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *SrcTy, int &Index, VectorType *&SubTy) const
unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const override
bool isLegalAddScalableImmediate(int64_t Imm) const override
unsigned getAssumedAddrSpace(const Value *V) const override
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const override
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0) const override
bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override
bool areInlineCompatible(const Function *Caller, const Function *Callee) const override
bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty) const override
bool haveFastSqrt(Type *Ty) const override
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const override
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Scalar, ArrayRef< std::tuple< Value *, User *, int > > ScalarUserAndIdx) const override
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const override
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const override
unsigned adjustInliningThreshold(const CallBase *CB) const override
unsigned getInliningThresholdMultiplier() const override
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset)
bool shouldBuildRelLookupTables() const override
bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx) const override
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) const override
unsigned getEpilogueVectorizationMinVF() const override
InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index, TTI::TargetCostKind CostKind) const override
InstructionCost getVectorSplitCost() const
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
std::optional< unsigned > getMaxVScale() const override
unsigned getFlatAddressSpace() const override
InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const override
Compute a cost of the given call instruction.
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const override
InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) const
Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
~BasicTTIImplBase() override=default
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
unsigned getMaxPrefetchIterationsAhead() const override
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override
InstructionCost getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
Get intrinsic cost based on argument types.
bool hasBranchDivergence(const Function *F=nullptr) const override
InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) const
Try to calculate the cost of performing strict (in-order) reductions, which involves doing a sequence...
bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const override
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override
std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const override
bool shouldPrefetchAddressSpace(unsigned AS) const override
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const override
unsigned getCacheLineSize() const override
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override
bool shouldDropLSRSolutionIfLessProfitable() const override
int getInlinerVectorBonusPercent() const override
bool isVScaleKnownToBeAPowerOfTwo() const override
InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind) const override
InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
std::pair< InstructionCost, MVT > getTypeLegalizationCost(Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
bool isLegalAddImmediate(int64_t imm) const override
InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) const override
unsigned getMaxInterleaveFactor(ElementCount VF) const override
bool isSingleThreaded() const override
InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind) const
Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
bool isProfitableLSRChainElement(Instruction *I) const override
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override
bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx) const override
bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx) const override
std::optional< unsigned > getVScaleForTuning() const override
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override
Get intrinsic cost based on arguments.
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const override
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const override
InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *, const SCEV *, TTI::TargetCostKind) const override
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override
InstructionCost getScalarizationOverhead(VectorType *RetTy, ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const
Estimate the overhead of scalarizing the inputs and outputs of an instruction, with return type RetTy...
std::optional< unsigned > getCacheSize(TargetTransformInfo::CacheLevel Level) const override
bool isLegalICmpImmediate(int64_t imm) const override
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const override
unsigned getRegUsageForType(Type *Ty) const override
InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const override
Get memory intrinsic cost based on arguments.
BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
bool isTypeLegal(Type *Ty) const override
bool enableWritePrefetching() const override
bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const override
InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const override
Estimate the overhead of scalarizing an instruction's operands.
bool isNumRegsMajorCostOfLSR() const override
BasicTTIImpl(const TargetMachine *TM, const Function &F)
size_type count() const
count - Returns the number of bits which are set.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLE
signed less or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static CmpInst::Predicate getGTPredicate(Intrinsic::ID ID)
static CmpInst::Predicate getLTPredicate(Intrinsic::ID ID)
This class represents a range of values.
A parsed version of the target data layout string in and methods for querying it.
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getFixed(ScalarTy MinVal)
constexpr bool isScalar() const
Exactly one element.
Convenience struct for specifying and reasoning about fast-math flags.
Container class for subtarget features.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
AttributeList getAttributes() const
Return the attribute list for this Function.
The core instruction combiner logic.
static InstructionCost getInvalid(CostType Val=0)
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
FastMathFlags getFlags() const
const TargetLibraryInfo * getLibInfo() const
const SmallVectorImpl< Type * > & getArgTypes() const
Type * getReturnType() const
bool skipScalarizationCost() const
const SmallVectorImpl< const Value * > & getArgs() const
InstructionCost getScalarizationCost() const
const IntrinsicInst * getInst() const
Intrinsic::ID getID() const
bool isTypeBasedOnly() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Represents a single loop in the control flow graph.
const FeatureBitset & getFeatureBits() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Information for memory intrinsic cost model.
Align getAlignment() const
Type * getDataType() const
bool getVariableMask() const
Intrinsic::ID getID() const
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Analysis providing profile information.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
static StackOffset getScalable(int64_t Scalable)
static StackOffset getFixed(int64_t Fixed)
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Provides information about what library functions are available for the current target.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used to make them valid.
virtual bool preferSelectsOverBooleanArithmetic(EVT VT) const
Should we prefer selects to doing arithmetic on boolean types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the value to ToTy in the result register.
@ TypeScalarizeScalableVector
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumCases cases, Range range of values.
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool isSuitableForBitTests(const DenseMap< const BasicBlock *, unsigned int > &DestCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests ...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we need to promote it to a larger type (return 'Promote'), or we need to expand it into multiple registers of smaller integer type (return 'Expand').
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with a bitwise operation.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering or using promotion.
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
Primary interface to the complete machine description for the target machine.
bool isPositionIndependent() const
const Triple & getTargetTriple() const
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInfo-derived member variable.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetSubtargetInfo - Generic base class for all target subtargets.
Triple - Helper class for working with autoconf configuration names.
ArchType getArch() const
Get the parsed architecture type of this triple.
LLVM_ABI bool isArch64Bit() const
Test whether the architecture is 64-bit.
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old number of lanes.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
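A brief sketch of how these convenience getters relate; the function name is hypothetical, but the uniquing behavior is standard for IR types:
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>
// The fixed-width getters and getIntNTy return the same uniqued IntegerType
// objects for a given context.
void intTypesAreUniqued(llvm::LLVMContext &Ctx) {
  assert(llvm::Type::getInt1Ty(Ctx) == llvm::Type::getIntNTy(Ctx, 1));
  assert(llvm::Type::getInt8Ty(Ctx) == llvm::Type::getIntNTy(Ctx, 8));
}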
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
bool isVoidTy() const
Return true if this is 'void'.
Value * getOperand(unsigned i) const
static LLVM_ABI bool isVPBinOp(Intrinsic::ID ID)
static LLVM_ABI bool isVPCast(Intrinsic::ID ID)
static LLVM_ABI bool isVPCmp(Intrinsic::ID ID)
static LLVM_ABI std::optional< unsigned > getFunctionalOpcodeForVP(Intrinsic::ID ID)
static LLVM_ABI std::optional< Intrinsic::ID > getFunctionalIntrinsicIDForVP(Intrinsic::ID ID)
static LLVM_ABI bool isVPIntrinsic(Intrinsic::ID)
static LLVM_ABI bool isVPReduction(Intrinsic::ID ID)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
Base class of all SIMD vector types.
static VectorType * getHalfElementsVectorType(VectorType *VTy)
This static method returns a VectorType with half as many elements as the input type and the same element type.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
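A minimal sketch; the element type and counts are arbitrary, and the helper name is made up:
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/TypeSize.h"
// Build a fixed <4 x i32> and a scalable <vscale x 4 x i32>.
void makeVectorTypes(llvm::LLVMContext &Ctx) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  auto *FixedVT = llvm::VectorType::get(I32, llvm::ElementCount::getFixed(4));
  auto *ScalableVT = llvm::VectorType::get(I32, llvm::ElementCount::getScalable(4));
  (void)FixedVT;
  (void)ScalableVT;
}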
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
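A hedged sketch of why the scalable check matters before reading a fixed value; the 128-bit threshold and function name are illustrative:
#include "llvm/Support/TypeSize.h"
// Scalable sizes are multiples of the runtime vscale, so only a non-scalable
// size can be compared against a plain integer bound.
bool fitsIn128Bits(llvm::TypeSize Size) {
  return !Size.isScalable() && Size.getFixedValue() <= 128;
}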
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A to NewBitWidth bits.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
ISD namespace - This namespace contains an enum which represents all of the SelectionDAG node types a...
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same width and scale.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FMODF
FMODF - Decomposes the operand into integral and fractional parts, each having the same type and sign as the operand.
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ FSINCOSPI
FSINCOSPI - Compute both the sine and cosine times pi more accurately than FSINCOS(pi*x),...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width (W).
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values, following IEEE-754 definitions...
@ SMULO
Same for multiplication.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
LLVM_ABI bool isTargetIntrinsic(ID IID)
isTargetIntrinsic - Returns true if IID is an intrinsic specific to a certain target.
LLVM_ABI Libcall getSINCOSPI(EVT RetVT)
getSINCOSPI - Return the SINCOSPI_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getMODF(EVT VT)
getMODF - Return the MODF_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getSINCOS(EVT RetVT)
getSINCOS - Return the SINCOS_* value for the given types, or UNKNOWN_LIBCALL if there is none.
DiagnosticInfoOptimizationBase::Argument NV
friend class Instruction
Iterator for Instructions in a BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
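A short sketch of the range form; the predicate and container are illustrative:
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
// The range overload avoids spelling out begin()/end() explicitly.
bool allPositive(const llvm::SmallVectorImpl<int> &Vals) {
  return llvm::all_of(Vals, [](int V) { return V > 0; });
}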
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
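A minimal sketch of the single-range enumerate form; the weighting computation is purely illustrative:
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
// Each enumerator entry exposes the 0-based index() and the element value().
int weightedSum(const llvm::SmallVectorImpl<int> &Vals) {
  int Sum = 0;
  for (const auto &E : llvm::enumerate(Vals))
    Sum += static_cast<int>(E.index()) * E.value();
  return Sum;
}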
Type * toScalarizedTy(Type *Ty)
A helper for converting vectorized types to scalarized (non-vector) types.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case of optionals) value is accepted.
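A hedged sketch contrasting the two casts; the helper name asLoad is made up:
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
// dyn_cast yields null on a type mismatch; dyn_cast_if_present additionally
// tolerates a null input pointer, so no pre-check of V is needed here.
const llvm::LoadInst *asLoad(const llvm::Value *V) {
  return llvm::dyn_cast_if_present<llvm::LoadInst>(V);
}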
LLVM_ABI unsigned getArithmeticReductionInstruction(Intrinsic::ID RdxID)
Returns the arithmetic instruction opcode used when expanding a reduction.
bool isVectorizedTy(Type *Ty)
Returns true if Ty is a vector type or a struct of vector types where all vector types share the same element count.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
auto dyn_cast_or_null(const Y &Val)
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
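A small sketch combining the two helpers; the function name and the example value 64 are illustrative:
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
// For a power of two, Log2_32 recovers the exponent exactly,
// e.g. Log2_32(64) == 6.
unsigned exponentOf(uint32_t X) {
  assert(llvm::isPowerOf2_32(X) && "expected a power of two");
  return llvm::Log2_32(X);
}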
ElementCount getVectorizedTypeVF(Type *Ty)
Returns the number of vector elements for a vectorized type.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range function attribute.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
constexpr int PoisonMaskElem
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
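A brief sketch of rounding up to fixed-size chunks; the 16-byte chunk size and function name are illustrative:
#include "llvm/Support/MathExtras.h"
#include <cstdint>
// e.g. divideCeil(33, 16) == 3, since 33 bytes need three 16-byte chunks.
uint64_t chunksNeeded(uint64_t Bytes) {
  return llvm::divideCeil(Bytes, uint64_t(16));
}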
FunctionAddr VTableAddr uintptr_t uintptr_t Data
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
cl::opt< unsigned > PartialUnrollingThreshold
LLVM_ABI bool isVectorizedStructTy(StructType *StructTy)
Returns true if StructTy is an unpacked literal struct where all elements are vectors of matching element count.
This struct is a compact representation of a valid (non-zero power of two) alignment.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
ElementCount getVectorElementCount() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
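A hedged sketch of round-tripping an IR type through EVT; getEVT asserts on types it cannot represent, so this assumes Ty is a plain scalar or vector type:
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
// Map an IR type to its EVT and back to the corresponding IR type.
llvm::Type *roundTrip(llvm::LLVMContext &Ctx, llvm::Type *Ty) {
  llvm::EVT VT = llvm::EVT::getEVT(Ty);
  return VT.getTypeForEVT(Ctx);
}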
Attributes of a target dependent hardware loop.
static bool hasVectorMaskArgument(RTLIB::LibcallImpl Impl)
Returns true if the function has a vector mask argument, which is assumed to be the last argument.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.