#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H
  /// Helper function to access this as a T.
  T *thisT() { return static_cast<T *>(this); }
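  // thisT() is the CRTP hook: BasicTTIImplBase<T> calls its cost hooks through
  // the most-derived target type, so a target override is used even from
  // generic helper code. A minimal sketch of the dispatch (the class and
  // method names below are hypothetical, not part of this header):
  //
  //   template <typename T> struct Base {
  //     T *thisT() { return static_cast<T *>(this); }
  //     unsigned cost() { return thisT()->costImpl(); } // derived wins
  //     unsigned costImpl() { return 1; }               // generic fallback
  //   };
  //   struct MyTTI : Base<MyTTI> {
  //     unsigned costImpl() { return 2; } // target-specific answer
  //   };
  //   // MyTTI{}.cost() == 2, dispatched via MyTTI::costImpl().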
    // getBroadcastShuffleOverhead: extract lane 0, insert into every lane.
    Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                        CostKind, 0, nullptr, nullptr);
    Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
                                        CostKind, i, nullptr, nullptr);
    // getPermuteShuffleOverhead: per lane, one insert and one extract.
    Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
                                        CostKind, i, nullptr, nullptr);
    Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                        CostKind, i, nullptr, nullptr);
134 "Can only extract subvectors from vectors");
136 assert((!isa<FixedVectorType>(VTy) ||
137 (
Index + NumSubElts) <=
139 "SK_ExtractSubvector index out of range");
145 for (
int i = 0; i != NumSubElts; ++i) {
147 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
149 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
162 "Can only insert subvectors into vectors");
164 assert((!isa<FixedVectorType>(VTy) ||
165 (
Index + NumSubElts) <=
167 "SK_InsertSubvector index out of range");
173 for (
int i = 0; i != NumSubElts; ++i) {
174 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
177 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
CostKind,
178 i +
Index,
nullptr,
nullptr);
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }
  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }
  InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                              Align Alignment,
                                              bool VariableMask,
                                              bool IsGatherScatter,
                                              TTI::TargetCostKind CostKind) {
    // We cannot scalarize scalable vectors, so return an invalid cost.
    if (isa<ScalableVectorType>(DataTy))
      return InstructionCost::getInvalid();

    auto *VT = cast<FixedVectorType>(DataTy);
    unsigned VF = VT->getNumElements();
    // ... (address extraction cost for gather/scatter elided)
    InstructionCost MemoryOpCost =
        VF * thisT()->getMemoryOpCost(Opcode, VT->getElementType(), Alignment,
                                      /*AddressSpace=*/0, CostKind);
    // Packing/unpacking the scalar lanes.
    InstructionCost PackingCost = getScalarizationOverhead(
        VT, Opcode != Instruction::Store, Opcode == Instruction::Store,
        CostKind);
    // One branch and one PHI per lane when the mask is variable.
    InstructionCost ConditionalCost =
        VF * (thisT()->getCFInstrCost(Instruction::Br, CostKind) +
              thisT()->getCFInstrCost(Instruction::PHI, CostKind));
    return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
  }
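  // Worked example (assuming unit costs for each scalar op): scalarizing a
  // masked load of <4 x i32> with a variable mask pays 4 scalar loads
  // (MemoryOpCost), 4 inserts to repack the result (PackingCost), and one
  // branch plus one PHI per lane (ConditionalCost = 4 * 2), on top of any
  // address extraction cost for gather/scatter.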
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace, Align Alignment,
                                      unsigned *Fast) const {
  std::pair<const Value *, unsigned>
  getPredicatedAddrSpace(const Value *V) const {
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
                             Instruction *I = nullptr,
                             int64_t ScalableOffset = 0) {
  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                             Type *ScalarValTy) const {
    auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
      auto *SrcTy = FixedVectorType::get(ScalarMemTy, VF / 2);
      EVT VT = getTLI()->getValueType(DL, SrcTy);
      if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
          getTLI()->isOperationCustom(ISD::STORE, VT))
        return true;
      // ... (otherwise check whether the truncating store is legal)
    };
    while (VF > 2 && IsSupportedByTarget(VF))
      VF /= 2;
    return VF;
  }
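  // Worked example (hypothetical target): starting from VF = 8, the loop
  // keeps halving while a store of half the width is still supported, e.g.
  // 8 -> 4 -> 2, and returns the smallest VF the target can still store.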
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       StackOffset BaseOffset, bool HasBaseReg,
                                       int64_t Scale, unsigned AddrSpace) {
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JumpTableSize,
                                            ProfileSummaryInfo *PSI,
                                            BlockFrequencyInfo *BFI) {
    const TargetLoweringBase *TLI = getTLI();
    unsigned N = SI.getNumCases();
    bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());

    APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
    APInt MinCaseVal = MaxCaseVal;
    for (auto CI : SI.cases()) {
      const APInt &CaseVal = CI.getCaseValue()->getValue();
      if (CaseVal.sgt(MaxCaseVal))
        MaxCaseVal = CaseVal;
      if (CaseVal.slt(MinCaseVal))
        MinCaseVal = CaseVal;
    }

    SmallPtrSet<const BasicBlock *, 4> Dests;
    for (auto I : SI.cases())
      Dests.insert(I.getCaseSuccessor());

    // Check if suitable for a jump table.
    if (IsJTAllowed) {
      if (N < 2 || N < TLI->getMinimumJumpTableEntries())
        return N;
      uint64_t Range =
          (MaxCaseVal - MinCaseVal)
              .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
      // Check whether a range of clusters is dense enough for a jump table.
      if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
        JumpTableSize = Range;
        return 1;
      }
    }
    return N;
  }
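  // Worked example: a switch over cases {0, 2, 4, ..., 18} has N = 10 and
  // Range = 18 - 0 + 1 = 19. If the target considers a 19-entry table at
  // density 10/19 suitable, JumpTableSize is set to 19 and a single cluster
  // is reported; otherwise all 10 cases count as clusters.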
  bool shouldBuildRelLookupTables() const {
    const TargetMachine &TM = getTLI()->getTargetMachine();
    // If non-PIC mode, do not generate a relative lookup table.
    if (!TM.isPositionIndependent())
      return false;

    const Triple &TargetTriple = TM.getTargetTriple();
      case Instruction::SDiv:
      case Instruction::SRem:
      case Instruction::UDiv:
      case Instruction::URem: {
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    // ...
        if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
    // ...
                 << "advising against unrolling the loop because it "
  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) {
    return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
  }
  // ...
    return BaseT::simplifyDemandedVectorEltsIntrinsic(
        IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
        SimplifyAndSetOp);
  virtual std::optional<unsigned>
  getCacheSize(TargetTransformInfo::CacheLevel Level) const {
    return std::optional<unsigned>(
        getST()->getCacheSize(static_cast<unsigned>(Level)));
  }

  virtual std::optional<unsigned>
  getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
    std::optional<unsigned> TargetResult =
        getST()->getCacheAssociativity(static_cast<unsigned>(Level));
  virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                        unsigned NumStridedMemAccesses,
                                        unsigned NumPrefetches,
                                        bool HasCall) const {
    return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                         NumPrefetches, HasCall);
  }
  InstructionCost getScalarizationOverhead(VectorType *InTy,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TTI::TargetCostKind CostKind) {
    // We cannot scalarize scalable vectors, so return an invalid cost.
    if (isa<ScalableVectorType>(InTy))
      return InstructionCost::getInvalid();
    auto *Ty = cast<FixedVectorType>(InTy);
    assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
           "Vector size mismatch");

    InstructionCost Cost = 0;
    for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
      if (!DemandedElts[i])
        continue;
      if (Insert)
        Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
                                            CostKind, i, nullptr, nullptr);
      if (Extract)
        Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                            CostKind, i, nullptr, nullptr);
    }
    return Cost;
  }
  /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
  InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert,
                                           bool Extract,
                                           TTI::TargetCostKind CostKind) {
    if (isa<ScalableVectorType>(InTy))
      return InstructionCost::getInvalid();
    auto *Ty = cast<FixedVectorType>(InTy);
    APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
    return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                             CostKind);
  }
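  // Worked example: fully scalarizing a unary op on <4 x float> with all
  // elements demanded costs 4 extracts of the source operand plus 4 inserts
  // to rebuild the result, i.e. 8 vector-instr costs before the 4 scalar ops
  // themselves are counted.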
  InstructionCost
  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                   ArrayRef<Type *> Tys,
                                   TTI::TargetCostKind CostKind) {
    assert(Args.size() == Tys.size() && "Expected matching Args and Tys");

    InstructionCost Cost = 0;
    SmallPtrSet<const Value *, 4> UniqueOperands;
    for (int I = 0, E = Args.size(); I != E; I++) {
      // Disregard things like metadata arguments.
      const Value *A = Args[I];
      Type *Ty = Tys[I];
      if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
          !Ty->isPtrOrPtrVectorTy())
        continue;

      if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
        if (auto *VecTy = dyn_cast<VectorType>(Ty))
          Cost += getScalarizationOverhead(VecTy, /*Insert*/ false,
                                           /*Extract*/ true, CostKind);
      }
    }

    return Cost;
  }
      if (MTy == LK.second)
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = std::nullopt,
      const Instruction *CxtI = nullptr) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
    // Assume that floating point arithmetic operations cost twice as much as
    // integer operations.
    bool IsFloat = Ty->isFPOrFPVectorTy();
    InstructionCost OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      return LT.first * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, then assume that the code is
      // twice as expensive.
      return LT.first * 2 * OpCost;
    }

    // An 'Expand' of URem and SRem is special because it may default to
    // expanding the remainder into a sequence of div, mul, and sub.
    if (ISD == ISD::UREM || ISD == ISD::SREM) {
      bool IsSigned = (ISD == ISD::SREM);
      // ...
      unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
      InstructionCost DivCost = thisT()->getArithmeticInstrCost(
          DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
      InstructionCost MulCost =
          thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
      InstructionCost SubCost =
          thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
      return DivCost + MulCost + SubCost;
    }
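    // The div+mul+sub sequence above models the standard remainder identity
    //   a rem b == a - (a div b) * b,
    // which is what expansion falls back to when the target has no native
    // remainder instruction.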
    // We cannot scalarize scalable vectors, so return an invalid cost.
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    // Else, assume that we need to scalarize this op.
    if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
      InstructionCost Cost = thisT()->getArithmeticInstrCost(
          Opcode, VTy->getScalarType(), CostKind);
      SmallVector<Type *> Tys(Args.size(), Ty);
      return getScalarizationOverhead(VTy, Args, Tys, CostKind) +
             VTy->getNumElements() * Cost;
    }
      if (ShuffleVectorInst::isExtractSubvectorMask(Mask, NumSrcElts, Index) &&
          (Index + Mask.size()) <= (size_t)NumSrcElts) {
    // ...
      if (ShuffleVectorInst::isInsertSubvectorMask(Mask, NumSrcElts, NumSubElts,
                                                   Index)) {
        if (Index + NumSubElts > NumSrcElts)
          return Kind;
    // ...
    case TTI::SK_Broadcast:
      if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
        return getBroadcastShuffleOverhead(FVT, CostKind);
    // ...
      if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
        return getPermuteShuffleOverhead(FVT, CostKind);
    case TTI::SK_ExtractSubvector:
      return getExtractSubvectorOverhead(Tp, CostKind, Index,
                                         cast<FixedVectorType>(SubTp));
    case TTI::SK_InsertSubvector:
      return getInsertSubvectorOverhead(Tp, CostKind, Index,
                                        cast<FixedVectorType>(SubTp));
    assert(ISD && "Invalid opcode");
    const TargetLoweringBase *TLI = getTLI();
    std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
    std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Dst);

    TypeSize SrcSize = SrcLT.second.getSizeInBits();
    TypeSize DstSize = DstLT.second.getSizeInBits();
    bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
    bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();

    switch (Opcode) {
    default:
      break;
    case Instruction::Trunc:
      // Check for NOOP conversions.
      if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
        return 0;
      [[fallthrough]];
    case Instruction::BitCast:
      // Bitcast between types that are legalized to the same type are free
      // and assume int to/from ptr of the same size is also free.
      if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
          SrcSize == DstSize)
        return 0;
      break;
    case Instruction::FPExt:
      if (I && getTLI()->isExtFree(I))
        return 0;
      break;
    case Instruction::ZExt:
      if (TLI->isZExtFree(SrcLT.second, DstLT.second))
        return 0;
      [[fallthrough]];
    case Instruction::SExt:
      if (I && getTLI()->isExtFree(I))
        return 0;
      // If this is a zext/sext of a load, return 0 if the corresponding
      // extending load exists on the target.
      if (CCH == TTI::CastContextHint::Normal) {
        EVT ExtVT = EVT::getEVT(Dst);
        EVT LoadVT = EVT::getEVT(Src);
        unsigned LType =
            ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
        if (DstLT.first == SrcLT.first &&
            TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
          return 0;
      }
      break;
    case Instruction::AddrSpaceCast:
      if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
                                   Dst->getPointerAddressSpace()))
        return 0;
      break;
    }

    auto *SrcVTy = dyn_cast<VectorType>(Src);
    auto *DstVTy = dyn_cast<VectorType>(Dst);

    // If the cast is marked as legal (or promote) then assume low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
      return SrcLT.first;

    // Handle scalar conversions.
    if (!SrcVTy && !DstVTy) {
      // ...
    }

    // Check vector-to-vector casts.
    if (DstVTy && SrcVTy) {
      // If the cast is between same-sized registers, then the check is simple.
      if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)
          return SrcLT.first;
        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return SrcLT.first * 2;
        // Just check the op cost; if the operation is legal then assume it
        // costs 1 and multiply by the type-legalization overhead.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;
      }

      // If we are legalizing by splitting, query the concrete TTI for the
      // cost of casting the original vector twice, plus the shuffle cost
      // incurred by the split.
      // ...
      if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
          DstVTy->getElementCount().isVector()) {
        Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
        Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
        T *TTI = static_cast<T *>(this);
        // If both types need to be split then the split is free.
        InstructionCost SplitCost =
            (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
        return SplitCost +
               (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
                                          CostKind, I));
      }

      // Scalarization cost is Invalid, can't assume any num elements.
      if (isa<ScalableVectorType>(DstVTy))
        return InstructionCost::getInvalid();

      // In other cases where the source or destination are illegal, assume
      // the operation will get scalarized.
      unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
      InstructionCost Cost = thisT()->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
    if (Opcode == Instruction::BitCast) {
    return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                       CostKind, Index, nullptr, nullptr);
    assert(ISD && "Invalid opcode");
    // ...
    assert(CondTy && "CondTy must exist");
    // ...
    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply by the
      // type-legalization overhead.
      return LT.first * 1;
    }
    // ...
    if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
      if (isa<ScalableVectorType>(ValTy))
        return InstructionCost::getInvalid();

      unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
      InstructionCost Cost = thisT()->getCmpSelInstrCost(
          Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I);
    Value *Op0 = nullptr;
    Value *Op1 = nullptr;
    if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
      Op0 = IE->getOperand(0);
      Op1 = IE->getOperand(1);
    }
    return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
                                       Op1);
  InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
                                            int VF,
                                            const APInt &DemandedDstElts,
                                            TTI::TargetCostKind CostKind) {
    assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
           "Unexpected size of DemandedDstElts.");
    // ...
    Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
                                              /*Insert*/ false,
                                              /*Extract*/ true, CostKind);
    Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
                                              /*Insert*/ true,
                                              /*Extract*/ false, CostKind);
    assert(!Src->isVoidTy() && "Invalid type");
    // Assume types, such as structs, are expensive.
    if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
      return 4;
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);

    // Assuming that all loads of legal types cost 1.
    InstructionCost Cost = LT.first;
    // ...
    if (Src->isVectorTy() &&
        TypeSize::isKnownLT(DL.getTypeStoreSizeInBits(Src),
                            LT.second.getSizeInBits())) {
      // This is a vector load that legalizes to a larger type than the vector
      // itself. Unless the corresponding extending load or truncating store
      // is legal, this will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(DL, Src);
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);

      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // This is a vector load/store for some illegal type that is
        // scalarized. We must account for the cost of building or
        // decomposing the vector.
        Cost += getScalarizationOverhead(
            cast<VectorType>(Src), Opcode != Instruction::Store,
            Opcode == Instruction::Store, CostKind);
      }
    }
  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind) {
    return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
                                       CostKind);
  }
  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I = nullptr) {
    return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
                                       true, CostKind);
  }
  InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I) {
    // For a target without strided memory operations (or for an illegal
    // operation type on one which does), assume we lower to a gather/scatter
    // operation. (Which may in turn be scalarized.)
    return thisT()->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                           Alignment, CostKind, I);
  }
  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false) {
    // We cannot scalarize scalable vectors, so return an invalid cost.
    if (isa<ScalableVectorType>(VecTy))
      return InstructionCost::getInvalid();

    auto *VT = cast<FixedVectorType>(VecTy);

    unsigned NumElts = VT->getNumElements();
    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");

    unsigned NumSubElts = NumElts / Factor;
    auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);

    // Firstly, the cost of load/store operation.
    InstructionCost Cost;
    if (UseMaskForCond || UseMaskForGaps)
      Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
                                            AddressSpace, CostKind);
    else
      Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
                                      CostKind);

    // Estimate how many legalized instructions the memory op splits into.
    unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
    MVT VecTyLT = getTypeLegalizationCost(VecTy).second;
    unsigned VecTyLTSize = VecTyLT.getStoreSize();
    unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);

    // Number of source elements handled by a single legalized instruction.
    unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);

    // Mark which legalized instructions are actually used.
    BitVector UsedInsts(NumLegalInsts, false);
    for (unsigned Index : Indices)
      for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
        UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
    // ...
    assert(Indices.size() <= Factor &&
           "Interleaved memory op has too many members");

    const APInt DemandedAllSubElts = APInt::getAllOnes(NumSubElts);
    const APInt DemandedAllResultElts = APInt::getAllOnes(NumElts);

    APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
    for (unsigned Index : Indices) {
      assert(Index < Factor && "Invalid index for interleaved memory op");
      for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
        DemandedLoadStoreElts.setBit(Index + Elm * Factor);
    }

    if (Opcode == Instruction::Load) {
      // The cost of inserting each extracted lane into the sub-vectors.
      InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
          SubVT, DemandedAllSubElts, /*Insert*/ true, /*Extract*/ false,
          CostKind);
      Cost += Indices.size() * InsSubCost;
      Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
                                                /*Insert*/ false,
                                                /*Extract*/ true, CostKind);
    } else {
      // The cost of extracting each lane from the sub-vectors.
      InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
          SubVT, DemandedAllSubElts, /*Insert*/ false, /*Extract*/ true,
          CostKind);
      Cost += ExtSubCost * Indices.size();
      Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
                                                /*Insert*/ true,
                                                /*Extract*/ false, CostKind);
    }

    if (!UseMaskForCond)
      return Cost;

    Type *I8Type = Type::getInt8Ty(VT->getContext());

    Cost += thisT()->getReplicationShuffleCost(
        I8Type, Factor, NumSubElts,
        UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
        CostKind);

    // ...
    if (UseMaskForGaps) {
      // ...
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
                                              CostKind);
    }
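    // Worked example: a stride-2 interleaved load from <8 x i32> (Factor = 2,
    // NumSubElts = 4) with both indices used demands every lane, so the model
    // charges 8 extracts from the wide load plus 2 * 4 inserts into the two
    // <4 x i32> sub-vectors, on top of the memory operation itself.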
    ElementCount RetVF =
        (RetTy->isVectorTy() ? cast<VectorType>(RetTy)->getElementCount()
                             : ElementCount::getFixed(1));
    case Intrinsic::powi:
      if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
        bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
        if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
                                               ShouldOptForSize)) {
          // The cost model reflects expansion by repeated squaring.
          APInt Exponent = RHSC->getValue().abs();
          unsigned ActiveBits = Exponent.getActiveBits();
          unsigned PopCount = Exponent.popcount();
          InstructionCost Cost = (ActiveBits + PopCount - 2) *
                                 thisT()->getArithmeticInstrCost(
                                     Instruction::FMul, RetTy, CostKind);
          if (RHSC->isNegative())
            Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
                                                    CostKind);
          return Cost;
        }
      }
      break;
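    // Worked example: powi(x, 11) with 11 = 0b1011 has ActiveBits = 4 and
    // PopCount = 3, so the repeated-squaring expansion is modeled as
    // (4 + 3 - 2) = 5 multiplies (3 squarings plus 2 products), with an
    // extra FDiv when the exponent is negative.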
    case Intrinsic::cttz:
      // FIXME: If necessary, this should go in target-specific overrides.
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
        return TargetTransformInfo::TCC_Basic;
      break;
    case Intrinsic::ctlz:
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
        return TargetTransformInfo::TCC_Basic;
      break;
    case Intrinsic::memcpy:
      return thisT()->getMemcpyCost(ICA.getInst());
    case Intrinsic::masked_scatter: {
      const Value *Mask = Args[3];
      bool VarMask = !isa<Constant>(Mask);
      Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
      return thisT()->getGatherScatterOpCost(Instruction::Store,
                                             ICA.getArgTypes()[0], Args[1],
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::masked_gather: {
      const Value *Mask = Args[2];
      bool VarMask = !isa<Constant>(Mask);
      Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
      return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::experimental_vp_strided_store: {
      const Value *Data = Args[0];
      const Value *Ptr = Args[1];
      const Value *Mask = Args[3];
      const Value *EVL = Args[4];
      bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
      Type *EltTy = cast<VectorType>(Data->getType())->getElementType();
      Align Alignment =
          I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy));
      return thisT()->getStridedMemoryOpCost(Instruction::Store,
                                             Data->getType(), Ptr, VarMask,
                                             Alignment, CostKind, I);
    }
    case Intrinsic::experimental_vp_strided_load: {
      const Value *Ptr = Args[0];
      const Value *Mask = Args[2];
      const Value *EVL = Args[3];
      bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
      Type *EltTy = cast<VectorType>(RetTy)->getElementType();
      Align Alignment =
          I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy));
      return thisT()->getStridedMemoryOpCost(Instruction::Load, RetTy, Ptr,
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::experimental_stepvector: {
      if (isa<ScalableVectorType>(RetTy))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      // The cost of materialising a constant integer constant.
      return TargetTransformInfo::TCC_Basic;
    }
    case Intrinsic::vector_extract: {
      // FIXME: Handle case where a scalable vector is extracted from a
      // scalable vector.
      if (isa<ScalableVectorType>(RetTy))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
      return thisT()->getShuffleCost(
          TTI::SK_ExtractSubvector, cast<VectorType>(Args[0]->getType()),
          std::nullopt, CostKind, Index, cast<VectorType>(RetTy));
    }
    case Intrinsic::vector_insert: {
      // FIXME: Handle case where a scalable vector is inserted into a
      // scalable vector.
      if (isa<ScalableVectorType>(Args[1]->getType()))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
      return thisT()->getShuffleCost(
          TTI::SK_InsertSubvector, cast<VectorType>(Args[0]->getType()),
          std::nullopt, CostKind, Index, cast<VectorType>(Args[1]->getType()));
    }
    case Intrinsic::vector_reverse: {
      return thisT()->getShuffleCost(TTI::SK_Reverse, cast<VectorType>(RetTy),
                                     std::nullopt, CostKind, 0, nullptr);
    }
    case Intrinsic::vector_splice: {
      unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
      return thisT()->getShuffleCost(TTI::SK_Splice, cast<VectorType>(RetTy),
                                     std::nullopt, CostKind, Index, nullptr);
    }
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
    case Intrinsic::vector_reduce_fmaximum:
    case Intrinsic::vector_reduce_fminimum:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin: {
      IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
      return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
    }
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul: {
      IntrinsicCostAttributes Attrs(
          IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
      return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr: {
      const Value *X = Args[0];
      const Value *Y = Args[1];
      const Value *Z = Args[2];
      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
      InstructionCost Cost = 0;
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy,
                                              CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
                                              CostKind);
      // Non-constant shift amounts require a modulo by the bit width.
      // ...
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
                                              CostKind);
      // For non-rotates (X != Y) we must add shift-by-zero handling costs.
      if (X != Y) {
        Type *CondTy = RetTy->getWithNewBitWidth(1);
        Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                            CmpInst::ICMP_EQ, CostKind);
        Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                            CondTy, CmpInst::ICMP_EQ, CostKind);
      }
      return Cost;
    }
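    // Worked example: a rotate (fshl with X == Y) by a constant amount needs
    // neither the URem nor the shift-by-zero guard, so it is modeled as just
    // Or + Sub + Shl + LShr, while a variable-shift funnel shift of distinct
    // operands additionally pays the URem plus an ICmp/Select pair.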
    case Intrinsic::get_active_lane_mask: {
      EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
      EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);

      // If we're not expanding the intrinsic then we assume this is cheap to
      // implement.
      if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgType)) {
        return getTypeLegalizationCost(RetTy).first;
      }

      // Otherwise, cost the expansion: a saturating subtract followed by an
      // unsigned compare.
      Type *ExpRetTy = VectorType::get(
          ICA.getArgTypes()[0], cast<VectorType>(RetTy)->getElementCount());
      IntrinsicCostAttributes Attrs(Intrinsic::usub_sat, ExpRetTy, {ExpRetTy});
      InstructionCost Cost =
          thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
                                          CmpInst::ICMP_ULT, CostKind);
      return Cost;
    }
    case Intrinsic::experimental_cttz_elts: {
      EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);

      // If we're not expanding the intrinsic then we assume this is cheap to
      // implement.
      if (!getTLI()->shouldExpandCttzElements(ArgType))
        return getTypeLegalizationCost(RetTy).first;

      // Find the smallest "sensible" element type to use for the expansion.
      bool ZeroIsPoison = !cast<ConstantInt>(Args[1])->isZero();
      ConstantRange VScaleRange(APInt(64, 1), APInt::getZero(64));
      if (isa<ScalableVectorType>(ICA.getArgTypes()[0]) && I && I->getCaller())
        VScaleRange = getVScaleRange(I->getCaller(), 64);

      unsigned EltWidth = getTLI()->getBitWidthForCttzElements(
          RetTy, ArgType.getVectorElementCount(), ZeroIsPoison, &VScaleRange);
      Type *NewEltTy = IntegerType::getIntNTy(RetTy->getContext(), EltWidth);

      // Create the new vector type and cost the expansion steps.
      Type *NewVecTy = VectorType::get(
          NewEltTy, cast<VectorType>(Args[0]->getType())->getElementCount());

      IntrinsicCostAttributes StepVecAttrs(Intrinsic::experimental_stepvector,
                                           NewVecTy, {}, FMF);
      InstructionCost Cost =
          thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);

      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Sub, NewVecTy, CostKind);
      Cost += thisT()->getCastInstrCost(Instruction::SExt, NewVecTy,
                                        cast<VectorType>(Args[0]->getType()),
                                        TTI::CastContextHint::None, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::And, NewVecTy, CostKind);

      IntrinsicCostAttributes ReducAttrs(Intrinsic::vector_reduce_umax,
                                         NewEltTy, NewVecTy, FMF, I, 1);
      Cost += thisT()->getTypeBasedIntrinsicInstrCost(ReducAttrs, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Sub, NewEltTy, CostKind);

      return Cost;
    }
      // VP intrinsics should have the same cost as their non-vp counterpart.
      std::optional<unsigned> FOp =
          VPIntrinsic::getFunctionalOpcodeForVP(ICA.getID());
      if (FOp) {
        if (ICA.getID() == Intrinsic::vp_load) {
          Align Alignment;
          if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
            Alignment = VPI->getPointerAlignment().valueOrOne();
          unsigned AS = 0;
          if (auto *PtrTy =
                  dyn_cast<PointerType>(ICA.getArgs()[0]->getType()))
            AS = PtrTy->getAddressSpace();
          return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
                                          AS, CostKind);
        }
        if (ICA.getID() == Intrinsic::vp_store) {
          Align Alignment;
          if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
            Alignment = VPI->getPointerAlignment().valueOrOne();
          unsigned AS = 0;
          if (auto *PtrTy =
                  dyn_cast<PointerType>(ICA.getArgs()[1]->getType()))
            AS = PtrTy->getAddressSpace();
          return thisT()->getMemoryOpCost(*FOp, Args[0]->getType(), Alignment,
                                          AS, CostKind);
        }
        if (VPBinOpIntrinsic::isVPBinOp(ICA.getID()))
          return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
                                                 CostKind);
      }

      std::optional<Intrinsic::ID> FID =
          VPIntrinsic::getFunctionalIntrinsicIDForVP(ICA.getID());
      if (FID) {
        // The non-vp version will have the same arg types except for the mask
        // and vector length.
        assert(ICA.getArgTypes().size() >= 2 &&
               "Expected VPIntrinsic to have Mask and Vector Length args and "
               "types");
        ArrayRef<Type *> NewTys = ArrayRef(ICA.getArgTypes()).drop_back(2);

        // VP reductions have a start value argument that their non-vp
        // counterparts do not have, except for the fadd and fmul counterparts.
        if (VPReductionIntrinsic::isVPReduction(ICA.getID()) &&
            *FID != Intrinsic::vector_reduce_fadd &&
            *FID != Intrinsic::vector_reduce_fmul)
          NewTys = NewTys.drop_front();

        IntrinsicCostAttributes NewICA(*FID, ICA.getReturnType(), NewTys,
                                       ICA.getFlags());
        return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
      }
      InstructionCost ScalarizationCost = InstructionCost::getInvalid();
      if (!ICA.skipScalarizationCost()) {
        ScalarizationCost = 0;
        if (!RetTy->isVoidTy())
          ScalarizationCost += getScalarizationOverhead(
              cast<VectorType>(RetTy),
              /*Insert*/ true, /*Extract*/ false, CostKind);
        ScalarizationCost +=
            getOperandsScalarizationOverhead(Args, ICA.getArgTypes(), CostKind);
      }

      IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
                                    ScalarizationCost);
      return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
      unsigned VecTyIndex = 0;
      if (IID == Intrinsic::vector_reduce_fadd ||
          IID == Intrinsic::vector_reduce_fmul)
        VecTyIndex = 1;
      assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
      VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
      // Scalable vectors cannot be scalarized, so return Invalid.
      if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
            return isa<ScalableVectorType>(Ty);
          }))
        return InstructionCost::getInvalid();

      InstructionCost ScalarizationCost =
          SkipScalarizationCost ? ScalarizationCostPassed : 0;
      unsigned ScalarCalls = 1;
      Type *ScalarRetTy = RetTy;
      if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
        if (!SkipScalarizationCost)
          ScalarizationCost = getScalarizationOverhead(
              RetVTy, /*Insert*/ true, /*Extract*/ false, CostKind);
        ScalarCalls = std::max(ScalarCalls,
                               cast<FixedVectorType>(RetVTy)->getNumElements());
        ScalarRetTy = RetTy->getScalarType();
      }
      SmallVector<Type *, 4> ScalarTys;
      for (Type *Ty : Tys) {
        if (auto *VTy = dyn_cast<VectorType>(Ty)) {
          if (!SkipScalarizationCost)
            ScalarizationCost += getScalarizationOverhead(
                VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
          ScalarCalls = std::max(ScalarCalls,
                                 cast<FixedVectorType>(VTy)->getNumElements());
          Ty = Ty->getScalarType();
        }
        ScalarTys.push_back(Ty);
      }
      if (ScalarCalls == 1)
        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.

      IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
      InstructionCost ScalarCost =
          thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);

      return ScalarCalls * ScalarCost + ScalarizationCost;
    case Intrinsic::sqrt:         ISD = ISD::FSQRT;         break;
    case Intrinsic::sin:          ISD = ISD::FSIN;          break;
    case Intrinsic::cos:          ISD = ISD::FCOS;          break;
    case Intrinsic::tan:          ISD = ISD::FTAN;          break;
    case Intrinsic::asin:         ISD = ISD::FASIN;         break;
    case Intrinsic::acos:         ISD = ISD::FACOS;         break;
    case Intrinsic::atan:         ISD = ISD::FATAN;         break;
    case Intrinsic::sinh:         ISD = ISD::FSINH;         break;
    case Intrinsic::cosh:         ISD = ISD::FCOSH;         break;
    case Intrinsic::tanh:         ISD = ISD::FTANH;         break;
    case Intrinsic::exp:          ISD = ISD::FEXP;          break;
    case Intrinsic::exp2:         ISD = ISD::FEXP2;         break;
    case Intrinsic::exp10:        ISD = ISD::FEXP10;        break;
    case Intrinsic::log:          ISD = ISD::FLOG;          break;
    case Intrinsic::log10:        ISD = ISD::FLOG10;        break;
    case Intrinsic::log2:         ISD = ISD::FLOG2;         break;
    case Intrinsic::fabs:         ISD = ISD::FABS;          break;
    case Intrinsic::canonicalize: ISD = ISD::FCANONICALIZE; break;
    case Intrinsic::minnum:       ISD = ISD::FMINNUM;       break;
    case Intrinsic::maxnum:       ISD = ISD::FMAXNUM;       break;
    case Intrinsic::minimum:      ISD = ISD::FMINIMUM;      break;
    case Intrinsic::maximum:      ISD = ISD::FMAXIMUM;      break;
    case Intrinsic::minimumnum:   ISD = ISD::FMINIMUMNUM;   break;
    case Intrinsic::maximumnum:   ISD = ISD::FMAXIMUMNUM;   break;
    case Intrinsic::copysign:     ISD = ISD::FCOPYSIGN;     break;
    case Intrinsic::floor:        ISD = ISD::FFLOOR;        break;
    case Intrinsic::ceil:         ISD = ISD::FCEIL;         break;
    case Intrinsic::trunc:        ISD = ISD::FTRUNC;        break;
    case Intrinsic::nearbyint:    ISD = ISD::FNEARBYINT;    break;
    case Intrinsic::rint:         ISD = ISD::FRINT;         break;
    case Intrinsic::lrint:        ISD = ISD::LRINT;         break;
    case Intrinsic::llrint:       ISD = ISD::LLRINT;        break;
    case Intrinsic::round:        ISD = ISD::FROUND;        break;
    case Intrinsic::roundeven:    ISD = ISD::FROUNDEVEN;    break;
    case Intrinsic::pow:          ISD = ISD::FPOW;          break;
    case Intrinsic::fma:          ISD = ISD::FMA;           break;
    case Intrinsic::fmuladd:      ISD = ISD::FMA;           break;
    case Intrinsic::experimental_constrained_fmuladd:
      ISD = ISD::STRICT_FMA;
      break;
    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::sideeffect:
    case Intrinsic::pseudoprobe:
    case Intrinsic::arithmetic_fence:
      return 0;
    case Intrinsic::masked_store: {
      Type *Ty = Tys[0];
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
                                            CostKind);
    }
    case Intrinsic::masked_load: {
      Type *Ty = RetTy;
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
                                            CostKind);
    }
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
      return thisT()->getArithmeticReductionCost(
          getArithmeticReductionInstruction(IID), VecOpTy, std::nullopt,
          CostKind);
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul:
      return thisT()->getArithmeticReductionCost(
          getArithmeticReductionInstruction(IID), VecOpTy, FMF, CostKind);
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
    case Intrinsic::vector_reduce_fmaximum:
    case Intrinsic::vector_reduce_fminimum:
      return thisT()->getMinMaxReductionCost(getMinMaxReductionIntrinsicOp(IID),
                                             VecOpTy, ICA.getFlags(), CostKind);
    case Intrinsic::abs:       ISD = ISD::ABS;       break;
    case Intrinsic::smax:      ISD = ISD::SMAX;      break;
    case Intrinsic::smin:      ISD = ISD::SMIN;      break;
    case Intrinsic::umax:      ISD = ISD::UMAX;      break;
    case Intrinsic::umin:      ISD = ISD::UMIN;      break;
    case Intrinsic::sadd_sat:  ISD = ISD::SADDSAT;   break;
    case Intrinsic::ssub_sat:  ISD = ISD::SSUBSAT;   break;
    case Intrinsic::uadd_sat:  ISD = ISD::UADDSAT;   break;
    case Intrinsic::usub_sat:  ISD = ISD::USUBSAT;   break;
    case Intrinsic::smul_fix:  ISD = ISD::SMULFIX;   break;
    case Intrinsic::umul_fix:  ISD = ISD::UMULFIX;   break;
    case Intrinsic::sadd_with_overflow: ISD = ISD::SADDO; break;
    case Intrinsic::ssub_with_overflow: ISD = ISD::SSUBO; break;
    case Intrinsic::uadd_with_overflow: ISD = ISD::UADDO; break;
    case Intrinsic::usub_with_overflow: ISD = ISD::USUBO; break;
    case Intrinsic::smul_with_overflow: ISD = ISD::SMULO; break;
    case Intrinsic::umul_with_overflow: ISD = ISD::UMULO; break;
    case Intrinsic::fptosi_sat: ISD = ISD::FP_TO_SINT_SAT; break;
    case Intrinsic::fptoui_sat: ISD = ISD::FP_TO_UINT_SAT; break;
    case Intrinsic::ctpop:
      ISD = ISD::CTPOP;
      // In case of legalization use TCC_Expensive. This is cheaper than a
      // library call but still not a cheap instruction.
      SingleCallCost = TargetTransformInfo::TCC_Expensive;
      break;
    case Intrinsic::ctlz:       ISD = ISD::CTLZ;       break;
    case Intrinsic::cttz:       ISD = ISD::CTTZ;       break;
    case Intrinsic::bswap:      ISD = ISD::BSWAP;      break;
    case Intrinsic::bitreverse: ISD = ISD::BITREVERSE; break;
    }

    auto *ST = dyn_cast<StructType>(RetTy);
    Type *LegalizeTy = ST ? ST->getContainedType(0) : RetTy;
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(LegalizeTy);

    const TargetLoweringBase *TLI = getTLI();

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
          TLI->isFAbsFree(LT.second)) {
        return 0;
      }

      // The operation is legal. Assume it costs 1. If the type is split to
      // multiple registers, assume that there is some overhead to this.
      if (LT.first > 1)
        return (LT.first * 2);
      else
        return (LT.first * 1);
    } else if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered then assume that the code is
      // twice as expensive.
      return (LT.first * 2);
    }
    case Intrinsic::fmuladd: {
      // If we can't lower fmuladd into an FMA, estimate the cost of a
      // floating-point multiply followed by an add.
      return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
                                             CostKind) +
             thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
                                             CostKind);
    }
    case Intrinsic::experimental_constrained_fmuladd: {
      IntrinsicCostAttributes FMulAttrs(
          Intrinsic::experimental_constrained_fmul, RetTy, Tys);
      IntrinsicCostAttributes FAddAttrs(
          Intrinsic::experimental_constrained_fadd, RetTy, Tys);
      return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
             thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
    }
    case Intrinsic::smin:
    case Intrinsic::smax:
    case Intrinsic::umin:
    case Intrinsic::umax: {
      // minmax(X,Y) = select(icmp(X,Y),X,Y)
      Type *CondTy = RetTy->getWithNewBitWidth(1);
      bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
      CmpInst::Predicate Pred =
          IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
      InstructionCost Cost = 0;
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                          Pred, CostKind);
      return Cost;
    }
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::ssub_with_overflow: {
      Type *SumTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned Opcode = IID == Intrinsic::sadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;

      // Add:
      //   Overflow -> (Result < LHS) ^ (RHS < 0)
      // Sub:
      //   Overflow -> (Result < LHS) ^ (RHS > 0)
      InstructionCost Cost = 0;
      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost +=
          2 * thisT()->getCmpSelInstrCost(Instruction::ICmp, SumTy, OverflowTy,
                                          CmpInst::ICMP_SGT, CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
                                              CostKind);
      return Cost;
    }
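    // Worked expansion for %r = sadd.with.overflow(i32 %a, i32 %b):
    //   %s   = add i32 %a, %b
    //   %c1  = icmp slt i32 %s, %a
    //   %c2  = icmp slt i32 %b, 0
    //   %ovf = xor i1 %c1, %c2
    // i.e. exactly the Add, two ICmps, and Xor charged above.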
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::usub_with_overflow: {
      Type *SumTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned Opcode = IID == Intrinsic::uadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;
      CmpInst::Predicate Pred = IID == Intrinsic::uadd_with_overflow
                                    ? CmpInst::ICMP_ULT
                                    : CmpInst::ICMP_UGT;

      InstructionCost Cost = 0;
      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
                                          OverflowTy, Pred, CostKind);
      return Cost;
    }
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      Type *MulTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
      Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
      bool IsSigned = IID == Intrinsic::smul_with_overflow;

      unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
      TTI::CastContextHint CCH = TTI::CastContextHint::None;

      InstructionCost Cost = 0;
      Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
                                            CCH, CostKind);
      Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, ExtTy,
                                              CostKind);

      if (IsSigned)
        Cost += thisT()->getArithmeticInstrCost(Instruction::AShr, MulTy,
                                                CostKind);

      Cost += thisT()->getCmpSelInstrCost(
          BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
      return Cost;
    }
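    // Worked expansion for umul.with.overflow(i32 %a, i32 %b): zero-extend
    // both operands to i64, multiply, truncate the low half for the result,
    // and compare the high half (the 64-bit product shifted right by 32)
    // against zero for the overflow bit; the signed form additionally checks
    // the high half against the sign of the low half, hence the extra AShr.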
    case Intrinsic::sadd_sat:
    case Intrinsic::ssub_sat: {
      // Assume a default expansion.
      Type *CondTy = RetTy->getWithNewBitWidth(1);

      Type *OpTy = StructType::create({RetTy, CondTy});
      Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
                                     ? Intrinsic::sadd_with_overflow
                                     : Intrinsic::ssub_with_overflow;
      CmpInst::Predicate Pred = CmpInst::ICMP_SGT;

      // SatMax -> Overflow && SumDiff < 0
      // SatMin -> Overflow && SumDiff >= 0
      InstructionCost Cost = 0;
      IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
                                    nullptr, ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                              CondTy, Pred, CostKind);
      return Cost;
    }
    case Intrinsic::uadd_sat:
    case Intrinsic::usub_sat: {
      Type *CondTy = RetTy->getWithNewBitWidth(1);

      Type *OpTy = StructType::create({RetTy, CondTy});
      Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
                                     ? Intrinsic::uadd_with_overflow
                                     : Intrinsic::usub_with_overflow;

      InstructionCost Cost = 0;
      IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
                                    nullptr, ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                          CmpInst::BAD_ICMP_PREDICATE,
                                          CostKind);
      return Cost;
    }
    case Intrinsic::smul_fix:
    case Intrinsic::umul_fix: {
      unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
      Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);

      unsigned ExtOp =
          IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
      TTI::CastContextHint CCH = TTI::CastContextHint::None;

      InstructionCost Cost = 0;
      Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
                                            CCH, CostKind);
      Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, RetTy,
                                              CostKind);
      Cost += thisT()->getArithmeticInstrCost(Instruction::Shl, RetTy,
                                              CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy,
                                              CostKind);
      return Cost;
    }
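    // Worked expansion for smul.fix(i32 %a, i32 %b, scale S): sign-extend to
    // i64, multiply, then reassemble the fixed-point value by or-ing the
    // right-shifted low part with the left-shifted high part before
    // truncating back to i32, which is why the model charges 2 ext + mul +
    // 2 trunc + lshr + shl + or.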
    case Intrinsic::abs: {
      // abs(X) = select(icmp(X,0),X,sub(0,X))
      Type *CondTy = RetTy->getWithNewBitWidth(1);
      CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
      InstructionCost Cost = 0;
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
                                              CostKind);
      return Cost;
    }
    case Intrinsic::fptosi_sat:
    case Intrinsic::fptoui_sat: {
      if (Tys.empty())
        break;
      Type *FromTy = Tys[0];
      bool IsSigned = IID == Intrinsic::fptosi_sat;

      InstructionCost Cost = 0;
      IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FromTy,
                                     {FromTy, FromTy});
      Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
      IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FromTy,
                                     {FromTy, FromTy});
      Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
      Cost += thisT()->getCastInstrCost(
          IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
          TTI::CastContextHint::None, CostKind);
      if (IsSigned) {
        Type *CondTy = RetTy->getWithNewBitWidth(1);
        Cost += thisT()->getCmpSelInstrCost(
            BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
        Cost += thisT()->getCmpSelInstrCost(
            BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO, CostKind);
      }
      return Cost;
    }
    // Else, assume that we need to scalarize this intrinsic. For math
    // builtins this will emit a costly libcall, adding call overhead and
    // spills. Make it very expensive.
    if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
      // Scalable vectors cannot be scalarized, so return Invalid.
      if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
            return isa<ScalableVectorType>(Ty);
          }))
        return InstructionCost::getInvalid();

      InstructionCost ScalarizationCost =
          SkipScalarizationCost
              ? ScalarizationCostPassed
              : getScalarizationOverhead(RetVTy, /*Insert*/ true,
                                         /*Extract*/ false, CostKind);

      unsigned ScalarCalls = cast<FixedVectorType>(RetVTy)->getNumElements();
      SmallVector<Type *, 4> ScalarTys;
      for (Type *Ty : Tys) {
        if (Ty->isVectorTy())
          Ty = Ty->getScalarType();
        ScalarTys.push_back(Ty);
      }
      IntrinsicCostAttributes Attrs(IID, RetTy->getScalarType(), ScalarTys,
                                    FMF);
      InstructionCost ScalarCost =
          thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      for (Type *Ty : Tys) {
        if (auto *VTy = dyn_cast<VectorType>(Ty)) {
          if (!ICA.skipScalarizationCost())
            ScalarizationCost += getScalarizationOverhead(
                VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
          ScalarCalls = std::max(ScalarCalls,
                                 cast<FixedVectorType>(VTy)->getNumElements());
        }
      }
      return ScalarCalls * ScalarCost + ScalarizationCost;
    }

    // This is going to be turned into a library call, make it expensive.
    return SingleCallCost;
  unsigned getNumberOfParts(Type *Tp) {
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
    return LT.first.isValid() ? *LT.first.getValue() : 0;
  }
    // We cannot scalarize scalable vectors, so return an invalid cost.
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    Type *ScalarTy = Ty->getElementType();
    unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
    if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
        ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
        NumVecElts >= 2) {
      // Or reduction for i1 is represented as:
      //   %val = bitcast <ReduxWidth x i1> to iReduxWidth
      //   %res = cmp ne iReduxWidth %val, 0
      // And reduction for i1 is represented as:
      //   %val = bitcast <ReduxWidth x i1> to iReduxWidth
      //   %res = cmp eq iReduxWidth %val, 11111
      Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
      return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
                                       TTI::CastContextHint::None, CostKind) +
             thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
                                         CmpInst::makeCmpResultType(ValTy),
                                         CmpInst::BAD_ICMP_PREDICATE, CostKind);
    }
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    InstructionCost ArithCost = 0;
    InstructionCost ShuffleCost = 0;
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      NumVecElts /= 2;
      VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
      ShuffleCost +=
          thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
                                  CostKind, NumVecElts, SubTy);
      ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
      Ty = SubTy;
      ++LongVectorCount;
    }

    NumReduxLevels -= LongVectorCount;
    // ...
    ShuffleCost +=
        NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
                                                 std::nullopt, CostKind, 0, Ty);
    ArithCost +=
        NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
    return ShuffleCost + ArithCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                       CostKind, 0, nullptr, nullptr);
    // We cannot scalarize scalable vectors, so return an invalid cost.
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    auto *VTy = cast<FixedVectorType>(Ty);
    InstructionCost ExtractCost = getScalarizationOverhead(
        VTy, /*Insert=*/false, /*Extract=*/true, CostKind);
    InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
        Opcode, VTy->getElementType(), CostKind);
    ArithCost *= VTy->getNumElements();

    return ExtractCost + ArithCost;
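  // Worked example: a tree reduction of a <8 x i32> add runs Log2(8) = 3
  // levels. If <8 x i32> legalizes to two <4 x i32> registers, the first
  // level is a subvector extract plus one <4 x i32> add, and the remaining
  // two levels are shuffle + add on <4 x i32>, followed by a final lane-0
  // extract. The ordered (strict) variant instead pays one extract and one
  // scalar op per lane.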
  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                             std::optional<FastMathFlags> FMF,
                                             TTI::TargetCostKind CostKind) {
    assert(Ty && "Unknown reduction vector type");
    if (TTI::requiresOrderedReduction(FMF))
      return getOrderedReductionCost(Opcode, Ty, CostKind);
    return getTreeReductionCost(Opcode, Ty, CostKind);
  }

  /// Try to calculate op costs for min/max reduction operations.
  InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                         FastMathFlags FMF,
                                         TTI::TargetCostKind CostKind) {
    // We cannot scalarize scalable vectors, so return an invalid cost.
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    Type *ScalarTy = Ty->getElementType();
    unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    InstructionCost MinMaxCost = 0;
    InstructionCost ShuffleCost = 0;
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      NumVecElts /= 2;
      auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
      ShuffleCost +=
          thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
                                  CostKind, NumVecElts, SubTy);
      IntrinsicCostAttributes Attrs(IID, SubTy, {SubTy, SubTy}, FMF);
      MinMaxCost += getIntrinsicInstrCost(Attrs, CostKind);
      Ty = SubTy;
      ++LongVectorCount;
    }

    NumReduxLevels -= LongVectorCount;
    // ...
    // The minimal length of the vector is limited by the real length of
    // vector operations performed on the current platform.
    return ShuffleCost + MinMaxCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                       CostKind, 0, nullptr, nullptr);
  }

  InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
                                           Type *ResTy, VectorType *Ty,
                                           FastMathFlags FMF,
                                           TTI::TargetCostKind CostKind) {
    // Without any native support, this is equivalent to the cost of
    // vecreduce.opcode(ext(Ty A)).
    VectorType *ExtTy = VectorType::get(ResTy, Ty->getElementCount());
    InstructionCost RedCost =
        thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
    InstructionCost ExtCost = thisT()->getCastInstrCost(
        IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
        TTI::CastContextHint::None, CostKind);
    return RedCost + ExtCost;
  }

  InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
                                         VectorType *Ty,
                                         TTI::TargetCostKind CostKind) {
    // Without any native support, this is equivalent to the cost of
    // vecreduce.add(mul(ext(Ty A), ext(Ty B))).
    VectorType *ExtTy = VectorType::get(ResTy, Ty->getElementCount());
    InstructionCost RedCost = thisT()->getArithmeticReductionCost(
        Instruction::Add, ExtTy, std::nullopt, CostKind);
    InstructionCost ExtCost = thisT()->getCastInstrCost(
        IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
        TTI::CastContextHint::None, CostKind);
    InstructionCost MulCost =
        thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
    return RedCost + MulCost + 2 * ExtCost;
  }