#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H
T *thisT() { return static_cast<T *>(this); }
Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
"Can only extract subvectors from vectors");
assert((!isa<FixedVectorType>(VTy) ||
        (Index + NumSubElts) <= (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
       "SK_ExtractSubvector index out of range");
for (int i = 0; i != NumSubElts; ++i) {
  thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
  Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
"Can only insert subvectors into vectors");
assert((!isa<FixedVectorType>(VTy) ||
        (Index + NumSubElts) <= (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
       "SK_InsertSubvector index out of range");
for (int i = 0; i != NumSubElts; ++i) {
  Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
  thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
                              i + Index, nullptr, nullptr);
return static_cast<const T *>(this)->getST();
return static_cast<const T *>(this)->getTLI();
bool IsGatherScatter,
if (isa<ScalableVectorType>(DataTy))
auto *VT = cast<FixedVectorType>(DataTy);
VT->getNumElements()),
VT->getNumElements() *
VT->getNumElements() *
Instruction::ExtractElement,
VT->getNumElements()),
return LoadCost + PackingCost + ConditionalCost;
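The fragment above shows how a masked load/store that the target cannot execute natively is priced as the sum of a memory cost, a packing (scalarization) cost, and a per-element conditional cost. A minimal standalone sketch of that decomposition follows; every per-operation cost in it is an assumed placeholder, not a real target number.

#include <cstdio>

// Hypothetical per-operation costs; real values come from the target's TTI.
struct AssumedCosts {
  unsigned ScalarLoad = 1;      // one scalar load per element
  unsigned InsertExtract = 1;   // moving an element in/out of a vector
  unsigned Branch = 1;          // per-element conditional branch
};

// Sketch of the LoadCost + PackingCost + ConditionalCost decomposition for a
// masked load of NumElts elements when the mask is not a compile-time constant.
unsigned maskedLoadCostSketch(unsigned NumElts, bool VariableMask,
                              const AssumedCosts &C) {
  unsigned LoadCost = NumElts * C.ScalarLoad;
  unsigned PackingCost = NumElts * C.InsertExtract;        // build the result vector
  unsigned ConditionalCost =
      VariableMask ? NumElts * (C.InsertExtract + C.Branch) // test each mask bit
                   : 0;
  return LoadCost + PackingCost + ConditionalCost;
}

int main() {
  AssumedCosts C;
  std::printf("masked load of 8 elements: %u\n", maskedLoadCostSketch(8, true, C));
}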
unsigned *Fast) const {
std::pair<const Value *, unsigned>
bool HasBaseReg, int64_t Scale,
Type *ScalarValTy) const {
auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
while (VF > 2 && IsSupportedByTarget(VF))
int64_t BaseOffset, bool HasBaseReg,
int64_t Scale, unsigned AddrSpace) {
unsigned &JumpTableSize,
unsigned N = SI.getNumCases();
bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
APInt MinCaseVal = MaxCaseVal;
for (auto CI : SI.cases()) {
  const APInt &CaseVal = CI.getCaseValue()->getValue();
  if (CaseVal.sgt(MaxCaseVal))
    MaxCaseVal = CaseVal;
  if (CaseVal.slt(MinCaseVal))
    MinCaseVal = CaseVal;
for (auto I : SI.cases())
  Dests.insert(I.getCaseSuccessor());
if (N < 2 || N < TLI->getMinimumJumpTableEntries())
(MaxCaseVal - MinCaseVal)
    .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
JumpTableSize = Range;
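As the fragment shows, the jump table must span the whole value range of the switch cases (MaxCaseVal - MinCaseVal + 1), not just the number of cases. A standalone sketch of that sizing logic, with the minimum-entry and density thresholds chosen as assumed placeholders rather than the target's actual values:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Sketch of the jump-table sizing above: compute the case range and only call
// a table worthwhile when the cases are reasonably dense within it.
bool worthBuildingJumpTable(const std::vector<int64_t> &CaseVals,
                            uint64_t &JumpTableSize,
                            unsigned MinimumJumpTableEntries = 4) {
  if (CaseVals.size() < 2 || CaseVals.size() < MinimumJumpTableEntries)
    return false;
  auto [MinIt, MaxIt] = std::minmax_element(CaseVals.begin(), CaseVals.end());
  uint64_t Range = static_cast<uint64_t>(*MaxIt - *MinIt) + 1;
  JumpTableSize = Range;
  // Assumed density heuristic: at least half of the table entries are real cases.
  return CaseVals.size() * 2 >= Range;
}

int main() {
  uint64_t Size = 0;
  std::vector<int64_t> Dense = {0, 1, 2, 3, 5, 6};
  assert(worthBuildingJumpTable(Dense, Size) && Size == 7);
  std::vector<int64_t> Sparse = {0, 100, 200, 300};
  assert(!worthBuildingJumpTable(Sparse, Size) && Size == 301);
}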
if (!TM.isPositionIndependent())
Triple TargetTriple = TM.getTargetTriple();
else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
  MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
<< "advising against unrolling the loop because it "
std::optional<Value *>
bool &KnownBitsComputed) {
IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
virtual std::optional<unsigned>
return std::optional<unsigned>(
virtual std::optional<unsigned>
std::optional<unsigned> TargetResult =
unsigned NumStridedMemAccesses, unsigned NumPrefetches,
bool HasCall) const {
NumPrefetches, HasCall);
const APInt &DemandedElts, bool Insert, bool Extract,
if (isa<ScalableVectorType>(InTy))
auto *Ty = cast<FixedVectorType>(InTy);
"Vector size mismatch");
for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
  if (!DemandedElts[i])
  Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
  Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
if (isa<ScalableVectorType>(InTy))
auto *Ty = cast<FixedVectorType>(InTy);
return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
assert(Args.size() == Tys.size() && "Expected matching Args and Tys");
for (int I = 0, E = Args.size(); I != E; I++) {
if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
if (auto *VecTy = dyn_cast<VectorType>(Ty))
if (MTy == LK.second)
ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
const Instruction *CxtI = nullptr) {
const TargetLoweringBase *TLI = getTLI();
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
InstructionCost OpCost = (IsFloat ? 2 : 1);
if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
  return LT.first * OpCost;
if (!TLI->isOperationExpand(ISD, LT.second)) {
  return LT.first * 2 * OpCost;
unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
InstructionCost DivCost = thisT()->getArithmeticInstrCost(
    DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
InstructionCost MulCost =
    thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
InstructionCost SubCost =
    thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
return DivCost + MulCost + SubCost;
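The remainder pricing above follows the expansion rem(a, b) = a - (a / b) * b: when srem/urem is not legal, its cost is one divide plus one multiply plus one subtract. A small standalone sketch of the identity and the cost sum, with the per-operation costs as assumed placeholders:

#include <cassert>

// rem(a, b) == a - (a / b) * b, which is why an illegal srem/urem is priced
// as DivCost + MulCost + SubCost above.
long long expandedRem(long long a, long long b) {
  long long q = a / b;     // the divide the expansion needs
  long long p = q * b;     // the multiply
  return a - p;            // the subtract
}

int main() {
  assert(expandedRem(17, 5) == 17 % 5);
  assert(expandedRem(-17, 5) == -17 % 5);

  // Assumed placeholder per-operation costs, for illustration only.
  unsigned DivCost = 20, MulCost = 4, SubCost = 1;
  assert(DivCost + MulCost + SubCost == 25);
}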
if (isa<ScalableVectorType>(Ty))
if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
  InstructionCost Cost = thisT()->getArithmeticInstrCost(
SmallVector<Type *> Tys(Args.size(), Ty);
(Index + Mask.size()) <= (size_t)NumSrcElts) {
Mask, NumSrcElts, NumSubElts, Index)) {
if (Index + NumSubElts > NumSrcElts)
if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
  return getBroadcastShuffleOverhead(FVT, CostKind);
if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
  return getPermuteShuffleOverhead(FVT, CostKind);
cast<FixedVectorType>(SubTp));
cast<FixedVectorType>(SubTp));
assert(ISD && "Invalid opcode");
TypeSize SrcSize = SrcLT.second.getSizeInBits();
TypeSize DstSize = DstLT.second.getSizeInBits();
bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
case Instruction::Trunc:
case Instruction::BitCast:
  if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
case Instruction::FPExt:
  if (I && getTLI()->isExtFree(I))
case Instruction::ZExt:
  if (TLI->isZExtFree(SrcLT.second, DstLT.second))
case Instruction::SExt:
  if (I && getTLI()->isExtFree(I))
if (DstLT.first == SrcLT.first &&
case Instruction::AddrSpaceCast:
  Dst->getPointerAddressSpace()))
auto *SrcVTy = dyn_cast<VectorType>(Src);
auto *DstVTy = dyn_cast<VectorType>(Dst);
if (SrcLT.first == DstLT.first &&
if (!SrcVTy && !DstVTy) {
if (DstVTy && SrcVTy) {
  if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
    if (Opcode == Instruction::ZExt)
    if (Opcode == Instruction::SExt)
      return SrcLT.first * 2;
return SrcLT.first * 1;
if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
    DstVTy->getElementCount().isVector()) {
T *TTI = static_cast<T *>(this);
(!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
if (isa<ScalableVectorType>(DstVTy))
unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
if (Opcode == Instruction::BitCast) {
return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
assert(ISD && "Invalid opcode");
assert(CondTy && "CondTy must exist");
if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
  return LT.first * 1;
if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
  if (isa<ScalableVectorType>(ValTy))
  unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
Value *Op0 = nullptr;
Value *Op1 = nullptr;
if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
  Op0 = IE->getOperand(0);
  Op1 = IE->getOperand(1);
return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
const APInt &DemandedDstElts,
"Unexpected size of DemandedDstElts.");
Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
assert(!Src->isVoidTy() && "Invalid type");
if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
LT.second.getSizeInBits())) {
if (Opcode == Instruction::Store)
cast<VectorType>(Src), Opcode != Instruction::Store,
return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
const Value *Ptr, bool VariableMask,
return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
bool UseMaskForCond = false, bool UseMaskForGaps = false) {
if (isa<ScalableVectorType>(VecTy))
auto *VT = cast<FixedVectorType>(VecTy);
unsigned NumElts = VT->getNumElements();
assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
unsigned NumSubElts = NumElts / Factor;
if (UseMaskForCond || UseMaskForGaps)
  Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
BitVector UsedInsts(NumLegalInsts, false);
for (unsigned Index : Indices)
  for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
    UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
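For the interleaved access above, the wide vector is legalized into several legal instructions, and the BitVector records which of those legal-width chunks the requested interleave members actually touch; element (Index + Elt * Factor) lands in chunk element / NumEltsPerLegalInst. A standalone sketch of that bookkeeping, with all sizes picked only to make the example concrete:

#include <cassert>
#include <vector>

// Mark which legal-width chunks of the wide interleaved vector are touched by
// the requested member indices.
std::vector<bool> legalInstsUsed(unsigned NumElts, unsigned Factor,
                                 const std::vector<unsigned> &Indices,
                                 unsigned NumEltsPerLegalInst) {
  unsigned NumSubElts = NumElts / Factor;
  unsigned NumLegalInsts =
      (NumElts + NumEltsPerLegalInst - 1) / NumEltsPerLegalInst;
  std::vector<bool> Used(NumLegalInsts, false);
  for (unsigned Index : Indices)
    for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
      Used[(Index + Elt * Factor) / NumEltsPerLegalInst] = true;
  return Used;
}

int main() {
  // 16 elements, factor 4, only member 0 requested, 4 elements per legal inst:
  // member 0 touches elements 0, 4, 8, 12, i.e. every legal chunk is used.
  auto Used = legalInstsUsed(16, 4, {0}, 4);
  for (bool B : Used)
    assert(B);
}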
"Interleaved memory op has too many members");
for (unsigned Index : Indices) {
  assert(Index < Factor && "Invalid index for interleaved memory op");
  for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
    DemandedLoadStoreElts.setBit(Index + Elm * Factor);
if (Opcode == Instruction::Load) {
SubVT, DemandedAllSubElts,
Cost += Indices.size() * InsSubCost;
Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
SubVT, DemandedAllSubElts,
Cost += ExtSubCost * Indices.size();
Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
if (!UseMaskForCond)
Cost += thisT()->getReplicationShuffleCost(
    I8Type, Factor, NumSubElts,
    UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
if (UseMaskForGaps) {
Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
(RetTy->isVectorTy() ? cast<VectorType>(RetTy)->getElementCount()
case Intrinsic::powi:
  if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
    bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
    ShouldOptForSize)) {
    unsigned ActiveBits = Exponent.getActiveBits();
    unsigned PopCount = Exponent.popcount();
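A reasonable reading of the ActiveBits/PopCount computation above: when the powi exponent is a known constant, the call can be expanded by repeated squaring, which needs roughly (ActiveBits - 1) squarings plus (PopCount - 1) combining multiplies, plus one divide if the exponent is negative. A standalone worked example of that multiply count (an illustrative model, not the exact target cost):

#include <cassert>
#include <cstdint>

// Multiply count for expanding powi(x, e) by repeated squaring: one squaring
// per bit position past the leading bit, plus one extra multiply per
// additional set bit of the exponent.
unsigned powiMulCount(uint64_t E) {
  unsigned ActiveBits = 0, PopCount = 0;
  for (uint64_t V = E; V; V >>= 1) {
    ++ActiveBits;
    PopCount += V & 1;
  }
  return (ActiveBits - 1) + (PopCount - 1);
}

int main() {
  // e = 13 = 0b1101: 3 squarings (x^2, x^4, x^8) and 2 combining multiplies
  // (x^8 * x^4 * x), 5 multiplies in total.
  assert(powiMulCount(13) == 5);
  assert(powiMulCount(8) == 3); // x^2, x^4, x^8
}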
thisT()->getArithmeticInstrCost(
if (RHSC->isNegative())
  Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
case Intrinsic::cttz:
case Intrinsic::ctlz:
case Intrinsic::memcpy:
  return thisT()->getMemcpyCost(ICA.getInst());
case Intrinsic::masked_scatter: {
  const Value *Mask = Args[3];
  bool VarMask = !isa<Constant>(Mask);
  Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
  return thisT()->getGatherScatterOpCost(Instruction::Store,
case Intrinsic::masked_gather: {
  const Value *Mask = Args[2];
  bool VarMask = !isa<Constant>(Mask);
  Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
  return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
case Intrinsic::experimental_stepvector: {
  if (isa<ScalableVectorType>(RetTy))
case Intrinsic::vector_extract: {
  if (isa<ScalableVectorType>(RetTy))
  unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
  return thisT()->getShuffleCost(
case Intrinsic::vector_insert: {
  if (isa<ScalableVectorType>(Args[1]->getType()))
  unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
  return thisT()->getShuffleCost(
case Intrinsic::experimental_vector_reverse: {
  return thisT()->getShuffleCost(
case Intrinsic::experimental_vector_splice: {
  unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
  return thisT()->getShuffleCost(
case Intrinsic::vector_reduce_add:
case Intrinsic::vector_reduce_mul:
case Intrinsic::vector_reduce_and:
case Intrinsic::vector_reduce_or:
case Intrinsic::vector_reduce_xor:
case Intrinsic::vector_reduce_smax:
case Intrinsic::vector_reduce_smin:
case Intrinsic::vector_reduce_fmax:
case Intrinsic::vector_reduce_fmin:
case Intrinsic::vector_reduce_fmaximum:
case Intrinsic::vector_reduce_fminimum:
case Intrinsic::vector_reduce_umax:
case Intrinsic::vector_reduce_umin: {
case Intrinsic::vector_reduce_fadd:
case Intrinsic::vector_reduce_fmul: {
  IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
case Intrinsic::fshl:
case Intrinsic::fshr: {
  const Value *X = Args[0];
  const Value *Y = Args[1];
  const Value *Z = Args[2];
  thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
  thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
  Cost += thisT()->getArithmeticInstrCost(
  Cost += thisT()->getArithmeticInstrCost(
  Cost += thisT()->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
  Type *CondTy = RetTy->getWithNewBitWidth(1);
  thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
  thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
case Intrinsic::get_active_lane_mask: {
  if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgType)) {
  thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
  Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
std::optional<unsigned> FOp =
if (ICA.getID() == Intrinsic::vp_load) {
  if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
    Alignment = VPI->getPointerAlignment().valueOrOne();
  dyn_cast<PointerType>(ICA.getArgs()[0]->getType()))
    AS = PtrTy->getAddressSpace();
  return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
if (ICA.getID() == Intrinsic::vp_store) {
  if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
    Alignment = VPI->getPointerAlignment().valueOrOne();
  dyn_cast<PointerType>(ICA.getArgs()[1]->getType()))
    AS = PtrTy->getAddressSpace();
  return thisT()->getMemoryOpCost(*FOp, Args[0]->getType(), Alignment,
return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
std::optional<Intrinsic::ID> FID =
"Expected VPIntrinsic to have Mask and Vector Length args and "
*FID != Intrinsic::vector_reduce_fadd &&
*FID != Intrinsic::vector_reduce_fmul)
return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
ScalarizationCost = 0;
if (!RetTy->isVoidTy())
cast<VectorType>(RetTy),
ScalarizationCost +=
return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
unsigned VecTyIndex = 0;
if (IID == Intrinsic::vector_reduce_fadd ||
    IID == Intrinsic::vector_reduce_fmul)
assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
      return isa<ScalableVectorType>(Ty);
SkipScalarizationCost ? ScalarizationCostPassed : 0;
unsigned ScalarCalls = 1;
if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
  if (!SkipScalarizationCost)
  ScalarCalls = std::max(ScalarCalls,
                         cast<FixedVectorType>(RetVTy)->getNumElements());
  ScalarRetTy = RetTy->getScalarType();
for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
  if (auto *VTy = dyn_cast<VectorType>(Ty)) {
    if (!SkipScalarizationCost)
    ScalarCalls = std::max(ScalarCalls,
                           cast<FixedVectorType>(VTy)->getNumElements());
if (ScalarCalls == 1)
thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
return ScalarCalls * ScalarCost + ScalarizationCost;
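The fallback above prices a vector intrinsic with no native lowering as one scalar call per lane plus the insert/extract traffic needed to split operands and rebuild the result: ScalarCalls * ScalarCost + ScalarizationCost. A standalone sketch of that formula, with every cost an assumed placeholder:

#include <cassert>

// Scalarization fallback cost for a vector intrinsic: one scalar call per
// lane, plus extracting every lane of every vector operand and inserting
// every result lane.
unsigned scalarizedIntrinsicCost(unsigned Lanes, unsigned NumVectorOperands,
                                 unsigned ScalarCallCost,
                                 unsigned InsertExtractCost) {
  unsigned ScalarCalls = Lanes;
  unsigned ScalarizationCost =
      Lanes * NumVectorOperands * InsertExtractCost + Lanes * InsertExtractCost;
  return ScalarCalls * ScalarCallCost + ScalarizationCost;
}

int main() {
  // 4 lanes, 2 vector operands, assumed call cost 10, insert/extract cost 1:
  // 4 * 10 + (4 * 2 * 1 + 4 * 1) = 52.
  assert(scalarizedIntrinsicCost(4, 2, 10, 1) == 52);
}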
case Intrinsic::sqrt:
case Intrinsic::sin:
case Intrinsic::cos:
case Intrinsic::exp:
case Intrinsic::exp2:
case Intrinsic::exp10:
case Intrinsic::log:
case Intrinsic::log10:
case Intrinsic::log2:
case Intrinsic::fabs:
case Intrinsic::canonicalize:
case Intrinsic::minnum:
case Intrinsic::maxnum:
case Intrinsic::minimum:
case Intrinsic::maximum:
case Intrinsic::copysign:
case Intrinsic::floor:
case Intrinsic::ceil:
case Intrinsic::trunc:
case Intrinsic::nearbyint:
case Intrinsic::rint:
case Intrinsic::lrint:
case Intrinsic::llrint:
case Intrinsic::round:
case Intrinsic::roundeven:
case Intrinsic::pow:
case Intrinsic::fma:
case Intrinsic::fmuladd:
case Intrinsic::experimental_constrained_fmuladd:
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::sideeffect:
case Intrinsic::pseudoprobe:
case Intrinsic::arithmetic_fence:
case Intrinsic::masked_store: {
  Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
  return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
case Intrinsic::masked_load: {
  Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
  return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
case Intrinsic::vector_reduce_add:
  return thisT()->getArithmeticReductionCost(Instruction::Add, VecOpTy,
case Intrinsic::vector_reduce_mul:
  return thisT()->getArithmeticReductionCost(Instruction::Mul, VecOpTy,
case Intrinsic::vector_reduce_and:
  return thisT()->getArithmeticReductionCost(Instruction::And, VecOpTy,
case Intrinsic::vector_reduce_or:
  return thisT()->getArithmeticReductionCost(Instruction::Or, VecOpTy,
case Intrinsic::vector_reduce_xor:
  return thisT()->getArithmeticReductionCost(Instruction::Xor, VecOpTy,
case Intrinsic::vector_reduce_fadd:
  return thisT()->getArithmeticReductionCost(Instruction::FAdd, VecOpTy,
case Intrinsic::vector_reduce_fmul:
  return thisT()->getArithmeticReductionCost(Instruction::FMul, VecOpTy,
case Intrinsic::vector_reduce_smax:
  return thisT()->getMinMaxReductionCost(Intrinsic::smax, VecOpTy,
case Intrinsic::vector_reduce_smin:
  return thisT()->getMinMaxReductionCost(Intrinsic::smin, VecOpTy,
case Intrinsic::vector_reduce_umax:
  return thisT()->getMinMaxReductionCost(Intrinsic::umax, VecOpTy,
case Intrinsic::vector_reduce_umin:
  return thisT()->getMinMaxReductionCost(Intrinsic::umin, VecOpTy,
case Intrinsic::vector_reduce_fmax:
  return thisT()->getMinMaxReductionCost(Intrinsic::maxnum, VecOpTy,
case Intrinsic::vector_reduce_fmin:
  return thisT()->getMinMaxReductionCost(Intrinsic::minnum, VecOpTy,
case Intrinsic::vector_reduce_fmaximum:
  return thisT()->getMinMaxReductionCost(Intrinsic::maximum, VecOpTy,
case Intrinsic::vector_reduce_fminimum:
  return thisT()->getMinMaxReductionCost(Intrinsic::minimum, VecOpTy,
case Intrinsic::abs: {
  Type *CondTy = RetTy->getWithNewBitWidth(1);
  Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
  Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
  Cost += thisT()->getArithmeticInstrCost(
case Intrinsic::smax:
case Intrinsic::smin:
case Intrinsic::umax:
case Intrinsic::umin: {
  Type *CondTy = RetTy->getWithNewBitWidth(1);
  bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
  Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
  Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
case Intrinsic::sadd_sat:
case Intrinsic::ssub_sat: {
  Type *CondTy = RetTy->getWithNewBitWidth(1);
      ? Intrinsic::sadd_with_overflow
      : Intrinsic::ssub_with_overflow;
  nullptr, ScalarizationCostPassed);
  Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
  Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
  Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
case Intrinsic::uadd_sat:
case Intrinsic::usub_sat: {
  Type *CondTy = RetTy->getWithNewBitWidth(1);
      ? Intrinsic::uadd_with_overflow
      : Intrinsic::usub_with_overflow;
  nullptr, ScalarizationCostPassed);
  Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
  thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
case Intrinsic::smul_fix:
case Intrinsic::umul_fix: {
  unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
  Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
  IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
  thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
  Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
  Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, RetTy,
case Intrinsic::sadd_with_overflow:
case Intrinsic::ssub_with_overflow: {
  Type *SumTy = RetTy->getContainedType(0);
  Type *OverflowTy = RetTy->getContainedType(1);
  unsigned Opcode = IID == Intrinsic::sadd_with_overflow
                        ? BinaryOperator::Add
                        : BinaryOperator::Sub;
  Cost += 2 * thisT()->getCmpSelInstrCost(
                  Instruction::ICmp, SumTy, OverflowTy,
  Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
case Intrinsic::uadd_with_overflow:
case Intrinsic::usub_with_overflow: {
  Type *SumTy = RetTy->getContainedType(0);
  Type *OverflowTy = RetTy->getContainedType(1);
  unsigned Opcode = IID == Intrinsic::uadd_with_overflow
                        ? BinaryOperator::Add
                        : BinaryOperator::Sub;
  thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, OverflowTy,
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow: {
  Type *MulTy = RetTy->getContainedType(0);
  Type *OverflowTy = RetTy->getContainedType(1);
  bool IsSigned = IID == Intrinsic::smul_with_overflow;
  unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
  Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
  thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
  Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
  Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, ExtTy,
  Cost += thisT()->getArithmeticInstrCost(Instruction::AShr, MulTy,
  Cost += thisT()->getCmpSelInstrCost(
case Intrinsic::fptosi_sat:
case Intrinsic::fptoui_sat: {
  Type *FromTy = Tys[0];
  bool IsSigned = IID == Intrinsic::fptosi_sat;
  Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
  Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
  Cost += thisT()->getCastInstrCost(
      IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
  Type *CondTy = RetTy->getWithNewBitWidth(1);
  Cost += thisT()->getCmpSelInstrCost(
Cost += thisT()->getCmpSelInstrCost(
case Intrinsic::ctpop:
case Intrinsic::ctlz:
case Intrinsic::cttz:
case Intrinsic::bswap:
case Intrinsic::bitreverse:
if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
  return (LT.first * 2);
  return (LT.first * 1);
  return (LT.first * 2);
if (IID == Intrinsic::fmuladd)
  return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
         thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
if (IID == Intrinsic::experimental_constrained_fmuladd) {
  Intrinsic::experimental_constrained_fmul, RetTy, Tys);
  Intrinsic::experimental_constrained_fadd, RetTy, Tys);
  return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
         thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
  if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
        return isa<ScalableVectorType>(Ty);
  SkipScalarizationCost
      ? ScalarizationCostPassed
  unsigned ScalarCalls = cast<FixedVectorType>(RetVTy)->getNumElements();
  for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
  thisT()->getIntrinsicInstrCost(Attrs, CostKind);
  for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
    if (auto *VTy = dyn_cast<VectorType>(Tys[i])) {
      ScalarCalls = std::max(ScalarCalls,
                             cast<FixedVectorType>(VTy)->getNumElements());
  return ScalarCalls * ScalarCost + ScalarizationCost;
return SingleCallCost;
return LT.first.isValid() ? *LT.first.getValue() : 0;
if (isa<ScalableVectorType>(Ty))
unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
  return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
         thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
unsigned NumReduxLevels = Log2_32(NumVecElts);
std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
unsigned LongVectorCount = 0;
LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
while (NumVecElts > MVTLen) {
  ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
NumReduxLevels -= LongVectorCount;
NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
return ShuffleCost + ArithCost +
       thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
if (isa<ScalableVectorType>(Ty))
auto *VTy = cast<FixedVectorType>(Ty);
return ExtractCost + ArithCost;
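The ordered (strict, in-order) reduction above is priced as ExtractCost + ArithCost: roughly one element extract and one scalar operation per vector lane, because the lanes must be combined sequentially to preserve floating-point semantics. A standalone sketch of that split, with per-operation costs as assumed placeholders:

#include <cassert>

// Ordered FP reduction: each lane is pulled out of the vector and folded into
// the running scalar result one at a time.
unsigned orderedReductionCost(unsigned NumElts, unsigned ExtractEltCost,
                              unsigned ScalarOpCost) {
  unsigned ExtractCost = NumElts * ExtractEltCost;
  unsigned ArithCost = NumElts * ScalarOpCost;
  return ExtractCost + ArithCost;
}

int main() {
  // 8 lanes, assumed extract cost 1 and scalar fadd cost 2: 8 + 16 = 24.
  assert(orderedReductionCost(8, 1, 2) == 24);
}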
std::optional<FastMathFlags> FMF,
assert(Ty && "Unknown reduction vector type");
if (isa<ScalableVectorType>(Ty))
unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
unsigned NumReduxLevels = Log2_32(NumVecElts);
std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
unsigned LongVectorCount = 0;
LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
while (NumVecElts > MVTLen) {
NumReduxLevels -= LongVectorCount;
return ShuffleCost + MinMaxCost +
       thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
return RedCost + ExtCost;
Instruction::Add, ExtTy, std::nullopt, CostKind);
IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
return RedCost + MulCost + 2 * ExtCost;
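The multiply-accumulate reduction cost above combines an add reduction in the extended type, one multiply of the widened operands, and two extends of the inputs: RedCost + MulCost + 2 * ExtCost. A standalone sketch of that formula, with all numbers as assumed placeholders rather than target costs:

#include <cassert>

// Dot-product style reduction sum(ext(a[i]) * ext(b[i])): two vector extends,
// one multiply in the widened type, and an add reduction of the products.
unsigned mulAccReductionCost(unsigned ExtCost, unsigned MulCost,
                             unsigned AddReductionCost) {
  return AddReductionCost + MulCost + 2 * ExtCost;
}

int main() {
  // Assumed costs: extend 1, widened multiply 2, add reduction 4.
  assert(mulAccReductionCost(1, 2, 4) == 8);
}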
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the BitVector class.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool sgt(const APInt &RHS) const
Signed greater than comparison.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
size_t size() const
size - Get the array size.
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Base class which can be used to help build a TTI implementation.
bool isTypeLegal(Type *Ty)
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
Get intrinsic cost based on arguments.
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
virtual unsigned getPrefetchDistance() const
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false)
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace)
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE)
unsigned getMaxInterleaveFactor(ElementCount VF)
unsigned getNumberOfParts(Type *Tp)
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind)
InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index)
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const
std::optional< unsigned > getVScaleForTuning() const
InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind)
Try to calculate the cost of performing strict (in-order) reductions, which involves doing a sequence...
bool isNumRegsMajorCostOfLSR()
bool isTruncateFree(Type *Ty1, Type *Ty2)
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1)
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo)
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args=ArrayRef< const Value * >(), const Instruction *CxtI=nullptr)
InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind)
Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI)
virtual bool shouldPrefetchAddressSpace(unsigned AS) const
bool isLegalICmpImmediate(int64_t imm)
bool isProfitableToHoist(Instruction *I)
virtual unsigned getMaxPrefetchIterationsAhead() const
InstructionCost getVectorInstrCost(const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index)
std::optional< unsigned > getMaxVScale() const
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *Ty, int &Index, VectorType *&SubTy) const
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
unsigned getRegUsageForType(Type *Ty)
bool shouldBuildRelLookupTables() const
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
Try to calculate op costs for min/max reduction operations.
unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr)
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args=std::nullopt)
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const
bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2)
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
bool shouldFoldTerminatingConditionAfterLSR() const
virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
bool hasBranchDivergence(const Function *F=nullptr)
bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const
unsigned getAssumedAddrSpace(const Value *V) const
InstructionCost getOperandsScalarizationOverhead(ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind)
Estimate the overhead of scalarizing an instruction's unique non-constant operands.
InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *, const SCEV *)
InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind)
Estimate the overhead of scalarizing an instruction.
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset)
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind)
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty)
virtual std::optional< unsigned > getCacheSize(TargetTransformInfo::CacheLevel Level) const
bool isAlwaysUniform(const Value *V)
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr)
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true)
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const
unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const
InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind)
Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
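The wrapper above simply demands every lane and forwards to the DemandedElts overload. A standalone sketch of that pattern (the flat cost of 1 per demanded insert/extract is an assumed placeholder, and the bitmask type is a simplification of APInt):

#include <cstdint>

// Per-lane scalarization overhead for the demanded lanes of a fixed-width
// vector: one insertelement and/or one extractelement per demanded lane.
unsigned scalarizationOverhead(uint64_t DemandedElts, unsigned NumElts,
                               bool Insert, bool Extract) {
  unsigned Cost = 0;
  for (unsigned i = 0; i < NumElts; ++i) {
    if (!(DemandedElts >> i & 1))
      continue;
    if (Insert)
      ++Cost;
    if (Extract)
      ++Cost;
  }
  return Cost;
}

// Equivalent of the documented helper wrapper: all lanes demanded.
unsigned scalarizationOverheadAllLanes(unsigned NumElts, bool Insert,
                                       bool Extract) {
  uint64_t AllOnes = NumElts >= 64 ? ~0ULL : ((1ULL << NumElts) - 1);
  return scalarizationOverhead(AllOnes, NumElts, Insert, Extract);
}

int main() {
  return scalarizationOverheadAllLanes(4, /*Insert=*/true, /*Extract=*/false) == 4
             ? 0
             : 1;
}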
virtual std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const
virtual bool enableWritePrefetching() const
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP)
InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind)
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind)
Compute a cost of the given call instruction.
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind)
InstructionCost getFPOpCost(Type *Ty)
InstructionCost getVectorSplitCost()
std::pair< InstructionCost, MVT > getTypeLegalizationCost(Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
bool haveFastSqrt(Type *Ty)
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
unsigned getInliningThresholdMultiplier() const
InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind)
virtual ~BasicTTIImplBase()=default
InstructionCost getScalarizationOverhead(VectorType *RetTy, ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind)
Estimate the overhead of scalarizing the inputs and outputs of an instruction, with return type RetTy...
bool isVScaleKnownToBeAPowerOfTwo() const
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II)
bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const
bool isLegalAddImmediate(int64_t imm)
bool shouldBuildLookupTables()
unsigned getFlatAddressSpace()
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
virtual unsigned getCacheLineSize() const
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
bool isSourceOfDivergence(const Value *V)
int getInlinerVectorBonusPercent() const
InstructionCost getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
Get intrinsic cost based on argument types.
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
bool isSingleThreaded() const
BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
unsigned adjustInliningThreshold(const CallBase *CB)
bool isProfitableLSRChainElement(Instruction *I)
Concrete BasicTTIImpl that can be used if no further customization is needed.
size_type count() const
count - Returns the number of bits which are set.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
A parsed version of the target data layout string in and methods for querying it.
TypeSize getTypeStoreSizeInBits(Type *Ty) const
Returns the maximum number of bits that may be overwritten by storing the specified type; always a mu...
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getFixed(ScalarTy MinVal)
constexpr bool isScalar() const
Exactly one element.
Convenience struct for specifying and reasoning about fast-math flags.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
bool isTargetIntrinsic() const
isTargetIntrinsic - Returns true if this function is an intrinsic and the intrinsic is specific to a ...
The core instruction combiner logic.
static InstructionCost getInvalid(CostType Val=0)
std::optional< CostType > getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
FastMathFlags getFlags() const
const SmallVectorImpl< Type * > & getArgTypes() const
Type * getReturnType() const
bool skipScalarizationCost() const
const SmallVectorImpl< const Value * > & getArgs() const
InstructionCost getScalarizationCost() const
const IntrinsicInst * getInst() const
Intrinsic::ID getID() const
bool isTypeBasedOnly() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Represents a single loop in the control flow graph.
virtual bool shouldPrefetchAddressSpace(unsigned AS) const
virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Return the minimum stride necessary to trigger software prefetching.
virtual bool enableWritePrefetching() const
virtual unsigned getMaxPrefetchIterationsAhead() const
Return the maximum prefetch distance in terms of loop iterations.
virtual unsigned getPrefetchDistance() const
Return the preferred prefetch distance in terms of instructions.
virtual std::optional< unsigned > getCacheAssociativity(unsigned Level) const
Return the cache associativity for the given level of cache.
virtual std::optional< unsigned > getCacheLineSize(unsigned Level) const
Return the target cache line size in bytes at a given level.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Analysis providing profile information.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
static bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
static bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
static bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Provides information about what library functions are available for the current target.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed store is legal on this target.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
@ TypeScalarizeScalableVector
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumC...
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
virtual bool isCheapToSpeculateCttz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation is legal on this target.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests ...
virtual bool isLegalAddImmediate(int64_t) const
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to ...
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a ...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool isProfitableToHoist(Instruction *I) const
bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
virtual bool isCheapToSpeculateCtlz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic ctlz.
virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) const
Return the preferred common base offset.
LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const
Return pair that represents the legalization kind (first) that needs to happen to EVT (second) in ord...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace, Instruction *I=nullptr) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
Primary interface to the complete machine description for the target machine.
virtual std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
If the specified predicate checks whether a generic pointer falls within a specified address space,...
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
virtual unsigned getAssumedAddrSpace(const Value *V) const
If the specified generic pointer could be assumed as a pointer to a specific address space,...
ThreadModel::Model ThreadModel
ThreadModel - This flag specifies the type of threading model to assume for things like atomics.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual bool useAA() const
Enable use of alias analysis during code generation (during MI scheduling, DAGCombine,...
Triple - Helper class for working with autoconf configuration names.
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isArch64Bit() const
Test whether the architecture is 64-bit.
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, or DriverKit).
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static bool isVPBinOp(Intrinsic::ID ID)
static std::optional< unsigned > getFunctionalOpcodeForVP(Intrinsic::ID ID)
static std::optional< Intrinsic::ID > getFunctionalIntrinsicIDForVP(Intrinsic::ID ID)
static bool isVPIntrinsic(Intrinsic::ID)
static bool isVPReduction(Intrinsic::ID ID)
LLVM Value Representation.
Base class of all SIMD vector types.
static VectorType * getHalfElementsVectorType(VectorType *VTy)
This static method returns a VectorType with half as many elements as the input type and the same ele...
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Type * getElementType() const
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
@ BSWAP
Byte Swap and Counting operators.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FADD
Simple binary floating point operators.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator)
Returns the integer ceil(Numerator / Denominator).
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
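The math helpers listed above (divideCeil, Log2_32, isPowerOf2_32) recur throughout the cost computations in this file. A few concrete values that follow directly from the documented semantics, assuming an LLVM source tree is available to compile against:

#include <cassert>
#include "llvm/Support/MathExtras.h"

int main() {
  assert(llvm::divideCeil(7, 3) == 3);   // ceil(7 / 3)
  assert(llvm::Log2_32(32) == 5);        // floor log base 2
  assert(llvm::isPowerOf2_32(64));
  assert(!llvm::isPowerOf2_32(6));
}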
constexpr unsigned BitWidth
cl::opt< unsigned > PartialUnrollingThreshold
This struct is a compact representation of a valid (non-zero power of two) alignment.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Attributes of a target dependent hardware loop.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...