16 #ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17 #define LLVM_CODEGEN_BASICTTIIMPL_H
63 class ScalarEvolution;
85 T *thisT() { return static_cast<T *>(this); }
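BasicTTIImplBase is a CRTP mixin: every cost hook routes through thisT() so a target's overrides are picked up even when called from the generic code in this header. A minimal, self-contained sketch of that dispatch pattern (the class and hook names below are illustrative, not the real TTI interface):

// Minimal sketch of the CRTP dispatch used by BasicTTIImplBase<T> (names are
// illustrative only; the real cost hooks take many more parameters).
#include <iostream>

template <typename T> struct CostModelBase {
  T *thisT() { return static_cast<T *>(this); }
  int vectorCost(int NumElts) {
    // Dispatches to the most-derived target's per-element hook.
    int Cost = 0;
    for (int I = 0; I != NumElts; ++I)
      Cost += thisT()->perElementCost(I);
    return Cost;
  }
  int perElementCost(int) { return 1; } // generic fallback
};

struct MyTargetCostModel : CostModelBase<MyTargetCostModel> {
  int perElementCost(int) { return 2; } // target-specific override
};

int main() { std::cout << MyTargetCostModel().vectorCost(4) << "\n"; } // prints 8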
94 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
98 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
117 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
119 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
132 "Can only extract subvectors from vectors");
134 assert((!isa<FixedVectorType>(VTy) ||
135 (Index + NumSubElts) <=
136 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
137 "SK_ExtractSubvector index out of range");
143 for (int i = 0; i != NumSubElts; ++i) {
145 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
147 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
160 "Can only insert subvectors into vectors");
162 assert((!isa<FixedVectorType>(VTy) ||
163 (Index + NumSubElts) <=
164 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
165 "SK_InsertSubvector index out of range");
171 for (int i = 0; i != NumSubElts; ++i) {
172 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
175 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
176 i + Index, nullptr, nullptr);
183 return static_cast<const T *>(this)->getST();
188 return static_cast<const T *>(this)->getTLI();
210 bool IsGatherScatter,
213 if (isa<ScalableVectorType>(DataTy))
216 auto *VT = cast<FixedVectorType>(DataTy);
226 VT->getNumElements()),
230 VT->getNumElements() *
248 VT->getNumElements() *
250 Instruction::ExtractElement,
252 VT->getNumElements()),
258 return LoadCost + PackingCost + ConditionalCost;
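The fragment above prices an unsupported masked load/store by scalarizing it: a plain memory access per lane, insert/extract work to pack or unpack the lanes, and a per-lane branch on the mask bit. A rough stand-alone model of that sum, with placeholder unit costs rather than the actual formula in getCommonMaskedMemoryOpCost:

// Toy decomposition of a scalarized masked memory op; unit costs are made up.
#include <cstdio>

int maskedMemOpCost(int NumElts, int ScalarMemCost, int InsExtCost, int BranchCost) {
  int LoadCost = NumElts * ScalarMemCost;     // one scalar access per lane
  int PackingCost = NumElts * InsExtCost;     // insert/extract each lane
  int ConditionalCost = NumElts * BranchCost; // test the mask bit per lane
  return LoadCost + PackingCost + ConditionalCost;
}

int main() { std::printf("%d\n", maskedMemOpCost(8, 1, 1, 2)); }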
273 unsigned *Fast) const {
310 std::pair<const Value *, unsigned>
329 bool HasBaseReg, int64_t Scale,
340 Type *ScalarValTy) const {
341 auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
344 if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
354 while (VF > 2 && IsSupportedByTarget(VF))
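getStoreMinimumVF keeps halving the candidate VF while the narrower store would still be supported by the target. A reduced sketch of that loop, with a hypothetical isSupported() standing in for the TLI legality queries:

// Illustrative reduction of the getStoreMinimumVF loop; isSupported() is a
// stand-in for the target legality check, not a real LLVM API.
#include <cstdio>

unsigned storeMinimumVF(unsigned VF, bool (*isSupported)(unsigned)) {
  while (VF > 2 && isSupported(VF))
    VF /= 2;
  return VF;
}

int main() {
  auto Supported = [](unsigned VF) { return VF / 2 >= 4; }; // pretend 4-wide stores are legal
  std::printf("%u\n", storeMinimumVF(16, Supported));       // prints 4
}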
384 int64_t BaseOffset, bool HasBaseReg,
385 int64_t Scale, unsigned AddrSpace) {
423 unsigned &JumpTableSize,
433 unsigned N = SI.getNumCases();
444 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
445 APInt MinCaseVal = MaxCaseVal;
446 for (auto CI : SI.cases()) {
447 const APInt &CaseVal = CI.getCaseValue()->getValue();
448 if (CaseVal.sgt(MaxCaseVal))
449 MaxCaseVal = CaseVal;
450 if (CaseVal.slt(MinCaseVal))
451 MinCaseVal = CaseVal;
457 for (auto I : SI.cases())
458 Dests.insert(I.getCaseSuccessor());
467 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
470 (MaxCaseVal - MinCaseVal)
474 JumpTableSize = Range;
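The estimate above sizes a jump table by the span of the case values: the table must cover [MinCaseVal, MaxCaseVal], and the case count is then weighed against that range to judge density. A toy version of the range computation using 64-bit arithmetic instead of APInt (illustrative only):

// Jump-table range sketch: the table covers the whole case-value span plus one.
#include <cstdint>
#include <cstdio>

uint64_t jumpTableRange(int64_t MinCaseVal, int64_t MaxCaseVal) {
  return static_cast<uint64_t>(MaxCaseVal - MinCaseVal) + 1;
}

int main() { std::printf("%llu\n", (unsigned long long)jumpTableRange(-3, 12)); } // 16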
490 if (!TM.isPositionIndependent())
500 Triple TargetTriple = TM.getTargetTriple();
568 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
569 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
576 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
586 << "advising against unrolling the loop because it "
642 std::optional<Value *>
645 bool &KnownBitsComputed) {
656 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
660 virtual std::optional<unsigned>
662 return std::optional<unsigned>(
666 virtual std::optional<unsigned>
668 std::optional<unsigned> TargetResult =
686 unsigned NumStridedMemAccesses,
687 unsigned NumPrefetches,
688 bool HasCall) const {
690 NumPrefetches, HasCall);
721 const APInt &DemandedElts,
722 bool Insert, bool Extract,
726 if (isa<ScalableVectorType>(InTy))
728 auto *Ty = cast<FixedVectorType>(InTy);
731 "Vector size mismatch");
735 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
736 if (!DemandedElts[i])
739 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
742 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
753 if (isa<ScalableVectorType>(InTy))
755 auto *Ty = cast<FixedVectorType>(InTy);
758 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
769 assert(Args.size() == Tys.size() && "Expected matching Args and Tys");
773 for (int I = 0, E = Args.size(); I != E; I++) {
781 if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
782 if (auto *VecTy = dyn_cast<VectorType>(Ty))
839 if (MTy == LK.second)
853 ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
854 const Instruction *CxtI = nullptr) {
856 const TargetLoweringBase *TLI = getTLI();
857 int ISD = TLI->InstructionOpcodeToISD(Opcode);
858 assert(ISD && "Invalid opcode");
871 InstructionCost OpCost = (IsFloat ? 2 : 1);
873 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
876 return LT.first * OpCost;
879 if (!TLI->isOperationExpand(ISD, LT.second)) {
882 return LT.first * 2 * OpCost;
894 unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
895 InstructionCost DivCost = thisT()->getArithmeticInstrCost(
896 DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
897 InstructionCost MulCost =
899 InstructionCost SubCost =
900 thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
901 return DivCost + MulCost + SubCost;
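The DivCost + MulCost + SubCost total reflects expanding a remainder the target cannot lower directly, using a % b == a - (a / b) * b. A checkable toy version of that identity:

// The div+mul+sub remainder expansion being priced above, as plain C++.
#include <cassert>

int remViaDivMulSub(int A, int B) { return A - (A / B) * B; }

int main() { assert(remViaDivMulSub(17, 5) == 17 % 5); }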
906 if (isa<ScalableVectorType>(Ty))
912 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
913 InstructionCost Cost = thisT()->getArithmeticInstrCost(
918 SmallVector<Type *> Tys(Args.size(), Ty);
920 VTy->getNumElements() * Cost;
929 int Limit = Mask.size() * 2;
971 if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
972 return getBroadcastShuffleOverhead(FVT, CostKind);
980 if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
981 return getPermuteShuffleOverhead(FVT, CostKind);
985 cast<FixedVectorType>(SubTp));
988 cast<FixedVectorType>(SubTp));
1002 assert(ISD && "Invalid opcode");
1006 TypeSize SrcSize = SrcLT.second.getSizeInBits();
1007 TypeSize DstSize = DstLT.second.getSizeInBits();
1008 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
1009 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
1014 case Instruction::Trunc:
1019 case Instruction::BitCast:
1022 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
1026 case Instruction::FPExt:
1027 if (I && getTLI()->isExtFree(I))
1030 case Instruction::ZExt:
1031 if (TLI->isZExtFree(SrcLT.second, DstLT.second))
1034 case Instruction::SExt:
1035 if (I && getTLI()->isExtFree(I))
1045 if (DstLT.first == SrcLT.first &&
1050 case Instruction::AddrSpaceCast:
1052 Dst->getPointerAddressSpace()))
1057 auto *SrcVTy = dyn_cast<VectorType>(Src);
1058 auto *DstVTy = dyn_cast<VectorType>(Dst);
1061 if (SrcLT.first == DstLT.first &&
1066 if (!SrcVTy && !DstVTy) {
1077 if (DstVTy && SrcVTy) {
1079 if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
1082 if (Opcode == Instruction::ZExt)
1086 if (Opcode == Instruction::SExt)
1087 return SrcLT.first * 2;
1093 return SrcLT.first * 1;
1106 if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
1107 DstVTy->getElementCount().isVector()) {
1110 T *TTI = static_cast<T *>(this);
1113 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
1120 if (isa<ScalableVectorType>(DstVTy))
1125 unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
1127 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
1140 if (Opcode == Instruction::BitCast) {
1156 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
1173 assert(ISD && "Invalid opcode");
1182 assert(CondTy && "CondTy must exist");
1192 return LT.first * 1;
1198 if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
1199 if (isa<ScalableVectorType>(ValTy))
1202 unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
1206 Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I);
1228 Value *Op0 = nullptr;
1229 Value *Op1 = nullptr;
1230 if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
1231 Op0 = IE->getOperand(0);
1232 Op1 = IE->getOperand(1);
1234 return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
1240 const APInt &DemandedDstElts,
1243 "Unexpected size of DemandedDstElts.");
1261 Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
1264 Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
1276 assert(!Src->isVoidTy() && "Invalid type");
1278 if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
1293 LT.second.getSizeInBits())) {
1319 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
1324 const Value *Ptr, bool VariableMask,
1328 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
1335 bool UseMaskForCond = false, bool UseMaskForGaps = false) {
1338 if (isa<ScalableVectorType>(VecTy))
1341 auto *VT = cast<FixedVectorType>(VecTy);
1343 unsigned NumElts = VT->getNumElements();
1344 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
1346 unsigned NumSubElts = NumElts / Factor;
1351 if (UseMaskForCond || UseMaskForGaps)
1352 Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
1361 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
1378 if (Cost.isValid() && VecTySize > VecTyLTSize) {
1381 unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
1385 unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
1388 BitVector UsedInsts(NumLegalInsts, false);
1389 for (unsigned Index : Indices)
1390 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
1391 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
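The bookkeeping above maps each requested interleave member's lanes onto the legalized memory instructions (lane L lands in instruction L / NumEltsPerLegalInst), so only the instructions actually touched get charged. A fixed-size illustration with made-up parameters:

// Interleaved-access bookkeeping sketch; sizes and indices are invented.
#include <bitset>
#include <cstdio>

int main() {
  const unsigned NumElts = 16, Factor = 4, NumSubElts = NumElts / Factor;
  const unsigned NumLegalInsts = 4, NumEltsPerLegalInst = NumElts / NumLegalInsts;
  std::bitset<NumLegalInsts> UsedInsts;
  unsigned Indices[] = {0, 2}; // requested interleave members
  for (unsigned Index : Indices)
    for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
      UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
  std::printf("%zu of %u legal instructions used\n", UsedInsts.count(), NumLegalInsts);
}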
1400 "Interleaved memory op has too many members");
1406 for (unsigned Index : Indices) {
1407 assert(Index < Factor && "Invalid index for interleaved memory op");
1408 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
1409 DemandedLoadStoreElts.setBit(Index + Elm * Factor);
1422 SubVT, DemandedAllSubElts,
1424 Cost += Indices.size() * InsSubCost;
1425 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1443 SubVT, DemandedAllSubElts,
1445 Cost += ExtSubCost * Indices.size();
1446 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1451 if (!UseMaskForCond)
1456 Cost += thisT()->getReplicationShuffleCost(
1457 I8Type, Factor, NumSubElts,
1458 UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
1466 if (UseMaskForGaps) {
1468 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
1493 (RetTy->isVectorTy() ? cast<VectorType>(RetTy)->getElementCount()
1503 if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
1504 bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
1506 ShouldOptForSize)) {
1510 unsigned ActiveBits = Exponent.getActiveBits();
1511 unsigned PopCount = Exponent.countPopulation();
1513 thisT()->getArithmeticInstrCost(
1514 Instruction::FMul, RetTy, CostKind);
1515 if (RHSC->getSExtValue() < 0)
1516 Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
1522 case Intrinsic::cttz:
1528 case Intrinsic::ctlz:
1535 return thisT()->getMemcpyCost(ICA.getInst());
1537 case Intrinsic::masked_scatter: {
1539 bool VarMask = !isa<Constant>(Mask);
1540 Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
1545 case Intrinsic::masked_gather: {
1547 bool VarMask = !isa<Constant>(Mask);
1548 Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
1552 case Intrinsic::experimental_stepvector: {
1553 if (isa<ScalableVectorType>(RetTy))
1558 case Intrinsic::vector_extract: {
1561 if (isa<ScalableVectorType>(RetTy))
1563 unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
1564 return thisT()->getShuffleCost(
1568 case Intrinsic::vector_insert: {
1573 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
1574 return thisT()->getShuffleCost(
1578 case Intrinsic::experimental_vector_reverse: {
1579 return thisT()->getShuffleCost(
1581 CostKind, 0, cast<VectorType>(RetTy));
1583 case Intrinsic::experimental_vector_splice: {
1584 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
1585 return thisT()->getShuffleCost(
1589 case Intrinsic::vector_reduce_add:
1590 case Intrinsic::vector_reduce_mul:
1591 case Intrinsic::vector_reduce_and:
1592 case Intrinsic::vector_reduce_or:
1593 case Intrinsic::vector_reduce_xor:
1594 case Intrinsic::vector_reduce_smax:
1595 case Intrinsic::vector_reduce_smin:
1596 case Intrinsic::vector_reduce_fmax:
1597 case Intrinsic::vector_reduce_fmin:
1598 case Intrinsic::vector_reduce_umax:
1599 case Intrinsic::vector_reduce_umin: {
1603 case Intrinsic::vector_reduce_fadd:
1604 case Intrinsic::vector_reduce_fmul: {
1609 case Intrinsic::fshl:
1610 case Intrinsic::fshr: {
1626 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
1628 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
1629 Cost += thisT()->getArithmeticInstrCost(
1630 BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
1632 Cost += thisT()->getArithmeticInstrCost(
1633 BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
1637 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
1643 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
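The funnel-shift costing above follows the usual expansion: reduce the shift amount modulo the bit width (the urem), shift the first operand left, shift the second right, or the two halves together, and guard the shift-by-zero case with a compare/select. A 32-bit toy implementation of that expansion:

// Toy fshl expansion matching the operations priced above (or/sub/shl/lshr/urem/cmp).
#include <cstdint>
#include <cassert>

uint32_t fshl32(uint32_t X, uint32_t Y, uint32_t Z) {
  unsigned Shift = Z % 32;                   // the urem being priced
  if (Shift == 0) return X;                  // cmp/select guard against shift-by-width
  return (X << Shift) | (Y >> (32 - Shift)); // shl, lshr, or
}

int main() { assert(fshl32(0x12345678u, 0x9ABCDEF0u, 8) == 0x3456789Au); }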
1651 case Intrinsic::get_active_lane_mask: {
1657 if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgType)) {
1664 ICA.getArgTypes()[0], cast<VectorType>(RetTy)->getElementCount());
1668 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
1679 ScalarizationCost = 0;
1682 cast<VectorType>(RetTy),
1684 ScalarizationCost +=
1690 return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
1711 unsigned VecTyIndex = 0;
1712 if (IID == Intrinsic::vector_reduce_fadd ||
1713 IID == Intrinsic::vector_reduce_fmul)
1715 assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
1716 VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
1725 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
1726 return isa<ScalableVectorType>(Ty);
1732 SkipScalarizationCost ? ScalarizationCostPassed : 0;
1733 unsigned ScalarCalls = 1;
1734 Type *ScalarRetTy = RetTy;
1735 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
1736 if (!SkipScalarizationCost)
1739 ScalarCalls = std::max(ScalarCalls,
1740 cast<FixedVectorType>(RetVTy)->getNumElements());
1744 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
1746 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1747 if (!SkipScalarizationCost)
1750 ScalarCalls = std::max(ScalarCalls,
1751 cast<FixedVectorType>(VTy)->getNumElements());
1754 ScalarTys.push_back(Ty);
1756 if (ScalarCalls == 1)
1761 thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
1763 return ScalarCalls * ScalarCost + ScalarizationCost;
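The total above is ScalarCalls * ScalarCost + ScalarizationCost, where ScalarCalls is the widest fixed-vector lane count among the return and operand types. A toy recomputation with made-up lane counts and unit costs:

// Scalarized-intrinsic total; lane counts and unit costs below are invented.
#include <algorithm>
#include <cstdio>

int main() {
  unsigned LaneCounts[] = {4, 8, 4}; // return type and two operands (assumed)
  unsigned ScalarCalls = 1;
  for (unsigned N : LaneCounts)
    ScalarCalls = std::max(ScalarCalls, N);
  int ScalarCost = 10, ScalarizationCost = 16; // placeholder unit costs
  std::printf("%d\n", (int)ScalarCalls * ScalarCost + ScalarizationCost); // 96
}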
1767 case Intrinsic::sqrt:
1770 case Intrinsic::sin:
1773 case Intrinsic::cos:
1776 case Intrinsic::exp:
1779 case Intrinsic::exp2:
1782 case Intrinsic::log:
1785 case Intrinsic::log10:
1791 case Intrinsic::fabs:
1794 case Intrinsic::canonicalize:
1809 case Intrinsic::copysign:
1821 case Intrinsic::nearbyint:
1824 case Intrinsic::rint:
1830 case Intrinsic::roundeven:
1833 case Intrinsic::pow:
1836 case Intrinsic::fma:
1839 case Intrinsic::fmuladd:
1842 case Intrinsic::experimental_constrained_fmuladd:
1846 case Intrinsic::lifetime_start:
1847 case Intrinsic::lifetime_end:
1848 case Intrinsic::sideeffect:
1849 case Intrinsic::pseudoprobe:
1850 case Intrinsic::arithmetic_fence:
1852 case Intrinsic::masked_store: {
1854 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
1858 case Intrinsic::masked_load: {
1860 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
1864 case Intrinsic::vector_reduce_add:
1867 case Intrinsic::vector_reduce_mul:
1870 case Intrinsic::vector_reduce_and:
1871 return thisT()->getArithmeticReductionCost(Instruction::And, VecOpTy,
1873 case Intrinsic::vector_reduce_or:
1874 return thisT()->getArithmeticReductionCost(Instruction::Or, VecOpTy,
1876 case Intrinsic::vector_reduce_xor:
1877 return thisT()->getArithmeticReductionCost(Instruction::Xor, VecOpTy,
1879 case Intrinsic::vector_reduce_fadd:
1880 return thisT()->getArithmeticReductionCost(Instruction::FAdd, VecOpTy,
1882 case Intrinsic::vector_reduce_fmul:
1883 return thisT()->getArithmeticReductionCost(Instruction::FMul, VecOpTy,
1885 case Intrinsic::vector_reduce_smax:
1886 case Intrinsic::vector_reduce_smin:
1887 case Intrinsic::vector_reduce_fmax:
1888 case Intrinsic::vector_reduce_fmin:
1889 return thisT()->getMinMaxReductionCost(
1892 case Intrinsic::vector_reduce_umax:
1893 case Intrinsic::vector_reduce_umin:
1894 return thisT()->getMinMaxReductionCost(
1902 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
1907 Cost += thisT()->getArithmeticInstrCost(
1921 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
1927 case Intrinsic::sadd_sat:
1928 case Intrinsic::ssub_sat: {
1933 ? Intrinsic::sadd_with_overflow
1934 : Intrinsic::ssub_with_overflow;
1941 nullptr, ScalarizationCostPassed);
1943 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
1949 case Intrinsic::uadd_sat:
1950 case Intrinsic::usub_sat: {
1955 ? Intrinsic::uadd_with_overflow
1956 : Intrinsic::usub_with_overflow;
1960 nullptr, ScalarizationCostPassed);
1967 case Intrinsic::smul_fix:
1968 case Intrinsic::umul_fix: {
1973 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
1977 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
1980 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
1982 Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, RetTy,
1986 Cost += thisT()->getArithmeticInstrCost(Instruction::Shl, RetTy, CostKind,
1989 Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
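The smul_fix/umul_fix pricing above mirrors the usual expansion: extend both operands, multiply in the wider type, then shift by the scale and recombine the halves. A toy Q-format multiply that follows the same arithmetic (using one wide shift rather than the shl/lshr/or split that is actually costed):

// Fixed-point multiply sketch: extend, widening multiply, shift out the scale.
#include <cstdint>
#include <cstdio>

int16_t smulFix(int16_t A, int16_t B, unsigned Scale) {
  int32_t Wide = (int32_t)A * (int32_t)B; // sign-extend + multiply
  return (int16_t)(Wide >> Scale);        // drop the fractional bits
}

int main() {
  // Q8 example: 1.5 * 2.0 == 3.0 -> 0x0180 * 0x0200 >> 8 == 0x0300
  std::printf("0x%x\n", (uint16_t)smulFix(0x0180, 0x0200, 8));
}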
1992 case Intrinsic::sadd_with_overflow:
1993 case Intrinsic::ssub_with_overflow: {
1996 unsigned Opcode = IID == Intrinsic::sadd_with_overflow
1998 : BinaryOperator::Sub;
2005 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2006 Cost += 2 * thisT()->getCmpSelInstrCost(
2007 Instruction::ICmp, SumTy, OverflowTy,
2009 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
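The signed add/sub-with-overflow expansion being priced here (the add, two compares, and an xor) relies on the identity that, for addition, overflow occurred iff (Sum < LHS) xor (RHS < 0) once the sum wraps. A checkable 32-bit version of that identity, with the wrap done through unsigned arithmetic:

// Signed add-overflow check matching the add + 2 cmp + xor shape costed above.
#include <cassert>
#include <cstdint>

bool saddOverflows(int32_t X, int32_t Y) {
  int32_t Sum = (int32_t)((uint32_t)X + (uint32_t)Y); // wraps like the machine add
  return (Sum < X) ^ (Y < 0);
}

int main() {
  assert(saddOverflows(INT32_MAX, 1));
  assert(!saddOverflows(5, 7));
  assert(saddOverflows(INT32_MIN, -1));
}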
2013 case Intrinsic::uadd_with_overflow:
2014 case Intrinsic::usub_with_overflow: {
2017 unsigned Opcode = IID == Intrinsic::uadd_with_overflow
2019 : BinaryOperator::Sub;
2025 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2027 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, OverflowTy,
2031 case Intrinsic::smul_with_overflow:
2032 case Intrinsic::umul_with_overflow: {
2037 bool IsSigned = IID == Intrinsic::smul_with_overflow;
2039 unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
2043 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
2046 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
2048 Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, ExtTy,
2054 Cost += thisT()->getArithmeticInstrCost(Instruction::AShr, MulTy,
2059 Cost += thisT()->getCmpSelInstrCost(
2063 case Intrinsic::fptosi_sat:
2064 case Intrinsic::fptoui_sat: {
2067 Type *FromTy = Tys[0];
2068 bool IsSigned = IID == Intrinsic::fptosi_sat;
2073 Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
2076 Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
2077 Cost += thisT()->getCastInstrCost(
2078 IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
2082 Cost += thisT()->getCmpSelInstrCost(
2084 Cost += thisT()->getCmpSelInstrCost(
2089 case Intrinsic::ctpop:
2095 case Intrinsic::ctlz:
2098 case Intrinsic::cttz:
2101 case Intrinsic::bswap:
2104 case Intrinsic::bitreverse:
2113 if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
2123 return (LT.first * 2);
2125 return (LT.first * 1);
2129 return (LT.first * 2);
2134 if (IID == Intrinsic::fmuladd)
2135 return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
2137 thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
2139 if (IID == Intrinsic::experimental_constrained_fmuladd) {
2141 Intrinsic::experimental_constrained_fmul, RetTy, Tys);
2143 Intrinsic::experimental_constrained_fadd, RetTy, Tys);
2144 return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
2145 thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
2151 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
2153 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
2154 return isa<ScalableVectorType>(Ty);
2159 SkipScalarizationCost
2160 ? ScalarizationCostPassed
2164 unsigned ScalarCalls = cast<FixedVectorType>(RetVTy)->getNumElements();
2166 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
2170 ScalarTys.push_back(Ty);
2175 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
2176 if (auto *VTy = dyn_cast<VectorType>(Tys[i])) {
2180 ScalarCalls = std::max(ScalarCalls,
2181 cast<FixedVectorType>(VTy)->getNumElements());
2184 return ScalarCalls * ScalarCost + ScalarizationCost;
2188 return SingleCallCost;
2210 return LT.first.isValid() ? *LT.first.getValue() : 0;
2243 if (isa<ScalableVectorType>(Ty))
2247 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
2248 if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
2258 return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
2260 thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
2264 unsigned NumReduxLevels = Log2_32(NumVecElts);
2267 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
2268 unsigned LongVectorCount = 0;
2270 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
2271 while (NumVecElts > MVTLen) {
2277 ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
2282 NumReduxLevels -= LongVectorCount;
2294 NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
2295 return ShuffleCost + ArithCost +
2296 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
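getTreeReductionCost models a pairwise reduction: Log2_32(NumVecElts) rounds of "shuffle the high half down and combine", then an extract of lane 0. A scalar sketch of the same shape, on a plain array instead of vector registers:

// Tree (pairwise) add-reduction sketch; NumElts must be a power of two.
#include <cstdio>

int treeReduceAdd(int *V, unsigned NumElts) {
  for (unsigned Width = NumElts / 2; Width >= 1; Width /= 2) // log2(NumElts) levels
    for (unsigned I = 0; I < Width; ++I)
      V[I] += V[I + Width];                                  // shuffle + add per level
  return V[0];                                               // final extractelement
}

int main() {
  int V[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  std::printf("%d\n", treeReduceAdd(V, 8)); // 36
}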
2320 if (isa<ScalableVectorType>(Ty))
2323 auto *VTy = cast<FixedVectorType>(Ty);
2328 ArithCost *= VTy->getNumElements();
2330 return ExtractCost + ArithCost;
2334 std::optional<FastMathFlags> FMF,
2348 if (isa<ScalableVectorType>(Ty))
2353 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
2354 unsigned NumReduxLevels = Log2_32(NumVecElts);
2357 CmpOpcode = Instruction::FCmp;
2360 "expecting floating point or integer type for min/max reduction");
2361 CmpOpcode = Instruction::ICmp;
2365 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
2366 unsigned LongVectorCount = 0;
2368 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
2369 while (NumVecElts > MVTLen) {
2378 thisT()->getCmpSelInstrCost(CmpOpcode, SubTy, CondTy,
2386 NumReduxLevels -= LongVectorCount;
2397 (thisT()->getCmpSelInstrCost(CmpOpcode, Ty, CondTy,
2403 return ShuffleCost + MinMaxCost +
2404 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
2410 std::optional<FastMathFlags> FMF,
2416 thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
2418 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
2421 return RedCost + ExtCost;
2434 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
2440 return RedCost + MulCost + 2 * ExtCost;
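The RedCost + MulCost + 2 * ExtCost total corresponds to an extended multiply-accumulate (dot-product style) reduction: two extends, a widening multiply, then the add reduction. A scalar model of the same computation:

// Scalar model of the extended multiply-accumulate reduction priced above.
#include <cstdint>
#include <cstdio>

int64_t mulAccReduce(const int16_t *A, const int16_t *B, unsigned N) {
  int64_t Acc = 0;
  for (unsigned I = 0; I < N; ++I)
    Acc += (int32_t)A[I] * (int32_t)B[I]; // sext, sext, mul, then reduce-add
  return Acc;
}

int main() {
  int16_t A[] = {1, 2, 3}, B[] = {4, 5, 6};
  std::printf("%lld\n", (long long)mulAccReduce(A, B, 3)); // 32
}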
2467 #endif // LLVM_CODEGEN_BASICTTIIMPL_H