#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H

const T *thisT() const { return static_cast<const T *>(this); }
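// --- Illustrative sketch (editor addition, not part of BasicTTIImpl.h) ---
// A minimal standalone example of the CRTP idiom behind thisT(): the base
// class casts itself to the most-derived type so that shared cost helpers
// dispatch to target-specific overrides without virtual calls. All names
// below (CostModelBase, MyTargetCostModel, getElementCost) are hypothetical.
#include <iostream>

template <typename T> struct CostModelBase {
  const T *thisT() const { return static_cast<const T *>(this); }
  // Written once in the base, but uses whatever hook the target provides.
  int getScalarizedCost(int NumElts) const {
    return NumElts * thisT()->getElementCost();
  }
  int getElementCost() const { return 1; } // generic fallback
};

struct MyTargetCostModel : CostModelBase<MyTargetCostModel> {
  int getElementCost() const { return 3; } // target-specific override
};

int main() {
  MyTargetCostModel TTI;
  std::cout << TTI.getScalarizedCost(4) << "\n"; // prints 12 via the override
}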
Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
"Can only extract subvectors from vectors");
(Index + NumSubElts) <=
"SK_ExtractSubvector index out of range");
for (int i = 0; i != NumSubElts; ++i) {
thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, CostKind, i + Index, nullptr, nullptr);
Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
"Can only insert subvectors into vectors");
(Index + NumSubElts) <=
"SK_InsertSubvector index out of range");
for (int i = 0; i != NumSubElts; ++i) {
Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind, i + Index, nullptr, nullptr);

return static_cast<const T *>(this)->getST();
return static_cast<const T *>(this)->getTLI();

bool IsGatherScatter,
unsigned VF = VT->getNumElements();
VF * thisT()->getMemoryOpCost(Opcode, VT->getElementType(), Alignment,
Opcode == Instruction::Store, CostKind);
VF * (thisT()->getCFInstrCost(Instruction::Br, CostKind) +
thisT()->getCFInstrCost(Instruction::PHI, CostKind));
return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
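// --- Illustrative sketch (editor addition) ---
// The return statement above decomposes a masked or gather/scatter memory
// operation into four parts. A hedged, standalone model of that sum: address
// extraction per lane (gather/scatter only), one scalar memory op per lane,
// packing/unpacking of the vector, and branch+phi overhead when the lanes
// must be guarded. The per-lane costs are assumed inputs, not LLVM's values.
unsigned estimateMaskedMemOpCost(unsigned VF, bool IsGatherScatter,
                                 unsigned ScalarMemOpCost, unsigned PackCost,
                                 unsigned BranchPhiCost) {
  unsigned AddrExtractCost = IsGatherScatter ? VF : 0; // extract one pointer per lane
  unsigned MemoryOpCost = VF * ScalarMemOpCost;        // scalarized element accesses
  unsigned PackingCost = VF * PackCost;                // insert/extract the elements
  unsigned ConditionalCost = VF * BranchPhiCost;       // br + phi per masked lane
  return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
}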
static bool isSplatMask(ArrayRef<int> Mask, unsigned NumSrcElts, int &Index) {
bool IsCompared = false;
return P.index() != Mask.size() - 1 || IsCompared;
if (static_cast<unsigned>(P.value()) >= NumSrcElts * 2)
SplatIdx = P.value();
return P.index() != Mask.size() - 1;
return SplatIdx == P.value();
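// --- Illustrative sketch (editor addition) ---
// A simplified standalone re-implementation of splat-mask detection in the
// spirit of isSplatMask above: every defined mask element must pick the same
// source lane, undef lanes (-1) are ignored, and out-of-range indices reject
// the mask. This is not the LLVM routine itself, just the underlying idea.
#include <vector>

bool isSplatMaskSketch(const std::vector<int> &Mask, unsigned NumSrcElts,
                       int &SplatIdx) {
  SplatIdx = -1;
  for (int M : Mask) {
    if (M < 0)
      continue;                                  // undef lane, ignore
    if (M >= static_cast<int>(NumSrcElts * 2))
      return false;                              // index outside both sources
    if (SplatIdx < 0)
      SplatIdx = M;                              // first defined lane fixes the index
    else if (M != SplatIdx)
      return false;                              // two different lanes -> not a splat
  }
  return SplatIdx >= 0;
}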
std::optional<InstructionCost> getMultipleResultIntrinsicVectorLibCallCost(
std::optional<unsigned> CallRetElementIndex = {}) const {
EVT VT = getTLI()->getValueType(DL, Ty);
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
switch (ICA.getID()) {
case Intrinsic::modf:
case Intrinsic::sincospi:
case Intrinsic::sincos:
RTLIB::LibcallImpl LibcallImpl = getTLI()->getLibcallImpl(LC);
if (LibcallImpl == RTLIB::Unsupported)
VecTy, {}, CostKind, 0, nullptr, {});
if (Idx == CallRetElementIndex)
Cost += thisT()->getMemoryOpCost(Instruction::Load, VectorTy,

unsigned *Fast) const override {
return getTLI()->allowsMisalignedMemoryAccesses(
const Function *Callee) const override {
return (CallerBits & CalleeBits) == CalleeBits;
return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
return getTLI()->getTargetMachine().Options.ThreadModel ==
std::pair<const Value *, unsigned>
return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
Value *NewV) const override {
return getTLI()->isLegalAddImmediate(imm);
return getTLI()->isLegalAddScalableImmediate(Imm);
return getTLI()->isLegalICmpImmediate(imm);
bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
int64_t ScalableOffset = 0) const override {
return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);
Type *ScalarValTy) const override {
auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
EVT VT = getTLI()->getValueType(DL, SrcTy);
if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
while (VF > 2 && IsSupportedByTarget(VF))
EVT VT = getTLI()->getValueType(DL, Ty, true);
return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
EVT VT = getTLI()->getValueType(DL, Ty, true);
return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
unsigned AddrSpace) const override {
return getTLI()->isTruncateFree(Ty1, Ty2);
return getTLI()->isProfitableToHoist(I);
bool useAA() const override { return getST()->useAA(); }
EVT VT = getTLI()->getValueType(DL, Ty, true);
return getTLI()->isTypeLegal(VT);
EVT ETy = getTLI()->getValueType(DL, Ty);
return getTLI()->getNumRegisters(Ty->getContext(), ETy);

unsigned N = SI.getNumCases();
if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
APInt MinCaseVal = MaxCaseVal;
for (auto CI : SI.cases()) {
const APInt &CaseVal = CI.getCaseValue()->getValue();
if (CaseVal.sgt(MaxCaseVal))
MaxCaseVal = CaseVal;
if (CaseVal.slt(MinCaseVal))
MinCaseVal = CaseVal;
if (N <= DL.getIndexSizeInBits(0u)) {
for (auto I : SI.cases()) {
if (N < 2 || N < TLI->getMinimumJumpTableEntries())
(MaxCaseVal - MinCaseVal).getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
JumpTableSize = Range;
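// --- Illustrative sketch (editor addition) ---
// The fragment above sizes a jump table from the smallest and largest case
// values: the table spans the inclusive range [MinCaseVal, MaxCaseVal], so
// its size is the clamped difference plus one. A standalone version using
// 64-bit integers instead of APInt (an assumption for illustration only):
#include <algorithm>
#include <cstdint>
#include <limits>
#include <vector>

uint64_t estimateJumpTableSize(const std::vector<int64_t> &CaseValues) {
  if (CaseValues.empty())
    return 0;
  int64_t MinCaseVal = CaseValues.front(), MaxCaseVal = CaseValues.front();
  for (int64_t V : CaseValues) {
    MaxCaseVal = std::max(MaxCaseVal, V);
    MinCaseVal = std::min(MinCaseVal, V);
  }
  // Clamp so the +1 below cannot overflow, mirroring the
  // getLimitedValue(max - 1) + 1 pattern in the source.
  uint64_t Range = static_cast<uint64_t>(MaxCaseVal) - static_cast<uint64_t>(MinCaseVal);
  Range = std::min(Range, std::numeric_limits<uint64_t>::max() - 1);
  return Range + 1;
}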
const Function &Fn) const override {
case Instruction::SDiv:
case Instruction::SRem:
case Instruction::UDiv:
case Instruction::URem: {
else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
<< "advising against unrolling the loop because it "

std::optional<Instruction *>
std::optional<Value *>
bool &KnownBitsComputed) const override {
SimplifyAndSetOp) const override {
IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
std::optional<unsigned>
return std::optional<unsigned>(
std::optional<unsigned>
std::optional<unsigned> TargetResult = getST()->getCacheAssociativity(static_cast<unsigned>(Level));
return getST()->getCacheLineSize();
return getST()->getPrefetchDistance();
unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const override {
return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses, NumPrefetches, HasCall);
return getST()->getMaxPrefetchIterationsAhead();
return getST()->enableWritePrefetching();
return getST()->shouldPrefetchAddressSpace(AS);
std::optional<unsigned> getMaxVScale() const override { return std::nullopt; }

bool Insert, bool Extract,
(VL.empty() || VL.size() == Ty->getNumElements()) && "Vector size mismatch");
for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
if (!DemandedElts[i])
Value *InsertedVal = VL.empty() ? nullptr : VL[i];
thisT()->getVectorInstrCost(Instruction::InsertElement, Ty, CostKind, i, nullptr, InsertedVal, VIC);
Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, CostKind, i, nullptr, nullptr, VIC);
unsigned ScalarOpdIdx) const override {
int OpdIdx) const override {
int RetIdx) const override {
return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
for (Type *Ty : Tys) {
if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() && !Ty->isPtrOrPtrVectorTy())
filterConstantAndDuplicatedOperands(Args, Tys), CostKind);
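// --- Illustrative sketch (editor addition) ---
// The loop above accumulates scalarization overhead: for each demanded lane,
// charge an insert (to rebuild the result vector) and/or an extract (to feed
// the scalarized operation). A hedged standalone model with per-lane costs
// passed in as parameters rather than queried from getVectorInstrCost:
#include <vector>

unsigned scalarizationOverheadSketch(const std::vector<bool> &DemandedElts,
                                     bool Insert, bool Extract,
                                     unsigned InsertCost, unsigned ExtractCost) {
  unsigned Cost = 0;
  for (bool Demanded : DemandedElts) {
    if (!Demanded)
      continue;               // lane not needed by the caller
    if (Insert)
      Cost += InsertCost;     // insertelement into the result vector
    if (Extract)
      Cost += ExtractCost;    // extractelement to obtain the scalar operand
  }
  return Cost;
}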
EVT MTy = getTLI()->getValueType(DL, Ty);
if (MTy == LK.second)
const Instruction *CxtI = nullptr) const override {
const TargetLoweringBase *TLI = getTLI();
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
return LT.first * OpCost;
if (!TLI->isOperationExpand(ISD, LT.second)) {
return LT.first * 2 * OpCost;
unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
return DivCost + MulCost + SubCost;
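// --- Illustrative sketch (editor addition) ---
// The DivCost + MulCost + SubCost sum above prices the standard remainder
// expansion x % y == x - (x / y) * y, used when the remainder operation has
// to be expanded. The identity on plain integers (the costs themselves are
// target-dependent):
#include <cassert>

int remViaDivMulSub(int X, int Y) {
  int Div = X / Y;   // priced as DivCost
  int Mul = Div * Y; // priced as MulCost
  return X - Mul;    // priced as SubCost; equals X % Y
}

void checkRemExpansion() { assert(remViaDivMulSub(17, 5) == 17 % 5); }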
int NumDstElts = Mask.size();
int NumSrcElts = SrcTy->getElementCount().getKnownMinValue();
if (isSplatMask(Mask, NumSrcElts, Index))
(Index + NumDstElts) <= NumSrcElts) {
if (all_of(Mask, [NumSrcElts](int M) { return M < NumSrcElts; }))
Mask, NumSrcElts, NumSubElts, Index)) {
if (Index + NumSubElts > NumSrcElts)
const Instruction *CxtI = nullptr) const override {
return getBroadcastShuffleOverhead(FVT, CostKind);
return getPermuteShuffleOverhead(FVT, CostKind);
return getExtractSubvectorOverhead(SrcTy, CostKind, Index,
return getInsertSubvectorOverhead(DstTy, CostKind, Index,

TypeSize SrcSize = SrcLT.second.getSizeInBits();
TypeSize DstSize = DstLT.second.getSizeInBits();
bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
case Instruction::Trunc:
case Instruction::BitCast:
if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
case Instruction::FPExt:
if (I && getTLI()->isExtFree(I))
case Instruction::ZExt:
if (TLI->isZExtFree(SrcLT.second, DstLT.second))
case Instruction::SExt:
if (I && getTLI()->isExtFree(I))
if (DstLT.first == SrcLT.first &&
case Instruction::AddrSpaceCast:
Dst->getPointerAddressSpace()))
if (SrcLT.first == DstLT.first &&
if (!SrcVTy && !DstVTy) {
if (DstVTy && SrcVTy) {
if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
if (Opcode == Instruction::ZExt)
if (Opcode == Instruction::SExt)
return SrcLT.first * 2;
return SrcLT.first * 1;
if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isKnownEven() && DstVTy->getElementCount().isKnownEven()) {
const T *TTI = thisT();
(!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
(2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
if (Opcode == Instruction::BitCast) {
return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy, CostKind, Index, nullptr, nullptr) +

const Instruction *I = nullptr) const override {
const TargetLoweringBase *TLI = getTLI();
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
Op1Info, Op2Info, I);
assert(CondTy && "CondTy must exist");
!TLI->isOperationExpand(ISD, LT.second)) {
return LT.first * 1;
Opcode, ValVTy->getScalarType(), CondTy->getScalarType(), VecPred,

unsigned Index, const Value *Op0, const Value *Op1,
ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx,
Value *Op0 = nullptr;
Value *Op1 = nullptr;
Op0 = IE->getOperand(0);
Op1 = IE->getOperand(1);
return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
unsigned Index) const override {
unsigned NewIndex = -1;
"Unexpected index from end of vector");
NewIndex = FVTy->getNumElements() - 1 - Index;
return thisT()->getVectorInstrCost(Opcode, Val, CostKind, NewIndex, nullptr,
const APInt &DemandedDstElts,
"Unexpected size of DemandedDstElts.");
Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
assert(!Src->isVoidTy() && "Invalid type");
LT.second.getSizeInBits())) {
if (Opcode == Instruction::Store)
Opcode == Instruction::Store, CostKind);

bool UseMaskForCond = false, bool UseMaskForGaps = false) const override {
unsigned NumElts = VT->getNumElements();
assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
unsigned NumSubElts = NumElts / Factor;
if (UseMaskForCond || UseMaskForGaps) {
unsigned IID = Opcode == Instruction::Load ? Intrinsic::masked_load : Intrinsic::masked_store;
Cost = thisT()->getMemIntrinsicInstrCost(
unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
if (Cost.isValid() && VecTySize > VecTyLTSize) {
unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
BitVector UsedInsts(NumLegalInsts, false);
for (unsigned Index : Indices)
for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
"Interleaved memory op has too many members");
for (unsigned Index : Indices) {
assert(Index < Factor && "Invalid index for interleaved memory op");
for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
DemandedLoadStoreElts.setBit(Index + Elm * Factor);
if (Opcode == Instruction::Load) {
SubVT, DemandedAllSubElts,
Cost += Indices.size() * InsSubCost;
Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
SubVT, DemandedAllSubElts,
Cost += ExtSubCost * Indices.size();
Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
if (!UseMaskForCond)
Cost += thisT()->getReplicationShuffleCost(I8Type, Factor, NumSubElts, UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
if (UseMaskForGaps) {
Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
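// --- Illustrative sketch (editor addition) ---
// The BitVector logic above estimates how many of the legalized pieces of a
// wide interleaved vector are actually touched: the type legalizes into
// ceil(VecTySize / VecTyLTSize) instructions, each covering
// ceil(NumElts / NumLegalInsts) elements, and lane (Index + Elt * Factor)
// falls into exactly one piece. Standalone version with std::vector<bool>:
#include <vector>

unsigned divideCeilSketch(unsigned A, unsigned B) { return (A + B - 1) / B; }

unsigned countUsedLegalInsts(unsigned VecTySize, unsigned VecTyLTSize,
                             unsigned NumElts, unsigned Factor,
                             const std::vector<unsigned> &Indices) {
  unsigned NumLegalInsts = divideCeilSketch(VecTySize, VecTyLTSize);
  unsigned NumEltsPerLegalInst = divideCeilSketch(NumElts, NumLegalInsts);
  unsigned NumSubElts = NumElts / Factor;
  std::vector<bool> UsedInsts(NumLegalInsts, false);
  for (unsigned Index : Indices)
    for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
      UsedInsts[(Index + Elt * Factor) / NumEltsPerLegalInst] = true;
  unsigned Used = 0;
  for (bool B : UsedInsts)
    Used += B;
  return Used; // only these legalized instructions need to be paid for
}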
std::optional<unsigned> FOp =
if (ICA.getID() == Intrinsic::vp_load) {
Alignment = VPI->getPointerAlignment().valueOrOne();
AS = PtrTy->getAddressSpace();
return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
if (ICA.getID() == Intrinsic::vp_store) {
Alignment = VPI->getPointerAlignment().valueOrOne();
AS = PtrTy->getAddressSpace();
return thisT()->getMemoryOpCost(*FOp, ICA.getArgTypes()[0], Alignment,
ICA.getID() == Intrinsic::vp_fneg) {
return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
return thisT()->getCastInstrCost(
return thisT()->getCmpSelInstrCost(*FOp, ICA.getArgTypes()[0],
if (ICA.getID() == Intrinsic::vp_load_ff) {
Alignment = VPI->getPointerAlignment().valueOrOne();
return thisT()->getMemIntrinsicInstrCost(
if (ICA.getID() == Intrinsic::vp_scatter) {
Alignment = VPI->getPointerAlignment().valueOrOne();
return thisT()->getMemIntrinsicInstrCost(
VarMask, Alignment, nullptr),
if (ICA.getID() == Intrinsic::vp_gather) {
Alignment = VPI->getPointerAlignment().valueOrOne();
return thisT()->getMemIntrinsicInstrCost(
VarMask, Alignment, nullptr),
if (ICA.getID() == Intrinsic::vp_select || ICA.getID() == Intrinsic::vp_merge) {
std::optional<Intrinsic::ID> FID =
if (ICA.getID() == Intrinsic::experimental_vp_reverse)
FID = Intrinsic::vector_reverse;
"Expected VPIntrinsic to have Mask and Vector Length args and "
*FID != Intrinsic::vector_reduce_fadd && *FID != Intrinsic::vector_reduce_fmul) {
return thisT()->getIntrinsicInstrCost(NewICA, CostKind);

case Intrinsic::powi:
bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(), ShouldOptForSize)) {
unsigned ActiveBits = Exponent.getActiveBits();
unsigned PopCount = Exponent.popcount();
thisT()->getArithmeticInstrCost(Instruction::FMul, RetTy, CostKind);
if (RHSC->isNegative())
Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
case Intrinsic::cttz:
if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
case Intrinsic::ctlz:
if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
case Intrinsic::memcpy:
return thisT()->getMemcpyCost(ICA.getInst());
case Intrinsic::masked_scatter: {
const Value *Mask = Args[2];
Align Alignment = I->getParamAlign(1).valueOrOne();
return thisT()->getMemIntrinsicInstrCost(
case Intrinsic::masked_gather: {
const Value *Mask = Args[1];
Align Alignment = I->getParamAlign(0).valueOrOne();
return thisT()->getMemIntrinsicInstrCost(
VarMask, Alignment, I),
case Intrinsic::masked_compressstore: {
const Value *Mask = Args[2];
Align Alignment = I->getParamAlign(1).valueOrOne();
return thisT()->getMemIntrinsicInstrCost(
case Intrinsic::masked_expandload: {
const Value *Mask = Args[1];
Align Alignment = I->getParamAlign(0).valueOrOne();
return thisT()->getMemIntrinsicInstrCost(
case Intrinsic::experimental_vp_strided_store: {
const Value *Ptr = Args[1];
const Value *Mask = Args[3];
const Value *EVL = Args[4];
I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy));
return thisT()->getMemIntrinsicInstrCost(
case Intrinsic::experimental_vp_strided_load: {
const Value *Ptr = Args[0];
const Value *Mask = Args[2];
const Value *EVL = Args[3];
I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy));
return thisT()->getMemIntrinsicInstrCost(
case Intrinsic::stepvector: {
case Intrinsic::vector_extract: {
case Intrinsic::vector_insert: {
return thisT()->getShuffleCost(
case Intrinsic::vector_splice_left:
case Intrinsic::vector_splice_right: {
unsigned Index = COffset->getZExtValue();
return thisT()->getShuffleCost(
IID == Intrinsic::vector_splice_left ? Index : -Index,
case Intrinsic::vector_reduce_add:
case Intrinsic::vector_reduce_mul:
case Intrinsic::vector_reduce_and:
case Intrinsic::vector_reduce_or:
case Intrinsic::vector_reduce_xor:
case Intrinsic::vector_reduce_smax:
case Intrinsic::vector_reduce_smin:
case Intrinsic::vector_reduce_fmax:
case Intrinsic::vector_reduce_fmin:
case Intrinsic::vector_reduce_fmaximum:
case Intrinsic::vector_reduce_fminimum:
case Intrinsic::vector_reduce_umax:
case Intrinsic::vector_reduce_umin: {
case Intrinsic::vector_reduce_fadd:
case Intrinsic::vector_reduce_fmul: {
IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
case Intrinsic::fshl:
case Intrinsic::fshr: {
const Value *X = Args[0];
const Value *Y = Args[1];
const Value *Z = Args[2];
thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
Cost += thisT()->getArithmeticInstrCost(
: BinaryOperator::URem,
{TTI::OK_UniformConstantValue, TTI::OP_None});
thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
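// --- Illustrative sketch (editor addition) ---
// The fshl/fshr costs above price the usual funnel-shift expansion:
// fshl(X, Y, Z) == (X << (Z % BW)) | (Y >> (BW - Z % BW)), with a compare and
// select guarding the zero-shift case. Scalar 32-bit version of the identity
// (the cost model charges urem, sub, shl, lshr, or, icmp and select):
#include <cstdint>

uint32_t fshl32(uint32_t X, uint32_t Y, uint32_t Z) {
  const uint32_t BW = 32;
  uint32_t Shamt = Z % BW;                    // URem by the uniform constant BW
  if (Shamt == 0)                             // ICmp + Select in the cost model
    return X;
  return (X << Shamt) | (Y >> (BW - Shamt));  // Shl, Sub, LShr, Or
}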
case Intrinsic::experimental_cttz_elts: {
if (!getTLI()->shouldExpandCttzElements(ArgType))
unsigned EltWidth = getTLI()->getBitWidthForCttzElements(
thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);
thisT()->getArithmeticInstrCost(Instruction::Sub, NewVecTy, CostKind);
Cost += thisT()->getCastInstrCost(Instruction::SExt, NewVecTy,
thisT()->getArithmeticInstrCost(Instruction::And, NewVecTy, CostKind);
NewEltTy, NewVecTy, FMF, I, 1);
Cost += thisT()->getTypeBasedIntrinsicInstrCost(ReducAttrs, CostKind);
thisT()->getArithmeticInstrCost(Instruction::Sub, NewEltTy, CostKind);
case Intrinsic::get_active_lane_mask:
case Intrinsic::experimental_vector_match:
case Intrinsic::experimental_vector_histogram_add:
case Intrinsic::experimental_vector_histogram_uadd_sat:
case Intrinsic::experimental_vector_histogram_umax:
case Intrinsic::experimental_vector_histogram_umin:
return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
case Intrinsic::modf:
case Intrinsic::sincos:
case Intrinsic::sincospi: {
std::optional<unsigned> CallRetElementIndex;
if (ICA.getID() == Intrinsic::modf)
CallRetElementIndex = 0;
if (auto Cost = getMultipleResultIntrinsicVectorLibCallCost(ICA, CostKind, CallRetElementIndex))
case Intrinsic::loop_dependence_war_mask:
case Intrinsic::loop_dependence_raw_mask: {
PtrTy->getAddressSpace()));
bool IsReadAfterWrite = IID == Intrinsic::loop_dependence_raw_mask;
thisT()->getArithmeticInstrCost(Instruction::Sub, IntPtrTy, CostKind);
if (IsReadAfterWrite) {
Cost += thisT()->getIntrinsicInstrCost(AbsAttrs, CostKind);
Cost += thisT()->getArithmeticInstrCost(Instruction::SDiv, IntPtrTy,
Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CondTy,
Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, IntPtrTy,
{IntPtrTy, IntPtrTy}, FMF);
Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
ScalarizationCost = 0;
filterConstantAndDuplicatedOperands(Args, ICA.getArgTypes()),
return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);

unsigned VecTyIndex = 0;
if (IID == Intrinsic::vector_reduce_fadd || IID == Intrinsic::vector_reduce_fmul)
assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
SkipScalarizationCost ? ScalarizationCostPassed : 0;
unsigned ScalarCalls = 1;
Type *ScalarRetTy = RetTy;
if (!SkipScalarizationCost)
ScalarCalls = std::max(ScalarCalls,
for (Type *Ty : Tys) {
if (!SkipScalarizationCost)
ScalarCalls = std::max(ScalarCalls,
Ty = Ty->getScalarType();
if (ScalarCalls == 1)
thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
return ScalarCalls * ScalarCost + ScalarizationCost;
case Intrinsic::sqrt:
case Intrinsic::sin:
case Intrinsic::cos:
case Intrinsic::sincos:
case Intrinsic::sincospi:
case Intrinsic::modf:
case Intrinsic::tan:
case Intrinsic::asin:
case Intrinsic::acos:
case Intrinsic::atan:
case Intrinsic::atan2:
case Intrinsic::sinh:
case Intrinsic::cosh:
case Intrinsic::tanh:
case Intrinsic::exp:
case Intrinsic::exp2:
case Intrinsic::exp10:
case Intrinsic::log:
case Intrinsic::log10:
case Intrinsic::log2:
case Intrinsic::ldexp:
case Intrinsic::fabs:
case Intrinsic::canonicalize:
case Intrinsic::minnum:
case Intrinsic::maxnum:
case Intrinsic::minimum:
case Intrinsic::maximum:
case Intrinsic::minimumnum:
case Intrinsic::maximumnum:
case Intrinsic::copysign:
case Intrinsic::floor:
case Intrinsic::ceil:
case Intrinsic::trunc:
case Intrinsic::nearbyint:
case Intrinsic::rint:
case Intrinsic::lrint:
case Intrinsic::llrint:
case Intrinsic::round:
case Intrinsic::roundeven:
case Intrinsic::lround:
case Intrinsic::llround:
case Intrinsic::pow:
case Intrinsic::fma:
case Intrinsic::fmuladd:
case Intrinsic::experimental_constrained_fmuladd:
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::sideeffect:
case Intrinsic::pseudoprobe:
case Intrinsic::arithmetic_fence:
case Intrinsic::masked_store: {
Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
return thisT()->getMemIntrinsicInstrCost(
case Intrinsic::masked_load: {
Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
return thisT()->getMemIntrinsicInstrCost(
case Intrinsic::experimental_vp_strided_store: {
Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
return thisT()->getMemIntrinsicInstrCost(
case Intrinsic::experimental_vp_strided_load: {
Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
return thisT()->getMemIntrinsicInstrCost(
case Intrinsic::vector_reduce_add:
case Intrinsic::vector_reduce_mul:
case Intrinsic::vector_reduce_and:
case Intrinsic::vector_reduce_or:
case Intrinsic::vector_reduce_xor:
return thisT()->getArithmeticReductionCost(
case Intrinsic::vector_reduce_fadd:
case Intrinsic::vector_reduce_fmul:
return thisT()->getArithmeticReductionCost(
case Intrinsic::vector_reduce_smax:
case Intrinsic::vector_reduce_smin:
case Intrinsic::vector_reduce_umax:
case Intrinsic::vector_reduce_umin:
case Intrinsic::vector_reduce_fmax:
case Intrinsic::vector_reduce_fmin:
case Intrinsic::vector_reduce_fmaximum:
case Intrinsic::vector_reduce_fminimum:
case Intrinsic::experimental_vector_match: {
unsigned SearchSize = NeedleTy->getNumElements();
EVT SearchVT = getTLI()->getValueType(DL, SearchTy);
if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize))
Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, NeedleTy,
Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SearchTy,
Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SearchTy, RetTy,
thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
thisT()->getArithmeticInstrCost(BinaryOperator::And, RetTy, CostKind);
case Intrinsic::vector_reverse:
case Intrinsic::experimental_vector_histogram_add:
case Intrinsic::experimental_vector_histogram_uadd_sat:
case Intrinsic::experimental_vector_histogram_umax:
case Intrinsic::experimental_vector_histogram_umin: {
Align Alignment = thisT()->DL.getABITypeAlign(EltTy);
Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, PtrsTy,
Cost += thisT()->getMemoryOpCost(Instruction::Load, EltTy, Alignment, 0,
case Intrinsic::experimental_vector_histogram_add:
thisT()->getArithmeticInstrCost(Instruction::Add, EltTy, CostKind);
case Intrinsic::experimental_vector_histogram_uadd_sat: {
Cost += thisT()->getIntrinsicInstrCost(UAddSat, CostKind);
case Intrinsic::experimental_vector_histogram_umax: {
case Intrinsic::experimental_vector_histogram_umin: {
Cost += thisT()->getMemoryOpCost(Instruction::Store, EltTy, Alignment, 0,
case Intrinsic::get_active_lane_mask: {
EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
EVT ArgVT = getTLI()->getValueType(DL, ArgTy, true);
if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgVT))
thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
case Intrinsic::experimental_memset_pattern:
case Intrinsic::abs:
case Intrinsic::fshl:
case Intrinsic::fshr:
case Intrinsic::smax:
case Intrinsic::smin:
case Intrinsic::umax:
case Intrinsic::umin:
case Intrinsic::sadd_sat:
case Intrinsic::ssub_sat:
case Intrinsic::uadd_sat:
case Intrinsic::usub_sat:
case Intrinsic::smul_fix:
case Intrinsic::umul_fix:
case Intrinsic::sadd_with_overflow:
case Intrinsic::ssub_with_overflow:
case Intrinsic::uadd_with_overflow:
case Intrinsic::usub_with_overflow:
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow:
case Intrinsic::fptosi_sat:
case Intrinsic::fptoui_sat: {
if (!SrcLT.first.isValid() || !RetLT.first.isValid())
case Intrinsic::ctpop:
case Intrinsic::ctlz:
case Intrinsic::cttz:
case Intrinsic::bswap:
case Intrinsic::bitreverse:
case Intrinsic::ucmp:
case Intrinsic::scmp:
Type *LegalizeTy = ST ? ST->getContainedType(0) : RetTy;
if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
return (LT.first * 2);
return (LT.first * 1);
return (LT.first * 2);
case Intrinsic::fmuladd: {
return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
case Intrinsic::experimental_constrained_fmuladd: {
Intrinsic::experimental_constrained_fmul, RetTy, Tys);
Intrinsic::experimental_constrained_fadd, RetTy, Tys);
return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) + thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
case Intrinsic::smin:
case Intrinsic::smax:
case Intrinsic::umin:
case Intrinsic::umax: {
bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
case Intrinsic::sadd_with_overflow:
case Intrinsic::ssub_with_overflow: {
unsigned Opcode = IID == Intrinsic::sadd_with_overflow ? BinaryOperator::Add : BinaryOperator::Sub;
Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2 * thisT()->getCmpSelInstrCost(Instruction::ICmp, SumTy, OverflowTy,
Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
case Intrinsic::uadd_with_overflow:
case Intrinsic::usub_with_overflow: {
unsigned Opcode = IID == Intrinsic::uadd_with_overflow ? BinaryOperator::Add : BinaryOperator::Sub;
Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow: {
bool IsSigned = IID == Intrinsic::smul_with_overflow;
unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
Cost += thisT()->getArithmeticInstrCost(
Cost += thisT()->getArithmeticInstrCost(Instruction::AShr, MulTy, CostKind,
Cost += thisT()->getCmpSelInstrCost(
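// --- Illustrative sketch (editor addition) ---
// The smul/umul_with_overflow costs above correspond to the extend-multiply
// expansion: widen both operands, multiply in the wider type, truncate back,
// and compare against the wide product to detect overflow. Signed 32-bit
// version using 64-bit arithmetic:
#include <cstdint>

bool smulWithOverflow32(int32_t A, int32_t B, int32_t &Product) {
  int64_t Wide = static_cast<int64_t>(A) * static_cast<int64_t>(B); // SExt + Mul
  Product = static_cast<int32_t>(Wide);                             // Trunc
  return Wide != static_cast<int64_t>(Product);                     // overflow check
}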
case Intrinsic::sadd_sat:
case Intrinsic::ssub_sat: {
? Intrinsic::sadd_with_overflow : Intrinsic::ssub_with_overflow;
nullptr, ScalarizationCostPassed);
Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
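// --- Illustrative sketch (editor addition) ---
// The sadd_sat/ssub_sat costs above are built from an overflow-checked
// add/sub plus compare-and-select clamping. A simplified standalone version
// that uses 64-bit arithmetic for the overflow check (the in-tree expansion
// uses the overflow intrinsic directly, so treat this only as the idea):
#include <cstdint>
#include <limits>

int32_t saddSat32(int32_t A, int32_t B) {
  int64_t Wide = static_cast<int64_t>(A) + B;        // sadd_with_overflow
  if (Wide > std::numeric_limits<int32_t>::max())    // ICmp + Select (clamp high)
    return std::numeric_limits<int32_t>::max();
  if (Wide < std::numeric_limits<int32_t>::min())    // second Select (clamp low)
    return std::numeric_limits<int32_t>::min();
  return static_cast<int32_t>(Wide);
}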
case Intrinsic::uadd_sat:
case Intrinsic::usub_sat: {
? Intrinsic::uadd_with_overflow : Intrinsic::usub_with_overflow;
nullptr, ScalarizationCostPassed);
Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
case Intrinsic::smul_fix:
case Intrinsic::umul_fix: {
IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
Cost += thisT()->getArithmeticInstrCost(
Cost += thisT()->getArithmeticInstrCost(
Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
case Intrinsic::abs: {
Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind,
case Intrinsic::fshl:
case Intrinsic::fshr: {
thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy, CostKind);
Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
Cost += thisT()->getArithmeticInstrCost(
: BinaryOperator::URem, RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_UniformConstantValue, TTI::OP_None});
Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
case Intrinsic::fptosi_sat:
case Intrinsic::fptoui_sat: {
Type *FromTy = Tys[0];
bool IsSigned = IID == Intrinsic::fptosi_sat;
Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
Cost += thisT()->getCastInstrCost(IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
Cost += thisT()->getCmpSelInstrCost(
Cost += thisT()->getCmpSelInstrCost(
case Intrinsic::ucmp:
case Intrinsic::scmp: {
Type *CmpTy = Tys[0];
thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2 * thisT()->getCastInstrCost(CastInst::ZExt, RetTy, CondTy,
Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
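// --- Illustrative sketch (editor addition) ---
// The ucmp/scmp costs above price a three-way compare: two compares whose
// boolean results are zero-extended and subtracted (or selected) to produce
// -1, 0, or 1. Scalar version of that identity:
int scmpSketch(int A, int B) {
  return static_cast<int>(A > B) - static_cast<int>(A < B); // 2x ICmp, 2x ZExt, Sub
}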
case Intrinsic::maximumnum:
case Intrinsic::minimumnum: {
thisT()->getIntrinsicInstrCost(FCanonicalizeAttrs, CostKind);
return LT.first + FCanonicalizeCost * 2;
if (!SkipScalarizationCost) {
ScalarizationCost = 0;
for (Type *RetVTy : RetVTys) {
for (Type *Ty : Tys) {
if (Ty->isVectorTy())
Ty = Ty->getScalarType();
thisT()->getIntrinsicInstrCost(Attrs, CostKind);
for (Type *Ty : Tys) {
ScalarCalls = std::max(ScalarCalls,
return ScalarCalls * ScalarCost + ScalarizationCost;
return SingleCallCost;

unsigned Id = MICA.getID();
case Intrinsic::experimental_vp_strided_load:
case Intrinsic::experimental_vp_strided_store: {
unsigned Opcode = Id == Intrinsic::experimental_vp_strided_load
: Instruction::Store;
return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
case Intrinsic::masked_scatter:
case Intrinsic::masked_gather:
case Intrinsic::vp_scatter:
case Intrinsic::vp_gather: {
unsigned Opcode = (MICA.getID() == Intrinsic::masked_gather || MICA.getID() == Intrinsic::vp_gather)
: Instruction::Store;
return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
case Intrinsic::vp_load:
case Intrinsic::vp_store:
case Intrinsic::masked_load:
case Intrinsic::masked_store: {
Id == Intrinsic::masked_load ? Instruction::Load : Instruction::Store;
return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
case Intrinsic::masked_compressstore:
case Intrinsic::masked_expandload: {
unsigned Opcode = MICA.getID() == Intrinsic::masked_expandload
: Instruction::Store;
return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
case Intrinsic::vp_load_ff:

if (!LT.first.isValid())
Tp && LT.second.isFixedLengthVector() &&
return divideCeil(FTp->getNumElements(), SubTp->getNumElements());
return LT.first.getValue();

Type *ScalarTy = Ty->getElementType();
if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
unsigned NumReduxLevels = Log2_32(NumVecElts);
std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
unsigned LongVectorCount = 0;
LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
while (NumVecElts > MVTLen) {
ShuffleCost += thisT()->getShuffleCost(
ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
NumReduxLevels -= LongVectorCount;
NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
return ShuffleCost + ArithCost + thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
return ExtractCost + ArithCost;
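// --- Illustrative sketch (editor addition) ---
// The tree-reduction costs above follow a log2(N) pattern: each level pairs
// a shuffle (moving the upper half down) with one vector arithmetic op, and
// a final extract reads lane 0. A hedged standalone model with per-level
// costs passed in as parameters:
unsigned treeReductionCostSketch(unsigned NumVecElts, unsigned ShuffleCost,
                                 unsigned ArithCost, unsigned ExtractCost) {
  unsigned NumReduxLevels = 0;
  for (unsigned N = NumVecElts; N > 1; N /= 2)
    ++NumReduxLevels;                                // floor(log2(NumVecElts))
  return NumReduxLevels * (ShuffleCost + ArithCost) + ExtractCost;
}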
std::optional<FastMathFlags> FMF,
assert(Ty && "Unknown reduction vector type");
Type *ScalarTy = Ty->getElementType();
unsigned NumReduxLevels = Log2_32(NumVecElts);
std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
unsigned LongVectorCount = 0;
LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
while (NumVecElts > MVTLen) {
ShuffleCost += thisT()->getShuffleCost(
NumReduxLevels -= LongVectorCount;
return ShuffleCost + MinMaxCost + thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,

VectorType *Ty, std::optional<FastMathFlags> FMF,
FTy && IsUnsigned && Opcode == Instruction::Add &&
return thisT()->getCastInstrCost(Instruction::BitCast, IntTy, FTy,
thisT()->getIntrinsicInstrCost(ICA, CostKind);
thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
return RedCost + ExtCost;

assert((RedOpcode == Instruction::Add || RedOpcode == Instruction::Sub) && "The reduction opcode is expected to be Add or Sub.");
RedOpcode, ExtTy, std::nullopt, CostKind);
IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
return RedCost + MulCost + 2 * ExtCost;
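// --- Illustrative sketch (editor addition) ---
// getMulAccReductionCost above composes its result as an extended add/sub
// reduction plus one extending cast per multiplicand (hence 2 * ExtCost) plus
// the vector multiply in the extended type:
unsigned mulAccReductionCostSketch(unsigned RedCost, unsigned MulCost,
                                   unsigned ExtCost) {
  return RedCost + MulCost + 2 * ExtCost;
}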
size_type count() const
count - Returns the number of bits which are set.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLE
signed less or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static CmpInst::Predicate getGTPredicate(Intrinsic::ID ID)
static CmpInst::Predicate getLTPredicate(Intrinsic::ID ID)
This class represents a range of values.
A parsed version of the target data layout string in and methods for querying it.
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getFixed(ScalarTy MinVal)
constexpr bool isScalar() const
Exactly one element.
Convenience struct for specifying and reasoning about fast-math flags.
Container class for subtarget features.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
AttributeList getAttributes() const
Return the attribute list for this Function.
The core instruction combiner logic.
static InstructionCost getInvalid(CostType Val=0)
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
FastMathFlags getFlags() const
const TargetLibraryInfo * getLibInfo() const
const SmallVectorImpl< Type * > & getArgTypes() const
Type * getReturnType() const
bool skipScalarizationCost() const
const SmallVectorImpl< const Value * > & getArgs() const
InstructionCost getScalarizationCost() const
const IntrinsicInst * getInst() const
Intrinsic::ID getID() const
bool isTypeBasedOnly() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Represents a single loop in the control flow graph.
const FeatureBitset & getFeatureBits() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Information for memory intrinsic cost model.
Align getAlignment() const
Type * getDataType() const
bool getVariableMask() const
Intrinsic::ID getID() const
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Analysis providing profile information.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
static StackOffset getScalable(int64_t Scalable)
static StackOffset getFixed(int64_t Fixed)
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Provides information about what library functions are available for the current target.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from the GlobalISel pipeline.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with custom lowering.
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used to make them valid.
virtual bool preferSelectsOverBooleanArithmetic(EVT VT) const
Should we prefer selects to doing arithmetic on boolean types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the value to ToTy in the result register.
@ TypeScalarizeScalableVector
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumC...
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool isSuitableForBitTests(const DenseMap< const BasicBlock *, unsigned int > &DestCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests ...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal'), we need to promote it to a larger type (return 'Promote'), or we need to expand it into multiple registers of smaller integer type (return 'Expand').
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with a bitwise operation.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering or using promotion.
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
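A hedged sketch of how the legality queries above typically feed a cost estimate: legal or custom operations get a cheap cost, operations that must be expanded get a penalty. The helper name and the numeric costs are assumptions, not values from this header.

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/InstructionCost.h"
using namespace llvm;

static InstructionCost roughOpCost(const TargetLoweringBase &TLI,
                                   const DataLayout &DL, unsigned ISDOpc,
                                   Type *Ty) {
  EVT VT = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
  if (!VT.isSimple())
    return InstructionCost::getInvalid();
  if (TLI.isOperationLegalOrCustom(ISDOpc, VT))
    return 1; // assumed single-instruction cost
  if (TLI.isOperationExpand(ISDOpc, VT))
    return 4; // assumed expansion penalty
  return 2;   // promoted or otherwise handled
}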
Primary interface to the complete machine description for the target machine.
bool isPositionIndependent() const
const Triple & getTargetTriple() const
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInfo-derived member variable.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetSubtargetInfo - Generic base class for all target subtargets.
Triple - Helper class for working with autoconf configuration names.
ArchType getArch() const
Get the parsed architecture type of this triple.
LLVM_ABI bool isArch64Bit() const
Test whether the architecture is 64-bit.
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
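A minimal example of the Triple queries above; the predicate name is illustrative, and the header path assumes a recent tree where Triple lives under TargetParser.

#include "llvm/TargetParser/Triple.h"
using namespace llvm;

static bool is64BitDarwin(const Triple &TT) {
  return TT.isOSDarwin() && TT.isArch64Bit();
}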
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old number of lanes.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
bool isVoidTy() const
Return true if this is 'void'.
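A hedged sketch combining several of the Type helpers above: widen integer lanes to at least 32 bits while keeping the lane count; the helper name and the 32-bit threshold are arbitrary.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
using namespace llvm;

static Type *widenIntLanesTo32(Type *Ty) {
  // getWithNewBitWidth expects an integer or integer-vector type.
  if (!Ty->isIntOrIntVectorTy() || Ty->getScalarSizeInBits() >= 32)
    return Ty;
  return Ty->getWithNewBitWidth(32);
}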
Value * getOperand(unsigned i) const
static LLVM_ABI bool isVPBinOp(Intrinsic::ID ID)
static LLVM_ABI bool isVPCast(Intrinsic::ID ID)
static LLVM_ABI bool isVPCmp(Intrinsic::ID ID)
static LLVM_ABI std::optional< unsigned > getFunctionalOpcodeForVP(Intrinsic::ID ID)
static LLVM_ABI std::optional< Intrinsic::ID > getFunctionalIntrinsicIDForVP(Intrinsic::ID ID)
static LLVM_ABI bool isVPIntrinsic(Intrinsic::ID)
static LLVM_ABI bool isVPReduction(Intrinsic::ID ID)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
Base class of all SIMD vector types.
static VectorType * getHalfElementsVectorType(VectorType *VTy)
This static method returns a VectorType with half as many elements as the input type and the same element type.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
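A short illustration of the VectorType helper above: halve a vector type, assuming the caller has already checked that the (known-minimum) element count is even.

#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

static VectorType *halveVectorTy(VectorType *VTy) {
  // Preserves the element type and the fixed/scalable kind of the count.
  return VectorType::getHalfElementsVectorType(VTy);
}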
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
ISD namespace - This namespace contains an enum which represents all of the SelectionDAG node types a...
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same width and scale.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FMODF
FMODF - Decomposes the operand into integral and fractional parts, each having the same type and sign as the operand.
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ FSINCOSPI
FSINCOSPI - Compute both the sine and cosine times pi more accurately than FSINCOS(pi*x),...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width (W).
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ SMULO
Same for multiplication.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W).
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
LLVM_ABI bool isTargetIntrinsic(ID IID)
isTargetIntrinsic - Returns true if IID is an intrinsic specific to a certain target.
LLVM_ABI Libcall getSINCOSPI(EVT RetVT)
getSINCOSPI - Return the SINCOSPI_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getMODF(EVT VT)
getMODF - Return the MODF_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getSINCOS(EVT RetVT)
getSINCOS - Return the SINCOS_* value for the given types, or UNKNOWN_LIBCALL if there is none.
DiagnosticInfoOptimizationBase::Argument NV
friend class Instruction
Iterator for Instructions in a BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), such that A is the 0-based index of the item in the sequence, and B, C, ..., are the values from the original input ranges.
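A hedged sketch of the range helpers above: enumerate() pairs each mask element with its index and all_of() checks them; the identity-mask predicate itself is only an illustration.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;

static bool isIdentityMaskSketch(ArrayRef<int> Mask) {
  return all_of(enumerate(Mask), [](const auto &P) {
    // Negative entries model undef/poison lanes and are ignored here.
    return P.value() < 0 || static_cast<size_t>(P.value()) == P.index();
  });
}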
Type * toScalarizedTy(Type *Ty)
A helper for converting vectorized types to scalarized (non-vector) types.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case of optionals) value is accepted.
LLVM_ABI unsigned getArithmeticReductionInstruction(Intrinsic::ID RdxID)
Returns the arithmetic instruction opcode used when expanding a reduction.
bool isVectorizedTy(Type *Ty)
Returns true if Ty is a vector type or a struct of vector types where all vector types share the same VF.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
auto dyn_cast_or_null(const Y &Val)
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
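A worked example of the two bit-math helpers above: for a power-of-two VF, Log2_32 gives the number of halving steps of a tree reduction (VF = 8 needs 3 steps); the helper name is illustrative.

#include "llvm/Support/MathExtras.h"
#include <cassert>
using namespace llvm;

static unsigned treeReductionSteps(unsigned VF) {
  assert(isPowerOf2_32(VF) && "expected a power-of-two factor");
  return Log2_32(VF);
}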
ElementCount getVectorizedTypeVF(Type *Ty)
Returns the number of vector elements for a vectorized type.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range function attribute.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
constexpr int PoisonMaskElem
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
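A minimal sketch of the casting utilities above applied to an IR type; the helper name is made up.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// dyn_cast yields nullptr when Ty is not a FixedVectorType.
static unsigned fixedNumEltsOrZero(Type *Ty) {
  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
    return FVTy->getNumElements();
  return 0;
}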
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
cl::opt< unsigned > PartialUnrollingThreshold
LLVM_ABI bool isVectorizedStructTy(StructType *StructTy)
Returns true if StructTy is an unpacked literal struct where all elements are vectors of matching element count.
This struct is a compact representation of a valid (non-zero power of two) alignment.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
ElementCount getVectorElementCount() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
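A hedged sketch of the EVT helpers above: map an IR type to an EVT and read back an element count, treating scalars as a single fixed lane; the helper name is an assumption.

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/TypeSize.h"
using namespace llvm;

static ElementCount elementCountOf(Type *Ty) {
  EVT VT = EVT::getEVT(Ty, /*HandleUnknown=*/true);
  return VT.isVector() ? VT.getVectorElementCount() : ElementCount::getFixed(1);
}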
Attributes of a target dependent hardware loop.
static bool hasVectorMaskArgument(RTLIB::LibcallImpl Impl)
Returns true if the function has a vector mask argument, which is assumed to be the last argument.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.