#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H
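  // CRTP helper: cast the base class down to the concrete target TTI
  // implementation so that cost hooks dispatch statically, without
  // virtual calls.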
  const T *thisT() const { return static_cast<const T *>(this); }
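  // Broadcast shuffles are modeled as one extract of lane 0 plus an insert
  // into every result lane; generic permutes as one extract/insert pair
  // per lane.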
    Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                        CostKind, 0, nullptr, nullptr);
    Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
                                        CostKind, i, nullptr, nullptr);
    // ...
    Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
                                        CostKind, i, nullptr, nullptr);
    Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                        CostKind, i, nullptr, nullptr);
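  // Subvector extract/insert overhead: one scalar extract plus one scalar
  // insert per subvector lane, offset by Index into the wide vector.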
138 "Can only extract subvectors from vectors");
141 (Index + NumSubElts) <=
143 "SK_ExtractSubvector index out of range");
149 for (
int i = 0; i != NumSubElts; ++i) {
151 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
152 CostKind, i + Index,
nullptr,
nullptr);
153 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
166 "Can only insert subvectors into vectors");
169 (Index + NumSubElts) <=
171 "SK_InsertSubvector index out of range");
177 for (
int i = 0; i != NumSubElts; ++i) {
178 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
181 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
CostKind,
182 i + Index,
nullptr,
nullptr);
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }
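  // Common cost model for masked, gather/scatter, strided and
  // expanding/compressing memory ops: scalarize into VF per-element
  // accesses, add packing overhead, and charge a branch + phi per lane
  // when the mask is variable.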
                                              bool IsGatherScatter,
    // ...
    unsigned VF = VT->getNumElements();
    // ...
    InstructionCost MemoryOpCost =
        VF * thisT()->getMemoryOpCost(Opcode, VT->getElementType(), Alignment,
                                      AddressSpace, CostKind);
    // ...
        /*Extract=*/Opcode == Instruction::Store, CostKind);
    // ...
    ConditionalCost =
        VF * (thisT()->getCFInstrCost(Instruction::CondBr, CostKind) +
              thisT()->getCFInstrCost(Instruction::PHI, CostKind));
    // ...
    return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
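  // Returns true when the mask selects the same source lane for every
  // non-poison element, reporting that lane through Index.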
  static bool isSplatMask(ArrayRef<int> Mask, unsigned NumSrcElts, int &Index) {
    // ...
    bool IsCompared = false;
    // ...
        return P.index() != Mask.size() - 1 || IsCompared;
      if (static_cast<unsigned>(P.value()) >= NumSrcElts * 2)
        return false;
      // ...
        SplatIdx = P.value();
        return P.index() != Mask.size() - 1;
      // ...
      return SplatIdx == P.value();
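  // Costs multi-result FP intrinsics (modf/sincos/sincospi) as a vector
  // libcall when the target provides one: the call itself plus loads of
  // the results returned through out-pointers.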
  std::optional<InstructionCost> getMultipleResultIntrinsicVectorLibCallCost(
      const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind,
      std::optional<unsigned> CallRetElementIndex = {}) const {
    // ...
    EVT VT = getTLI()->getValueType(DL, Ty);
    RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
    switch (ICA.getID()) {
    case Intrinsic::modf:
      // ...
    case Intrinsic::sincospi:
      // ...
    case Intrinsic::sincos:
      // ...
    }
    RTLIB::LibcallImpl LibcallImpl = getTLI()->getLibcallImpl(LC);
    if (LibcallImpl == RTLIB::Unsupported)
      return std::nullopt;
    // ...
        VecTy, {}, CostKind, 0, nullptr, {});
    // ...
      if (Idx == CallRetElementIndex)
        continue;
      Cost += thisT()->getMemoryOpCost(
          Instruction::Load, VectorTy, /* ... */);
                                      unsigned *Fast) const override {
    // ...
    return getTLI()->allowsMisalignedMemoryAccesses(/* ... */);
  }

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override {
    // Inlining is compatible when the callee's subtarget feature bits are
    // a subset of the caller's.
    // ...
    return (CallerBits & CalleeBits) == CalleeBits;
  }

  bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
    return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
  }

  unsigned getAssumedAddrSpace(const Value *V) const override {
    return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
  }

  bool isSingleThreaded() const override {
    return getTLI()->getTargetMachine().Options.ThreadModel ==
           ThreadModel::Single;
  }

  std::pair<const Value *, unsigned>
  getPredicatedAddrSpace(const Value *V) const override {
    return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
  }

  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const override {
    // ...
  }

  bool isLegalAddImmediate(int64_t imm) const override {
    return getTLI()->isLegalAddImmediate(imm);
  }

  bool isLegalAddScalableImmediate(int64_t Imm) const override {
    return getTLI()->isLegalAddScalableImmediate(Imm);
  }

  bool isLegalICmpImmediate(int64_t imm) const override {
    return getTLI()->isLegalICmpImmediate(imm);
  }
                             bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
                             Instruction *I = nullptr,
                             int64_t ScalableOffset = 0) const override {
    // ...
    return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
  }

  int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) {
    return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);
  }
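  // Find the smallest profitable store VF by halving while the half-width
  // vector store (direct or truncating) is still legal on the target.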
                             unsigned AddrSpace) const override {
    auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy, Alignment,
                                  AddrSpace](unsigned VF) {
      // ...
      EVT VT = getTLI()->getValueType(DL, SrcTy);
      if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
          getTLI()->isOperationCustom(ISD::STORE, VT))
        return true;
      // ...
      EVT LegalizedVT =
          getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
      return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT, Alignment,
                                         AddrSpace);
    };
    while (VF > 2 && IsSupportedByTarget(VF))
      VF /= 2;
    return VF;
  bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty) const override {
    EVT VT = getTLI()->getValueType(DL, Ty, true);
    return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
  }

  bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty) const override {
    EVT VT = getTLI()->getValueType(DL, Ty, true);
    return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
  }
                                       unsigned AddrSpace) const override {
    // ...
  }

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override {
    return getTLI()->isTruncateFree(Ty1, Ty2);
  }

  bool isProfitableToHoist(Instruction *I) const override {
    return getTLI()->isProfitableToHoist(I);
  }

  bool useAA() const override { return getST()->useAA(); }

  bool isTypeLegal(Type *Ty) const override {
    EVT VT = getTLI()->getValueType(DL, Ty, true);
    return getTLI()->isTypeLegal(VT);
  }

  unsigned getRegUsageForType(Type *Ty) const override {
    EVT ETy = getTLI()->getValueType(DL, Ty);
    return getTLI()->getNumRegisters(Ty->getContext(), ETy);
  }
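  // Estimate how many case clusters a switch lowers to: 1 if the cases fit
  // a bit test or a dense jump table, otherwise roughly one per case.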
    unsigned N = SI.getNumCases();
    // ...
    if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
      return N;

    APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
    APInt MinCaseVal = MaxCaseVal;
    for (auto CI : SI.cases()) {
      const APInt &CaseVal = CI.getCaseValue()->getValue();
      if (CaseVal.sgt(MaxCaseVal))
        MaxCaseVal = CaseVal;
      if (CaseVal.slt(MinCaseVal))
        MinCaseVal = CaseVal;
    }

    // Check if suitable for a bit test.
    if (N <= DL.getIndexSizeInBits(0u)) {
      // ...
      for (auto I : SI.cases())
        Dests.insert(I.getCaseSuccessor());
      // ...
    }

    // Check if suitable for a jump table.
    if (N < 2 || N < TLI->getMinimumJumpTableEntries())
      return N;
    uint64_t Range =
        (MaxCaseVal - MinCaseVal)
            .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
    // A dense enough range becomes a single jump-table cluster.
    // ...
    JumpTableSize = Range;
    return 1;
                                      const Function &Fn) const override {
    // ...
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem: {
      // ...
    }
    // ...
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    // ...
           << "advising against unrolling the loop because it "
    // ...
  std::optional<Instruction *>
  instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override {
    // ...
  }

  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const override {
    // ...
  }

  // ...
                      SimplifyAndSetOp) const override {
    return BaseT::simplifyDemandedVectorEltsIntrinsic(
        IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
        SimplifyAndSetOp);
  }

  std::optional<unsigned>
  getCacheSize(TargetTransformInfo::CacheLevel Level) const override {
    return std::optional<unsigned>(
        getST()->getCacheSize(static_cast<unsigned>(Level)));
  }

  std::optional<unsigned>
  getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const override {
    std::optional<unsigned> TargetResult =
        getST()->getCacheAssociativity(static_cast<unsigned>(Level));
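  // Cache and prefetch tuning queries all forward to the subtarget.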
  unsigned getCacheLineSize() const override {
    return getST()->getCacheLineSize();
  }

  unsigned getPrefetchDistance() const override {
    return getST()->getPrefetchDistance();
  }

  unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches,
                                bool HasCall) const override {
    return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                         NumPrefetches, HasCall);
  }

  unsigned getMaxPrefetchIterationsAhead() const override {
    return getST()->getMaxPrefetchIterationsAhead();
  }

  bool enableWritePrefetching() const override {
    return getST()->enableWritePrefetching();
  }

  bool shouldPrefetchAddressSpace(unsigned AS) const override {
    return getST()->shouldPrefetchAddressSpace(AS);
  }

  std::optional<unsigned> getMaxVScale() const override { return std::nullopt; }
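  // Scalarization overhead: one insert per demanded element that must be
  // assembled into a vector and one extract per demanded element that must
  // be read back out.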
                                           bool Insert, bool Extract,
    // ...
    assert((VL.empty() || VL.size() == Ty->getNumElements()) &&
           "Vector size mismatch");
    // ...
    for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
      if (!DemandedElts[i])
        continue;
      if (Insert) {
        Value *InsertedVal = VL.empty() ? nullptr : VL[i];
        Cost +=
            thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
                                        CostKind, i, nullptr, InsertedVal, VIC);
      }
      if (Extract)
        Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                            CostKind, i, nullptr, nullptr, VIC);
    }
  bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                          unsigned ScalarOpdIdx) const override {
    return false;
  }

  bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
                                              int OpdIdx) const override {
    return OpdIdx == -1;
  }

  bool isTargetIntrinsicWithStructReturnOverloadAtField(
      Intrinsic::ID ID, int RetIdx) const override {
    return RetIdx == 0;
  }
  // ...
    return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                             CostKind, ForPoisonSrc, VL, VIC);
  // ...
    for (Type *Ty : Tys) {
      // ...
      if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
          !Ty->isPtrOrPtrVectorTy())
        continue;
  // ...
        filterConstantAndDuplicatedOperands(Args, Tys), CostKind);
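  // getTypeLegalizationCost: walk the legalization steps for Ty, doubling
  // the cost at each step until the type becomes legal.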
    EVT MTy = getTLI()->getValueType(DL, Ty);
    // ...
      if (MTy == LK.second)
    // ...

                                  const Instruction *CxtI = nullptr) const override {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");
    // ...
    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      return LT.first * OpCost;
    }
    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // Custom-lowered operations are assumed to be roughly twice as
      // expensive.
      return LT.first * 2 * OpCost;
    }
    // ...
    // An expanded srem/urem is priced as div + mul + sub.
    unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
    InstructionCost DivCost = thisT()->getArithmeticInstrCost(
        DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
    InstructionCost MulCost =
        thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
    InstructionCost SubCost =
        thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
    return DivCost + MulCost + SubCost;
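  // improveShuffleKindFromMask: refine a generic shuffle kind into
  // broadcast, select, transpose, splice or subvector extract/insert by
  // pattern-matching the constant mask.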
    int NumDstElts = Mask.size();
    int NumSrcElts = SrcTy->getElementCount().getKnownMinValue();
    // ...
    if (isSplatMask(Mask, NumSrcElts, Index))
      return TTI::SK_Broadcast;
    // ...
        (Index + NumDstElts) <= NumSrcElts) {
      // ...
    }
    // ...
    if (all_of(Mask, [NumSrcElts](int M) { return M < NumSrcElts; }))
      // ...
    // ...
        Mask, NumSrcElts, NumSubElts, Index)) {
      if (Index + NumSubElts > NumSrcElts)
                                 const Instruction *CxtI = nullptr) const override {
    // ...
    switch (improveShuffleKindFromMask(Kind, Mask, SrcTy, Index, SubTp)) {
    case TTI::SK_Broadcast:
      // ...
      return getBroadcastShuffleOverhead(FVT, CostKind);
    // ...
      return getPermuteShuffleOverhead(FVT, CostKind);
    case TTI::SK_ExtractSubvector:
      return getExtractSubvectorOverhead(SrcTy, CostKind, Index,
                                         cast<FixedVectorType>(SubTp));
    case TTI::SK_InsertSubvector:
      return getInsertSubvectorOverhead(DstTy, CostKind, Index,
                                        cast<FixedVectorType>(SubTp));
    }
    TypeSize SrcSize = SrcLT.second.getSizeInBits();
    TypeSize DstSize = DstLT.second.getSizeInBits();
    bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
    bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();

    switch (Opcode) {
    case Instruction::Trunc:
      // ...
    case Instruction::BitCast:
      // Bitcast between types that are legalized to the same type is free,
      // and int<->ptr of the same size is assumed free as well.
      if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
          SrcSize == DstSize)
        return 0;
      break;
    case Instruction::FPExt:
      if (I && getTLI()->isExtFree(I))
        return 0;
      break;
    case Instruction::ZExt:
      if (TLI->isZExtFree(SrcLT.second, DstLT.second))
        return 0;
      [[fallthrough]];
    case Instruction::SExt:
      if (I && getTLI()->isExtFree(I))
        return 0;
      // An extending load of a legal type is itself free.
      if (DstLT.first == SrcLT.first &&
          // ...
          LI->getPointerAddressSpace(), LType, false))
        // ...
      switch (II->getIntrinsicID()) {
      case Intrinsic::masked_load: {
        Type *PtrType = II->getArgOperand(0)->getType();
        // ...
        if (DstLT.first == SrcLT.first &&
            // ...
            ExtVT, LoadVT, II->getParamAlign(0).valueOrOne(),
            // ...
      }
      // ...
      break;
    case Instruction::AddrSpaceCast:
      if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
                                   Dst->getPointerAddressSpace()))
        return 0;
      break;
    }
    // ...
    if (SrcLT.first == DstLT.first &&
    // ...

    // Handle scalar conversions.
    if (!SrcVTy && !DstVTy) {
      // ...
    }

    // Handle vector conversions.
    if (DstVTy && SrcVTy) {
      // If the cast is between same-sized registers, the check is simple.
      if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
        // Assume that ZExt is done using AND.
        if (Opcode == Instruction::ZExt)
          return SrcLT.first * 1;
        // Assume that SExt is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return SrcLT.first * 2;
        // ...
        return SrcLT.first * 1;
      }
      // ...
      if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isKnownEven() &&
          DstVTy->getElementCount().isKnownEven()) {
        // Split in half, cast each half, and pay for the split itself when
        // only one side needs splitting.
        const T *TTI = thisT();
        InstructionCost SplitCost =
            (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
        return SplitCost +
               (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
                                          CostKind, I));
      }
      // ...
    }
    // Otherwise scalarize: one cast per element.
    // ...
        Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
    if (Opcode == Instruction::BitCast) {
      // ...
    }
    // getExtractWithExtendCost: extract the element, then extend it.
    return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                       CostKind, Index, nullptr, nullptr) +
           thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
                                     TTI::CastContextHint::None, CostKind);
                                     const Instruction *I = nullptr) const override {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");
    // ...
                                           Op1Info, Op2Info, I);
    // ...
    assert(CondTy && "CondTy must exist");
    if (CondTy->isVectorTy())
    // ...
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply by the
      // type-legalization overhead.
      return LT.first * 1;
    }
    // Otherwise scalarize the compare/select.
    // ...
        Opcode, ValVTy->getScalarType(), CondTy->getScalarType(), VecPred,
                                     unsigned Index, const Value *Op0,
                                     const Value *Op1,
    // ...
      ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx,
    // ...
    Value *Op0 = nullptr;
    Value *Op1 = nullptr;
    if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
      Op0 = IE->getOperand(0);
      Op1 = IE->getOperand(1);
    }
    return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
                                       Op1, VIC);
  }
  // ...
                                   unsigned Index) const override {
    unsigned NewIndex = -1;
    if (auto *FVTy = dyn_cast<FixedVectorType>(Val)) {
      assert(Index < FVTy->getNumElements() &&
             "Unexpected index from end of vector");
      NewIndex = FVTy->getNumElements() - 1 - Index;
    }
    return thisT()->getVectorInstrCost(Opcode, Val, CostKind, NewIndex, nullptr,
                                       nullptr, /* ... */);
                                            const APInt &DemandedDstElts,
    // ...
           "Unexpected size of DemandedDstElts.");
    // ...
    Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
                                              /*Insert=*/false,
                                              /*Extract=*/true, CostKind);
    Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
                                              /*Insert=*/true,
                                              /*Extract=*/false, CostKind);
    // ...
    assert(!Src->isVoidTy() && "Invalid type");
    // ...
        LT.second.getSizeInBits())) {
    // ...
      if (Opcode == Instruction::Store)
    // ...
                                          Opcode == Instruction::Store, CostKind);
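  // Interleaved access cost = the wide (possibly masked) memory op, plus
  // per-lane scalarization to (de)interleave the Factor sub-vectors, plus
  // mask replication and gap masking when a mask is used.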
                                           bool UseMaskForCond = false,
                                           bool UseMaskForGaps = false) const override {
    // ...
    unsigned NumElts = VT->getNumElements();
    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
    unsigned NumSubElts = NumElts / Factor;
    // ...
    // Firstly, the cost of the load/store operation.
    InstructionCost Cost;
    if (UseMaskForCond || UseMaskForGaps) {
      unsigned IID = Opcode == Instruction::Load ? Intrinsic::masked_load
                                                 : Intrinsic::masked_store;
      Cost = thisT()->getMemIntrinsicInstrCost(/* ... */, CostKind);
    }
    // ...
    unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
    // ...
    if (Cost.isValid() && VecTySize > VecTyLTSize) {
      // The legalized type is split into several parts; count only the
      // parts that the requested indices actually touch.
      unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
      unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
      BitVector UsedInsts(NumLegalInsts, false);
      for (unsigned Index : Indices)
        for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
          UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
      // ...
    }
    // ...
           "Interleaved memory op has too many members");
    // ...
    for (unsigned Index : Indices) {
      assert(Index < Factor && "Invalid index for interleaved memory op");
      for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
        DemandedLoadStoreElts.setBit(Index + Elm * Factor);
    }

    if (Opcode == Instruction::Load) {
      // ...
      InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
          SubVT, DemandedAllSubElts, /*Insert=*/true, /*Extract=*/false,
          CostKind);
      Cost += Indices.size() * InsSubCost;
      Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
                                                /*Insert=*/false,
                                                /*Extract=*/true, CostKind);
    } else {
      // ...
      InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
          SubVT, DemandedAllSubElts, /*Insert=*/false, /*Extract=*/true,
          CostKind);
      Cost += ExtSubCost * Indices.size();
      Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
                                                /*Insert=*/true,
                                                /*Extract=*/false, CostKind);
    }

    if (!UseMaskForCond)
      return Cost;
    // ...
    Cost += thisT()->getReplicationShuffleCost(
        I8Type, Factor, NumSubElts,
        UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
        CostKind);
    // ...
    // The gaps mask is invariant; add the cost of AND-ing it in.
    if (UseMaskForGaps) {
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
                                              CostKind);
    }
    return Cost;
  }
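    // VP intrinsics with a functional (unpredicated) equivalent are priced
    // as that equivalent: vp.load/vp.store as plain memory ops, VP binary
    // ops as arithmetic, and the rest by rewriting to the non-VP intrinsic
    // without the mask and EVL operands.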
    std::optional<unsigned> FOp =
        VPIntrinsic::getFunctionalOpcodeForVP(ICA.getID());
    if (FOp) {
      if (ICA.getID() == Intrinsic::vp_load) {
        Align Alignment;
        if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
          Alignment = VPI->getPointerAlignment().valueOrOne();
        unsigned AS = 0;
        // ...
          AS = PtrTy->getAddressSpace();
        return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
                                        AS, CostKind);
      }
      if (ICA.getID() == Intrinsic::vp_store) {
        Align Alignment;
        if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
          Alignment = VPI->getPointerAlignment().valueOrOne();
        unsigned AS = 0;
        // ...
          AS = PtrTy->getAddressSpace();
        return thisT()->getMemoryOpCost(*FOp, ICA.getArgTypes()[0], Alignment,
                                        AS, CostKind);
      }
      if (/* ... */ ||
          ICA.getID() == Intrinsic::vp_fneg) {
        return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
                                               CostKind);
      }
      // ...
        return thisT()->getCastInstrCost(/* ... */);
      // ...
        return thisT()->getCmpSelInstrCost(*FOp, ICA.getArgTypes()[0],
                                           /* ... */);
    }

    if (ICA.getID() == Intrinsic::vp_load_ff) {
      Align Alignment;
      if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
        Alignment = VPI->getPointerAlignment().valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(/* ... */, CostKind);
    }
    if (ICA.getID() == Intrinsic::vp_scatter) {
      // ...
      Alignment = VPI->getPointerAlignment().valueOrOne();
      // ...
      return thisT()->getMemIntrinsicInstrCost(
          MemIntrinsicCostAttributes(/* ... */, VarMask, Alignment, nullptr),
          CostKind);
    }
    if (ICA.getID() == Intrinsic::vp_gather) {
      // ...
      Alignment = VPI->getPointerAlignment().valueOrOne();
      // ...
      return thisT()->getMemIntrinsicInstrCost(
          MemIntrinsicCostAttributes(/* ... */, VarMask, Alignment, nullptr),
          CostKind);
    }
    if (ICA.getID() == Intrinsic::vp_select ||
        ICA.getID() == Intrinsic::vp_merge) {
      // ...
    }

    std::optional<Intrinsic::ID> FID =
        VPIntrinsic::getFunctionalIntrinsicIDForVP(ICA.getID());
    if (ICA.getID() == Intrinsic::experimental_vp_reverse)
      FID = Intrinsic::vector_reverse;
    if (FID) {
      // The non-VP form takes the same arguments minus the mask and EVL.
      assert(ICA.getArgs().size() >= 2 &&
             "Expected VPIntrinsic to have Mask and Vector Length args and "
             "types");
      // ...
      if (*FID != Intrinsic::vector_reduce_fadd &&
          *FID != Intrinsic::vector_reduce_fmul) {
        // ...
      }
      // ...
      return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
    }
    case Intrinsic::powi:
      if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
        bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
        if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
                                               ShouldOptForSize)) {
          // Repeated-squaring expansion: a number of fmuls determined by
          // the exponent's bit pattern, plus one fdiv when it is negative.
          APInt Exponent = RHSC->getValue().abs();
          unsigned ActiveBits = Exponent.getActiveBits();
          unsigned PopCount = Exponent.popcount();
          InstructionCost Cost = (ActiveBits + PopCount - 2) *
                                 thisT()->getArithmeticInstrCost(
                                     Instruction::FMul, RetTy, CostKind);
          if (RHSC->isNegative())
            Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
                                                    CostKind);
          return Cost;
        }
      }
      break;
    case Intrinsic::cttz:
      // FIXME: If necessary, this should go in target-specific overrides.
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
        return TargetTransformInfo::TCC_Basic;
      break;
    case Intrinsic::ctlz:
      // FIXME: If necessary, this should go in target-specific overrides.
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
        return TargetTransformInfo::TCC_Basic;
      break;
    case Intrinsic::memcpy:
      return thisT()->getMemcpyCost(ICA.getInst());
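    // The masked and strided memory intrinsics below all funnel into
    // getMemIntrinsicInstrCost; a mask or EVL that is not a compile-time
    // constant marks the access as variably masked.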
    case Intrinsic::masked_scatter: {
      const Value *Mask = Args[2];
      bool VarMask = !isa<Constant>(Mask);
      Align Alignment = I->getParamAlign(1).valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(
          MemIntrinsicCostAttributes(/* ... */, VarMask, Alignment, I),
          CostKind);
    }
    case Intrinsic::masked_gather: {
      const Value *Mask = Args[1];
      bool VarMask = !isa<Constant>(Mask);
      Align Alignment = I->getParamAlign(0).valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(
          MemIntrinsicCostAttributes(/* ... */, VarMask, Alignment, I),
          CostKind);
    }
    case Intrinsic::masked_compressstore: {
      const Value *Mask = Args[2];
      Align Alignment = I->getParamAlign(1).valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(/* ... */, CostKind);
    }
    case Intrinsic::masked_expandload: {
      const Value *Mask = Args[1];
      Align Alignment = I->getParamAlign(0).valueOrOne();
      return thisT()->getMemIntrinsicInstrCost(/* ... */, CostKind);
    }
    case Intrinsic::experimental_vp_strided_store: {
      const Value *Ptr = Args[1];
      const Value *Mask = Args[3];
      const Value *EVL = Args[4];
      // ...
      Align Alignment =
          I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy));
      return thisT()->getMemIntrinsicInstrCost(/* ... */, CostKind);
    }
    case Intrinsic::experimental_vp_strided_load: {
      const Value *Ptr = Args[0];
      const Value *Mask = Args[2];
      const Value *EVL = Args[3];
      // ...
      Align Alignment =
          I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy));
      return thisT()->getMemIntrinsicInstrCost(/* ... */, CostKind);
    }
    case Intrinsic::stepvector: {
      // ...
    }
    case Intrinsic::vector_extract: {
      // ...
    }
    case Intrinsic::vector_insert: {
      // ...
      return thisT()->getShuffleCost(/* ... */);
    }
    case Intrinsic::vector_splice_left:
    case Intrinsic::vector_splice_right: {
      // ...
      unsigned Index = COffset->getZExtValue();
      return thisT()->getShuffleCost(
          /* ... */
          IID == Intrinsic::vector_splice_left ? Index : -Index,
          /* ... */);
    }
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
    case Intrinsic::vector_reduce_fmaximum:
    case Intrinsic::vector_reduce_fminimum:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin: {
      IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
      return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
    }
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul: {
      IntrinsicCostAttributes Attrs(
          IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
      return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr: {
      const Value *X = Args[0];
      const Value *Y = Args[1];
      const Value *Z = Args[2];
      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
      const TTI::OperandValueInfo OpInfoX = TTI::getOperandInfo(X);
      const TTI::OperandValueInfo OpInfoY = TTI::getOperandInfo(Y);
      const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(Z);
      InstructionCost Cost = 0;
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
          {OpInfoZ.Kind, TTI::OP_None});
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
          {OpInfoZ.Kind, TTI::OP_None});
      // ...
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
                                              CostKind);
      // Non-constant shift amounts need a modulo; a power-of-two bit width
      // lowers it to an AND.
      Cost += thisT()->getArithmeticInstrCost(
          isPowerOf2_32(RetTy->getScalarSizeInBits()) ? BinaryOperator::And
                                                      : BinaryOperator::URem,
          RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
          {TTI::OK_UniformConstantValue, TTI::OP_None});
      // For non-rotates (X != Y), add shift-by-zero handling.
      Cost += thisT()->getCmpSelInstrCost(
          BinaryOperator::ICmp, RetTy, CondTy, CmpInst::ICMP_EQ, CostKind);
      Cost +=
          thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                      CmpInst::ICMP_EQ, CostKind);
      return Cost;
    }
    case Intrinsic::experimental_cttz_elts: {
      // ...
      if (!getTLI()->shouldExpandCttzElements(ArgType))
        break;
      // ...
      unsigned EltWidth = getTLI()->getBitWidthForCttzElements(
          /* ... */ ZeroIsPoison, &VScaleRange);
      // Expansion: stepvector, mask/subtract, sext, and, reduce, subtract.
      InstructionCost Cost =
          thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Sub, NewVecTy, CostKind);
      Cost += thisT()->getCastInstrCost(Instruction::SExt, NewVecTy,
                                        /* ... */);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::And, NewVecTy, CostKind);
      // ...
      IntrinsicCostAttributes ReducAttrs(/* ... */,
                                         NewEltTy, NewVecTy, FMF, I, 1);
      Cost += thisT()->getTypeBasedIntrinsicInstrCost(ReducAttrs, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Sub, NewEltTy, CostKind);
      return Cost;
    }
    case Intrinsic::get_active_lane_mask:
    case Intrinsic::experimental_vector_match:
    case Intrinsic::experimental_vector_histogram_add:
    case Intrinsic::experimental_vector_histogram_uadd_sat:
    case Intrinsic::experimental_vector_histogram_umax:
    case Intrinsic::experimental_vector_histogram_umin:
    case Intrinsic::masked_udiv:
    case Intrinsic::masked_sdiv:
    case Intrinsic::masked_urem:
    case Intrinsic::masked_srem:
      return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
    case Intrinsic::modf:
    case Intrinsic::sincos:
    case Intrinsic::sincospi: {
      std::optional<unsigned> CallRetElementIndex;
      // The first result of modf comes back by value from the libcall, so
      // its store cost is skipped.
      if (ICA.getID() == Intrinsic::modf)
        CallRetElementIndex = 0;
      if (auto Cost = getMultipleResultIntrinsicVectorLibCallCost(
              ICA, CostKind, CallRetElementIndex))
        return *Cost;
      break;
    }
    case Intrinsic::loop_dependence_war_mask:
    case Intrinsic::loop_dependence_raw_mask: {
      // Expanded via the pointer difference: subtract, (abs for RAW),
      // divide by the element size, then compare/select to build the mask.
      Type *IntPtrTy = IntegerType::get(
          RetTy->getContext(),
          DL.getPointerSizeInBits(PtrTy->getAddressSpace()));
      bool IsReadAfterWrite = IID == Intrinsic::loop_dependence_raw_mask;
      InstructionCost Cost =
          thisT()->getArithmeticInstrCost(Instruction::Sub, IntPtrTy, CostKind);
      if (IsReadAfterWrite) {
        // ...
        Cost += thisT()->getIntrinsicInstrCost(AbsAttrs, CostKind);
      }
      // ...
      Cost += thisT()->getArithmeticInstrCost(Instruction::SDiv, IntPtrTy,
                                              CostKind);
      // ...
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CondTy,
                                          IntPtrTy, /* ... */);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, IntPtrTy,
                                          CondTy, /* ... */);
      // ...
      IntrinsicCostAttributes Attrs(/* ... */, RetTy,
                                    {IntPtrTy, IntPtrTy}, FMF);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      return Cost;
    }
      ScalarizationCost = 0;
      // ...
          filterConstantAndDuplicatedOperands(Args, ICA.getArgTypes()),
          CostKind);
    // ...
    return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
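  // Type-based fallback: an unhandled vector intrinsic is priced as
  // ScalarCalls copies of its scalar form plus the overhead of extracting
  // operands and inserting results.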
    unsigned VecTyIndex = 0;
    if (IID == Intrinsic::vector_reduce_fadd ||
        IID == Intrinsic::vector_reduce_fmul)
      VecTyIndex = 1;
    assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
    // ...
    InstructionCost ScalarizationCost =
        SkipScalarizationCost ? ScalarizationCostPassed : 0;
    unsigned ScalarCalls = 1;
    Type *ScalarRetTy = RetTy;
    if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
      if (!SkipScalarizationCost)
        // ...
      ScalarCalls = std::max(ScalarCalls,
                             cast<FixedVectorType>(RetVTy)->getNumElements());
      ScalarRetTy = RetTy->getScalarType();
    }
    for (Type *Ty : Tys) {
      if (auto *VTy = dyn_cast<VectorType>(Ty)) {
        if (!SkipScalarizationCost)
          // ...
        ScalarCalls = std::max(ScalarCalls,
                               cast<FixedVectorType>(VTy)->getNumElements());
        Ty = Ty->getScalarType();
      }
      // ...
    }
    if (ScalarCalls == 1)
      return 1; // A scalar intrinsic is assumed to be cheap.
    // ...
    InstructionCost ScalarCost =
        thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
    return ScalarCalls * ScalarCost + ScalarizationCost;
    // Library-like FP intrinsics map to a matching ISD opcode; the
    // individual "ISD = ISD::...; break;" assignments are elided here.
    case Intrinsic::sqrt:
    case Intrinsic::sin:
    case Intrinsic::cos:
    case Intrinsic::sincos:
    case Intrinsic::sincospi:
    case Intrinsic::modf:
    case Intrinsic::tan:
    case Intrinsic::asin:
    case Intrinsic::acos:
    case Intrinsic::atan:
    case Intrinsic::atan2:
    case Intrinsic::sinh:
    case Intrinsic::cosh:
    case Intrinsic::tanh:
    case Intrinsic::exp:
    case Intrinsic::exp2:
    case Intrinsic::exp10:
    case Intrinsic::log:
    case Intrinsic::log10:
    case Intrinsic::log2:
    case Intrinsic::ldexp:
    case Intrinsic::fabs:
    case Intrinsic::canonicalize:
    case Intrinsic::minnum:
    case Intrinsic::maxnum:
    case Intrinsic::minimum:
    case Intrinsic::maximum:
    case Intrinsic::minimumnum:
    case Intrinsic::maximumnum:
    case Intrinsic::copysign:
    case Intrinsic::floor:
    case Intrinsic::ceil:
    case Intrinsic::trunc:
    case Intrinsic::nearbyint:
    case Intrinsic::rint:
    case Intrinsic::lrint:
    case Intrinsic::llrint:
    case Intrinsic::round:
    case Intrinsic::roundeven:
    case Intrinsic::lround:
    case Intrinsic::llround:
    case Intrinsic::pow:
    case Intrinsic::fma:
    case Intrinsic::fmuladd:
    case Intrinsic::experimental_constrained_fmuladd:
    // ...
    // These intrinsics have no codegen effect and are free.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::sideeffect:
    case Intrinsic::pseudoprobe:
    case Intrinsic::arithmetic_fence:
      return 0;
    case Intrinsic::masked_store: {
      // ...
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMemIntrinsicInstrCost(/* ... */, CostKind);
    }
    case Intrinsic::masked_load: {
      // ...
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMemIntrinsicInstrCost(/* ... */, CostKind);
    }
    case Intrinsic::experimental_vp_strided_store: {
      // ...
      Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
      return thisT()->getMemIntrinsicInstrCost(/* ... */, CostKind);
    }
    case Intrinsic::experimental_vp_strided_load: {
      // ...
      Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
      return thisT()->getMemIntrinsicInstrCost(/* ... */, CostKind);
    }
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
      return thisT()->getArithmeticReductionCost(
          getArithmeticReductionInstruction(IID), VecOpTy, std::nullopt,
          CostKind);
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul:
      return thisT()->getArithmeticReductionCost(
          getArithmeticReductionInstruction(IID), VecOpTy, FMF, CostKind);
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
    case Intrinsic::vector_reduce_fmaximum:
    case Intrinsic::vector_reduce_fminimum:
      // ...
    case Intrinsic::experimental_vector_match: {
      // ...
      unsigned SearchSize = NeedleTy->getNumElements();
      // If the target natively lowers the match, don't expand it.
      EVT SearchVT = getTLI()->getValueType(DL, SearchTy);
      if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize))
        // ...
      // Expansion: splat each needle element, compare against the search
      // vector, OR the per-needle results, then AND with the input mask.
      InstructionCost Cost = 0;
      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, NeedleTy,
                                          CostKind, /* ... */);
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SearchTy,
                                          CostKind, /* ... */);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SearchTy, RetTy,
                                          /* ... */);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::And, RetTy, CostKind);
      return Cost;
    }
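    // vector_reverse lowers to a reverse shuffle; the histogram intrinsics
    // below expand per bucket pointer: extract the lane's pointer, load
    // the counter, apply the update, store it back.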
    case Intrinsic::vector_reverse:
      return thisT()->getShuffleCost(TTI::SK_Reverse, /* ... */);
    case Intrinsic::experimental_vector_histogram_add:
    case Intrinsic::experimental_vector_histogram_uadd_sat:
    case Intrinsic::experimental_vector_histogram_umax:
    case Intrinsic::experimental_vector_histogram_umin: {
      // ...
      Align Alignment = thisT()->DL.getABITypeAlign(EltTy);
      InstructionCost Cost = 0;
      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, PtrsTy,
                                          CostKind, /* ... */);
      Cost += thisT()->getMemoryOpCost(Instruction::Load, EltTy, Alignment, 0,
                                       CostKind);
      switch (IID) {
      case Intrinsic::experimental_vector_histogram_add:
        Cost +=
            thisT()->getArithmeticInstrCost(Instruction::Add, EltTy, CostKind);
        break;
      case Intrinsic::experimental_vector_histogram_uadd_sat: {
        // ...
        Cost += thisT()->getIntrinsicInstrCost(UAddSat, CostKind);
        break;
      }
      case Intrinsic::experimental_vector_histogram_umax: {
        // ...
      }
      case Intrinsic::experimental_vector_histogram_umin: {
        // ...
      }
      }
      Cost += thisT()->getMemoryOpCost(Instruction::Store, EltTy, Alignment, 0,
                                       CostKind);
      return Cost;
    }
    case Intrinsic::get_active_lane_mask: {
      EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
      EVT ArgVT = getTLI()->getValueType(DL, ArgTy, true);
      // If the target can lower the mask directly, assume it is cheap.
      if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgVT))
        return getTypeLegalizationCost(RetTy).first;
      // Otherwise expand as icmp(stepvector, splat(n)).
      // ...
      InstructionCost Cost =
          thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
                                          /* ... */);
      return Cost;
    }
    // The following are handled individually further below; the per-case
    // ISD selections and bodies are elided here.
    case Intrinsic::experimental_memset_pattern:
    case Intrinsic::abs:
    case Intrinsic::fshl:
    case Intrinsic::fshr:
    case Intrinsic::smax:
    case Intrinsic::smin:
    case Intrinsic::umax:
    case Intrinsic::umin:
    case Intrinsic::sadd_sat:
    case Intrinsic::ssub_sat:
    case Intrinsic::uadd_sat:
    case Intrinsic::usub_sat:
    case Intrinsic::smul_fix:
    case Intrinsic::umul_fix:
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::usub_with_overflow:
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow:
    case Intrinsic::fptosi_sat:
    case Intrinsic::fptoui_sat: {
      // ...
      if (!SrcLT.first.isValid() || !RetLT.first.isValid())
        return InstructionCost::getInvalid();
      // ...
    }
    case Intrinsic::ctpop:
    case Intrinsic::ctlz:
    case Intrinsic::cttz:
    case Intrinsic::bswap:
    case Intrinsic::bitreverse:
    case Intrinsic::ucmp:
    case Intrinsic::scmp:
    case Intrinsic::clmul:
    case Intrinsic::masked_udiv:
    case Intrinsic::masked_sdiv:
    case Intrinsic::masked_urem:
    case Intrinsic::masked_srem: {
      unsigned UnmaskedOpc;
      switch (IID) {
      case Intrinsic::masked_udiv:
        UnmaskedOpc = Instruction::UDiv;
        break;
      case Intrinsic::masked_sdiv:
        UnmaskedOpc = Instruction::SDiv;
        break;
      case Intrinsic::masked_urem:
        UnmaskedOpc = Instruction::URem;
        break;
      case Intrinsic::masked_srem:
        UnmaskedOpc = Instruction::SRem;
        break;
      default:
        llvm_unreachable("unexpected masked div/rem intrinsic");
      }
      InstructionCost Cost =
          thisT()->getArithmeticInstrCost(UnmaskedOpc, RetTy, CostKind);
      // ...
      if (!getTLI()->isOperationLegalOrCustom(ISD, LT)) {
        // Without a native masked op, add a select against a safe divisor.
        Cost += thisT()->getCmpSelInstrCost(/* ... */);
      }
      // ...
    }
    // ...
    Type *LegalizeTy = ST ? ST->getContainedType(0) : RetTy;
    // ...
    if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
        // ...
      return (LT.first * 2);
    // ...
      return (LT.first * 1);
    // ...
      return (LT.first * 2);
    case Intrinsic::fmuladd: {
      // No native fma: model as a multiply followed by an add.
      return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
                                             CostKind) +
             thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
                                             CostKind);
    }
    case Intrinsic::experimental_constrained_fmuladd: {
      IntrinsicCostAttributes FMulAttrs(
          Intrinsic::experimental_constrained_fmul, RetTy, Tys);
      IntrinsicCostAttributes FAddAttrs(
          Intrinsic::experimental_constrained_fadd, RetTy, Tys);
      return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
             thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
    }
    case Intrinsic::smin:
    case Intrinsic::smax:
    case Intrinsic::umin:
    case Intrinsic::umax: {
      // minmax(X,Y) = select(icmp(X,Y),X,Y)
      Type *CondTy = RetTy->getWithNewBitWidth(1);
      bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
      CmpInst::Predicate Pred =
          IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
      InstructionCost Cost = 0;
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                          Pred, CostKind);
      return Cost;
    }
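    // *.with_overflow: perform the plain arithmetic, then derive the
    // overflow flag from compares (plus a widening multiply for
    // smul/umul.with.overflow).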
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::ssub_with_overflow: {
      unsigned Opcode = IID == Intrinsic::sadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;
      // Add: Overflow -> (Result < LHS) ^ (RHS < 0)
      // Sub: Overflow -> (Result < LHS) ^ (RHS > 0)
      InstructionCost Cost = 0;
      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost +=
          2 * thisT()->getCmpSelInstrCost(Instruction::ICmp, SumTy, OverflowTy,
                                          CmpInst::ICMP_SGT, CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
                                              CostKind);
      return Cost;
    }
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::usub_with_overflow: {
      unsigned Opcode = IID == Intrinsic::uadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;
      // ...
      InstructionCost Cost = 0;
      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
                                          OverflowTy, Pred, CostKind);
      return Cost;
    }
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      // Multiply in a twice-as-wide type, truncate back, and compare the
      // high half against the sign/zero extension to detect overflow.
      bool IsSigned = IID == Intrinsic::smul_with_overflow;
      unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
      // ...
      InstructionCost Cost = 0;
      Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
                                            CCH, CostKind);
      Cost += thisT()->getArithmeticInstrCost(
          Instruction::LShr, ExtTy, CostKind, /* ... */);
      if (IsSigned)
        Cost += thisT()->getArithmeticInstrCost(
            Instruction::AShr, MulTy, CostKind,
            {TTI::OK_AnyValue, TTI::OP_None},
            {TTI::OK_UniformConstantValue, TTI::OP_None});
      Cost += thisT()->getCmpSelInstrCost(
          BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
      return Cost;
    }
    case Intrinsic::sadd_sat:
    case Intrinsic::ssub_sat: {
      // Saturating add/sub = the with.overflow op plus compares/selects to
      // clamp to the saturation bounds.
      Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
                                     ? Intrinsic::sadd_with_overflow
                                     : Intrinsic::ssub_with_overflow;
      InstructionCost Cost = 0;
      // ...
      IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
                                    nullptr, ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                              CondTy, Pred, CostKind);
      return Cost;
    }
    case Intrinsic::uadd_sat:
    case Intrinsic::usub_sat: {
      Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
                                     ? Intrinsic::uadd_with_overflow
                                     : Intrinsic::usub_with_overflow;
      InstructionCost Cost = 0;
      // ...
      IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
                                    nullptr, ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost +=
          thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                      CmpInst::BAD_ICMP_PREDICATE, CostKind);
      return Cost;
    }
    case Intrinsic::smul_fix:
    case Intrinsic::umul_fix: {
      // Fixed-point multiply: widen, multiply, then recombine the halves
      // with shifts and an or.
      unsigned ExtOp =
          IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
      TTI::CastContextHint CCH = TTI::CastContextHint::None;
      InstructionCost Cost = 0;
      Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
                                            CCH, CostKind);
      Cost += thisT()->getArithmeticInstrCost(
          Instruction::LShr, RetTy, CostKind, /* ... */);
      Cost += thisT()->getArithmeticInstrCost(
          Instruction::Shl, RetTy, CostKind, /* ... */);
      Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
      return Cost;
    }
    case Intrinsic::abs: {
      // abs(X) = select(icmp(X,0),X,sub(0,X))
      Type *CondTy = RetTy->getWithNewBitWidth(1);
      CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
      InstructionCost Cost = 0;
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::Sub, RetTy, CostKind,
          {TTI::OK_UniformConstantValue, TTI::OP_None});
      return Cost;
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr: {
      // Type-based variant of the funnel-shift expansion above, without
      // operand information.
      Type *CondTy = RetTy->getWithNewBitWidth(1);
      InstructionCost Cost = 0;
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy, CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
                                              CostKind);
      // Non-constant shift amounts need a modulo; a power-of-two bit width
      // lowers it to an AND.
      Cost += thisT()->getArithmeticInstrCost(
          isPowerOf2_32(RetTy->getScalarSizeInBits()) ? BinaryOperator::And
                                                      : BinaryOperator::URem,
          RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
          {TTI::OK_UniformConstantValue, TTI::OP_None});
      // Shift-by-zero handling.
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          CmpInst::ICMP_EQ, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                          CmpInst::ICMP_EQ, CostKind);
      return Cost;
    }
    case Intrinsic::fptosi_sat:
    case Intrinsic::fptoui_sat: {
      Type *FromTy = Tys[0];
      bool IsSigned = IID == Intrinsic::fptosi_sat;
      // Expansion: clamp with min/max, convert, and (for the signed case)
      // select zero for NaN inputs.
      InstructionCost Cost = 0;
      // ...
      Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
      // ...
      Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
      Cost += thisT()->getCastInstrCost(
          IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
          TTI::CastContextHint::None, CostKind);
      if (IsSigned) {
        // ...
        Cost += thisT()->getCmpSelInstrCost(
            BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
        Cost += thisT()->getCmpSelInstrCost(
            BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO, CostKind);
      }
      return Cost;
    }
    case Intrinsic::ucmp:
    case Intrinsic::scmp: {
      Type *CmpTy = Tys[0];
      Type *CondTy = RetTy->getWithNewBitWidth(1);
      InstructionCost Cost =
          thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
                                      CmpIntrinsic::getGTPredicate(IID),
                                      CostKind) +
          thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
                                      CmpIntrinsic::getLTPredicate(IID),
                                      CostKind);
      // Either select the (-1, 0, 1) result directly...
      // ...
        Cost += 2 * thisT()->getCmpSelInstrCost(
                        BinaryOperator::Select, RetTy, CondTy,
                        CmpInst::BAD_ICMP_PREDICATE, CostKind);
      // ...or compute it as zext(gt) - zext(lt).
      // ...
        Cost +=
            2 * thisT()->getCastInstrCost(CastInst::ZExt, RetTy, CondTy,
                                          TTI::CastContextHint::None, CostKind);
        Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
                                                CostKind);
      // ...
      return Cost;
    }
    case Intrinsic::maximumnum:
    case Intrinsic::minimumnum: {
      // Modeled as the base min/max plus two fcanonicalize operations.
      // ...
      InstructionCost FCanonicalizeCost =
          thisT()->getIntrinsicInstrCost(FCanonicalizeAttrs, CostKind);
      return LT.first + FCanonicalizeCost * 2;
    }
    case Intrinsic::clmul: {
      // Model two per-bit expansions and take the cheaper: mask/multiply/
      // xor versus bit-test (icmp + select) plus mask.
      InstructionCost PerBitCostMul =
          thisT()->getArithmeticInstrCost(Instruction::And, RetTy, CostKind) +
          thisT()->getArithmeticInstrCost(Instruction::Mul, RetTy, CostKind) +
          thisT()->getArithmeticInstrCost(Instruction::Xor, RetTy, CostKind);
      InstructionCost PerBitCostBittest =
          thisT()->getArithmeticInstrCost(Instruction::And, RetTy, CostKind) +
          thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, RetTy,
                                      /* ... */) +
          thisT()->getCmpSelInstrCost(Instruction::ICmp, RetTy, RetTy,
                                      /* ... */);
      InstructionCost PerBitCost = std::min(PerBitCostMul, PerBitCostBittest);
      // ...
    }
    // Else, assume that we need to scalarize this intrinsic. For math
    // builtins this will emit a costly libcall with call overhead and
    // spills.
    if (!SkipScalarizationCost) {
      ScalarizationCost = 0;
      for (Type *RetVTy : RetVTys) {
        // ...
      }
      for (Type *Ty : Tys) {
        if (Ty->isVectorTy())
          Ty = Ty->getScalarType();
        // ...
      }
    }
    InstructionCost ScalarCost =
        thisT()->getIntrinsicInstrCost(Attrs, CostKind);
    for (Type *Ty : Tys) {
      // ...
      ScalarCalls = std::max(ScalarCalls,
                             cast<FixedVectorType>(Ty)->getNumElements());
    }
    return ScalarCalls * ScalarCost + ScalarizationCost;
    // ...
    return SingleCallCost;
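  // getMemIntrinsicInstrCost normalizes every masked/VP memory intrinsic
  // to a load-or-store opcode plus gather/scatter and variable-mask flags,
  // then defers to getCommonMaskedMemoryOpCost.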
    unsigned Id = MICA.getID();
    // ...
    switch (Id) {
    // ...
    case Intrinsic::experimental_vp_strided_load:
    case Intrinsic::experimental_vp_strided_store: {
      unsigned Opcode = Id == Intrinsic::experimental_vp_strided_load
                            ? Instruction::Load
                            : Instruction::Store;
      // ...
      return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
                                         /* ... */);
    }
    case Intrinsic::masked_scatter:
    case Intrinsic::masked_gather:
    case Intrinsic::vp_scatter:
    case Intrinsic::vp_gather: {
      unsigned Opcode = (MICA.getID() == Intrinsic::masked_gather ||
                         MICA.getID() == Intrinsic::vp_gather)
                            ? Instruction::Load
                            : Instruction::Store;
      // ...
      return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
                                         /* ... */);
    }
    case Intrinsic::vp_load:
    case Intrinsic::vp_store:
      // ...
    case Intrinsic::masked_load:
    case Intrinsic::masked_store: {
      unsigned Opcode =
          Id == Intrinsic::masked_load ? Instruction::Load : Instruction::Store;
      // ...
      return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
                                         /*VariableMask=*/true,
                                         /*IsGatherScatter=*/false, CostKind);
    }
    case Intrinsic::masked_compressstore:
    case Intrinsic::masked_expandload: {
      unsigned Opcode = MICA.getID() == Intrinsic::masked_expandload
                            ? Instruction::Load
                            : Instruction::Store;
      // ...
      return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
                                         /* ... */);
    }
    case Intrinsic::vp_load_ff:
      // ...
    if (!LT.first.isValid())
      return 0;
    // Find the actual number of parts for non-power-of-2 element counts as
    // ceil(NumElts / NumSubtypeElts).
    if (auto *FTp = dyn_cast<FixedVectorType>(Tp);
        Tp && LT.second.isFixedLengthVector() &&
        // ...
        return divideCeil(FTp->getNumElements(), SubTp->getNumElements());
    // ...
    return LT.first.getValue();
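  // Tree reduction: repeatedly halve the vector with extract-subvector
  // shuffles and pairwise ops until it fits a legal register, then finish
  // with in-register reduction levels and extract lane 0.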
    Type *ScalarTy = Ty->getElementType();
    unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
    if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
        ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
        NumVecElts >= 2) {
      // i1 or/and reductions are a bitcast to iN plus one compare.
      Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
      return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
                                       TTI::CastContextHint::None, CostKind) +
             thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
                                         CmpInst::makeCmpResultType(ValTy),
                                         CmpInst::BAD_ICMP_PREDICATE, CostKind);
    }
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    // ...
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      NumVecElts /= 2;
      // ...
      ShuffleCost += thisT()->getShuffleCost(
          TTI::SK_ExtractSubvector, SubTy, Ty, {}, CostKind, NumVecElts, SubTy);
      ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
      Ty = SubTy;
      ++LongVectorCount;
    }
    NumReduxLevels -= LongVectorCount;
    // ...
    ArithCost +=
        NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
    return ShuffleCost + ArithCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                       CostKind, 0, nullptr, nullptr);
  }

  // getOrderedReductionCost: strict in-order reductions extract every lane
  // and chain scalar ops.
  // ...
    return ExtractCost + ArithCost;
                             std::optional<FastMathFlags> FMF,
                             TTI::TargetCostKind CostKind) const override {
    assert(Ty && "Unknown reduction vector type");
    if (TTI::requiresOrderedReduction(FMF))
      return getOrderedReductionCost(Opcode, Ty, CostKind);
    return getTreeReductionCost(Opcode, Ty, CostKind);
  }

  // getMinMaxReductionCost follows the same shuffle-and-halve scheme with
  // min/max ops in place of the arithmetic op.
  // ...
    Type *ScalarTy = Ty->getElementType();
    unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    // ...
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      NumVecElts /= 2;
      // ...
      ShuffleCost += thisT()->getShuffleCost(
          TTI::SK_ExtractSubvector, SubTy, Ty, {}, CostKind, NumVecElts, SubTy);
      // ...
    }
    NumReduxLevels -= LongVectorCount;
    // ...
    return ShuffleCost + MinMaxCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                       CostKind, 0, nullptr, nullptr);
  InstructionCost getExtendedReductionCost(
      unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
      std::optional<FastMathFlags> FMF,
      TTI::TargetCostKind CostKind) const override {
    if (auto *FTy = dyn_cast<FixedVectorType>(Ty);
        FTy && IsUnsigned && Opcode == Instruction::Add &&
        FTy->getElementType() == IntegerType::getInt1Ty(Ty->getContext())) {
      // vector_reduce_add(zext(<n x i1>)) == ctpop(bitcast <n x i1> to in).
      auto *IntTy =
          IntegerType::get(ResTy->getContext(), FTy->getNumElements());
      IntrinsicCostAttributes ICA(Intrinsic::ctpop, IntTy, {IntTy}, FMF);
      return thisT()->getCastInstrCost(Instruction::BitCast, IntTy, FTy,
                                       TTI::CastContextHint::None, CostKind) +
             thisT()->getIntrinsicInstrCost(ICA, CostKind);
    }
    // Without native support this is vecreduce.opcode(ext(Ty A)).
    VectorType *ExtTy = VectorType::get(ResTy, Ty);
    InstructionCost RedCost =
        thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
    InstructionCost ExtCost = thisT()->getCastInstrCost(
        IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
        TTI::CastContextHint::None, CostKind);
    return RedCost + ExtCost;
  }

  InstructionCost
  getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy,
                         VectorType *Ty,
                         TTI::TargetCostKind CostKind) const override {
    assert((RedOpcode == Instruction::Add || RedOpcode == Instruction::Sub) &&
           "The reduction opcode is expected to be Add or Sub.");
    // Without native support this is vecreduce.add(mul(ext(A), ext(B))).
    VectorType *ExtTy = VectorType::get(ResTy, Ty);
    InstructionCost RedCost = thisT()->getArithmeticReductionCost(
        RedOpcode, ExtTy, std::nullopt, CostKind);
    InstructionCost ExtCost = thisT()->getCastInstrCost(
        IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
        TTI::CastContextHint::None, CostKind);
    InstructionCost MulCost =
        thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
    return RedCost + MulCost + 2 * ExtCost;
  }
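  // Partial reductions are only costed generically when the accumulator
  // element size is a whole multiple of the input element size.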
  InstructionCost getPartialReductionCost(
      unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
      ElementCount VF, TTI::PartialReductionExtendKind OpAExtend,
      TTI::PartialReductionExtendKind OpBExtend, std::optional<unsigned> BinOp,
      TTI::TargetCostKind CostKind,
      std::optional<FastMathFlags> FMF) const override {
    // ...
    unsigned Ratio = EltSizeAcc / EltSizeInA;
    if (// ...
        EltSizeAcc % EltSizeInA != 0 || (BinOp && InputTypeA != InputTypeB))
      return InstructionCost::getInvalid();
    // ...
    Type *AccumVectorType = VectorType::get(AccumType, VF);
    // ...
    if (!BinOp)
      return ExtendCostA + ReductionOpCost;
    // ...
    return ExtendCostA + ExtendCostB + ReductionOpCost +
           /* ... */;
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static const Function * getCalledFunction(const Value *V)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static unsigned getNumElements(Type *Ty)
static Type * getValueType(Value *V, bool LookThroughCmp=false)
Returns the "element type" of the given value/instruction V.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
This file describes how to lower LLVM code to machine code.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool sgt(const APInt &RHS) const
Signed greater than comparison.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
size_t size() const
size - Get the array size.
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
InstructionCost getFPOpCost(Type *Ty) const override
bool preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const override
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false) const override
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override
Try to calculate op costs for min/max reduction operations.
bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty) const override
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const override
unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const override
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override
bool shouldBuildLookupTables() const override
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override
bool isProfitableToHoist(Instruction *I) const override
unsigned getNumberOfParts(Type *Tp) const override
unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const override
bool useAA() const override
unsigned getPrefetchDistance() const override
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *SrcTy, int &Index, VectorType *&SubTy) const
InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const override
Estimate the overhead of scalarizing an instruction's operands.
bool isLegalAddScalableImmediate(int64_t Imm) const override
unsigned getAssumedAddrSpace(const Value *V) const override
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const override
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0) const override
bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override
bool areInlineCompatible(const Function *Caller, const Function *Callee) const override
bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty) const override
bool haveFastSqrt(Type *Ty) const override
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const override
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const override
unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy, Align Alignment, unsigned AddrSpace) const override
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const override
unsigned adjustInliningThreshold(const CallBase *CB) const override
unsigned getInliningThresholdMultiplier() const override
InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const override
Estimate the overhead of scalarizing an instruction.
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Scalar, ArrayRef< std::tuple< Value *, User *, int > > ScalarUserAndIdx, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const override
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset)
bool shouldBuildRelLookupTables() const override
bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx) const override
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
InstructionCost getVectorInstrCost(const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const override
InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) const override
unsigned getEpilogueVectorizationMinVF() const override
InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index, TTI::TargetCostKind CostKind) const override
InstructionCost getVectorSplitCost() const
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
std::optional< unsigned > getMaxVScale() const override
unsigned getFlatAddressSpace() const override
InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const override
Compute a cost of the given call instruction.
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const override
InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) const
Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
~BasicTTIImplBase() override=default
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
unsigned getMaxPrefetchIterationsAhead() const override
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override
InstructionCost getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
Get intrinsic cost based on argument types.
bool hasBranchDivergence(const Function *F=nullptr) const override
InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) const
Try to calculate the cost of performing strict (in-order) reductions, which involves doing a sequence...
std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const override
bool shouldPrefetchAddressSpace(unsigned AS) const override
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const override
unsigned getCacheLineSize() const override
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override
bool shouldDropLSRSolutionIfLessProfitable() const override
int getInlinerVectorBonusPercent() const override
InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind) const override
InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
std::pair< InstructionCost, MVT > getTypeLegalizationCost(Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
InstructionCost getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, ElementCount VF, TTI::PartialReductionExtendKind OpAExtend, TTI::PartialReductionExtendKind OpBExtend, std::optional< unsigned > BinOp, TTI::TargetCostKind CostKind, std::optional< FastMathFlags > FMF) const override
bool isLegalAddImmediate(int64_t imm) const override
InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) const override
unsigned getMaxInterleaveFactor(ElementCount VF) const override
bool isSingleThreaded() const override
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const override
bool isProfitableLSRChainElement(Instruction *I) const override
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override
bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx) const override
bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx) const override
std::optional< unsigned > getVScaleForTuning() const override
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override
Get intrinsic cost based on arguments.
bool preferTailFoldingOverEpilogue(TailFoldingInfo *TFI) const override
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const override
InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *, const SCEV *, TTI::TargetCostKind) const override
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override
InstructionCost getScalarizationOverhead(VectorType *RetTy, ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const
Estimate the overhead of scalarizing the inputs and outputs of an instruction, with return type RetTy...
TailFoldingStyle getPreferredTailFoldingStyle() const override
std::optional< unsigned > getCacheSize(TargetTransformInfo::CacheLevel Level) const override
bool isLegalICmpImmediate(int64_t imm) const override
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const override
unsigned getRegUsageForType(Type *Ty) const override
InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const override
Get memory intrinsic cost based on arguments.
BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
bool isTypeLegal(Type *Ty) const override
bool enableWritePrefetching() const override
bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const override
InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
bool isNumRegsMajorCostOfLSR() const override
BasicTTIImpl(const TargetMachine *TM, const Function &F)
size_type count() const
count - Returns the number of bits which are set.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLE
signed less or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static CmpInst::Predicate getGTPredicate(Intrinsic::ID ID)
static CmpInst::Predicate getLTPredicate(Intrinsic::ID ID)
This class represents a range of values.
A parsed version of the target data layout string in and methods for querying it.
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getFixed(ScalarTy MinVal)
constexpr bool isScalar() const
Exactly one element.
Convenience struct for specifying and reasoning about fast-math flags.
Container class for subtarget features.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
AttributeList getAttributes() const
Return the attribute list for this Function.
The core instruction combiner logic.
static InstructionCost getInvalid(CostType Val=0)
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
FastMathFlags getFlags() const
const SmallVectorImpl< Type * > & getArgTypes() const
Type * getReturnType() const
bool skipScalarizationCost() const
const SmallVectorImpl< const Value * > & getArgs() const
InstructionCost getScalarizationCost() const
const IntrinsicInst * getInst() const
Intrinsic::ID getID() const
bool isTypeBasedOnly() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Represents a single loop in the control flow graph.
const FeatureBitset & getFeatureBits() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Information for memory intrinsic cost model.
Align getAlignment() const
Type * getDataType() const
bool getVariableMask() const
Intrinsic::ID getID() const
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Analysis providing profile information.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exactly one source vector.
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then extracting an original-width vector starting from the splice point.
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
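A worked sketch of how the mask classifiers above interpret literal masks (the masks themselves are illustrative):

#include "llvm/IR/Instructions.h"
#include <cassert>

using namespace llvm;

static void classifyMasks() {
  // <3, 2, 1, 0> reverses a single 4-element source vector.
  int Rev[] = {3, 2, 1, 0};
  assert(ShuffleVectorInst::isReverseMask(Rev, /*NumSrcElts=*/4));

  // <2, 3> reads two contiguous lanes starting at index 2: an extract-subvector.
  int Extract[] = {2, 3};
  int Index = 0;
  assert(ShuffleVectorInst::isExtractSubvectorMask(Extract, /*NumSrcElts=*/4,
                                                   Index) &&
         Index == 2);

  // <0, 5, 2, 7> picks each lane i from either source without crossing lanes.
  int Sel[] = {0, 5, 2, 7};
  assert(ShuffleVectorInst::isSelectMask(Sel, /*NumSrcElts=*/4));
}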
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
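Typical SmallPtrSet usage, as a sketch: insert() returns {iterator, inserted}, so it doubles as a seen-before test. countUnique is a hypothetical helper.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"

using namespace llvm;

template <typename T> static unsigned countUnique(ArrayRef<T *> Ptrs) {
  SmallPtrSet<T *, 8> Seen;
  unsigned Unique = 0;
  for (T *P : Ptrs)
    if (Seen.insert(P).second) // .second is true only for newly added elements
      ++Unique;
  return Unique;
}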
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
static StackOffset getScalable(int64_t Scalable)
static StackOffset getFixed(int64_t Fixed)
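A minimal StackOffset sketch: the fixed and scalable parts are tracked separately, and the scalable part is multiplied by vscale at runtime.

#include "llvm/Support/TypeSize.h"

using namespace llvm;

static StackOffset frameSlot() {
  // 16 fixed bytes plus 32 bytes scaled by vscale.
  return StackOffset::getFixed(16) + StackOffset::getScalable(32);
}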
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Provides information about what library functions are available for the current target.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with custom lowering.
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used to make them valid.
virtual bool preferSelectsOverBooleanArithmetic(EVT VT) const
Should we prefer selects to doing arithmetic on boolean types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the value to ToTy in the result register.
@ TypeScalarizeScalableVector
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumCases cases, Range range of values.
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool isSuitableForBitTests(const DenseMap< const BasicBlock *, unsigned int > &DestCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests unique destinations, with Low and High as its lowest and highest case values.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering.
LegalizeAction getLoadAction(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace, unsigned ExtType, bool Atomic) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we need to promote it to a larger type (return 'Promote'), or we need to expand it into multiple registers of smaller integer type (return 'Expand').
bool isLoadLegal(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace, unsigned ExtType, bool Atomic) const
Return true if the specified load with extension is legal on this target.
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with a bitwise operation.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering or using promotion.
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
Primary interface to the complete machine description for the target machine.
bool isPositionIndependent() const
const Triple & getTargetTriple() const
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInfo-derived member variable.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetSubtargetInfo - Generic base class for all target subtargets.
Triple - Helper class for working with autoconf configuration names.
ArchType getArch() const
Get the parsed architecture type of this triple.
LLVM_ABI bool isArch64Bit() const
Test whether the architecture is 64-bit.
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old number of lanes.
LLVM_ABI Type * getWithNewType(Type *EltTy) const
Given a vector type, change the element type, whilst keeping the old number of elements.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
bool isVoidTy() const
Return true if this is 'void'.
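A sketch of the Type reshaping helpers above; widenLanesTo32 is a hypothetical name.

#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

// <4 x i8> -> <4 x i32>; scalar i8 -> i32. Requires an int or int-vector type.
static Type *widenLanesTo32(Type *Ty) {
  return Ty->getWithNewBitWidth(32);
}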
Value * getOperand(unsigned i) const
static LLVM_ABI bool isVPBinOp(Intrinsic::ID ID)
static LLVM_ABI bool isVPCast(Intrinsic::ID ID)
static LLVM_ABI bool isVPCmp(Intrinsic::ID ID)
static LLVM_ABI std::optional< unsigned > getFunctionalOpcodeForVP(Intrinsic::ID ID)
static LLVM_ABI std::optional< Intrinsic::ID > getFunctionalIntrinsicIDForVP(Intrinsic::ID ID)
static LLVM_ABI bool isVPIntrinsic(Intrinsic::ID)
static LLVM_ABI bool isVPReduction(Intrinsic::ID ID)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
Base class of all SIMD vector types.
static VectorType * getHalfElementsVectorType(VectorType *VTy)
This static method returns a VectorType with half as many elements as the input type and the same element type.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Type * getElementType() const
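A short sketch of VectorType construction and the halving helper used by recursive vector-splitting cost code:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

static VectorType *halfOfV8F32(LLVMContext &Ctx) {
  auto *V8F32 =
      VectorType::get(Type::getFloatTy(Ctx), ElementCount::getFixed(8));
  // Same element type, half the lanes: <4 x float>.
  return VectorType::getHalfElementsVectorType(V8F32);
}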
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the same way as for normal integer types.
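A sketch of the coefficient-wise arithmetic this implies for ElementCount:

#include "llvm/Support/TypeSize.h"

using namespace llvm;

static void elementCountDemo() {
  ElementCount Scal8 = ElementCount::getScalable(8); // vscale x 8 lanes

  // Halving divides only the compile-time coefficient: vscale x 4.
  ElementCount Half = Scal8.divideCoefficientBy(2);
  (void)Half;

  // A comparison is "known" only if it holds for every vscale >= 1;
  // 4 < vscale x 8 always holds, so this is known.
  bool Known = ElementCount::isKnownLT(ElementCount::getFixed(4), Scal8);
  (void)Known;
}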
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A to NewBitWidth bits.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
ISD namespace - This namespace contains an enum which represents all of the SelectionDAG node types a...
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same width and scale.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FMODF
FMODF - Decomposes the operand into integral and fractional parts, each having the same type and sign as the operand.
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ FSINCOSPI
FSINCOSPI - Compute both the sine and cosine times pi more accurately than FSINCOS(pi*x), especially for large x.
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ CLMUL
Carry-less multiplication operations.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width (W).
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values, following IEEE-754 definitions.
@ SMULO
Same for multiplication.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ MASKED_UDIV
Masked vector arithmetic that returns poison on disabled lanes.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer with saturation.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W).
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum, which behave the same as FMINNUM_IEEE and FMAXNUM_IEEE except when either operand is an sNaN.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
LLVM_ABI bool isTargetIntrinsic(ID IID)
isTargetIntrinsic - Returns true if IID is an intrinsic specific to a certain target.
LLVM_ABI Libcall getSINCOSPI(EVT RetVT)
getSINCOSPI - Return the SINCOSPI_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getMODF(EVT VT)
getMODF - Return the MODF_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getSINCOS(EVT RetVT)
getSINCOS - Return the SINCOS_* value for the given types, or UNKNOWN_LIBCALL if there is none.
DiagnosticInfoOptimizationBase::Argument NV
friend class Instruction
Iterator for Instructions in a BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), where A is the 0-based index of the item and the remaining elements are the values from the original input ranges.
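A usage sketch for enumerate() over a hypothetical value list:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

using namespace llvm;

static int weightedSum(const SmallVectorImpl<int> &Vals) {
  int Sum = 0;
  // Each iteration yields the 0-based index alongside the element.
  for (auto [Idx, V] : enumerate(Vals))
    Sum += static_cast<int>(Idx) * V;
  return Sum;
}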
Type * toScalarizedTy(Type *Ty)
A helper for converting vectorized types to scalarized (non-vector) types.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case of optionals) value is accepted.
LLVM_ABI unsigned getArithmeticReductionInstruction(Intrinsic::ID RdxID)
Returns the arithmetic instruction opcode used when expanding a reduction.
bool isVectorizedTy(Type *Ty)
Returns true if Ty is a vector type or a struct of vector types where all vector types share the same VF.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
auto dyn_cast_or_null(const Y &Val)
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
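A worked sketch of the bit-math helpers above:

#include "llvm/ADT/bit.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>

using namespace llvm;

static void bitMathDemo() {
  assert(isPowerOf2_32(64));   // exactly one bit set and value > 0
  assert(!isPowerOf2_32(0));   // zero is explicitly excluded
  assert(has_single_bit(64u)); // C++20-style spelling of the same test
  assert(Log2_32(64) == 6);    // floor log base 2
  assert(Log2_32(65) == 6);    // non-powers round down
}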
ElementCount getVectorizedTypeVF(Type *Ty)
Returns the number of vector elements for a vectorized type.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range function attribute.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
constexpr int PoisonMaskElem
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
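divideCeil() is plain integer ceil-division, e.g. the number of full vector registers needed to cover N elements; a tiny worked sketch:

#include "llvm/Support/MathExtras.h"
#include <cassert>

static void divideCeilDemo() {
  assert(llvm::divideCeil(7, 4) == 2); // 7 elements at 4 per register
  assert(llvm::divideCeil(8, 4) == 2); // exact division is unchanged
}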
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
cl::opt< unsigned > PartialUnrollingThreshold
LLVM_ABI bool isVectorizedStructTy(StructType *StructTy)
Returns true if StructTy is an unpacked literal struct where all elements are vectors of matching element count.
This struct is a compact representation of a valid (non-zero power of two) alignment.
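A minimal Align sketch; internally it stores log2 of the alignment, which is why the value must be a non-zero power of two:

#include "llvm/Support/Alignment.h"
#include <cassert>

using namespace llvm;

static void alignDemo() {
  Align A(16);
  assert(A.value() == 16);
  assert(Log2(A) == 4);                          // the stored representation
  assert(isAligned(A, 32) && !isAligned(A, 24)); // Offset % 16 == 0 test
}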
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
ElementCount getVectorElementCount() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
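A sketch of the EVT <-> IR type round-trip these entries describe:

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

static void evtDemo(LLVMContext &Ctx) {
  // IR type -> EVT (simple types map onto MVT, here MVT::v4i32).
  Type *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  EVT VT = EVT::getEVT(VecTy);
  if (VT.isSimple())
    (void)VT.getSimpleVT();

  // EVT -> IR type.
  EVT I64 = EVT::getIntegerVT(Ctx, 64);
  (void)I64.getTypeForEVT(Ctx);
}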
Attributes of a target dependent hardware loop.
static bool hasVectorMaskArgument(RTLIB::LibcallImpl Impl)
Returns true if the function has a vector mask argument, which is assumed to be the last argument.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.