16 #ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17 #define LLVM_CODEGEN_BASICTTIIMPL_H
88 const T *thisT() const { return static_cast<const T *>(this); }
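// Editor's note (not part of the header): thisT() is the CRTP hook of BasicTTIImplBase<T>.
// It casts the base object to the concrete target implementation T, so cost queries made
// through thisT()-> dispatch to the target's overrides rather than the generic defaults.
// A minimal sketch with a hypothetical target name:
//   class MyTargetTTIImpl : public BasicTTIImplBase<MyTargetTTIImpl> {
//   public:
//     InstructionCost getArithmeticInstrCost(/* ... */) const override; // reached via thisT()
//   };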
98 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
102 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
122 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
124 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
137 "Can only extract subvectors from vectors");
140 (Index + NumSubElts) <=
142 "SK_ExtractSubvector index out of range");
148 for (int i = 0; i != NumSubElts; ++i) {
150 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
151 CostKind, i + Index, nullptr, nullptr);
152 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
165 "Can only insert subvectors into vectors");
168 (Index + NumSubElts) <=
170 "SK_InsertSubvector index out of range");
176 for (int i = 0; i != NumSubElts; ++i) {
177 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
180 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
181 i + Index, nullptr, nullptr);
188 return static_cast<const T *>(this)->getST();
193 return static_cast<const T *>(this)->getTLI();
215 bool IsGatherScatter,
223 unsigned VF = VT->getNumElements();
238 VF * thisT()->getMemoryOpCost(Opcode, VT->getElementType(), Alignment,
244 Opcode == Instruction::Store, CostKind);
258 VF * (thisT()->getCFInstrCost(Instruction::Br, CostKind) +
259 thisT()->getCFInstrCost(Instruction::PHI, CostKind));
262 return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
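// Editor's note: the common masked/gather-scatter memory cost above is the sum of four
// parts: per-lane address extraction (gather/scatter only), VF scalar loads or stores,
// the scalarization overhead of packing or unpacking the lanes, and, for a variable mask,
// one branch plus one phi per lane. For example, with VF = 4 and a variable mask the
// ConditionalCost term alone is 4 * (getCFInstrCost(Br) + getCFInstrCost(PHI)).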
270 static bool isSplatMask(ArrayRef<int> Mask, unsigned NumSrcElts, int &Index) {
272 bool IsCompared = false;
276 return P.index() != Mask.size() - 1 || IsCompared;
277 if (static_cast<unsigned>(P.value()) >= NumSrcElts * 2)
280 SplatIdx = P.value();
281 return P.index() != Mask.size() - 1;
284 return SplatIdx == P.value();
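// Editor's note: isSplatMask checks that every defined mask element selects the same
// source lane and reports that lane in Index. For a 4-element source, <2, 2, 2, 2> is a
// splat of lane 2, while <2, 2, 3, 2> is not; elements referring past the two source
// vectors (the NumSrcElts * 2 bound above) disqualify the mask.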
303 std::optional<InstructionCost> getMultipleResultIntrinsicVectorLibCallCost(
305 std::optional<unsigned> CallRetElementIndex = {}) const {
314 EVT VT = getTLI()->getValueType(DL, Ty);
317 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
319 switch (ICA.getID()) {
320 case Intrinsic::modf:
323 case Intrinsic::sincospi:
326 case Intrinsic::sincos:
334 RTLIB::LibcallImpl LibcallImpl = getTLI()->getLibcallImpl(LC);
335 if (LibcallImpl == RTLIB::Unsupported)
348 for (bool Masked : {false, true}) {
349 if ((VD = LibInfo->getVectorMappingInfo(LCName, VF, Masked)))
361 VecTy, {}, CostKind, 0, nullptr, {});
367 if (Idx == CallRetElementIndex)
369 Cost += thisT()->getMemoryOpCost(
370 Instruction::Load, VectorTy,
403 unsigned *Fast) const override {
405 return getTLI()->allowsMisalignedMemoryAccesses(
410 const Function *Callee) const override {
414 TM.getSubtargetImpl(*Caller)->getFeatureBits();
416 TM.getSubtargetImpl(*Callee)->getFeatureBits();
420 return (CallerBits & CalleeBits) == CalleeBits;
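// Editor's note: the generic inline-compatibility test is a feature-bit subset check:
// inlining is allowed only if every subtarget feature the callee was compiled with is
// also enabled in the caller, i.e. (CallerBits & CalleeBits) == CalleeBits. As an
// illustration, a callee built with an extra SIMD feature would not be considered
// compatible with a caller that lacks it.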
450 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
454 return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
458 return getTLI()->getTargetMachine().Options.ThreadModel ==
462 std::pair<const Value *, unsigned>
464 return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
468 Value *NewV) const override {
473 return getTLI()->isLegalAddImmediate(imm);
477 return getTLI()->isLegalAddScalableImmediate(Imm);
481 return getTLI()->isLegalICmpImmediate(imm);
485 bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
487 int64_t ScalableOffset = 0) const override {
494 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
498 return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);
502 Type *ScalarValTy) const override {
503 auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
505 EVT VT = getTLI()->getValueType(DL, SrcTy);
506 if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
507 getTLI()->isOperationCustom(ISD::STORE, VT))
513 getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
514 return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
516 while (VF > 2 && IsSupportedByTarget(VF))
522 EVT VT = getTLI()->getValueType(DL, Ty, true);
523 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
527 EVT VT = getTLI()->getValueType(DL, Ty, true);
528 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
551 unsigned AddrSpace) const override {
564 return getTLI()->isTruncateFree(Ty1, Ty2);
568 return getTLI()->isProfitableToHoist(I);
571 bool useAA() const override { return getST()->useAA(); }
574 EVT VT = getTLI()->getValueType(DL, Ty, true);
575 return getTLI()->isTypeLegal(VT);
579 EVT ETy = getTLI()->getValueType(DL, Ty);
580 return getTLI()->getNumRegisters(Ty->getContext(), ETy);
599 unsigned N = SI.getNumCases();
607 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
610 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
611 APInt MinCaseVal = MaxCaseVal;
612 for (auto CI : SI.cases()) {
613 const APInt &CaseVal = CI.getCaseValue()->getValue();
614 if (CaseVal.sgt(MaxCaseVal))
615 MaxCaseVal = CaseVal;
616 if (CaseVal.slt(MinCaseVal))
617 MinCaseVal = CaseVal;
621 if (N <= DL.getIndexSizeInBits(0u)) {
623 for (auto I : SI.cases()) {
634 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
637 (MaxCaseVal - MinCaseVal)
638 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
641 JumpTableSize = Range;
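// Editor's note: the estimated jump-table size is the span of the case values,
// (MaxCaseVal - MinCaseVal) + 1, clamped to avoid overflow. A switch over the cases
// {10, 12, 90} therefore implies a table of 81 entries even though it has only 3 cases,
// which is why the surrounding code also checks whether a table of that size is worthwhile.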
657 if (!TM.isPositionIndependent())
667 const Triple &TargetTriple = TM.getTargetTriple();
699 const Function &Fn) const override {
703 case Instruction::SDiv:
704 case Instruction::SRem:
705 case Instruction::UDiv:
706 case Instruction::URem: {
758 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
759 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
776 << "advising against unrolling the loop because it "
827 std::optional<Instruction *>
832 std::optional<Value *>
835 bool &KnownBitsComputed) const override {
844 SimplifyAndSetOp) const override {
846 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
850 std::optional<unsigned>
852 return std::optional<unsigned>(
856 std::optional<unsigned>
858 std::optional<unsigned> TargetResult =
859 getST()->getCacheAssociativity(static_cast<unsigned>(Level));
868 return getST()->getCacheLineSize();
872 return getST()->getPrefetchDistance();
876 unsigned NumStridedMemAccesses,
877 unsigned NumPrefetches,
878 bool HasCall) const override {
879 return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
880 NumPrefetches, HasCall);
884 return getST()->getMaxPrefetchIterationsAhead();
888 return getST()->enableWritePrefetching();
892 return getST()->shouldPrefetchAddressSpace(AS);
905 std::optional<unsigned> getMaxVScale() const override { return std::nullopt; }
915 VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract,
925 (VL.empty() || VL.size() == Ty->getNumElements()) &&
926 "Vector size mismatch");
930 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
931 if (!DemandedElts[i])
934 Value *InsertedVal = VL.empty() ? nullptr : VL[i];
935 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
939 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
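// Editor's note: the default scalarization-overhead model walks the demanded lanes and
// charges one InsertElement per demanded result lane (when Insert is set) and one
// ExtractElement per demanded operand lane (when Extract is set), each priced through the
// target's getVectorInstrCost. Scalarizing a 4-lane value with all lanes demanded and both
// flags set therefore costs four inserts plus four extracts at the target's per-element rate.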
952 unsigned ScalarOpdIdx) const override {
957 int OpdIdx) const override {
963 int RetIdx) const override {
976 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
986 for (Type *Ty : Tys) {
988 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
989 !Ty->isPtrOrPtrVectorTy())
1012 filterConstantAndDuplicatedOperands(Args, Tys), CostKind);
1025 EVT MTy = getTLI()->getValueType(DL, Ty);
1049 if (MTy == LK.second)
1064 const Instruction *CxtI = nullptr) const override {
1066 const TargetLoweringBase *TLI = getTLI();
1067 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1068 assert(ISD && "Invalid opcode");
1083 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
1086 return LT.first * OpCost;
1089 if (!TLI->isOperationExpand(ISD, LT.second)) {
1092 return LT.first * 2 * OpCost;
1104 unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
1106 DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
1108 thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
1110 thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
1111 return DivCost + MulCost + SubCost;
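// Editor's note: when an integer remainder has to be expanded, it is priced with the
// identity  x % y == x - (x / y) * y,  i.e. the cost of the matching division plus one
// multiply plus one subtract (the DivCost + MulCost + SubCost sum above).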
1143 int NumDstElts = Mask.size();
1144 int NumSrcElts = SrcTy->getElementCount().getKnownMinValue();
1151 if (isSplatMask(Mask, NumSrcElts, Index))
1154 (Index + NumDstElts) <= NumSrcElts) {
1161 if (all_of(Mask, [NumSrcElts](int M) { return M < NumSrcElts; }))
1166 Mask, NumSrcElts, NumSubElts, Index)) {
1167 if (Index + NumSubElts > NumSrcElts)
1196 const Instruction *CxtI = nullptr) const override {
1200 return getBroadcastShuffleOverhead(FVT, CostKind);
1209 return getPermuteShuffleOverhead(FVT, CostKind);
1212 return getExtractSubvectorOverhead(SrcTy, CostKind, Index,
1215 return getInsertSubvectorOverhead(DstTy, CostKind, Index,
1234 TypeSize SrcSize = SrcLT.second.getSizeInBits();
1235 TypeSize DstSize = DstLT.second.getSizeInBits();
1236 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
1237 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
1242 case Instruction::Trunc:
1247 case Instruction::BitCast:
1250 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
1254 case Instruction::FPExt:
1255 if (I && getTLI()->isExtFree(I))
1258 case Instruction::ZExt:
1259 if (TLI->isZExtFree(SrcLT.second, DstLT.second))
1262 case Instruction::SExt:
1263 if (I && getTLI()->isExtFree(I))
1273 if (DstLT.first == SrcLT.first &&
1278 case Instruction::AddrSpaceCast:
1280 Dst->getPointerAddressSpace()))
1289 if (SrcLT.first == DstLT.first &&
1294 if (!SrcVTy && !DstVTy) {
1305 if (DstVTy && SrcVTy) {
1307 if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
1310 if (Opcode == Instruction::ZExt)
1314 if (Opcode == Instruction::SExt)
1315 return SrcLT.first * 2;
1321 return SrcLT.first * 1;
1334 if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
1335 DstVTy->getElementCount().isVector()) {
1338 const T *TTI = thisT();
1341 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
1343 (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
1355 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
1368 if (Opcode == Instruction::BitCast) {
1385 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
1386 CostKind, Index, nullptr, nullptr) +
1402 const Instruction *I = nullptr) const override {
1403 const TargetLoweringBase *TLI = getTLI();
1404 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1405 assert(ISD && "Invalid opcode");
1409 Op1Info, Op2Info, I);
1413 assert(CondTy && "CondTy must exist");
1420 !TLI->isOperationExpand(ISD, LT.second)) {
1423 return LT.first * 1;
1435 Opcode, ValVTy->getScalarType(), CondTy->getScalarType(), VecPred,
1451 unsigned Index, const Value *Op0,
1452 const Value *Op1) const override {
1462 unsigned Index, Value *Scalar,
1463 ArrayRef<std::tuple<Value *, User *, int>>
1464 ScalarUserAndIdx) const override {
1465 return thisT()->getVectorInstrCost(Opcode, Val, CostKind, Index, nullptr,
1471 unsigned Index) const override {
1472 Value *Op0 = nullptr;
1473 Value *Op1 = nullptr;
1475 Op0 = IE->getOperand(0);
1476 Op1 = IE->getOperand(1);
1478 return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
1485 unsigned Index) const override {
1486 unsigned NewIndex = -1;
1489 "Unexpected index from end of vector");
1490 NewIndex = FVTy->getNumElements() - 1 - Index;
1492 return thisT()->getVectorInstrCost(Opcode, Val, CostKind, NewIndex, nullptr,
1498 const APInt &DemandedDstElts,
1501 "Unexpected size of DemandedDstElts.");
1519 Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
1522 Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
1534 assert(!Src->isVoidTy() && "Invalid type");
1551 LT.second.getSizeInBits())) {
1557 if (Opcode == Instruction::Store)
1567 Opcode == Instruction::Store, CostKind);
1579 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
1585 bool VariableMask, Align Alignment,
1588 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
1594 bool VariableMask, Align Alignment,
1599 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
1604 const Value *Ptr, bool VariableMask,
1611 return thisT()->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1618 bool UseMaskForCond = false, bool UseMaskForGaps = false) const override {
1626 unsigned NumElts = VT->getNumElements();
1627 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
1629 unsigned NumSubElts = NumElts / Factor;
1634 if (UseMaskForCond || UseMaskForGaps)
1635 Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
1644 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
1661 if (Cost.isValid() && VecTySize > VecTyLTSize) {
1664 unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
1668 unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
1671 BitVector UsedInsts(NumLegalInsts, false);
1672 for (unsigned Index : Indices)
1673 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
1674 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
1683 "Interleaved memory op has too many members");
1689 for (unsigned Index : Indices) {
1690 assert(Index < Factor && "Invalid index for interleaved memory op");
1691 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
1692 DemandedLoadStoreElts.setBit(Index + Elm * Factor);
1695 if (Opcode == Instruction::Load) {
1705 SubVT, DemandedAllSubElts,
1707 Cost += Indices.size() * InsSubCost;
1708 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1726 SubVT, DemandedAllSubElts,
1728 Cost += ExtSubCost * Indices.size();
1729 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1734 if (!UseMaskForCond)
1739 Cost += thisT()->getReplicationShuffleCost(
1740 I8Type, Factor, NumSubElts,
1741 UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
1749 if (UseMaskForGaps) {
1751 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
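// Editor's note: the generic interleaved-access cost combines one wide (possibly masked)
// load or store with the scalarization overhead of moving lanes between the wide vector
// and the Factor sub-vectors (roughly one insert/extract-style sub-vector cost per index,
// plus the demanded-lane overhead on the wide type); when the group has gaps, an extra
// And of the mask vector (MaskVT above) is charged to disable the unused lanes.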
1777 std::optional<unsigned> FOp =
1780 if (ICA.getID() == Intrinsic::vp_load) {
1783 Alignment = VPI->getPointerAlignment().valueOrOne();
1787 AS = PtrTy->getAddressSpace();
1788 return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
1791 if (ICA.getID() == Intrinsic::vp_store) {
1794 Alignment = VPI->getPointerAlignment().valueOrOne();
1798 AS = PtrTy->getAddressSpace();
1799 return thisT()->getMemoryOpCost(*FOp, ICA.getArgTypes()[0], Alignment,
1803 ICA.getID() == Intrinsic::vp_fneg) {
1804 return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
1808 return thisT()->getCastInstrCost(
1817 return thisT()->getCmpSelInstrCost(*FOp, ICA.getArgTypes()[0],
1824 if (ICA.getID() == Intrinsic::vp_scatter) {
1834 Alignment = VPI->getPointerAlignment().valueOrOne();
1836 return thisT()->getGatherScatterOpCost(
1840 if (ICA.getID() == Intrinsic::vp_gather) {
1850 Alignment = VPI->getPointerAlignment().valueOrOne();
1852 return thisT()->getGatherScatterOpCost(
1857 if (ICA.getID() == Intrinsic::vp_select ||
1858 ICA.getID() == Intrinsic::vp_merge) {
1869 std::optional<Intrinsic::ID> FID =
1873 if (ICA.getID() == Intrinsic::experimental_vp_reverse)
1874 FID = Intrinsic::vector_reverse;
1880 "Expected VPIntrinsic to have Mask and Vector Length args and "
1892 *FID != Intrinsic::vector_reduce_fadd &&
1893 *FID != Intrinsic::vector_reduce_fmul) {
1901 return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
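// Editor's note (hedged summary, not verbatim source behavior): vector-predicated (vp.*)
// intrinsics are generally priced by mapping them to a functional counterpart, either a
// plain opcode (loads, stores, fneg, compares, casts above) or the matching non-VP
// intrinsic ID with the mask and vector-length operands dropped, and then asking for that
// counterpart's cost, so targets without native VP support still get a sensible estimate.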
1920 case Intrinsic::powi:
1922 bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
1923 if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
1924 ShouldOptForSize)) {
1928 unsigned ActiveBits = Exponent.getActiveBits();
1929 unsigned PopCount = Exponent.popcount();
1931 thisT()->getArithmeticInstrCost(
1932 Instruction::FMul, RetTy, CostKind);
1933 if (RHSC->isNegative())
1934 Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
1940 case Intrinsic::cttz:
1942 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
1946 case Intrinsic::ctlz:
1948 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
1952 case Intrinsic::memcpy:
1953 return thisT()->getMemcpyCost(ICA.getInst());
1955 case Intrinsic::masked_scatter: {
1956 const Value *Mask = Args[2];
1958 Align Alignment = I->getParamAlign(1).valueOrOne();
1959 return thisT()->getGatherScatterOpCost(Instruction::Store,
1963 case Intrinsic::masked_gather: {
1964 const Value *Mask = Args[1];
1966 Align Alignment = I->getParamAlign(0).valueOrOne();
1967 return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
1970 case Intrinsic::masked_compressstore: {
1972 const Value *Mask = Args[2];
1973 Align Alignment = I->getParamAlign(1).valueOrOne();
1974 return thisT()->getExpandCompressMemoryOpCost(
1978 case Intrinsic::masked_expandload: {
1979 const Value *Mask = Args[1];
1980 Align Alignment = I->getParamAlign(0).valueOrOne();
1981 return thisT()->getExpandCompressMemoryOpCost(Instruction::Load, RetTy,
1985 case Intrinsic::experimental_vp_strided_store: {
1988 const Value *Mask = Args[3];
1989 const Value *EVL = Args[4];
1993 I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy));
1994 return thisT()->getStridedMemoryOpCost(Instruction::Store,
1995 Data->getType(), Ptr, VarMask,
1998 case Intrinsic::experimental_vp_strided_load: {
2000 const Value *Mask = Args[2];
2001 const Value *EVL = Args[3];
2005 I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy));
2006 return thisT()->getStridedMemoryOpCost(Instruction::Load, RetTy, Ptr,
2009 case Intrinsic::stepvector: {
2015 case Intrinsic::vector_extract: {
2026 case Intrinsic::vector_insert: {
2032 return thisT()->getShuffleCost(
2037 case Intrinsic::vector_splice: {
2043 case Intrinsic::vector_reduce_add:
2044 case Intrinsic::vector_reduce_mul:
2045 case Intrinsic::vector_reduce_and:
2046 case Intrinsic::vector_reduce_or:
2047 case Intrinsic::vector_reduce_xor:
2048 case Intrinsic::vector_reduce_smax:
2049 case Intrinsic::vector_reduce_smin:
2050 case Intrinsic::vector_reduce_fmax:
2051 case Intrinsic::vector_reduce_fmin:
2052 case Intrinsic::vector_reduce_fmaximum:
2053 case Intrinsic::vector_reduce_fminimum:
2054 case Intrinsic::vector_reduce_umax:
2055 case Intrinsic::vector_reduce_umin: {
2059 case Intrinsic::vector_reduce_fadd:
2060 case Intrinsic::vector_reduce_fmul: {
2062 IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
2065 case Intrinsic::fshl:
2066 case Intrinsic::fshr: {
2067 const Value *X = Args[0];
2068 const Value *Y = Args[1];
2069 const Value *Z = Args[2];
2078 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2080 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
2081 Cost += thisT()->getArithmeticInstrCost(
2082 BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
2084 Cost += thisT()->getArithmeticInstrCost(
2085 BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
2091 Cost += thisT()->getArithmeticInstrCost(
2093 : BinaryOperator::URem,
2095 {TTI::OK_UniformConstantValue, TTI::OP_None});
2100 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2103 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
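// Editor's note: when a funnel shift is not legal it is costed via its generic expansion,
// roughly fshl(X, Y, Z) -> (X << (Z mod BW)) | (Y >> (BW - (Z mod BW))): the Shl, LShr,
// Or, Sub and shift-amount modulo (URem/SRem) terms summed above, plus an ICmp and Select
// to guard the zero-shift case when the operation is not a plain rotate (X != Y).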
2108 case Intrinsic::experimental_cttz_elts: {
2113 if (!getTLI()->shouldExpandCttzElements(ArgType))
2126 unsigned EltWidth = getTLI()->getBitWidthForCttzElements(
2137 thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);
2140 thisT()->getArithmeticInstrCost(Instruction::Sub, NewVecTy, CostKind);
2141 Cost += thisT()->getCastInstrCost(Instruction::SExt, NewVecTy,
2145 thisT()->getArithmeticInstrCost(Instruction::And, NewVecTy, CostKind);
2148 NewEltTy, NewVecTy, FMF, I, 1);
2149 Cost += thisT()->getTypeBasedIntrinsicInstrCost(ReducAttrs, CostKind);
2151 thisT()->getArithmeticInstrCost(Instruction::Sub, NewEltTy, CostKind);
2155 case Intrinsic::get_active_lane_mask:
2156 case Intrinsic::experimental_vector_match:
2157 case Intrinsic::experimental_vector_histogram_add:
2158 case Intrinsic::experimental_vector_histogram_uadd_sat:
2159 case Intrinsic::experimental_vector_histogram_umax:
2160 case Intrinsic::experimental_vector_histogram_umin:
2161 return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
2162 case Intrinsic::modf:
2163 case Intrinsic::sincos:
2164 case Intrinsic::sincospi: {
2165 std::optional<unsigned> CallRetElementIndex;
2168 if (ICA.getID() == Intrinsic::modf)
2169 CallRetElementIndex = 0;
2171 if (auto Cost = getMultipleResultIntrinsicVectorLibCallCost(
2172 ICA, CostKind, CallRetElementIndex))
2184 ScalarizationCost = 0;
2193 filterConstantAndDuplicatedOperands(Args, ICA.getArgTypes()),
2199 return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2220 unsigned VecTyIndex = 0;
2221 if (IID == Intrinsic::vector_reduce_fadd ||
2222 IID == Intrinsic::vector_reduce_fmul)
2224 assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
2241 SkipScalarizationCost ? ScalarizationCostPassed : 0;
2242 unsigned ScalarCalls = 1;
2243 Type *ScalarRetTy = RetTy;
2245 if (!SkipScalarizationCost)
2248 ScalarCalls = std::max(ScalarCalls,
2253 for (Type *Ty : Tys) {
2255 if (!SkipScalarizationCost)
2258 ScalarCalls = std::max(ScalarCalls,
2260 Ty = Ty->getScalarType();
2264 if (ScalarCalls == 1)
2269 thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
2271 return ScalarCalls * ScalarCost + ScalarizationCost;
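// Editor's note: when a vector intrinsic must be scalarized, its estimated cost is the
// number of scalar calls (driven by the widest vector operand or result) times the scalar
// intrinsic's cost, plus the insert/extract overhead of splitting the operands and
// rebuilding the result, i.e. ScalarCalls * ScalarCost + ScalarizationCost as returned above.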
2275 case Intrinsic::sqrt:
2278 case Intrinsic::sin:
2281 case Intrinsic::cos:
2284 case Intrinsic::sincos:
2287 case Intrinsic::sincospi:
2288 ISD = ISD::FSINCOSPI;
2290 case Intrinsic::modf:
2293 case Intrinsic::tan:
2296 case Intrinsic::asin:
2299 case Intrinsic::acos:
2302 case Intrinsic::atan:
2305 case Intrinsic::atan2:
2308 case Intrinsic::sinh:
2311 case Intrinsic::cosh:
2314 case Intrinsic::tanh:
2317 case Intrinsic::exp:
2320 case Intrinsic::exp2:
2323 case Intrinsic::exp10:
2326 case Intrinsic::log:
2329 case Intrinsic::log10:
2332 case Intrinsic::log2:
2335 case Intrinsic::ldexp:
2338 case Intrinsic::fabs:
2341 case Intrinsic::canonicalize:
2344 case Intrinsic::minnum:
2347 case Intrinsic::maxnum:
2350 case Intrinsic::minimum:
2351 ISD = ISD::FMINIMUM;
2353 case Intrinsic::maximum:
2354 ISD = ISD::FMAXIMUM;
2356 case Intrinsic::minimumnum:
2357 ISD = ISD::FMINIMUMNUM;
2359 case Intrinsic::maximumnum:
2360 ISD = ISD::FMAXIMUMNUM;
2362 case Intrinsic::copysign:
2365 case Intrinsic::floor:
2368 case Intrinsic::ceil:
2371 case Intrinsic::trunc:
2374 case Intrinsic::nearbyint:
2375 ISD = ISD::FNEARBYINT;
2377 case Intrinsic::rint:
2380 case Intrinsic::lrint:
2383 case Intrinsic::llrint:
2386 case Intrinsic::round:
2389 case Intrinsic::roundeven:
2390 ISD = ISD::FROUNDEVEN;
2392 case Intrinsic::lround:
2395 case Intrinsic::llround:
2398 case Intrinsic::pow:
2401 case Intrinsic::fma:
2404 case Intrinsic::fmuladd:
2407 case Intrinsic::experimental_constrained_fmuladd:
2411 case Intrinsic::lifetime_start:
2412 case Intrinsic::lifetime_end:
2413 case Intrinsic::sideeffect:
2414 case Intrinsic::pseudoprobe:
2415 case Intrinsic::arithmetic_fence:
2417 case Intrinsic::masked_store: {
2419 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2420 return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
2423 case Intrinsic::masked_load: {
2425 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2426 return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
2429 case Intrinsic::experimental_vp_strided_store: {
2431 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
2432 return thisT()->getStridedMemoryOpCost(
2433 Instruction::Store, Ty, nullptr, true,
2436 case Intrinsic::experimental_vp_strided_load: {
2438 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
2439 return thisT()->getStridedMemoryOpCost(
2440 Instruction::Load, Ty, nullptr, true,
2443 case Intrinsic::vector_reduce_add:
2444 case Intrinsic::vector_reduce_mul:
2445 case Intrinsic::vector_reduce_and:
2446 case Intrinsic::vector_reduce_or:
2447 case Intrinsic::vector_reduce_xor:
2448 return thisT()->getArithmeticReductionCost(
2451 case Intrinsic::vector_reduce_fadd:
2452 case Intrinsic::vector_reduce_fmul:
2453 return thisT()->getArithmeticReductionCost(
2455 case Intrinsic::vector_reduce_smax:
2456 case Intrinsic::vector_reduce_smin:
2457 case Intrinsic::vector_reduce_umax:
2458 case Intrinsic::vector_reduce_umin:
2459 case Intrinsic::vector_reduce_fmax:
2460 case Intrinsic::vector_reduce_fmin:
2461 case Intrinsic::vector_reduce_fmaximum:
2462 case Intrinsic::vector_reduce_fminimum:
2465 case Intrinsic::experimental_vector_match: {
2468 unsigned SearchSize = NeedleTy->getNumElements();
2472 EVT SearchVT = getTLI()->getValueType(DL, SearchTy);
2473 if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize))
2479 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, NeedleTy,
2481 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SearchTy,
2485 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SearchTy, RetTy,
2488 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2491 thisT()->getArithmeticInstrCost(BinaryOperator::And, RetTy, CostKind);
2494 case Intrinsic::vector_reverse:
2498 case Intrinsic::experimental_vector_histogram_add:
2499 case Intrinsic::experimental_vector_histogram_uadd_sat:
2500 case Intrinsic::experimental_vector_histogram_umax:
2501 case Intrinsic::experimental_vector_histogram_umin: {
2509 Align Alignment = thisT()->DL.getABITypeAlign(EltTy);
2511 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, PtrsTy,
2513 Cost += thisT()->getMemoryOpCost(Instruction::Load, EltTy, Alignment, 0,
2518 case Intrinsic::experimental_vector_histogram_add:
2520 thisT()->getArithmeticInstrCost(Instruction::Add, EltTy, CostKind);
2522 case Intrinsic::experimental_vector_histogram_uadd_sat: {
2524 Cost += thisT()->getIntrinsicInstrCost(UAddSat, CostKind);
2527 case Intrinsic::experimental_vector_histogram_umax: {
2532 case Intrinsic::experimental_vector_histogram_umin: {
2538 Cost += thisT()->getMemoryOpCost(Instruction::Store, EltTy, Alignment, 0,
2543 case Intrinsic::get_active_lane_mask: {
2545 EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
2546 EVT ArgVT = getTLI()->getValueType(DL, ArgTy, true);
2550 if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgVT))
2559 thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2560 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
2564 case Intrinsic::experimental_memset_pattern:
2569 case Intrinsic::abs:
2572 case Intrinsic::fshl:
2575 case Intrinsic::fshr:
2578 case Intrinsic::smax:
2581 case Intrinsic::smin:
2584 case Intrinsic::umax:
2587 case Intrinsic::umin:
2590 case Intrinsic::sadd_sat:
2593 case Intrinsic::ssub_sat:
2596 case Intrinsic::uadd_sat:
2599 case Intrinsic::usub_sat:
2602 case Intrinsic::smul_fix:
2605 case Intrinsic::umul_fix:
2608 case Intrinsic::sadd_with_overflow:
2611 case Intrinsic::ssub_with_overflow:
2614 case Intrinsic::uadd_with_overflow:
2617 case Intrinsic::usub_with_overflow:
2620 case Intrinsic::smul_with_overflow:
2623 case Intrinsic::umul_with_overflow:
2626 case Intrinsic::fptosi_sat:
2627 case Intrinsic::fptoui_sat: {
2633 if (!SrcLT.first.isValid() || !RetLT.first.isValid())
2639 case Intrinsic::ctpop:
2645 case Intrinsic::ctlz:
2648 case Intrinsic::cttz:
2651 case Intrinsic::bswap:
2654 case Intrinsic::bitreverse:
2657 case Intrinsic::ucmp:
2660 case Intrinsic::scmp:
2666 Type *LegalizeTy = ST ? ST->getContainedType(0) : RetTy;
2672 if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
2682 return (LT.first * 2);
2684 return (LT.first * 1);
2688 return (LT.first * 2);
2692 case Intrinsic::fmuladd: {
2696 return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
2698 thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
2701 case Intrinsic::experimental_constrained_fmuladd: {
2703 Intrinsic::experimental_constrained_fmul, RetTy, Tys);
2705 Intrinsic::experimental_constrained_fadd, RetTy, Tys);
2706 return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
2707 thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
2709 case Intrinsic::smin:
2710 case Intrinsic::smax:
2711 case Intrinsic::umin:
2712 case Intrinsic::umax: {
2715 bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
2719 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2721 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2725 case Intrinsic::sadd_with_overflow:
2726 case Intrinsic::ssub_with_overflow: {
2729 unsigned Opcode = IID == Intrinsic::sadd_with_overflow
2730 ? BinaryOperator::Add
2731 : BinaryOperator::Sub;
2738 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2740 2 * thisT()->getCmpSelInstrCost(Instruction::ICmp, SumTy, OverflowTy,
2742 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
2746 case Intrinsic::uadd_with_overflow:
2747 case Intrinsic::usub_with_overflow: {
2750 unsigned Opcode = IID == Intrinsic::uadd_with_overflow
2751 ? BinaryOperator::Add
2752 : BinaryOperator::Sub;
2758 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2759 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
2763 case Intrinsic::smul_with_overflow:
2764 case Intrinsic::umul_with_overflow: {
2769 bool IsSigned = IID == Intrinsic::smul_with_overflow;
2771 unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
2775 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
2777 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2778 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
2780 Cost += thisT()->getArithmeticInstrCost(
2785 Cost += thisT()->getArithmeticInstrCost(
2786 Instruction::AShr, MulTy, CostKind,
2790 Cost += thisT()->getCmpSelInstrCost(
2794 case Intrinsic::sadd_sat:
2795 case Intrinsic::ssub_sat: {
2801 ? Intrinsic::sadd_with_overflow
2802 : Intrinsic::ssub_with_overflow;
2809 nullptr, ScalarizationCostPassed);
2810 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2811 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2813 Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
2817 case Intrinsic::uadd_sat:
2818 case Intrinsic::usub_sat: {
2823 ? Intrinsic::uadd_with_overflow
2824 : Intrinsic::usub_with_overflow;
2828 nullptr, ScalarizationCostPassed);
2829 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2831 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2835 case Intrinsic::smul_fix:
2836 case Intrinsic::umul_fix: {
2841 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
2845 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
2847 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2848 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
2850 Cost += thisT()->getArithmeticInstrCost(
2853 Cost += thisT()->getArithmeticInstrCost(
2856 Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
2859 case Intrinsic::abs: {
2864 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2866 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2869 Cost += thisT()->getArithmeticInstrCost(
2870 BinaryOperator::Sub, RetTy, CostKind,
2874 case Intrinsic::fshl:
2875 case Intrinsic::fshr: {
2881 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2883 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
2885 thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy, CostKind);
2886 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
2891 Cost += thisT()->getArithmeticInstrCost(
2893 : BinaryOperator::URem,
2894 RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2895 {TTI::OK_UniformConstantValue, TTI::OP_None});
2897 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2899 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2903 case Intrinsic::fptosi_sat:
2904 case Intrinsic::fptoui_sat: {
2907 Type *FromTy = Tys[0];
2908 bool IsSigned = IID == Intrinsic::fptosi_sat;
2913 Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
2916 Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
2917 Cost += thisT()->getCastInstrCost(
2918 IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
2922 Cost += thisT()->getCmpSelInstrCost(
2924 Cost += thisT()->getCmpSelInstrCost(
2929 case Intrinsic::ucmp:
2930 case Intrinsic::scmp: {
2931 Type *CmpTy = Tys[0];
2934 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
2937 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
2944 Cost += 2 * thisT()->getCmpSelInstrCost(
2945 BinaryOperator::Select, RetTy, CondTy,
2950 2 * thisT()->getCastInstrCost(CastInst::ZExt, RetTy, CondTy,
2952 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
2957 case Intrinsic::maximumnum:
2958 case Intrinsic::minimumnum: {
2968 IID == Intrinsic::maximumnum ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;
2973 thisT()->getIntrinsicInstrCost(FCanonicalizeAttrs, CostKind);
2974 return LT.first + FCanonicalizeCost * 2;
2994 if (!SkipScalarizationCost) {
2995 ScalarizationCost = 0;
2996 for (Type *RetVTy : RetVTys) {
3005 for (Type *Ty : Tys) {
3006 if (Ty->isVectorTy())
3007 Ty = Ty->getScalarType();
3012 thisT()->getIntrinsicInstrCost(Attrs, CostKind);
3013 for (Type *Ty : Tys) {
3018 ScalarCalls = std::max(ScalarCalls,
3022 return ScalarCalls * ScalarCost + ScalarizationCost;
3026 return SingleCallCost;
3048 if (!LT.first.isValid())
3053 Tp && LT.second.isFixedLengthVector() &&
3058 return divideCeil(FTp->getNumElements(), SubTp->getNumElements());
3060 return LT.first.getValue();
3097 Type *ScalarTy = Ty->getElementType();
3099 if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
3109 return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
3111 thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
3115 unsigned NumReduxLevels = Log2_32(NumVecElts);
3118 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
3119 unsigned LongVectorCount = 0;
3121 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
3122 while (NumVecElts > MVTLen) {
3125 ShuffleCost += thisT()->getShuffleCost(
3127 ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
3132 NumReduxLevels -= LongVectorCount;
3144 NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
3145 return ShuffleCost + ArithCost +
3146 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
3180 return ExtractCost + ArithCost;
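// Editor's note: getTreeReductionCost models a pairwise (log2) reduction: the vector is
// split while it is wider than the widest legal type, each remaining level costs one
// shuffle plus one vector arithmetic op, and a final ExtractElement produces the scalar
// result. getOrderedReductionCost instead charges a chain of scalar operations
// (ExtractCost + ArithCost above), since strict in-order FP reductions cannot be
// reassociated into a tree.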
3185 std::optional<FastMathFlags> FMF,
3187 assert(Ty && "Unknown reduction vector type");
3203 Type *ScalarTy = Ty->getElementType();
3205 unsigned NumReduxLevels = Log2_32(NumVecElts);
3208 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
3209 unsigned LongVectorCount = 0;
3211 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
3212 while (NumVecElts > MVTLen) {
3216 ShuffleCost += thisT()->getShuffleCost(
3225 NumReduxLevels -= LongVectorCount;
3238 return ShuffleCost + MinMaxCost +
3239 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
3245 VectorType *Ty, std::optional<FastMathFlags> FMF,
3248 FTy && IsUnsigned && Opcode == Instruction::Add &&
3256 return thisT()->getCastInstrCost(Instruction::BitCast, IntTy, FTy,
3258 thisT()->getIntrinsicInstrCost(ICA, CostKind);
3264 thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
3266 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
3269 return RedCost + ExtCost;
3279 assert((RedOpcode == Instruction::Add || RedOpcode == Instruction::Sub) &&
3280 "The reduction opcode is expected to be Add or Sub.");
3283 RedOpcode, ExtTy, std::nullopt, CostKind);
3285 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
3289 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
3291 return RedCost + MulCost + 2 * ExtCost;
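// Editor's note: getMulAccReductionCost prices a multiply-accumulate (dot-product style)
// reduction as an add/sub reduction in the extended type, plus one multiply in that type,
// plus two extends of the inputs (RedCost + MulCost + 2 * ExtCost above); targets with a
// native dot-product instruction are expected to override this with a cheaper estimate.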
size_type count() const
count - Returns the number of bits which are set.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static CmpInst::Predicate getGTPredicate(Intrinsic::ID ID)
static CmpInst::Predicate getLTPredicate(Intrinsic::ID ID)
This class represents a range of values.
A parsed version of the target data layout string in and methods for querying it.
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getFixed(ScalarTy MinVal)
constexpr bool isScalar() const
Exactly one element.
Convenience struct for specifying and reasoning about fast-math flags.
Container class for subtarget features.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
AttributeList getAttributes() const
Return the attribute list for this Function.
The core instruction combiner logic.
static InstructionCost getInvalid(CostType Val=0)
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
FastMathFlags getFlags() const
const TargetLibraryInfo * getLibInfo() const
const SmallVectorImpl< Type * > & getArgTypes() const
Type * getReturnType() const
bool skipScalarizationCost() const
const SmallVectorImpl< const Value * > & getArgs() const
InstructionCost getScalarizationCost() const
const IntrinsicInst * getInst() const
Intrinsic::ID getID() const
bool isTypeBasedOnly() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Represents a single loop in the control flow graph.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Analysis providing profile information.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
static StackOffset getScalable(int64_t Scalable)
static StackOffset getFixed(int64_t Fixed)
StringRef - Represent a constant reference to a string, i.e.
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Provides information about what library functions are available for the current target.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool preferSelectsOverBooleanArithmetic(EVT VT) const
Should we prefer selects to doing arithmetic on boolean types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
@ TypeScalarizeScalableVector
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumC...
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool isSuitableForBitTests(const DenseMap< const BasicBlock *, unsigned int > &DestCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests ...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to ...
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a ...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
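A minimal sketch of how these queries typically compose, assuming a TargetLoweringBase reference and a DataLayout are at hand (the helper name is illustrative): the IR opcode is first mapped to its ISD node, the IR type to an EVT, and only then is legality asked:

  #include "llvm/CodeGen/TargetLowering.h"
  #include "llvm/IR/DataLayout.h"
  using namespace llvm;

  bool isCheaplyLowered(const TargetLoweringBase &TLI, const DataLayout &DL,
                        unsigned Opcode, Type *Ty) {
    int ISDOpc = TLI.InstructionOpcodeToISD(Opcode);
    EVT VT = TLI.getValueType(DL, Ty); // Ty must be a first-class scalar/vector type
    if (ISDOpc == 0 || !VT.isSimple())
      return false; // no ISD equivalent, or a type that still needs legalization
    return TLI.isOperationLegalOrCustom(ISDOpc, VT) ||
           TLI.isOperationLegalOrPromote(ISDOpc, VT);
  }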
TargetMachine - Primary interface to the complete machine description for the target machine.
TargetSubtargetInfo - Generic base class for all target subtargets.
Triple - Helper class for working with autoconf configuration names.
ArchType getArch() const
Get the parsed architecture type of this triple.
LLVM_ABI bool isArch64Bit() const
Test whether the architecture is 64-bit.
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
bool isVoidTy() const
Return true if this is 'void'.
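A small sketch combining a few of these queries (the function name is illustrative): widen the lanes of an integer or integer-vector type while preserving the element count, e.g. <4 x i8> becomes <4 x i32>:

  #include "llvm/IR/Type.h"
  using namespace llvm;

  Type *widenLanesTo32(Type *Ty) {
    if (!Ty->getScalarType()->isIntegerTy() || Ty->getScalarSizeInBits() >= 32)
      return Ty;                       // leave non-integer or already-wide types alone
    return Ty->getWithNewBitWidth(32); // keeps the old number of lanes
  }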
Value * getOperand(unsigned i) const
static LLVM_ABI bool isVPBinOp(Intrinsic::ID ID)
static LLVM_ABI bool isVPCast(Intrinsic::ID ID)
static LLVM_ABI bool isVPCmp(Intrinsic::ID ID)
static LLVM_ABI std::optional< unsigned > getFunctionalOpcodeForVP(Intrinsic::ID ID)
static LLVM_ABI std::optional< Intrinsic::ID > getFunctionalIntrinsicIDForVP(Intrinsic::ID ID)
static LLVM_ABI bool isVPIntrinsic(Intrinsic::ID)
static LLVM_ABI bool isVPReduction(Intrinsic::ID ID)
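A minimal sketch of mapping a vector-predicated intrinsic back to a plain IR opcode (the wrapper name is illustrative); getFunctionalOpcodeForVP returns std::nullopt when there is no unpredicated equivalent:

  #include "llvm/IR/IntrinsicInst.h"
  #include <optional>
  using namespace llvm;

  std::optional<unsigned> functionalOpcodeFor(Intrinsic::ID ID) {
    if (!VPIntrinsic::isVPIntrinsic(ID))
      return std::nullopt;
    // e.g. vp.add maps to Instruction::Add.
    return VPIntrinsic::getFunctionalOpcodeForVP(ID);
  }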
Value - LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
Provides info so a possible vectorization of a function can be computed.
VectorType - Base class of all SIMD vector types.
static VectorType * getHalfElementsVectorType(VectorType *VTy)
This static method returns a VectorType with half as many elements as the input type and the same ele...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
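For example (the helper name is illustrative), building a scalable vector type and then splitting it in half keeps the element type and the scalable flag:

  #include "llvm/IR/DerivedTypes.h"
  using namespace llvm;

  VectorType *halveScalableFloatVec(LLVMContext &Ctx) {
    // <vscale x 4 x float>
    auto *VTy = VectorType::get(Type::getFloatTy(Ctx), ElementCount::getScalable(4));
    // <vscale x 2 x float>
    return VectorType::getHalfElementsVectorType(VTy);
  }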
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A to NewBitWidth bits.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
ISD namespace - This namespace contains an enum which represents all of the SelectionDAG node types a...
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ SSUBO
Same for subtraction.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ SMULO
Same for multiplication.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
LLVM_ABI bool isTargetIntrinsic(ID IID)
isTargetIntrinsic - Returns true if IID is an intrinsic specific to a certain target.
LLVM_ABI Libcall getSINCOSPI(EVT RetVT)
getSINCOSPI - Return the SINCOSPI_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getMODF(EVT RetVT)
getMODF - Return the MODF_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getSINCOS(EVT RetVT)
getSINCOS - Return the SINCOS_* value for the given types, or UNKNOWN_LIBCALL if there is none.
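A minimal sketch of the usual probe for such a libcall (the helper name is illustrative, and the header providing the RTLIB declarations has moved between LLVM versions, so the include is an assumption):

  #include "llvm/CodeGen/ValueTypes.h"
  // Assumed location of the RTLIB::getSINCOS declaration; adjust per LLVM version.
  #include "llvm/CodeGen/RuntimeLibcallUtil.h"
  using namespace llvm;

  bool hasSincosLibcall(EVT RetVT) {
    RTLIB::Libcall LC = RTLIB::getSINCOS(RetVT);
    return LC != RTLIB::UNKNOWN_LIBCALL;
  }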
DiagnosticInfoOptimizationBase::Argument NV
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Type * toScalarizedTy(Type *Ty)
A helper for converting vectorized types to scalarized (non-vector) types.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
LLVM_ABI unsigned getArithmeticReductionInstruction(Intrinsic::ID RdxID)
Returns the arithmetic instruction opcode used when expanding a reduction.
bool isVectorizedTy(Type *Ty)
Returns true if Ty is a vector type or a struct of vector types where all vector types share the same...
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
auto dyn_cast_or_null(const Y &Val)
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
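A small, self-contained sketch of these range adaptors on SmallVectors (the data is arbitrary); zip_equal asserts that its inputs have the same length, and enumerate pairs each element with its index:

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/SmallVector.h"
  using namespace llvm;

  int rangeHelpersDemo() {
    SmallVector<int, 4> A = {1, 2, 3, 4};
    SmallVector<int, 4> B = {2, 4, 6, 8};

    bool AllPositive = all_of(A, [](int X) { return X > 0; });  // true
    bool AnyOdd = any_of(A, [](int X) { return X % 2 != 0; });  // true

    int Sum = 0;
    for (auto [Idx, V] : enumerate(A)) // (0,1), (1,2), (2,3), (3,4)
      Sum += static_cast<int>(Idx) * V;
    for (auto [X, Y] : zip_equal(A, B)) // element-wise pairs
      Sum += X + Y;
    return AllPositive && AnyOdd ? Sum : 0;
  }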
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
ElementCount getVectorizedTypeVF(Type *Ty)
Returns the number of vector elements for a vectorized type.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
constexpr int PoisonMaskElem
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
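For example (the values are arbitrary):

  #include "llvm/Support/MathExtras.h"
  using namespace llvm;

  void mathHelpersDemo(uint32_t Bytes) {
    unsigned Chunks = divideCeil(Bytes, 16u); // e.g. 40 -> 3 sixteen-byte chunks
    bool IsPow2 = isPowerOf2_32(Bytes);       // true only for 1, 2, 4, 8, ...
    unsigned FloorLog2 = Log2_32(Bytes);      // -1 (as unsigned) when Bytes == 0
    (void)Chunks; (void)IsPow2; (void)FloorLog2;
  }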
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
cl::opt< unsigned > PartialUnrollingThreshold
LLVM_ABI bool isVectorizedStructTy(StructType *StructTy)
Returns true if StructTy is an unpacked literal struct where all elements are vectors of matching ele...
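A minimal sketch tying these helpers together, assuming they are reachable through LLVM's vector-type utility header (the include and the function name are assumptions): a "vectorized" type is either a vector or a struct whose members are vectors sharing one element count:

  // Assumed header for the helpers listed above; adjust per LLVM version.
  #include "llvm/IR/VectorTypeUtils.h"
  using namespace llvm;

  void describeVectorized(Type *Ty) {
    if (!isVectorizedTy(Ty))
      return;
    ElementCount VF = getVectorizedTypeVF(Ty); // element count shared by all members
    Type *ScalarTy = toScalarizedTy(Ty);       // same shape with scalar elements
    (void)VF;
    (void)ScalarTy;
  }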
Align - This struct is a compact representation of a valid (non-zero power of two) alignment.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
ElementCount getVectorElementCount() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
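A short sketch of the usual round trip (the function name is illustrative): an IR type is converted to an EVT, inspected, and mapped back to an IR type:

  #include "llvm/CodeGen/ValueTypes.h"
  #include "llvm/IR/Type.h"
  using namespace llvm;

  void describeEVT(Type *Ty, LLVMContext &Ctx) {
    EVT VT = EVT::getEVT(Ty, /*HandleUnknown=*/true);
    if (!VT.isSimple() || !VT.isVector())
      return;                                  // extended or non-vector types: nothing to report
    ElementCount EC = VT.getVectorElementCount();
    EVT EltVT = VT.getScalarType();
    Type *RoundTrip = VT.getTypeForEVT(Ctx);   // LLVM type corresponding to VT
    (void)EC; (void)EltVT; (void)RoundTrip;
  }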
HardwareLoopInfo - Attributes of a target dependent hardware loop.
static StringRef getLibcallImplName(RTLIB::LibcallImpl CallImpl)
Get the libcall routine name for the specified libcall implementation.
TargetLoweringBase::AddrMode - This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...