#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
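
// TargetTransformInfoImpl.h provides the conservative default implementations
// of the TargetTransformInfo (TTI) cost and capability queries. Target
// backends derive from these classes and override only the hooks whose
// generic answer is wrong or too pessimistic for them.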
  for (const Value *Operand : Operands)
  // ...
  return SI.getNumCases();
  virtual std::pair<KnownBits, KnownBits>
  // ...
           "expected pointer or pointer vector type");
    // ...
    if (DL.isNonIntegralAddressSpace(FromAS))
      return std::pair(KnownBits(DL.getPointerSizeInBits(FromAS)),
    // ...
        CastI->getDestAddressSpace(), *CastI->getPointerOperand());
    FromPtrBits = KB.second;
    // ...
    return {FromPtrBits, ToPtrBits};
    unsigned ToASBitSize = DL.getPointerSizeInBits(ToAS);
    // ...
    if (DL.isNonIntegralAddressSpace(FromAS))
                                                  unsigned DstAS) const {
    return {DL.getPointerSizeInBits(SrcAS), 0};
  virtual std::pair<const Value *, unsigned>
  // ...
    return std::make_pair(nullptr, -1);
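
  // isLoweredToCall(): the generic implementation assumes a call to one of
  // the math-library routines listed below is expanded inline by the backend
  // rather than emitted as a real call, while anything with local linkage or
  // without a recognizable name is assumed to remain a genuine call.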
    assert(F && "A concrete function must be provided to this routine.");
    // ...
    if (F->isIntrinsic())
      // ...
    if (F->hasLocalLinkage() || !F->hasName())
      // ...
    if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
        Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
        Name == "fmin" || Name == "fminf" || Name == "fminl" ||
        Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
        Name == "sin" || Name == "sinf" || Name == "sinl" ||
        Name == "cos" || Name == "cosf" || Name == "cosl" ||
        Name == "tan" || Name == "tanf" || Name == "tanl" ||
        Name == "asin" || Name == "asinf" || Name == "asinl" ||
        Name == "acos" || Name == "acosf" || Name == "acosl" ||
        Name == "atan" || Name == "atanf" || Name == "atanl" ||
        Name == "atan2" || Name == "atan2f" || Name == "atan2l" ||
        Name == "sinh" || Name == "sinhf" || Name == "sinhl" ||
        Name == "cosh" || Name == "coshf" || Name == "coshl" ||
        Name == "tanh" || Name == "tanhf" || Name == "tanhl" ||
        Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl" ||
        Name == "exp10" || Name == "exp10l" || Name == "exp10f")
      // ...

    if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
        Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
        Name == "floorf" || Name == "ceil" || Name == "round" ||
        Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
        // ...
  virtual std::optional<Instruction *>
  // ...
  virtual std::optional<Value *>
  // ...
                                   bool &KnownBitsComputed) const {
  // ...
          SimplifyAndSetOp) const {
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale, unsigned AddrSpace,
                                     // ...
                                     int64_t ScalableOffset = 0) const {
    // ...
    return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
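
  // The default addressing-mode check accepts only plain "reg" and "reg + reg"
  // forms: no global base, a zero offset, and a scale of 0 or 1.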
    unsigned DataSize = DL.getTypeStoreSize(DataType);
    // ...
    unsigned DataSize = DL.getTypeStoreSize(DataType);
    // ...
                               Align Alignment) const {
    // ...
                                Align Alignment) const {
    // ...
                             Align Alignment) const {
    // ...
                                     unsigned AddrSpace) const {
    // ...
                                  Type *DataType) const {
                                        bool HasBaseReg, int64_t Scale,
                                        unsigned AddrSpace) const {
    // ...
                              Scale, AddrSpace, nullptr,
  virtual bool useAA() const { return false; }
  // ...
                                         unsigned ScalarOpdIdx) const {
  // ...
                                              unsigned *Fast) const {
                                          Type *Ty = nullptr) const {
    // ...
      return "Generic::Unknown Register Class";
    // ...
      return "Generic::ScalarRC";
    // ...
      return "Generic::VectorRC";
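
  // These strings name the generic register classes the default implementation
  // reports when a target does not describe its own: an unknown class plus one
  // scalar and one vector class.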
  virtual std::optional<unsigned> getMaxVScale() const { return std::nullopt; }
  // ...
  virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
                                 const Instruction &I,
                                 bool &AllowPromotionWithoutCommonHeader) const {
    AllowPromotionWithoutCommonHeader = false;
  virtual std::optional<unsigned>
  // ...
  virtual std::optional<unsigned>
  // ...
                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches, bool HasCall) const {
  // ...
      unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
    auto IsWidenableCondition = [](const Value *V) {
      // ...
        if (II->getIntrinsicID() == Intrinsic::experimental_widenable_condition)
      // ...
    };
    // ...
    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem:
      // ...
    case Instruction::And:
    case Instruction::Or:
      if (any_of(Args, IsWidenableCondition))
        // ...
    if (Ty->getScalarType()->isFloatingPointTy())
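
    // In the default arithmetic-cost hook, integer and floating-point
    // division/remainder are reported as expensive, and an 'and'/'or' fed by
    // llvm.experimental.widenable.condition is treated as free because guard
    // widening is expected to fold it away; floating-point scalar types get a
    // small fixed cost when costing for latency.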
    case Instruction::IntToPtr: {
      unsigned SrcSize = Src->getScalarSizeInBits();
      if (DL.isLegalInteger(SrcSize) &&
          SrcSize <= DL.getPointerTypeSizeInBits(Dst))
        // ...
    }
    case Instruction::PtrToAddr: {
      unsigned DstSize = Dst->getScalarSizeInBits();
      assert(DstSize == DL.getAddressSizeInBits(Src));
      if (DL.isLegalInteger(DstSize))
        // ...
    }
    case Instruction::PtrToInt: {
      unsigned DstSize = Dst->getScalarSizeInBits();
      if (DL.isLegalInteger(DstSize) &&
          DstSize >= DL.getPointerTypeSizeInBits(Src))
        // ...
    }
    case Instruction::BitCast:
      if (Dst == Src || (Dst->isPointerTy() && Src->isPointerTy()))
        // ...
    case Instruction::Trunc: {
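
    // For these casts the generic rule is that conversions the target can fold
    // away cost nothing: int-to-ptr and ptr-to-int between a legal integer
    // width and the pointer width, ptr-to-addr into a legal integer, and
    // bitcasts that do not change the representation (identical types, or
    // pointer to pointer).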
      ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx,
      // ...
                                    unsigned Index) const {
      // ...
      const APInt &DemandedDstElts,
      // ...
    if (Opcode == Instruction::InsertValue &&
      // ...
      bool UseMaskForCond, bool UseMaskForGaps) const {
    switch (ICA.getID()) {
    // ...
    case Intrinsic::allow_runtime_check:
    case Intrinsic::allow_ubsan_check:
    case Intrinsic::annotation:
    case Intrinsic::assume:
    case Intrinsic::sideeffect:
    case Intrinsic::pseudoprobe:
    case Intrinsic::arithmetic_fence:
    case Intrinsic::dbg_assign:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::dbg_label:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
    case Intrinsic::is_constant:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
    case Intrinsic::experimental_gc_result:
    case Intrinsic::experimental_gc_relocate:
    case Intrinsic::coro_alloc:
    case Intrinsic::coro_begin:
    case Intrinsic::coro_begin_custom_abi:
    case Intrinsic::coro_free:
    case Intrinsic::coro_end:
    case Intrinsic::coro_frame:
    case Intrinsic::coro_size:
    case Intrinsic::coro_align:
    case Intrinsic::coro_suspend:
    case Intrinsic::coro_subfn_addr:
    case Intrinsic::threadlocal_address:
    case Intrinsic::experimental_widenable_condition:
    case Intrinsic::ssa_copy:
      // ...
    case Intrinsic::bswap:
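
    // Everything in the case list above is treated as free by the default
    // intrinsic-cost hook: assumes, debug intrinsics, lifetime and invariant
    // markers, annotations, coroutine bookkeeping, widenable conditions and
    // the like either vanish during lowering or carry no machine code of
    // their own. The bswap case that follows is handled separately.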
    switch (MICA.getID()) {
    case Intrinsic::masked_scatter:
    case Intrinsic::masked_gather:
    case Intrinsic::masked_load:
    case Intrinsic::masked_store:
    case Intrinsic::vp_scatter:
    case Intrinsic::vp_gather:
    case Intrinsic::masked_compressstore:
    case Intrinsic::masked_expandload:
                             std::optional<FastMathFlags> FMF,
  // ...
      VectorType *Ty, std::optional<FastMathFlags> FMF,
  // ...
                                     bool CanCreate = true) const {
                                          unsigned SrcAddrSpace, unsigned DestAddrSpace,
      // ...
      std::optional<uint32_t> AtomicElementSize) const {
    return AtomicElementSize ? Type::getIntNTy(Context, *AtomicElementSize * 8)
    // ...
      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
      // ...
      std::optional<uint32_t> AtomicCpySize) const {
    unsigned OpSizeInBytes = AtomicCpySize.value_or(1);
    // ...
    for (unsigned i = 0; i != RemainingBytes; i += OpSizeInBytes)
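
  // The memcpy-lowering hooks above pick the element type for the expanded
  // copy loop: a plain byte type by default, or an integer of exactly the
  // requested atomic element size when one is given, and the residual loop
  // emits one such operation per remaining chunk of RemainingBytes.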
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  // ...
                                        unsigned DefaultCallPenalty) const {
    return DefaultCallPenalty;
  // ...
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
                                   unsigned AddrSpace) const {
  // ...
                                    unsigned AddrSpace) const {
  // ...
                                           unsigned ChainSizeInBytes,
  // ...
                                            unsigned ChainSizeInBytes,
      unsigned MaxRequiredSize =
          VT->getElementType()->getPrimitiveSizeInBits().getFixedValue();

      unsigned MinRequiredSize = 0;
      for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
        if (auto *IntElement =
                // ...
          bool signedElement = IntElement->getValue().isNegative();
          // ...
          unsigned ElementMinRequiredSize =
              IntElement->getValue().getSignificantBits() - 1;
          // ...
          isSigned |= signedElement;
          // ...
          MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
        // ...
          return MaxRequiredSize;
        // ...
      return MinRequiredSize;
    // ...
      isSigned = CI->getValue().isNegative();
      return CI->getValue().getSignificantBits() - 1;
    // ...
      return Cast->getSrcTy()->getScalarSizeInBits() - 1;
    // ...
      return Cast->getSrcTy()->getScalarSizeInBits();
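
    // This helper computes the minimum number of bits needed to hold a value
    // when narrowing vector elements: for a constant vector it takes the
    // largest getSignificantBits() - 1 over the integer elements (marking the
    // result signed if any element is negative), for a lone ConstantInt it
    // uses that constant's significant bits, and for sext/zext it falls back
    // to the source scalar width (minus the sign bit for sext). For example, a
    // constant element of 100 has getSignificantBits() == 8, so it contributes
    // 7 value bits.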
                                    const SCEV *Ptr) const {
  // ...
                                 int64_t MergeDistance) const {
  // ...

template <typename T>
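// This template parameter begins the CRTP helper base (in upstream LLVM it is
// TargetTransformInfoImplCRTPBase<T>): it implements the heavyweight queries
// such as getGEPCost and getInstructionCost once, and dispatches back into the
// concrete target implementation through static_cast<const T *>(this).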
    assert(PointeeType && Ptr && "can't get GEPCost of nullptr");
    // ...
    bool HasBaseReg = (BaseGV == nullptr);
    // ...
    auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
    APInt BaseOffset(PtrSizeBits, 0);
    // ...
    Type *TargetType = nullptr;
    // ...
    if (Operands.empty())
      // ...
    for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
      TargetType = GTI.getIndexedType();
      // ...
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // ...
        assert(ConstIdx && "Unexpected GEP index");
        // ...
        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
      // ...
        int64_t ElementSize =
            GTI.getSequentialElementStride(DL).getFixedValue();
        // ...
          Scale = ElementSize;
    // ...
      AccessType = TargetType;
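
    // getGEPCost walks the indices with a gep_type_iterator: constant struct
    // field indices are folded into BaseOffset via the struct layout, while a
    // variable index over an array contributes its element stride as the Scale
    // of a hypothetical addressing mode. The elided tail then asks the target
    // whether that (BaseGV, BaseOffset, Scale) combination is a legal
    // addressing mode to decide whether the GEP is effectively free.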
    for (const Value *V : Ptrs) {
      // ...
      if (Info.isSameBase() && V != Base) {
        if (GEP->hasAllConstantIndices())
          // ...
            {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None},
          // ...
            GEP->getSourceElementType(), GEP->getPointerOperand(), Indices,
    auto *TargetTTI = static_cast<const T *>(this);
    // ...
      if (const Function *F = CB->getCalledFunction()) {
        if (!TargetTTI->isLoweredToCall(F))
          // ...
      }
    // ...
    Type *Ty = U->getType();
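
    // From here getInstructionCost switches over the IR opcode and forwards
    // each case to the matching specialized hook on the concrete target:
    // getCFInstrCost for control flow, getArithmeticInstrCost,
    // getCastInstrCost, getMemoryOpCost, getCmpSelInstrCost,
    // getVectorInstrCost and getShuffleCost for the rest.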
    case Instruction::Call: {
      // ...
      return TargetTTI->getIntrinsicInstrCost(CostAttrs, CostKind);
    }
    case Instruction::Br:
    case Instruction::Ret:
    case Instruction::PHI:
    case Instruction::Switch:
      return TargetTTI->getCFInstrCost(Opcode, CostKind, I);
    case Instruction::Freeze:
      // ...
    case Instruction::ExtractValue:
    case Instruction::InsertValue:
      return TargetTTI->getInsertExtractValueCost(Opcode, CostKind);
    case Instruction::Alloca:
      // ...
    case Instruction::GetElementPtr: {
      // ...
      Type *AccessType = nullptr;
      // ...
      if (GEP->hasOneUser() && I)
        AccessType = I->user_back()->getAccessType();
      // ...
      return TargetTTI->getGEPCost(GEP->getSourceElementType(),
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::FNeg: {
      // ...
      if (Opcode != Instruction::FNeg)
        // ...
      return TargetTTI->getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                               Op2Info, Operands, I);
    }
    case Instruction::IntToPtr:
    case Instruction::PtrToAddr:
    case Instruction::PtrToInt:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast:
    case Instruction::FPExt:
    case Instruction::SExt:
    case Instruction::ZExt:
    case Instruction::AddrSpaceCast: {
      Type *OpTy = Operands[0]->getType();
      return TargetTTI->getCastInstrCost(
          // ...
    }
    case Instruction::Store: {
      // ...
      Type *ValTy = Operands[0]->getType();
      // ...
      return TargetTTI->getMemoryOpCost(Opcode, ValTy, SI->getAlign(),
          // ...
    }
    case Instruction::Load: {
      // ...
      Type *LoadType = U->getType();
      // ...
        LoadType = TI->getDestTy();
      // ...
      return TargetTTI->getMemoryOpCost(Opcode, LoadType, LI->getAlign(),
                                        // ...
                                        {TTI::OK_AnyValue, TTI::OP_None}, I);
    }
    case Instruction::Select: {
      const Value *Op0, *Op1;
      // ...
        return TargetTTI->getArithmeticInstrCost(
            // ...
            CostKind, Op1Info, Op2Info, Operands, I);
      // ...
      Type *CondTy = Operands[0]->getType();
      return TargetTTI->getCmpSelInstrCost(Opcode, U->getType(), CondTy,
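
    // A select whose condition and operands match a logical-and / logical-or
    // pattern is priced as the corresponding And/Or arithmetic instruction;
    // any other select goes through getCmpSelInstrCost with the condition
    // type passed alongside the result type.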
    case Instruction::ICmp:
    case Instruction::FCmp: {
      // ...
      Type *ValTy = Operands[0]->getType();
      // ...
      return TargetTTI->getCmpSelInstrCost(Opcode, ValTy, U->getType(),
          // ...
    }
    case Instruction::InsertElement: {
      // ...
      if (CI->getValue().getActiveBits() <= 32)
        Idx = CI->getZExtValue();
      return TargetTTI->getVectorInstrCost(*IE, Ty, CostKind, Idx,
    case Instruction::ShuffleVector: {
      // ...
      int NumSubElts, SubIndex;
      // ...
      if (all_of(Mask, [](int M) { return M < 0; }))
        // ...
      if (Shuffle->changesLength()) {
        // ...
        if (Shuffle->increasesLength() && Shuffle->isIdentityWithPadding())
          // ...
        if (Shuffle->isExtractSubvectorMask(SubIndex))
          // ...
              VecSrcTy, Mask, CostKind, SubIndex,
              VecTy, Operands, Shuffle);

        if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
          return TargetTTI->getShuffleCost(
              // ...

        int ReplicationFactor, VF;
        if (Shuffle->isReplicationMask(ReplicationFactor, VF)) {
          // ...
            DemandedDstElts.setBit(I.index());
          // ...
          return TargetTTI->getReplicationShuffleCost(
              VecSrcTy->getElementType(), ReplicationFactor, VF,
              // ...
        }

        NumSubElts = VecSrcTy->getElementCount().getKnownMinValue();
        // ...
        if (Shuffle->increasesLength()) {
          for (int &M : AdjustMask)
            M = M >= NumSubElts ? (M + (Mask.size() - NumSubElts)) : M;
          // ...
          return TargetTTI->getShuffleCost(
              // ...
              VecTy, AdjustMask, CostKind, 0, nullptr, Operands, Shuffle);
        }
        // ...
            VecSrcTy, VecSrcTy, AdjustMask, CostKind, 0, nullptr, Operands,
            // ...
        std::iota(ExtractMask.begin(), ExtractMask.end(), 0);
        return ShuffleCost + TargetTTI->getShuffleCost(
            // ...
            ExtractMask, CostKind, 0, VecTy, {}, Shuffle);
      }
      if (Shuffle->isIdentity())
        // ...
      if (Shuffle->isReverse())
        return TargetTTI->getShuffleCost(TTI::SK_Reverse, VecTy, VecSrcTy, Mask,
                                         // ...
      if (Shuffle->isTranspose())
        // ...
                                         Mask, CostKind, 0, nullptr, Operands,
                                         // ...
      if (Shuffle->isZeroEltSplat())
        // ...
                                         Mask, CostKind, 0, nullptr, Operands,
                                         // ...
      if (Shuffle->isSingleSource())
        // ...
                                         VecSrcTy, Mask, CostKind, 0, nullptr,
                                         // ...
      if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
        return TargetTTI->getShuffleCost(
            // ...
      if (Shuffle->isSelect())
        return TargetTTI->getShuffleCost(TTI::SK_Select, VecTy, VecSrcTy, Mask,
                                         // ...
      if (Shuffle->isSplice(SubIndex))
        return TargetTTI->getShuffleCost(TTI::SK_Splice, VecTy, VecSrcTy, Mask,
                                         CostKind, SubIndex, nullptr, Operands,
                                         // ...
      // ...
                                         Mask, CostKind, 0, nullptr, Operands,
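
      // The shuffle case classifies the mask first: identity, reverse,
      // transpose, zero-element splat, single-source, subvector insert, select
      // and splice masks are each mapped to the corresponding
      // TTI::ShuffleKind, and anything that does not match falls through to a
      // generic permute shuffle kind.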
    case Instruction::ExtractElement: {
      // ...
      if (CI->getValue().getActiveBits() <= 32)
        Idx = CI->getZExtValue();
      Type *DstTy = Operands[0]->getType();
      return TargetTTI->getVectorInstrCost(*EEI, DstTy, CostKind, Idx);
    }
    // ...
    auto *TargetTTI = static_cast<const T *>(this);
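
// Consumers do not call these CRTP helpers directly; they query the
// TargetTransformInfo wrapper produced by TargetIRAnalysis. A minimal sketch
// of such a use (pass boilerplate omitted; Threshold and the loop body are
// illustrative only):
//
//   const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
//   for (Instruction &Inst : instructions(F)) {
//     InstructionCost Cost = TTI.getInstructionCost(
//         &Inst, TargetTransformInfo::TCK_RecipThroughput);
//     if (Cost.isValid() && *Cost.getValue() > Threshold) {
//       // treat Inst as expensive
//     }
//   }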