#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
for (const Value *Operand : Operands)

return SI.getNumCases();
virtual std::pair<KnownBits, KnownBits>

       "expected pointer or pointer vector type");

if (DL.isNonIntegralAddressSpace(FromAS))
  return std::pair(KnownBits(DL.getPointerSizeInBits(FromAS)),

    CastI->getDestAddressSpace(), *CastI->getPointerOperand());
FromPtrBits = KB.second;

return {FromPtrBits, ToPtrBits};

unsigned ToASBitSize = DL.getPointerSizeInBits(ToAS);

if (DL.isNonIntegralAddressSpace(FromAS))
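// Reading aid (not original header text): for a non-integral source address
// space the pointer has no stable integer representation, so the helper
// conservatively returns a default-constructed KnownBits (nothing known) at
// the source pointer width; otherwise known bits can be carried across the
// cast, truncated or any-extended to the destination pointer width.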
virtual std::pair<const Value *, unsigned>

return std::make_pair(nullptr, -1);
assert(F && "A concrete function must be provided to this routine.");

if (F->isIntrinsic())

if (F->hasLocalLinkage() || !F->hasName())
if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
    Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
    Name == "fmin" || Name == "fminf" || Name == "fminl" ||
    Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
    Name == "sin" || Name == "sinf" || Name == "sinl" ||
    Name == "cos" || Name == "cosf" || Name == "cosl" ||
    Name == "tan" || Name == "tanf" || Name == "tanl" ||
    Name == "asin" || Name == "asinf" || Name == "asinl" ||
    Name == "acos" || Name == "acosf" || Name == "acosl" ||
    Name == "atan" || Name == "atanf" || Name == "atanl" ||
    Name == "atan2" || Name == "atan2f" || Name == "atan2l" ||
    Name == "sinh" || Name == "sinhf" || Name == "sinhl" ||
    Name == "cosh" || Name == "coshf" || Name == "coshl" ||
    Name == "tanh" || Name == "tanhf" || Name == "tanhl" ||
    Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl" ||
    Name == "exp10" || Name == "exp10l" || Name == "exp10f")
if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
    Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
    Name == "floorf" || Name == "ceil" || Name == "round" ||
    Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
virtual std::optional<Instruction *>

virtual std::optional<Value *>
    bool &KnownBitsComputed) const {

    SimplifyAndSetOp) const {
int64_t BaseOffset, bool HasBaseReg,
int64_t Scale, unsigned AddrSpace,
int64_t ScalableOffset = 0) const {

return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
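// Reading aid: the default above accepts only the two universally available
// addressing modes -- a plain base register ("reg", Scale == 0) and
// base-plus-index ("reg + reg", Scale == 1) -- with no global base and no
// immediate offset. Targets with richer modes (reg + imm, scaled index) are
// expected to override this hook.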
unsigned DataSize = DL.getTypeStoreSize(DataType);

unsigned DataSize = DL.getTypeStoreSize(DataType);
Align Alignment) const {

Align Alignment) const {

Align Alignment) const {

unsigned AddrSpace) const {

Type *DataType) const {

bool HasBaseReg, int64_t Scale,
unsigned AddrSpace) const {

Scale, AddrSpace, nullptr,
virtual bool useAA() const { return false; }
unsigned ScalarOpdIdx) const {

unsigned *Fast) const {

Type *Ty = nullptr) const {

return "Generic::Unknown Register Class";

return "Generic::ScalarRC";

return "Generic::VectorRC";
virtual std::optional<unsigned> getMaxVScale() const { return std::nullopt; }

virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {

const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  AllowPromotionWithoutCommonHeader = false;
virtual std::optional<unsigned>

virtual std::optional<unsigned>

unsigned NumStridedMemAccesses,
unsigned NumPrefetches, bool HasCall) const {

unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
auto IsWidenableCondition = [](const Value *V) {
  if (II->getIntrinsicID() == Intrinsic::experimental_widenable_condition)

case Instruction::FDiv:
case Instruction::FRem:
case Instruction::SDiv:
case Instruction::SRem:
case Instruction::UDiv:
case Instruction::URem:

case Instruction::And:
case Instruction::Or:
  if (any_of(Args, IsWidenableCondition))
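// Reading aid: the lambda above recognizes calls to
// @llvm.experimental.widenable.condition, and the And/Or cases use it so a
// branch condition combined with a widenable condition is treated as guard
// plumbing that later passes are expected to optimize away; the exact cost
// returned on that path is elided from this fragment.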
if (Ty->getScalarType()->isFloatingPointTy())
case Instruction::IntToPtr: {
  unsigned SrcSize = Src->getScalarSizeInBits();
  if (DL.isLegalInteger(SrcSize) &&
      SrcSize <= DL.getPointerTypeSizeInBits(Dst))

case Instruction::PtrToAddr: {
  unsigned DstSize = Dst->getScalarSizeInBits();
  assert(DstSize == DL.getAddressSizeInBits(Src));
  if (DL.isLegalInteger(DstSize))

case Instruction::PtrToInt: {
  unsigned DstSize = Dst->getScalarSizeInBits();
  if (DL.isLegalInteger(DstSize) &&
      DstSize >= DL.getPointerTypeSizeInBits(Src))

case Instruction::BitCast:
  if (Dst == Src || (Dst->isPointerTy() && Src->isPointerTy()))
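// Reading aid for the cast cases above: an inttoptr is treated as free when
// its source integer type is legal and no wider than the pointer, and a
// ptrtoint when its destination integer type is legal and at least as wide
// as the pointer, since no bits need to change. ptrtoaddr asserts that the
// destination width matches the target's address width and is free whenever
// that integer type is legal; a bitcast is free when the types match or both
// sides are pointers.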
case Instruction::Trunc: {

ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx,

unsigned Index) const {

const APInt &DemandedDstElts,

if (Opcode == Instruction::InsertValue &&

bool UseMaskForCond, bool UseMaskForGaps) const {
switch (ICA.getID()) {

case Intrinsic::allow_runtime_check:
case Intrinsic::allow_ubsan_check:
case Intrinsic::annotation:
case Intrinsic::assume:
case Intrinsic::sideeffect:
case Intrinsic::pseudoprobe:
case Intrinsic::arithmetic_fence:
case Intrinsic::dbg_assign:
case Intrinsic::dbg_declare:
case Intrinsic::dbg_value:
case Intrinsic::dbg_label:
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
case Intrinsic::is_constant:
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::experimental_noalias_scope_decl:
case Intrinsic::objectsize:
case Intrinsic::ptr_annotation:
case Intrinsic::var_annotation:
case Intrinsic::experimental_gc_result:
case Intrinsic::experimental_gc_relocate:
case Intrinsic::coro_alloc:
case Intrinsic::coro_begin:
case Intrinsic::coro_begin_custom_abi:
case Intrinsic::coro_free:
case Intrinsic::coro_end:
case Intrinsic::coro_frame:
case Intrinsic::coro_size:
case Intrinsic::coro_align:
case Intrinsic::coro_suspend:
case Intrinsic::coro_subfn_addr:
case Intrinsic::threadlocal_address:
case Intrinsic::experimental_widenable_condition:
case Intrinsic::ssa_copy:
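// Reading aid: every intrinsic listed above is metadata-like or otherwise
// expected to generate no machine code (debug info, lifetime and invariant
// markers, coroutine bookkeeping, assumes and annotations), which is why the
// default intrinsic cost model can treat them all uniformly as free.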
switch (MICA.getID()) {
case Intrinsic::masked_scatter:
case Intrinsic::masked_gather:
case Intrinsic::masked_load:
case Intrinsic::masked_store:
case Intrinsic::vp_scatter:
case Intrinsic::vp_gather:
case Intrinsic::masked_compressstore:
case Intrinsic::masked_expandload:
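// Reading aid: the masked/vp gather-scatter, masked load/store, and
// compress/expand intrinsics are grouped in a single switch so that a target
// with native support can override this hook once; the base implementation
// has no target knowledge and models them conservatively.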
std::optional<FastMathFlags> FMF,

VectorType *Ty, std::optional<FastMathFlags> FMF,

bool CanCreate = true) const {

unsigned SrcAddrSpace, unsigned DestAddrSpace,
std::optional<uint32_t> AtomicElementSize) const {
  return AtomicElementSize ? Type::getIntNTy(Context, *AtomicElementSize * 8)
unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
std::optional<uint32_t> AtomicCpySize) const {
  unsigned OpSizeInBytes = AtomicCpySize.value_or(1);

  for (unsigned i = 0; i != RemainingBytes; i += OpSizeInBytes)
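// A standalone sketch of the residual loop shape above (plain C++, no LLVM
// types; residualChunks is a hypothetical helper, not part of this header):
//
//   #include <vector>
//   // Byte offset of each residual copy op. With no atomic element size,
//   // OpSizeInBytes defaults to 1, matching AtomicCpySize.value_or(1).
//   std::vector<unsigned> residualChunks(unsigned RemainingBytes,
//                                        unsigned OpSizeInBytes = 1) {
//     std::vector<unsigned> Offsets;
//     for (unsigned i = 0; i != RemainingBytes; i += OpSizeInBytes)
//       Offsets.push_back(i);
//     return Offsets;
//   }
//
// residualChunks(7) yields {0, 1, 2, 3, 4, 5, 6}: seven single-byte ops.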
return (Caller->getFnAttribute("target-cpu") ==
        Callee->getFnAttribute("target-cpu")) &&
       (Caller->getFnAttribute("target-features") ==
        Callee->getFnAttribute("target-features"));
unsigned DefaultCallPenalty) const {
  return DefaultCallPenalty;

return (Caller->getFnAttribute("target-cpu") ==
        Callee->getFnAttribute("target-cpu")) &&
       (Caller->getFnAttribute("target-features") ==
        Callee->getFnAttribute("target-features"));
unsigned AddrSpace) const {

unsigned AddrSpace) const {

unsigned ChainSizeInBytes,

unsigned ChainSizeInBytes,
unsigned MaxRequiredSize =
    VT->getElementType()->getPrimitiveSizeInBits().getFixedValue();

unsigned MinRequiredSize = 0;
for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
  if (auto *IntElement =

    bool signedElement = IntElement->getValue().isNegative();

    unsigned ElementMinRequiredSize =
        IntElement->getValue().getSignificantBits() - 1;

    isSigned |= signedElement;

    MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);

return MaxRequiredSize;

return MinRequiredSize;

isSigned = CI->getValue().isNegative();
return CI->getValue().getSignificantBits() - 1;

return Cast->getSrcTy()->getScalarSizeInBits() - 1;

return Cast->getSrcTy()->getScalarSizeInBits();
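// Standalone sketch of the "significant bits minus one" computation used
// above (plain C++; minRequiredBits is a hypothetical helper, not LLVM API).
// APInt::getSignificantBits() counts the bits needed including the sign bit,
// so subtracting one leaves just the magnitude bits:
//
//   #include <cstdint>
//   unsigned minRequiredBits(int64_t V, bool &IsSigned) {
//     IsSigned |= (V < 0);
//     uint64_t Mag = V < 0 ? ~static_cast<uint64_t>(V) : uint64_t(V);
//     unsigned Bits = 0;
//     while (Mag) { ++Bits; Mag >>= 1; }  // highest bit differing from sign
//     return Bits;  // e.g. -3 -> 2, 7 -> 3, matching getSignificantBits()-1
//   }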
const SCEV *Ptr) const {

int64_t MergeDistance) const {

template <typename T>
assert(PointeeType && Ptr && "can't get GEPCost of nullptr");

bool HasBaseReg = (BaseGV == nullptr);

auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
APInt BaseOffset(PtrSizeBits, 0);

Type *TargetType = nullptr;

if (Operands.empty())

for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
  TargetType = GTI.getIndexedType();

  if (StructType *STy = GTI.getStructTypeOrNull()) {
    assert(ConstIdx && "Unexpected GEP index");
    BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);

  int64_t ElementSize =
      GTI.getSequentialElementStride(DL).getFixedValue();

  Scale = ElementSize;

AccessType = TargetType;
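// Worked example (illustrative): for a GEP like
//   getelementptr {i32, [4 x i64]}, ptr %p, i64 1, i32 1, i64 %k
// the loop above folds every constant index into BaseOffset via the struct
// layout (field 1 sits at offset 8 under typical layouts), while the first
// variable index contributes its element stride (8 bytes for i64) as Scale.
// The accumulated (BaseOffset, Scale, HasBaseReg) triple is then tested with
// isLegalAddressingMode to decide whether the GEP folds into its user's
// addressing mode, i.e. is effectively free.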
for (const Value *V : Ptrs) {

  if (Info.isSameBase() && V != Base) {
    if (GEP->hasAllConstantIndices())

  {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None},

  GEP->getSourceElementType(), GEP->getPointerOperand(), Indices,
auto *TargetTTI = static_cast<const T *>(this);

if (const Function *F = CB->getCalledFunction()) {
  if (!TargetTTI->isLoweredToCall(F))

Type *Ty = U->getType();

case Instruction::Call: {

  return TargetTTI->getIntrinsicInstrCost(CostAttrs, CostKind);
case Instruction::Br:
case Instruction::Ret:
case Instruction::PHI:
case Instruction::Switch:
  return TargetTTI->getCFInstrCost(Opcode, CostKind, I);
case Instruction::Freeze:

case Instruction::ExtractValue:
case Instruction::InsertValue:
  return TargetTTI->getInsertExtractValueCost(Opcode, CostKind);
case Instruction::Alloca:
case Instruction::GetElementPtr: {

  Type *AccessType = nullptr;

  if (GEP->hasOneUser() && I)
    AccessType = I->user_back()->getAccessType();

  return TargetTTI->getGEPCost(GEP->getSourceElementType(),
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::FDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::FRem:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::FNeg: {

  if (Opcode != Instruction::FNeg)

  return TargetTTI->getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                           Op2Info, Operands, I);
case Instruction::IntToPtr:
case Instruction::PtrToAddr:
case Instruction::PtrToInt:
case Instruction::SIToFP:
case Instruction::UIToFP:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::Trunc:
case Instruction::FPTrunc:
case Instruction::BitCast:
case Instruction::FPExt:
case Instruction::SExt:
case Instruction::ZExt:
case Instruction::AddrSpaceCast: {
  Type *OpTy = Operands[0]->getType();
  return TargetTTI->getCastInstrCost(
case Instruction::Store: {

  Type *ValTy = Operands[0]->getType();

  return TargetTTI->getMemoryOpCost(Opcode, ValTy, SI->getAlign(),

case Instruction::Load: {

  Type *LoadType = U->getType();

    LoadType = TI->getDestTy();

  return TargetTTI->getMemoryOpCost(Opcode, LoadType, LI->getAlign(),
                                    {TTI::OK_AnyValue, TTI::OP_None}, I);
case Instruction::Select: {
  const Value *Op0, *Op1;

  return TargetTTI->getArithmeticInstrCost(
      CostKind, Op1Info, Op2Info, Operands, I);

  Type *CondTy = Operands[0]->getType();
  return TargetTTI->getCmpSelInstrCost(Opcode, U->getType(), CondTy,
case Instruction::ICmp:
case Instruction::FCmp: {

  Type *ValTy = Operands[0]->getType();

  return TargetTTI->getCmpSelInstrCost(Opcode, ValTy, U->getType(),
case Instruction::InsertElement: {

  if (CI->getValue().getActiveBits() <= 32)
    Idx = CI->getZExtValue();
  return TargetTTI->getVectorInstrCost(*IE, Ty, CostKind, Idx,
case Instruction::ShuffleVector: {

  int NumSubElts, SubIndex;

  if (all_of(Mask, [](int M) { return M < 0; }))

  if (Shuffle->changesLength()) {

    if (Shuffle->increasesLength() && Shuffle->isIdentityWithPadding())

    if (Shuffle->isExtractSubvectorMask(SubIndex))
          VecSrcTy, Mask, CostKind, SubIndex,
          VecTy, Operands, Shuffle);

    if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
      return TargetTTI->getShuffleCost(

    int ReplicationFactor, VF;
    if (Shuffle->isReplicationMask(ReplicationFactor, VF)) {
        DemandedDstElts.setBit(I.index());
      return TargetTTI->getReplicationShuffleCost(
          VecSrcTy->getElementType(), ReplicationFactor, VF,

    NumSubElts = VecSrcTy->getElementCount().getKnownMinValue();

    if (Shuffle->increasesLength()) {
      for (int &M : AdjustMask)
        M = M >= NumSubElts ? (M + (Mask.size() - NumSubElts)) : M;

      return TargetTTI->getShuffleCost(
          VecTy, AdjustMask, CostKind, 0, nullptr, Operands, Shuffle);

        VecSrcTy, VecSrcTy, AdjustMask, CostKind, 0, nullptr, Operands,

    std::iota(ExtractMask.begin(), ExtractMask.end(), 0);
    return ShuffleCost + TargetTTI->getShuffleCost(
        ExtractMask, CostKind, 0, VecTy, {}, Shuffle);
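// Reading aid for the length-changing path above: a mask the base TTI cannot
// classify is normalized into an equal-length shuffle. When the result is
// wider, source-B indices in AdjustMask are shifted up so both operands can
// be treated as widened vectors and the cost is that widened shuffle; when
// the result is narrower, the cost is modeled as the equal-length shuffle
// plus an identity-mask SK_ExtractSubvector (the std::iota mask) that trims
// the surplus elements.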
if (Shuffle->isIdentity())

if (Shuffle->isReverse())
  return TargetTTI->getShuffleCost(TTI::SK_Reverse, VecTy, VecSrcTy, Mask,

if (Shuffle->isTranspose())
      Mask, CostKind, 0, nullptr, Operands,

if (Shuffle->isZeroEltSplat())
      Mask, CostKind, 0, nullptr, Operands,

if (Shuffle->isSingleSource())
      VecSrcTy, Mask, CostKind, 0, nullptr,

if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
  return TargetTTI->getShuffleCost(

if (Shuffle->isSelect())
  return TargetTTI->getShuffleCost(TTI::SK_Select, VecTy, VecSrcTy, Mask,

if (Shuffle->isSplice(SubIndex))
  return TargetTTI->getShuffleCost(TTI::SK_Splice, VecTy, VecSrcTy, Mask,
                                   CostKind, SubIndex, nullptr, Operands,

    Mask, CostKind, 0, nullptr, Operands,
case Instruction::ExtractElement: {

  if (CI->getValue().getActiveBits() <= 32)
    Idx = CI->getZExtValue();
  Type *DstTy = Operands[0]->getType();
  return TargetTTI->getVectorInstrCost(*EEI, DstTy, CostKind, Idx);

auto *TargetTTI = static_cast<const T *>(this);