using namespace PatternMatch;

#define DEBUG_TYPE "tti"

    cl::desc("Recognize reduction patterns."));

    cl::desc("Use this to override the target cache line size when "
             "specified by the user."));

    cl::desc("Use this to override the target's minimum page size."));

    cl::desc(
        "Use this to override the target's predictable branch threshold (%)."));
  if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
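// IntrinsicCostAttributes constructors (excerpted): they record the return
// type, intrinsic ID, fast-math flags, scalarization cost, argument values,
// and parameter types that later cost queries consult.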
      ScalarizationCost(ScalarizationCost) {
  if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
    FMF = FPMO->getFastMathFlags();
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());

    : RetTy(Ty), IID(Id) {
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
  ParamTys.reserve(Arguments.size());

    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
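// HardwareLoopInfo::isHardwareLoopCandidate (excerpted): the exit count must
// be computable and non-zero, and the exiting block must terminate in a
// conditional branch.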
  if (isa<SCEVCouldNotCompute>(EC))
  if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
    if (ConstEC->getValue()->isZero())

  bool NotAlways = false;

  if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
    if (!BI->isConditional())
    : TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}

    : TTIImpl(std::move(Arg.TTIImpl)) {}

  TTIImpl = std::move(RHS.TTIImpl);
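// The TargetTransformInfo member functions below are thin forwarders to the
// type-erased TTIImpl object; cost-returning queries additionally assert that
// the implementation never reports a negative cost.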
  return TTIImpl->getInliningThresholdMultiplier();
  return TTIImpl->getInliningCostBenefitAnalysisSavingsMultiplier();
  return TTIImpl->getInliningCostBenefitAnalysisProfitableMultiplier();
  return TTIImpl->adjustInliningThreshold(CB);
  return TTIImpl->getCallerAllocaCost(CB, AI);
  return TTIImpl->getInlinerVectorBonusPercent();
         "If pointers have same base address it has to be provided.");
  return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
         "TTI should not produce negative costs!");
             : TTIImpl->getPredictableBranchThreshold();
  return TTIImpl->getBranchMispredictPenalty();
  return TTIImpl->hasBranchDivergence(F);
  return TTIImpl->isSourceOfDivergence(V);
  return TTIImpl->isAlwaysUniform(V);
    unsigned ToAS) const {
  return TTIImpl->isValidAddrSpaceCast(FromAS, ToAS);
    unsigned ToAS) const {
  return TTIImpl->addrspacesMayAlias(FromAS, ToAS);
  return TTIImpl->getFlatAddressSpace();
  return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
    unsigned ToAS) const {
  return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
  return TTIImpl->canHaveNonUndefGlobalInitializerInAddressSpace(AS);
  return TTIImpl->getAssumedAddrSpace(V);
  return TTIImpl->isSingleThreaded();
std::pair<const Value *, unsigned>
  return TTIImpl->getPredicatedAddrSpace(V);
  return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
  return TTIImpl->isLoweredToCall(F);
  return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
  return TTIImpl->preferPredicateOverEpilogue(TFI);
    bool IVUpdateMayOverflow) const {
  return TTIImpl->getPreferredTailFoldingStyle(IVUpdateMayOverflow);
std::optional<Instruction *>
  return TTIImpl->instCombineIntrinsic(IC, II);
    bool &KnownBitsComputed) const {
  return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
        SimplifyAndSetOp) const {
  return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
      IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
  return TTIImpl->getUnrollingPreferences(L, SE, UP, ORE);
  return TTIImpl->getPeelingPreferences(L, SE, PP);
  return TTIImpl->isLegalAddImmediate(Imm);
  return TTIImpl->isLegalAddScalableImmediate(Imm);
  return TTIImpl->isLegalICmpImmediate(Imm);
    bool HasBaseReg, int64_t Scale,
    int64_t ScalableOffset) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace, I, ScalableOffset);
  return TTIImpl->isLSRCostLess(C1, C2);
  return TTIImpl->isNumRegsMajorCostOfLSR();
  return TTIImpl->shouldFoldTerminatingConditionAfterLSR();
  return TTIImpl->shouldDropLSRSolutionIfLessProfitable();
  return TTIImpl->isProfitableLSRChainElement(I);
  return TTIImpl->canMacroFuseCmp();
  return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
  return TTIImpl->getPreferredAddressingMode(L, SE);
    Align Alignment) const {
  return TTIImpl->isLegalMaskedStore(DataType, Alignment);
    Align Alignment) const {
  return TTIImpl->isLegalMaskedLoad(DataType, Alignment);
    Align Alignment) const {
  return TTIImpl->isLegalNTStore(DataType, Alignment);
  return TTIImpl->isLegalNTLoad(DataType, Alignment);
  return TTIImpl->isLegalBroadcastLoad(ElementTy, NumElements);
    Align Alignment) const {
  return TTIImpl->isLegalMaskedGather(DataType, Alignment);
    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
  return TTIImpl->isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
    Align Alignment) const {
  return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
    Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedGather(DataType, Alignment);
    Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedScatter(DataType, Alignment);
    Align Alignment) const {
  return TTIImpl->isLegalMaskedCompressStore(DataType, Alignment);
    Align Alignment) const {
  return TTIImpl->isLegalMaskedExpandLoad(DataType, Alignment);
    Align Alignment) const {
  return TTIImpl->isLegalStridedLoadStore(DataType, Alignment);
    Type *DataType) const {
  return TTIImpl->isLegalMaskedVectorHistogram(AddrType, DataType);
  return TTIImpl->enableOrderedReductions();
  return TTIImpl->hasDivRemOp(DataType, IsSigned);
    unsigned AddrSpace) const {
  return TTIImpl->hasVolatileVariant(I, AddrSpace);
  return TTIImpl->prefersVectorizedAddressing();
    int64_t Scale, unsigned AddrSpace) const {
      Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return TTIImpl->LSRWithInstrQueries();
  return TTIImpl->isTruncateFree(Ty1, Ty2);
  return TTIImpl->isProfitableToHoist(I);
  return TTIImpl->isTypeLegal(Ty);
  return TTIImpl->getRegUsageForType(Ty);
  return TTIImpl->shouldBuildLookupTables();
  return TTIImpl->shouldBuildLookupTablesForConstant(C);
  return TTIImpl->shouldBuildRelLookupTables();
  return TTIImpl->useColdCCForColdCall(F);
  return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
  return TTIImpl->getOperandsScalarizationOverhead(Args, Tys, CostKind);
  return TTIImpl->supportsEfficientVectorElementLoadStore();
  return TTIImpl->supportsTailCalls();
  return TTIImpl->supportsTailCallFor(CB);
    bool LoopHasReductions) const {
  return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
  return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
  return TTIImpl->enableSelectOptimize();
  return TTIImpl->shouldTreatInstructionLikeSelect(I);
  return TTIImpl->enableInterleavedAccessVectorization();
  return TTIImpl->enableMaskedInterleavedAccessVectorization();
  return TTIImpl->isFPVectorizationPotentiallyUnsafe();
    unsigned *Fast) const {
  return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
  return TTIImpl->getPopcntSupport(IntTyWidthInBit);
  return TTIImpl->haveFastSqrt(Ty);
  return TTIImpl->isExpensiveToSpeculativelyExecute(I);
  return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
      TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
      TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return TTIImpl->preferToKeepConstantsAttached(Inst, Fn);
  return TTIImpl->getNumberOfRegisters(ClassID);
  return TTIImpl->hasConditionalLoadStoreForType(Ty);
  return TTIImpl->getRegisterClassForType(Vector, Ty);
  return TTIImpl->getRegisterClassName(ClassID);
  return TTIImpl->getRegisterBitWidth(K);
  return TTIImpl->getMinVectorRegisterBitWidth();
  return TTIImpl->getMaxVScale();
  return TTIImpl->getVScaleForTuning();
  return TTIImpl->isVScaleKnownToBeAPowerOfTwo();
  return TTIImpl->shouldMaximizeVectorBandwidth(K);
    bool IsScalable) const {
  return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
    unsigned Opcode) const {
  return TTIImpl->getMaximumVF(ElemWidth, Opcode);
    Type *ScalarValTy) const {
  return TTIImpl->getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  return TTIImpl->shouldConsiderAddressTypePromotion(
      I, AllowPromotionWithoutCommonHeader);
             : TTIImpl->getCacheLineSize();
std::optional<unsigned>
  return TTIImpl->getCacheSize(Level);
std::optional<unsigned>
  return TTIImpl->getCacheAssociativity(Level);
             : TTIImpl->getMinPageSize();
  return TTIImpl->getPrefetchDistance();
    unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
    unsigned NumPrefetches, bool HasCall) const {
  return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                       NumPrefetches, HasCall);
  return TTIImpl->getMaxPrefetchIterationsAhead();
  return TTIImpl->enableWritePrefetching();
  return TTIImpl->shouldPrefetchAddressSpace(AS);
  return TTIImpl->getMaxInterleaveFactor(VF);
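// getOperandInfo (excerpted): constant scalars, splats, and constant vectors
// are inspected so the operand can be flagged as uniform and/or a (negated)
// power of two for costing purposes.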
  if (isa<ConstantInt>(V) || isa<ConstantFP>(V)) {
    if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getValue().isPowerOf2())
      else if (CI->getValue().isNegatedPowerOf2())
  if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
    if (ShuffleInst->isZeroEltSplat())
  if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
    if (auto *CI = dyn_cast<ConstantInt>(Splat)) {
      if (CI->getValue().isPowerOf2())
      else if (CI->getValue().isNegatedPowerOf2())
    } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
      bool AllPow2 = true, AllNegPow2 = true;
      for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
        if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I))) {
          AllPow2 &= CI->getValue().isPowerOf2();
          AllNegPow2 &= CI->getValue().isNegatedPowerOf2();
          if (AllPow2 || AllNegPow2)
      AllPow2 = AllNegPow2 = false;
  return {OpInfo, OpProps};
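// getArithmeticInstrCost (excerpted): when TargetLibraryInfo is available and
// the opcode is FRem, the cost of a vectorizable frem library call may be
// used instead of the generic arithmetic cost.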
  if (TLibInfo && Opcode == Instruction::FRem) {
      TTIImpl->getArithmeticInstrCost(Opcode, Ty, CostKind,
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
      TTIImpl->getAltInstrCost(VecTy, Opcode0, Opcode1, OpcodeMask, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
                                  Index, SubTp, Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
                             unsigned GatScatOp) {
    if (I->getOpcode() == LdStOp)
    if (II->getIntrinsicID() == MaskedOp)
    if (II->getIntrinsicID() == GatScatOp)
  switch (I->getOpcode()) {
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPExt:
    return getLoadStoreKind(I->getOperand(0), Instruction::Load,
                            Intrinsic::masked_load, Intrinsic::masked_gather);
  case Instruction::Trunc:
  case Instruction::FPTrunc:
      return getLoadStoreKind(*I->user_begin(), Instruction::Store,
                              Intrinsic::masked_store,
                              Intrinsic::masked_scatter);
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
      TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
      TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
      TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
      TTIImpl->getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    unsigned Index) const {
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
      EltTy, ReplicationFactor, VF, DemandedDstElts, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
      Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
         "TTI should not produce negative costs!");
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
      Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    bool UseMaskForCond, bool UseMaskForGaps) const {
      UseMaskForCond, UseMaskForGaps);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return TTIImpl->getNumberOfParts(Tp);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return TTIImpl->getMaxMemIntrinsicInlineSizeThreshold();
    unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
      TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
      TTIImpl->getMinMaxReductionCost(IID, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return TTIImpl->getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty, FMF,
  return TTIImpl->getMulAccReductionCost(IsUnsigned, ResTy, Ty, CostKind);
  return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
  return TTIImpl->getTgtMemIntrinsic(Inst, Info);
  return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
  return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
    unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
    std::optional<uint32_t> AtomicElementSize) const {
  return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                            DestAddrSpace, SrcAlign, DestAlign,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    std::optional<uint32_t> AtomicCpySize) const {
  TTIImpl->getMemcpyLoopResidualLoweringType(
      OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
      DestAlign, AtomicCpySize);
  return TTIImpl->areInlineCompatible(Caller, Callee);
    unsigned DefaultCallPenalty) const {
  return TTIImpl->getInlineCallPenalty(F, Call, DefaultCallPenalty);
  return TTIImpl->areTypesABICompatible(Caller, Callee, Types);
  return TTIImpl->isIndexedLoadLegal(Mode, Ty);
  return TTIImpl->isIndexedStoreLegal(Mode, Ty);
  return TTIImpl->getLoadStoreVecRegBitWidth(AS);
  return TTIImpl->isLegalToVectorizeLoad(LI);
  return TTIImpl->isLegalToVectorizeStore(SI);
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
  return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
  return TTIImpl->isElementTypeLegalForScalableVector(Ty);
    unsigned ChainSizeInBytes,
  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
    unsigned ChainSizeInBytes,
  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
  return TTIImpl->preferFixedOverScalableIfEqualCost();
  return TTIImpl->preferInLoopReduction(Opcode, Ty, Flags);
  return TTIImpl->preferPredicatedReductionSelect(Opcode, Ty, Flags);
  return TTIImpl->preferEpilogueVectorization();
  return TTIImpl->getVPLegalizationStrategy(VPI);
  return TTIImpl->hasArmWideBranch(Thumb);
  return TTIImpl->getMaxNumArgs();
  return TTIImpl->shouldExpandReduction(II);
  return TTIImpl->getPreferredExpandedReductionShuffle(II);
  return TTIImpl->getGISelRematGlobalCost();
  return TTIImpl->getMinTripCountTailFoldingThreshold();
  return TTIImpl->supportsScalableVectors();
  return TTIImpl->enableScalableVectorization();
    Align Alignment) const {
  return TTIImpl->hasActiveVectorLength(Opcode, DataType, Alignment);
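// TargetIRAnalysis (excerpted): holds a callback that builds a
// TargetTransformInfo for each function; the default callback constructs the
// no-op implementation from the function's DataLayout.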
    : TTICallback(std::move(TTICallback)) {}
  return TTICallback(F);
  return Result(F.getDataLayout());
                "Target Transform Information", false, true)