//===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/CFG.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include <optional>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "tti"

static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
                                     cl::Hidden,
                                     cl::desc("Recognize reduction patterns."));

static cl::opt<unsigned> CacheLineSize(
    "cache-line-size", cl::init(0), cl::Hidden,
    cl::desc("Use this to override the target cache line size when "
             "specified by the user."));

static cl::opt<unsigned> MinPageSize(
    "min-page-size", cl::init(0), cl::Hidden,
    cl::desc("Use this to override the target's minimum page size."));

static cl::opt<unsigned> PredictableBranchThreshold(
    "predictable-branch-threshold", cl::init(99), cl::Hidden,
    cl::desc(
        "Use this to override the target's predictable branch threshold (%)."));

namespace {
/// No-op implementation of the TTI interface using the utility base
/// classes.
///
/// This is used when no target specific information is available.
struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
  explicit NoTTIImpl(const DataLayout &DL)
      : TargetTransformInfoImplCRTPBase<NoTTIImpl>(DL) {}
};
} // namespace

TargetTransformInfo::TargetTransformInfo(
    std::unique_ptr<const TargetTransformInfoImplBase> Impl)
    : TTIImpl(std::move(Impl)) {}

bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) {
  // If the loop has irreducible control flow, it cannot be converted to a
  // hardware loop.
  LoopBlocksRPO RPOT(L);
  RPOT.perform(&LI);
  if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
    return false;
  return true;
}

IntrinsicCostAttributes::IntrinsicCostAttributes(
    Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarizationCost,
    bool TypeBasedOnly, const TargetLibraryInfo *LibInfo)
    : II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id),
      ScalarizationCost(ScalarizationCost), LibInfo(LibInfo) {

  if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
    FMF = FPMO->getFastMathFlags();

  if (!TypeBasedOnly)
    Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
  FunctionType *FTy = CI.getCalledFunction()->getFunctionType();
  ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty,
                                                 ArrayRef<const Value *> Args)
    : RetTy(Ty), IID(Id) {

  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
  ParamTys.reserve(Arguments.size());
  for (const Value *Argument : Arguments)
    ParamTys.push_back(Argument->getType());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(
    Intrinsic::ID Id, Type *RTy, ArrayRef<const Value *> Args,
    ArrayRef<Type *> Tys, FastMathFlags Flags, const IntrinsicInst *I,
    InstructionCost ScalarCost, TargetLibraryInfo const *LibInfo)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost),
      LibInfo(LibInfo) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
}

HardwareLoopInfo::HardwareLoopInfo(Loop *L) : L(L) {
  // Match default options:
  // - hardware-loop-counter-bitwidth = 32
  // - hardware-loop-decrement = 1
  CountType = Type::getInt32Ty(L->getHeader()->getContext());
  LoopDecrement = ConstantInt::get(CountType, 1);
}

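// Look for an exiting block whose exit count can drive a hardware-loop
// counter: the count must be loop invariant, known non-zero, no wider than
// CountType, and the block must run on every iteration and terminate in a
// conditional branch.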
bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
                                               LoopInfo &LI, DominatorTree &DT,
                                               bool ForceNestedLoop,
                                               bool ForceHardwareLoopPHI) {
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (BasicBlock *BB : ExitingBlocks) {
    // If we pass the updated counter back through a phi, we need to know
    // which latch the updated value will be coming from.
    if (!L->isLoopLatch(BB)) {
      if (!ForceHardwareLoopPHI)
        continue;
    }

    const SCEV *EC = SE.getExitCount(L, BB);
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE.isLoopInvariant(EC, L))
      continue;

    if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
      continue;

    // If this exiting block is contained in a nested loop, it is not eligible
    // for insertion of the branch-and-decrement since the inner loop would
    // end up messing up the value in the CTR.
    if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // exiting block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
    bool NotAlways = false;
    for (BasicBlock *Pred : predecessors(L->getHeader())) {
      if (!L->contains(Pred))
        continue;

      if (!DT.dominates(BB, Pred)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this block ends with a conditional branch.
    Instruction *TI = BB->getTerminator();
    if (!TI)
      continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional())
        continue;

      ExitBranch = BI;
    } else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    ExitBlock = BB;
    ExitCount = EC;
    break;
  }

  if (!ExitBlock)
    return false;
  return true;
}

TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
    : TTIImpl(std::make_unique<NoTTIImpl>(DL)) {}

TargetTransformInfo::~TargetTransformInfo() = default;

TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
    : TTIImpl(std::move(Arg.TTIImpl)) {}

TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
  TTIImpl = std::move(RHS.TTIImpl);
  return *this;
}
218
220 return TTIImpl->getInliningThresholdMultiplier();
221}
222
223unsigned
225 return TTIImpl->getInliningCostBenefitAnalysisSavingsMultiplier();
226}
227
228unsigned
230 const {
231 return TTIImpl->getInliningCostBenefitAnalysisProfitableMultiplier();
232}
233
235 return TTIImpl->getInliningLastCallToStaticBonus();
236}
237
238unsigned
240 return TTIImpl->adjustInliningThreshold(CB);
241}
242
244 const AllocaInst *AI) const {
245 return TTIImpl->getCallerAllocaCost(CB, AI);
246}
247
249 return TTIImpl->getInlinerVectorBonusPercent();
250}
251
253 Type *PointeeType, const Value *Ptr, ArrayRef<const Value *> Operands,
254 Type *AccessType, TTI::TargetCostKind CostKind) const {
255 return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
256}
257
260 const TTI::PointersChainInfo &Info, Type *AccessTy,
262 assert((Base || !Info.isSameBase()) &&
263 "If pointers have same base address it has to be provided.");
264 return TTIImpl->getPointersChainCost(Ptrs, Base, Info, AccessTy, CostKind);
265}
266
268 const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
269 BlockFrequencyInfo *BFI) const {
270 return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
271}
272
276 enum TargetCostKind CostKind) const {
277 InstructionCost Cost = TTIImpl->getInstructionCost(U, Operands, CostKind);
279 "TTI should not produce negative costs!");
280 return Cost;
281}
282
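// The -predictable-branch-threshold override (interpreted as a percentage)
// takes precedence over the target's own threshold.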
BranchProbability TargetTransformInfo::getPredictableBranchThreshold() const {
  return PredictableBranchThreshold.getNumOccurrences() > 0
             ? BranchProbability(PredictableBranchThreshold, 100)
             : TTIImpl->getPredictableBranchThreshold();
}
288
290 return TTIImpl->getBranchMispredictPenalty();
291}
292
294 return TTIImpl->hasBranchDivergence(F);
295}
296
InstructionUniformity
TargetTransformInfo::getInstructionUniformity(const Value *V) const {
  // Calls with the NoDivergenceSource attribute are always uniform.
  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (Call->hasFnAttr(Attribute::NoDivergenceSource))
      return InstructionUniformity::AlwaysUniform;
  }
  return TTIImpl->getInstructionUniformity(V);
}
306
308 unsigned ToAS) const {
309 return TTIImpl->isValidAddrSpaceCast(FromAS, ToAS);
310}
311
313 unsigned ToAS) const {
314 return TTIImpl->addrspacesMayAlias(FromAS, ToAS);
315}
316
318 return TTIImpl->getFlatAddressSpace();
319}
320
322 SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
323 return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
324}
325
327 unsigned ToAS) const {
328 return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
329}
330
331std::pair<KnownBits, KnownBits>
333 const Value &PtrOp) const {
334 return TTIImpl->computeKnownBitsAddrSpaceCast(ToAS, PtrOp);
335}
336
338 unsigned FromAS, unsigned ToAS, const KnownBits &FromPtrBits) const {
339 return TTIImpl->computeKnownBitsAddrSpaceCast(FromAS, ToAS, FromPtrBits);
340}
341
343 unsigned AS) const {
344 return TTIImpl->canHaveNonUndefGlobalInitializerInAddressSpace(AS);
345}
346
348 return TTIImpl->getAssumedAddrSpace(V);
349}
350
352 return TTIImpl->isSingleThreaded();
353}
354
355std::pair<const Value *, unsigned>
357 return TTIImpl->getPredicatedAddrSpace(V);
358}
359
361 IntrinsicInst *II, Value *OldV, Value *NewV) const {
362 return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
363}
364
366 return TTIImpl->isLoweredToCall(F);
367}
368
371 TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const {
372 return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
373}
374
376 return TTIImpl->getEpilogueVectorizationMinVF();
377}
378
380 TailFoldingInfo *TFI) const {
381 return TTIImpl->preferPredicateOverEpilogue(TFI);
382}
383
385 bool IVUpdateMayOverflow) const {
386 return TTIImpl->getPreferredTailFoldingStyle(IVUpdateMayOverflow);
387}
388
389std::optional<Instruction *>
391 IntrinsicInst &II) const {
392 return TTIImpl->instCombineIntrinsic(IC, II);
393}
394
396 InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
397 bool &KnownBitsComputed) const {
398 return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
399 KnownBitsComputed);
400}
401
403 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
404 APInt &UndefElts2, APInt &UndefElts3,
405 std::function<void(Instruction *, unsigned, APInt, APInt &)>
406 SimplifyAndSetOp) const {
407 return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
408 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
409 SimplifyAndSetOp);
410}
411
414 OptimizationRemarkEmitter *ORE) const {
415 return TTIImpl->getUnrollingPreferences(L, SE, UP, ORE);
416}
417
419 PeelingPreferences &PP) const {
420 return TTIImpl->getPeelingPreferences(L, SE, PP);
421}
422
424 return TTIImpl->isLegalAddImmediate(Imm);
425}
426
428 return TTIImpl->isLegalAddScalableImmediate(Imm);
429}
430
432 return TTIImpl->isLegalICmpImmediate(Imm);
433}
434
436 int64_t BaseOffset,
437 bool HasBaseReg, int64_t Scale,
438 unsigned AddrSpace,
439 Instruction *I,
440 int64_t ScalableOffset) const {
441 return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
442 Scale, AddrSpace, I, ScalableOffset);
443}
444
446 const LSRCost &C2) const {
447 return TTIImpl->isLSRCostLess(C1, C2);
448}
449
451 return TTIImpl->isNumRegsMajorCostOfLSR();
452}
453
455 return TTIImpl->shouldDropLSRSolutionIfLessProfitable();
456}
457
459 return TTIImpl->isProfitableLSRChainElement(I);
460}
461
463 return TTIImpl->canMacroFuseCmp();
464}
465
467 ScalarEvolution *SE, LoopInfo *LI,
469 TargetLibraryInfo *LibInfo) const {
470 return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
471}
472
475 ScalarEvolution *SE) const {
476 return TTIImpl->getPreferredAddressingMode(L, SE);
477}
478
480 unsigned AddressSpace,
481 TTI::MaskKind MaskKind) const {
482 return TTIImpl->isLegalMaskedStore(DataType, Alignment, AddressSpace,
483 MaskKind);
484}
485
487 unsigned AddressSpace,
488 TTI::MaskKind MaskKind) const {
489 return TTIImpl->isLegalMaskedLoad(DataType, Alignment, AddressSpace,
490 MaskKind);
491}
492
494 Align Alignment) const {
495 return TTIImpl->isLegalNTStore(DataType, Alignment);
496}
497
498bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
499 return TTIImpl->isLegalNTLoad(DataType, Alignment);
500}
501
503 ElementCount NumElements) const {
504 return TTIImpl->isLegalBroadcastLoad(ElementTy, NumElements);
505}
506
508 Align Alignment) const {
509 return TTIImpl->isLegalMaskedGather(DataType, Alignment);
510}
511
513 VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
514 const SmallBitVector &OpcodeMask) const {
515 return TTIImpl->isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
516}
517
519 Align Alignment) const {
520 return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
521}
522
524 Align Alignment) const {
525 return TTIImpl->forceScalarizeMaskedGather(DataType, Alignment);
526}
527
529 Align Alignment) const {
530 return TTIImpl->forceScalarizeMaskedScatter(DataType, Alignment);
531}
532
534 Align Alignment) const {
535 return TTIImpl->isLegalMaskedCompressStore(DataType, Alignment);
536}
537
539 Align Alignment) const {
540 return TTIImpl->isLegalMaskedExpandLoad(DataType, Alignment);
541}
542
544 Align Alignment) const {
545 return TTIImpl->isLegalStridedLoadStore(DataType, Alignment);
546}
547
549 VectorType *VTy, unsigned Factor, Align Alignment,
550 unsigned AddrSpace) const {
551 return TTIImpl->isLegalInterleavedAccessType(VTy, Factor, Alignment,
552 AddrSpace);
553}
554
556 Type *DataType) const {
557 return TTIImpl->isLegalMaskedVectorHistogram(AddrType, DataType);
558}
559
561 return TTIImpl->enableOrderedReductions();
562}
563
564bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
565 return TTIImpl->hasDivRemOp(DataType, IsSigned);
566}
567
569 unsigned AddrSpace) const {
570 return TTIImpl->hasVolatileVariant(I, AddrSpace);
571}
572
574 return TTIImpl->prefersVectorizedAddressing();
575}
576
578 Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg,
579 int64_t Scale, unsigned AddrSpace) const {
580 InstructionCost Cost = TTIImpl->getScalingFactorCost(
581 Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
582 assert(Cost >= 0 && "TTI should not produce negative costs!");
583 return Cost;
584}
585
587 return TTIImpl->LSRWithInstrQueries();
588}
589
591 return TTIImpl->isTruncateFree(Ty1, Ty2);
592}
593
595 return TTIImpl->isProfitableToHoist(I);
596}
597
598bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }
599
601 return TTIImpl->isTypeLegal(Ty);
602}
603
605 return TTIImpl->getRegUsageForType(Ty);
606}
607
609 return TTIImpl->shouldBuildLookupTables();
610}
611
613 Constant *C) const {
614 return TTIImpl->shouldBuildLookupTablesForConstant(C);
615}
616
618 return TTIImpl->shouldBuildRelLookupTables();
619}
620
622 return TTIImpl->useColdCCForColdCall(F);
623}
624
626 return TTIImpl->useFastCCForInternalCall(F);
627}
628
630 Intrinsic::ID ID) const {
631 return TTIImpl->isTargetIntrinsicTriviallyScalarizable(ID);
632}
633
635 Intrinsic::ID ID, unsigned ScalarOpdIdx) const {
636 return TTIImpl->isTargetIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx);
637}
638
640 Intrinsic::ID ID, int OpdIdx) const {
641 return TTIImpl->isTargetIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);
642}
643
645 Intrinsic::ID ID, int RetIdx) const {
646 return TTIImpl->isTargetIntrinsicWithStructReturnOverloadAtField(ID, RetIdx);
647}
648
651 if (!I)
653
654 // For inserts, check if the value being inserted comes from a single-use
655 // load.
656 if (isa<InsertElementInst>(I) && isa<LoadInst>(I->getOperand(1)) &&
657 I->getOperand(1)->hasOneUse())
659
660 // For extracts, check if it has a single use that is a store.
661 if (isa<ExtractElementInst>(I) && I->hasOneUse() &&
662 isa<StoreInst>(*I->user_begin()))
664
666}
667
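// Estimate the extra cost of building (Insert) and/or taking apart (Extract)
// the demanded elements of Ty when a vector operation has to be scalarized.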
669 VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
670 TTI::TargetCostKind CostKind, bool ForPoisonSrc, ArrayRef<Value *> VL,
671 TTI::VectorInstrContext VIC) const {
672 return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
673 CostKind, ForPoisonSrc, VL, VIC);
674}
675
678 TTI::VectorInstrContext VIC) const {
679 return TTIImpl->getOperandsScalarizationOverhead(Tys, CostKind, VIC);
680}
681
683 return TTIImpl->supportsEfficientVectorElementLoadStore();
684}
685
687 return TTIImpl->supportsTailCalls();
688}
689
691 return TTIImpl->supportsTailCallFor(CB);
692}
693
695 bool LoopHasReductions) const {
696 return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
697}
698
700TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
701 return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
702}
703
705 return TTIImpl->enableSelectOptimize();
706}
707
709 const Instruction *I) const {
710 return TTIImpl->shouldTreatInstructionLikeSelect(I);
711}
712
714 return TTIImpl->enableInterleavedAccessVectorization();
715}
716
718 return TTIImpl->enableMaskedInterleavedAccessVectorization();
719}
720
722 return TTIImpl->isFPVectorizationPotentiallyUnsafe();
723}
724
725bool
727 unsigned BitWidth,
728 unsigned AddressSpace,
729 Align Alignment,
730 unsigned *Fast) const {
731 return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
732 AddressSpace, Alignment, Fast);
733}
734
736TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
737 return TTIImpl->getPopcntSupport(IntTyWidthInBit);
738}
739
741 return TTIImpl->haveFastSqrt(Ty);
742}
743
745 const Instruction *I) const {
746 return TTIImpl->isExpensiveToSpeculativelyExecute(I);
747}
748
750 return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
751}
752
754 InstructionCost Cost = TTIImpl->getFPOpCost(Ty);
755 assert(Cost >= 0 && "TTI should not produce negative costs!");
756 return Cost;
757}
758
760 unsigned Idx,
761 const APInt &Imm,
762 Type *Ty) const {
763 InstructionCost Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
764 assert(Cost >= 0 && "TTI should not produce negative costs!");
765 return Cost;
766}
767
771 InstructionCost Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
772 assert(Cost >= 0 && "TTI should not produce negative costs!");
773 return Cost;
774}
775
777 unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
780 TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
781 assert(Cost >= 0 && "TTI should not produce negative costs!");
782 return Cost;
783}
784
787 const APInt &Imm, Type *Ty,
790 TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
791 assert(Cost >= 0 && "TTI should not produce negative costs!");
792 return Cost;
793}
794
796 const Instruction &Inst, const Function &Fn) const {
797 return TTIImpl->preferToKeepConstantsAttached(Inst, Fn);
798}
799
800unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const {
801 return TTIImpl->getNumberOfRegisters(ClassID);
802}
803
805 bool IsStore) const {
806 return TTIImpl->hasConditionalLoadStoreForType(Ty, IsStore);
807}
808
810 Type *Ty) const {
811 return TTIImpl->getRegisterClassForType(Vector, Ty);
812}
813
814const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const {
815 return TTIImpl->getRegisterClassName(ClassID);
816}
817
820 return TTIImpl->getRegisterBitWidth(K);
821}
822
824 return TTIImpl->getMinVectorRegisterBitWidth();
825}
826
827std::optional<unsigned> TargetTransformInfo::getMaxVScale() const {
828 return TTIImpl->getMaxVScale();
829}
830
831std::optional<unsigned> TargetTransformInfo::getVScaleForTuning() const {
832 return TTIImpl->getVScaleForTuning();
833}
834
836 return TTIImpl->isVScaleKnownToBeAPowerOfTwo();
837}
838
841 return TTIImpl->shouldMaximizeVectorBandwidth(K);
842}
843
845 bool IsScalable) const {
846 return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
847}
848
849unsigned TargetTransformInfo::getMaximumVF(unsigned ElemWidth,
850 unsigned Opcode) const {
851 return TTIImpl->getMaximumVF(ElemWidth, Opcode);
852}
853
854unsigned TargetTransformInfo::getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
855 Type *ScalarValTy) const {
856 return TTIImpl->getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);
857}
858
860 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
861 return TTIImpl->shouldConsiderAddressTypePromotion(
862 I, AllowPromotionWithoutCommonHeader);
863}
864
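// A -cache-line-size override given on the command line wins over the
// target-reported cache line size.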
866 return CacheLineSize.getNumOccurrences() > 0 ? CacheLineSize
867 : TTIImpl->getCacheLineSize();
868}
869
870std::optional<unsigned>
872 return TTIImpl->getCacheSize(Level);
873}
874
875std::optional<unsigned>
877 return TTIImpl->getCacheAssociativity(Level);
878}
879
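// Likewise, -min-page-size overrides the target's reported minimum page size.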
880std::optional<unsigned> TargetTransformInfo::getMinPageSize() const {
881 return MinPageSize.getNumOccurrences() > 0 ? MinPageSize
882 : TTIImpl->getMinPageSize();
883}
884
886 return TTIImpl->getPrefetchDistance();
887}
888
890 unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
891 unsigned NumPrefetches, bool HasCall) const {
892 return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
893 NumPrefetches, HasCall);
894}
895
897 return TTIImpl->getMaxPrefetchIterationsAhead();
898}
899
901 return TTIImpl->enableWritePrefetching();
902}
903
905 return TTIImpl->shouldPrefetchAddressSpace(AS);
906}
907
909 unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
911 PartialReductionExtendKind OpBExtend, std::optional<unsigned> BinOp,
912 TTI::TargetCostKind CostKind, std::optional<FastMathFlags> FMF) const {
913 return TTIImpl->getPartialReductionCost(Opcode, InputTypeA, InputTypeB,
914 AccumType, VF, OpAExtend, OpBExtend,
915 BinOp, CostKind, FMF);
916}
917
919 return TTIImpl->getMaxInterleaveFactor(VF);
920}
921
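// Classify an operand for the cost model: is it a (splat of a) constant, a
// uniform value, and are the constant(s) (negated) powers of two?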
TTI::OperandValueInfo TargetTransformInfo::getOperandInfo(const Value *V) {
  OperandValueKind OpInfo = OK_AnyValue;
  OperandValueProperties OpProps = OP_None;

  // undef/poison don't materialize constants.
  if (isa<UndefValue>(V))
    return {OK_AnyValue, OP_None};

  if (isa<ConstantInt>(V) || isa<ConstantFP>(V)) {
    if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getValue().isPowerOf2())
        OpProps = OP_PowerOf2;
      else if (CI->getValue().isNegatedPowerOf2())
        OpProps = OP_NegatedPowerOf2;
    }
    return {OK_UniformConstantValue, OpProps};
  }

  // A broadcast shuffle creates a uniform value.
  // TODO: Add support for non-zero index broadcasts.
  // TODO: Add support for different source vector width.
  if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
    if (ShuffleInst->isZeroEltSplat())
      OpInfo = OK_UniformValue;

  const Value *Splat = getSplatValue(V);

  // Check for a splat of a constant or for a non-uniform vector of constants
  // and check if the constant(s) are all powers of two.
  if (Splat) {
    // Check for a splat of a uniform value. This is not loop aware, so return
    // true only for the obviously uniform cases (argument, globalvalue)
    if (isa<Argument>(Splat) || isa<GlobalValue>(Splat)) {
      OpInfo = OK_UniformValue;
    } else if (isa<Constant>(Splat)) {
      OpInfo = OK_UniformConstantValue;
      if (auto *CI = dyn_cast<ConstantInt>(Splat)) {
        if (CI->getValue().isPowerOf2())
          OpProps = OP_PowerOf2;
        else if (CI->getValue().isNegatedPowerOf2())
          OpProps = OP_NegatedPowerOf2;
      }
    }
  } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
    OpInfo = OK_NonUniformConstantValue;
    bool AllPow2 = true, AllNegPow2 = true;
    for (uint64_t I = 0, E = CDS->getNumElements(); I != E; ++I) {
      if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I))) {
        AllPow2 &= CI->getValue().isPowerOf2();
        AllNegPow2 &= CI->getValue().isNegatedPowerOf2();
        if (AllPow2 || AllNegPow2)
          continue;
      }
      AllPow2 = AllNegPow2 = false;
      break;
    }
    OpProps = AllPow2 ? OP_PowerOf2 : OpProps;
    OpProps = AllNegPow2 ? OP_NegatedPowerOf2 : OpProps;
  } else if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
    OpInfo = OK_NonUniformConstantValue;
  }

  return {OpInfo, OpProps};
}

TTI::OperandValueInfo TargetTransformInfo::commonOperandInfo(const Value *X,
                                                             const Value *Y) {
  TTI::OperandValueInfo OpInfoX = getOperandInfo(X);
  if (X == Y)
    return OpInfoX;
  return OpInfoX.mergeWith(getOperandInfo(Y));
}

InstructionCost TargetTransformInfo::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    OperandValueInfo Op1Info, OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args, const Instruction *CxtI,
    const TargetLibraryInfo *TLibInfo) const {

  // Use call cost for frem instructions that have platform-specific vector
  // math functions, as those will be replaced with calls later by
  // SelectionDAG or the ReplaceWithVecLib pass.
  if (TLibInfo && Opcode == Instruction::FRem) {
    VectorType *VecTy = dyn_cast<VectorType>(Ty);
    LibFunc Func;
    if (VecTy &&
        TLibInfo->getLibFunc(Instruction::FRem, Ty->getScalarType(), Func) &&
        TLibInfo->isFunctionVectorizable(TLibInfo->getName(Func),
                                         VecTy->getElementCount()))
      return getCallInstrCost(nullptr, VecTy, {VecTy, VecTy}, CostKind);
  }

  InstructionCost Cost = TTIImpl->getArithmeticInstrCost(
      Opcode, Ty, CostKind, Op1Info, Op2Info, Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
1020
1022 VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
1023 const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind) const {
1025 TTIImpl->getAltInstrCost(VecTy, Opcode0, Opcode1, OpcodeMask, CostKind);
1026 assert(Cost >= 0 && "TTI should not produce negative costs!");
1027 return Cost;
1028}
1029
1031 ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef<int> Mask,
1032 TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
1033 ArrayRef<const Value *> Args, const Instruction *CxtI) const {
1034 assert((Mask.empty() || DstTy->isScalableTy() ||
1035 Mask.size() == DstTy->getElementCount().getKnownMinValue()) &&
1036 "Expected the Mask to match the return size if given");
1037 assert(SrcTy->getScalarType() == DstTy->getScalarType() &&
1038 "Expected the same scalar types");
1039 InstructionCost Cost = TTIImpl->getShuffleCost(
1040 Kind, DstTy, SrcTy, Mask, CostKind, Index, SubTp, Args, CxtI);
1041 assert(Cost >= 0 && "TTI should not produce negative costs!");
1042 return Cost;
1043}
1044
1047 if (auto *Cast = dyn_cast<CastInst>(I))
1048 return getPartialReductionExtendKind(Cast->getOpcode());
1049 return PR_None;
1050}
1051
1054 Instruction::CastOps CastOpc) {
1055 switch (CastOpc) {
1056 case Instruction::CastOps::ZExt:
1057 return PR_ZeroExtend;
1058 case Instruction::CastOps::SExt:
1059 return PR_SignExtend;
1060 case Instruction::CastOps::FPExt:
1061 return PR_FPExtend;
1062 default:
1063 return PR_None;
1064 }
1065 llvm_unreachable("Unhandled cast opcode");
1066}
1067
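// Derive a CastContextHint by looking at the memory operation that feeds the
// extend (its source load/gather) or consumes the truncate (its single
// store/scatter user), so targets can fold the cast into the memory op's cost.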
TTI::CastContextHint
TargetTransformInfo::getCastContextHint(const Instruction *I) {
  if (!I)
    return CastContextHint::None;

  auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
                             unsigned GatScatOp) {
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I)
      return CastContextHint::None;

    if (I->getOpcode() == LdStOp)
      return CastContextHint::Normal;

    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == MaskedOp)
        return CastContextHint::Masked;
      if (II->getIntrinsicID() == GatScatOp)
        return CastContextHint::GatherScatter;
    }

    return CastContextHint::None;
  };

  switch (I->getOpcode()) {
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPExt:
    return getLoadStoreKind(I->getOperand(0), Instruction::Load,
                            Intrinsic::masked_load, Intrinsic::masked_gather);
  case Instruction::Trunc:
  case Instruction::FPTrunc:
    if (I->hasOneUse())
      return getLoadStoreKind(*I->user_begin(), Instruction::Store,
                              Intrinsic::masked_store,
                              Intrinsic::masked_scatter);
    break;
  default:
    return CastContextHint::None;
  }

  return CastContextHint::None;
}
1111
1113 unsigned Opcode, Type *Dst, Type *Src, CastContextHint CCH,
1114 TTI::TargetCostKind CostKind, const Instruction *I) const {
1115 assert((I == nullptr || I->getOpcode() == Opcode) &&
1116 "Opcode should reflect passed instruction.");
1118 TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
1119 assert(Cost >= 0 && "TTI should not produce negative costs!");
1120 return Cost;
1121}
1122
1124 unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index,
1127 TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index, CostKind);
1128 assert(Cost >= 0 && "TTI should not produce negative costs!");
1129 return Cost;
1130}
1131
1133 unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I) const {
1134 assert((I == nullptr || I->getOpcode() == Opcode) &&
1135 "Opcode should reflect passed instruction.");
1136 InstructionCost Cost = TTIImpl->getCFInstrCost(Opcode, CostKind, I);
1137 assert(Cost >= 0 && "TTI should not produce negative costs!");
1138 return Cost;
1139}
1140
1142 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
1144 OperandValueInfo Op2Info, const Instruction *I) const {
1145 assert((I == nullptr || I->getOpcode() == Opcode) &&
1146 "Opcode should reflect passed instruction.");
1147 InstructionCost Cost = TTIImpl->getCmpSelInstrCost(
1148 Opcode, ValTy, CondTy, VecPred, CostKind, Op1Info, Op2Info, I);
1149 assert(Cost >= 0 && "TTI should not produce negative costs!");
1150 return Cost;
1151}
1152
1154 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
1155 const Value *Op0, const Value *Op1, TTI::VectorInstrContext VIC) const {
1156 assert((Opcode == Instruction::InsertElement ||
1157 Opcode == Instruction::ExtractElement) &&
1158 "Expecting Opcode to be insertelement/extractelement.");
1160 TTIImpl->getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1, VIC);
1161 assert(Cost >= 0 && "TTI should not produce negative costs!");
1162 return Cost;
1163}
1164
1166 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
1167 Value *Scalar, ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx,
1168 TTI::VectorInstrContext VIC) const {
1169 assert((Opcode == Instruction::InsertElement ||
1170 Opcode == Instruction::ExtractElement) &&
1171 "Expecting Opcode to be insertelement/extractelement.");
1172 InstructionCost Cost = TTIImpl->getVectorInstrCost(
1173 Opcode, Val, CostKind, Index, Scalar, ScalarUserAndIdx, VIC);
1174 assert(Cost >= 0 && "TTI should not produce negative costs!");
1175 return Cost;
1176}
1177
1180 unsigned Index, TTI::VectorInstrContext VIC) const {
1181 // FIXME: Assert that Opcode is either InsertElement or ExtractElement.
1182 // This is mentioned in the interface description and respected by all
1183 // callers, but never asserted upon.
1185 TTIImpl->getVectorInstrCost(I, Val, CostKind, Index, VIC);
1186 assert(Cost >= 0 && "TTI should not produce negative costs!");
1187 return Cost;
1188}
1189
1191 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind,
1192 unsigned Index) const {
1194 TTIImpl->getIndexedVectorInstrCostFromEnd(Opcode, Val, CostKind, Index);
1195 assert(Cost >= 0 && "TTI should not produce negative costs!");
1196 return Cost;
1197}
1198
1200 unsigned Opcode, TTI::TargetCostKind CostKind) const {
1201 assert((Opcode == Instruction::InsertValue ||
1202 Opcode == Instruction::ExtractValue) &&
1203 "Expecting Opcode to be insertvalue/extractvalue.");
1204 InstructionCost Cost = TTIImpl->getInsertExtractValueCost(Opcode, CostKind);
1205 assert(Cost >= 0 && "TTI should not produce negative costs!");
1206 return Cost;
1207}
1208
1210 Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
1212 InstructionCost Cost = TTIImpl->getReplicationShuffleCost(
1213 EltTy, ReplicationFactor, VF, DemandedDstElts, CostKind);
1214 assert(Cost >= 0 && "TTI should not produce negative costs!");
1215 return Cost;
1216}
1217
1219 unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
1221 const Instruction *I) const {
1222 assert((I == nullptr || I->getOpcode() == Opcode) &&
1223 "Opcode should reflect passed instruction.");
1224 InstructionCost Cost = TTIImpl->getMemoryOpCost(
1225 Opcode, Src, Alignment, AddressSpace, CostKind, OpInfo, I);
1226 assert(Cost >= 0 && "TTI should not produce negative costs!");
1227 return Cost;
1228}
1229
1231 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1232 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1233 bool UseMaskForCond, bool UseMaskForGaps) const {
1234 InstructionCost Cost = TTIImpl->getInterleavedMemoryOpCost(
1235 Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, CostKind,
1236 UseMaskForCond, UseMaskForGaps);
1237 assert(Cost >= 0 && "TTI should not produce negative costs!");
1238 return Cost;
1239}
1240
1244 InstructionCost Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
1245 assert(Cost >= 0 && "TTI should not produce negative costs!");
1246 return Cost;
1247}
1248
1250 const MemIntrinsicCostAttributes &MICA,
1252 InstructionCost Cost = TTIImpl->getMemIntrinsicInstrCost(MICA, CostKind);
1253 assert(Cost >= 0 && "TTI should not produce negative costs!");
1254 return Cost;
1255}
1256
1259 ArrayRef<Type *> Tys,
1261 InstructionCost Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
1262 assert(Cost >= 0 && "TTI should not produce negative costs!");
1263 return Cost;
1264}
1265
1267 return TTIImpl->getNumberOfParts(Tp);
1268}
1269
1271 Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr,
1274 TTIImpl->getAddressComputationCost(PtrTy, SE, Ptr, CostKind);
1275 assert(Cost >= 0 && "TTI should not produce negative costs!");
1276 return Cost;
1277}
1278
1280 InstructionCost Cost = TTIImpl->getMemcpyCost(I);
1281 assert(Cost >= 0 && "TTI should not produce negative costs!");
1282 return Cost;
1283}
1284
1286 return TTIImpl->getMaxMemIntrinsicInlineSizeThreshold();
1287}
1288
1290 unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
1293 TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
1294 assert(Cost >= 0 && "TTI should not produce negative costs!");
1295 return Cost;
1296}
1297
1302 TTIImpl->getMinMaxReductionCost(IID, Ty, FMF, CostKind);
1303 assert(Cost >= 0 && "TTI should not produce negative costs!");
1304 return Cost;
1305}
1306
1308 unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
1309 std::optional<FastMathFlags> FMF, TTI::TargetCostKind CostKind) const {
1310 return TTIImpl->getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty, FMF,
1311 CostKind);
1312}
1313
1315 bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty,
1317 return TTIImpl->getMulAccReductionCost(IsUnsigned, RedOpcode, ResTy, Ty,
1318 CostKind);
1319}
1320
1323 return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
1324}
1325
1327 MemIntrinsicInfo &Info) const {
1328 return TTIImpl->getTgtMemIntrinsic(Inst, Info);
1329}
1330
1332 return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
1333}
1334
1336 IntrinsicInst *Inst, Type *ExpectedType, bool CanCreate) const {
1337 return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType,
1338 CanCreate);
1339}
1340
1342 LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
1343 unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
1344 std::optional<uint32_t> AtomicElementSize) const {
1345 return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
1346 DestAddrSpace, SrcAlign, DestAlign,
1347 AtomicElementSize);
1348}
1349
1351 SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
1352 unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
1353 Align SrcAlign, Align DestAlign,
1354 std::optional<uint32_t> AtomicCpySize) const {
1355 TTIImpl->getMemcpyLoopResidualLoweringType(
1356 OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
1357 DestAlign, AtomicCpySize);
1358}
1359
1361 const Function *Callee) const {
1362 return TTIImpl->areInlineCompatible(Caller, Callee);
1363}
1364
1365unsigned
1367 const CallBase &Call,
1368 unsigned DefaultCallPenalty) const {
1369 return TTIImpl->getInlineCallPenalty(F, Call, DefaultCallPenalty);
1370}
1371
1373 const Function *Callee,
1374 ArrayRef<Type *> Types) const {
1375 return TTIImpl->areTypesABICompatible(Caller, Callee, Types);
1376}
1377
1379 Type *Ty) const {
1380 return TTIImpl->isIndexedLoadLegal(Mode, Ty);
1381}
1382
1384 Type *Ty) const {
1385 return TTIImpl->isIndexedStoreLegal(Mode, Ty);
1386}
1387
1389 return TTIImpl->getLoadStoreVecRegBitWidth(AS);
1390}
1391
1393 return TTIImpl->isLegalToVectorizeLoad(LI);
1394}
1395
1397 return TTIImpl->isLegalToVectorizeStore(SI);
1398}
1399
1401 unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
1402 return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
1403 AddrSpace);
1404}
1405
1407 unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
1408 return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
1409 AddrSpace);
1410}
1411
1413 const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
1414 return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
1415}
1416
1418 return TTIImpl->isElementTypeLegalForScalableVector(Ty);
1419}
1420
1422 unsigned LoadSize,
1423 unsigned ChainSizeInBytes,
1424 VectorType *VecTy) const {
1425 return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
1426}
1427
1429 unsigned StoreSize,
1430 unsigned ChainSizeInBytes,
1431 VectorType *VecTy) const {
1432 return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
1433}
1434
1436 bool IsEpilogue) const {
1437 return TTIImpl->preferFixedOverScalableIfEqualCost(IsEpilogue);
1438}
1439
1441 Type *Ty) const {
1442 return TTIImpl->preferInLoopReduction(Kind, Ty);
1443}
1444
1446 return TTIImpl->preferAlternateOpcodeVectorization();
1447}
1448
1450 return TTIImpl->preferPredicatedReductionSelect();
1451}
1452
1454 return TTIImpl->preferEpilogueVectorization();
1455}
1456
1458 return TTIImpl->shouldConsiderVectorizationRegPressure();
1459}
1460
1463 return TTIImpl->getVPLegalizationStrategy(VPI);
1464}
1465
1467 return TTIImpl->hasArmWideBranch(Thumb);
1468}
1469
1471 return TTIImpl->getFeatureMask(F);
1472}
1473
1475 return TTIImpl->getPriorityMask(F);
1476}
1477
1479 return TTIImpl->isMultiversionedFunction(F);
1480}
1481
1483 return TTIImpl->getMaxNumArgs();
1484}
1485
1487 return TTIImpl->shouldExpandReduction(II);
1488}
1489
1492 const IntrinsicInst *II) const {
1493 return TTIImpl->getPreferredExpandedReductionShuffle(II);
1494}
1495
1497 return TTIImpl->getGISelRematGlobalCost();
1498}
1499
1501 return TTIImpl->getMinTripCountTailFoldingThreshold();
1502}
1503
1505 return TTIImpl->supportsScalableVectors();
1506}
1507
1509 return TTIImpl->enableScalableVectorization();
1510}
1511
1513 return TTIImpl->hasActiveVectorLength();
1514}
1515
1517 Instruction *I, SmallVectorImpl<Use *> &OpsToSink) const {
1518 return TTIImpl->isProfitableToSinkOperands(I, OpsToSink);
1519}
1520
1522 return TTIImpl->isVectorShiftByScalarCheap(Ty);
1523}
1524
1525unsigned
1527 Type *ArrayType) const {
1528 return TTIImpl->getNumBytesToPadGlobalArray(Size, ArrayType);
1529}
1530
1532 const Function &F,
1533 SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const {
1534 return TTIImpl->collectKernelLaunchBounds(F, LB);
1535}
1536
1538 return TTIImpl->allowVectorElementIndexingUsingGEP();
1539}
1540

TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}

TargetIRAnalysis::TargetIRAnalysis(
    std::function<Result(const Function &)> TTICallback)
    : TTICallback(std::move(TTICallback)) {}

TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
                                               FunctionAnalysisManager &) {
  assert(!F.isIntrinsic() && "Should not request TTI for intrinsics");
  return TTICallback(F);
}

AnalysisKey TargetIRAnalysis::Key;

TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) {
  return Result(F.getDataLayout());
}

// Register the basic pass.
INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
                "Target Transform Information", false, true)
char TargetTransformInfoWrapperPass::ID = 0;

void TargetTransformInfoWrapperPass::anchor() {}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass()
    : ImmutablePass(ID) {}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass(
    TargetIRAnalysis TIRA)
    : ImmutablePass(ID), TIRA(std::move(TIRA)) {}

TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(const Function &F) {
  FunctionAnalysisManager DummyFAM;
  TTI = TIRA.run(F, DummyFAM);
  return *TTI;
}
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static cl::opt< bool > ForceNestedLoop("force-nested-hardware-loop", cl::Hidden, cl::init(false), cl::desc("Force allowance of nested hardware loops"))
static cl::opt< bool > ForceHardwareLoopPHI("force-hardware-loop-phi", cl::Hidden, cl::init(false), cl::desc("Force hardware loop counter to be updated through a phi"))
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
uint64_t IntrinsicInst * II
if(PassOpts->AAPipeline)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file provides helpers for the implementation of a TargetTransformInfo-conforming class.
static cl::opt< unsigned > PredictableBranchThreshold("predictable-branch-threshold", cl::init(99), cl::Hidden, cl::desc("Use this to override the target's predictable branch threshold (%)."))
static cl::opt< bool > EnableReduxCost("costmodel-reduxcost", cl::init(false), cl::Hidden, cl::desc("Recognize reduction patterns."))
static cl::opt< unsigned > MinPageSize("min-page-size", cl::init(0), cl::Hidden, cl::desc("Use this to override the target's minimum page size."))
static cl::opt< unsigned > CacheLineSize("cache-line-size", cl::init(0), cl::Hidden, cl::desc("Use this to override the target cache line size when " "specified by the user."))
This pass exposes codegen information to IR-level passes.
Class for arbitrary precision integers.
Definition APInt.h:78
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
iterator begin() const
Definition ArrayRef.h:130
Class to represent array types.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
Class to represent function types.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
ImmutablePass class - This class is used to provide information that does not need to be run.
Definition Pass.h:285
ImmutablePass(char &pid)
Definition Pass.h:287
The core instruction combiner logic.
LLVM_ABI IntrinsicCostAttributes(Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarCost=InstructionCost::getInvalid(), bool TypeBasedOnly=false, TargetLibraryInfo const *LibInfo=nullptr)
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Information for memory intrinsic cost model.
The optimization diagnostic interface.
Analysis providing profile information.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
This class represents a constant integer value.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:30
An instruction for storing to memory.
Multiway switch.
Analysis pass providing the TargetTransformInfo.
LLVM_ABI Result run(const Function &F, FunctionAnalysisManager &)
LLVM_ABI TargetIRAnalysis()
Default construct a target IR analysis.
Provides information about what library functions are available for the current target.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
StringRef getName(LibFunc F) const
bool isFunctionVectorizable(StringRef F, const ElementCount &VF) const
CRTP base class for use as a mix-in that aids implementing a TargetTransformInfo-compatible class.
Wrapper pass for TargetTransformInfo.
TargetTransformInfoWrapperPass()
We must provide a default constructor for the pass but it should never be used.
TargetTransformInfo & getTTI(const Function &F)
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const
LLVM_ABI Value * getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType, bool CanCreate=true) const
LLVM_ABI bool isLegalToVectorizeLoad(LoadInst *LI) const
LLVM_ABI std::optional< unsigned > getVScaleForTuning() const
static LLVM_ABI CastContextHint getCastContextHint(const Instruction *I)
Calculates a CastContextHint from I.
LLVM_ABI unsigned getMaxNumArgs() const
LLVM_ABI bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const
Return false if a AS0 address cannot possibly alias a AS1 address.
LLVM_ABI bool isLegalMaskedScatter(Type *DataType, Align Alignment) const
Return true if the target supports masked scatter.
LLVM_ABI bool shouldBuildLookupTables() const
Return true if switches should be turned into lookup tables for the target.
LLVM_ABI bool isLegalToVectorizeStore(StoreInst *SI) const
LLVM_ABI InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of an extended reduction pattern, similar to getArithmeticReductionCost of an Add/...
LLVM_ABI bool areTypesABICompatible(const Function *Caller, const Function *Callee, ArrayRef< Type * > Types) const
LLVM_ABI bool enableAggressiveInterleaving(bool LoopHasReductions) const
Don't restrict interleaved unrolling to small loops.
LLVM_ABI bool isMultiversionedFunction(const Function &F) const
Returns true if this is an instance of a function with multiple versions.
LLVM_ABI InstructionUniformity getInstructionUniformity(const Value *V) const
Get target-specific uniformity information for an instruction.
LLVM_ABI bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const
Return true if it is faster to check if a floating-point value is NaN (or not-NaN) versus a compariso...
LLVM_ABI bool isLegalMaskedStore(Type *DataType, Align Alignment, unsigned AddressSpace, MaskKind MaskKind=VariableOrConstantMask) const
Return true if the target supports masked store.
LLVM_ABI bool supportsEfficientVectorElementLoadStore() const
If target has efficient vector element load/store instructions, it can return true here so that inser...
LLVM_ABI unsigned getAssumedAddrSpace(const Value *V) const
LLVM_ABI bool preferAlternateOpcodeVectorization() const
LLVM_ABI bool shouldDropLSRSolutionIfLessProfitable() const
Return true if LSR should drop a found solution if it's calculated to be less profitable than the bas...
LLVM_ABI bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1, const TargetTransformInfo::LSRCost &C2) const
Return true if LSR cost of C1 is lower than C2.
VectorInstrContext
Represents a hint about the context in which an insert/extract is used.
@ None
The insert/extract is not used with a load/store.
@ Load
The value being inserted comes from a load (InsertElement only).
@ Store
The extracted value is stored (ExtractElement only).
LLVM_ABI unsigned getPrefetchDistance() const
LLVM_ABI Type * getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicElementSize=std::nullopt) const
LLVM_ABI bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const
Return true if the target supports masked expand load.
LLVM_ABI bool prefersVectorizedAddressing() const
Return true if target doesn't mind addresses in vectors.
LLVM_ABI InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo Op1Info={OK_AnyValue, OP_None}, OperandValueInfo Op2Info={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI bool hasBranchDivergence(const Function *F=nullptr) const
Return true if branch divergence exists.
LLVM_ABI MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const
LLVM_ABI void getUnrollingPreferences(Loop *L, ScalarEvolution &, UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const
Get target-customized preferences for the generic loop unrolling transformation.
LLVM_ABI bool shouldBuildLookupTablesForConstant(Constant *C) const
Return true if switches should be turned into lookup tables containing this constant value for the ta...
LLVM_ABI bool supportsTailCallFor(const CallBase *CB) const
If target supports tail call on CB.
LLVM_ABI std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
Targets can implement their own combinations for target-specific intrinsics.
LLVM_ABI bool isProfitableLSRChainElement(Instruction *I) const
LLVM_ABI TypeSize getRegisterBitWidth(RegisterKind K) const
MaskKind
Some targets only support masked load/store with a constant mask.
LLVM_ABI unsigned getInlineCallPenalty(const Function *F, const CallBase &Call, unsigned DefaultCallPenalty) const
Returns a penalty for invoking call Call in F.
LLVM_ABI InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
Estimate the overhead of scalarizing operands with the given types.
LLVM_ABI bool hasActiveVectorLength() const
LLVM_ABI bool isExpensiveToSpeculativelyExecute(const Instruction *I) const
Return true if the cost of the instruction is too high to speculatively execute and should be kept be...
LLVM_ABI bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const
LLVM_ABI bool isLegalMaskedGather(Type *DataType, Align Alignment) const
Return true if the target supports masked gather.
static LLVM_ABI OperandValueInfo commonOperandInfo(const Value *X, const Value *Y)
Collect common data between two OperandValueInfo inputs.
LLVM_ABI InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo OpdInfo={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI std::optional< unsigned > getMaxVScale() const
LLVM_ABI InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) const
LLVM_ABI bool allowVectorElementIndexingUsingGEP() const
Returns true if GEP should not be used to index into vectors for this target.
LLVM_ABI InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, bool UseMaskForCond=false, bool UseMaskForGaps=false) const
LLVM_ABI bool isSingleThreaded() const
LLVM_ABI std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
Can be used to implement target-specific instruction combining.
LLVM_ABI bool enableOrderedReductions() const
Return true if we should be enabling ordered reductions for the target.
LLVM_ABI unsigned getInliningCostBenefitAnalysisProfitableMultiplier() const
LLVM_ABI InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask={}, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, int Index=0, VectorType *SubTp=nullptr, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const
LLVM_ABI InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of vector reduction intrinsics.
LLVM_ABI unsigned getAtomicMemIntrinsicMaxElementSize() const
LLVM_ABI InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
LLVM_ABI InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index=-1, const Value *Op0=nullptr, const Value *Op1=nullptr, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
LLVM_ABI std::pair< KnownBits, KnownBits > computeKnownBitsAddrSpaceCast(unsigned ToAS, const Value &PtrOp) const
LLVM_ABI bool LSRWithInstrQueries() const
Return true if the loop strength reduce pass should make Instruction* based TTI queries to isLegalAdd...
LLVM_ABI unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
LLVM_ABI VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const
static LLVM_ABI PartialReductionExtendKind getPartialReductionExtendKind(Instruction *I)
Get the kind of extension that an instruction represents.
LLVM_ABI bool shouldConsiderVectorizationRegPressure() const
LLVM_ABI bool enableWritePrefetching() const
LLVM_ABI bool shouldTreatInstructionLikeSelect(const Instruction *I) const
Should the Select Optimization pass treat the given instruction like a select, potentially converting...
LLVM_ABI bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
LLVM_ABI bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const
LLVM_ABI TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const
Query the target what the preferred style of tail folding is.
LLVM_ABI InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType=nullptr, TargetCostKind CostKind=TCK_SizeAndLatency) const
Estimate the cost of a GEP operation when lowered.
LLVM_ABI bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
LLVM_ABI bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace) const
Return true is the target supports interleaved access for the given vector type VTy,...
LLVM_ABI unsigned getRegUsageForType(Type *Ty) const
Returns the estimated number of registers required to represent Ty.
LLVM_ABI bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const
Returns true if the target supports broadcasting a load to a vector of type <NumElements x ElementTy...
LLVM_ABI bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const
LLVM_ABI std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
LLVM_ABI InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of an extended reduction pattern, similar to getArithmeticReductionCost of a reduc...
LLVM_ABI unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const
LLVM_ABI ReductionShuffle getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
LLVM_ABI unsigned getRegisterClassForType(bool Vector, Type *Ty=nullptr) const
LLVM_ABI bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace=0, Instruction *I=nullptr, int64_t ScalableOffset=0) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
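Below, a hedged sketch of the kind of question LSR-style code might ask with this hook: whether "base register + 4 * index + 16" is a legal mode for an i32 access; the helper name canFoldScaledAddress and the concrete constants are illustrative.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Sketch: query legality of reg + 4*reg + 16 addressing for an i32 load/store
// in address space 0. Illustrative only.
static bool canFoldScaledAddress(const TargetTransformInfo &TTI,
                                 LLVMContext &Ctx) {
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  return TTI.isLegalAddressingMode(Int32Ty, /*BaseGV=*/nullptr,
                                   /*BaseOffset=*/16, /*HasBaseReg=*/true,
                                   /*Scale=*/4, /*AddrSpace=*/0);
}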
LLVM_ABI PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const
Return hardware support for population count.
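A hedged sketch of how a transform might gate a ctpop-based rewrite on this query; shouldUseHardwarePopcount is an assumed helper name.

#include "llvm/Analysis/TargetTransformInfo.h"

using namespace llvm;

// Sketch: only prefer a popcount-based sequence when the target reports fast
// hardware support for the given bit width. Illustrative only.
static bool shouldUseHardwarePopcount(const TargetTransformInfo &TTI,
                                      unsigned BitWidth) {
  return TTI.getPopcntSupport(BitWidth) ==
         TargetTransformInfo::PSK_FastHardware;
}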
LLVM_ABI unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
LLVM_ABI bool isElementTypeLegalForScalableVector(Type *Ty) const
LLVM_ABI bool forceScalarizeMaskedGather(VectorType *Type, Align Alignment) const
Return true if the target forces scalarizing of llvm.masked.gather intrinsics.
LLVM_ABI unsigned getMaxPrefetchIterationsAhead() const
LLVM_ABI bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const
Return true if globals in this address space can have initializers other than undef.
LLVM_ABI ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const
LLVM_ABI InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind) const
LLVM_ABI bool enableMaskedInterleavedAccessVectorization() const
Enable matching of interleaved access groups that contain predicated accesses or gaps and therefore v...
LLVM_ABI InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind, Instruction *Inst=nullptr) const
Return the expected cost of materialization for the given integer immediate of the specified type for...
LLVM_ABI bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const
Return true if the target supports strided load.
LLVM_ABI TargetTransformInfo & operator=(TargetTransformInfo &&RHS)
LLVM_ABI InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF=FastMathFlags(), TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
LLVM_ABI InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr, const TargetLibraryInfo *TLibInfo=nullptr) const
This is an approximation of reciprocal throughput of a math/logic op.
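For illustration, a minimal sketch comparing the reciprocal-throughput cost of two opcodes on the same type, e.g. to choose between strength-reduced forms; the helper name mulNoCostlierThanAdd is an assumption and defaulted operand info is used for brevity.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Sketch: is a multiply on Ty no more expensive than an add, per the target's
// throughput model? Illustrative only.
static bool mulNoCostlierThanAdd(const TargetTransformInfo &TTI, Type *Ty) {
  InstructionCost MulCost = TTI.getArithmeticInstrCost(
      Instruction::Mul, Ty, TargetTransformInfo::TCK_RecipThroughput);
  InstructionCost AddCost = TTI.getArithmeticInstrCost(
      Instruction::Add, Ty, TargetTransformInfo::TCK_RecipThroughput);
  return MulCost <= AddCost;
}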
LLVM_ABI bool enableSelectOptimize() const
Should the Select Optimization pass be enabled and run.
LLVM_ABI bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
Return any intrinsic address operand indexes which may be rewritten if they use a flat address space ...
OperandValueProperties
Additional properties of an operand's values.
LLVM_ABI int getInliningLastCallToStaticBonus() const
LLVM_ABI InstructionCost getPointersChainCost(ArrayRef< const Value * > Ptrs, const Value *Base, const PointersChainInfo &Info, Type *AccessTy, TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Estimate the cost of a chain of pointers (typically pointer operands of a chain of loads or stores wi...
LLVM_ABI bool isVScaleKnownToBeAPowerOfTwo() const
LLVM_ABI bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const
LLVM_ABI unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const
LLVM_ABI bool isLegalICmpImmediate(int64_t Imm) const
Return true if the specified immediate is a legal icmp immediate, that is, the target has icmp instructi...
LLVM_ABI bool isTypeLegal(Type *Ty) const
Return true if this type is legal.
LLVM_ABI bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, ElementCount VF) const
LLVM_ABI std::optional< unsigned > getCacheAssociativity(CacheLevel Level) const
LLVM_ABI bool isLegalNTLoad(Type *DataType, Align Alignment) const
Return true if the target supports nontemporal load.
LLVM_ABI InstructionCost getMemcpyCost(const Instruction *I) const
LLVM_ABI unsigned adjustInliningThreshold(const CallBase *CB) const
LLVM_ABI bool isLegalAddImmediate(int64_t Imm) const
Return true if the specified immediate is a legal add immediate, that is, the target has add instruction...
LLVM_ABI bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx) const
Identifies if the vector form of the intrinsic that returns a struct is overloaded at the struct elem...
LLVM_ABI unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
LLVM_ABI InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const
LLVM_ABI bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC, TargetLibraryInfo *LibInfo) const
Return true if the target can save a compare for loop count, for example hardware loop saves a compar...
LLVM_ABI bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const
LLVM_ABI Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const
Rewrite intrinsic call II such that OldV will be replaced with NewV, which has a different address sp...
LLVM_ABI InstructionCost getCostOfKeepingLiveOverCall(ArrayRef< Type * > Tys) const
LLVM_ABI unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Some HW prefetchers can handle accesses up to a certain constant stride.
LLVM_ABI bool shouldPrefetchAddressSpace(unsigned AS) const
LLVM_ABI InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TargetCostKind CostKind) const
Return the expected cost of materializing the given integer immediate of the specified type.
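A hedged sketch of using this query to classify a 64-bit constant as cheap; the TCC_Basic threshold, the code-size cost kind, and the helper name isCheapImmediate are illustrative choices rather than fixed policy.

#include <cstdint>
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Sketch: treat an i64 immediate as cheap if the target says materializing it
// costs no more than a basic instruction. Illustrative only.
static bool isCheapImmediate(const TargetTransformInfo &TTI, LLVMContext &Ctx,
                             uint64_t Value) {
  Type *Int64Ty = Type::getInt64Ty(Ctx);
  InstructionCost Cost = TTI.getIntImmCost(
      APInt(64, Value), Int64Ty, TargetTransformInfo::TCK_CodeSize);
  return Cost <= TargetTransformInfo::TCC_Basic;
}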
LLVM_ABI unsigned getMinVectorRegisterBitWidth() const
LLVM_ABI InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr, TTI::TargetCostKind CostKind) const
LLVM_ABI bool isLegalNTStore(Type *DataType, Align Alignment) const
Return true if the target supports nontemporal store.
LLVM_ABI unsigned getFlatAddressSpace() const
Returns the address space ID for a target's 'flat' address space.
LLVM_ABI bool preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const
It can be advantageous to detach complex constants from their uses to make their generation cheaper.
LLVM_ABI bool hasArmWideBranch(bool Thumb) const
LLVM_ABI const char * getRegisterClassName(unsigned ClassID) const
LLVM_ABI bool preferEpilogueVectorization() const
Return true if the loop vectorizer should consider vectorizing an otherwise scalar epilogue loop.
LLVM_ABI bool shouldConsiderAddressTypePromotion(const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const
LLVM_ABI APInt getPriorityMask(const Function &F) const
Returns a bitmask constructed from the target-features or fmv-features metadata of a function corresp...
LLVM_ABI BranchProbability getPredictableBranchThreshold() const
If a branch or a select condition is skewed in one direction by more than this factor,...
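For illustration, a minimal sketch of how a client might use the returned threshold together with an edge probability from profile information; the probability argument is assumed to come from a BranchProbabilityInfo-style analysis, which is not wired up here.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/BranchProbability.h"

using namespace llvm;

// Sketch: a branch counts as predictable if either direction is taken more
// often than the target's threshold. Illustrative only.
static bool isPredictableBranch(const TargetTransformInfo &TTI,
                                BranchProbability TakenProb) {
  BranchProbability Threshold = TTI.getPredictableBranchThreshold();
  return TakenProb > Threshold || TakenProb.getCompl() > Threshold;
}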
LLVM_ABI TargetTransformInfo(std::unique_ptr< const TargetTransformInfoImplBase > Impl)
Construct a TTI object using a type implementing the Concept API below.
LLVM_ABI bool preferInLoopReduction(RecurKind Kind, Type *Ty) const
LLVM_ABI unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const
LLVM_ABI bool hasConditionalLoadStoreForType(Type *Ty, bool IsStore) const
LLVM_ABI unsigned getCacheLineSize() const
LLVM_ABI bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace=0, Align Alignment=Align(1), unsigned *Fast=nullptr) const
Determine if the target supports unaligned memory accesses.
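A hedged sketch of checking whether a 64-bit access at byte alignment is both allowed and fast; the helper name fastUnalignedI64Access is an assumption, and a nonzero Fast out-value is interpreted as fast per the unsigned* parameter above.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Sketch: is an unaligned 64-bit access in address space 0 supported, and does
// the target consider it fast? Illustrative only.
static bool fastUnalignedI64Access(const TargetTransformInfo &TTI,
                                   LLVMContext &Ctx) {
  unsigned Fast = 0;
  bool Allowed = TTI.allowsMisalignedMemoryAccesses(
      Ctx, /*BitWidth=*/64, /*AddressSpace=*/0, Align(1), &Fast);
  return Allowed && Fast != 0;
}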
LLVM_ABI int getInlinerVectorBonusPercent() const
LLVM_ABI unsigned getEpilogueVectorizationMinVF() const
LLVM_ABI void collectKernelLaunchBounds(const Function &F, SmallVectorImpl< std::pair< StringRef, int64_t > > &LB) const
Collect kernel launch bounds for F into LB.
PopcntSupportKind
Flags indicating the kind of support for population count.
LLVM_ABI bool preferPredicatedReductionSelect() const
LLVM_ABI InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty) const
Return the expected cost for the given integer when optimising for size.
LLVM_ABI AddressingModeKind getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const
Return the preferred addressing mode LSR should make efforts to generate.
LLVM_ABI bool isLoweredToCall(const Function *F) const
Test whether calls to a function lower to actual program function calls.
LLVM_ABI bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
LLVM_ABI bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const
Query the target whether it would be profitable to convert the given loop into a hardware loop.
LLVM_ABI unsigned getInliningThresholdMultiplier() const
LLVM_ABI InstructionCost getBranchMispredictPenalty() const
Returns estimated penalty of a branch misprediction in latency.
LLVM_ABI unsigned getNumberOfRegisters(unsigned ClassID) const
LLVM_ABI bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask) const
Return true if this is an alternating opcode pattern that can be lowered to a single instruction on t...
LLVM_ABI bool isProfitableToHoist(Instruction *I) const
Return true if it is profitable to hoist an instruction in the then/else block to before the if.
LLVM_ABI bool supportsScalableVectors() const
LLVM_ABI bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const
Return true if the given instruction (assumed to be a memory access instruction) has a volatile varia...
LLVM_ABI bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) const
Return true if the target supports masked compress store.
LLVM_ABI std::optional< unsigned > getMinPageSize() const
LLVM_ABI bool isFPVectorizationPotentiallyUnsafe() const
Indicate that it is potentially unsafe to automatically vectorize floating-point operations because t...
LLVM_ABI InstructionCost getInsertExtractValueCost(unsigned Opcode, TTI::TargetCostKind CostKind) const
LLVM_ABI bool shouldBuildRelLookupTables() const
Return true if lookup tables should be turned into relative lookup tables.
LLVM_ABI unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const
LLVM_ABI std::optional< unsigned > getCacheSize(CacheLevel Level) const
LLVM_ABI std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
Can be used to implement target-specific instruction combining.
LLVM_ABI bool isLegalAddScalableImmediate(int64_t Imm) const
Return true if adding the specified scalable immediate is legal, that is the target has add instructi...
LLVM_ABI bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx) const
Identifies if the vector form of the intrinsic has a scalar operand.
LLVM_ABI bool hasDivRemOp(Type *DataType, bool IsSigned) const
Return true if the target has a unified operation to calculate division and remainder.
LLVM_ABI InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Returns the cost estimation for alternating opcode pattern that can be lowered to a single instructio...
LLVM_ABI bool enableInterleavedAccessVectorization() const
Enable matching of interleaved access groups.
LLVM_ABI unsigned getMinTripCountTailFoldingThreshold() const
LLVM_ABI InstructionCost getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, ElementCount VF, PartialReductionExtendKind OpAExtend, PartialReductionExtendKind OpBExtend, std::optional< unsigned > BinOp, TTI::TargetCostKind CostKind, std::optional< FastMathFlags > FMF) const
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
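Below, a minimal sketch of the common pattern of summing this cost over every instruction in a function, e.g. for a rough code-size estimate; estimateFunctionSize is an assumed helper name.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"

using namespace llvm;

// Sketch: accumulate the code-size cost of each instruction, passing its
// operands so the target can fold free operations. Illustrative only.
static InstructionCost estimateFunctionSize(const TargetTransformInfo &TTI,
                                            const Function &F) {
  InstructionCost Total = 0;
  for (const Instruction &I : instructions(F)) {
    SmallVector<const Value *, 8> Operands(I.operand_values());
    Total += TTI.getInstructionCost(&I, Operands,
                                    TargetTransformInfo::TCK_CodeSize);
  }
  return Total;
}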
LLVM_ABI unsigned getMaxInterleaveFactor(ElementCount VF) const
LLVM_ABI bool enableScalableVectorization() const
LLVM_ABI bool useFastCCForInternalCall(Function &F) const
Return true if the input function is internal and should use the fastcc calling convention.
LLVM_ABI bool isVectorShiftByScalarCheap(Type *Ty) const
Return true if it's significantly cheaper to shift a vector by a uniform scalar than by an amount whi...
LLVM_ABI bool isNumRegsMajorCostOfLSR() const
Return true if LSR's major cost is the number of registers.
LLVM_ABI unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const
LLVM_ABI bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) const
LLVM_ABI unsigned getGISelRematGlobalCost() const
LLVM_ABI unsigned getNumBytesToPadGlobalArray(unsigned Size, Type *ArrayType) const
MemIndexedMode
The type of load/store indexing.
LLVM_ABI bool isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned AddressSpace, MaskKind MaskKind=VariableOrConstantMask) const
Return true if the target supports masked load.
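As a hedged example, the sort of legality gate a vectorizer-style client might place in front of emitting a masked load; canVectorizeConditionalLoad is an assumed helper name and the MaskKind parameter is left at its default.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Sketch: prefer a single masked load when the target supports it for this
// vector type, alignment, and address space; otherwise the caller would fall
// back to a scalarized sequence. Illustrative only.
static bool canVectorizeConditionalLoad(const TargetTransformInfo &TTI,
                                        VectorType *VecTy, Align Alignment,
                                        unsigned AddrSpace) {
  return TTI.isLegalMaskedLoad(VecTy, Alignment, AddrSpace);
}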
LLVM_ABI InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const
LLVM_ABI bool areInlineCompatible(const Function *Caller, const Function *Callee) const
LLVM_ABI bool useColdCCForColdCall(Function &F) const
Return true if the input function, which is cold at all call sites, should use the coldcc calling conventi...
LLVM_ABI InstructionCost getFPOpCost(Type *Ty) const
Return the expected cost of supporting the floating point operation of the specified type.
LLVM_ABI bool supportsTailCalls() const
Return true if the target supports tail calls.
LLVM_ABI bool canMacroFuseCmp() const
Return true if the target can fuse a compare and branch.
LLVM_ABI bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Query the target whether the specified address space cast from FromAS to ToAS is valid.
LLVM_ABI unsigned getNumberOfParts(Type *Tp) const
AddressingModeKind
Which addressing mode Loop Strength Reduction will try to generate.
LLVM_ABI InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace=0) const
Return the cost of the scaling factor used in the addressing mode represented by AM for this target,...
LLVM_ABI bool isTruncateFree(Type *Ty1, Type *Ty2) const
Return true if it's free to truncate a value of type Ty1 to type Ty2.
LLVM_ABI bool isProfitableToSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const
Return true if sinking I's operands to the same basic block as I is profitable, e....
LLVM_ABI void getMemcpyLoopResidualLoweringType(SmallVectorImpl< Type * > &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicCpySize=std::nullopt) const
LLVM_ABI bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const
Query the target whether it would be preferred to create a predicated vector loop, which can avoid the...
LLVM_ABI bool forceScalarizeMaskedScatter(VectorType *Type, Align Alignment) const
Return true if the target forces scalarizing of llvm.masked.scatter intrinsics.
LLVM_ABI bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx) const
Identifies if the vector form of the intrinsic is overloaded on the type of the operand at index OpdI...
static VectorInstrContext getVectorInstrContextHint(const Instruction *I)
Calculates a VectorInstrContext from I.
LLVM_ABI bool haveFastSqrt(Type *Ty) const
Return true if the hardware has a fast square-root instruction.
LLVM_ABI bool shouldExpandReduction(const IntrinsicInst *II) const
LLVM_ABI InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
Estimate the overhead of scalarizing an instruction.
LLVM_ABI uint64_t getMaxMemIntrinsicInlineSizeThreshold() const
Returns the maximum memset / memcpy size in bytes that still makes it profitable to inline the call.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
LLVM_ABI APInt getFeatureMask(const Function &F) const
Returns a bitmask constructed from the target-features or fmv-features metadata of a function corresp...
LLVM_ABI void getPeelingPreferences(Loop *L, ScalarEvolution &SE, PeelingPreferences &PP) const
Get target-customized preferences for the generic loop peeling transformation.
LLVM_ABI InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency) const
LLVM_ABI InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
CastContextHint
Represents a hint about the context in which a cast is used.
@ Masked
The cast is used with a masked load/store.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
@ GatherScatter
The cast is used with a gather/scatter.
LLVM_ABI InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index, TTI::TargetCostKind CostKind) const
OperandValueKind
Additional information about an operand's possible values.
CacheLevel
The possible cache levels.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
This is the common base class for vector predication intrinsics.
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Length
Definition DWP.cpp:532
InstructionCost Cost
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:149
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI ImmutablePass * createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA)
Create an analysis pass wrapper around a TTI object.
RecurKind
These are the kinds of recurrences that we support.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1915
auto predecessors(const MachineBasicBlock *BB)
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
InstructionUniformity
Enum describing how instructions behave with respect to uniformity and divergence,...
Definition Uniformity.h:18
@ AlwaysUniform
The result values are always uniform.
Definition Uniformity.h:23
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
Attributes of a target dependent hardware loop.
LLVM_ABI bool canAnalyze(LoopInfo &LI)
LLVM_ABI bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, bool ForceNestedLoop=false, bool ForceHardwareLoopPHI=false)
Information about a load/store intrinsic defined by the target.
Returns options for expansion of memcmp. IsZeroCmp is...
OperandValueInfo mergeWith(const OperandValueInfo OpInfoY)
Describe known properties for a set of pointers.
Parameters that control the generic loop unrolling transformation.