#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
STATISTIC(LoopsEarlyExitVectorized, "Number of early exit loops vectorized");
    cl::desc("Enable vectorization of epilogue loops."));

    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "

    "epilogue-vectorization-minimum-VF", cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "

    cl::desc("The maximum allowed number of runtime memory checks"));

    cl::desc("Assume the target supports masked memory operations (used for "

    "prefer-predicate-over-epilogue",
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "Don't tail-predicate loops, create scalar epilogue"),
             "predicate-else-scalar-epilogue",
             "prefer tail-folding, create scalar epilogue if tail "
             "predicate-dont-vectorize",
             "prefers tail-folding, don't attempt vectorization if "
             "tail-folding fails.")));

    "force-tail-folding-style",
    cl::desc("Force the tail folding style"),
        "Create lane mask for data only, using active.lane.mask intrinsic"),
        "data-without-lane-mask",
        "Create lane mask with compare/stepvector"),
        "Create lane mask using active.lane.mask intrinsic, and use "
        "it for both data and control flow"),
        "Use predicated EVL instructions for tail folding. If EVL "
        "is unsupported, fallback to data-without-lane-mask.")));

    cl::desc("Enable use of wide lane masks when used for control flow in "
             "tail-folded loops"));

    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

    cl::desc("A flag that overrides the target's number of scalar registers."));

    cl::desc("A flag that overrides the target's number of vector registers."));

    cl::desc("A flag that overrides the target's max interleave factor for "

    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

    "Pretend that scalable vectors are supported, even if the target does "
    "not support them. This flag should only be used for testing."));

    "The cost of a loop that is considered 'small' by the interleaver."));

    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

    "Enable runtime interleaving until load/store ports are saturated"));

    cl::desc("Max number of stores to be predicated behind an if."));

    cl::desc("Count the induction variable only once when interleaving"));

    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));
    cl::desc("Prefer in-loop vector reductions, "
             "overriding the target's preference."));
    cl::desc("Enable the vectorisation of loops with in-order (strict) "

    "Prefer predicating a reduction operation over an after loop select."));

    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

#ifdef EXPENSIVE_CHECKS
    cl::desc("Verify VPlans after VPlan transforms."));

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    cl::desc("Print VPlans after all VPlan transformations."));

    cl::desc("Print VPlans after specified VPlan transformations (regexp)."));

    cl::desc("Limit VPlan printing to vector loop region in "
             "`-vplan-print-after*` if the plan has one."));

    "Build VPlan for every supported loop nest in the function and bail "
    "out right after the build (stress test the VPlan H-CFG construction "
    "in the VPlan-native vectorization path)."));

    cl::desc("Enable loop interleaving in Loop vectorization passes"));

    cl::desc("Run the Loop vectorization passes"));

    "force-widen-divrem-via-safe-divisor", cl::Hidden,
    "Override cost based safe divisor widening for div/rem instructions"));

    "vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true),
    cl::desc("Try wider VFs if they enable the use of vector variants"));

    "Enable vectorization of early exit loops with uncountable exits."));

    cl::desc("Discard VFs if their register pressure is too high."));
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);

static std::optional<ElementCount>
                     bool CanUseConstantMax = true,
                     bool CanExcludeZeroTrips = false) {
  if (!CanUseConstantMax)

  if (CanUseConstantMax && CanExcludeZeroTrips)
class GeneratedRTChecks;

      VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()),
      Plan.getVectorLoopRegion()->getSinglePredecessor())) {}

        "A high UF for the epilogue loop is likely not beneficial.");

        UnrollFactor, CM, Checks, Plan),

        EPI.MainLoopVF, EPI.MainLoopUF) {}

        EPI.EpilogueVF, EPI.EpilogueUF) {}
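// Pick a debug location for diagnostics: prefer the instruction's own debug
// location, otherwise fall back to the first operand that carries one.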
  if (I->getDebugLoc() != Empty)
    return I->getDebugLoc();

  if (Instruction *OpInst = dyn_cast<Instruction>(Op))
    if (OpInst->getDebugLoc() != Empty)
      return OpInst->getDebugLoc();

  return I->getDebugLoc();

  dbgs() << "LV: " << Prefix << DebugMsg;

static OptimizationRemarkAnalysis

  if (I && I->getDebugLoc())
    DL = I->getDebugLoc();

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
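// Materialize the (possibly scalable) element count VF as a runtime value of
// the given integer type.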
  return B.CreateElementCount(Ty, VF);

          << "loop not vectorized: " << OREMsg);

      "Vectorizing: ", TheLoop->isInnermost() ? "innermost loop" : "outer loop",

           << "vectorized " << LoopType << "loop (vectorization width: "
           << ", interleaved count: " << ore::NV("InterleaveCount", IC) << ")";
    initializeVScaleForTuning();

  bool runtimeChecksRequired();

  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  void collectValuesToIgnore();

  void collectElementTypesForWidening();

  void collectInLoopReductions();
963 "Profitable to scalarize relevant only for VF > 1.");
966 "cost-model should not be used for outer loops (in VPlan-native path)");
968 auto Scalars = InstsToScalarize.find(VF);
969 assert(Scalars != InstsToScalarize.end() &&
970 "VF not yet analyzed for scalarization profitability");
971 return Scalars->second.contains(
I);
978 "cost-model should not be used for outer loops (in VPlan-native path)");
989 auto UniformsPerVF = Uniforms.find(VF);
990 assert(UniformsPerVF != Uniforms.end() &&
991 "VF not yet analyzed for uniformity");
992 return UniformsPerVF->second.count(
I);
999 "cost-model should not be used for outer loops (in VPlan-native path)");
1003 auto ScalarsPerVF = Scalars.find(VF);
1004 assert(ScalarsPerVF != Scalars.end() &&
1005 "Scalar values are not calculated for VF");
1006 return ScalarsPerVF->second.count(
I);
1014 I->getType()->getScalarSizeInBits() < MinBWs.lookup(
I))
1016 return VF.
isVector() && MinBWs.contains(
I) &&
1038 WideningDecisions[{
I, VF}] = {W,
Cost};
1057 for (
unsigned Idx = 0; Idx < Grp->
getFactor(); ++Idx) {
1060 WideningDecisions[{
I, VF}] = {W, InsertPosCost};
1062 WideningDecisions[{
I, VF}] = {W, OtherMemberCost};
1074 "cost-model should not be used for outer loops (in VPlan-native path)");
1076 std::pair<Instruction *, ElementCount> InstOnVF(
I, VF);
1077 auto Itr = WideningDecisions.find(InstOnVF);
1078 if (Itr == WideningDecisions.end())
1080 return Itr->second.first;
1087 std::pair<Instruction *, ElementCount> InstOnVF(
I, VF);
1088 assert(WideningDecisions.contains(InstOnVF) &&
1089 "The cost is not calculated");
1090 return WideningDecisions[InstOnVF].second;
                              std::optional<unsigned> MaskPos,
    CallWideningDecisions[{CI, VF}] = {Kind, Variant, IID, MaskPos, Cost};

    auto I = CallWideningDecisions.find({CI, VF});
    if (I == CallWideningDecisions.end())

    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
    return Legal->isInductionPhi(Op);

    if (VF.isScalar() || Uniforms.contains(VF))
    collectLoopUniforms(VF);
    collectLoopScalars(VF);

    return Legal->isConsecutivePtr(DataType, Ptr) &&
    return Legal->isConsecutivePtr(DataType, Ptr) &&

    return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||

    return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
      const RecurrenceDescriptor &RdxDesc = Reduction.second;
      return TTI.isLegalToVectorizeReduction(RdxDesc, VF);

    return ScalarCost < SafeDivisorCost;
  std::pair<InstructionCost, InstructionCost>

      LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");

      LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: not exiting "
                           "from latch block\n");
                           "interleaved group requires scalar epilogue\n");
    LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");

    return ChosenTailFoldingStyle;

           "Tail folding must not be selected yet.");
    if (!Legal->canFoldTailByMasking()) {
    ChosenTailFoldingStyle = TTI.getPreferredTailFoldingStyle();

    bool EVLIsLegal = UserIC <= 1 && IsScalableVF &&
          dbgs() << "LV: Preference for VP intrinsics indicated. Will "
                    "not try to generate VP Intrinsics "
                 ? "since interleave count specified is greater than 1.\n"
                 : "due to non-interleaving reasons.\n"));

    return InLoopReductions.contains(Phi);
    return InLoopReductions;

           TTI.preferPredicatedReductionSelect();

    WideningDecisions.clear();
    CallWideningDecisions.clear();

  bool isEpilogueVectorizationProfitable(const ElementCount VF,
                                         const unsigned IC) const;

  std::optional<InstructionCost> getReductionPatternCost(Instruction *I,
                                                         Type *VectorTy) const;

  bool shouldConsiderInvariant(Value *Op);

  unsigned NumPredStores = 0;

  std::optional<unsigned> VScaleForTuning;

  void initializeVScaleForTuning() {
    auto Max = Attr.getVScaleRangeMax();
    if (Max && Min == Max) {
      VScaleForTuning = Max;
  FixedScalableVFPair computeFeasibleMaxVF(unsigned MaxTripCount,
                                           ElementCount UserVF, unsigned UserIC,
                                           bool FoldTailByMasking);

  ElementCount clampVFByMaxTripCount(ElementCount VF, unsigned MaxTripCount,
                                     bool FoldTailByMasking) const;

  ElementCount getMaximizedVFForTarget(unsigned MaxTripCount,
                                       unsigned SmallestType,
                                       unsigned WidestType,
                                       ElementCount MaxSafeVF, unsigned UserIC,
                                       bool FoldTailByMasking);

  bool isScalableVectorizationAllowed();

  ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);

  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

                                    ElementCount VF) const;

  MapVector<Instruction *, uint64_t> MinBWs;

  using ScalarCostsTy = MapVector<Instruction *, InstructionCost>;

  DenseMap<ElementCount, SmallPtrSet<BasicBlock *, 4>>
      PredicatedBBsAfterVectorization;

  std::optional<bool> IsScalableVectorizationAllowed;

  std::optional<unsigned> MaxSafeElements;

  MapVector<ElementCount, ScalarCostsTy> InstsToScalarize;

  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;

  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  SmallPtrSet<PHINode *, 4> InLoopReductions;

  DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;

                                  ScalarCostsTy &ScalarCosts,

  void collectLoopUniforms(ElementCount VF);

  void collectLoopScalars(ElementCount VF);

  using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
                                std::pair<InstWidening, InstructionCost>>;

  DecisionList WideningDecisions;

  using CallDecisionList =
      DenseMap<std::pair<CallInst *, ElementCount>, CallWideningDecision>;

  CallDecisionList CallWideningDecisions;

  bool needsExtract(Value *V, ElementCount VF) const {
           getWideningDecision(I, VF) == CM_Scalarize ||
    return !Scalars.contains(VF) || !isScalarAfterVectorization(I, VF);

  SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
                                                   ElementCount VF) const {
    SmallPtrSet<const Value *, 4> UniqueOperands;
    SmallVector<Value *, 4> Res;
          !needsExtract(Op, VF))
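// Helper that generates and owns the runtime SCEV and memory checks emitted
// ahead of the vector loop, and cleans them up again if they end up unused.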
class GeneratedRTChecks {
  Value *SCEVCheckCond = nullptr;

  Value *MemRuntimeCheckCond = nullptr;

  bool CostTooHigh = false;

  Loop *OuterLoop = nullptr;

      : DT(DT), LI(LI), TTI(TTI),
        SCEVExp(*PSE.getSE(), "scev.check", false),
        MemCheckExp(*PSE.getSE(), "scev.check", false),

  void create(Loop *L, const LoopAccessInfo &LAI,
              const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC,
              OptimizationRemarkEmitter &ORE) {

        return OptimizationRemarkAnalysisAliasing(
                   DEBUG_TYPE, "TooManyMemoryRuntimeChecks", L->getStartLoc(),
               << "loop not vectorized: too many memory checks needed";

                     nullptr, "vector.scevcheck");

      SCEVExpanderCleaner SCEVCleaner(SCEVExp);
      SCEVCleaner.cleanup();

    if (RtPtrChecking.Need) {
      auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
      MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,

      auto DiffChecks = RtPtrChecking.getDiffChecks();
        Value *RuntimeVF = nullptr;
            [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) {
              RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);

            MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),

      assert(MemRuntimeCheckCond &&
             "no RT checks generated although RtPtrChecking "
             "claimed checks are required");

    if (!MemCheckBlock && !SCEVCheckBlock)

    if (SCEVCheckBlock) {
      auto *UI = new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);

    if (MemCheckBlock) {
      auto *UI = new UnreachableInst(Preheader->getContext(), MemCheckBlock);

    if (MemCheckBlock) {
    if (SCEVCheckBlock) {

      OuterLoop = L->getParentLoop();
    if (SCEVCheckBlock || MemCheckBlock)

      for (Instruction &I : *SCEVCheckBlock) {
        if (SCEVCheckBlock->getTerminator() == &I)

    if (MemCheckBlock) {
      for (Instruction &I : *MemCheckBlock) {
        if (MemCheckBlock->getTerminator() == &I)

      ScalarEvolution *SE = MemCheckExp.getSE();
      const SCEV *Cond = SE->getSCEV(MemRuntimeCheckCond);

        unsigned BestTripCount = 2;

                PSE, OuterLoop, false))
          if (EstimatedTC->isFixed())
            BestTripCount = EstimatedTC->getFixedValue();

        NewMemCheckCost = std::max(NewMemCheckCost.getValue(),
                                   (InstructionCost::CostType)1);

        if (BestTripCount > 1)
              << "We expect runtime memory checks to be hoisted "
              << "out of the outer loop. Cost reduced from "
              << MemCheckCost << " to " << NewMemCheckCost << '\n');

        MemCheckCost = NewMemCheckCost;

      RTCheckCost += MemCheckCost;

    if (SCEVCheckBlock || MemCheckBlock)
      LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
  ~GeneratedRTChecks() {
    SCEVExpanderCleaner SCEVCleaner(SCEVExp);
    SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
    bool SCEVChecksUsed = !SCEVCheckBlock || !pred_empty(SCEVCheckBlock);
    bool MemChecksUsed = !MemCheckBlock || !pred_empty(MemCheckBlock);

      SCEVCleaner.markResultUsed();

    if (MemChecksUsed) {
      MemCheckCleaner.markResultUsed();

      auto &SE = *MemCheckExp.getSE();
          I.eraseFromParent();

      MemCheckCleaner.cleanup();
      SCEVCleaner.cleanup();

    if (!SCEVChecksUsed)
      SCEVCheckBlock->eraseFromParent();
      MemCheckBlock->eraseFromParent();

  std::pair<Value *, BasicBlock *> getSCEVChecks() const {
    using namespace llvm::PatternMatch;
      return {nullptr, nullptr};
    return {SCEVCheckCond, SCEVCheckBlock};

  std::pair<Value *, BasicBlock *> getMemRuntimeChecks() const {
    using namespace llvm::PatternMatch;
    if (MemRuntimeCheckCond && match(MemRuntimeCheckCond, m_ZeroInt()))
      return {nullptr, nullptr};
    return {MemRuntimeCheckCond, MemCheckBlock};

  bool hasChecks() const {
    return getSCEVChecks().first || getMemRuntimeChecks().first;
  LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");

  LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "

  for (Loop *InnerL : L)

                         ? B.CreateSExtOrTrunc(Index, StepTy)
                         : B.CreateCast(Instruction::SIToFP, Index, StepTy);
  if (CastedIndex != Index) {
    Index = CastedIndex;

    assert(X->getType() == Y->getType() && "Types don't match!");
    return B.CreateAdd(X, Y);

    assert(X->getType()->getScalarType() == Y->getType() &&
           "Types don't match!");
    return B.CreateMul(X, Y);

  switch (InductionKind) {
           "Vector indices not supported for integer inductions yet");
           "Index type does not match StartValue type");
      return B.CreateSub(StartValue, Index);
    return B.CreatePtrAdd(StartValue, CreateMul(Index, Step));
           "Vector indices not supported for FP inductions yet");
    assert((InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub) &&
           "Original bin op should be defined for FP induction");

    Value *MulExp = B.CreateFMul(Step, Index);
    return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,

  if (std::optional<unsigned> MaxVScale = TTI.getMaxVScale())
  if (F.hasFnAttribute(Attribute::VScaleRange))
    return F.getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
  return std::nullopt;
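// Determines whether the induction-variable overflow check is known to be
// redundant: the headroom between the maximum trip count and the widest
// induction type's maximum value must exceed VF * UF (scaled by the maximum
// vscale for scalable VFs).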
    ElementCount VF, std::optional<unsigned> UF = std::nullopt) {
  unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);

  IntegerType *IdxTy = Cost->Legal->getWidestInductionType();

  if (unsigned TC = Cost->PSE.getSmallConstantMaxTripCount()) {
    std::optional<unsigned> MaxVScale =
      MaxVF *= *MaxVScale;

    return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);

  return TTI.enableMaskedInterleavedAccessVectorization();

                              VPlan *Plan = nullptr) {
    auto IP = IRVPBB->begin();
      R.moveBefore(*IRVPBB, IP);
      R.moveBefore(*IRVPBB, IRVPBB->end());

  assert(VectorPH && "Invalid loop structure");
          Cost->requiresScalarEpilogue(VF.isVector())) &&
         "loops not exiting via the latch without required epilogue?");

                 Twine(Prefix) + "scalar.ph");

  auto *Cmp = L->getLatchCmpInst();
    InstsToIgnore.insert(Cmp);
  for (const auto &KV : IL) {
               [&](const User *U) { return U == IV || U == Cmp; }))
      InstsToIgnore.insert(IVInst);
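// DenseMap traits for common-subexpression elimination of identical
// instructions generated during vectorization; two instructions are
// considered equal if isIdenticalTo() holds.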
struct CSEDenseMapInfo {
    return DenseMapInfo<Instruction *>::getTombstoneKey();

  static unsigned getHashValue(const Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");

  static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
    return LHS->isIdenticalTo(RHS);

    if (!CSEDenseMapInfo::canHandle(&In))

      In.replaceAllUsesWith(V);
      In.eraseFromParent();
                                     std::optional<unsigned> VScale) {
    EstimatedVF *= *VScale;
  assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");

  for (auto &ArgOp : CI->args())

  return ScalarCallCost;

  assert(ID && "Expected intrinsic call!");
    FMF = FPMO->getFastMathFlags();

                  std::back_inserter(ParamTys),
                  [&](Type *Ty) { return maybeVectorizeType(Ty, VF); });

  return TTI.getIntrinsicInstrCost(CostAttrs, CostKind);

  BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB];

    Builder.SetInsertPoint(NewPhi);
      NewPhi->addIncoming(State.get(Inc), State.CFG.VPBB2IRBB[VPBB]);
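// collectLoopScalars: for the given VF, find instructions that must stay
// scalar, e.g. pointers used only by scalarized memory accesses and
// induction variables whose users are all scalar.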
void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
         "This function should not be visited twice for the same VF");
2541 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
2542 assert(WideningDecision != CM_Unknown &&
2543 "Widening decision should be ready at this moment");
2545 if (Ptr == Store->getValueOperand())
2546 return WideningDecision == CM_Scalarize;
2548 "Ptr is neither a value or pointer operand");
2549 return WideningDecision != CM_GatherScatter;
2554 auto IsLoopVaryingGEP = [&](
Value *
V) {
2565 if (!IsLoopVaryingGEP(Ptr))
2577 if (IsScalarUse(MemAccess, Ptr) &&
2581 PossibleNonScalarPtrs.
insert(
I);
2597 for (
auto *BB : TheLoop->
blocks())
2598 for (
auto &
I : *BB) {
2600 EvaluatePtrUse(Load,
Load->getPointerOperand());
2602 EvaluatePtrUse(Store,
Store->getPointerOperand());
2603 EvaluatePtrUse(Store,
Store->getValueOperand());
2606 for (
auto *
I : ScalarPtrs)
2607 if (!PossibleNonScalarPtrs.
count(
I)) {
2615 auto ForcedScalar = ForcedScalars.
find(VF);
2616 if (ForcedScalar != ForcedScalars.
end())
2617 for (
auto *
I : ForcedScalar->second) {
2618 LLVM_DEBUG(
dbgs() <<
"LV: Found (forced) scalar instruction: " << *
I <<
"\n");
2627 while (Idx != Worklist.
size()) {
2629 if (!IsLoopVaryingGEP(Dst->getOperand(0)))
2633 auto *J = cast<Instruction>(U);
2634 return !TheLoop->contains(J) || Worklist.count(J) ||
2635 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
2636 IsScalarUse(J, Src));
2639 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *Src <<
"\n");
2645 for (
const auto &Induction :
Legal->getInductionVars()) {
2646 auto *Ind = Induction.first;
2651 if (Ind ==
Legal->getPrimaryInduction() && foldTailByMasking())
2656 auto IsDirectLoadStoreFromPtrIndvar = [&](
Instruction *Indvar,
2658 return Induction.second.getKind() ==
2666 bool ScalarInd =
all_of(Ind->users(), [&](User *U) ->
bool {
2667 auto *I = cast<Instruction>(U);
2668 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2669 IsDirectLoadStoreFromPtrIndvar(Ind, I);
2678 if (IndUpdatePhi &&
Legal->isFixedOrderRecurrence(IndUpdatePhi))
2683 bool ScalarIndUpdate =
all_of(IndUpdate->users(), [&](User *U) ->
bool {
2684 auto *I = cast<Instruction>(U);
2685 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
2686 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
2688 if (!ScalarIndUpdate)
2693 Worklist.
insert(IndUpdate);
2694 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *Ind <<
"\n");
2695 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *IndUpdate
2709 switch(
I->getOpcode()) {
2712 case Instruction::Call:
2716 case Instruction::Load:
2717 case Instruction::Store: {
2726 TTI.isLegalMaskedGather(VTy, Alignment))
2728 TTI.isLegalMaskedScatter(VTy, Alignment));
2730 case Instruction::UDiv:
2731 case Instruction::SDiv:
2732 case Instruction::SRem:
2733 case Instruction::URem: {
2758 if (
Legal->blockNeedsPredication(
I->getParent()))
2770 switch(
I->getOpcode()) {
2773 "instruction should have been considered by earlier checks");
2774 case Instruction::Call:
2778 "should have returned earlier for calls not needing a mask");
2780 case Instruction::Load:
2783 case Instruction::Store: {
2791 case Instruction::UDiv:
2792 case Instruction::URem:
2794 return !
Legal->isInvariant(
I->getOperand(1));
2795 case Instruction::SDiv:
2796 case Instruction::SRem:
2809 if (!
Legal->blockNeedsPredication(BB))
2816 "Header has smaller block freq than dominated BB?");
2817 return std::round((
double)HeaderFreq /
BBFreq);
2820std::pair<InstructionCost, InstructionCost>
2823 assert(
I->getOpcode() == Instruction::UDiv ||
2824 I->getOpcode() == Instruction::SDiv ||
2825 I->getOpcode() == Instruction::SRem ||
2826 I->getOpcode() == Instruction::URem);
2835 ScalarizationCost = 0;
2841 ScalarizationCost +=
2845 ScalarizationCost +=
2847 TTI.getArithmeticInstrCost(
I->getOpcode(),
I->getType(),
CostKind);
2865 TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
2870 SafeDivisorCost +=
TTI.getArithmeticInstrCost(
2872 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2873 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2875 return {ScalarizationCost, SafeDivisorCost};
2882 "Decision should not be set yet.");
2884 assert(Group &&
"Must have a group.");
2885 unsigned InterleaveFactor = Group->getFactor();
2889 auto &
DL =
I->getDataLayout();
2901 bool ScalarNI =
DL.isNonIntegralPointerType(ScalarTy);
2902 for (
unsigned Idx = 0; Idx < InterleaveFactor; Idx++) {
2907 bool MemberNI =
DL.isNonIntegralPointerType(MemberTy);
2909 if (MemberNI != ScalarNI)
2912 if (MemberNI && ScalarNI &&
2913 ScalarTy->getPointerAddressSpace() !=
2914 MemberTy->getPointerAddressSpace())
2923 bool PredicatedAccessRequiresMasking =
2925 bool LoadAccessWithGapsRequiresEpilogMasking =
2928 bool StoreAccessWithGapsRequiresMasking =
2930 if (!PredicatedAccessRequiresMasking &&
2931 !LoadAccessWithGapsRequiresEpilogMasking &&
2932 !StoreAccessWithGapsRequiresMasking)
2939 "Masked interleave-groups for predicated accesses are not enabled.");
2941 if (Group->isReverse())
2945 bool NeedsMaskForGaps = LoadAccessWithGapsRequiresEpilogMasking ||
2946 StoreAccessWithGapsRequiresMasking;
2954 :
TTI.isLegalMaskedStore(Ty, Alignment, AS);
2966 if (!
Legal->isConsecutivePtr(ScalarTy, Ptr))
2976 auto &
DL =
I->getDataLayout();
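// collectLoopUniforms: for the given VF, find instructions that produce the
// same value for every vector lane, seeded by uniform memory operations,
// loop-exit compares and certain intrinsics, then grown through operands.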
void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
         "This function should not be visited twice for the same VF");
2994 Uniforms[VF].
clear();
3002 auto IsOutOfScope = [&](
Value *V) ->
bool {
3014 auto AddToWorklistIfAllowed = [&](
Instruction *
I) ->
void {
3015 if (IsOutOfScope(
I)) {
3020 if (isPredicatedInst(
I)) {
3022 dbgs() <<
"LV: Found not uniform due to requiring predication: " << *
I
3026 LLVM_DEBUG(
dbgs() <<
"LV: Found uniform instruction: " << *
I <<
"\n");
3036 for (BasicBlock *
E : Exiting) {
3040 if (Cmp && TheLoop->
contains(Cmp) &&
Cmp->hasOneUse())
3041 AddToWorklistIfAllowed(Cmp);
3050 if (PrevVF.isVector()) {
3051 auto Iter = Uniforms.
find(PrevVF);
3052 if (Iter != Uniforms.
end() && !Iter->second.contains(
I))
3055 if (!
Legal->isUniformMemOp(*
I, VF))
3065 auto IsUniformDecision = [&](
Instruction *
I, ElementCount VF) {
3066 InstWidening WideningDecision = getWideningDecision(
I, VF);
3067 assert(WideningDecision != CM_Unknown &&
3068 "Widening decision should be ready at this moment");
3070 if (IsUniformMemOpUse(
I))
3073 return (WideningDecision == CM_Widen ||
3074 WideningDecision == CM_Widen_Reverse ||
3075 WideningDecision == CM_Interleave);
3085 (IsUniformDecision(
I, VF) ||
Legal->isInvariant(Ptr));
3093 SetVector<Value *> HasUniformUse;
3097 for (
auto *BB : TheLoop->
blocks())
3098 for (
auto &
I : *BB) {
3100 switch (
II->getIntrinsicID()) {
3101 case Intrinsic::sideeffect:
3102 case Intrinsic::experimental_noalias_scope_decl:
3103 case Intrinsic::assume:
3104 case Intrinsic::lifetime_start:
3105 case Intrinsic::lifetime_end:
3107 AddToWorklistIfAllowed(&
I);
3115 if (IsOutOfScope(EVI->getAggregateOperand())) {
3116 AddToWorklistIfAllowed(EVI);
3122 "Expected aggregate value to be call return value");
3135 if (IsUniformMemOpUse(&
I))
3136 AddToWorklistIfAllowed(&
I);
3138 if (IsVectorizedMemAccessUse(&
I, Ptr))
3139 HasUniformUse.
insert(Ptr);
3145 for (
auto *V : HasUniformUse) {
3146 if (IsOutOfScope(V))
3149 bool UsersAreMemAccesses =
all_of(
I->users(), [&](User *U) ->
bool {
3150 auto *UI = cast<Instruction>(U);
3151 return TheLoop->contains(UI) && IsVectorizedMemAccessUse(UI, V);
3153 if (UsersAreMemAccesses)
3154 AddToWorklistIfAllowed(
I);
3161 while (Idx != Worklist.
size()) {
3164 for (
auto *OV :
I->operand_values()) {
3166 if (IsOutOfScope(OV))
3171 if (
OP &&
Legal->isFixedOrderRecurrence(
OP))
3177 auto *J = cast<Instruction>(U);
3178 return Worklist.count(J) || IsVectorizedMemAccessUse(J, OI);
3180 AddToWorklistIfAllowed(OI);
3191 for (
const auto &Induction :
Legal->getInductionVars()) {
3192 auto *Ind = Induction.first;
3197 bool UniformInd =
all_of(Ind->users(), [&](User *U) ->
bool {
3198 auto *I = cast<Instruction>(U);
3199 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3200 IsVectorizedMemAccessUse(I, Ind);
3207 bool UniformIndUpdate =
all_of(IndUpdate->users(), [&](User *U) ->
bool {
3208 auto *I = cast<Instruction>(U);
3209 return I == Ind || Worklist.count(I) ||
3210 IsVectorizedMemAccessUse(I, IndUpdate);
3212 if (!UniformIndUpdate)
3216 AddToWorklistIfAllowed(Ind);
3217 AddToWorklistIfAllowed(IndUpdate);
3226 if (
Legal->getRuntimePointerChecking()->Need) {
3228 "runtime pointer checks needed. Enable vectorization of this "
3229 "loop with '#pragma clang loop vectorize(enable)' when "
3230 "compiling with -Os/-Oz",
3231 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
3235 if (!
PSE.getPredicate().isAlwaysTrue()) {
3237 "runtime SCEV checks needed. Enable vectorization of this "
3238 "loop with '#pragma clang loop vectorize(enable)' when "
3239 "compiling with -Os/-Oz",
3240 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
3245 if (!
Legal->getLAI()->getSymbolicStrides().empty()) {
3247 "runtime stride == 1 checks needed. Enable vectorization of "
3248 "this loop without such check by compiling with -Os/-Oz",
3249 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
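// Scalable vectorization is only allowed when the target reports support,
// the hints do not disable it, and all reductions and element types found in
// the loop can be handled with scalable vectors.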
bool LoopVectorizationCostModel::isScalableVectorizationAllowed() {
  if (IsScalableVectorizationAllowed)
    return *IsScalableVectorizationAllowed;

  IsScalableVectorizationAllowed = false;
3264 if (Hints->isScalableVectorizationDisabled()) {
3266 "ScalableVectorizationDisabled", ORE, TheLoop);
3270 LLVM_DEBUG(
dbgs() <<
"LV: Scalable vectorization is available\n");
3273 std::numeric_limits<ElementCount::ScalarTy>::max());
3282 if (!canVectorizeReductions(MaxScalableVF)) {
3284 "Scalable vectorization not supported for the reduction "
3285 "operations found in this loop.",
3286 "ScalableVFUnfeasible", ORE, TheLoop);
3292 if (
any_of(ElementTypesInLoop, [&](
Type *Ty) {
3297 "for all element types found in this loop.",
3298 "ScalableVFUnfeasible", ORE, TheLoop);
3304 "for safe distance analysis.",
3305 "ScalableVFUnfeasible", ORE, TheLoop);
3309 IsScalableVectorizationAllowed =
true;
3314LoopVectorizationCostModel::getMaxLegalScalableVF(
unsigned MaxSafeElements) {
3315 if (!isScalableVectorizationAllowed())
3319 std::numeric_limits<ElementCount::ScalarTy>::max());
3320 if (
Legal->isSafeForAnyVectorWidth())
3321 return MaxScalableVF;
3329 "Max legal vector width too small, scalable vectorization "
3331 "ScalableVFUnfeasible", ORE, TheLoop);
3333 return MaxScalableVF;
3336FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
3337 unsigned MaxTripCount, ElementCount UserVF,
unsigned UserIC,
3338 bool FoldTailByMasking) {
3340 unsigned SmallestType, WidestType;
3341 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
3347 unsigned MaxSafeElementsPowerOf2 =
3349 if (!
Legal->isSafeForAnyStoreLoadForwardDistances()) {
3350 unsigned SLDist =
Legal->getMaxStoreLoadForwardSafeDistanceInBits();
3351 MaxSafeElementsPowerOf2 =
3352 std::min(MaxSafeElementsPowerOf2, SLDist / WidestType);
3355 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElementsPowerOf2);
3357 if (!
Legal->isSafeForAnyVectorWidth())
3358 this->MaxSafeElements = MaxSafeElementsPowerOf2;
3360 LLVM_DEBUG(
dbgs() <<
"LV: The max safe fixed VF is: " << MaxSafeFixedVF
3362 LLVM_DEBUG(
dbgs() <<
"LV: The max safe scalable VF is: " << MaxSafeScalableVF
3367 auto MaxSafeUserVF =
3368 UserVF.
isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
3370 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
3373 return FixedScalableVFPair(
3379 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
3385 <<
" is unsafe, clamping to max safe VF="
3386 << MaxSafeFixedVF <<
".\n");
3388 return OptimizationRemarkAnalysis(
DEBUG_TYPE,
"VectorizationFactor",
3391 <<
"User-specified vectorization factor "
3392 <<
ore::NV(
"UserVectorizationFactor", UserVF)
3393 <<
" is unsafe, clamping to maximum safe vectorization factor "
3394 <<
ore::NV(
"VectorizationFactor", MaxSafeFixedVF);
3396 return MaxSafeFixedVF;
3401 <<
" is ignored because scalable vectors are not "
3404 return OptimizationRemarkAnalysis(
DEBUG_TYPE,
"VectorizationFactor",
3407 <<
"User-specified vectorization factor "
3408 <<
ore::NV(
"UserVectorizationFactor", UserVF)
3409 <<
" is ignored because the target does not support scalable "
3410 "vectors. The compiler will pick a more suitable value.";
3414 <<
" is unsafe. Ignoring scalable UserVF.\n");
3416 return OptimizationRemarkAnalysis(
DEBUG_TYPE,
"VectorizationFactor",
3419 <<
"User-specified vectorization factor "
3420 <<
ore::NV(
"UserVectorizationFactor", UserVF)
3421 <<
" is unsafe. Ignoring the hint to let the compiler pick a "
3422 "more suitable value.";
3427 LLVM_DEBUG(
dbgs() <<
"LV: The Smallest and Widest types: " << SmallestType
3428 <<
" / " << WidestType <<
" bits.\n");
3433 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3434 MaxSafeFixedVF, UserIC, FoldTailByMasking))
3438 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3439 MaxSafeScalableVF, UserIC, FoldTailByMasking))
3440 if (MaxVF.isScalable()) {
3441 Result.ScalableVF = MaxVF;
3442 LLVM_DEBUG(
dbgs() <<
"LV: Found feasible scalable VF = " << MaxVF
3451 if (
Legal->getRuntimePointerChecking()->Need &&
TTI.hasBranchDivergence()) {
3455 "Not inserting runtime ptr check for divergent target",
3456 "runtime pointer checks needed. Not enabled for divergent target",
3457 "CantVersionLoopWithDivergentTarget",
ORE,
TheLoop);
3463 unsigned MaxTC =
PSE.getSmallConstantMaxTripCount();
3468 LLVM_DEBUG(
dbgs() <<
"LV: Found maximum trip count: " << MaxTC <<
'\n');
3471 "loop trip count is one, irrelevant for vectorization",
3482 Legal->getWidestInductionType()->getScalarSizeInBits() &&
3486 "Trip count computation wrapped",
3487 "backedge-taken count is -1, loop trip count wrapped to 0",
3492 switch (EpilogueLoweringStatus) {
3494 return computeFeasibleMaxVF(MaxTC, UserVF, UserIC,
false);
3499 <<
"LV: Not allowing epilogue, creating tail-folded "
3500 <<
"vector loop.\n");
3506 LLVM_DEBUG(
dbgs() <<
"LV: Not allowing epilogue due to -Os/-Oz.\n");
3508 LLVM_DEBUG(
dbgs() <<
"LV: Not allowing epilogue due to low trip "
3524 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
3525 "No decisions should have been taken at this point");
3532 computeFeasibleMaxVF(MaxTC, UserVF, UserIC,
true);
3536 std::optional<unsigned> MaxPowerOf2RuntimeVF =
3541 MaxPowerOf2RuntimeVF = std::max<unsigned>(
3542 *MaxPowerOf2RuntimeVF,
3545 MaxPowerOf2RuntimeVF = std::nullopt;
3548 auto NoScalarEpilogueNeeded = [
this, &UserIC](
unsigned MaxVF) {
3552 !
Legal->hasUncountableEarlyExit())
3554 unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
3559 const SCEV *BackedgeTakenCount =
PSE.getSymbolicMaxBackedgeTakenCount();
3561 BackedgeTakenCount ==
PSE.getBackedgeTakenCount()) &&
3562 "Invalid loop count");
3564 BackedgeTakenCount, SE->
getOne(BackedgeTakenCount->
getType()));
3571 if (MaxPowerOf2RuntimeVF > 0u) {
3573 "MaxFixedVF must be a power of 2");
3574 if (NoScalarEpilogueNeeded(*MaxPowerOf2RuntimeVF)) {
3576 LLVM_DEBUG(
dbgs() <<
"LV: No tail will remain for any chosen VF.\n");
3582 if (ExpectedTC && ExpectedTC->isFixed() &&
3583 ExpectedTC->getFixedValue() <=
3584 TTI.getMinTripCountTailFoldingThreshold()) {
3585 if (MaxPowerOf2RuntimeVF > 0u) {
3591 LLVM_DEBUG(
dbgs() <<
"LV: Picking a fixed-width so that no tail will "
3592 "remain for any chosen VF.\n");
3599 "The trip count is below the minial threshold value.",
3600 "loop trip count is too low, avoiding vectorization",
"LowTripCount",
3615 <<
"LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
3616 "try to generate VP Intrinsics with scalable vector "
3621 assert(ContainsScalableVF &&
"Expected scalable vector factor.");
3631 LLVM_DEBUG(
dbgs() <<
"LV: Cannot fold tail by masking: vectorize with an "
3632 "epilogue instead.\n");
3638 LLVM_DEBUG(
dbgs() <<
"LV: Can't fold tail by masking: don't vectorize\n");
3644 "unable to calculate the loop count due to complex control flow",
3650 "Cannot optimize for size and vectorize at the same time.",
3651 "cannot optimize for size and vectorize at the same time. "
3652 "Enable vectorization of this loop with '#pragma clang loop "
3653 "vectorize(enable)' when compiling with -Os/-Oz",
3665 if (
TTI.shouldConsiderVectorizationRegPressure())
3681 (
TTI.shouldMaximizeVectorBandwidth(RegKind) ||
3683 Legal->hasVectorCallVariants())));
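// Clamp the maximum VF to a power of two that, multiplied by the interleave
// count, does not exceed the known maximum trip count.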
ElementCount LoopVectorizationCostModel::clampVFByMaxTripCount(
    ElementCount VF, unsigned MaxTripCount, unsigned UserIC,
    bool FoldTailByMasking) const {
3690 if (VF.
isScalable() && TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
3691 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
3692 auto Min = Attr.getVScaleRangeMin();
3699 if (MaxTripCount > 0 && requiresScalarEpilogue(
true))
3704 unsigned IC = UserIC > 0 ? UserIC : 1;
3705 unsigned EstimatedVFTimesIC = EstimatedVF * IC;
3707 if (MaxTripCount && MaxTripCount <= EstimatedVFTimesIC &&
3715 if (ClampedUpperTripCount == 0)
3716 ClampedUpperTripCount = 1;
3717 LLVM_DEBUG(
dbgs() <<
"LV: Clamping the MaxVF to maximum power of two not "
3718 "exceeding the constant trip count"
3719 << (UserIC > 0 ?
" divided by UserIC" :
"") <<
": "
3720 << ClampedUpperTripCount <<
"\n");
3722 FoldTailByMasking ? VF.
isScalable() :
false);
3727ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
3728 unsigned MaxTripCount,
unsigned SmallestType,
unsigned WidestType,
3729 ElementCount MaxSafeVF,
unsigned UserIC,
bool FoldTailByMasking) {
3730 bool ComputeScalableMaxVF = MaxSafeVF.
isScalable();
3736 auto MinVF = [](
const ElementCount &
LHS,
const ElementCount &
RHS) {
3738 "Scalable flags must match");
3746 ComputeScalableMaxVF);
3747 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
3749 << (MaxVectorElementCount * WidestType) <<
" bits.\n");
3751 if (!MaxVectorElementCount) {
3753 << (ComputeScalableMaxVF ?
"scalable" :
"fixed")
3754 <<
" vector registers.\n");
3758 ElementCount MaxVF = clampVFByMaxTripCount(
3759 MaxVectorElementCount, MaxTripCount, UserIC, FoldTailByMasking);
3762 if (MaxVF != MaxVectorElementCount)
3770 MaxPermissibleVFWithoutMaxBW.ScalableVF = MaxVF;
3772 MaxPermissibleVFWithoutMaxBW.FixedVF = MaxVF;
3774 if (useMaxBandwidth(RegKind)) {
3777 ComputeScalableMaxVF);
3778 MaxVF = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
3780 if (ElementCount MinVF =
3782 if (ElementCount::isKnownLT(MaxVF, MinVF)) {
3784 <<
") with target's minimum: " << MinVF <<
'\n');
3790 clampVFByMaxTripCount(MaxVF, MaxTripCount, UserIC, FoldTailByMasking);
3792 assert((MaxVectorElementCount == MaxVF ||
3793 (WideningDecisions.empty() && CallWideningDecisions.empty() &&
3795 "No decisions should have been taken at this point");
    const unsigned MaxTripCount,
    bool IsEpilogue) const {
3810 unsigned EstimatedWidthB =
B.Width.getKnownMinValue();
3811 if (std::optional<unsigned> VScale = CM.getVScaleForTuning()) {
3812 if (
A.Width.isScalable())
3813 EstimatedWidthA *= *VScale;
3814 if (
B.Width.isScalable())
3815 EstimatedWidthB *= *VScale;
3822 return CostA < CostB ||
3823 (CostA == CostB && EstimatedWidthA > EstimatedWidthB);
3829 A.Width.isScalable() && !
B.Width.isScalable();
3839 bool LowerCostWithoutTC =
3840 CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);
3842 return LowerCostWithoutTC;
3844 auto GetCostForTC = [MaxTripCount, HasTail](
unsigned VF,
3856 return VectorCost * (MaxTripCount / VF) +
3857 ScalarCost * (MaxTripCount % VF);
3858 return VectorCost *
divideCeil(MaxTripCount, VF);
3861 auto RTCostA = GetCostForTC(EstimatedWidthA, CostA,
A.ScalarCost);
3862 auto RTCostB = GetCostForTC(EstimatedWidthB, CostB,
B.ScalarCost);
3863 bool LowerCostWithTC = CmpFn(RTCostA, RTCostB);
3864 LLVM_DEBUG(
if (LowerCostWithTC != LowerCostWithoutTC) {
3865 dbgs() <<
"LV: VF " << (LowerCostWithTC ?
A.Width :
B.Width)
3866 <<
" has lower cost than VF "
3867 << (LowerCostWithTC ?
B.Width :
A.Width)
3868 <<
" when taking the cost of the remaining scalar loop iterations "
3869 "into consideration for a maximum trip count of "
3870 << MaxTripCount <<
".\n";
3872 return LowerCostWithTC;
3878 bool IsEpilogue)
const {
3880 return LoopVectorizationPlanner::isMoreProfitable(
A,
B, MaxTripCount, HasTail,
3886 using RecipeVFPair = std::pair<VPRecipeBase *, ElementCount>;
3888 for (
const auto &Plan : VPlans) {
3897 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, CM.PSE,
3899 precomputeCosts(*Plan, VF, CostCtx);
3902 for (
auto &R : *VPBB) {
3903 if (!R.cost(VF, CostCtx).isValid())
3909 if (InvalidCosts.
empty())
3917 for (
auto &Pair : InvalidCosts)
3922 sort(InvalidCosts, [&Numbering](RecipeVFPair &
A, RecipeVFPair &
B) {
3923 unsigned NA = Numbering[
A.first];
3924 unsigned NB = Numbering[
B.first];
3939 Subset =
Tail.take_front(1);
3949 .Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
3950 [](
const auto *R) {
return Instruction::Call; })
3953 [](
const auto *R) {
return R->getOpcode(); })
3955 return R->getStoredValues().empty() ? Instruction::Load
3956 : Instruction::Store;
3967 if (Subset ==
Tail ||
Tail[Subset.size()].first != R) {
3968 std::string OutString;
3970 assert(!Subset.empty() &&
"Unexpected empty range");
3971 OS <<
"Recipe with invalid costs prevented vectorization at VF=(";
3972 for (
const auto &Pair : Subset)
3973 OS << (Pair.second == Subset.front().second ?
"" :
", ") << Pair.second;
3975 if (Opcode == Instruction::Call) {
3978 Name =
Int->getIntrinsicName();
3982 WidenCall ? WidenCall->getCalledScalarFunction()
3984 ->getLiveInIRValue());
3987 OS <<
" call to " << Name;
3992 Tail =
Tail.drop_front(Subset.size());
3996 Subset =
Tail.take_front(Subset.size() + 1);
3997 }
while (!
Tail.empty());
4019 switch (R.getVPRecipeID()) {
4020 case VPRecipeBase::VPDerivedIVSC:
4021 case VPRecipeBase::VPScalarIVStepsSC:
4022 case VPRecipeBase::VPReplicateSC:
4023 case VPRecipeBase::VPInstructionSC:
4024 case VPRecipeBase::VPCanonicalIVPHISC:
4025 case VPRecipeBase::VPCurrentIterationPHISC:
4026 case VPRecipeBase::VPVectorPointerSC:
4027 case VPRecipeBase::VPVectorEndPointerSC:
4028 case VPRecipeBase::VPExpandSCEVSC:
4029 case VPRecipeBase::VPPredInstPHISC:
4030 case VPRecipeBase::VPBranchOnMaskSC:
4032 case VPRecipeBase::VPReductionSC:
4033 case VPRecipeBase::VPActiveLaneMaskPHISC:
4034 case VPRecipeBase::VPWidenCallSC:
4035 case VPRecipeBase::VPWidenCanonicalIVSC:
4036 case VPRecipeBase::VPWidenCastSC:
4037 case VPRecipeBase::VPWidenGEPSC:
4038 case VPRecipeBase::VPWidenIntrinsicSC:
4039 case VPRecipeBase::VPWidenSC:
4040 case VPRecipeBase::VPBlendSC:
4041 case VPRecipeBase::VPFirstOrderRecurrencePHISC:
4042 case VPRecipeBase::VPHistogramSC:
4043 case VPRecipeBase::VPWidenPHISC:
4044 case VPRecipeBase::VPWidenIntOrFpInductionSC:
4045 case VPRecipeBase::VPWidenPointerInductionSC:
4046 case VPRecipeBase::VPReductionPHISC:
4047 case VPRecipeBase::VPInterleaveEVLSC:
4048 case VPRecipeBase::VPInterleaveSC:
4049 case VPRecipeBase::VPWidenLoadEVLSC:
4050 case VPRecipeBase::VPWidenLoadSC:
4051 case VPRecipeBase::VPWidenStoreEVLSC:
4052 case VPRecipeBase::VPWidenStoreSC:
4058 auto WillGenerateTargetVectors = [&
TTI, VF](
Type *VectorTy) {
4059 unsigned NumLegalParts =
TTI.getNumberOfParts(VectorTy);
4075 if (R.getNumDefinedValues() == 0 &&
4084 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
4086 if (!Visited.
insert({ScalarTy}).second)
4100 [](
auto *VPRB) { return VPRB->isReplicator(); });
4108 auto *RedPhi = dyn_cast<VPReductionPHIRecipe>(&R);
4110 RecurrenceDescriptor::isFindLastRecurrenceKind(
4111 RedPhi->getRecurrenceKind());
4121 if (auto *WidenInd = dyn_cast<VPWidenIntOrFpInductionRecipe>(&R))
4122 return !WidenInd->getPHINode();
4123 auto *RedPhi = dyn_cast<VPReductionPHIRecipe>(&R);
4126 if (RecurrenceDescriptor::isFindLastRecurrenceKind(
4127 RedPhi->getRecurrenceKind()) ||
4128 !RedPhi->getUnderlyingValue())
4135 if (RecurrenceDescriptor::isFindIVRecurrenceKind(
4136 RedPhi->getRecurrenceKind())) {
4137 auto *RdxResult = vputils::findComputeReductionResult(RedPhi);
4139 "FindIV reduction must have ComputeReductionResult");
4140 return any_of(RdxResult->users(),
4141 [](VPUser *U) { return !isa<VPInstruction>(U); });
4147bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
4148 VPlan &MainPlan)
const {
4151 if (
any_of(OrigLoop->getHeader()->phis(), [&](PHINode &Phi) {
4152 if (!Legal->isReductionVariable(&Phi))
4153 return Legal->isFixedOrderRecurrence(&Phi);
4155 Legal->getRecurrenceDescriptor(&Phi).getRecurrenceKind();
4156 return RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind(Kind);
4167 for (
const auto &Entry : Legal->getInductionVars()) {
4170 Entry.first->getIncomingValueForBlock(OrigLoop->getLoopLatch());
4171 for (User *U :
PostInc->users())
4175 for (User *U :
Entry.first->users())
4184 if (OrigLoop->getExitingBlock() != OrigLoop->getLoopLatch())
4198 if (!
TTI.preferEpilogueVectorization(VF * IC))
4203 :
TTI.getEpilogueVectorizationMinVF();
4210 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization is disabled.\n");
4214 if (!CM.isEpilogueAllowed()) {
4215 LLVM_DEBUG(
dbgs() <<
"LEV: Unable to vectorize epilogue because no "
4216 "epilogue is allowed.\n");
4222 if (!isCandidateForEpilogueVectorization(MainPlan)) {
4223 LLVM_DEBUG(
dbgs() <<
"LEV: Unable to vectorize epilogue because the loop "
4224 "is not a supported candidate.\n");
4234 LLVM_DEBUG(
dbgs() <<
"LEV: Forced epilogue VF results in dead epilogue "
4235 "vector loop, skipping vectorizing epilogue.\n");
4239 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization factor is forced.\n");
4242 std::unique_ptr<VPlan> Clone(
getPlanFor(ForcedEC).duplicate());
4243 Clone->setVF(ForcedEC);
4247 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization forced factor is not "
4252 if (OrigLoop->getHeader()->getParent()->hasOptSize()) {
4254 dbgs() <<
"LEV: Epilogue vectorization skipped due to opt for size.\n");
4258 if (!CM.isEpilogueVectorizationProfitable(MainLoopVF, IC)) {
4259 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization is not profitable for "
4270 if (
match(&Exiting->back(),
4280 MainLoopVF = GetEffectiveVF(MainPlan, MainLoopVF);
4288 Type *TCType = Legal->getWidestInductionType();
4289 const SCEV *RemainingIterations =
nullptr;
4290 unsigned MaxTripCount = 0;
4293 const SCEV *KnownMinTC;
4295 bool ScalableRemIter =
false;
4299 ScalableRemIter = ScalableTC;
4300 RemainingIterations =
4302 }
else if (ScalableTC) {
4305 SE.
getConstant(TCType, CM.getVScaleForTuning().value_or(1)));
4309 RemainingIterations =
4313 if (RemainingIterations->
isZero())
4323 << MaxTripCount <<
"\n");
4326 auto SkipVF = [&](
const SCEV *VF,
const SCEV *RemIter) ->
bool {
4330 VPlan *BestPlan =
nullptr;
4331 for (
auto &NextVF : ProfitableVFs) {
4337 ElementCount EffectiveVF = GetEffectiveVF(CurrentPlan, NextVF.Width);
4355 if (!ScalableRemIter) {
4361 if (SkipVF(SE.
getElementCount(TCType, EffectiveVF), RemainingIterations))
4365 if (Result.Width.isScalar() ||
4366 isMoreProfitable(NextVF, Result, MaxTripCount, !CM.foldTailByMasking(),
4369 BestPlan = &CurrentPlan;
4377 << Result.Width <<
"\n");
4378 std::unique_ptr<VPlan> Clone(BestPlan->
duplicate());
4379 Clone->setVF(Result.Width);
4383std::pair<unsigned, unsigned>
4385 unsigned MinWidth = -1U;
4386 unsigned MaxWidth = 8;
4392 for (
const auto &PhiDescriptorPair :
Legal->getReductionVars()) {
4396 MinWidth = std::min(
4400 MaxWidth = std::max(MaxWidth,
4405 MinWidth = std::min<unsigned>(
4406 MinWidth,
DL.getTypeSizeInBits(
T->getScalarType()).getFixedValue());
4407 MaxWidth = std::max<unsigned>(
4408 MaxWidth,
DL.getTypeSizeInBits(
T->getScalarType()).getFixedValue());
4411 return {MinWidth, MaxWidth};
4433 if (!
Legal->isReductionVariable(PN))
4436 Legal->getRecurrenceDescriptor(PN);
4446 T = ST->getValueOperand()->getType();
4449 "Expected the load/store/recurrence type to be sized");
4477 if (!CM.isEpilogueAllowed() &&
4478 !(CM.preferTailFoldedLoop() && CM.useWideActiveLaneMask()))
4484 "Unroll factor forced to be 1.\n");
4489 if (!Legal->isSafeForAnyVectorWidth())
4498 const bool HasReductions =
4511 if (LoopCost == 0) {
4513 LoopCost = CM.expectedCost(VF);
4515 LoopCost = cost(Plan, VF, &R);
4516 assert(LoopCost.
isValid() &&
"Expected to have chosen a VF with valid cost");
4525 for (
auto &Pair : R.MaxLocalUsers) {
4526 Pair.second = std::max(Pair.second, 1U);
4540 unsigned IC = UINT_MAX;
4542 for (
const auto &Pair : R.MaxLocalUsers) {
4543 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(Pair.first);
4546 << TTI.getRegisterClassName(Pair.first)
4547 <<
" register class\n");
4555 unsigned MaxLocalUsers = Pair.second;
4556 unsigned LoopInvariantRegs = 0;
4557 if (R.LoopInvariantRegs.contains(Pair.first))
4558 LoopInvariantRegs = R.LoopInvariantRegs[Pair.first];
4560 unsigned TmpIC =
llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
4564 TmpIC =
llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
4565 std::max(1U, (MaxLocalUsers - 1)));
4568 IC = std::min(IC, TmpIC);
4572 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
4573 LLVM_DEBUG(
dbgs() <<
"LV: MaxInterleaveFactor for the target is "
4574 << MaxInterleaveCount <<
"\n");
4590 CM.isEpilogueAllowed());
4593 if (BestKnownTC && (BestKnownTC->isFixed() || VF.
isScalable())) {
4595 unsigned AvailableTC =
4601 if (CM.requiresScalarEpilogue(VF.
isVector()))
4604 unsigned InterleaveCountLB =
bit_floor(std::max(
4605 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
4619 unsigned InterleaveCountUB =
bit_floor(std::max(
4620 1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
4621 MaxInterleaveCount = InterleaveCountLB;
4623 if (InterleaveCountUB != InterleaveCountLB) {
4624 unsigned TailTripCountUB =
4625 (AvailableTC % (EstimatedVF * InterleaveCountUB));
4626 unsigned TailTripCountLB =
4627 (AvailableTC % (EstimatedVF * InterleaveCountLB));
4630 if (TailTripCountUB == TailTripCountLB)
4631 MaxInterleaveCount = InterleaveCountUB;
4639 MaxInterleaveCount = InterleaveCountLB;
4643 assert(MaxInterleaveCount > 0 &&
4644 "Maximum interleave count must be greater than 0");
4648 if (IC > MaxInterleaveCount)
4649 IC = MaxInterleaveCount;
4652 IC = std::max(1u, IC);
4654 assert(IC > 0 &&
"Interleave count must be greater than 0.");
4658 if (VF.
isVector() && HasReductions) {
4659 LLVM_DEBUG(
dbgs() <<
"LV: Interleaving because of reductions.\n");
4667 bool ScalarInterleavingRequiresPredication =
4669 return Legal->blockNeedsPredication(BB);
4671 bool ScalarInterleavingRequiresRuntimePointerCheck =
4672 (VF.
isScalar() && Legal->getRuntimePointerChecking()->Need);
4677 <<
"LV: IC is " << IC <<
'\n'
4678 <<
"LV: VF is " << VF <<
'\n');
4679 const bool AggressivelyInterleave =
4680 TTI.enableAggressiveInterleaving(HasReductions);
4681 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
4682 !ScalarInterleavingRequiresPredication && LoopCost <
SmallLoopCost) {
4691 unsigned NumStores = 0;
4692 unsigned NumLoads = 0;
4706 if (
unsigned StoreOps = InterleaveR->getNumStoreOperands())
4707 NumStores += StoreOps;
4709 NumLoads += InterleaveR->getNumDefinedValues();
4724 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
4725 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
4731 bool HasSelectCmpReductions =
4735 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4736 return RedR && (RecurrenceDescriptor::isAnyOfRecurrenceKind(
4737 RedR->getRecurrenceKind()) ||
4738 RecurrenceDescriptor::isFindIVRecurrenceKind(
4739 RedR->getRecurrenceKind()));
4741 if (HasSelectCmpReductions) {
4742 LLVM_DEBUG(
dbgs() <<
"LV: Not interleaving select-cmp reductions.\n");
4751 if (HasReductions && OrigLoop->getLoopDepth() > 1) {
4752 bool HasOrderedReductions =
4755 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4757 return RedR && RedR->isOrdered();
4759 if (HasOrderedReductions) {
4761 dbgs() <<
"LV: Not interleaving scalar ordered reductions.\n");
4766 SmallIC = std::min(SmallIC,
F);
4767 StoresIC = std::min(StoresIC,
F);
4768 LoadsIC = std::min(LoadsIC,
F);
4772 std::max(StoresIC, LoadsIC) > SmallIC) {
4774 dbgs() <<
"LV: Interleaving to saturate store or load ports.\n");
4775 return std::max(StoresIC, LoadsIC);
4780 if (VF.
isScalar() && AggressivelyInterleave) {
4784 return std::max(IC / 2, SmallIC);
4787 LLVM_DEBUG(
dbgs() <<
"LV: Interleaving to reduce branch cost.\n");
4793 if (AggressivelyInterleave) {
4813 "Expecting a scalar emulated instruction");
  if (InstsToScalarize.contains(VF) ||
      PredicatedBBsAfterVectorization.contains(VF))

  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
4843 ScalarCostsTy ScalarCosts;
4851 computePredInstDiscount(&
I, ScalarCosts, VF) >= 0) {
4852 for (
const auto &[
I, IC] : ScalarCosts)
4853 ScalarCostsVF.
insert({
I, IC});
4856 for (
const auto &[
I,
Cost] : ScalarCosts) {
4858 if (!CI || !CallWideningDecisions.contains({CI, VF}))
4861 CallWideningDecisions[{CI, VF}].Cost =
Cost;
4865 PredicatedBBsAfterVectorization[VF].insert(BB);
4867 if (Pred->getSingleSuccessor() == BB)
4868 PredicatedBBsAfterVectorization[VF].insert(Pred);
4877 "Instruction marked uniform-after-vectorization will be predicated");
4895 if (!
I->hasOneUse() || PredInst->
getParent() !=
I->getParent() ||
4914 for (
Use &U :
I->operands())
4927 while (!Worklist.
empty()) {
4931 if (ScalarCosts.contains(
I))
4954 ScalarCost +=
TTI.getScalarizationOverhead(
4967 for (Use &U :
I->operands())
4970 "Instruction has non-scalar type");
4971 if (CanBeScalarized(J))
4973 else if (needsExtract(J, VF)) {
4976 ScalarCost +=
TTI.getScalarizationOverhead(
4989 Discount += VectorCost - ScalarCost;
4990 ScalarCosts[
I] = ScalarCost;
5006 ValuesToIgnoreForVF);
5036 LLVM_DEBUG(
dbgs() <<
"LV: Found an estimated cost of " <<
C <<
" for VF "
5037 << VF <<
" For instruction: " <<
I <<
'\n');
5065 const Loop *TheLoop) {
5072LoopVectorizationCostModel::getMemInstScalarizationCost(
Instruction *
I,
5075 "Scalarization cost of instruction implies vectorization.");
5080 auto *SE =
PSE.getSE();
5106 Cost += getScalarizationOverhead(
I, VF);
5117 Cost +=
TTI.getScalarizationOverhead(
5132LoopVectorizationCostModel::getConsecutiveMemOpCost(
Instruction *
I,
5138 int ConsecutiveStride =
Legal->isConsecutivePtr(ValTy, Ptr);
5140 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5141 "Stride should be 1 or -1 for consecutive memory access");
5145 unsigned IID =
I->getOpcode() == Instruction::Load
5146 ? Intrinsic::masked_load
5147 : Intrinsic::masked_store;
5148 Cost +=
TTI.getMemIntrinsicInstrCost(
5149 MemIntrinsicCostAttributes(IID, VectorTy, Alignment, AS),
CostKind);
5152 Cost +=
TTI.getMemoryOpCost(
I->getOpcode(), VectorTy, Alignment, AS,
5156 bool Reverse = ConsecutiveStride < 0;
5164LoopVectorizationCostModel::getUniformMemOpCost(
Instruction *
I,
5174 return TTI.getAddressComputationCost(PtrTy,
nullptr,
nullptr,
CostKind) +
5175 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
5182 bool IsLoopInvariantStoreValue =
Legal->isInvariant(
SI->getValueOperand());
5188 TTI.getAddressComputationCost(PtrTy,
nullptr,
nullptr,
CostKind) +
5189 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
CostKind);
5190 if (!IsLoopInvariantStoreValue)
5191 Cost +=
TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
5197LoopVectorizationCostModel::getGatherScatterCost(
Instruction *
I,
5205 if (!
Legal->isUniform(Ptr, VF))
5208 unsigned IID =
I->getOpcode() == Instruction::Load
5209 ? Intrinsic::masked_gather
5210 : Intrinsic::masked_scatter;
5211 return TTI.getAddressComputationCost(PtrTy,
nullptr,
nullptr,
CostKind) +
5212 TTI.getMemIntrinsicInstrCost(
5219LoopVectorizationCostModel::getInterleaveGroupCost(
Instruction *
I,
5222 assert(Group &&
"Fail to get an interleaved access group.");
5229 unsigned InterleaveFactor = Group->getFactor();
5233 SmallVector<unsigned, 4> Indices;
5234 for (
unsigned IF = 0; IF < InterleaveFactor; IF++)
5235 if (Group->getMember(IF))
5239 bool UseMaskForGaps =
5243 InsertPos->
getOpcode(), WideVecTy, Group->getFactor(), Indices,
5246 if (Group->isReverse()) {
5249 "Reverse masked interleaved access not supported.");
5250 Cost += Group->getNumMembers() *
5257std::optional<InstructionCost>
5264 return std::nullopt;
5282 return std::nullopt;
5293 Instruction *LastChain = InLoopReductionImmediateChains.lookup(RetI);
5295 return std::nullopt;
5301 ReductionPhi = InLoopReductionImmediateChains.at(ReductionPhi);
5310 BaseCost = TTI.getMinMaxReductionCost(MinMaxID, VectorTy,
5313 BaseCost = TTI.getArithmeticReductionCost(
5321 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
5338 if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5344 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
5356 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
5359 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
5361 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
5369 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
5370 return I == RetI ? RedCost : 0;
5372 !TheLoop->isLoopInvariant(RedOp)) {
5381 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
5383 if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
5384 return I == RetI ? RedCost : 0;
5385 } else if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5389 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
5408 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5414 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
5415 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
5416 ExtraExtCost = TTI.getCastInstrCost(
5423 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
5424 return I == RetI ? RedCost : 0;
5428 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5434 if (RedCost.isValid() && RedCost < MulCost + BaseCost)
5435 return I == RetI ? RedCost : 0;
5439 return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
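// Editor's note: a standalone sketch of the comparison shape used above for
// in-loop reduction patterns (e.g. an extend-multiply-accumulate chain): a
// fused reduction cost is only accepted when it beats the sum of the separate
// costs it would replace. Field names are hypothetical.
#include <cstdint>
#include <optional>

struct MulAccCosts {
  int64_t FusedReduction; // cost of the fused multiply-accumulate reduction
  int64_t Extend0, Extend1, Mul, BaseReduction;
};

// Returns the fused cost when it is profitable; std::nullopt means fall back
// to costing each instruction in the chain separately.
std::optional<int64_t> pickFusedReductionCost(const MulAccCosts &C) {
  int64_t Separate = C.Extend0 + C.Extend1 + C.Mul + C.BaseReduction;
  if (C.FusedReduction < Separate)
    return C.FusedReduction;
  return std::nullopt;
}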
5443LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5454 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5455 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, CostKind,
5462LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
5485 Cost += TTI.getScalarizationOverhead(
5507 for (auto *V : filterExtractingOperands(Ops, VF))
5513 return Cost + TTI.getOperandsScalarizationOverhead(Tys, CostKind, OperandVIC);
5534 if (Legal->isUniformMemOp(I, VF)) {
5535 auto IsLegalToScalarize = [&]() {
5555 return TheLoop->isLoopInvariant(SI.getValueOperand());
5567 IsLegalToScalarize() ? getUniformMemOpCost(&I, VF)
5573 if (GatherScatterCost < ScalarizationCost)
5583 int ConsecutiveStride = Legal->isConsecutivePtr(
5585 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5586 "Expected consecutive stride.");
5595 unsigned NumAccesses = 1;
5598 assert(Group && "Fail to get an interleaved access group.");
5604 NumAccesses = Group->getNumMembers();
5606 InterleaveCost = getInterleaveGroupCost(&I, VF);
5611 ? getGatherScatterCost(&I, VF) * NumAccesses
5615 getMemInstScalarizationCost(&I, VF) * NumAccesses;
5621 if (InterleaveCost <= GatherScatterCost &&
5622 InterleaveCost < ScalarizationCost) {
5624 Cost = InterleaveCost;
5625 } else if (GatherScatterCost < ScalarizationCost) {
5627 Cost = GatherScatterCost;
5630 Cost = ScalarizationCost;
5637 for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
5638 if (auto *I = Group->getMember(Idx)) {
5640 getMemInstScalarizationCost(I, VF));
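// Editor's note: a compact standalone sketch of the three-way decision made
// above for each memory instruction: widen as part of an interleave group,
// widen as a gather/scatter, or replicate scalar accesses, picking whichever
// estimate is cheapest (ties prefer interleaving over gather/scatter).
// Hypothetical helper, not LLVM's API.
#include <cstdint>

enum class MemWidening { Interleave, GatherScatter, Scalarize };

MemWidening pickMemWidening(int64_t InterleaveCost, int64_t GatherScatterCost,
                            int64_t ScalarizationCost) {
  if (InterleaveCost <= GatherScatterCost && InterleaveCost < ScalarizationCost)
    return MemWidening::Interleave;
  if (GatherScatterCost < ScalarizationCost)
    return MemWidening::GatherScatter;
  return MemWidening::Scalarize;
}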
5656 if (TTI.prefersVectorizedAddressing())
5665 if (PtrDef && TheLoop->contains(PtrDef) &&
5673 while (!Worklist.empty()) {
5675 for (auto &Op : I->operands())
5678 AddrDefs.insert(InstOp).second)
5682 auto UpdateMemOpUserCost = [this, VF](LoadInst *LI) {
5686 for (User *U : LI->users()) {
5696 for (auto *I : AddrDefs) {
5717 for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
5718 if (Instruction *Member = Group->getMember(Idx)) {
5722 getMemoryInstructionCost(Member,
5724 : getMemInstScalarizationCost(Member, VF);
5737 ForcedScalars[VF].insert(I);
5744 "Trying to set a vectorization decision for a scalar VF");
5746 auto ForcedScalar = ForcedScalars.find(VF);
5761 for (auto &ArgOp : CI->args())
5770 TTI.getCallInstrCost(ScalarFunc, ScalarRetTy, ScalarTys, CostKind);
5780 "Unexpected valid cost for scalarizing scalable vectors");
5787 if (VF.
isVector() && ((ForcedScalar != ForcedScalars.end() &&
5788 ForcedScalar->second.contains(CI)) ||
5799 for (
Type *ScalarTy : ScalarTys)
5808 std::nullopt, *RedCost);
5819 if (Info.Shape.VF != VF)
5823 if (MaskRequired && !Info.isMasked())
5827 bool ParamsOk =
true;
5829 switch (Param.ParamKind) {
5835 if (!
PSE.getSE()->isLoopInvariant(
PSE.getSCEV(ScalarParam),
5872 VectorCost =
TTI.getCallInstrCost(
nullptr, RetTy, Tys,
CostKind);
5905 return !OpI || !
TheLoop->contains(OpI) ||
5909 [
this](
Value *
Op) { return shouldConsiderInvariant(Op); }));
5921 return InstsToScalarize[VF][
I];
5924 auto ForcedScalar = ForcedScalars.find(VF);
5925 if (VF.
isVector() && ForcedScalar != ForcedScalars.end()) {
5926 auto InstSet = ForcedScalar->second;
5927 if (InstSet.count(
I))
5932 Type *RetTy =
I->getType();
5935 auto *SE =
PSE.getSE();
5939 [[maybe_unused]]
auto HasSingleCopyAfterVectorization =
5944 auto Scalarized = InstsToScalarize.find(VF);
5945 assert(Scalarized != InstsToScalarize.end() &&
5946 "VF not yet analyzed for scalarization profitability");
5947 return !Scalarized->second.count(
I) &&
5949 auto *UI = cast<Instruction>(U);
5950 return !Scalarized->second.count(UI);
5959 assert(
I->getOpcode() == Instruction::GetElementPtr ||
5960 I->getOpcode() == Instruction::PHI ||
5961 (
I->getOpcode() == Instruction::BitCast &&
5962 I->getType()->isPointerTy()) ||
5963 HasSingleCopyAfterVectorization(
I, VF));
5969 !
TTI.getNumberOfParts(VectorTy))
5973 switch (
I->getOpcode()) {
5974 case Instruction::GetElementPtr:
5980 case Instruction::UncondBr:
5981 case Instruction::CondBr: {
5988 bool ScalarPredicatedBB =
false;
5991 (PredicatedBBsAfterVectorization[VF].count(BI->
getSuccessor(0)) ||
5992 PredicatedBBsAfterVectorization[VF].count(BI->
getSuccessor(1))) &&
5993 BI->getParent() !=
TheLoop->getLoopLatch())
5994 ScalarPredicatedBB =
true;
5996 if (ScalarPredicatedBB) {
6003 return (
TTI.getScalarizationOverhead(
6006 (
TTI.getCFInstrCost(Instruction::CondBr,
CostKind) *
6012 return TTI.getCFInstrCost(Instruction::UncondBr,
CostKind);
6020 case Instruction::Switch: {
6022 return TTI.getCFInstrCost(Instruction::Switch,
CostKind);
6024 return Switch->getNumCases() *
6025 TTI.getCmpSelInstrCost(
6027 toVectorTy(Switch->getCondition()->getType(), VF),
6031 case Instruction::PHI: {
6045 Type *ResultTy = Phi->getType();
6051 auto *Phi = dyn_cast<PHINode>(U);
6052 if (Phi && Phi->getParent() == TheLoop->getHeader())
6057 auto &ReductionVars =
Legal->getReductionVars();
6058 auto Iter = ReductionVars.find(HeaderUser);
6059 if (Iter != ReductionVars.end() &&
6061 Iter->second.getRecurrenceKind()))
6064 return (Phi->getNumIncomingValues() - 1) *
6065 TTI.getCmpSelInstrCost(
6066 Instruction::Select,
toVectorTy(ResultTy, VF),
6076 Intrinsic::vp_merge,
toVectorTy(Phi->getType(), VF),
6077 {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
6081 return TTI.getCFInstrCost(Instruction::PHI,
CostKind);
6083 case Instruction::UDiv:
6084 case Instruction::SDiv:
6085 case Instruction::URem:
6086 case Instruction::SRem:
6090 ScalarCost : SafeDivisorCost;
6094 case Instruction::Add:
6095 case Instruction::Sub: {
6096 auto Info =
Legal->getHistogramInfo(
I);
6103 if (!RHS || RHS->getZExtValue() != 1)
6105 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy,
CostKind);
6109 Type *ScalarTy =
I->getType();
6113 {PtrTy, ScalarTy, MaskTy});
6116 return TTI.getIntrinsicInstrCost(ICA,
CostKind) + MulCost +
6117 TTI.getArithmeticInstrCost(
I->getOpcode(), VectorTy,
CostKind);
6121 case Instruction::FAdd:
6122 case Instruction::FSub:
6123 case Instruction::Mul:
6124 case Instruction::FMul:
6125 case Instruction::FDiv:
6126 case Instruction::FRem:
6127 case Instruction::Shl:
6128 case Instruction::LShr:
6129 case Instruction::AShr:
6130 case Instruction::And:
6131 case Instruction::Or:
6132 case Instruction::Xor: {
6136 if (
I->getOpcode() == Instruction::Mul &&
6137 ((
TheLoop->isLoopInvariant(
I->getOperand(0)) &&
6138 PSE.getSCEV(
I->getOperand(0))->isOne()) ||
6139 (
TheLoop->isLoopInvariant(
I->getOperand(1)) &&
6140 PSE.getSCEV(
I->getOperand(1))->isOne())))
6149 Value *Op2 =
I->getOperand(1);
6155 auto Op2Info =
TTI.getOperandInfo(Op2);
6161 return TTI.getArithmeticInstrCost(
6163 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6164 Op2Info, Operands,
I,
TLI);
6166 case Instruction::FNeg: {
6167 return TTI.getArithmeticInstrCost(
6169 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6170 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6171 I->getOperand(0),
I);
6173 case Instruction::Select: {
6178 const Value *Op0, *Op1;
6189 return TTI.getArithmeticInstrCost(
6191 VectorTy,
CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, {Op0, Op1},
I);
6194 Type *CondTy =
SI->getCondition()->getType();
6200 Pred = Cmp->getPredicate();
6201 return TTI.getCmpSelInstrCost(
I->getOpcode(), VectorTy, CondTy, Pred,
6202 CostKind, {TTI::OK_AnyValue, TTI::OP_None},
6203 {TTI::OK_AnyValue, TTI::OP_None},
I);
6205 case Instruction::ICmp:
6206 case Instruction::FCmp: {
6207 Type *ValTy =
I->getOperand(0)->getType();
6213 MinBWs[
I] == MinBWs[Op0AsInstruction]) &&
6214 "if both the operand and the compare are marked for "
6215 "truncation, they must have the same bitwidth");
6220 return TTI.getCmpSelInstrCost(
6223 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None},
I);
6225 case Instruction::Store:
6226 case Instruction::Load: {
6231 "CM decision should be taken at this point");
6238 return getMemoryInstructionCost(
I, VF);
6240 case Instruction::BitCast:
6241 if (
I->getType()->isPointerTy())
6244 case Instruction::ZExt:
6245 case Instruction::SExt:
6246 case Instruction::FPToUI:
6247 case Instruction::FPToSI:
6248 case Instruction::FPExt:
6249 case Instruction::PtrToInt:
6250 case Instruction::IntToPtr:
6251 case Instruction::SIToFP:
6252 case Instruction::UIToFP:
6253 case Instruction::Trunc:
6254 case Instruction::FPTrunc: {
6258 "Expected a load or a store!");
6284 unsigned Opcode =
I->getOpcode();
6287 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
6290 CCH = ComputeCCH(Store);
6293 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
6294 Opcode == Instruction::FPExt) {
6296 CCH = ComputeCCH(Load);
6304 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6305 Trunc->getSrcTy(), CCH,
CostKind, Trunc);
6312 Type *SrcScalarTy =
I->getOperand(0)->getType();
6324 (
I->getOpcode() == Instruction::ZExt ||
6325 I->getOpcode() == Instruction::SExt))
6329 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH,
CostKind,
I);
6331 case Instruction::Call:
6333 case Instruction::ExtractValue:
6335 case Instruction::Alloca:
6340 return TTI.getArithmeticInstrCost(Instruction::Mul, RetTy,
CostKind);
6343 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy,
CostKind);
6358 auto IsLiveOutDead = [this, RequiresScalarEpilogue](User *U) {
6359 return RequiresScalarEpilogue &&
6373 all_of(I.users(), [this, IsLiveOutDead](User *U) {
6374 return VecValuesToIgnore.contains(U) ||
6375 ValuesToIgnore.contains(U) || IsLiveOutDead(U);
6384 if (Group->getInsertPos() == &I)
6387 DeadInterleavePointerOps.push_back(PointerOp);
6398 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
6401 Instruction *UI = cast<Instruction>(U);
6402 return !VecValuesToIgnore.contains(U) &&
6403 (!isAccessInterleaved(UI) ||
6404 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
6424 for (unsigned I = 0; I != DeadOps.size(); ++I) {
6436 if ((ThenEmpty && ElseEmpty) ||
6438 ElseBB->phis().empty()) ||
6440 ThenBB->phis().empty())) {
6452 return !VecValuesToIgnore.contains(U) &&
6453 !ValuesToIgnore.contains(U) && !IsLiveOutDead(U);
6461 [this](User *U) { return ValuesToIgnore.contains(U); }))
6470 for (const auto &Reduction : Legal->getReductionVars()) {
6477 for (const auto &Induction : Legal->getInductionVars()) {
6485 if (!InLoopReductions.empty())
6488 for (const auto &Reduction : Legal->getReductionVars()) {
6489 PHINode *Phi = Reduction.first;
6511 !TTI.preferInLoopReduction(Kind, Phi->getType()))
6519 bool InLoop = !ReductionOperations.empty();
6522 InLoopReductions.insert(Phi);
6525 for (auto *I : ReductionOperations) {
6526 InLoopReductionImmediateChains[I] = LastChain;
6530 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
6531 << " reduction for phi: " << *Phi << "\n");
6544 unsigned WidestType;
6548 TTI.enableScalableVectorization()
6553 unsigned N = RegSize.getKnownMinValue() / WidestType;
6564 if (!OrigLoop->isInnermost()) {
6574 << "overriding computed VF.\n");
6577 } else if (UserVF.isScalable() && !TTI.supportsScalableVectors() &&
6579 LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
6580 << "not supported by the target.\n");
6582 "Scalable vectorization requested but not supported by the target",
6583 "the scalable user-specified vectorization width for outer-loop "
6584 "vectorization cannot be used because the target does not support "
6585 "scalable vectors.",
6586 "ScalableVFUnfeasible", ORE, OrigLoop);
6591 "VF needs to be a power of two");
6593 << "VF " << VF << " to build VPlans.\n");
6603 return {VF, 0, 0};
6607 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6608 "VPlan-native path.\n");
6613 assert(OrigLoop->isInnermost() && "Inner loop expected.");
6614 CM.collectValuesToIgnore();
6615 CM.collectElementTypesForWidening();
6622 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
6626 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6627 "which requires masked-interleaved support.\n");
6628 if (CM.InterleaveInfo.invalidateGroups())
6632 CM.invalidateCostModelingDecisions();
6635 if (CM.foldTailByMasking())
6636 Legal->prepareToFoldTailByMasking();
6643 "UserVF ignored because it may be larger than the maximal safe VF",
6644 "InvalidUserVF", ORE, OrigLoop);
6647 "VF needs to be a power of two");
6650 CM.collectInLoopReductions();
6651 if (CM.selectUserVectorizationFactor(UserVF)) {
6657 CM.selectUserVectorizationFactor(EpilogueUserVF)) {
6659 buildVPlansWithVPRecipes(EpilogueUserVF, EpilogueUserVF);
6661 buildVPlansWithVPRecipes(UserVF, UserVF);
6666 "InvalidCost", ORE, OrigLoop);
6679 CM.collectInLoopReductions();
6680 for (const auto &VF : VFCandidates) {
6682 CM.collectNonVectorizedAndSetWideningDecisions(VF);
6700 return CM.ValuesToIgnore.contains(UI) ||
6701 (IsVector && CM.VecValuesToIgnore.contains(UI)) ||
6706 return CM.getPredBlockCostDivisor(CostKind, BB);
6725 for (const auto &[IV, IndDesc] : Legal->getInductionVars()) {
6729 for (unsigned I = 0; I != IVInsts.size(); I++) {
6730 for (Value *Op : IVInsts[I]->operands()) {
6732 if (Op == IV || !OpI || !OrigLoop->contains(OpI) || !Op->hasOneUse())
6738 for (User *U : IV->users()) {
6751 if (TC == VF && !CM.foldTailByMasking())
6755 for (Instruction *IVInst : IVInsts) {
6760 dbgs() << "Cost of " << InductionCost << " for VF " << VF
6761 << ": induction instruction " << *IVInst << "\n";
6763 Cost += InductionCost;
6773 CM.TheLoop->getExitingBlocks(Exiting);
6774 SetVector<Instruction *> ExitInstrs;
6776 for (BasicBlock *EB : Exiting) {
6781 ExitInstrs.insert(CondI);
6785 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
6787 if (!OrigLoop->contains(CondI) ||
6792 dbgs() << "Cost of " << CondICost << " for VF " << VF
6793 << ": exit condition instruction " << *CondI << "\n";
6799 any_of(OpI->users(), [&ExitInstrs](User *U) {
6800 return !ExitInstrs.contains(cast<Instruction>(U));
6812 for (BasicBlock *BB : OrigLoop->blocks()) {
6816 if (BB == OrigLoop->getLoopLatch())
6818 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
6832 for (Instruction *ForcedScalar : CM.ForcedScalars[VF]) {
6838 dbgs() << "Cost of " << ForcedCost << " for VF " << VF
6839 << ": forced scalar " << *ForcedScalar << "\n";
6845 switch (I->getOpcode()) {
6846 case Instruction::SDiv:
6847 case Instruction::UDiv:
6848 case Instruction::SRem:
6849 case Instruction::URem:
6855 for (const auto &[Scalarized, ScalarCost] : CM.InstsToScalarize[VF]) {
6856 if (UseVPlanCostModel(Scalarized) ||
6861 dbgs() << "Cost of " << ScalarCost << " for VF " << VF
6862 << ": profitable to scalarize " << *Scalarized << "\n";
6872 VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind, PSE, OrigLoop);
6879 if (CM.shouldConsiderRegPressureForVF(VF))
6885 << " (Estimated cost per lane: ");
6887 double CostPerLane = double(Cost.getValue()) / EstimatedWidth;
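// Editor's note: standalone sketch of the per-lane normalisation printed
// above: a scalable VF is first turned into an estimated element count using a
// tuning value for vscale, and the block cost is divided by that width so
// different VFs can be compared on equal footing. Names are hypothetical.
#include <cstdint>
#include <optional>

double estimatedCostPerLane(int64_t Cost, unsigned KnownMinLanes, bool Scalable,
                            std::optional<unsigned> VScaleForTuning) {
  unsigned EstimatedWidth = KnownMinLanes;
  if (Scalable && VScaleForTuning)
    EstimatedWidth *= *VScaleForTuning;
  return static_cast<double>(Cost) / static_cast<double>(EstimatedWidth);
}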
6896std::pair<VectorizationFactor, VPlan *>
6901 VPlan &FirstPlan = *VPlans[0];
6904 if (VPlans.size() == 1) {
6906 "UserVF must match single VF");
6910 assert(VPlans.size() == 2 && "Must have exactly 2 VPlans built");
6911 assert(VPlans[0]->getSingleVF() ==
6913 "expected first plan to be for the forced epilogue VF");
6914 assert(VPlans[1]->getSingleVF() == UserVF &&
6915 "expected second plan to be for the forced UserVF");
6922 ? "Reciprocal Throughput\n"
6924 ? "Instruction Latency\n"
6927 ? "Code Size and Latency\n"
6932 "More than a single plan/VF w/o any plan having scalar VF");
6936 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ScalarCost << ".\n");
6941 if (ForceVectorization) {
6948 VPlan *PlanForBestVF = &FirstPlan;
6950 for (auto &P : VPlans) {
6952 P->vectorFactors().end());
6956 return CM.shouldConsiderRegPressureForVF(VF);
6961 for (unsigned I = 0; I < VFs.size(); I++) {
6968 << "LV: Not considering vector loop of width " << VF
6969 << " because it will not generate any vector instructions.\n");
6975 << "LV: Not considering vector loop of width " << VF
6976 << " because it would cause replicated blocks to be generated,"
6977 << " which isn't allowed when optimizing for size.\n");
6985 if (isMoreProfitable(CurrentFactor, BestFactor, P->hasScalarTail())) {
6986 BestFactor = CurrentFactor;
6987 PlanForBestVF = P.get();
6991 if (isMoreProfitable(CurrentFactor, ScalarFactor, P->hasScalarTail()))
6992 ProfitableVFs.push_back(CurrentFactor);
6996 VPlan &BestPlan = *PlanForBestVF;
6999 "when vectorizing, the scalar cost must be computed.");
7002 return {BestFactor, &BestPlan};
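// Editor's note: a standalone sketch of the profitability comparison used when
// picking the best factor above: two candidates are compared by cost per
// estimated lane, done with a cross-multiplication so no division is needed.
// The struct and helper are hypothetical; the real comparison also accounts
// for how the loop tail is handled.
#include <cstdint>

struct CandidateVF {
  uint64_t Cost;           // estimated loop body cost at this VF
  uint64_t EstimatedLanes; // VF, scaled by an estimate of vscale if scalable
};

bool isMoreProfitable(const CandidateVF &A, const CandidateVF &B) {
  // A.Cost / A.Lanes < B.Cost / B.Lanes  <=>  A.Cost * B.Lanes < B.Cost * A.Lanes
  return A.Cost * B.EstimatedLanes < B.Cost * A.EstimatedLanes;
}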
7010 "Trying to execute plan with unsupported VF");
7012 "Trying to execute plan with unsupported UF");
7014 ++LoopsEarlyExitVectorized;
7021 bool HasBranchWeights =
7023 if (HasBranchWeights) {
7024 std::optional<unsigned> VScale = CM.getVScaleForTuning();
7026 BestVPlan, BestVF, VScale);
7043 OrigLoop->getStartLoc(),
7044 OrigLoop->getHeader())
7045 <<
"Created vector loop never executes due to insufficient trip "
7070 BestVPlan, VectorPH, CM.foldTailByMasking(),
7084 OrigLoop->getParentLoop(),
7085 Legal->getWidestInductionType());
7087#ifdef EXPENSIVE_CHECKS
7088 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
7106 if (!Exit->hasPredecessors())
7128 MDNode *LID = OrigLoop->getLoopID();
7129 unsigned OrigLoopInvocationWeight = 0;
7130 std::optional<unsigned> OrigAverageTripCount =
7142 bool DisableRuntimeUnroll = !ILV.
RTChecks.hasChecks() && !BestVF.
isScalar();
7144 HeaderVPBB ? LI->getLoopFor(State.CFG.VPBB2IRBB.lookup(HeaderVPBB))
7146 HeaderVPBB, BestVPlan,
7148 OrigAverageTripCount, OrigLoopInvocationWeight,
7150 DisableRuntimeUnroll);
7158 return ExpandedSCEVs;
7167 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7168 << "Main Loop VF:" << EPI.MainLoopVF
7169 << ", Main Loop UF:" << EPI.MainLoopUF
7170 << ", Epilogue Loop VF:" << EPI.EpilogueVF
7171 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7177 dbgs() << "intermediate fn:\n"
7178 << *OrigLoop->getHeader()->getParent() << "\n";
7192 OriginalScalarPH->setName("vec.epilog.iter.check");
7200 R.moveBefore(*NewEntry, NewEntry->end());
7204 Plan.setEntry(NewEntry);
7207 return OriginalScalarPH;
7212 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7213 << "Epilogue Loop VF:" << EPI.EpilogueVF
7214 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7220 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
7227 VPI->
getOpcode() == Instruction::Store) &&
7228 "Must be called with either a load or store");
7233 CM.getWideningDecision(
I, VF);
7235 "CM decision should be taken at this point.");
7238 if (CM.isScalarAfterVectorization(
I, VF) ||
7239 CM.isProfitableToScalarize(
I, VF))
7254 CM.getWideningDecision(
I,
Range.Start);
7271 CM.foldTailByMasking() || !
GEP
7273 :
GEP->getNoWrapFlags().withoutNoUnsignedWrap();
7279 GEP ?
GEP->getNoWrapFlags()
7283 Builder.setInsertPoint(VPI);
7284 Builder.insert(VectorPtr);
7291 if (VPI->
getOpcode() == Instruction::Load) {
7294 Load->getDebugLoc());
7296 Builder.insert(LoadR);
7298 LoadR->getDebugLoc());
7307 Store->getDebugLoc());
7309 Store->getDebugLoc());
7313VPRecipeBuilder::tryToOptimizeInductionTruncate(
VPInstruction *VPI,
7331 PHINode *Phi = WidenIV->getPHINode();
7332 VPIRValue *Start = WidenIV->getStartValue();
7357 if (
ID && (
ID == Intrinsic::assume ||
ID == Intrinsic::lifetime_end ||
7358 ID == Intrinsic::lifetime_start ||
ID == Intrinsic::sideeffect ||
7359 ID == Intrinsic::pseudoprobe ||
7360 ID == Intrinsic::experimental_noalias_scope_decl))
7367 bool ShouldUseVectorIntrinsic =
7369 [&](ElementCount VF) ->
bool {
7370 return CM.getCallWideningDecision(CI, VF).Kind ==
7374 if (ShouldUseVectorIntrinsic)
7375 return new VPWidenIntrinsicRecipe(*CI,
ID,
Ops, CI->
getType(), *VPI, *VPI,
7379 std::optional<unsigned> MaskPos;
7383 [&](ElementCount VF) ->
bool {
7398 LoopVectorizationCostModel::CallWideningDecision Decision =
7399 CM.getCallWideningDecision(CI, VF);
7409 if (ShouldUseVectorCall) {
7410 if (MaskPos.has_value()) {
7420 Ops.insert(
Ops.begin() + *MaskPos, Mask);
7424 return new VPWidenCallRecipe(CI, Variant,
Ops, *VPI, *VPI,
7433 "Instruction should have been handled earlier");
7436 auto WillScalarize = [
this,
I](ElementCount VF) ->
bool {
7437 return CM.isScalarAfterVectorization(
I, VF) ||
7438 CM.isProfitableToScalarize(
I, VF) ||
7439 CM.isScalarWithPredication(
I, VF);
7450 case Instruction::SDiv:
7451 case Instruction::UDiv:
7452 case Instruction::SRem:
7453 case Instruction::URem: {
7456 if (CM.isPredicatedInst(
I)) {
7459 VPValue *One = Plan.getConstantInt(
I->getType(), 1u);
7467 case Instruction::Add:
7468 case Instruction::And:
7469 case Instruction::AShr:
7470 case Instruction::FAdd:
7471 case Instruction::FCmp:
7472 case Instruction::FDiv:
7473 case Instruction::FMul:
7474 case Instruction::FNeg:
7475 case Instruction::FRem:
7476 case Instruction::FSub:
7477 case Instruction::ICmp:
7478 case Instruction::LShr:
7479 case Instruction::Mul:
7480 case Instruction::Or:
7481 case Instruction::Select:
7482 case Instruction::Shl:
7483 case Instruction::Sub:
7484 case Instruction::Xor:
7485 case Instruction::Freeze:
7488 case Instruction::ExtractValue: {
7491 assert(EVI->getNumIndices() == 1 &&
"Expected one extractvalue index");
7492 unsigned Idx = EVI->getIndices()[0];
7493 NewOps.push_back(Plan.getConstantInt(32, Idx));
7494 return new VPWidenRecipe(*
I, NewOps, *VPI, *VPI, VPI->
getDebugLoc());
7500 if (VPI->
getOpcode() != Instruction::Store)
7510 unsigned Opcode = HI->Update->getOpcode();
7511 assert((Opcode == Instruction::Add || Opcode == Instruction::Sub) &&
7512 "Histogram update operation must be an Add or Sub");
7522 if (CM.isMaskRequired(HI->Store))
7532 Legal->isInvariantAddressOfReduction(
SI->getPointerOperand())) {
7534 if (Legal->isInvariantStoreOfReduction(
SI)) {
7538 FinalRedStoresBuilder.
insert(Recipe);
7551 [&](
ElementCount VF) {
return CM.isUniformAfterVectorization(
I, VF); },
7554 bool IsPredicated = CM.isPredicatedInst(
I);
7562 case Intrinsic::assume:
7563 case Intrinsic::lifetime_start:
7564 case Intrinsic::lifetime_end:
7586 VPValue *BlockInMask =
nullptr;
7587 if (!IsPredicated) {
7591 LLVM_DEBUG(
dbgs() <<
"LV: Scalarizing and predicating:" << *
I <<
"\n");
7602 assert((
Range.Start.isScalar() || !IsUniform || !IsPredicated ||
7604 "Should not predicate a uniform recipe");
7614 assert(!R->isPhi() &&
"phis must be handled earlier");
7620 if (VPI->
getOpcode() == Instruction::Trunc &&
7621 (Recipe = tryToOptimizeInductionTruncate(VPI,
Range)))
7629 if (VPI->
getOpcode() == Instruction::Call)
7630 return tryToWidenCall(VPI,
Range);
7635 "Should have been handled prior to this!");
7637 if (!shouldWiden(Instr,
Range))
7640 if (VPI->
getOpcode() == Instruction::GetElementPtr)
7649 CastR->getResultType(), CI, *VPI, *VPI,
7653 return tryToWiden(VPI);
7660void LoopVectorizationPlanner::buildVPlansWithVPRecipes(
ElementCount MinVF,
7665 assert(OrigLoop->isInnermost() &&
"Inner loop expected.");
7667 const LoopAccessInfo *LAI = Legal->getLAI();
7669 OrigLoop, LI, DT, PSE.getSE());
7674 LVer.prepareNoAliasMetadata();
7680 OrigLoop, *LI, Legal->getWidestInductionType(),
7685 *VPlan0, PSE, *OrigLoop, Legal->getInductionVars(),
7686 Legal->getReductionVars(), Legal->getFixedOrderRecurrences(),
7687 CM.getInLoopReductions(), Hints.allowReordering());
7696 if (Legal->hasUncountableEarlyExit())
7697 EEStyle = Legal->hasUncountableExitWithSideEffects()
7702 Legal->getAssumptionCache()))
7706 if (CM.foldTailByMasking())
7711 auto MaxVFTimes2 = MaxVF * 2;
7713 VFRange SubRange = {VF, MaxVFTimes2};
7714 if (
auto Plan = tryToBuildVPlanWithVPRecipes(
7715 std::unique_ptr<VPlan>(VPlan0->duplicate()), SubRange, &LVer)) {
7720 CM.getMinimalBitwidths());
7723 if (CM.foldTailWithEVL()) {
7725 CM.getMaxSafeElements());
7730 VPlans.push_back(std::move(
P));
7734 VPlans.push_back(std::move(Plan));
7740VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
7743 using namespace llvm::VPlanPatternMatch;
7744 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
7751 bool RequiresScalarEpilogueCheck =
7753 [
this](ElementCount VF) {
7754 return !CM.requiresScalarEpilogue(VF.
isVector());
7758 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
7759 if (!RequiresScalarEpilogueCheck && MiddleVPBB->getNumSuccessors() == 2) {
7761 assert(MiddleVPBB->getSuccessors()[1] == Plan->getScalarPreheader() &&
7762 "second successor must be scalar preheader");
7763 BranchOnCond->setOperand(0, Plan->getFalse());
7770 bool IVUpdateMayOverflow =
false;
7771 for (ElementCount VF :
Range)
7779 VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
7785 m_VPInstruction<Instruction::Add>(
7787 "Did not find the canonical IV increment");
7800 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
7801 auto ApplyIG = [IG,
this](ElementCount VF) ->
bool {
7803 CM.getWideningDecision(IG->getInsertPos(), VF) ==
7808 "Unsupported interleave factor for scalable vectors");
7813 InterleaveGroups.
insert(IG);
7820 VPRecipeBuilder RecipeBuilder(*Plan, TLI, Legal, CM, Builder);
7825 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
7829 DenseSet<BasicBlock *> BlocksNeedingPredication;
7830 for (BasicBlock *BB : OrigLoop->blocks())
7831 if (CM.blockNeedsPredicationForAnyReason(BB))
7832 BlocksNeedingPredication.
insert(BB);
7837 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, CM.PSE,
7841 Range, RecipeBuilder);
7847 make_range(VPBB->getFirstNonPhi(), VPBB->end()))) {
7850 if (
isa<VPWidenCanonicalIVRecipe, VPBlendRecipe, VPReductionRecipe,
7851 VPReplicateRecipe, VPWidenLoadRecipe, VPWidenStoreRecipe,
7852 VPVectorPointerRecipe, VPVectorEndPointerRecipe,
7853 VPHistogramRecipe>(&R))
7863 Builder.setInsertPoint(VPI);
7865 VPRecipeBase *Recipe =
7866 RecipeBuilder.tryToCreateWidenNonPhiRecipe(VPI,
Range);
7871 RecipeBuilder.setRecipe(Instr, Recipe);
7877 Builder.insert(Recipe);
7883 "Unexpected multidef recipe");
7885 R.eraseFromParent();
7891 "entry block must be set to a VPRegionBlock having a non-empty entry "
7903 addReductionResultComputation(Plan, RecipeBuilder,
Range.Start);
7909 CM.foldTailByMasking());
7932 if (!CM.foldTailWithEVL()) {
7939 for (ElementCount VF :
Range)
7941 Plan->setName(
"Initial VPlan");
7947 InterleaveGroups, RecipeBuilder, CM.isEpilogueAllowed());
7951 Legal->getLAI()->getSymbolicStrides());
7953 auto BlockNeedsPredication = [
this](
BasicBlock *BB) {
7954 return Legal->blockNeedsPredication(BB);
7957 BlockNeedsPredication);
7981 assert(!OrigLoop->isInnermost());
7985 OrigLoop, *LI, Legal->getWidestInductionType(),
7989 *Plan, PSE, *OrigLoop, Legal->getInductionVars(),
7990 MapVector<PHINode *, RecurrenceDescriptor>(),
7991 SmallPtrSet<const PHINode *, 1>(), SmallPtrSet<PHINode *, 1>(),
7995 Legal->getAssumptionCache());
7997 "early-exits are not supported in VPlan-native path");
8002 for (ElementCount VF :
Range)
8016void LoopVectorizationPlanner::addReductionResultComputation(
8018 using namespace VPlanPatternMatch;
8019 VPTypeAnalysis TypeInfo(*Plan);
8020 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
8021 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
8024 Builder.setInsertPoint(&*std::prev(std::prev(LatchVPBB->
end())));
8026 for (VPRecipeBase &R :
8027 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8035 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(
8037 Type *PhiTy = TypeInfo.inferScalarType(PhiR);
8047 if (!PhiR->
isInLoop() && CM.foldTailByMasking() &&
8048 (!RR || !RR->isPartialReduction())) {
8051 Builder.createSelect(
Cond, OrigExitingVPV, PhiR, {},
"", *PhiR);
8052 OrigExitingVPV->replaceUsesWithIf(NewExitingVPV, [](VPUser &U,
unsigned) {
8054 m_VPInstruction<VPInstruction::ComputeReductionResult>());
8057 if (CM.usePredicatedReductionSelect(RecurrenceKind))
8068 DebugLoc ExitDL = OrigLoop->getLoopLatch()->getTerminator()->getDebugLoc();
8074 VPInstruction *FinalReductionResult;
8075 VPBuilder::InsertPointGuard Guard(Builder);
8076 Builder.setInsertPoint(MiddleVPBB, IP);
8083 return match(U, m_Select(m_VPValue(), m_VPValue(), m_VPValue()));
8086 bool TrueValIsPhi = AnyOfSelect->getOperand(1) == PhiR;
8088 VPValue *NewVal = TrueValIsPhi ? AnyOfSelect->getOperand(2)
8089 : AnyOfSelect->getOperand(1);
8095 VPValue *
Cmp = AnyOfSelect->getOperand(0);
8098 if (VPRecipeBase *CmpR =
Cmp->getDefiningRecipe())
8100 Builder.setInsertPoint(AnyOfSelect);
8105 Cmp = Builder.createNot(Cmp);
8106 VPValue *
Or = Builder.createOr(PhiR, Cmp);
8110 AnyOfSelect->replaceUsesWithIf(
Or, [](VPUser &U,
unsigned) {
8119 if (NewExitingVPV == AnyOfSelect)
8122 Builder.setInsertPoint(MiddleVPBB, IP);
8124 FinalReductionResult =
8125 Builder.createAnyOfReduction(NewExitingVPV, NewVal, Start, ExitDL);
8129 FinalReductionResult =
8131 {NewExitingVPV},
Flags, ExitDL);
8138 assert(!PhiR->
isInLoop() &&
"Unexpected truncated inloop reduction!");
8140 "Unexpected truncated min-max recurrence!");
8142 VPWidenCastRecipe *Trunc;
8144 RdxDesc.
isSigned() ? Instruction::SExt : Instruction::ZExt;
8145 VPWidenCastRecipe *Extnd;
8147 VPBuilder::InsertPointGuard Guard(Builder);
8148 Builder.setInsertPoint(
8149 NewExitingVPV->getDefiningRecipe()->getParent(),
8150 std::next(NewExitingVPV->getDefiningRecipe()->getIterator()));
8152 Builder.createWidenCast(Instruction::Trunc, NewExitingVPV, RdxTy);
8153 Extnd = Builder.createWidenCast(ExtendOpc, Trunc, PhiTy);
8161 FinalReductionResult =
8162 Builder.createScalarCast(ExtendOpc, FinalReductionResult, PhiTy, {});
8167 for (
auto *U :
to_vector(OrigExitingVPV->users())) {
8169 if (FinalReductionResult == U || Parent->getParent())
8173 if (
match(U, m_VPInstruction<VPInstruction::ComputeReductionResult>()) ||
8175 match(U, m_VPInstruction<Instruction::ICmp>())))
8177 U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult);
8193 VPBuilder PHBuilder(Plan->getVectorPreheader());
8194 VPValue *Iden = Plan->getOrAddLiveIn(
8196 auto *ScaleFactorVPV = Plan->getConstantInt(32, 1);
8197 VPValue *StartV = PHBuilder.createNaryOp(
8203 for (VPRecipeBase *R : ToDelete)
8204 R->eraseFromParent();
8210 VPlan &Plan, GeneratedRTChecks &RTChecks,
bool HasBranchWeights)
const {
8211 const auto &[SCEVCheckCond, SCEVCheckBlock] = RTChecks.getSCEVChecks();
8212 if (SCEVCheckBlock && SCEVCheckBlock->hasNPredecessors(0)) {
8213 assert((!CM.OptForSize ||
8215 "Cannot SCEV check stride or overflow when optimizing for size");
8219 const auto &[MemCheckCond, MemCheckBlock] = RTChecks.getMemRuntimeChecks();
8220 if (MemCheckBlock && MemCheckBlock->hasNPredecessors(0)) {
8224 "Runtime checks are not supported for outer loops yet");
8226 if (CM.OptForSize) {
8229 "Cannot emit memory checks when optimizing for size, unless forced "
8233 OrigLoop->getStartLoc(),
8234 OrigLoop->getHeader())
8235 <<
"Code-size may be reduced by not forcing "
8236 "vectorization, or by source-code modifications "
8237 "eliminating the need for runtime checks "
8238 "(e.g., adding 'restrict').";
8254 Plan, VF, UF, MinProfitableTripCount,
8255 CM.requiresScalarEpilogue(VF.
isVector()), CM.foldTailByMasking(),
8256 OrigLoop, BranchWeights,
8257 OrigLoop->getLoopPredecessor()->getTerminator()->getDebugLoc(), PSE);
8271 if (
F->hasOptSize() ||
8297 if (
TTI->preferTailFoldingOverEpilogue(&TFI))
8316 LLVM_DEBUG(
dbgs() <<
"LV: cannot compute the outer-loop trip count\n");
8320 Function *
F = L->getHeader()->getParent();
8326 LoopVectorizationCostModel CM(
SEL, L, PSE, LI, LVL, *
TTI, TLI, DB, AC, ORE,
8327 GetBFI,
F, &Hints, IAI, OptForSize);
8331 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *
TTI, LVL, CM, IAI, PSE, Hints,
8351 GeneratedRTChecks Checks(PSE, DT, LI,
TTI, CM.
CostKind);
8358 bool HasBranchWeights =
8380 if (S->getValueOperand()->getType()->isFloatTy())
8390 while (!Worklist.
empty()) {
8392 if (!L->contains(
I))
8394 if (!Visited.
insert(
I).second)
8404 I->getDebugLoc(), L->getHeader())
8405 <<
"floating point conversion changes vector width. "
8406 <<
"Mixed floating point precision requires an up/down "
8407 <<
"cast that will negatively impact performance.";
8410 for (
Use &
Op :
I->operands())
8426 for (
auto *PredVPBB : ExitVPBB->getPredecessors()) {
8432 << PredVPBB->getName() <<
":\n");
8433 Cost += PredVPBB->cost(VF, CostCtx);
8453 std::optional<unsigned> VScale) {
8465 << "LV: Interleaving only is not profitable due to runtime checks\n");
8532 uint64_t MinTC = std::max(MinTC1, MinTC2);
8534 MinTC = alignTo(MinTC, IntVF);
8538 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
8545 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
8546 "trip count < minimum profitable VF ("
8557 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
8559 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
8573 auto AddFreezeForFindLastIVReductions = [](
VPlan &Plan,
8574 bool UpdateResumePhis) {
8586 Builder.createNaryOp(Instruction::Freeze, {OrigStart}, {},
"fr");
8588 if (UpdateResumePhis)
8594 AddFreezeForFindLastIVReductions(MainPlan,
true);
8595 AddFreezeForFindLastIVReductions(EpiPlan,
false);
8600 [[maybe_unused]]
bool MatchedTC =
8602 assert(MatchedTC &&
"must match vector trip count");
8608 auto ResumePhiIter =
8610 return match(&R, m_VPInstruction<Instruction::PHI>(m_Specific(VectorTC),
8613 VPPhi *ResumePhi =
nullptr;
8614 if (ResumePhiIter == MainScalarPH->
phis().
end()) {
8619 "canonical IV must start at 0");
8623 {VectorTC, MainPlan.
getZero(Ty)}, {},
"vec.epilog.resume.val");
8626 ResumePhi->
setName(
"vec.epilog.resume.val");
8627 if (&MainScalarPH->
front() != ResumePhi)
8641 assert(isa<VPIRPhi>(R) &&
8642 "only VPIRPhis expected in the scalar header");
8643 return ResumeBuilder.createNaryOp(VPInstruction::ResumeForEpilogue,
8655 VPlan &Plan,
Loop *L,
const SCEV2ValueTy &ExpandedSCEVs,
8660 Header->
setName(
"vec.epilog.vector.body");
8669 PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
8674 "Must only have a single non-zero incoming value");
8685 [](
Value *Inc) { return match(Inc, m_SpecificInt(0)); }) &&
8686 "all incoming values must be 0");
8692 return isa<VPScalarIVStepsRecipe>(U) ||
8693 isa<VPDerivedIVRecipe>(U) ||
8694 cast<VPRecipeBase>(U)->isScalarCast() ||
8695 cast<VPInstruction>(U)->getOpcode() ==
8698 "the canonical IV should only be used by its increment or "
8699 "ScalarIVSteps when resetting the start value");
8700 VPBuilder Builder(Header, Header->getFirstNonPhi());
8710 Increment->replaceUsesWithIf(OffsetIVInc,
8711 [
IV](
VPUser &U,
unsigned) {
return &U !=
IV; });
8720 Value *ResumeV =
nullptr;
8731 assert(RdxResult &&
"expected to find reduction result");
8734 ->getIncomingValueForBlock(L->getLoopPreheader());
8739 VPValue *SentinelVPV =
nullptr;
8740 bool IsFindIV =
any_of(RdxResult->users(), [&](
VPUser *U) {
8741 return match(U, VPlanPatternMatch::m_SpecificICmp(
8742 ICmpInst::ICMP_NE, m_Specific(RdxResult),
8743 m_VPValue(SentinelVPV)));
8746 RecurKind RK = ReductionPhi->getRecurrenceKind();
8749 Value *StartV = ResumePhi->getIncomingValueForBlock(
8752 ResumePhi->getParent()->getFirstNonPHIIt());
8758 ResumeV = Builder.CreateICmpNE(ResumeV, StartV);
8762 assert(SentinelVPV &&
"expected to find icmp using RdxResult");
8764 ToFrozen[FreezeI->getOperand(0)] = StartV;
8767 Value *Cmp = Builder.CreateICmpEQ(ResumeV, StartV);
8780 "unexpected start value");
8787 assert(
Sub->getOpcode() == Instruction::Sub &&
"Unexpected opcode");
8789 "Expected operand to match the original start value of the "
8793 "Expected start value for partial sub-reduction to start at "
8795 Sub->setOperand(0, StartVal);
8809 assert(ResumeV &&
"Must have a resume value");
8823 if (VPI && VPI->
getOpcode() == Instruction::Freeze) {
8840 ExpandR->eraseFromParent();
8844 unsigned MainLoopStep =
8846 unsigned EpilogueLoopStep =
8851 EPI.
EpilogueUF, MainLoopStep, EpilogueLoopStep, SE);
8864 if (Phi.getBasicBlockIndex(Pred) != -1)
8866 Phi.addIncoming(Phi.getIncomingValueForBlock(BypassBlock), Pred);
8870 if (ScalarPH->hasPredecessors()) {
8874 for (
auto [ResumeV, HeaderPhi] :
8877 auto *EpiResumePhi =
8878 cast<PHINode>(HeaderPhiR->getIRPhi().getIncomingValueForBlock(PH));
8879 if (EpiResumePhi->getBasicBlockIndex(BypassBlock) == -1)
8881 auto *MainResumePhi =
cast<PHINode>(ResumeV->getUnderlyingValue());
8882 EpiResumePhi->setIncomingValueForBlock(
8883 BypassBlock, MainResumePhi->getIncomingValueForBlock(BypassBlock));
8896 GeneratedRTChecks &Checks,
8908 "expected this to be saved from the previous pass.");
8911 VecEpilogueIterationCountCheck, VecEpiloguePreHeader);
8914 VecEpilogueIterationCountCheck},
8916 VecEpiloguePreHeader}});
8921 VecEpilogueIterationCountCheck, ScalarPH);
8924 VecEpilogueIterationCountCheck},
8928 BasicBlock *SCEVCheckBlock = Checks.getSCEVChecks().second;
8929 BasicBlock *MemCheckBlock = Checks.getMemRuntimeChecks().second;
8930 if (SCEVCheckBlock) {
8932 VecEpilogueIterationCountCheck, ScalarPH);
8934 VecEpilogueIterationCountCheck},
8937 if (MemCheckBlock) {
8939 VecEpilogueIterationCountCheck, ScalarPH);
8952 for (
PHINode *Phi : PhisInBlock) {
8954 Phi->replaceIncomingBlockWith(
8956 VecEpilogueIterationCountCheck);
8963 return EPI.EpilogueIterationCountCheck == IncB;
8968 Phi->removeIncomingValue(SCEVCheckBlock);
8970 Phi->removeIncomingValue(MemCheckBlock);
8974 for (
auto *
I : InstsToMove)
8986 if (Phi.use_empty())
8987 Phi.eraseFromParent();
8992 "VPlan-native path is not enabled. Only process inner loops.");
8995 << L->getHeader()->getParent()->getName() <<
"' from "
8996 << L->getLocStr() <<
"\n");
9001 dbgs() <<
"LV: Loop hints:"
9012 Function *
F = L->getHeader()->getParent();
9032 L->getHeader(),
PSI,
9039 &Requirements, &Hints,
DB,
AC,
9042 LLVM_DEBUG(
dbgs() <<
"LV: Not vectorizing: Cannot prove legality.\n");
9050 "early exit is not enabled",
9051 "UncountableEarlyExitLoopsDisabled",
ORE, L);
9061 if (!L->isInnermost())
9066 assert(L->isInnermost() &&
"Inner loop expected.");
9069 bool UseInterleaved =
TTI->enableInterleavedAccessVectorization();
9083 [LoopLatch](
BasicBlock *BB) { return BB != LoopLatch; })) {
9085 "requiring a scalar epilogue is unsupported",
9086 "UncountableEarlyExitUnsupported",
ORE, L);
9099 if (ExpectedTC && ExpectedTC->isFixed() &&
9101 LLVM_DEBUG(
dbgs() <<
"LV: Found a loop with a very small trip count. "
9102 <<
"This loop is worth vectorizing only if no scalar "
9103 <<
"iteration overheads are incurred.");
9105 LLVM_DEBUG(
dbgs() <<
" But vectorizing was explicitly forced.\n");
9121 if (
F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9123 "Can't vectorize when the NoImplicitFloat attribute is used",
9124 "loop not vectorized due to NoImplicitFloat attribute",
9125 "NoImplicitFloat",
ORE, L);
9135 TTI->isFPVectorizationPotentiallyUnsafe()) {
9137 "Potentially unsafe FP op prevents vectorization",
9138 "loop not vectorized due to unsafe FP support.",
9139 "UnsafeFP",
ORE, L);
9144 bool AllowOrderedReductions;
9149 AllowOrderedReductions =
TTI->enableOrderedReductions();
9154 ExactFPMathInst->getDebugLoc(),
9155 ExactFPMathInst->getParent())
9156 <<
"loop not vectorized: cannot prove it is safe to reorder "
9157 "floating-point operations";
9159 LLVM_DEBUG(
dbgs() <<
"LV: loop not vectorized: cannot prove it is safe to "
9160 "reorder floating-point operations\n");
9166 LoopVectorizationCostModel CM(
SEL, L, PSE,
LI, &LVL, *
TTI,
TLI,
DB,
AC,
ORE,
9167 GetBFI,
F, &Hints, IAI, OptForSize);
9169 LoopVectorizationPlanner LVP(L,
LI,
DT,
TLI, *
TTI, &LVL, CM, IAI, PSE, Hints,
9179 LVP.
plan(UserVF, UserIC);
9191 unsigned SelectedIC = std::max(IC, UserIC);
9194 if (VF.Width.
isVector() || SelectedIC > 1) {
9201 if (Checks.getSCEVChecks().first &&
9202 match(Checks.getSCEVChecks().first,
m_One()))
9204 if (Checks.getMemRuntimeChecks().first &&
9205 match(Checks.getMemRuntimeChecks().first,
m_One()))
9210 bool ForceVectorization =
9214 if (!ForceVectorization &&
9219 DEBUG_TYPE,
"CantReorderMemOps", L->getStartLoc(),
9221 <<
"loop not vectorized: cannot prove it is safe to reorder "
9222 "memory operations";
9231 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9232 bool VectorizeLoop = true, InterleaveLoop = true;
9234 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
9236 "VectorizationNotBeneficial",
9237 "the cost-model indicates that vectorization is not beneficial"};
9238 VectorizeLoop = false;
9243 "UserIC should only be ignored due to unsafe dependencies");
9244 LLVM_DEBUG(dbgs() << "LV: Ignoring user-specified interleave count.\n");
9245 IntDiagMsg = {"InterleavingUnsafe",
9246 "Ignoring user-specified interleave count due to possibly "
9247 "unsafe dependencies in the loop."};
9248 InterleaveLoop = false;
9252 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
9253 "interleaving should be avoided up front\n");
9254 IntDiagMsg = {"InterleavingAvoided",
9255 "Ignoring UserIC, because interleaving was avoided up front"};
9256 InterleaveLoop = false;
9257 } else if (IC == 1 && UserIC <= 1) {
9261 "InterleavingNotBeneficial",
9262 "the cost-model indicates that interleaving is not beneficial"};
9263 InterleaveLoop = false;
9265 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
9266 IntDiagMsg.second +=
9267 " and is explicitly disabled or interleave count is set to 1";
9269 } else if (IC > 1 && UserIC == 1) {
9271 LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
9273 IntDiagMsg = {"InterleavingBeneficialButDisabled",
9274 "the cost-model indicates that interleaving is beneficial "
9275 "but is explicitly disabled or interleave count is set to 1"};
9276 InterleaveLoop = false;
9282 if (!VectorizeLoop && InterleaveLoop && LVL.hasHistograms()) {
9283 LLVM_DEBUG(dbgs() << "LV: Not interleaving without vectorization due "
9284 << "to histogram operations.\n");
9286 "HistogramPreventsScalarInterleaving",
9287 "Unable to interleave without vectorization due to constraints on "
9288 "the order of histogram operations"};
9289 InterleaveLoop = false;
9293 IC = UserIC > 0 ? UserIC : IC;
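// Editor's note: standalone sketch of how the final interleave count above is
// resolved: an explicit user count wins when it is safe to honour, otherwise
// the cost-model count is used, and interleaving is dropped entirely when the
// earlier checks decided against it. Hypothetical helper, not LLVM's API.
unsigned finalInterleaveCount(unsigned CostModelIC, unsigned UserIC,
                              bool InterleaveLoop) {
  if (!InterleaveLoop)
    return 1; // interleaving was rejected up front
  return UserIC > 0 ? UserIC : CostModelIC;
}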
9297 if (!VectorizeLoop && !InterleaveLoop) {
9301 L->getStartLoc(), L->getHeader())
9302 << VecDiagMsg.second;
9306 L->getStartLoc(), L->getHeader())
9307 << IntDiagMsg.second;
9312 if (!VectorizeLoop && InterleaveLoop) {
9316 L->getStartLoc(), L->getHeader())
9317 << VecDiagMsg.second;
9319 }
else if (VectorizeLoop && !InterleaveLoop) {
9320 LLVM_DEBUG(
dbgs() <<
"LV: Found a vectorizable loop (" << VF.Width
9321 <<
") in " << L->getLocStr() <<
'\n');
9324 L->getStartLoc(), L->getHeader())
9325 << IntDiagMsg.second;
9327 }
else if (VectorizeLoop && InterleaveLoop) {
9328 LLVM_DEBUG(
dbgs() <<
"LV: Found a vectorizable loop (" << VF.Width
9329 <<
") in " << L->getLocStr() <<
'\n');
9335 using namespace ore;
9340 <<
"interleaved loop (interleaved count: "
9341 << NV(
"InterleaveCount", IC) <<
")";
9353 VPlan &BestPlan = *BestPlanPtr;
9355 std::unique_ptr<VPlan> EpiPlan =
9357 bool HasBranchWeights =
9360 VPlan &BestEpiPlan = *EpiPlan;
9361 VPlan &BestMainPlan = BestPlan;
9382 L->getLoopPredecessor()->getTerminator()->getDebugLoc(), PSE);
9385 Checks, BestMainPlan);
9394 EntryBB->
setName(
"iter.check");
9400 if (
BasicBlock *MemBB = Checks.getMemRuntimeChecks().second)
9402 else if (
BasicBlock *SCEVBB = Checks.getSCEVChecks().second)
9404 BasicBlock *ScalarPH = L->getLoopPreheader();
9407 BI->getSuccessor(BI->getSuccessor(0) == ScalarPH);
9412 Checks, BestEpiPlan);
9414 BestEpiPlan, L, ExpandedSCEVs, EPI, CM, *PSE.
getSE());
9421 ++LoopsEpilogueVectorized;
9423 InnerLoopVectorizer LB(L, PSE,
LI,
DT,
TTI,
AC, VF.Width, IC, &CM, Checks,
9426 VF.MinProfitableTripCount);
9433 assert(
DT->verify(DominatorTree::VerificationLevel::Fast) &&
9434 "DT not preserved correctly");
9449 if (!
TTI->getNumberOfRegisters(
TTI->getRegisterClassForType(
true)) &&
9453 bool Changed =
false, CFGChanged =
false;
9460 for (
const auto &L : *
LI)
9472 LoopsAnalyzed += Worklist.
size();
9475 while (!Worklist.
empty()) {
9521 if (!Result.MadeAnyChange)
9535 if (Result.MadeCFGChange) {
9551 OS, MapClassName2PassName);
9554 OS << (InterleaveOnlyWhenForced ?
"" :
"no-") <<
"interleave-forced-only;";
9555 OS << (VectorizeOnlyWhenForced ?
"" :
"no-") <<
"vectorize-forced-only;";
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static EpilogueLowering getEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, bool OptForSize, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L, VPlan &BestEpiPlan, ArrayRef< VPInstruction * > ResumeValues)
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI)
Check if any recipe of Plan will generate a vector value, which will be assigned a vector register.
static cl::opt< bool > ForceTargetSupportsMaskedMemoryOps("force-target-supports-masked-memory-ops", cl::init(false), cl::Hidden, cl::desc("Assume the target supports masked memory operations (used for " "testing)."))
Note: This currently only applies to llvm.masked.load and llvm.masked.store.
static cl::opt< bool > MaximizeBandwidth("vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, cl::desc("Maximize bandwidth when selecting vectorization factor which " "will be determined by the smallest type in loop."))
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop, Instruction *I, DebugLoc DL={})
Create an analysis remark that explains why vectorization failed.
This file implements a map that provides insertion order iteration.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
This file contains the declarations for profiling metadata utility functions.
static BinaryOperator * CreateMul(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static InstructionCost getScalarizationOverhead(const TargetTransformInfo &TTI, Type *ScalarTy, VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={})
This is similar to TargetTransformInfo::getScalarizationOverhead, but if ScalarTy is a FixedVectorTyp...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type ...
This file contains the declarations of different VPlan-related auxiliary helpers.
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
static const char PassName[]
A manager for alias analyses.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
unsigned getActiveBits() const
Compute the number of active bits in the value.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
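The APInt accessors above back much of the trip-count and bit-width reasoning in this file; a minimal sketch of how they combine (the function apintSketch and its values are illustrative only):
  #include "llvm/ADT/APInt.h"
  void apintSketch() {
    llvm::APInt X = llvm::APInt::getAllOnes(8); // 8-bit value 0xFF
    uint64_t Z = X.getZExtValue();              // 255
    unsigned Bits = X.getActiveBits();          // 8
    bool IsZero = X.isZero();                   // false
    (void)Z; (void)Bits; (void)IsZero;
  }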
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM_ABI unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
LLVM Basic Block Representation.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
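A hedged sketch of the BasicBlock accessors above when walking a block (the helper inspectBlock is made up):
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Instructions.h"
  void inspectBlock(llvm::BasicBlock *BB) {
    for (llvm::PHINode &PN : BB->phis())                  // header/merge phis come first
      (void)PN;
    llvm::BasicBlock *Pred = BB->getSinglePredecessor();  // nullptr if several predecessors
    llvm::BasicBlock *Succ = BB->getSingleSuccessor();    // nullptr if several successors
    const llvm::Instruction *Term = BB->getTerminator();
    (void)Pred; (void)Succ; (void)Term;
  }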
BinaryOps getOpcode() const
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Represents analyses that only rely on functions' control flow.
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_ULT
unsigned less than
Conditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
This class represents a range of values.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
A parsed version of the target data layout string in and methods for querying it.
static DebugLoc getTemporary()
static DebugLoc getUnknown()
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
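A small sketch of the DenseMap operations listed above (the keys and values are arbitrary):
  #include "llvm/ADT/DenseMap.h"
  void denseMapSketch() {
    llvm::DenseMap<int, unsigned> Counts;
    Counts.try_emplace(1, 10u);             // inserts only if key 1 is absent
    bool Has = Counts.contains(1);          // true
    unsigned Missing = Counts.lookup(2);    // 0: default-constructed for a missing key
    auto It = Counts.find(1);               // iterator to the (1, 10) entry
    (void)Has; (void)Missing; (void)It;
  }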
Implements a dense probed hash-table based set.
Analysis pass which computes a DominatorTree.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
static constexpr UpdateKind Delete
static constexpr UpdateKind Insert
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getScalable(ScalarTy MinVal)
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
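ElementCount is how fixed and scalable vectorization factors are represented throughout this file; a brief sketch of the factory functions and predicates above (values are illustrative):
  #include "llvm/Support/TypeSize.h"
  void elementCountSketch() {
    llvm::ElementCount FixedVF = llvm::ElementCount::getFixed(4);    // <4 x ...>
    llvm::ElementCount ScalVF = llvm::ElementCount::getScalable(2);  // <vscale x 2 x ...>
    bool Vec = FixedVF.isVector();                                   // true
    bool Scalar = llvm::ElementCount::getFixed(1).isScalar();        // true
    (void)ScalVF; (void)Vec; (void)Scalar;
  }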
void printDebugTracesAtEnd() override
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan)
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (i....
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
void printDebugTracesAtEnd() override
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Check, VPlan &Plan)
Convenience struct for specifying and reasoning about fast-math flags.
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags none()
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
Common base class shared among various IRBuilders.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
A struct for saving information about induction variables.
const SCEV * getStep() const
ArrayRef< Instruction * > getCastInsts() const
Returns an ArrayRef to the type cast instructions in the induction update chain, that are redundant w...
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_NoInduction
Not an induction variable.
@ IK_FpInduction
Floating point induction variable.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
ElementCount MinProfitableTripCount
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor)
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separ...
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization ...
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
const TargetTransformInfo * TTI
Target Transform Info.
LoopVectorizationCostModel * Cost
The profitablity analysis.
friend class LoopVectorizationPlanner
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, ElementCount VecWidth, unsigned UnrollFactor, LoopVectorizationCostModel *CM, GeneratedRTChecks &RTChecks, VPlan &Plan)
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
DominatorTree * DT
Dominator Tree.
void fixVectorizedLoop(VPTransformState &State)
Fix the vectorized code, taking care of header phi's, and more.
virtual BasicBlock * createVectorizedLoopSkeleton()
Creates a basic block for the scalar preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
IRBuilder Builder
The builder that we use.
void fixNonInductionPHIs(VPTransformState &State)
Fix the non-induction PHIs in Plan.
VPBasicBlock * VectorPHVPBB
The vector preheader block of Plan, used as target for check blocks introduced during skeleton creati...
unsigned UF
The vectorization unroll factor to use.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks,...
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
BasicBlock * createScalarPreheader(StringRef Prefix)
Create and return a new IR basic block for the scalar preheader whose name is prefixed with Prefix.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not ...
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
LLVM_ABI APInt getMask() const
For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
InstTy * getInsertPos() const
uint32_t getNumMembers() const
Drive the analysis of interleaved memory accesses in the loop.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue ...
LLVM_ABI void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
An instruction for reading from memory.
Type * getPointerOperandType() const
This analysis provides dependence information for the memory accesses of a loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
iterator_range< block_iterator > blocks() const
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
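A sketch of how the LoopBase queries above are typically combined when inspecting a candidate loop (the helper inspectLoop is illustrative):
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/Analysis/LoopInfo.h"
  void inspectLoop(const llvm::Loop *L) {
    llvm::BasicBlock *Header = L->getHeader();
    llvm::BasicBlock *Latch = L->getLoopLatch();   // nullptr if there is no single latch
    bool Innermost = L->isInnermost();
    llvm::SmallVector<llvm::BasicBlock *, 4> Exiting;
    L->getExitingBlocks(Exiting);                  // blocks with successors outside L
    for (llvm::BasicBlock *BB : L->blocks())
      (void)BB;
    (void)Header; (void)Latch; (void)Innermost;
  }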
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is n...
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
SmallPtrSet< Type *, 16 > ElementTypesInLoop
All element types found in the loop.
bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked load operation for the given DataType and kind of ...
void collectElementTypesForWidening()
Collect all element types in the loop for which widening is needed.
bool canVectorizeReductions(ElementCount VF) const
Returns true if the target machine supports all of the reduction variables found for the given VF.
bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked store operation for the given DataType and kind of...
bool isEpilogueVectorizationProfitable(const ElementCount VF, const unsigned IC) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
bool useWideActiveLaneMask() const
Returns true if the use of wide lane masks is requested and the loop is using tail-folding with a lan...
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
BlockFrequencyInfo * BFI
The BlockFrequencyInfo returned from GetBFI.
void collectInLoopReductions()
Split reductions into those that happen in the loop, and those that happen outside.
BlockFrequencyInfo & getBFI()
Returns the BlockFrequencyInfo for the function if cached, otherwise fetches it via GetBFI.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes()
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
bool preferTailFoldedLoop() const
Returns true if tail-folding is preferred over an epilogue.
bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF)
Returns true if an artificially high cost for emulated masked memrefs should be used.
void collectNonVectorizedAndSetWideningDecisions(ElementCount VF)
Collect values that will not be widened, including Uniforms, Scalars, and Instructions to Scalarize f...
bool isMaskRequired(Instruction *I) const
Wrapper function for LoopVectorizationLegality::isMaskRequired, that passes the Instruction I and if ...
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
std::optional< unsigned > getMaxSafeElements() const
Return maximum safe number of elements to be processed per vector iteration, which do not prevent sto...
const TargetTransformInfo & TTI
Vector target information.
friend class LoopVectorizationPlanner
const Function * TheFunction
LoopVectorizationLegality * Legal
Vectorization legality.
uint64_t getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind, const BasicBlock *BB)
A helper function that returns how much we should divide the cost of a predicated block by.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
DemandedBits * DB
Demanded bits analysis.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vect...
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool OptForSize
Whether this loop should be optimized for size based on function attribute or profile information.
bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind)
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
bool shouldConsiderRegPressureForVF(ElementCount VF)
Loop * TheLoop
The loop that we evaluate.
TTI::TargetCostKind CostKind
The kind of cost that we are calculating.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and cl...
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available...
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
bool selectUserVectorizationFactor(ElementCount UserVF)
Setup cost-based decisions for user vectorization factor.
std::optional< unsigned > getVScaleForTuning() const
Return the value of vscale used for tuning the cost model.
void setTailFoldingStyle(bool IsScalableVF, unsigned UserIC)
Selects and saves TailFoldingStyle.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
bool isInLoopReduction(PHINode *Phi) const
Returns true if the Phi is part of an inloop reduction.
LoopVectorizationCostModel(EpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, std::function< BlockFrequencyInfo &()> GetBFI, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI, bool OptForSize)
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector ...
const MapVector< Instruction *, uint64_t > & getMinimalBitwidths() const
bool isEpilogueAllowed() const
Returns true if an epilogue is allowed (e.g., not prevented by optsize or a loop hint annotation).
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool isLegalGatherOrScatter(Value *V, ElementCount VF)
Returns true if the target machine can represent V as a masked gather or scatter operation.
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool runtimeChecksRequired()
bool shouldConsiderInvariant(Value *Op)
Returns true if Op should be considered invariant and if it is trivially hoistable.
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail fol...
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block requires predication for any reason,...
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
bool usePredicatedReductionSelect(RecurKind RecurrenceKind) const
Returns true if the predicated reduction select should be used to set the incoming value for the redu...
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF)
Return the costs for our two available strategies for lowering a div/rem operation which requires spe...
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
bool isScalarWithPredication(Instruction *I, ElementCount VF)
Returns true if I is an instruction which requires predication and for which our chosen predication s...
bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const
Returns true if we should use strict in-order reductions for the given RdxDesc.
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
std::function< BlockFrequencyInfo &()> GetBFI
A function to lazily fetch BlockFrequencyInfo.
InstructionCost expectedCost(ElementCount VF)
Returns the expected execution cost.
void setCostBasedWideningDecision(ElementCount VF)
Memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
FixedScalableVFPair MaxPermissibleVFWithoutMaxBW
The highest VF possible for this loop, without using MaxBandwidth.
const SmallPtrSetImpl< PHINode * > & getInLoopReductions() const
Returns the set of in-loop reduction PHIs.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
TailFoldingStyle getTailFoldingStyle() const
Returns the TailFoldingStyle that is best for the current loop.
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
MapVector< PHINode *, InductionDescriptor > InductionList
InductionList saves induction variables and maps them to the induction descriptor.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
const SmallVector< BasicBlock *, 4 > & getCountableExitingBlocks() const
Returns all exiting blocks with a countable exit, i.e.
bool isSafeForAnyVectorWidth() const
bool hasUncountableEarlyExit() const
Returns true if the loop has uncountable early exits, i.e.
bool hasHistograms() const
Returns true if any known histogram operations are present in the loop.
const LoopAccessInfo * getLAI() const
Planner drives the vectorization process after having passed Legality checks.
DenseMap< const SCEV *, Value * > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, EpilogueVectorizationKind EpilogueVecKind=EpilogueVectorizationKind::None)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selecte...
EpilogueVectorizationKind
@ None
Not part of epilogue vectorization.
@ Epilogue
Vectorizing the epilogue loop.
@ MainLoop
Vectorizing the main loop of epilogue vectorization.
VPlan & getPlanFor(ElementCount VF) const
Return the VPlan for VF.
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
void updateLoopMetadataAndProfileInfo(Loop *VectorLoop, VPBasicBlock *HeaderVPBB, const VPlan &Plan, bool VectorizingEpilogue, MDNode *OrigLoopID, std::optional< unsigned > OrigAverageTripCount, unsigned OrigLoopInvocationWeight, unsigned EstimatedVFxUF, bool DisableRuntimeUnroll)
Update loop metadata and profile info for both the scalar remainder loop and VectorLoop,...
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gath...
void attachRuntimeChecks(VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const
Attach the runtime checks of RTChecks to Plan.
unsigned selectInterleaveCount(VPlan &Plan, ElementCount VF, InstructionCost LoopCost)
void emitInvalidCostRemarks(OptimizationRemarkEmitter *ORE)
Emit remarks for recipes with invalid costs in the available VPlans.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
void printPlans(raw_ostream &O)
void plan(ElementCount UserVF, unsigned UserIC)
Build VPlans for the specified UserVF and UserIC if they are non-zero or all applicable candidate VFs...
std::unique_ptr< VPlan > selectBestEpiloguePlan(VPlan &MainPlan, ElementCount MainLoopVF, unsigned IC)
void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount) const
Create a check to Plan to see if the vector loop should be executed based on its trip count.
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
std::pair< VectorizationFactor, VPlan * > computeBestVF()
Compute and return the most profitable vectorization factor and the corresponding best VPlan.
This holds vectorization requirements that must be verified late in the process.
Instruction * getExactFPInst()
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
enum ForceKind getForce() const
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
void emitRemarkWithHints() const
Dumps all the hint information.
bool isPotentiallyUnsafe() const
ElementCount getWidth() const
@ FK_Enabled
Forcing enabled.
@ FK_Undefined
Not selected.
@ FK_Disabled
Forcing disabled.
unsigned getPredicate() const
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend t...
unsigned getInterleave() const
This class emits a version of the loop where run-time checks ensure that may-alias pointers can't ove...
Represents a single loop in the control flow graph.
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
This class implements a map that also provides access to all stored values in a deterministic order.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
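A hedged sketch of the PHINode interface above, building a two-way resume phi (the helper makeResumePhi and its arguments are hypothetical):
  #include <cassert>
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Instructions.h"
  llvm::PHINode *makeResumePhi(llvm::IRBuilder<> &B, llvm::Type *Ty,
                               llvm::Value *VA, llvm::BasicBlock *BBA,
                               llvm::Value *VB, llvm::BasicBlock *BBB) {
    llvm::PHINode *Phi = B.CreatePHI(Ty, /*NumReservedValues=*/2, "resume");
    Phi->addIncoming(VA, BBA);
    Phi->addIncoming(VB, BBB);
    assert(Phi->getNumIncomingValues() == 2 && "two incoming edges expected");
    return Phi;
  }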
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
PreservedAnalyses & preserve()
Mark an analysis as preserved.
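A sketch of a pass's run() epilogue using the PreservedAnalyses API above; preserving CFGAnalyses and LoopAnalysis here is just an example, not what the vectorizer itself reports:
  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/IR/PassManager.h"
  llvm::PreservedAnalyses reportPreserved(bool Changed) {
    if (!Changed)
      return llvm::PreservedAnalyses::all();
    llvm::PreservedAnalyses PA;
    PA.preserveSet<llvm::CFGAnalyses>();  // example: claim the CFG shape is unchanged
    PA.preserve<llvm::LoopAnalysis>();    // example: claim LoopInfo stays valid
    return PA;
  }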
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
unsigned getOpcode() const
Type * getRecurrenceType() const
Returns the type of the recurrence.
bool hasUsesOutsideReductionChain() const
Returns true if the reduction PHI has any uses outside the reduction chain.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
static bool isFindLastRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
unsigned getMinWidthCastToRecurrenceTypeInBits() const
Returns the minimum width used by the recurrence in bits.
LLVM_ABI SmallVector< Instruction *, 4 > getReductionOpChain(PHINode *Phi, Loop *L) const
Attempts to find a chain of operations from Phi to LoopExitInst that can be treated as a set of reduc...
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
bool isOrdered() const
Expose an ordered FP reduction to the instance users.
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class uses information about analyze scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * getURemExpr(SCEVUse LHS, SCEVUse RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
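A minimal sketch combining the ScalarEvolution trip-count queries above (the helper tripCountSCEV is made up):
  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/Analysis/ScalarEvolution.h"
  const llvm::SCEV *tripCountSCEV(llvm::ScalarEvolution &SE, const llvm::Loop *L) {
    const llvm::SCEV *BTC = SE.getBackedgeTakenCount(L);
    if (llvm::isa<llvm::SCEVCouldNotCompute>(BTC))
      return BTC;                               // trip count is not computable
    return SE.getTripCountFromExitCount(BTC);   // exit count + 1, in a safe type
  }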
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
void insert_range(Range &&R)
size_type count(const_arg_type key) const
Count the number of elements of a given key in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
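A quick sketch of the small-container operations listed above (contents are illustrative):
  #include "llvm/ADT/SmallPtrSet.h"
  #include "llvm/ADT/SmallVector.h"
  void smallContainerSketch() {
    llvm::SmallVector<int, 4> Vec;               // inline storage for 4 elements
    Vec.push_back(1);
    Vec.emplace_back(2);
    llvm::SmallPtrSet<const int *, 8> Seen;
    bool Inserted = Seen.insert(&Vec[0]).second; // true on the first insertion
    bool Present = Seen.contains(&Vec[0]);       // true
    (void)Inserted; (void)Present;
  }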
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionalit...
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
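TypeSwitch gives dyn_cast-based dispatch in a single expression; a hedged sketch of the Case interface above (the classification helper classifyInst is illustrative):
  #include "llvm/ADT/StringRef.h"
  #include "llvm/ADT/TypeSwitch.h"
  #include "llvm/IR/Instructions.h"
  llvm::StringRef classifyInst(llvm::Instruction *I) {
    return llvm::TypeSwitch<llvm::Instruction *, llvm::StringRef>(I)
        .Case<llvm::LoadInst>([](llvm::LoadInst *) { return "load"; })
        .Case<llvm::StoreInst>([](llvm::StoreInst *) { return "store"; })
        .Default([](llvm::Instruction *) { return "other"; });
  }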
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
A Use represents the edge between a Value definition and its users.
iterator_range< op_iterator > op_range
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Value * getOperand(unsigned i) const
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
RecipeListTy::iterator iterator
Instruction iterators...
iterator begin()
Recipe iterator methods.
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
InstructionCost cost(ElementCount VF, VPCostContext &Ctx) override
Return the cost of this VPBasicBlock.
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
const VPRecipeBase & front() const
VPRecipeBase * getTerminator()
If the block has multiple successors, return the branch recipe terminating the block.
const VPBasicBlock * getExitingBasicBlock() const
void setName(const Twine &newName)
const VPBasicBlock * getEntryBasicBlock() const
VPBlockBase * getSingleSuccessor() const
static void reassociateBlocks(VPBlockBase *Old, VPBlockBase *New)
Reassociate all the blocks connected to Old so that they now point to New.
static auto blocksOnly(T &&Range)
Return an iterator range over Range which only includes BlockTy blocks.
VPlan-based builder utility analogous to IRBuilder.
VPInstruction * createAdd(VPValue *LHS, VPValue *RHS, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="", VPRecipeWithIRFlags::WrapFlagsTy WrapFlags={false, false})
T * insert(T *R)
Insert R at the current insertion point. Returns R unchanged.
static VPBuilder getToInsertAfter(VPRecipeBase *R)
Create a VPBuilder to insert after R.
VPPhi * createScalarPhi(ArrayRef< VPValue * > IncomingValues, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="", const VPIRFlags &Flags={})
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
Canonical scalar induction phi of the vector loop.
VPIRValue * getStartValue() const
Returns the start value of the canonical induction.
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
A recipe representing a sequence of load -> update -> store as part of a histogram operation.
A special type of VPBasicBlock that wraps an existing IR basic block.
Class to record and manage LLVM IR flags.
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const
This is a concrete Recipe that models a single VPlan-level instruction.
unsigned getNumOperandsWithoutMask() const
Returns the number of operands, excluding the mask if the VPInstruction is masked.
iterator_range< operand_iterator > operandsWithoutMask()
Returns an iterator range over the operands excluding the mask operand if present.
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
@ ComputeReductionResult
Reduce the operands to the final reduction result using the operation specified via the operation's V...
unsigned getOpcode() const
void setName(StringRef NewName)
Set the symbolic name for the VPInstruction.
VPValue * getMask() const
Returns the mask for the VPInstruction.
bool isMasked() const
Returns true if the VPInstruction has a mask operand.
VPInterleaveRecipe is a recipe for transforming an interleave group of load or stores into one wide l...
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipies from IR instructions.
VPRecipeBase * tryToCreateWidenNonPhiRecipe(VPSingleDefRecipe *R, VFRange &Range)
Create and return a widened recipe for a non-phi recipe R if one can be created within the given VF R...
VPHistogramRecipe * widenIfHistogram(VPInstruction *VPI)
If VPI represents a histogram operation (as determined by LoopVectorizationLegality) make that safe f...
VPValue * getVPValueOrAddLiveIn(Value *V)
VPRecipeBase * tryToWidenMemory(VPInstruction *VPI, VFRange &Range)
Check if the load or store instruction VPI should widened for Range.Start and potentially masked.
bool replaceWithFinalIfReductionStore(VPInstruction *VPI, VPBuilder &FinalRedStoresBuilder)
If VPI is a store of a reduction into an invariant address, delete it.
VPReplicateRecipe * handleReplication(VPInstruction *VPI, VFRange &Range)
Build a VPReplicationRecipe for VPI.
bool isOrdered() const
Returns true, if the phi is part of an ordered reduction.
unsigned getVFScaleFactor() const
Get the factor that the VF of this recipe's output should be scaled by, or 1 if it isn't scaled.
bool isInLoop() const
Returns true if the phi is part of an in-loop reduction.
RecurKind getRecurrenceKind() const
Returns the recurrence kind of the reduction.
A recipe to represent inloop, ordered or partial reduction operations.
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
const VPBlockBase * getEntry() const
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the region.
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
void setOperand(unsigned I, VPValue *New)
operand_iterator op_begin()
VPValue * getOperand(unsigned N) const
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into,...
Value * getLiveInIRValue() const
Return the underlying IR value for a VPIRValue.
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
void replaceAllUsesWith(VPValue *New)
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplac...
A recipe to compute a pointer to the last element of each part of a widened memory access for widened...
A recipe to compute the pointers for widened memory accesses of SourceElementTy.
VPWidenCastRecipe is a recipe to create vector cast instructions.
A recipe for handling GEP instructions.
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
A recipe for widened phis.
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient outp...
bool hasVF(ElementCount VF) const
ElementCount getSingleVF() const
Returns the single VF of the plan, asserting that the plan has exactly one VF.
VPBasicBlock * getEntry()
VPValue * getTripCount() const
The trip count of the original loop.
VPSymbolicValue & getVFxUF()
Returns VF * UF of the vector loop region.
bool hasUF(unsigned UF) const
ArrayRef< VPIRBasicBlock * > getExitBlocks() const
Return an ArrayRef containing VPIRBasicBlocks wrapping the exit blocks of the original scalar loop.
VPIRValue * getOrAddLiveIn(Value *V)
Gets the live-in VPIRValue for V or adds a new live-in (if none exists yet) for V.
VPIRValue * getZero(Type *Ty)
Return a VPIRValue wrapping the null value of type Ty.
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
bool hasEarlyExit() const
Returns true if the VPlan is based on a loop with an early exit.
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
VPBasicBlock * getMiddleBlock()
Returns the 'middle' block of the plan, that is the block that selects whether to execute the scalar ...
VPSymbolicValue & getUF()
Returns the UF of the vector loop region.
VPBasicBlock * getScalarPreheader() const
Return the VPBasicBlock for the preheader of the scalar loop.
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
VPIRBasicBlock * getScalarHeader() const
Return the VPIRBasicBlock wrapping the header of the scalar loop.
VPBasicBlock * getVectorPreheader()
Returns the preheader of the vector loop region, if one exists, or null otherwise.
VPSymbolicValue & getVF()
Returns the VF of the vector loop region.
LLVM_ABI_FOR_TEST VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
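A brief sketch of constructing fixed and scalable vector types with VectorType::get above (the helper makeVecTy is hypothetical):
  #include "llvm/IR/DerivedTypes.h"
  llvm::VectorType *makeVecTy(llvm::LLVMContext &Ctx, bool Scalable) {
    llvm::Type *F32 = llvm::Type::getFloatTy(Ctx);
    llvm::ElementCount EC = llvm::ElementCount::get(/*MinVal=*/4, Scalable);
    return llvm::VectorType::get(F32, EC);  // <4 x float> or <vscale x 4 x float>
  }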
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr bool isZero() const
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
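The isKnown* predicates are conservative: they return true only when the relation holds for every possible vscale. A hedged sketch with assumed values:
ElementCount F8 = ElementCount::getFixed(8);
ElementCount S4 = ElementCount::getScalable(4);   // 4 * vscale lanes
bool Known = ElementCount::isKnownLE(F8, S4);     // false: 8 <= 4*vscale is not provable
unsigned MinLanes = S4.getKnownMinValue();        // 4
ElementCount Half = S4.divideCoefficientBy(2);    // 2 * vscale lanes
(void)Known; (void)MinLanes; (void)Half;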
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ PredicateElseScalarEpilogue
@ PredicateOrDontVectorize
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
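These matchers nest; a hedged sketch of typical IR pattern matching (llvm/IR/PatternMatch.h, with V an assumed Value pointer):
using namespace llvm::PatternMatch;
Value *X, *Y;
// Match (X + Y) * 1 and capture the add's operands.
if (match(V, m_Mul(m_Add(m_Value(X), m_Value(Y)), m_One()))) {
  // X and Y are bound here.
}
// Restrict a sub-pattern to single-use values and look through zext/sext.
if (match(V, m_OneUse(m_ZExtOrSExt(m_Value(X))))) {
  // X is the operand of a single-use zext or sext.
}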
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
specificloop_ty m_SpecificLoop(const Loop *L)
cst_pred_ty< is_specific_signed_cst > m_scev_SpecificSInt(int64_t V)
Match an SCEV constant with a plain signed integer (sign-extended value will be matched)
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
bool match(const SCEV *S, const Pattern &P)
SCEVAffineAddRec_match< Op0_t, Op1_t, match_isa< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
int_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
bool matchFindIVResult(VPInstruction *VPI, Op0_t ReducedIV, Op1_t Start)
Match FindIV result pattern: select(icmp ne ComputeReductionResult(ReducedIV), Sentinel),...
VPInstruction_match< VPInstruction::ExtractLastLane, Op0_t > m_ExtractLastLane(const Op0_t &Op0)
VPInstruction_match< VPInstruction::BranchOnCount > m_BranchOnCount()
auto m_VPValue()
Match an arbitrary VPValue and ignore it.
VPInstruction_match< VPInstruction::ExtractLastPart, Op0_t > m_ExtractLastPart(const Op0_t &Op0)
bool match(Val *V, const Pattern &P)
VPInstruction_match< VPInstruction::ExtractLane, Op0_t, Op1_t > m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
friend class Instruction
Iterator for Instructions in a BasicBlock.
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr)
Get or create a VPValue that corresponds to the expansion of Expr.
VPBasicBlock * getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT)
Returns the header block of the first, top-level loop, or null if none exist.
bool isAddressSCEVForCost(const SCEV *Addr, ScalarEvolution &SE, const Loop *L)
Returns true if Addr is an address SCEV that can be passed to TTI::getAddressComputationCost,...
VPRecipeBase * findRecipe(VPValue *Start, PredT Pred)
Search Start's users for a recipe satisfying Pred, looking through recipes with definitions.
VPSingleDefRecipe * findHeaderMask(VPlan &Plan)
Collect the header mask with the pattern: (ICMP_ULE, WideCanonicalIV, backedge-taken-count) TODO: Int...
const SCEV * getSCEVExprForVPValue(const VPValue *V, PredicatedScalarEvolution &PSE, const Loop *L=nullptr)
Return the SCEV expression for V.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
LLVM_ABI Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
auto cast_if_present(const Y &Val)
cast_if_present<X> - Functionally identical to cast, except that a null value is accepted.
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
LLVM_ABI_FOR_TEST cl::opt< bool > VerifyEachVPlan
LLVM_ABI std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Return either:
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
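Conceptually (a hedged sketch, not the actual helper): for a fixed VF the runtime value is a plain constant, while a scalable VF materializes vscale and multiplies it by the known minimum:
// Sketch only; B is an assumed IRBuilderBase, Ty an integer type.
Value *computeRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  Value *MinVF = ConstantInt::get(Ty, VF.getKnownMinValue());
  if (!VF.isScalable())
    return MinVF;                                            // e.g. VF = 4 -> i64 4
  Value *VScale = B.CreateIntrinsic(Intrinsic::vscale, {Ty}, {});
  return B.CreateMul(VScale, MinVF);                         // vscale x 4 -> vscale * 4
}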
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
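A hedged sketch of these range helpers from llvm/ADT/STLExtras.h (BB is an assumed BasicBlock pointer):
SmallVector<int, 8> Dst;
std::vector<int> Src = {1, 2, 3, 4};
append_range(Dst, drop_begin(Src));   // Dst = {2, 3, 4}
// Erase dead instructions while walking a block: the early-inc iterator is
// advanced before the body runs, so erasing the current instruction is safe.
for (Instruction &I : make_early_inc_range(*BB))
  if (isInstructionTriviallyDead(&I))
    I.eraseFromParent();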
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
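A short hedged sketch that combines the load/store helpers listed in this dump (I is an assumed load or store instruction):
const Value *Ptr = getLoadStorePointerOperand(I);   // address operand
Type *AccessTy   = getLoadStoreType(I);             // loaded or stored type
Align Alignment  = getLoadStoreAlignment(I);        // access alignment
unsigned AS      = getLoadStoreAddressSpace(I);     // pointer address space
(void)Ptr; (void)AccessTy; (void)Alignment; (void)AS;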
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
LLVM_ABI_FOR_TEST cl::opt< bool > VPlanPrintAfterAll
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
bool isa_and_nonnull(const Y &Val)
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing t...
SmallVector< VPRegisterUsage, 8 > calculateRegisterUsageForPlan(VPlan &Plan, ArrayRef< ElementCount > VFs, const TargetTransformInfo &TTI, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Estimate the register usage for Plan and vectorization factors in VFs by calculating the highest numb...
auto map_range(ContainerTy &&C, FuncTy F)
Return a range that applies F to the elements of C.
constexpr auto bind_front(FnT &&Fn, BindArgsT &&...BindArgs)
C++20 bind_front.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
auto reverse(ContainerTy &&C)
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI_FOR_TEST cl::opt< bool > EnableWideActiveLaneMask
UncountableExitStyle
Different methods of handling early exits.
@ ReadOnly
No side effects to worry about, so we can process any uncountable exits in the loop and branch either...
@ MaskedHandleExitInScalarLoop
All memory operations other than the load(s) required to determine whether an uncountable exit occurr...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI cl::opt< bool > EnableLoopVectorization
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
LLVM_ABI_FOR_TEST cl::list< std::string > VPlanPrintAfterPasses
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
LLVM_ABI void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
T * find_singleton(R &&Range, Predicate P, bool AllowRepeats=false)
Return the single value in Range that satisfies P(<member of Range> *, AllowRepeats)->T * returning n...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elem...
static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr, DebugLoc DL={})
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
@ CM_EpilogueNotAllowedLowTripLoop
@ CM_EpilogueNotNeededFoldTail
@ CM_EpilogueNotAllowedFoldTail
@ CM_EpilogueNotAllowedOptSize
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
RecurKind
These are the kinds of recurrences that we support.
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ Sub
Subtraction of integers.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity for the @llvm.vector....
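For example (a hedged sketch using the free-function form declared above; Builder is an assumed IRBuilder, and FMF only matters for floating-point kinds):
// Identity of an integer add reduction is 0; FAdd depends on fast-math flags.
Value *AddId = getRecurrenceIdentity(RecurKind::Add, Builder.getInt32Ty(), FastMathFlags());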
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the specified block at the specified instruction.
LLVM_ABI void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding o...
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Value * emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue, Value *Step, InductionDescriptor::InductionKind InductionKind, const BinaryOperator *InductionBinOp)
Compute the transformed value of Index at offset StartValue using step StepValue.
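For a plain integer induction this reduces to StartValue + Index * Step; a hedged sketch of that arithmetic with an assumed IRBuilderBase B (pointer and floating-point inductions are handled differently):
Value *Offset      = B.CreateMul(Index, Step);        // Index * Step
Value *Transformed = B.CreateAdd(StartValue, Offset); // StartValue + Index * Step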
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
cl::opt< bool > EnableVPlanNativePath
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
LLVM_ABI Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool pred_empty(const BasicBlock *BB)
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataAndControlFlow
Use predicate to control both data and control flow.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
@ Data
Use predicate only to mask operations on data in the loop.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool hasBranchWeightMD(const Instruction &I)
Checks if an instructions has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
@ Increment
Incrementally increasing token ID.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
std::unique_ptr< VPlan > VPlanPtr
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
LLVM_ABI_FOR_TEST bool verifyVPlanIsValid(const VPlan &Plan)
Verify invariants for general VPlans.
LLVM_ABI MapVector< Instruction *, uint64_t > computeMinimumValueSizes(ArrayRef< BasicBlock * > Blocks, DemandedBits &DB, const TargetTransformInfo *TTI=nullptr)
Compute a map of integer instructions to their minimum legal type size.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
LLVM_ABI_FOR_TEST cl::opt< bool > VPlanPrintVectorRegionScope
LLVM_ABI cl::opt< bool > EnableLoopInterleaving
This struct is a compact representation of a valid (non-zero power of two) alignment.
A special type used by analysis passes to provide an address that identifies that particular analysis...
static LLVM_ABI void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given valu...
Encapsulate information regarding vectorization of a loop and its epilogue.
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF, VPlan &EpiloguePlan)
BasicBlock * MainLoopIterationCountCheck
BasicBlock * EpilogueIterationCountCheck
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
This holds details about a histogram operation – a load -> update -> store sequence where each lane i...
std::optional< unsigned > MaskPos
LLVM_ABI LoopVectorizeResult runImpl(Function &F)
LLVM_ABI bool processLoop(Loop *L)
LoopAccessInfoManager * LAIs
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI LoopVectorizePass(LoopVectorizeOptions Opts={})
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
std::function< BlockFrequencyInfo &()> GetBFI
TargetTransformInfo * TTI
Storage for information about made changes.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
Struct to hold various analysis needed for cost computations.
LoopVectorizationCostModel & CM
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of al...
uint64_t getPredBlockCostDivisor(BasicBlock *BB) const
TargetTransformInfo::TargetCostKind CostKind
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A VPValue representing a live-in from the input IR or a constant.
A struct that represents some properties of the register usage of a loop.
InstructionCost spillCost(VPCostContext &Ctx, unsigned OverrideMaxNumRegs=0) const
Calculate the estimated cost of any spills due to using more registers than the number available for ...
A recipe for widening load operations, using the address to load from and an optional mask.
A recipe for widening store operations, using the stored value, the address to store to and an option...
TODO: The following VectorizationFactor was pulled out of the LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
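When two candidates are compared, costs are usually normalized per covered lane so that wider factors are not penalized for covering more iterations. A hedged, fixed-width-only sketch (the real heuristic also accounts for scalable widths and trip counts):
// Hypothetical helper: prefer A over B if its cost per lane is smaller.
// Cross-multiplying avoids division on InstructionCost values.
bool isMoreProfitable(const VectorizationFactor &A, const VectorizationFactor &B) {
  return A.Cost * B.Width.getFixedValue() < B.Cost * A.Width.getFixedValue();
}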
static LLVM_ABI bool HoistRuntimeChecks