#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

    "llvm.loop.vectorize.followup_vectorized";
    "llvm.loop.vectorize.followup_epilogue";

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
    cl::desc("Enable vectorization of epilogue loops."));

    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

    cl::desc("The maximum allowed number of runtime memory checks"));
224 "prefer-predicate-over-epilogue",
227 cl::desc(
"Tail-folding and predication preferences over creating a scalar "
231 "Don't tail-predicate loops, create scalar epilogue"),
233 "predicate-else-scalar-epilogue",
234 "prefer tail-folding, create scalar epilogue if tail "
237 "predicate-dont-vectorize",
238 "prefers tail-folding, don't attempt vectorization if "
239 "tail-folding fails.")));
242 "force-tail-folding-style",
cl::desc(
"Force the tail folding style"),
245 clEnumValN(TailFoldingStyle::None,
"none",
"Disable tail folding"),
247 TailFoldingStyle::Data,
"data",
248 "Create lane mask for data only, using active.lane.mask intrinsic"),
249 clEnumValN(TailFoldingStyle::DataWithoutLaneMask,
250 "data-without-lane-mask",
251 "Create lane mask with compare/stepvector"),
252 clEnumValN(TailFoldingStyle::DataAndControlFlow,
"data-and-control",
253 "Create lane mask using active.lane.mask intrinsic, and use "
254 "it for both data and control flow"),
255 clEnumValN(TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck,
256 "data-and-control-without-rt-check",
257 "Similar to data-and-control, but remove the runtime check"),
258 clEnumValN(TailFoldingStyle::DataWithEVL,
"data-with-evl",
259 "Use predicated EVL instructions for tail folding. If EVL "
260 "is unsupported, fallback to data-without-lane-mask.")));
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

    cl::desc(
        "Enable vectorization on masked interleaved memory accesses in a loop"));

    cl::desc("A flag that overrides the target's number of scalar registers."));

    cl::desc("A flag that overrides the target's number of vector registers."));

    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));
310 "The cost of a loop that is considered 'small' by the interleaver."));
314 cl::desc(
"Enable the use of the block frequency analysis to access PGO "
315 "heuristics minimizing code growth in cold regions and being more "
316 "aggressive in hot regions."));
322 "Enable runtime interleaving until load/store ports are saturated"));
327 cl::desc(
"Max number of stores to be predicated behind an if."));
331 cl::desc(
"Count the induction variable only once when interleaving"));
335 cl::desc(
"Enable if predication of stores during vectorization."));
339 cl::desc(
"The maximum interleave count to use when interleaving a scalar "
340 "reduction in a nested loop."));
345 cl::desc(
"Prefer in-loop vector reductions, "
346 "overriding the targets preference."));
350 cl::desc(
"Enable the vectorisation of loops with in-order (strict) "
356 "Prefer predicating a reduction operation over an after loop select."));
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

    cl::desc("Enable loop interleaving in Loop vectorization passes"));

    cl::desc("Run the Loop vectorization passes"));

    cl::desc("Use dot format instead of plain text when dumping VPlans"));

    "force-widen-divrem-via-safe-divisor", cl::Hidden,
    cl::desc(
        "Override cost based safe divisor widening for div/rem instructions"));

    "vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true),
    cl::desc("Try wider VFs if they enable the use of vector variants"));
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);

class GeneratedRTChecks;

      this->MinProfitableTripCount = VecWidth;

  virtual std::pair<BasicBlock *, Value *>

      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

      const SCEV2ValueTy &ExpandedSCEVs,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
743 "A high UF for the epilogue loop is likely not beneficial.");
763 GeneratedRTChecks &Checks)
765 EPI.MainLoopVF,
EPI.MainLoopVF,
EPI.MainLoopUF, LVL,
772 const SCEV2ValueTy &ExpandedSCEVs)
final {
779 virtual std::pair<BasicBlock *, Value *>
803 GeneratedRTChecks &Check)
808 std::pair<BasicBlock *, Value *>
832 GeneratedRTChecks &Checks)
839 std::pair<BasicBlock *, Value *>
  if (I->getDebugLoc() != Empty)
    return I->getDebugLoc();

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst->getDebugLoc();
  }

  return I->getDebugLoc();
  dbgs() << "LV: " << Prefix << DebugMsg;

  if (I && I->getDebugLoc())
    DL = I->getDebugLoc();

  return B.CreateElementCount(Ty, VF);

  assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && "Invalid loop count");

             << "loop not vectorized: " << OREMsg);
963 "Vectorizing: ", TheLoop->
isInnermost() ?
"innermost loop" :
"outer loop",
969 <<
"vectorized " << LoopType <<
"loop (vectorization width: "
971 <<
", interleaved count: " <<
ore::NV(
"InterleaveCount", IC) <<
")";
1115 "Profitable to scalarize relevant only for VF > 1.");
1118 "cost-model should not be used for outer loops (in VPlan-native path)");
1120 auto Scalars = InstsToScalarize.find(VF);
1121 assert(Scalars != InstsToScalarize.end() &&
1122 "VF not yet analyzed for scalarization profitability");
1123 return Scalars->second.contains(
I);
1130 "cost-model should not be used for outer loops (in VPlan-native path)");
1134 if (isa<PseudoProbeInst>(
I))
1140 auto UniformsPerVF = Uniforms.find(VF);
1141 assert(UniformsPerVF != Uniforms.end() &&
1142 "VF not yet analyzed for uniformity");
1143 return UniformsPerVF->second.count(
I);
1150 "cost-model should not be used for outer loops (in VPlan-native path)");
1154 auto ScalarsPerVF = Scalars.find(VF);
1155 assert(ScalarsPerVF != Scalars.end() &&
1156 "Scalar values are not calculated for VF");
1157 return ScalarsPerVF->second.count(
I);
1163 return VF.
isVector() && MinBWs.contains(
I) &&
1185 WideningDecisions[std::make_pair(
I, VF)] = std::make_pair(W,
Cost);
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
        WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);

           "cost-model should not be used for outer loops (in VPlan-native path)");
    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
    return Itr->second.first;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;

                               std::optional<unsigned> MaskPos,
    CallWideningDecisions[std::make_pair(CI, VF)] = {Kind, Variant, IID,

    return CallWideningDecisions.at(std::make_pair(CI, VF));
    auto *Trunc = dyn_cast<TruncInst>(I);
    Value *Op = Trunc->getOperand(0);

    if (VF.isScalar() || Uniforms.contains(VF))
    collectLoopUniforms(VF);
    collectLoopScalars(VF);

    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);

      const RecurrenceDescriptor &RdxDesc = Reduction.second;
      return TTI.isLegalToVectorizeReduction(RdxDesc, VF);

    return ScalarCost < SafeDivisorCost;

  std::pair<InstructionCost, InstructionCost>

      LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
      LLVM_DEBUG(
          dbgs() << "LV: Loop requires scalar epilogue: multiple exits\n");
               "interleaved group requires scalar epilogue\n");
    LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");

    auto RequiresScalarEpilogue = [this](ElementCount VF) {
    bool IsRequired = all_of(Range, RequiresScalarEpilogue);
    assert((IsRequired || none_of(Range, RequiresScalarEpilogue)) &&
           "all VFs in range must agree on whether a scalar epilogue is required");
    if (!ChosenTailFoldingStyle)
    return IVUpdateMayOverflow ? ChosenTailFoldingStyle->first
                               : ChosenTailFoldingStyle->second;

    assert(!ChosenTailFoldingStyle && "Tail folding must not be selected yet.");
    ChosenTailFoldingStyle =
    ChosenTailFoldingStyle = std::make_pair(

        IsScalableVF && UserIC <= 1 &&
      ChosenTailFoldingStyle =
          << "LV: Preference for VP intrinsics indicated. Will "
             "not try to generate VP Intrinsics "
          << (UserIC > 1
                  ? "since interleave count specified is greater than 1.\n"
                  : "due to non-interleaving reasons.\n"));

    return InLoopReductions.contains(Phi);

    WideningDecisions.clear();
    CallWideningDecisions.clear();
  std::optional<InstructionCost>

  unsigned NumPredStores = 0;

                           bool FoldTailByMasking);

  ElementCount getMaximizedVFForTarget(unsigned MaxTripCount,
                                       unsigned SmallestType,
                                       unsigned WidestType,
                                       bool FoldTailByMasking);

  bool isScalableVectorizationAllowed();

  ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);

      PredicatedBBsAfterVectorization;

  std::optional<std::pair<TailFoldingStyle, TailFoldingStyle>>
      ChosenTailFoldingStyle;

  std::optional<bool> IsScalableVectorizationAllowed;

                                  ScalarCostsTy &ScalarCosts,

                     std::pair<InstWidening, InstructionCost>>;
  DecisionList WideningDecisions;

  using CallDecisionList =
  CallDecisionList CallWideningDecisions;

        Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
class GeneratedRTChecks {
  Value *SCEVCheckCond = nullptr;
  Value *MemRuntimeCheckCond = nullptr;
  bool CostTooHigh = false;
  const bool AddBranchWeights;
  Loop *OuterLoop = nullptr;

                    bool AddBranchWeights)
      : DT(DT), LI(LI), TTI(TTI), SCEVExp(SE, DL, "scev.check"),
        MemCheckExp(SE, DL, "scev.check"), AddBranchWeights(AddBranchWeights) {}

                     nullptr, "vector.scevcheck");

    if (RtPtrChecking.Need) {
      auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
      MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,

      auto DiffChecks = RtPtrChecking.getDiffChecks();
        Value *RuntimeVF = nullptr;
          RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);

            MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),

      assert(MemRuntimeCheckCond &&
             "no RT checks generated although RtPtrChecking "
             "claimed checks are required");
    if (!MemCheckBlock && !SCEVCheckBlock)

    if (SCEVCheckBlock) {
    if (MemCheckBlock) {
    if (MemCheckBlock) {
    if (SCEVCheckBlock) {

    OuterLoop = L->getParentLoop();

    if (SCEVCheckBlock || MemCheckBlock)

        if (SCEVCheckBlock->getTerminator() == &I)
      if (MemCheckBlock) {
          if (MemCheckBlock->getTerminator() == &I)

        unsigned BestTripCount = 2;
          BestTripCount = SmallTC;
          BestTripCount = *EstimatedTC;
        BestTripCount = std::max(BestTripCount, 1U);

        NewMemCheckCost = std::max(*NewMemCheckCost.getValue(),

        if (BestTripCount > 1)
          LLVM_DEBUG(dbgs()
                     << "We expect runtime memory checks to be hoisted "
                     << "out of the outer loop. Cost reduced from "
                     << MemCheckCost << " to " << NewMemCheckCost << '\n');

        MemCheckCost = NewMemCheckCost;

      RTCheckCost += MemCheckCost;

    if (SCEVCheckBlock || MemCheckBlock)
      LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
  ~GeneratedRTChecks() {
      SCEVCleaner.markResultUsed();
    if (!MemRuntimeCheckCond)
      MemCheckCleaner.markResultUsed();

    if (MemRuntimeCheckCond) {
      auto &SE = *MemCheckExp.getSE();
          I.eraseFromParent();

    MemCheckCleaner.cleanup();
    SCEVCleaner.cleanup();

      SCEVCheckBlock->eraseFromParent();
    if (MemRuntimeCheckCond)
      MemCheckBlock->eraseFromParent();

    SCEVCheckCond = nullptr;
    if (auto *C = dyn_cast<ConstantInt>(Cond))

    SCEVCheckBlock->getTerminator()->eraseFromParent();
    SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,

    if (AddBranchWeights)
    return SCEVCheckBlock;

    if (!MemRuntimeCheckCond)
    MemCheckBlock->moveBefore(LoopVectorPreHeader);

    if (AddBranchWeights) {
    MemCheckBlock->getTerminator()->setDebugLoc(
        Pred->getTerminator()->getDebugLoc());

    MemRuntimeCheckCond = nullptr;
    return MemCheckBlock;

  return Style == TailFoldingStyle::Data ||
         Style == TailFoldingStyle::DataAndControlFlow ||
         Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;

  return Style == TailFoldingStyle::DataAndControlFlow ||
         Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
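
// Taken together: the first predicate holds for every style that masks the
// data path (Data and both DataAndControlFlow variants), while the second
// additionally requires the mask to feed the loop's exit branch, which only
// the two DataAndControlFlow styles do.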
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "

  if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
    for (Loop *InnerL : L)

                               ? B.CreateSExtOrTrunc(Index, StepTy)
                               : B.CreateCast(Instruction::SIToFP, Index, StepTy);
  if (CastedIndex != Index) {
    Index = CastedIndex;

    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
    if (auto *CY = dyn_cast<ConstantInt>(Y))
    return B.CreateAdd(X, Y);
    assert(X->getType()->getScalarType() == Y->getType() &&
           "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
    if (auto *CY = dyn_cast<ConstantInt>(Y))
    VectorType *XVTy = dyn_cast<VectorType>(X->getType());
    if (XVTy && !isa<VectorType>(Y->getType()))
      Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
    return B.CreateMul(X, Y);

  switch (InductionKind) {
           "Vector indices not supported for integer inductions yet");
           "Index type does not match StartValue type");
    if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
      return B.CreateSub(StartValue, Index);

           "Vector indices not supported for FP inductions yet");
    assert((InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub) &&
           "Original bin op should be defined for FP induction");
    return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,

  if (F.hasFnAttribute(Attribute::VScaleRange))
    return F.getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();

  return std::nullopt;
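
// Example: a function built for 512-bit SVE typically carries
// vscale_range(4,4) (vscale = 512 / 128), so this returns 4 and a scalable VF
// of <vscale x 4 x i32> is tuned as if it covered 16 lanes.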
                           ElementCount VF,
                           std::optional<unsigned> UF = std::nullopt) {
  unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);

  Type *IdxTy = Cost->Legal->getWidestInductionType();
  APInt MaxUIntTripCount = cast<IntegerType>(IdxTy)->getMask();

          Cost->PSE.getSE()->getSmallConstantMaxTripCount(Cost->TheLoop)) {
    std::optional<unsigned> MaxVScale =
      MaxVF *= *MaxVScale;

    return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
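
// Worked example: with an i8 induction type MaxUIntTripCount is 255. For a
// known maximum trip count of 250 and MaxVF * MaxUF = 16, the headroom
// 255 - 250 = 5 is not greater than 16, so the rounded-up vector trip count
// could wrap and the induction overflow check must stay; with a trip count of
// 200 the headroom 55 exceeds 16 and the check is known to be redundant.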
2347 assert(!Instr->getType()->isAggregateType() &&
"Can't handle vectors");
2351 if (isa<NoAliasScopeDeclInst>(Instr))
2356 bool IsVoidRetTy = Instr->getType()->isVoidTy();
2360 Cloned->
setName(Instr->getName() +
".cloned");
2365 "inferred type and type from generated instructions do not match");
2371 if (
auto DL = Instr->getDebugLoc())
2377 auto InputInstance = Instance;
2381 Cloned->
setOperand(
I.index(), State.
get(Operand, InputInstance));
2388 State.
set(RepRecipe, Cloned, Instance);
2391 if (
auto *
II = dyn_cast<AssumeInst>(Cloned))
2396 if (IfPredicateInstr)
2420 if (
Cost->foldTailByMasking()) {
2422 "VF*UF must be a power of 2 when folding tail by masking");
2468 auto CreateStep = [&]() ->
Value * {
2493 Value *MaxUIntTripCount =
2494 ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask());
2508 "TC check is expected to dominate Bypass");
2523 if (!SCEVCheckBlock)
2529 "Cannot SCEV check stride or overflow when optimizing for size");
2544 return SCEVCheckBlock;
2563 "Cannot emit memory checks when optimizing for size, unless forced "
2569 <<
"Code-size may be reduced by not forcing "
2570 "vectorization, or by source-code modifications "
2571 "eliminating the need for runtime checks "
2572 "(e.g., adding 'restrict').";
2580 return MemCheckBlock;
2589 "multiple exit loop without required epilogue?");
2593 LI,
nullptr,
Twine(Prefix) +
"middle.block");
2596 nullptr,
Twine(Prefix) +
"scalar.ph");
2602 std::pair<BasicBlock *, Value *> AdditionalBypass) {
2608 Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
2609 if (OrigPhi == OldInduction) {
2616 if (
II.getInductionBinOp() && isa<FPMathOperator>(
II.getInductionBinOp()))
2617 B.setFastMathFlags(
II.getInductionBinOp()->getFastMathFlags());
2620 Step,
II.getKind(),
II.getInductionBinOp());
2624 if (AdditionalBypass.first) {
2625 B.SetInsertPoint(AdditionalBypass.first,
2626 AdditionalBypass.first->getFirstInsertionPt());
2627 EndValueFromAdditionalBypass =
2629 Step,
II.getKind(),
II.getInductionBinOp());
2630 EndValueFromAdditionalBypass->
setName(
"ind.end");
2651 if (AdditionalBypass.first)
2653 EndValueFromAdditionalBypass);
2660 const SCEV2ValueTy &ExpandedSCEVs) {
2661 const SCEV *Step =
ID.getStep();
2662 if (
auto *
C = dyn_cast<SCEVConstant>(Step))
2663 return C->getValue();
2664 if (
auto *U = dyn_cast<SCEVUnknown>(Step))
2665 return U->getValue();
2666 auto I = ExpandedSCEVs.find(Step);
2667 assert(
I != ExpandedSCEVs.end() &&
"SCEV must be expanded at this point");
2672 const SCEV2ValueTy &ExpandedSCEVs,
2673 std::pair<BasicBlock *, Value *> AdditionalBypass) {
2674 assert(((AdditionalBypass.first && AdditionalBypass.second) ||
2675 (!AdditionalBypass.first && !AdditionalBypass.second)) &&
2676 "Inconsistent information about additional bypass.");
2685 PHINode *OrigPhi = InductionEntry.first;
2694std::pair<BasicBlock *, Value *>
2696 const SCEV2ValueTy &ExpandedSCEVs) {
2782 assert(isa<PHINode>(UI) &&
"Expected LCSSA form");
2783 MissingVals[UI] = EndValue;
2791 auto *UI = cast<Instruction>(U);
2793 assert(isa<PHINode>(UI) &&
"Expected LCSSA form");
2797 if (
II.getInductionBinOp() && isa<FPMathOperator>(
II.getInductionBinOp()))
2798 B.setFastMathFlags(
II.getInductionBinOp()->getFastMathFlags());
2800 Value *CountMinusOne =
B.CreateSub(
2802 CountMinusOne->
setName(
"cmo");
2805 assert(StepVPV &&
"step must have been expanded during VPlan execution");
2807 : State.
get(StepVPV, {0, 0});
2810 II.getKind(),
II.getInductionBinOp());
2811 Escape->
setName(
"ind.escape");
2812 MissingVals[UI] = Escape;
2816 for (
auto &
I : MissingVals) {
2823 if (
PHI->getBasicBlockIndex(MiddleBlock) == -1)
2824 PHI->addIncoming(
I.second, MiddleBlock);
2830struct CSEDenseMapInfo {
2832 return isa<InsertElementInst>(
I) || isa<ExtractElementInst>(
I) ||
2833 isa<ShuffleVectorInst>(
I) || isa<GetElementPtrInst>(
I);
2845 assert(canHandle(
I) &&
"Unknown instruction!");
2847 I->value_op_end()));
2851 if (
LHS == getEmptyKey() ||
RHS == getEmptyKey() ||
2852 LHS == getTombstoneKey() ||
RHS == getTombstoneKey())
2854 return LHS->isIdenticalTo(
RHS);
2865 if (!CSEDenseMapInfo::canHandle(&In))
2871 In.replaceAllUsesWith(V);
2872 In.eraseFromParent();
2886 return CallWideningDecisions.at(std::make_pair(CI, VF)).Cost;
2895 for (
auto &ArgOp : CI->
args())
2904 return std::min(ScalarCallCost, IntrinsicCost);
2906 return ScalarCallCost;
2919 assert(
ID &&
"Expected intrinsic call!");
2922 if (
auto *FPMO = dyn_cast<FPMathOperator>(CI))
2923 FMF = FPMO->getFastMathFlags();
2929 std::back_inserter(ParamTys),
2930 [&](
Type *Ty) { return MaybeVectorizeType(Ty, VF); });
2933 dyn_cast<IntrinsicInst>(CI));
2954 for (
PHINode &PN : Exit->phis())
2982 KV.second->fixPhi(Plan, State);
3022 auto isBlockOfUsePredicated = [&](
Use &U) ->
bool {
3023 auto *
I = cast<Instruction>(U.getUser());
3025 if (
auto *Phi = dyn_cast<PHINode>(
I))
3026 BB = Phi->getIncomingBlock(
3028 return BB == PredBB;
3039 Worklist.
insert(InstsToReanalyze.
begin(), InstsToReanalyze.
end());
3040 InstsToReanalyze.
clear();
3043 while (!Worklist.
empty()) {
3049 if (!
I || isa<PHINode>(
I) || !VectorLoop->contains(
I) ||
3050 I->mayHaveSideEffects() ||
I->mayReadFromMemory())
3058 if (
I->getParent() == PredBB) {
3059 Worklist.
insert(
I->op_begin(),
I->op_end());
3073 I->moveBefore(&*PredBB->getFirstInsertionPt());
3074 Worklist.
insert(
I->op_begin(),
I->op_end());
3086 for (
VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
3091 PHINode *NewPhi = cast<PHINode>(State.
get(VPPhi, 0));
3103void LoopVectorizationCostModel::collectLoopScalars(
ElementCount VF) {
3108 "This function should not be visited twice for the same VF");
3114 Scalars[VF].
insert(Uniforms[VF].begin(), Uniforms[VF].end());
3133 "Widening decision should be ready at this moment");
3134 if (
auto *Store = dyn_cast<StoreInst>(MemAccess))
3135 if (
Ptr == Store->getValueOperand())
3138 "Ptr is neither a value or pointer operand");
3144 auto isLoopVaryingGEP = [&](
Value *
V) {
3155 if (!isLoopVaryingGEP(
Ptr))
3160 auto *
I = cast<Instruction>(
Ptr);
3168 return isa<LoadInst>(U) || isa<StoreInst>(U);
3172 PossibleNonScalarPtrs.
insert(
I);
3190 for (
auto &
I : *BB) {
3191 if (
auto *Load = dyn_cast<LoadInst>(&
I)) {
3192 evaluatePtrUse(Load,
Load->getPointerOperand());
3193 }
else if (
auto *Store = dyn_cast<StoreInst>(&
I)) {
3194 evaluatePtrUse(Store,
Store->getPointerOperand());
3195 evaluatePtrUse(Store,
Store->getValueOperand());
3198 for (
auto *
I : ScalarPtrs)
3199 if (!PossibleNonScalarPtrs.
count(
I)) {
3207 auto ForcedScalar = ForcedScalars.
find(VF);
3208 if (ForcedScalar != ForcedScalars.
end())
3209 for (
auto *
I : ForcedScalar->second) {
3210 LLVM_DEBUG(
dbgs() <<
"LV: Found (forced) scalar instruction: " << *
I <<
"\n");
3219 while (
Idx != Worklist.
size()) {
3221 if (!isLoopVaryingGEP(Dst->getOperand(0)))
3223 auto *Src = cast<Instruction>(Dst->getOperand(0));
3225 auto *J = cast<Instruction>(U);
3226 return !TheLoop->contains(J) || Worklist.count(J) ||
3227 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
3228 isScalarUse(J, Src));
3231 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *Src <<
"\n");
3238 auto *Ind = Induction.first;
3239 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
3248 auto IsDirectLoadStoreFromPtrIndvar = [&](
Instruction *Indvar,
3250 return Induction.second.getKind() ==
3252 (isa<LoadInst>(
I) || isa<StoreInst>(
I)) &&
3259 auto *I = cast<Instruction>(U);
3260 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3261 IsDirectLoadStoreFromPtrIndvar(Ind, I);
3269 auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
3275 auto ScalarIndUpdate =
3277 auto *I = cast<Instruction>(U);
3278 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
3279 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
3281 if (!ScalarIndUpdate)
3286 Worklist.
insert(IndUpdate);
3287 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *Ind <<
"\n");
3288 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *IndUpdate
3302 switch(
I->getOpcode()) {
3305 case Instruction::Call:
3308 return CallWideningDecisions.at(std::make_pair(cast<CallInst>(
I), VF))
3310 case Instruction::Load:
3311 case Instruction::Store: {
3323 case Instruction::UDiv:
3324 case Instruction::SDiv:
3325 case Instruction::SRem:
3326 case Instruction::URem: {
3344 isa<BranchInst, SwitchInst, PHINode, AllocaInst>(
I))
3357 switch(
I->getOpcode()) {
3360 "instruction should have been considered by earlier checks");
3361 case Instruction::Call:
3365 "should have returned earlier for calls not needing a mask");
3367 case Instruction::Load:
3370 case Instruction::Store: {
3378 case Instruction::UDiv:
3379 case Instruction::SDiv:
3380 case Instruction::SRem:
3381 case Instruction::URem:
3387std::pair<InstructionCost, InstructionCost>
3390 assert(
I->getOpcode() == Instruction::UDiv ||
3391 I->getOpcode() == Instruction::SDiv ||
3392 I->getOpcode() == Instruction::SRem ||
3393 I->getOpcode() == Instruction::URem);
3404 ScalarizationCost = 0;
3419 ScalarizationCost += getScalarizationOverhead(
I, VF,
CostKind);
3433 Instruction::Select, VecTy,
3439 Value *Op2 =
I->getOperand(1);
3448 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
3450 return {ScalarizationCost, SafeDivisorCost};
3457 "Decision should not be set yet.");
3459 assert(Group &&
"Must have a group.");
3463 auto &
DL =
I->getDataLayout();
3470 unsigned InterleaveFactor = Group->getFactor();
3471 bool ScalarNI =
DL.isNonIntegralPointerType(ScalarTy);
3472 for (
unsigned i = 0; i < InterleaveFactor; i++) {
3477 bool MemberNI =
DL.isNonIntegralPointerType(
MemberTy);
3479 if (MemberNI != ScalarNI) {
3482 }
else if (MemberNI && ScalarNI &&
3483 ScalarTy->getPointerAddressSpace() !=
3484 MemberTy->getPointerAddressSpace()) {
3494 bool PredicatedAccessRequiresMasking =
3497 bool LoadAccessWithGapsRequiresEpilogMasking =
3498 isa<LoadInst>(
I) && Group->requiresScalarEpilogue() &&
3500 bool StoreAccessWithGapsRequiresMasking =
3501 isa<StoreInst>(
I) && (Group->getNumMembers() < Group->getFactor());
3502 if (!PredicatedAccessRequiresMasking &&
3503 !LoadAccessWithGapsRequiresEpilogMasking &&
3504 !StoreAccessWithGapsRequiresMasking)
3511 "Masked interleave-groups for predicated accesses are not enabled.");
3513 if (Group->isReverse())
3525 assert((isa<LoadInst, StoreInst>(
I)) &&
"Invalid memory instruction");
3541 auto &
DL =
I->getDataLayout();
3548void LoopVectorizationCostModel::collectLoopUniforms(
ElementCount VF) {
3555 "This function should not be visited twice for the same VF");
3559 Uniforms[VF].
clear();
3567 auto isOutOfScope = [&](
Value *V) ->
bool {
3579 auto addToWorklistIfAllowed = [&](
Instruction *
I) ->
void {
3580 if (isOutOfScope(
I)) {
3587 dbgs() <<
"LV: Found not uniform due to requiring predication: " << *
I
3591 LLVM_DEBUG(
dbgs() <<
"LV: Found uniform instruction: " << *
I <<
"\n");
3601 auto *
Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
3603 addToWorklistIfAllowed(Cmp);
3612 if (PrevVF.isVector()) {
3613 auto Iter = Uniforms.
find(PrevVF);
3614 if (Iter != Uniforms.
end() && !Iter->second.contains(
I))
3619 if (isa<LoadInst>(
I))
3630 "Widening decision should be ready at this moment");
3632 if (isUniformMemOpUse(
I))
3635 return (WideningDecision ==
CM_Widen ||
3644 if (isa<StoreInst>(
I) &&
I->getOperand(0) ==
Ptr)
3660 for (
auto &
I : *BB) {
3662 switch (
II->getIntrinsicID()) {
3663 case Intrinsic::sideeffect:
3664 case Intrinsic::experimental_noalias_scope_decl:
3665 case Intrinsic::assume:
3666 case Intrinsic::lifetime_start:
3667 case Intrinsic::lifetime_end:
3669 addToWorklistIfAllowed(&
I);
3678 if (
auto *EVI = dyn_cast<ExtractValueInst>(&
I)) {
3679 assert(isOutOfScope(EVI->getAggregateOperand()) &&
3680 "Expected aggregate value to be loop invariant");
3681 addToWorklistIfAllowed(EVI);
3690 if (isUniformMemOpUse(&
I))
3691 addToWorklistIfAllowed(&
I);
3693 if (isVectorizedMemAccessUse(&
I,
Ptr))
3700 for (
auto *V : HasUniformUse) {
3701 if (isOutOfScope(V))
3703 auto *
I = cast<Instruction>(V);
3704 auto UsersAreMemAccesses =
3706 auto *UI = cast<Instruction>(U);
3707 return TheLoop->contains(UI) && isVectorizedMemAccessUse(UI, V);
3709 if (UsersAreMemAccesses)
3710 addToWorklistIfAllowed(
I);
3717 while (idx != Worklist.
size()) {
3720 for (
auto *OV :
I->operand_values()) {
3722 if (isOutOfScope(OV))
3726 auto *
OP = dyn_cast<PHINode>(OV);
3731 auto *OI = cast<Instruction>(OV);
3733 auto *J = cast<Instruction>(U);
3734 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
3736 addToWorklistIfAllowed(OI);
3748 auto *Ind = Induction.first;
3749 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
3754 auto *I = cast<Instruction>(U);
3755 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3756 isVectorizedMemAccessUse(I, Ind);
3763 auto UniformIndUpdate =
3765 auto *I = cast<Instruction>(U);
3766 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
3767 isVectorizedMemAccessUse(I, IndUpdate);
3769 if (!UniformIndUpdate)
3773 addToWorklistIfAllowed(Ind);
3774 addToWorklistIfAllowed(IndUpdate);
3785 "runtime pointer checks needed. Enable vectorization of this "
3786 "loop with '#pragma clang loop vectorize(enable)' when "
3787 "compiling with -Os/-Oz",
3788 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
3794 "runtime SCEV checks needed. Enable vectorization of this "
3795 "loop with '#pragma clang loop vectorize(enable)' when "
3796 "compiling with -Os/-Oz",
3797 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
3804 "runtime stride == 1 checks needed. Enable vectorization of "
3805 "this loop without such check by compiling with -Os/-Oz",
3806 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
3813bool LoopVectorizationCostModel::isScalableVectorizationAllowed() {
3814 if (IsScalableVectorizationAllowed)
3815 return *IsScalableVectorizationAllowed;
3817 IsScalableVectorizationAllowed =
false;
3823 "ScalableVectorizationDisabled",
ORE,
TheLoop);
3827 LLVM_DEBUG(
dbgs() <<
"LV: Scalable vectorization is available\n");
3830 std::numeric_limits<ElementCount::ScalarTy>::max());
3841 "Scalable vectorization not supported for the reduction "
3842 "operations found in this loop.",
3854 "for all element types found in this loop.",
3861 "for safe distance analysis.",
3866 IsScalableVectorizationAllowed =
true;
3871LoopVectorizationCostModel::getMaxLegalScalableVF(
unsigned MaxSafeElements) {
3872 if (!isScalableVectorizationAllowed())
3876 std::numeric_limits<ElementCount::ScalarTy>::max());
3878 return MaxScalableVF;
3886 "Max legal vector width too small, scalable vectorization "
3890 return MaxScalableVF;
3894 unsigned MaxTripCount,
ElementCount UserVF,
bool FoldTailByMasking) {
3896 unsigned SmallestType, WidestType;
3903 unsigned MaxSafeElements =
3907 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
3909 LLVM_DEBUG(
dbgs() <<
"LV: The max safe fixed VF is: " << MaxSafeFixedVF
3911 LLVM_DEBUG(
dbgs() <<
"LV: The max safe scalable VF is: " << MaxSafeScalableVF
3916 auto MaxSafeUserVF =
3917 UserVF.
isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
3934 <<
" is unsafe, clamping to max safe VF="
3935 << MaxSafeFixedVF <<
".\n");
3940 <<
"User-specified vectorization factor "
3941 <<
ore::NV(
"UserVectorizationFactor", UserVF)
3942 <<
" is unsafe, clamping to maximum safe vectorization factor "
3943 <<
ore::NV(
"VectorizationFactor", MaxSafeFixedVF);
3945 return MaxSafeFixedVF;
3950 <<
" is ignored because scalable vectors are not "
3956 <<
"User-specified vectorization factor "
3957 <<
ore::NV(
"UserVectorizationFactor", UserVF)
3958 <<
" is ignored because the target does not support scalable "
3959 "vectors. The compiler will pick a more suitable value.";
3963 <<
" is unsafe. Ignoring scalable UserVF.\n");
3968 <<
"User-specified vectorization factor "
3969 <<
ore::NV(
"UserVectorizationFactor", UserVF)
3970 <<
" is unsafe. Ignoring the hint to let the compiler pick a "
3971 "more suitable value.";
3976 LLVM_DEBUG(
dbgs() <<
"LV: The Smallest and Widest types: " << SmallestType
3977 <<
" / " << WidestType <<
" bits.\n");
3982 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3983 MaxSafeFixedVF, FoldTailByMasking))
3987 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3988 MaxSafeScalableVF, FoldTailByMasking))
3989 if (MaxVF.isScalable()) {
3990 Result.ScalableVF = MaxVF;
3991 LLVM_DEBUG(
dbgs() <<
"LV: Found feasible scalable VF = " << MaxVF
4004 "Not inserting runtime ptr check for divergent target",
4005 "runtime pointer checks needed. Not enabled for divergent target",
4006 "CantVersionLoopWithDivergentTarget",
ORE,
TheLoop);
4015 "loop trip count is one, irrelevant for vectorization",
4020 switch (ScalarEpilogueStatus) {
4022 return computeFeasibleMaxVF(MaxTC, UserVF,
false);
4027 dbgs() <<
"LV: vector predicate hint/switch found.\n"
4028 <<
"LV: Not allowing scalar epilogue, creating predicated "
4029 <<
"vector loop.\n");
4036 dbgs() <<
"LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
4038 LLVM_DEBUG(
dbgs() <<
"LV: Not allowing scalar epilogue due to low trip "
4057 LLVM_DEBUG(
dbgs() <<
"LV: Cannot fold tail by masking: vectorize with a "
4058 "scalar epilogue instead.\n");
4060 return computeFeasibleMaxVF(MaxTC, UserVF,
false);
4071 "No decisions should have been taken at this point");
4081 std::optional<unsigned> MaxPowerOf2RuntimeVF =
4086 MaxPowerOf2RuntimeVF = std::max<unsigned>(
4087 *MaxPowerOf2RuntimeVF,
4090 MaxPowerOf2RuntimeVF = std::nullopt;
4093 if (MaxPowerOf2RuntimeVF && *MaxPowerOf2RuntimeVF > 0) {
4095 "MaxFixedVF must be a power of 2");
4096 unsigned MaxVFtimesIC =
4097 UserIC ? *MaxPowerOf2RuntimeVF * UserIC : *MaxPowerOf2RuntimeVF;
4101 BackedgeTakenCount, SE->
getOne(BackedgeTakenCount->
getType()));
4107 LLVM_DEBUG(
dbgs() <<
"LV: No tail will remain for any chosen VF.\n");
4121 <<
"LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
4122 "try to generate VP Intrinsics with scalable vector "
4128 "Expected scalable vector factor.");
4138 LLVM_DEBUG(
dbgs() <<
"LV: Cannot fold tail by masking: vectorize with a "
4139 "scalar epilogue instead.\n");
4145 LLVM_DEBUG(
dbgs() <<
"LV: Can't fold tail by masking: don't vectorize\n");
4151 "Unable to calculate the loop count due to complex control flow",
4152 "unable to calculate the loop count due to complex control flow",
4158 "Cannot optimize for size and vectorize at the same time.",
4159 "cannot optimize for size and vectorize at the same time. "
4160 "Enable vectorization of this loop with '#pragma clang loop "
4161 "vectorize(enable)' when compiling with -Os/-Oz",
4166ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
4167 unsigned MaxTripCount,
unsigned SmallestType,
unsigned WidestType,
4169 bool ComputeScalableMaxVF = MaxSafeVF.
isScalable();
4177 "Scalable flags must match");
4185 ComputeScalableMaxVF);
4186 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
4188 << (MaxVectorElementCount * WidestType) <<
" bits.\n");
4190 if (!MaxVectorElementCount) {
4192 << (ComputeScalableMaxVF ?
"scalable" :
"fixed")
4193 <<
" vector registers.\n");
4197 unsigned WidestRegisterMinEC = MaxVectorElementCount.getKnownMinValue();
4198 if (MaxVectorElementCount.isScalable() &&
4202 WidestRegisterMinEC *= Min;
4211 if (MaxTripCount && MaxTripCount <= WidestRegisterMinEC &&
4219 LLVM_DEBUG(
dbgs() <<
"LV: Clamping the MaxVF to maximum power of two not "
4220 "exceeding the constant trip count: "
4221 << ClampedUpperTripCount <<
"\n");
4223 ClampedUpperTripCount,
4224 FoldTailByMasking ? MaxVectorElementCount.isScalable() :
false);
4237 ComputeScalableMaxVF);
4238 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
4252 for (
int I = RUs.size() - 1;
I >= 0; --
I) {
4253 const auto &MLU = RUs[
I].MaxLocalUsers;
4254 if (
all_of(MLU, [&](
decltype(MLU.front()) &LU) {
4255 return LU.second <= TTI.getNumberOfRegisters(LU.first);
4265 <<
") with target's minimum: " << MinVF <<
'\n');
4281static std::optional<unsigned>
4283 const Function *Fn = L->getHeader()->getParent();
4287 auto Max = Attr.getVScaleRangeMax();
4288 if (Max && Min == Max)
bool LoopVectorizationPlanner::isMoreProfitable(
  unsigned EstimatedWidthA = A.Width.getKnownMinValue();
  unsigned EstimatedWidthB = B.Width.getKnownMinValue();
    if (A.Width.isScalable())
      EstimatedWidthA *= *VScale;
    if (B.Width.isScalable())
      EstimatedWidthB *= *VScale;

      A.Width.isScalable() && !B.Width.isScalable();

    return CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);

  auto GetCostForTC = [MaxTripCount, this](unsigned VF,
      return VectorCost * divideCeil(MaxTripCount, VF);
    return VectorCost * (MaxTripCount / VF) + ScalarCost * (MaxTripCount % VF);

  auto RTCostA = GetCostForTC(EstimatedWidthA, CostA, A.ScalarCost);
  auto RTCostB = GetCostForTC(EstimatedWidthB, CostB, B.ScalarCost);
  return CmpFn(RTCostA, RTCostB);
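
  // Worked example of the per-lane comparison above (illustrative numbers):
  // candidate A with VF=4 and cost 20 versus candidate B with VF=8 and cost 36
  // compares 20 * 8 = 160 against 36 * 4 = 144, so B wins even though its
  // absolute per-iteration cost is higher. When a maximum trip count is known,
  // GetCostForTC is used instead and also charges the scalar remainder,
  // (MaxTripCount % VF) * ScalarCost, when the tail is not folded.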
4352 using RecipeVFPair = std::pair<VPRecipeBase *, ElementCount>;
4355 for (
const auto &Plan : VPlans) {
4360 for (
VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
4361 for (
auto &R : *VPBB) {
4362 if (!R.cost(VF, CostCtx).isValid())
4368 if (InvalidCosts.
empty())
4376 for (
auto &Pair : InvalidCosts)
4377 if (!Numbering.
count(Pair.first))
4378 Numbering[Pair.first] =
I++;
4381 sort(InvalidCosts, [&Numbering](RecipeVFPair &
A, RecipeVFPair &
B) {
4382 if (Numbering[
A.first] != Numbering[
B.first])
4383 return Numbering[
A.first] < Numbering[
B.first];
4384 const auto &
LHS =
A.second;
4385 const auto &
RHS =
B.second;
4386 return std::make_tuple(
LHS.isScalable(),
LHS.getKnownMinValue()) <
4387 std::make_tuple(
RHS.isScalable(),
RHS.getKnownMinValue());
4399 Subset =
Tail.take_front(1);
4406 [](
const auto *R) {
return Instruction::PHI; })
4407 .Case<VPWidenSelectRecipe>(
4408 [](
const auto *R) {
return Instruction::Select; })
4409 .Case<VPWidenStoreRecipe>(
4410 [](
const auto *R) {
return Instruction::Store; })
4411 .Case<VPWidenLoadRecipe>(
4412 [](
const auto *R) {
return Instruction::Load; })
4413 .Case<VPWidenCallRecipe>(
4414 [](
const auto *R) {
return Instruction::Call; })
4417 [](
const auto *R) {
return R->getOpcode(); })
4419 return R->getStoredValues().empty() ? Instruction::Load
4420 : Instruction::Store;
4428 if (Subset ==
Tail ||
Tail[Subset.size()].first != R) {
4429 std::string OutString;
4431 assert(!Subset.empty() &&
"Unexpected empty range");
4432 OS <<
"Recipe with invalid costs prevented vectorization at VF=(";
4433 for (
const auto &Pair : Subset)
4434 OS << (Pair.second == Subset.front().second ?
"" :
", ") << Pair.second;
4436 if (Opcode == Instruction::Call) {
4437 auto *WidenCall = dyn_cast<VPWidenCallRecipe>(R);
4439 WidenCall ? WidenCall->getCalledScalarFunction()
4440 : cast<Function>(R->getOperand(R->getNumOperands() - 1)
4441 ->getLiveInIRValue());
4442 OS <<
" call to " << CalledFn->
getName();
4448 Tail =
Tail.drop_front(Subset.size());
4452 Subset =
Tail.take_front(Subset.size() + 1);
4453 }
while (!
Tail.empty());
4467 for (
VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4476 switch (R.getVPDefID()) {
4477 case VPDef::VPDerivedIVSC:
4478 case VPDef::VPScalarIVStepsSC:
4479 case VPDef::VPScalarCastSC:
4480 case VPDef::VPReplicateSC:
4481 case VPDef::VPInstructionSC:
4482 case VPDef::VPCanonicalIVPHISC:
4483 case VPDef::VPVectorPointerSC:
4484 case VPDef::VPExpandSCEVSC:
4485 case VPDef::VPEVLBasedIVPHISC:
4486 case VPDef::VPPredInstPHISC:
4487 case VPDef::VPBranchOnMaskSC:
4489 case VPDef::VPReductionSC:
4490 case VPDef::VPActiveLaneMaskPHISC:
4491 case VPDef::VPWidenCallSC:
4492 case VPDef::VPWidenCanonicalIVSC:
4493 case VPDef::VPWidenCastSC:
4494 case VPDef::VPWidenGEPSC:
4495 case VPDef::VPWidenSC:
4496 case VPDef::VPWidenSelectSC:
4497 case VPDef::VPBlendSC:
4498 case VPDef::VPFirstOrderRecurrencePHISC:
4499 case VPDef::VPWidenPHISC:
4500 case VPDef::VPWidenIntOrFpInductionSC:
4501 case VPDef::VPWidenPointerInductionSC:
4502 case VPDef::VPReductionPHISC:
4503 case VPDef::VPInterleaveSC:
4504 case VPDef::VPWidenLoadEVLSC:
4505 case VPDef::VPWidenLoadSC:
4506 case VPDef::VPWidenStoreEVLSC:
4507 case VPDef::VPWidenStoreSC:
4513 auto WillWiden = [&
TTI, VF](
Type *ScalarTy) {
4531 if (R.getNumDefinedValues() == 0 &&
4532 !isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe, VPInterleaveRecipe>(
4541 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
4543 if (!Visited.
insert({ScalarTy}).second)
4545 if (WillWiden(ScalarTy))
4555 LLVM_DEBUG(
dbgs() <<
"LV: Scalar loop costs: " << ExpectedCost <<
".\n");
4556 assert(ExpectedCost.
isValid() &&
"Unexpected invalid cost for scalar loop");
4558 [](std::unique_ptr<VPlan> &
P) {
4561 "Expected Scalar VF to be a candidate");
4568 if (ForceVectorization &&
4569 (VPlans.
size() > 1 || !VPlans[0]->hasScalarVFOnly())) {
4576 for (
auto &
P : VPlans) {
4586 unsigned AssumedMinimumVscale =
4589 Candidate.Width.isScalable()
4590 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
4591 : Candidate.Width.getFixedValue();
4593 <<
" costs: " << (Candidate.Cost / Width));
4594 if (VF.isScalable())
4596 << AssumedMinimumVscale <<
")");
4603 <<
"LV: Not considering vector loop of width " << VF
4604 <<
" because it will not generate any vector instructions.\n");
4608 if (isMoreProfitable(Candidate, ChosenFactor))
4609 ChosenFactor = Candidate;
4615 "There are conditional stores.",
4616 "store that is conditionally executed prevents vectorization",
4617 "ConditionalStore", ORE, OrigLoop);
4618 ChosenFactor = ScalarCost;
4622 !isMoreProfitable(ChosenFactor, ScalarCost))
dbgs()
4623 <<
"LV: Vectorization seems to be not beneficial, "
4624 <<
"but was forced by a user.\n");
4626 return ChosenFactor;
4629bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
4634 [&](
PHINode &Phi) { return Legal->isFixedOrderRecurrence(&Phi); }))
4644 if (!OrigLoop->
contains(cast<Instruction>(U)))
4648 if (!OrigLoop->
contains(cast<Instruction>(U)))
4677 unsigned Multiplier = 1;
4689 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization is disabled.\n");
4694 LLVM_DEBUG(
dbgs() <<
"LEV: Unable to vectorize epilogue because no "
4695 "epilogue is allowed.\n");
4701 if (!isCandidateForEpilogueVectorization(MainLoopVF)) {
4702 LLVM_DEBUG(
dbgs() <<
"LEV: Unable to vectorize epilogue because the loop "
4703 "is not a supported candidate.\n");
4708 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization factor is forced.\n");
4711 return {ForcedEC, 0, 0};
4713 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization forced factor is not "
4722 dbgs() <<
"LEV: Epilogue vectorization skipped due to opt for size.\n");
4727 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization is not profitable for "
4739 EstimatedRuntimeVF *= *VScale;
4744 const SCEV *RemainingIterations =
nullptr;
4745 for (
auto &NextVF : ProfitableVFs) {
4752 if ((!NextVF.Width.isScalable() && MainLoopVF.
isScalable() &&
4759 if (!MainLoopVF.
isScalable() && !NextVF.Width.isScalable()) {
4761 if (!RemainingIterations) {
4768 SE.
getConstant(TCType, NextVF.Width.getKnownMinValue()),
4769 RemainingIterations))
4773 if (Result.Width.isScalar() || isMoreProfitable(NextVF, Result))
4779 << Result.Width <<
"\n");
std::pair<unsigned, unsigned>
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;

    MaxWidth = std::min<unsigned>(
        MaxWidth, std::min<unsigned>(

      MinWidth = std::min<unsigned>(
          MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
      MaxWidth = std::max<unsigned>(
          MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());

  return {MinWidth, MaxWidth};
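
// Example: a loop that loads i8 elements, widens them and stores i32 results
// yields {MinWidth, MaxWidth} = {8, 32}. With 128-bit vector registers the
// widest type allows VF=4, whereas -vectorizer-maximize-bandwidth sizes the VF
// by the smallest type, allowing VF=16 with the i32 operations legalized
// across several registers.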
4820 for (
Instruction &
I : BB->instructionsWithoutDebug()) {
4828 if (!isa<LoadInst>(
I) && !isa<StoreInst>(
I) && !isa<PHINode>(
I))
4833 if (
auto *PN = dyn_cast<PHINode>(&
I)) {
4847 if (
auto *ST = dyn_cast<StoreInst>(&
I))
4848 T = ST->getValueOperand()->getType();
4851 "Expected the load/store/recurrence type to be sized");
4880 LLVM_DEBUG(
dbgs() <<
"LV: Preference for VP intrinsics indicated. "
4881 "Unroll factor forced to be 1.\n");
4894 if (LoopCost == 0) {
4896 assert(LoopCost.
isValid() &&
"Expected to have chosen a VF with valid cost");
4906 for (
auto& pair : R.MaxLocalUsers) {
4907 pair.second = std::max(pair.second, 1U);
4921 unsigned IC = UINT_MAX;
4923 for (
auto& pair : R.MaxLocalUsers) {
4935 unsigned MaxLocalUsers = pair.second;
4936 unsigned LoopInvariantRegs = 0;
4937 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
4938 LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
4940 unsigned TmpIC =
llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
4944 TmpIC =
llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
4945 std::max(1U, (MaxLocalUsers - 1)));
4948 IC = std::min(IC, TmpIC);
4966 EstimatedVF *= *VScale;
4968 assert(EstimatedVF >= 1 &&
"Estimated VF shouldn't be less than 1");
    unsigned AvailableTC =

        std::max(1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
    unsigned InterleaveCountLB = bit_floor(std::max(
        1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
    MaxInterleaveCount = InterleaveCountLB;

    if (InterleaveCountUB != InterleaveCountLB) {
      unsigned TailTripCountUB =
          (AvailableTC % (EstimatedVF * InterleaveCountUB));
      unsigned TailTripCountLB =
          (AvailableTC % (EstimatedVF * InterleaveCountLB));
      if (TailTripCountUB == TailTripCountLB)
        MaxInterleaveCount = InterleaveCountUB;
  } else if (BestKnownTC && *BestKnownTC > 0) {
                               ? (*BestKnownTC) - 1

    MaxInterleaveCount = bit_floor(std::max(
        1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));

  assert(MaxInterleaveCount > 0 &&
         "Maximum interleave count must be greater than 0");

  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;

  IC = std::max(1u, IC);

  assert(IC > 0 && "Interleave count must be greater than 0.");
5032 if (VF.
isVector() && HasReductions) {
5033 LLVM_DEBUG(
dbgs() <<
"LV: Interleaving because of reductions.\n");
5041 bool ScalarInterleavingRequiresPredication =
5043 return Legal->blockNeedsPredication(BB);
5045 bool ScalarInterleavingRequiresRuntimePointerCheck =
5051 <<
"LV: IC is " << IC <<
'\n'
5052 <<
"LV: VF is " << VF <<
'\n');
5053 const bool AggressivelyInterleaveReductions =
5055 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
5056 !ScalarInterleavingRequiresPredication && LoopCost <
SmallLoopCost) {
5060 unsigned SmallIC = std::min(IC, (
unsigned)llvm::bit_floor<uint64_t>(
5067 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5068 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5074 bool HasSelectCmpReductions =
5077 const RecurrenceDescriptor &RdxDesc = Reduction.second;
5078 return RecurrenceDescriptor::isAnyOfRecurrenceKind(
5079 RdxDesc.getRecurrenceKind());
5081 if (HasSelectCmpReductions) {
5082 LLVM_DEBUG(
dbgs() <<
"LV: Not interleaving select-cmp reductions.\n");
5092 bool HasOrderedReductions =
5094 const RecurrenceDescriptor &RdxDesc = Reduction.second;
5095 return RdxDesc.isOrdered();
5097 if (HasOrderedReductions) {
5099 dbgs() <<
"LV: Not interleaving scalar ordered reductions.\n");
5104 SmallIC = std::min(SmallIC,
F);
5105 StoresIC = std::min(StoresIC,
F);
5106 LoadsIC = std::min(LoadsIC,
F);
5110 std::max(StoresIC, LoadsIC) > SmallIC) {
5112 dbgs() <<
"LV: Interleaving to saturate store or load ports.\n");
5113 return std::max(StoresIC, LoadsIC);
5118 if (VF.
isScalar() && AggressivelyInterleaveReductions) {
5122 return std::max(IC / 2, SmallIC);
5124 LLVM_DEBUG(
dbgs() <<
"LV: Interleaving to reduce branch cost.\n");
5131 if (AggressivelyInterleaveReductions) {
5181 for (
Instruction &
I : BB->instructionsWithoutDebug()) {
5185 for (
Value *U :
I.operands()) {
5186 auto *Instr = dyn_cast<Instruction>(U);
5197 LoopInvariants.
insert(Instr);
5202 EndPoint[Instr] = IdxToInstr.
size();
5220 LLVM_DEBUG(
dbgs() <<
"LV(REG): Calculating max register usage:\n");
5222 const auto &TTICapture =
TTI;
5229 for (
unsigned int i = 0, s = IdxToInstr.
size(); i < s; ++i) {
5233 InstrList &
List = TransposeEnds[i];
5248 for (
unsigned j = 0, e = VFs.
size(); j < e; ++j) {
5256 if (VFs[j].isScalar()) {
5257 for (
auto *Inst : OpenIntervals) {
5266 for (
auto *Inst : OpenIntervals) {
5279 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
5285 auto &Entry = MaxUsages[j][pair.first];
5286 Entry = std::max(Entry, pair.second);
5291 << OpenIntervals.
size() <<
'\n');
5297 for (
unsigned i = 0, e = VFs.
size(); i < e; ++i) {
5303 for (
auto *Inst : LoopInvariants) {
5306 bool IsScalar =
all_of(Inst->users(), [&](
User *U) {
5307 auto *I = cast<Instruction>(U);
5308 return TheLoop != LI->getLoopFor(I->getParent()) ||
5309 isScalarAfterVectorization(I, VFs[i]);
5315 Invariant[ClassID] += GetRegUsage(Inst->getType(), VF);
5319 dbgs() <<
"LV(REG): VF = " << VFs[i] <<
'\n';
5320 dbgs() <<
"LV(REG): Found max usage: " << MaxUsages[i].
size()
5322 for (
const auto &pair : MaxUsages[i]) {
5323 dbgs() <<
"LV(REG): RegisterClass: "
5327 dbgs() <<
"LV(REG): Found invariant usage: " << Invariant.
size()
5329 for (
const auto &pair : Invariant) {
5330 dbgs() <<
"LV(REG): RegisterClass: "
5344bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(
Instruction *
I,
5355 "Expecting a scalar emulated instruction");
5356 return isa<LoadInst>(
I) ||
5357 (isa<StoreInst>(
I) &&
5374 PredicatedBBsAfterVectorization[VF].
clear();
5391 !useEmulatedMaskMemRefHack(&
I, VF) &&
5392 computePredInstDiscount(&
I, ScalarCosts, VF) >= 0)
5395 PredicatedBBsAfterVectorization[VF].
insert(BB);
5397 if (Pred->getSingleSuccessor() == BB)
5398 PredicatedBBsAfterVectorization[VF].
insert(Pred);
5407 "Instruction marked uniform-after-vectorization will be predicated");
5425 if (!
I->hasOneUse() || PredInst->
getParent() !=
I->getParent() ||
5444 for (
Use &U :
I->operands())
5445 if (
auto *J = dyn_cast<Instruction>(U.get()))
5457 while (!Worklist.
empty()) {
5461 if (ScalarCosts.contains(
I))
5491 for (
Use &U :
I->operands())
5492 if (
auto *J = dyn_cast<Instruction>(
U.get())) {
5494 "Instruction has non-scalar type");
5495 if (canBeScalarized(J))
5497 else if (needsExtract(J, VF)) {
5499 cast<VectorType>(
ToVectorTy(J->getType(), VF)),
5510 Discount += VectorCost - ScalarCost;
5511 ScalarCosts[
I] = ScalarCost;
5525 for (
Instruction &
I : BB->instructionsWithoutDebug()) {
5538 LLVM_DEBUG(
dbgs() <<
"LV: Found an estimated cost of " <<
C <<
" for VF "
5539 << VF <<
" For instruction: " <<
I <<
'\n');
5567 const Loop *TheLoop) {
5569 auto *Gep = dyn_cast<GetElementPtrInst>(
Ptr);
5575 auto SE = PSE.
getSE();
5576 unsigned NumOperands = Gep->getNumOperands();
5577 for (
unsigned i = 1; i < NumOperands; ++i) {
5578 Value *Opd = Gep->getOperand(i);
5580 !
Legal->isInductionVariable(Opd))
5589LoopVectorizationCostModel::getMemInstScalarizationCost(
Instruction *
I,
5592 "Scalarization cost of instruction implies vectorization.");
5639 if (useEmulatedMaskMemRefHack(
I, VF))
5649LoopVectorizationCostModel::getConsecutiveMemOpCost(
Instruction *
I,
5652 auto *VectorTy = cast<VectorType>(
ToVectorTy(ValTy, VF));
5658 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5659 "Stride should be 1 or -1 for consecutive memory access");
5671 bool Reverse = ConsecutiveStride < 0;
5679LoopVectorizationCostModel::getUniformMemOpCost(
Instruction *
I,
5684 auto *VectorTy = cast<VectorType>(
ToVectorTy(ValTy, VF));
5688 if (isa<LoadInst>(
I)) {
5700 (isLoopInvariantStoreValue
5707LoopVectorizationCostModel::getGatherScatterCost(
Instruction *
I,
5710 auto *VectorTy = cast<VectorType>(
ToVectorTy(ValTy, VF));
5721LoopVectorizationCostModel::getInterleaveGroupCost(
Instruction *
I,
5724 auto *VectorTy = cast<VectorType>(
ToVectorTy(ValTy, VF));
5729 assert(Group &&
"Fail to get an interleaved access group.");
5731 unsigned InterleaveFactor = Group->getFactor();
5736 for (
unsigned IF = 0;
IF < InterleaveFactor;
IF++)
5737 if (Group->getMember(IF))
5741 bool UseMaskForGaps =
5743 (isa<StoreInst>(
I) && (Group->getNumMembers() < Group->getFactor()));
5745 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
5748 if (Group->isReverse()) {
5751 "Reverse masked interleaved access not supported.");
5752 Cost += Group->getNumMembers() *
5759std::optional<InstructionCost>
5765 if (InLoopReductions.
empty() || VF.
isScalar() || !isa<VectorType>(Ty))
5766 return std::nullopt;
5767 auto *VectorTy = cast<VectorType>(Ty);
5784 return std::nullopt;
5795 if (!InLoopReductionImmediateChains.
count(RetI))
5796 return std::nullopt;
5800 Instruction *LastChain = InLoopReductionImmediateChains.
at(RetI);
5802 while (!isa<PHINode>(ReductionPhi))
5803 ReductionPhi = InLoopReductionImmediateChains.
at(ReductionPhi);
5835 : dyn_cast<Instruction>(RetI->
getOperand(1));
5840 if (RedOp && RdxDesc.
getOpcode() == Instruction::Add &&
5853 bool IsUnsigned = isa<ZExtInst>(Op0);
5870 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
5871 return I == RetI ? RedCost : 0;
5875 bool IsUnsigned = isa<ZExtInst>(RedOp);
5884 if (RedCost.
isValid() && RedCost < BaseCost + ExtCost)
5885 return I == RetI ? RedCost : 0;
5886 }
else if (RedOp && RdxDesc.
getOpcode() == Instruction::Add &&
5891 bool IsUnsigned = isa<ZExtInst>(Op0);
    if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
      Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;

          (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
        return I == RetI ? RedCost : 0;

      if (RedCost.isValid() && RedCost < MulCost + BaseCost)
        return I == RetI ? RedCost : 0;

  return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
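
// The matching above allows a chain such as
//   %a.ext = sext i8 %a to i32
//   %b.ext = sext i8 %b to i32
//   %mul   = mul i32 %a.ext, %b.ext
//   %sum   = add i32 %acc, %mul
// to be costed as one extending multiply-accumulate reduction (for example an
// MLA-style reduction on targets that provide it) rather than as the sum of
// the individual extend, multiply and add costs; the reduced cost is only used
// when it is actually cheaper than that sum.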
5942LoopVectorizationCostModel::getMemoryInstructionCost(
Instruction *
I,
5972 if (!
RetTy->isVoidTy() &&
5994 for (
auto *V : filterExtractingOperands(Ops, VF))
5997 filterExtractingOperands(Ops, VF), Tys,
CostKind);
6019 auto isLegalToScalarize = [&]() {
6033 if (isa<LoadInst>(
I))
6038 auto &SI = cast<StoreInst>(
I);
6056 if (GatherScatterCost < ScalarizationCost)
6068 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6069 "Expected consecutive stride.");
6078 unsigned NumAccesses = 1;
6081 assert(Group &&
"Fail to get an interleaved access group.");
6087 NumAccesses = Group->getNumMembers();
6089 InterleaveCost = getInterleaveGroupCost(&
I, VF);
6094 ? getGatherScatterCost(&
I, VF) * NumAccesses
6098 getMemInstScalarizationCost(&
I, VF) * NumAccesses;
6104 if (InterleaveCost <= GatherScatterCost &&
6105 InterleaveCost < ScalarizationCost) {
6107 Cost = InterleaveCost;
6108 }
else if (GatherScatterCost < ScalarizationCost) {
6110 Cost = GatherScatterCost;
6113 Cost = ScalarizationCost;
6147 while (!Worklist.
empty()) {
6149 for (
auto &
Op :
I->operands())
6150 if (
auto *InstOp = dyn_cast<Instruction>(
Op))
6151 if ((InstOp->getParent() ==
I->getParent()) && !isa<PHINode>(InstOp) &&
6152 AddrDefs.
insert(InstOp).second)
6156 for (
auto *
I : AddrDefs) {
6157 if (isa<LoadInst>(
I)) {
6171 for (
unsigned I = 0;
I < Group->getFactor(); ++
I) {
6188 "Trying to set a vectorization decision for a scalar VF");
6207 for (
auto &ArgOp : CI->
args())
6212 for (
Type *ScalarTy : ScalarTys)
6221 std::nullopt, *RedCost);
6235 getScalarizationOverhead(CI, VF,
CostKind);
6241 bool UsesMask =
false;
6247 if (
Info.Shape.VF != VF)
6251 if (MaskRequired && !
Info.isMasked())
6255 bool ParamsOk =
true;
6257 switch (Param.ParamKind) {
6276 dyn_cast<SCEVAddRecExpr>(SE->
getSCEV(ScalarParam));
6278 if (!SAR || SAR->getLoop() !=
TheLoop) {
6284 dyn_cast<SCEVConstant>(SAR->getStepRecurrence(*SE));
6312 if (VecFunc && UsesMask && !MaskRequired)
6332 if (VectorCost <=
Cost) {
6337 if (IntrinsicCost <=
Cost) {
6338 Cost = IntrinsicCost;
6357 return InstsToScalarize[VF][
I];
6360 auto ForcedScalar = ForcedScalars.
find(VF);
6361 if (VF.
isVector() && ForcedScalar != ForcedScalars.
end()) {
6362 auto InstSet = ForcedScalar->second;
6363 if (InstSet.count(
I))
6374 auto hasSingleCopyAfterVectorization = [
this](
Instruction *
I,
6379 auto Scalarized = InstsToScalarize.
find(VF);
6380 assert(Scalarized != InstsToScalarize.
end() &&
6381 "VF not yet analyzed for scalarization profitability");
6382 return !Scalarized->second.count(
I) &&
6384 auto *UI = cast<Instruction>(U);
6385 return !Scalarized->second.count(UI);
6388 (void) hasSingleCopyAfterVectorization;
6397 assert(
I->getOpcode() == Instruction::GetElementPtr ||
6398 I->getOpcode() == Instruction::PHI ||
6399 (
I->getOpcode() == Instruction::BitCast &&
6400 I->getType()->isPointerTy()) ||
6401 hasSingleCopyAfterVectorization(
I, VF));
6411 switch (I->getOpcode()) {
6412 case Instruction::GetElementPtr:
6418 case Instruction::Br: {
6425 bool ScalarPredicatedBB = false;
6431 ScalarPredicatedBB = true;
6433 if (ScalarPredicatedBB) {
6455 case Instruction::Switch: {
6458 auto *Switch = cast<SwitchInst>(I);
6459 return Switch->getNumCases() *
6462 ToVectorTy(Switch->getCondition()->getType(), VF),
6466 case Instruction::PHI: {
6467 auto *Phi = cast<PHINode>(I);
6479 cast<VectorType>(VectorTy), Mask, CostKind,
6487 return (Phi->getNumIncomingValues() - 1) *
6489 Instruction::Select, ToVectorTy(Phi->getType(), VF),
6495 case Instruction::UDiv:
6496 case Instruction::SDiv:
6497 case Instruction::URem:
6498 case Instruction::SRem:
6502 ScalarCost : SafeDivisorCost;
6506 case Instruction::Add:
6507 case Instruction::FAdd:
6508 case Instruction::Sub:
6509 case Instruction::FSub:
6510 case Instruction::Mul:
6511 case Instruction::FMul:
6512 case Instruction::FDiv:
6513 case Instruction::FRem:
6514 case Instruction::Shl:
6515 case Instruction::LShr:
6516 case Instruction::AShr:
6517 case Instruction::And:
6518 case Instruction::Or:
6519 case Instruction::Xor: {
6523 if (I->getOpcode() == Instruction::Mul &&
6534 Value *Op2 = I->getOperand(1);
6543 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6546 case Instruction::FNeg: {
6549 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6550 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6551 I->getOperand(0), I);
6553 case Instruction::Select: {
6555 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6558 const Value *Op0, *Op1;
6575 Type *CondTy = SI->getCondition()->getType();
6580 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
6581 Pred = Cmp->getPredicate();
6585 case Instruction::ICmp:
6586 case Instruction::FCmp: {
6587 Type *ValTy = I->getOperand(0)->getType();
6588 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6593 cast<CmpInst>(I)->getPredicate(), CostKind,
6596 case Instruction::Store:
6597 case Instruction::Load: {
6602 "CM decision should be taken at this point");
6609 return getMemoryInstructionCost(I, VF);
6611 case Instruction::BitCast:
6612 if (I->getType()->isPointerTy())
6615 case Instruction::ZExt:
6616 case Instruction::SExt:
6617 case Instruction::FPToUI:
6618 case Instruction::FPToSI:
6619 case Instruction::FPExt:
6620 case Instruction::PtrToInt:
6621 case Instruction::IntToPtr:
6622 case Instruction::SIToFP:
6623 case Instruction::UIToFP:
6624 case Instruction::Trunc:
6625 case Instruction::FPTrunc: {
6628 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
6629 "Expected a load or a store!");
6655 unsigned Opcode = I->getOpcode();
6658 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
6660 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
6661 CCH = ComputeCCH(Store);
6664 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
6665 Opcode == Instruction::FPExt) {
6666 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
6667 CCH = ComputeCCH(Load);
6674 auto *Trunc = cast<TruncInst>(I);
6676 Trunc->getSrcTy(), CCH, CostKind, Trunc);
6683 Type *SrcScalarTy = I->getOperand(0)->getType();
6684 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6695 (I->getOpcode() == Instruction::ZExt ||
6696 I->getOpcode() == Instruction::SExt))
6702 case Instruction::Call:
6704 case Instruction::ExtractValue:
6706 case Instruction::Alloca:
6729 auto IsLiveOutDead = [this, RequiresScalarEpilogue](User *U) {
6730 return RequiresScalarEpilogue &&
6739 if ((SI = dyn_cast<StoreInst>(&I)) &&
6742 auto I = DeadInvariantStoreOps.insert({SI->getPointerOperand(), {}});
6743 I.first->second.push_back(SI->getValueOperand());
6752 all_of(I.users(), [this, IsLiveOutDead](User *U) {
6753 return VecValuesToIgnore.contains(U) ||
6754 ValuesToIgnore.contains(U) || IsLiveOutDead(U);
6763 if (Group->getInsertPos() == &I)
6766 DeadInterleavePointerOps.push_back(PointerOp);
6772 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
6773 auto *Op = dyn_cast<Instruction>(DeadInterleavePointerOps[I]);
6775 Instruction *UI = cast<Instruction>(U);
6776 return !VecValuesToIgnore.contains(U) &&
6777 (!isAccessInterleaved(UI) ||
6778 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
6782 DeadInterleavePointerOps.append(Op->op_begin(), Op->op_end());
6785 for (const auto &[_, Ops] : DeadInvariantStoreOps) {
6792 for (unsigned I = 0; I != DeadOps.size(); ++I) {
6793 auto *Op = dyn_cast<Instruction>(DeadOps[I]);
6796 (isa<PHINode>(Op) && Op->getParent() == Header) ||
6799 return !VecValuesToIgnore.contains(U) &&
6800 !ValuesToIgnore.contains(U) && !IsLiveOutDead(U);
6811 [this](User *U) { return ValuesToIgnore.contains(U); }))
6815 DeadOps.append(Op->op_begin(), Op->op_end());
6856 bool InLoop = !ReductionOperations.empty();
6859 InLoopReductions.insert(Phi);
6862 for (auto *I : ReductionOperations) {
6863 InLoopReductionImmediateChains[I] = LastChain;
6867 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
6868 << " reduction for phi: " << *Phi << "\n");
6876 return tryInsertInstruction(
6889 unsigned WidestType;
6898 unsigned N = RegSize.getKnownMinValue() / WidestType;
6919 << "overriding computed VF.\n");
6924 LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
6925 << "not supported by the target.\n");
6927 "Scalable vectorization requested but not supported by the target",
6928 "the scalable user-specified vectorization width for outer-loop "
6929 "vectorization cannot be used because the target does not support "
6930 "scalable vectors.",
6931 "ScalableVFUnfeasible", ORE, OrigLoop);
6936 "VF needs to be a power of two");
6938 << "VF " << VF << " to build VPlans.\n");
6945 return {VF, 0, 0};
6949 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6950 "VPlan-native path.\n");
6954std::optional<VectorizationFactor>
6962 return std::nullopt;
6969 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6970 "which requires masked-interleaved support.\n");
6984 if (!UserVF.isZero() && UserVFIsLegal) {
6986 "VF needs to be a power of two");
6992 buildVPlansWithVPRecipes(UserVF, UserVF);
6994 LLVM_DEBUG(dbgs() << "LV: No VPlan could be built for " << UserVF
6996 return std::nullopt;
7000 return {{UserVF, 0, 0}};
7003 "InvalidCost", ORE, OrigLoop);
7016 for (const auto &VF : VFCandidates) {
7031 return std::nullopt;
7033 [](std::unique_ptr<VPlan> &P) { return P->hasScalarVFOnly(); }))
7045 return std::nullopt;
7083 for (unsigned I = 0; I != IVInsts.size(); I++) {
7084 for (Value *Op : IVInsts[I]->operands()) {
7085 auto *OpI = dyn_cast<Instruction>(Op);
7086 if (Op == IV || !OpI || !OrigLoop->contains(OpI) || !Op->hasOneUse())
7092 for (User *U : IV->users()) {
7093 auto *CI = cast<Instruction>(U);
7094 if (!CostCtx.CM.isOptimizableIVTruncate(CI, VF))
7099 if (!CostCtx.SkipCostComputation.insert(IVInst).second)
7103 dbgs() << "Cost of " << InductionCost << " for VF " << VF
7104 << ": induction instruction " << *IVInst << "\n";
7106 Cost += InductionCost;
7119 auto *Term = dyn_cast<BranchInst>(EB->getTerminator());
7122 if (auto *CondI = dyn_cast<Instruction>(Term->getOperand(0))) {
7123 ExitInstrs.insert(CondI);
7127 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
7130 !CostCtx.SkipCostComputation.insert(CondI).second)
7132 Cost += CostCtx.getLegacyCost(CondI, VF);
7134 auto *OpI = dyn_cast<Instruction>(Op);
7135 if (!OpI || any_of(OpI->users(), [&ExitInstrs, this](User *U) {
7136 return OrigLoop->contains(cast<Instruction>(U)->getParent()) &&
7137 !ExitInstrs.contains(cast<Instruction>(U));
7153 RdxDesc.getRecurrenceKind()))
7159 RdxDesc.getRecurrenceKind())) {
7161 RedPhi->users(), [](User *U) { return isa<SelectInst>(U); }));
7162 assert(!CostCtx.SkipCostComputation.contains(Select) &&
7163 "reduction op visited multiple times");
7164 CostCtx.SkipCostComputation.insert(Select);
7165 auto ReductionCost = CostCtx.getLegacyCost(Select, VF);
7166 LLVM_DEBUG(dbgs() << "Cost of " << ReductionCost << " for VF " << VF
7167 << ":\n any-of reduction " << *Select << "\n");
7168 Cost += ReductionCost;
7172 const auto &ChainOps = RdxDesc.getReductionOpChain(RedPhi, OrigLoop);
7177 for (auto *ChainOp : ChainOps) {
7178 for (Value *Op : ChainOp->operands()) {
7179 if (auto *I = dyn_cast<Instruction>(Op))
7180 ChainOpsAndOperands.insert(I);
7191 assert(!CostCtx.SkipCostComputation.contains(I) &&
7192 "reduction op visited multiple times");
7193 CostCtx.SkipCostComputation.insert(I);
7194 LLVM_DEBUG(dbgs() << "Cost of " << ReductionCost << " for VF " << VF
7195 << ":\n in-loop reduction " << *I << "\n");
7196 Cost += *ReductionCost;
7208 CostCtx.SkipCostComputation.insert(BB->getTerminator());
7209 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
7220 VPlan &FirstPlan = *VPlans[0];
7226 "More than a single plan/VF w/o any plan having scalar VF");
7234 if (ForceVectorization) {
7241 for (auto &P : VPlans) {
7248 << "LV: Not considering vector loop of width " << VF
7249 << " because it will not generate any vector instructions.\n");
7255 if (isMoreProfitable(CurrentFactor, BestFactor))
7256 BestFactor = CurrentFactor;
7259 if (isMoreProfitable(CurrentFactor, ScalarFactor))
7260 ProfitableVFs.push_back(CurrentFactor);
7263 return BestFactor.Width;
7268 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7270 "Best VF has not a single VPlan.");
7272 for (const VPlanPtr &Plan : VPlans) {
7273 if (Plan->hasVF(VF))
7283 bool IsUnrollMetadata = false;
7284 MDNode *LoopID = L->getLoopID();
7287 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7288 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7290 const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7292 S && S->getString().starts_with("llvm.loop.unroll.disable");
7298 if (!IsUnrollMetadata) {
7300 LLVMContext &Context = L->getHeader()->getContext();
7303 MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7309 L->setLoopID(NewLoopID);
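The fragment above (from AddRuntimeUnrollDisableMetaData) only appends the llvm.loop.unroll.runtime.disable hint when no llvm.loop.unroll.* entry is already attached to the loop ID. A minimal, hedged sketch of the same idea, assuming only an existing Loop *L (intermediate variable names are invented for the example):

    // Sketch: append "llvm.loop.unroll.runtime.disable" to a loop's ID metadata,
    // preserving any hints that are already present.
    LLVMContext &Ctx = L->getHeader()->getContext();
    SmallVector<Metadata *, 4> MDs;
    MDs.push_back(nullptr); // Placeholder for the self-referencing operand.
    if (MDNode *LoopID = L->getLoopID())
      for (unsigned i = 1, e = LoopID->getNumOperands(); i != e; ++i)
        MDs.push_back(LoopID->getOperand(i)); // Keep existing loop hints.
    MDs.push_back(MDNode::get(
        Ctx, MDString::get(Ctx, "llvm.loop.unroll.runtime.disable")));
    MDNode *NewLoopID = MDNode::getDistinct(Ctx, MDs);
    NewLoopID->replaceOperandWith(0, NewLoopID); // Loop metadata refers to itself.
    L->setLoopID(NewLoopID);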
7319 bool VectorizingEpilogue) {
7324 auto *PhiR = cast<VPReductionPHIRecipe>(RedResult->getOperand(0));
7330 dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
7333 auto *Cmp = cast<ICmpInst>(PhiR->getStartValue()->getUnderlyingValue());
7336 ResumePhi = cast<PHINode>(Cmp->getOperand(0));
7338 assert((!VectorizingEpilogue || ResumePhi) &&
7339 "when vectorizing the epilogue loop, we need a resume phi from main "
7356 BCBlockPhi->addIncoming(FinalValue, Incoming);
7358 BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
7364 auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
7368 int IncomingEdgeBlockIdx =
7370 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
7372 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
7373 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
7375 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
7377 ReductionResumeValues[&RdxDesc] = BCBlockPhi;
7380std::pair<DenseMap<const SCEV *, Value *>,
7387 "Trying to execute plan with unsupported VF");
7389 "Trying to execute plan with unsupported UF");
7391 (IsEpilogueVectorization || !ExpandedSCEVs) &&
7392 "expanded SCEVs to reuse can only be used during epilogue vectorization");
7393 (void)IsEpilogueVectorization;
7398 << ", UF=" << BestUF << '\n');
7399 BestVPlan.setName("Final VPlan");
7416 assert(IsEpilogueVectorization && "should only re-use the existing trip "
7417 "count during epilogue vectorization");
7421 Value *CanonicalIVStartValue;
7422 std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7425#ifdef EXPENSIVE_CHECKS
7426 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
7432 std::unique_ptr<LoopVersioning> LVer = nullptr;
7440 LVer = std::make_unique<LoopVersioning>(
7443 State.LVer = &*LVer;
7460 CanonicalIVStartValue, State);
7470 dyn_cast<VPInstruction>(&R), ReductionResumeValues, State, OrigLoop,
7479 std::optional<MDNode *> VectorizedLoopID =
7486 if (VectorizedLoopID)
7487 L->setLoopID(*VectorizedLoopID);
7510 cast<BranchInst>(State.CFG.VPBB2IRBB[ExitVPBB]->getTerminator());
7511 if (MiddleTerm->isConditional() &&
7515 assert(TripCount > 0 && "trip count should not be zero");
7516 const uint32_t Weights[] = {1, TripCount - 1};
7523#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7525 for (const auto &Plan : VPlans)
7539std::pair<BasicBlock *, Value *>
7541 const SCEV2ValueTy &ExpandedSCEVs) {
7581 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7591 dbgs() << "intermediate fn:\n"
7599 assert(Bypass && "Expected valid bypass basic block.");
7620 TCCheckBlock->setName("vector.main.loop.iter.check");
7624 DT, LI, nullptr, "vector.ph");
7629 "TC check is expected to dominate Bypass");
7647 return TCCheckBlock;
7656std::pair<BasicBlock *, Value *>
7658 const SCEV2ValueTy &ExpandedSCEVs) {
7666 nullptr, "vec.epilog.iter.check", true);
7668 VecEpilogueIterationCountCheck);
7673 "expected this to be saved from the previous pass.");
7691 VecEpilogueIterationCountCheck,
7715 for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
7718 for (PHINode *Phi : PhisInBlock) {
7720 Phi->replaceIncomingBlockWith(
7722 VecEpilogueIterationCountCheck);
7729 return EPI.EpilogueIterationCountCheck == IncB;
7741 Type *IdxTy = Legal->getWidestInductionType();
7745 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
7756 {VecEpilogueIterationCountCheck,
7767 "Expected trip count to have been safed in the first pass.");
7771 "saved trip count does not dominate insertion point.");
7782 Value *CheckMinIters =
7786 "min.epilog.iters.check");
7792 unsigned EpilogueLoopStep =
7798 unsigned EstimatedSkipCount = std::min(MainLoopStep, EpilogueLoopStep);
7799 const uint32_t Weights[] = {EstimatedSkipCount,
7800 MainLoopStep - EstimatedSkipCount};
7810 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7824 assert(!Range.isEmpty() && "Trying to test an empty VF range.");
7825 bool PredicateAtRangeStart = Predicate(Range.Start);
7828 if (Predicate(TmpVF) != PredicateAtRangeStart) {
7833 return PredicateAtRangeStart;
7843 auto MaxVFTimes2 = MaxVF * 2;
7845 VFRange SubRange = {VF, MaxVFTimes2};
7846 auto Plan = buildVPlan(SubRange);
7848 VPlans.push_back(std::move(Plan));
7856 if (auto *I = dyn_cast<Instruction>(Op)) {
7857 if (auto *R = Ingredient2Recipe.lookup(I))
7858 return R->getVPSingleValue();
7872 "unsupported switch either exiting loop or continuing to header");
7877 BasicBlock *DefaultDst = SI->getDefaultDest();
7879 for (auto &C : SI->cases()) {
7881 assert(!EdgeMaskCache.contains({Src, Dst}) && "Edge masks already created");
7884 if (Dst == DefaultDst)
7886 auto I = Dst2Compares.insert({Dst, {}});
7894 VPValue *DefaultMask = nullptr;
7895 for (const auto &[Dst, Conds] : Dst2Compares) {
7904 EdgeMaskCache[{Src, Dst}] = Mask;
7910 DefaultMask = DefaultMask ? Builder.createOr(DefaultMask, Mask) : Mask;
7914 DefaultMask = Builder.createNot(DefaultMask);
7918 EdgeMaskCache[{Src, DefaultDst}] = DefaultMask;
7925 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
7927 if (ECEntryIt != EdgeMaskCache.end())
7928 return ECEntryIt->second;
7930 if (auto *SI = dyn_cast<SwitchInst>(Src->getTerminator())) {
7932 assert(EdgeMaskCache.contains(Edge) && "Mask for Edge not created?");
7933 return EdgeMaskCache[Edge];
7939 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
7940 assert(BI && "Unexpected terminator found");
7942 return EdgeMaskCache[Edge] = SrcMask;
7948 return EdgeMaskCache[Edge] = SrcMask;
7951 assert(EdgeMask && "No Edge Mask found for condition");
7963 return EdgeMaskCache[Edge] = EdgeMask;
7970 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
7972 assert(ECEntryIt != EdgeMaskCache.end() &&
7973 "looking up mask for edge which has not been created");
7974 return ECEntryIt->second;
7982 BlockMaskCache[Header] = nullptr;
7994 HeaderVPBB->insert(IV, NewInsertionPoint);
8001 BlockMaskCache[Header] = BlockMask;
8007 assert(BCEntryIt != BlockMaskCache.end() &&
8008 "Trying to access mask for block without one.");
8009 return BCEntryIt->second;
8013 assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8014 assert(BlockMaskCache.count(BB) == 0 && "Mask for block already computed");
8016 "Loop header must have cached block mask");
8022 for (auto *Predecessor :
8026 BlockMaskCache[BB] = EdgeMask;
8031 BlockMask = EdgeMask;
8035 BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8038 BlockMaskCache[BB] = BlockMask;
8044 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8045 "Must be called with either a load or store");
8051 "CM decision should be taken at this point.");
8077 auto *GEP = dyn_cast<GetElementPtrInst>(
8078 Ptr->getUnderlyingValue()->stripPointerCasts());
8085 if (LoadInst *Load = dyn_cast<LoadInst>(I))
8103 "step must be loop invariant");
8107 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
8110 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
8121 *PSE.getSE(), *OrigLoop);
8147 auto isOptimizableIVTruncate =
8155 isOptimizableIVTruncate(I), Range)) {
8157 auto *Phi = cast<PHINode>(I->getOperand(0));
8168 unsigned NumIncoming = Phi->getNumIncomingValues();
8179 for (unsigned In = 0; In < NumIncoming; In++) {
8184 assert(In == 0 && "Both null and non-null edge masks found");
8186 "Distinct incoming values with one having a full mask");
8209 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8210 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8211 ID == Intrinsic::pseudoprobe ||
8212 ID == Intrinsic::experimental_noalias_scope_decl))
8219 bool ShouldUseVectorIntrinsic =
8226 if (ShouldUseVectorIntrinsic)
8231 std::optional<unsigned> MaskPos;
8253 Variant = Decision.Variant;
8254 MaskPos = Decision.MaskPos;
8261 if (ShouldUseVectorCall) {
8262 if (MaskPos.has_value()) {
8277 Ops.insert(Ops.begin() + *MaskPos, Mask);
8289 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8290 !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8305 switch (I->getOpcode()) {
8308 case Instruction::SDiv:
8309 case Instruction::UDiv:
8310 case Instruction::SRem:
8311 case Instruction::URem: {
8319 auto *SafeRHS = Builder.createSelect(Mask, Ops[1], One, I->getDebugLoc());
8325 case Instruction::Add:
8326 case Instruction::And:
8327 case Instruction::AShr:
8328 case Instruction::FAdd:
8329 case Instruction::FCmp:
8330 case Instruction::FDiv:
8331 case Instruction::FMul:
8332 case Instruction::FNeg:
8333 case Instruction::FRem:
8334 case Instruction::FSub:
8335 case Instruction::ICmp:
8336 case Instruction::LShr:
8337 case Instruction::Mul:
8338 case Instruction::Or:
8339 case Instruction::Select:
8340 case Instruction::Shl:
8341 case Instruction::Sub:
8342 case Instruction::Xor:
8343 case Instruction::Freeze:
8351 auto *PN = cast<PHINode>(R->getUnderlyingValue());
8353 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8370 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8372 case Intrinsic::assume:
8373 case Intrinsic::lifetime_start:
8374 case Intrinsic::lifetime_end:
8396 VPValue *BlockInMask = nullptr;
8397 if (!IsPredicated) {
8401 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8412 assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
8413 (Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
8414 "Should not predicate a uniform recipe");
8416 IsUniform, BlockInMask);
8427 if (auto Phi = dyn_cast<PHINode>(Instr)) {
8428 if (Phi->getParent() != OrigLoop->getHeader())
8431 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
8437 "can only widen reductions and fixed-order recurrences here");
8455 PhisToFix.push_back(PhiRecipe);
8459 if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8468 if (auto *CI = dyn_cast<CallInst>(Instr))
8471 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8474 if (!shouldWiden(Instr, Range))
8477 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8481 if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8486 if (auto *CI = dyn_cast<CastInst>(Instr)) {
8491 return tryToWiden(Instr, Operands, VPBB);
8494void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8498 auto MaxVFTimes2 = MaxVF * 2;
8500 VFRange SubRange = {VF, MaxVFTimes2};
8501 if (auto Plan = tryToBuildVPlanWithVPRecipes(SubRange)) {
8513 VPlans.push_back(std::move(Plan));
8523 Value *StartIdx = ConstantInt::get(IdxTy, 0);
8530 Header->insert(CanonicalIVPHI, Header->begin());
8535 Instruction::Add, {CanonicalIVPHI, &Plan.getVFxUF()}, {HasNUW, false}, DL,
8537 CanonicalIVPHI->addOperand(CanonicalIVIncrement);
8554 if (MiddleVPBB->getNumSuccessors() != 2)
8559 cast<VPIRBasicBlock>(MiddleVPBB->getSuccessors()[0])->getIRBasicBlock();
8562 Value *IncomingValue =
8563 ExitPhi.getIncomingValueForBlock(ExitingBB);
8569 if ((isa<VPWidenIntOrFpInductionRecipe>(V) &&
8570 !cast<VPWidenIntOrFpInductionRecipe>(V)->getTruncInst()) ||
8571 isa<VPWidenPointerInductionRecipe>(V) ||
8572 (isa<Instruction>(IncomingValue) &&
8574 auto *P = dyn_cast<PHINode>(U);
8575 return P && Inductions.contains(P);
8598 if (isa<VPIRBasicBlock>(Succ))
8600 assert(!ScalarPHVPBB && "Two candidates for ScalarPHVPBB?");
8601 ScalarPHVPBB = cast<VPBasicBlock>(Succ);
8606 VPBuilder ScalarPHBuilder(ScalarPHVPBB);
8610 if (auto *Terminator = MiddleVPBB->getTerminator()) {
8611 auto *Condition = dyn_cast<VPInstruction>(Terminator->getOperand(0));
8612 assert((!Condition || Condition->getParent() == MiddleVPBB) &&
8613 "Condition expected in MiddleVPBB");
8614 MiddleBuilder.setInsertPoint(Condition ? Condition : Terminator);
8620 auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&HeaderPhi);
8626 {FOR->getBackedgeValue(), OneVPV},
8627 {}, "vector.recur.extract");
8630 "scalar.recur.init");
8631 Plan.addLiveOut(cast<PHINode>(FOR->getUnderlyingInstr()), ResumePhiRecipe);
8636LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
8651 bool RequiresScalarEpilogueCheck =
8666 bool IVUpdateMayOverflow = false;
8677 VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, Legal, CM, PSE, Builder);
8697 "Unsupported interleave factor for scalable vectors");
8702 InterleaveGroups.insert(IG);
8720 bool NeedsBlends = BB != HeaderBB && !BB->phis().empty();
8721 return Legal->blockNeedsPredication(BB) || NeedsBlends;
8726 if (VPBB != HeaderVPBB)
8730 if (VPBB == HeaderVPBB)
8731 RecipeBuilder.createHeaderMask();
8732 else if (NeedsMasks)
8733 RecipeBuilder.createBlockInMask(BB);
8740 auto *Phi = dyn_cast<PHINode>(Instr);
8741 if (Phi && Phi->getParent() == HeaderBB) {
8742 Operands.push_back(Plan->getOrAddLiveIn(
8745 auto OpRange = RecipeBuilder.mapToVPValues(Instr->operands());
8746 Operands = {OpRange.begin(), OpRange.end()};
8752 if ((SI = dyn_cast<StoreInst>(&I)) &&
8757 RecipeBuilder.tryToCreateWidenRecipe(Instr, Operands, Range, VPBB);
8759 Recipe = RecipeBuilder.handleReplication(Instr, Range);
8761 RecipeBuilder.setRecipe(Instr, Recipe);
8762 if (isa<VPHeaderPHIRecipe>(Recipe)) {
8773 "unexpected recipe needs moving");
8789 assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) &&
8790 !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() &&
8791 "entry block must be set to a VPRegionBlock having a non-empty entry "
8793 RecipeBuilder.fixHeaderPhis();
8803 adjustRecipesForReductions(Plan, RecipeBuilder, Range.Start);
8808 for (const auto *IG : InterleaveGroups) {
8810 cast<VPWidenMemoryRecipe>(RecipeBuilder.getRecipe(IG->getInsertPos()));
8812 for (unsigned i = 0; i < IG->getFactor(); ++i)
8813 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
8814 auto *StoreR = cast<VPWidenStoreRecipe>(RecipeBuilder.getRecipe(SI));
8815 StoredValues.push_back(StoreR->getStoredValue());
8818 bool NeedsMaskForGaps =
8821 "masked interleaved groups are not allowed.");
8823 Recipe->getMask(), NeedsMaskForGaps);
8824 VPIG->insertBefore(Recipe);
8826 for (unsigned i = 0; i < IG->getFactor(); ++i)
8828 VPRecipeBase *MemberR = RecipeBuilder.getRecipe(Member);
8829 if (!Member->getType()->isVoidTy()) {
8840 Plan->setName("Initial VPlan");
8845 auto *StrideV = cast<SCEVUnknown>(Stride)->getValue();
8846 auto *ScevStride = dyn_cast<SCEVConstant>(PSE.getSCEV(StrideV));
8851 auto *CI = Plan->getOrAddLiveIn(
8852 ConstantInt::get(Stride->getType(), ScevStride->getAPInt()));
8853 if (VPValue *StrideVPV = Plan->getLiveIn(StrideV))
8859 if (!isa<SExtInst, ZExtInst>(U))
8861 VPValue *StrideVPV = Plan->getLiveIn(U);
8864 unsigned BW = U->getType()->getScalarSizeInBits();
8865 APInt C = isa<SExtInst>(U) ? ScevStride->getAPInt().sext(BW)
8866 : ScevStride->getAPInt().zext(BW);
8867 VPValue *CI = Plan->getOrAddLiveIn(ConstantInt::get(U->getType(), C));
8885 bool WithoutRuntimeCheck =
8888 WithoutRuntimeCheck);
8904 *PSE.getSE(), true, false, OrigLoop);
8908 HCFGBuilder.buildHierarchicalCFG();
8916 *PSE.getSE(), *TLI);
8921 Plan->getVectorLoopRegion()->getExitingBasicBlock()->getTerminator();
8922 Term->eraseFromParent();
8946void LoopVectorizationPlanner::adjustRecipesForReductions(
8948 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
8955 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
8958 bool HasIntermediateStore = false;
8963 auto *IS2 = R2->getRecurrenceDescriptor().IntermediateStore;
8964 HasIntermediateStore |= IS1 || IS2;
8985 if (HasIntermediateStore && ReductionPHIList.size() > 1)
8987 R->moveBefore(*Header, Header->getFirstNonPhi());
8990 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8991 if (!PhiR || !PhiR->isInLoop() || (MinVF.isScalar() && !PhiR->isOrdered()))
8997 "AnyOf reductions are not allowed for in-loop reductions");
9002 for (unsigned I = 0; I != Worklist.size(); ++I) {
9005 auto *UserRecipe = dyn_cast<VPSingleDefRecipe>(U);
9007 assert(isa<VPLiveOut>(U) &&
9008 "U must either be a VPSingleDef or VPLiveOut");
9011 Worklist.insert(UserRecipe);
9024 Instruction *CurrentLinkI = CurrentLink->getUnderlyingInstr();
9027 unsigned IndexOfFirstOperand;
9035 "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9036 assert(((MinVF.isScalar() && isa<VPReplicateRecipe>(CurrentLink)) ||
9037 isa<VPWidenCallRecipe>(CurrentLink)) &&
9038 CurrentLink->getOperand(2) == PreviousLink &&
9039 "expected a call where the previous link is the added operand");
9047 {CurrentLink->getOperand(0), CurrentLink->getOperand(1)},
9049 LinkVPBB->insert(FMulRecipe, CurrentLink->getIterator());
9052 auto *Blend = dyn_cast<VPBlendRecipe>(CurrentLink);
9053 if (PhiR->isInLoop() && Blend) {
9054 assert(Blend->getNumIncomingValues() == 2 &&
9055 "Blend must have 2 incoming values");
9056 if (Blend->getIncomingValue(0) == PhiR)
9057 Blend->replaceAllUsesWith(Blend->getIncomingValue(1));
9059 assert(Blend->getIncomingValue(1) == PhiR &&
9060 "PhiR must be an operand of the blend");
9061 Blend->replaceAllUsesWith(Blend->getIncomingValue(0));
9067 if (isa<VPWidenRecipe>(CurrentLink)) {
9068 assert(isa<CmpInst>(CurrentLinkI) &&
9069 "need to have the compare of the select");
9072 assert(isa<VPWidenSelectRecipe>(CurrentLink) &&
9073 "must be a select recipe");
9074 IndexOfFirstOperand = 1;
9077 "Expected to replace a VPWidenSC");
9078 IndexOfFirstOperand = 0;
9083 CurrentLink->getOperand(IndexOfFirstOperand) == PreviousLink
9084 ? IndexOfFirstOperand + 1
9085 : IndexOfFirstOperand;
9086 VecOp = CurrentLink->getOperand(VecOpId);
9087 assert(VecOp != PreviousLink &&
9088 CurrentLink->getOperand(CurrentLink->getNumOperands() - 1 -
9089 (VecOpId - IndexOfFirstOperand)) ==
9091 "PreviousLink must be the operand other than VecOp");
9107 CurrentLink->replaceAllUsesWith(RedRecipe);
9108 PreviousLink = RedRecipe;
9117 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
9130 return isa<VPWidenSelectRecipe>(U) ||
9131 (isa<VPReplicateRecipe>(U) &&
9132 cast<VPReplicateRecipe>(U)->getUnderlyingInstr()->getOpcode() ==
9133 Instruction::Select);
9139 for (unsigned I = 0; I != CmpR->getNumOperands(); ++I)
9140 if (CmpR->getOperand(I) == PhiR)
9148 if (Select->getOperand(1) == PhiR)
9151 Select->getVPSingleValue()->replaceAllUsesWith(Or);
9165 assert(OrigExitingVPV->getDefiningRecipe()->getParent() != LatchVPBB &&
9166 "reduction recipe must be defined before latch");
9168 std::optional<FastMathFlags> FMFs =
9175 return isa<VPInstruction>(&U) &&
9176 cast<VPInstruction>(&U)->getOpcode() ==
9193 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
9202 Trunc->insertAfter(NewExitingVPV->getDefiningRecipe());
9203 Extnd->insertAfter(Trunc);
9205 PhiR->setOperand(1, Extnd->getVPSingleValue());
9206 NewExitingVPV = Extnd;
9225 OrigExitingVPV->replaceUsesWithIf(
9226 FinalReductionResult,
9234 assert(!State.Instance && "VPDerivedIVRecipe being replicated.");
9245 Kind, cast_if_present<BinaryOperator>(FPBinOp));
9246 DerivedIV->setName("offset.idx");
9247 assert(DerivedIV != CanonicalIV && "IV didn't need transforming?");
9256 "uniform recipe shouldn't be predicated");
9262 if (State.Instance->Lane.isFirstLane()) {
9276 if ((isa<LoadInst>(UI) || isa<StoreInst>(UI)) &&
9278 return Op->isDefinedOutsideVectorRegions();
9282 for (unsigned Part = 1; Part < State.UF; ++Part)
9291 for (unsigned Part = 0; Part < State.UF; ++Part)
9298 if (isa<StoreInst>(UI) &&
9309 for (unsigned Part = 0; Part < State.UF; ++Part)
9310 for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9319 Value *AllTrueMask =
9321 return Builder.CreateIntrinsic(ValTy, Intrinsic::experimental_vp_reverse,
9322 {Operand, AllTrueMask, EVL}, nullptr, Name);
9326 assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
9327 "explicit vector length.");
9335 auto &Builder = State.Builder;
9340 Value *Mask = nullptr;
9342 Mask = State.get(VPMask, 0);
9346 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
9351 Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
9352 nullptr, "wide.masked.gather");
9357 Instruction::Load, DataTy, Addr, "vp.op.load"));
9365 State.set(this, Res, 0);
9369 assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
9370 "explicit vector length.");
9377 auto &Builder = State.Builder;
9381 Value *StoredVal = State.get(StoredValue, 0);
9385 Value *Mask = nullptr;
9387 Mask = State.get(VPMask, 0);
9391 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
9394 if (CreateScatter) {
9396 Intrinsic::vp_scatter,
9397 {StoredVal, Addr, Mask, EVL});
9403 {StoredVal, Addr}));
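The EVL-based load and store recipes above emit the length-predicated vector-predication intrinsics through IRBuilder::CreateIntrinsic. As a rough illustration only (element types and counts below are made up for the example, and the exact name mangling depends on the chosen VF), the gather call built above corresponds to textual IR along these lines:

    // Illustrative IR, not taken from a real run:
    //   %g = call <4 x i32> @llvm.vp.gather.v4i32.v4p0(<4 x ptr> %addr,
    //                                                  <4 x i1> %mask, i32 %evl)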
9472 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9476 Function *F = L->getHeader()->getParent();
9482 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9487 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, LVL, CM, IAI, PSE, Hints,
9507 bool AddBranchWeights =
9509 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, TTI,
9510 F->getDataLayout(), AddBranchWeights);
9512 VF.Width, 1, LVL, &CM, BFI, PSI, Checks);
9514 << L->getHeader()->getParent()->getName() << "\"\n");
9534 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9535 if (S->getValueOperand()->getType()->isFloatTy())
9545 while (!Worklist.empty()) {
9547 if (!L->contains(I))
9549 if (!Visited.insert(I).second)
9556 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9559 I->getDebugLoc(), L->getHeader())
9560 << "floating point conversion changes vector width. "
9561 << "Mixed floating point precision requires an up/down "
9562 << "cast that will negatively impact performance.";
9565 for (Use &Op : I->operands())
9566 if (auto *OpI = dyn_cast<Instruction>(Op))
9573 std::optional<unsigned> VScale, Loop *L,
9586 << "LV: Interleaving only is not profitable due to runtime checks\n");
9627 unsigned AssumedMinimumVscale = 1;
9629 AssumedMinimumVscale = *VScale;
9630 IntVF *= AssumedMinimumVscale;
9648 uint64_t MinTC = std::max(MinTC1, MinTC2);
9650 MinTC = alignTo(MinTC, IntVF);
9654 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
9662 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
9663 "trip count < minimum profitable VF ("
9674 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9676 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9681 "VPlan-native path is not enabled. Only process inner loops.");
9684 << L->getHeader()->getParent()->getName() << "' from "
9685 << L->getLocStr() << "\n");
9690 dbgs() << "LV: Loop hints:"
9701 Function *F = L->getHeader()->getParent();
9723 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9733 if (!L->isInnermost())
9737 assert(L->isInnermost() && "Inner loop expected.");
9759 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9760 << "This loop is worth vectorizing only if no scalar "
9761 << "iteration overheads are incurred.");
9763 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9776 LLVM_DEBUG(dbgs() << " But the target considers the trip count too "
9777 "small to consider vectorizing.\n");
9779 "The trip count is below the minial threshold value.",
9780 "loop trip count is too low, avoiding vectorization",
9781 "LowTripCount", ORE, L);
9790 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9792 "Can't vectorize when the NoImplicitFloat attribute is used",
9793 "loop not vectorized due to NoImplicitFloat attribute",
9794 "NoImplicitFloat", ORE, L);
9806 "Potentially unsafe FP op prevents vectorization",
9807 "loop not vectorized due to unsafe FP support.",
9808 "UnsafeFP", ORE, L);
9813 bool AllowOrderedReductions;
9823 ExactFPMathInst->getDebugLoc(),
9824 ExactFPMathInst->getParent())
9825 << "loop not vectorized: cannot prove it is safe to reorder "
9826 "floating-point operations";
9828 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
9829 "reorder floating-point operations\n");
9835 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9838 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, IAI, PSE, Hints,
9846 std::optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
9854 bool AddBranchWeights =
9857 F->getDataLayout(), AddBranchWeights);
9863 unsigned SelectedIC = std::max(IC, UserIC);
9870 bool ForceVectorization =
9872 if (!ForceVectorization &&
9874 *PSE.getSE(), SEL)) {
9877 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
9879 << "loop not vectorized: cannot prove it is safe to reorder "
9880 "memory operations";
9889 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9890 bool VectorizeLoop = true, InterleaveLoop = true;
9892 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
9893 VecDiagMsg = std::make_pair(
9894 "VectorizationNotBeneficial",
9895 "the cost-model indicates that vectorization is not beneficial");
9896 VectorizeLoop = false;
9899 if (!MaybeVF && UserIC > 1) {
9902 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
9903 "interleaving should be avoided up front\n");
9904 IntDiagMsg = std::make_pair(
9905 "InterleavingAvoided",
9906 "Ignoring UserIC, because interleaving was avoided up front");
9907 InterleaveLoop = false;
9908 } else if (IC == 1 && UserIC <= 1) {
9911 IntDiagMsg = std::make_pair(
9912 "InterleavingNotBeneficial",
9913 "the cost-model indicates that interleaving is not beneficial");
9914 InterleaveLoop = false;
9916 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
9917 IntDiagMsg.second +=
9918 " and is explicitly disabled or interleave count is set to 1";
9920 } else if (IC > 1 && UserIC == 1) {
9923 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
9924 IntDiagMsg = std::make_pair(
9925 "InterleavingBeneficialButDisabled",
9926 "the cost-model indicates that interleaving is beneficial "
9927 "but is explicitly disabled or interleave count is set to 1");
9928 InterleaveLoop = false;
9932 IC = UserIC > 0 ? UserIC : IC;
9936 if (!VectorizeLoop && !InterleaveLoop) {
9940 L->getStartLoc(), L->getHeader())
9941 << VecDiagMsg.second;
9945 L->getStartLoc(), L->getHeader())
9946 << IntDiagMsg.second;
9949 } else if (!VectorizeLoop && InterleaveLoop) {
9953 L->getStartLoc(), L->getHeader())
9954 << VecDiagMsg.second;
9956 } else if (VectorizeLoop && !InterleaveLoop) {
9958 << ") in " << L->getLocStr() << '\n');
9961 L->getStartLoc(), L->getHeader())
9962 << IntDiagMsg.second;
9964 } else if (VectorizeLoop && InterleaveLoop) {
9966 << ") in " << L->getLocStr() << '\n');
9970 bool DisableRuntimeUnroll = false;
9971 MDNode *OrigLoopID = L->getLoopID();
9973 using namespace ore;
9974 if (!VectorizeLoop) {
9975 assert(IC > 1 && "interleave count should not be 1 or 0");
9978 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
9983 "VPlan cost model and legacy cost model disagreed");
9990 << "interleaved loop (interleaved count: "
9991 << NV("InterleaveCount", IC) << ")";
9997 LLVM_DEBUG(dbgs() << "VF picked by VPlan cost model: " << BestVF << "\n");
9999 "VPlan cost model and legacy cost model disagreed");
10011 EPI, &LVL, &CM, BFI, PSI, Checks);
10013 std::unique_ptr<VPlan> BestMainPlan(BestPlan.duplicate());
10014 const auto &[ExpandedSCEVs, ReductionResumeValues] = LVP.executePlan(
10029 Header->setName("vec.epilog.vector.body");
10039 auto *ExpandR = cast<VPExpandSCEVRecipe>(&R);
10041 ExpandedSCEVs.find(ExpandR->getSCEV())->second);
10045 ExpandR->eraseFromParent();
10052 if (isa<VPCanonicalIVPHIRecipe>(&R))
10055 Value *ResumeV = nullptr;
10057 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
10059 ReductionPhi->getRecurrenceDescriptor();
10061 ResumeV = ReductionResumeValues.find(&RdxDesc)->second;
10067 cast<Instruction>(ResumeV)->getParent()->getFirstNonPHI());
10077 if (auto *Ind = dyn_cast<VPWidenPointerInductionRecipe>(&R)) {
10078 IndPhi = cast<PHINode>(Ind->getUnderlyingValue());
10079 ID = &Ind->getInductionDescriptor();
10081 auto *WidenInd = cast<VPWidenIntOrFpInductionRecipe>(&R);
10082 IndPhi = WidenInd->getPHINode();
10083 ID = &WidenInd->getInductionDescriptor();
10090 assert(ResumeV && "Must have a resume value");
10092 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
10096 "DT not preserved correctly");
10098 DT, true, &ExpandedSCEVs);
10099 ++LoopsEpilogueVectorized;
10102 DisableRuntimeUnroll = true;
10114 DisableRuntimeUnroll = true;
10124 std::optional<MDNode *> RemainderLoopID =
10127 if (RemainderLoopID) {
10128 L->setLoopID(*RemainderLoopID);
10130 if (DisableRuntimeUnroll)
10154 bool Changed = false, CFGChanged = false;
10161 for (const auto &L : *LI)
10162 Changed |= CFGChanged |=
10173 LoopsAnalyzed += Worklist.size();
10176 while (!Worklist.empty()) {
10221 if (!Result.MadeAnyChange)
10235 if (Result.MadeCFGChange) {
10251 OS, MapClassName2PassName);
10254 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10255 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
mir Rename Register Operands
This file implements a map that provides insertion order iteration.
std::pair< uint64_t, uint64_t > Interval
Module.h This file contains the declarations for the Module class.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static BinaryOperator * CreateMul(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type ...
This file defines the VPlanHCFGBuilder class which contains the public interface (buildHierarchicalCF...
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
static const char PassName[]
static const uint32_t IV[8]
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
int64_t getSExtValue() const
Get sign extended value.
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
static Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
BinaryOps getOpcode() const
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
Represents analyses that only rely on functions' control flow.
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signature does not match the call signature.
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
This class represents a function call, abstracting a target machine's calling convention.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_ULT
unsigned less than
@ ICMP_ULE
unsigned less or equal
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
const ValueT & at(const_arg_type_t< KeyT > Val) const
at - Return the entry for the specified key, or abort if no such entry exists.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
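A minimal sketch of the DenseMap operations listed above; the map contents are made up for illustration.
#include "llvm/ADT/DenseMap.h"
using namespace llvm;

static void denseMapExample() {
  DenseMap<int, unsigned> Widths;
  Widths.insert({42, 8});            // returns std::pair<iterator, bool>
  unsigned W = Widths.lookup(42);    // 8; a default-constructed 0 if the key were absent
  bool Known = Widths.contains(42);  // true
  if (Widths.find(7) == Widths.end()) {
    // key 7 is absent; Widths.at(7) would abort instead of returning a default
  }
  (void)W; (void)Known;
}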
Implements a dense probed hash-table based set.
DomTreeNodeBase * getIDom() const
Analysis pass which computes a DominatorTree.
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
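A minimal sketch of the dominator-tree update calls listed above, assuming NewBB has just been spliced in between IDom and OldBB; the function name is hypothetical.
#include "llvm/IR/Dominators.h"
#include <cassert>
using namespace llvm;

static void updateDomTreeForNewBlock(DominatorTree &DT, BasicBlock *NewBB,
                                     BasicBlock *IDom, BasicBlock *OldBB) {
  DT.addNewBlock(NewBB, IDom);               // NewBB is immediately dominated by IDom
  DT.changeImmediateDominator(OldBB, NewBB); // OldBB is now immediately dominated by NewBB
  assert(DT.verify() && "dominator tree update broke invariants");
}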
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getScalable(ScalarTy MinVal)
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
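A minimal sketch of how fixed and scalable ElementCounts are built and queried with the API above.
#include "llvm/Support/TypeSize.h"
using namespace llvm;

static void elementCountExample() {
  ElementCount Fixed4 = ElementCount::getFixed(4);   // exactly 4 lanes
  ElementCount Scal4 = ElementCount::getScalable(4); // vscale x 4 lanes
  bool IsVec = Fixed4.isVector();                    // true: more than one element
  bool IsScalable = Scal4.isScalable();              // true: scaled by the runtime vscale
  (void)IsVec; (void)IsScalable;
}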
BasicBlock * emitMinimumVectorEpilogueIterCountCheck(BasicBlock *Bypass, BasicBlock *Insert)
Emits an iteration count bypass check after the main vector loop has finished to see if there are any...
void printDebugTracesAtEnd() override
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks)
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
std::pair< BasicBlock *, Value * > createEpilogueVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs) final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (ie the ...
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
void printDebugTracesAtEnd() override
std::pair< BasicBlock *, Value * > createEpilogueVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs) final
Implements the interface for creating a vectorized skeleton using the main loop strategy (ie the firs...
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
BasicBlock * emitIterationCountCheck(BasicBlock *Bypass, bool ForEpilogue)
Emits an iteration count bypass check once for the main loop (when ForEpilogue is false) and once for...
FastMathFlags getFastMathFlags() const
Convenience function for getting all the fast-math flags.
Convenience struct for specifying and reasoning about fast-math flags.
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
FunctionType * getFunctionType() const
Returns the FunctionType for me.
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Common base class shared among various IRBuilders.
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains V broadcasted to NumElts elements.
ConstantInt * getTrue()
Get the constant value for i1 true.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
InstTy * Insert(InstTy *I, const Twine &Name="") const
Insert and return the specified instruction.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getFalse()
Get the constant value for i1 false.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateURem(Value *LHS, Value *RHS, const Twine &Name="")
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
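A minimal sketch of the IRBuilder calls listed above; BB, X and Y are assumed to be an existing block and two i32 values, and the function name is hypothetical.
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static Value *emitUMin(BasicBlock *BB, Value *X, Value *Y) {
  IRBuilder<> Builder(BB);  // insert at the end of BB
  Value *Cmp = Builder.CreateICmp(CmpInst::ICMP_ULT, X, Y, "cmp");
  // CreateBinaryIntrinsic(Intrinsic::umin, X, Y) would express the same thing directly.
  return Builder.CreateSelect(Cmp, X, Y, "umin");  // unsigned minimum of X and Y
}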
A struct for saving information about induction variables.
const SCEV * getStep() const
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_NoInduction
Not an induction variable.
@ IK_FpInduction
Floating point induction variable.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
const SmallVectorImpl< Instruction * > & getCastInsts() const
Returns a reference to the type cast instructions in the induction update chain, that are redundant w...
Value * getStartValue() const
An extension of the inner loop vectorizer that creates a skeleton for a vectorized loop that has its ...
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks)
virtual std::pair< BasicBlock *, Value * > createEpilogueVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs)=0
The interface for creating a vectorized skeleton using one of two different strategies,...
std::pair< BasicBlock *, Value * > createVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs) final
Create a new empty loop that will contain vectorized instructions later on, while the old loop will b...
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separ...
InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, unsigned UnrollFactor, LoopVectorizationLegality *LVL, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization ...
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
PHINode * createInductionResumeValue(PHINode *OrigPhi, const InductionDescriptor &ID, Value *Step, ArrayRef< BasicBlock * > BypassBlocks, std::pair< BasicBlock *, Value * > AdditionalBypass={nullptr, nullptr})
Create a new phi node for the induction variable OrigPhi to resume iteration count in the scalar epil...
void scalarizeInstruction(const Instruction *Instr, VPReplicateRecipe *RepRecipe, const VPIteration &Instance, VPTransformState &State)
A helper function to scalarize a single Instruction in the innermost loop.
BasicBlock * LoopScalarBody
The scalar loop body.
Value * TripCount
Trip count of the original loop.
void sinkScalarOperands(Instruction *PredInst)
Iteratively sink the scalarized operands of a predicated instruction into the block that was created ...
const TargetLibraryInfo * TLI
Target Library Info.
DenseMap< PHINode *, Value * > IVEndValues
ElementCount MinProfitableTripCount
const TargetTransformInfo * TTI
Target Transform Info.
Value * VectorTripCount
Trip count of the widened loop (TripCount - TripCount % (VF*UF))
bool areSafetyChecksAdded()
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor, LoopVectorizationLegality *LVL, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
BasicBlock * emitSCEVChecks(BasicBlock *Bypass)
Emit a bypass check to see if all of the SCEV assumptions we've had to make are correct.
LoopVectorizationCostModel * Cost
The profitablity analysis.
SmallMapVector< const RecurrenceDescriptor *, PHINode *, 4 > ReductionResumeValues
BlockFrequencyInfo * BFI
BFI and PSI are used to check for profile guided size optimizations.
Value * getTripCount() const
Returns the original loop trip count.
BasicBlock * LoopMiddleBlock
Middle Block between the vector and the scalar.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
SmallVector< Instruction *, 4 > PredicatedInstructions
Store instructions that were predicated.
BasicBlock * completeLoopSkeleton()
Complete the loop skeleton by adding debug MDs, creating appropriate conditional branches in the midd...
void createVectorLoopSkeleton(StringRef Prefix)
Emit basic blocks (prefixed with Prefix) for the iteration check, vector loop preheader,...
BasicBlock * emitMemRuntimeChecks(BasicBlock *Bypass)
Emit bypass checks to check any memory assumptions we may have made.
BasicBlock * LoopScalarPreHeader
The scalar-loop preheader.
LoopVectorizationLegality * Legal
The legality analysis.
void emitIterationCountCheck(BasicBlock *Bypass)
Emit a bypass check to see if the vector trip count is zero, including if it overflows.
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, Value *VectorTripCount, Value *EndValue, BasicBlock *MiddleBlock, BasicBlock *VectorHeader, VPlan &Plan, VPTransformState &State)
Set up the values of the IVs correctly when exiting the vector loop.
void createInductionResumeValues(const SCEV2ValueTy &ExpandedSCEVs, std::pair< BasicBlock *, Value * > AdditionalBypass={nullptr, nullptr})
Create new phi nodes for the induction variables to resume iteration count in the scalar epilogue,...
void fixNonInductionPHIs(VPlan &Plan, VPTransformState &State)
Fix the non-induction PHIs in Plan.
DominatorTree * DT
Dominator Tree.
void setTripCount(Value *TC)
Used to set the trip count after ILV's construction and after the preheader block has been executed.
bool OptForSizeBasedOnProfile
BasicBlock * LoopVectorPreHeader
The vector-loop preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
Value * getOrCreateVectorTripCount(BasicBlock *InsertBlock)
Returns (and creates if needed) the trip count of the widened loop.
IRBuilder Builder
The builder that we use.
virtual std::pair< BasicBlock *, Value * > createVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs)
Create a new empty loop that will contain vectorized instructions later on, while the old loop will b...
unsigned UF
The vectorization unroll factor to use.
void fixVectorizedLoop(VPTransformState &State, VPlan &Plan)
Fix the vectorized code, taking care of header phi's, live-outs, and more.
BasicBlock * LoopExitBlock
The unique ExitBlock of the scalar loop if one exists.
SmallVector< BasicBlock *, 4 > LoopBypassBlocks
A list of all bypass blocks. The first block is the entry of the loop.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks,...
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
std::optional< CostType > getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
void replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB)
Replace specified successor OldBB to point at the provided block.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other instructions.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
InstTy * getInsertPos() const
Drive the analysis of interleaved memory accesses in the loop.
InterleaveGroup< Instruction > * getInterleaveGroup(const Instruction *Instr) const
Get the interleave group that Instr belongs to.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue ...
bool isInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleave group.
bool invalidateGroups()
Invalidate groups, e.g., in case all blocks in loop will be predicated contrary to original assumptio...
iterator_range< SmallPtrSetIterator< llvm::InterleaveGroup< Instruction > * > > getInterleaveGroups()
void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
void invalidateGroupsRequiringScalarEpilogue()
Invalidate groups that require a scalar epilogue (due to gaps).
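A minimal sketch of querying InterleavedAccessInfo with the accessors listed above, assuming analyzeInterleaving() has already been run and I is a load or store in the loop.
#include "llvm/Analysis/VectorUtils.h"
using namespace llvm;

static void inspectInterleaving(const InterleavedAccessInfo &IAI, Instruction *I) {
  if (!IAI.isInterleaved(I))
    return;
  InterleaveGroup<Instruction> *Group = IAI.getInterleaveGroup(I);
  uint32_t Factor = Group->getFactor();     // number of interleaved members (the stride)
  Instruction *First = Group->getMember(0); // member at index 0; may be null if the group has a gap
  (void)Factor; (void)First;
}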
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This analysis provides dependence information for the memory accesses of a loop.
Drive the analysis of memory accesses in the loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
const DenseMap< Value *, const SCEV * > & getSymbolicStrides() const
If an access has a symbolic strides, this maps the pointer value to the stride symbol.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
void getExitBlocks(SmallVectorImpl< BlockT * > &ExitBlocks) const
Return all of the successor blocks of this loop.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase< BlockT, LoopT > &LI)
This method is used by other analyses to update loop information.
iterator_range< block_iterator > blocks() const
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
BlockT * getExitingBlock() const
If getExitingBlocks would return exactly one block, return that block.
bool isLoopExiting(const BlockT *BB) const
True if terminator in the block can branch to another block that is outside of the current loop.
BlockT * getUniqueExitBlock() const
If getUniqueExitBlocks would return exactly one block, return that block.
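A minimal sketch of the loop-shape queries listed above; the predicate name is hypothetical.
#include "llvm/Analysis/LoopInfo.h"
using namespace llvm;

static bool hasSimpleShape(const Loop *L) {
  BasicBlock *Preheader = L->getLoopPreheader(); // null if there is no dedicated preheader
  BasicBlock *Latch = L->getLoopLatch();         // null if there is more than one latch
  BasicBlock *Exiting = L->getExitingBlock();    // null if several blocks exit the loop
  BasicBlock *Exit = L->getUniqueExitBlock();    // null if the loop has several exit blocks
  return Preheader && Latch && Exiting && Exit && L->isInnermost();
}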
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is n...
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
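A minimal sketch of a reverse post-order walk over a loop's blocks using LoopBlocksRPO as described above; the counter is illustrative only.
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
using namespace llvm;

static unsigned countBlocksInRPO(Loop *L, const LoopInfo *LI) {
  LoopBlocksRPO RPOT(L);
  RPOT.perform(LI);  // cache the DFS result
  unsigned N = 0;
  for (BasicBlock *BB : RPOT) {
    (void)BB;        // header first, then the remaining blocks in reverse post-order
    ++N;
  }
  return N;
}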
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
SmallPtrSet< Type *, 16 > ElementTypesInLoop
All element types found in the loop.
void collectElementTypesForWidening()
Collect all element types in the loop for which widening is needed.
bool canVectorizeReductions(ElementCount VF) const
Returns true if the target machine supports all of the reduction variables found for the given VF.
bool requiresScalarEpilogue(VFRange Range) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
bool hasPredStores() const
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
void collectInLoopReductions()
Split reductions into those that happen in the loop, and those that happen outside.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes()
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy, TTI::TargetCostKind CostKind) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI)
const Function * TheFunction
LoopVectorizationLegality * Legal
Vectorization legality.
bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const
Returns true if the target machine supports masked load operation for the given DataType and kind of ...
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
DemandedBits * DB
Demanded bits analysis.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vect...
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
Loop * TheLoop
The loop that we evaluate.
TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow=true) const
Returns the TailFoldingStyle that is best for the current loop.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and cl...
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available...
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
bool selectUserVectorizationFactor(ElementCount UserVF)
Setup cost-based decisions for user vectorization factor.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const
Returns true if the target machine supports masked store operation for the given DataType and kind of...
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
SmallVector< RegisterUsage, 8 > calculateRegisterUsage(ArrayRef< ElementCount > VFs)
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
bool isInLoopReduction(PHINode *Phi) const
Returns true if the Phi is part of an inloop reduction.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector ...
const MapVector< Instruction *, uint64_t > & getMinimalBitwidths() const
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool isLegalGatherOrScatter(Value *V, ElementCount VF)
Returns true if the target machine can represent V as a masked gather or scatter operation.
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool runtimeChecksRequired()
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail fol...
bool isEpilogueVectorizationProfitable(const ElementCount VF) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
void collectUniformsAndScalars(ElementCount VF)
Collect Uniform and Scalar values for the given VF.
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block requires predication for any reason,...
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC)
Selects and saves TailFoldingStyle for 2 options - if IV update may overflow or not.
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
bool isScalarWithPredication(Instruction *I, ElementCount VF) const
Returns true if I is an instruction which requires predication and for which our chosen predication s...
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const
Returns true if we should use strict in-order reductions for the given RdxDesc.
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF) const
Return the costs for our two available strategies for lowering a div/rem operation which requires spe...
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
InstructionCost expectedCost(ElementCount VF)
Returns the expected execution cost.
void setCostBasedWideningDecision(ElementCount VF)
Memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
bool isScalarEpilogueAllowed() const
Returns true if a scalar epilogue is not allowed due to optsize or a loop hint annotation.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
unsigned selectInterleaveCount(ElementCount VF, InstructionCost LoopCost)
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
unsigned getNumStores() const
bool hasVectorCallVariants() const
Returns true if there is at least one function call in the loop which has a vectorized variant availa...
uint64_t getMaxSafeVectorWidthInBits() const
bool isInvariantAddressOfReduction(Value *V)
Returns True if given address is invariant and is used to store recurrent expression.
bool blockNeedsPredication(BasicBlock *BB) const
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
int isConsecutivePtr(Type *AccessTy, Value *Ptr) const
Check if this pointer is consecutive when vectorizing.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
bool isReductionVariable(PHINode *PN) const
Returns True if PN is a reduction variable in this loop.
bool isFixedOrderRecurrence(const PHINode *Phi) const
Returns True if Phi is a fixed-order recurrence in this loop.
const InductionDescriptor * getPointerInductionDescriptor(PHINode *Phi) const
Returns a pointer to the induction descriptor, if Phi is pointer induction.
const InductionDescriptor * getIntOrFpInductionDescriptor(PHINode *Phi) const
Returns a pointer to the induction descriptor, if Phi is an integer or floating point induction.
bool isInductionPhi(const Value *V) const
Returns True if V is a Phi node of an induction variable in this loop.
PHINode * getPrimaryInduction()
Returns the primary induction variable.
const InductionList & getInductionVars() const
Returns the induction variables found in the loop.
bool isInvariant(Value *V) const
Returns true if V is invariant across all loop iterations according to SCEV.
const ReductionList & getReductionVars() const
Returns the reduction variables found in the loop.
bool isSafeForAnyVectorWidth() const
unsigned getNumLoads() const
bool canFoldTailByMasking() const
Return true if we can vectorize this loop while folding its tail by masking.
void prepareToFoldTailByMasking()
Mark all respective loads/stores for masking.
Type * getWidestInductionType()
Returns the widest induction type.
const LoopAccessInfo * getLAI() const
bool isUniformMemOp(Instruction &I, ElementCount VF) const
A uniform memory op is a load or store which accesses the same memory location on all VF lanes,...
bool isMaskRequired(const Instruction *I) const
Returns true if vector representation of the instruction I requires mask.
const RuntimePointerChecking * getRuntimePointerChecking() const
Returns the information that we collected about runtime memory check.
Planner drives the vectorization process after having passed Legality checks.
std::optional< VectorizationFactor > plan(ElementCount UserVF, unsigned UserIC)
Plan how to best vectorize, return the best VF and its cost, or std::nullopt if vectorization and int...
VectorizationFactor selectEpilogueVectorizationFactor(const ElementCount MaxVF, unsigned IC)
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
std::pair< DenseMap< const SCEV *, Value * >, DenseMap< const RecurrenceDescriptor *, Value * > > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, bool IsEpilogueVectorization, const DenseMap< const SCEV *, Value * > *ExpandedSCEVs=nullptr)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selecte...
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gath...
ElementCount getBestVF()
Return the most profitable vectorization factor.
VPlan & getBestPlanFor(ElementCount VF) const
Return the best VPlan for VF.
void emitInvalidCostRemarks(OptimizationRemarkEmitter *ORE)
Emit remarks for recipes with invalid costs in the available VPlans.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
void printPlans(raw_ostream &O)
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
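A minimal sketch, assuming it runs inside the pass where these internal classes are visible, of how the planner interface above is typically driven: plan, pick the best VPlan, execute it. The function name and the reuse of the interleave count as UF are illustrative only.
static bool planAndExecute(LoopVectorizationPlanner &LVP, InnerLoopVectorizer &ILV,
                           DominatorTree *DT, ElementCount UserVF, unsigned UserIC) {
  std::optional<VectorizationFactor> VF = LVP.plan(UserVF, UserIC);
  if (!VF)
    return false;  // vectorization judged not worthwhile
  VPlan &BestPlan = LVP.getBestPlanFor(VF->Width);
  LVP.executePlan(VF->Width, UserIC, BestPlan, ILV, DT,
                  /*IsEpilogueVectorization=*/false);
  return true;
}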
This holds vectorization requirements that must be verified late in the process.
Instruction * getExactFPInst()
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool isScalableVectorizationDisabled() const
enum ForceKind getForce() const
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
bool allowReordering() const
When enabling loop hints are provided we allow the vectorizer to change the order of operations that ...
void emitRemarkWithHints() const
Dumps all the hint information.
bool isPotentiallyUnsafe() const
ElementCount getWidth() const
@ FK_Enabled
Forcing enabled.
@ FK_Undefined
Not selected.
@ FK_Disabled
Forcing disabled.
unsigned getPredicate() const
void setAlreadyVectorized()
Mark the loop L as already vectorized by setting the width to 1.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend t...
unsigned getInterleave() const
void prepareNoAliasMetadata()
Set up the aliasing scopes based on the memchecks.
Represents a single loop in the control flow graph.
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
MDNode * getLoopID() const
Return the llvm.loop loop id metadata node for this loop if it is present.
void replaceOperandWith(unsigned I, Metadata *New)
Replace a specific operand.
const MDOperand & getOperand(unsigned I) const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
static MDString * get(LLVMContext &Context, StringRef Str)
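A minimal sketch of assembling a loop-metadata operand from MDString/MDTuple, in the style used for the llvm.loop.isvectorized marker; the helper name is hypothetical.
#include "llvm/IR/Constants.h"
#include "llvm/IR/Metadata.h"
using namespace llvm;

static MDNode *makeIsVectorizedMD(LLVMContext &Ctx) {
  Metadata *Ops[] = {
      MDString::get(Ctx, "llvm.loop.isvectorized"),
      ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(Ctx), 1))};
  return MDTuple::get(Ctx, Ops);
}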
This class implements a map that also provides access to all stored values in a deterministic order.
iterator find(const KeyT &Key)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
static unsigned getIncomingValueNumForOperand(unsigned i)
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
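A minimal sketch of creating a two-input phi with the PHINode API above, in the spirit of an induction resume value; all parameter names are illustrative.
#include "llvm/IR/Instructions.h"
using namespace llvm;

static PHINode *makeResumePhi(Type *Ty, BasicBlock *Header, BasicBlock *Preheader,
                              BasicBlock *Latch, Value *Start, Value *Next) {
  PHINode *Phi = PHINode::Create(Ty, /*NumReservedValues=*/2, "resume.val");
  Phi->insertBefore(Header->getFirstNonPHI()); // phis live at the top of the block
  Phi->addIncoming(Start, Preheader);
  Phi->addIncoming(Next, Latch);
  return Phi;
}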
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void preserveSet()
Mark an analysis set as preserved.
void preserve()
Mark an analysis as preserved.
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
Instruction * getLoopExitInstr() const
static unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
unsigned getMinWidthCastToRecurrenceTypeInBits() const
Returns the minimum width used by the recurrence in bits.
TrackingVH< Value > getRecurrenceStartValue() const
SmallVector< Instruction *, 4 > getReductionOpChain(PHINode *Phi, Loop *L) const
Attempts to find a chain of operations from Phi to LoopExitInst that can be treated as a set of reduc...
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
bool isOrdered() const
Expose an ordered FP reduction to the instance users.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
bool Need
This flag indicates if we need to add the runtime check.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class represents a constant integer value.
const APInt & getAPInt() const
Helper to remove instructions inserted during SCEV expansion, unless they are marked as used.
This class uses information about analyze scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
This class represents an assumption made using SCEV expressions which can be checked at run-time.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
bool isOne() const
Return true if the expression is a constant one.
bool isZero() const
Return true if the expression is a constant zero.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
unsigned getSmallConstantMaxTripCount(const Loop *L)
Returns the upper bound of the loop trip count as a normal unsigned value.
const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVMContext & getContext() const
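A minimal sketch of deriving a trip-count expression with the ScalarEvolution calls listed above; the helper name is hypothetical.
#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;

static const SCEV *getTripCountSCEV(ScalarEvolution &SE, const Loop *L) {
  const SCEV *BTC = SE.getBackedgeTakenCount(L); // backedge-taken count, if computable
  if (isa<SCEVCouldNotCompute>(BTC))
    return BTC;
  // Trip count = backedge-taken count + 1, evaluated in a type that cannot overflow.
  return SE.getTripCountFromExitCount(BTC);
}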
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
ArrayRef< value_type > getArrayRef() const
size_type size() const
Determine the number of elements in the SetVector.
iterator end()
Get an iterator to the end of the SetVector.
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
bool empty() const
Determine if the SetVector is empty or not.
iterator begin()
Get an iterator to the beginning of the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
value_type pop_back_val()
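A minimal sketch of the SetVector operations above: it keeps insertion order while rejecting duplicates, which is why the vectorizer's worklists favour it.
#include "llvm/ADT/SetVector.h"
using namespace llvm;

static void setVectorExample() {
  SetVector<int> Worklist;
  Worklist.insert(3);                   // true: newly inserted
  Worklist.insert(3);                   // false: already present, order unchanged
  Worklist.insert(7);
  while (!Worklist.empty()) {
    int Item = Worklist.pop_back_val(); // pops 7, then 3
    (void)Item;
  }
}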
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionalit...
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
bool isVoidTy() const
Return true if this is 'void'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
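A minimal sketch of the Type queries listed above; the predicate name is hypothetical.
#include "llvm/IR/Type.h"
using namespace llvm;

static bool isSmallIntOrFPElement(Type *Ty) {
  Type *Scalar = Ty->getScalarType(); // element type for vectors, Ty itself otherwise
  return (Scalar->isIntegerTy() || Scalar->isFloatingPointTy()) &&
         Scalar->getScalarSizeInBits() <= 64;
}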
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
void appendRecipe(VPRecipeBase *Recipe)
Augment the existing recipes of a VPBasicBlock with an additional Recipe as the last recipe.
RecipeListTy::iterator iterator
Instruction iterators...
void execute(VPTransformState *State) override
The method which generates the output IR instructions that correspond to this VPBasicBlock,...
iterator begin()
Recipe iterator methods.
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
void insert(VPRecipeBase *Recipe, iterator InsertPt)
A recipe for vectorizing a phi-node as a sequence of mask-based select instructions.
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
VPRegionBlock * getParent()
const VPBasicBlock * getExitingBasicBlock() const
void setName(const Twine &newName)
const VPBasicBlock * getEntryBasicBlock() const
VPBlockBase * getSingleSuccessor() const
const VPBlocksTy & getSuccessors() const
static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr)
Insert disconnected VPBlockBase NewBlock after BlockPtr.
RAII object that stores the current insertion point and restores it when the object is destroyed.
VPlan-based builder utility analogous to IRBuilder.
VPValue * createOr(VPValue *LHS, VPValue *RHS, DebugLoc DL={}, const Twine &Name="")
VPBasicBlock * getInsertBlock() const
VPValue * createICmp(CmpInst::Predicate Pred, VPValue *A, VPValue *B, DebugLoc DL={}, const Twine &Name="")
Create a new ICmp VPInstruction with predicate Pred and operands A and B.
VPInstruction * createOverflowingOp(unsigned Opcode, std::initializer_list< VPValue * > Operands, VPRecipeWithIRFlags::WrapFlagsTy WrapFlags, DebugLoc DL={}, const Twine &Name="")
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
VPValue * createNot(VPValue *Operand, DebugLoc DL={}, const Twine &Name="")
VPValue * createLogicalAnd(VPValue *LHS, VPValue *RHS, DebugLoc DL={}, const Twine &Name="")
VPValue * createSelect(VPValue *Cond, VPValue *TrueVal, VPValue *FalseVal, DebugLoc DL={}, const Twine &Name="", std::optional< FastMathFlags > FMFs=std::nullopt)
void setInsertPoint(VPBasicBlock *TheBB)
This specifies that created VPInstructions should be appended to the end of the specified block.
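A minimal sketch of the VPBuilder helpers above, assuming it lives inside the pass where the VPlan headers are visible and the builder already points at a VPBasicBlock; the function name is hypothetical.
static VPValue *emitGuardedCompare(VPBuilder &Builder, VPValue *Mask, VPValue *A,
                                   VPValue *B) {
  VPValue *Cmp = Builder.createICmp(CmpInst::ICMP_ULE, A, B);
  VPValue *Guarded = Builder.createLogicalAnd(Mask, Cmp);
  return Builder.createSelect(Guarded, A, B);
}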
Canonical scalar induction phi of the vector loop.
Type * getScalarType() const
Returns the scalar type of the induction.
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
void execute(VPTransformState &State) override
Generate the transformed value of the induction at offset StartValue (1.
VPValue * getStepValue() const
VPValue * getStartValue() const
This is a concrete Recipe that models a single VPlan-level instruction.
@ ResumePhi
Creates a scalar phi in a leaf VPBB with a single predecessor in VPlan.
unsigned getOpcode() const
VPInterleaveRecipe is a recipe for transforming an interleave group of load or stores into one wide l...
static VPLane getLastLaneForVF(const ElementCount &VF)
static VPLane getFirstLane()
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
VPBasicBlock * getParent()
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
void insertAfter(VPRecipeBase *InsertPos)
Insert an unlinked Recipe into a basic block immediately after the specified Recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipies from IR instructions.
VPValue * getVPValueOrAddLiveIn(Value *V, VPlan &Plan)
VPValue * createEdgeMask(BasicBlock *Src, BasicBlock *Dst)
A helper function that computes the predicate of the edge between SRC and DST.
VPReplicateRecipe * handleReplication(Instruction *I, VFRange &Range)
Build a VPReplicationRecipe for I.
void createSwitchEdgeMasks(SwitchInst *SI)
Create an edge mask for every destination of cases and/or default.
VPValue * getBlockInMask(BasicBlock *BB) const
Returns the entry mask for the block BB.
VPValue * getEdgeMask(BasicBlock *Src, BasicBlock *Dst) const
A helper that returns the previously computed predicate of the edge between SRC and DST.
iterator_range< mapped_iterator< Use *, std::function< VPValue *(Value *)> > > mapToVPValues(User::op_range Operands)
Returns a range mapping the values of the range Operands to their corresponding VPValues.
void fixHeaderPhis()
Add the incoming values from the backedge to reduction & first-order recurrence cross-iteration phis.
VPRecipeBase * tryToCreateWidenRecipe(Instruction *Instr, ArrayRef< VPValue * > Operands, VFRange &Range, VPBasicBlock *VPBB)
Create and return a widened recipe for I if one can be created within the given VF Range.
void createHeaderMask()
Create the mask for the vector loop header block.
void createBlockInMask(BasicBlock *BB)
A helper function that computes the predicate of the block BB, assuming that the header block of the ...
VPRecipeBase * getRecipe(Instruction *I)
Return the recipe created for given ingredient.
void setFlags(Instruction *I) const
Set the IR flags for I.
A recipe for handling reduction phis.
bool isInLoop() const
Returns true, if the phi is part of an in-loop reduction.
const RecurrenceDescriptor & getRecurrenceDescriptor() const
A recipe to represent inloop reduction operations, performing a reduction on a vector operand into a ...
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
const VPBlockBase * getEntry() const
bool isReplicator() const
An indicator whether this region is to generate multiple replicated instances of output IR correspond...
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
void execute(VPTransformState &State) override
Generate replicas of the desired Ingredient.
bool shouldPack() const
Returns true if the recipe is used by a widened recipe via an intervening VPPredInstPHIRecipe.
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
void setOperand(unsigned I, VPValue *New)
unsigned getNumOperands() const
VPValue * getOperand(unsigned N) const
void addOperand(VPValue *Operand)
void replaceAllUsesWith(VPValue *New)
user_iterator user_begin()
Value * getLiveInIRValue()
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
bool isLiveIn() const
Returns true if this VPValue is a live-in, i.e. defined outside the VPlan.
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplac...
A recipe to compute the pointers for widened memory accesses of IndexTy for all parts.
A recipe for widening Call instructions.
A Recipe for widening the canonical induction variable of the vector loop.
VPWidenCastRecipe is a recipe to create vector cast instructions.
A recipe for handling GEP instructions.
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
A common base class for widening memory operations.
bool isConsecutive() const
Return whether the loaded-from / stored-to addresses are consecutive.
VPValue * getMask() const
Return the mask used by this recipe.
VPValue * getAddr() const
Return the address accessed by this recipe.
bool isReverse() const
Return whether the consecutive loaded/stored addresses are in reverse order.
A recipe for handling phis that are widened in the vector loop.
VPValue * getIncomingValue(unsigned I)
Returns the I th incoming VPValue.
VPBasicBlock * getIncomingBlock(unsigned I)
Returns the I th incoming VPBasicBlock.
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
Main class to build the VPlan H-CFG for an incoming IR.
VPlan models a candidate for vectorization, encoding various decisions take to produce efficient outp...
void printDOT(raw_ostream &O) const
Print this VPlan in DOT format to O.
void prepareToExecute(Value *TripCount, Value *VectorTripCount, Value *CanonicalIVStartValue, VPTransformState &State)
Prepare the plan for execution, setting up the required live-in values.
VPBasicBlock * getEntry()
VPValue & getVectorTripCount()
The vector trip count.
void setName(const Twine &newName)
VPValue & getVFxUF()
Returns VF * UF of the vector loop region.
VPValue * getTripCount() const
The trip count of the original loop.
VPValue * getOrCreateBackedgeTakenCount()
The backedge taken count of the original loop.
iterator_range< SmallSetVector< ElementCount, 2 >::iterator > vectorFactors() const
Returns an iterator range over all VFs of the plan.
void addLiveOut(PHINode *PN, VPValue *V)
VPBasicBlock * getPreheader()
VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
bool hasVF(ElementCount VF)
bool hasUF(unsigned UF) const
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
static VPlanPtr createInitialVPlan(const SCEV *TripCount, ScalarEvolution &PSE, bool RequiresScalarEpilogueCheck, bool TailFolded, Loop *TheLoop)
Create initial VPlan, having an "entry" VPBasicBlock (wrapping original scalar pre-header ) which con...
VPValue * getOrAddLiveIn(Value *V)
Gets the live-in VPValue for V or adds a new live-in (if none exists yet) for V.
LLVM_DUMP_METHOD void dump() const
Dump the plan to stderr (for debugging).
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the vector loop.
const MapVector< PHINode *, VPLiveOut * > & getLiveOuts() const
void print(raw_ostream &O) const
Print this VPlan to O.
VPValue * getSCEVExpansion(const SCEV *S) const
VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
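A minimal in-tree sketch of querying and dumping a plan with the members listed above (Plan and VF are illustrative; lifetime management of the clone is omitted):

  #include "VPlan.h"
  #include "llvm/Support/Debug.h"
  using namespace llvm;

  void inspectPlan(VPlan &Plan, ElementCount VF) {
    if (!Plan.hasVF(VF))
      return;                                      // VF was never planned for
    VPValue *TC = Plan.getTripCount();             // trip count of the original loop
    VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
    Plan.print(dbgs());                            // human-readable dump
    VPlan *Copy = Plan.duplicate();                // clone with remapped VPValues
    (void)TC; (void)LoopRegion; (void)Copy;
  }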
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUser() const
Return true if there is exactly one user of this value.
void setName(const Twine &Name)
Change the name of the value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
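A small, self-contained example of the llvm::Value members listed above (the helper name is illustrative):

  #include "llvm/ADT/Twine.h"
  #include "llvm/IR/Type.h"
  #include "llvm/IR/User.h"
  #include "llvm/IR/Value.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  void describeValue(Value *V) {
    if (V->hasOneUser())
      V->setName(Twine(V->getName()) + ".only-use"); // setName takes a Twine
    errs() << "type: " << *V->getType() << ", users:";
    for (const User *U : V->users())
      errs() << ' ' << U->getName();
    errs() << '\n';
  }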
VectorBuilder & setEVL(Value *NewExplicitVectorLength)
VectorBuilder & setMask(Value *NewMask)
Value * createVectorInstruction(unsigned Opcode, Type *ReturnTy, ArrayRef< Value * > VecOpArray, const Twine &Name=Twine())
static bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as an element type.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
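A small example using the two members above to build fixed and scalable vector types (the function name is illustrative):

  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Type.h"
  using namespace llvm;

  void makeVectorTypes(LLVMContext &Ctx) {
    Type *F32 = Type::getFloatTy(Ctx);
    if (!VectorType::isValidElementType(F32))
      return;
    // <4 x float> and <vscale x 2 x float>, respectively.
    VectorType *V4F32   = VectorType::get(F32, ElementCount::getFixed(4));
    VectorType *NxV2F32 = VectorType::get(F32, ElementCount::getScalable(2));
    (void)V4F32; (void)NxV2F32;
  }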
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr bool isZero() const
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
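A small example of the ElementCount queries and comparisons listed above; note that the isKnown* comparisons only return true when the relation holds for every possible vscale:

  #include "llvm/Support/TypeSize.h"
  using namespace llvm;

  void elementCountQueries() {
    ElementCount Fixed8 = ElementCount::getFixed(8);      // 8 lanes
    ElementCount Scal4  = ElementCount::getScalable(4);   // vscale x 4 lanes
    bool S = Scal4.isScalable();                          // true
    unsigned Min = Scal4.getKnownMinValue();              // 4
    ElementCount Scal8 = Scal4.multiplyCoefficientBy(2);  // vscale x 8
    ElementCount Scal2 = Scal4.divideCoefficientBy(2);    // vscale x 2
    bool LE = ElementCount::isKnownLE(Scal2, Scal4);      // true for all vscale
    bool GT = ElementCount::isKnownGT(Fixed8, Scal4);     // false: depends on vscale
    (void)S; (void)Min; (void)Scal8; (void)LE; (void)GT;
  }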
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
A range adaptor for a pair of iterators.
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ PredicateElseScalarEpilogue
@ PredicateOrDontVectorize
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
@ C
The default llvm calling convention, compatible with C.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
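A small example of the PatternMatch helpers listed above (the function name is illustrative):

  #include "llvm/IR/PatternMatch.h"
  #include "llvm/IR/Value.h"
  using namespace llvm;
  using namespace llvm::PatternMatch;

  // Recognise a single-use multiply whose first operand is a zero- or
  // sign-extended value, capturing both operands.
  bool matchExtendedMul(Value *V, Value *&A, Value *&B) {
    return match(V, m_OneUse(m_Mul(m_ZExtOrSExt(m_Value(A)), m_Value(B))));
  }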
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
NodeAddr< PhiNode * > Phi
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
const_iterator end(StringRef path)
Get end iterator over path.
bool isUniformAfterVectorization(const VPValue *VPV)
Returns true if VPV is uniform after vectorization.
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr, ScalarEvolution &SE)
Get or create a VPValue that corresponds to the expansion of Expr.
This is an optimization pass for GlobalISel generic memory operations.
bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
pred_iterator pred_end(BasicBlock *BB)
Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
void stable_sort(R &&Range)
bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Returns a loop's estimated trip count based on branch weight metadata.
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
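A small example of enumerate over a single range (the helper name is illustrative):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/Support/Debug.h"
  using namespace llvm;

  void printIndexed(const SmallVectorImpl<int> &Vals) {
    // enumerate pairs each element with its position in the range.
    for (const auto &En : enumerate(Vals))
      dbgs() << En.index() << " -> " << En.value() << "\n";
  }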
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
auto successors(const MachineBasicBlock *BB)
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
const SCEV * createTripCountSCEV(Type *IdxTy, PredicatedScalarEvolution &PSE, Loop *OrigLoop)
std::pair< Instruction *, ElementCount > InstructionVFPair
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
std::optional< MDNode * > makeFollowupLoopID(MDNode *OrigLoopID, ArrayRef< StringRef > FollowupAttrs, const char *InheritOptionsAttrsPrefix="", bool AlwaysNew=false)
Create a new loop identifier for a loop created from a loop transformation.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
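A small sketch pairing make_early_inc_range with wouldInstructionBeTriviallyDead (listed further below); the helper name is illustrative and the header locations are the usual LLVM ones:

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Instruction.h"
  #include "llvm/Transforms/Utils/Local.h"
  using namespace llvm;

  // The range advances its iterator before the body runs, so erasing the
  // current instruction does not invalidate the traversal.
  void eraseDeadInstructions(BasicBlock &BB) {
    for (Instruction &I : make_early_inc_range(BB))
      if (I.use_empty() && wouldInstructionBeTriviallyDead(&I))
        I.eraseFromParent();
  }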
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
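Together with getLoadStoreAddressSpace and getLoadStorePointerOperand listed above, and getLoadStoreType listed further below, these helpers accept either a LoadInst or a StoreInst; a small sketch (the function name is illustrative):

  #include "llvm/IR/Instructions.h"
  #include <cassert>
  using namespace llvm;

  // Query a memory access uniformly, whether it is a load or a store.
  void describeAccess(Instruction *I) {
    assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && "expected a memory access");
    Type *AccessTy     = getLoadStoreType(I);            // loaded/stored type
    const Value *Ptr   = getLoadStorePointerOperand(I);  // pointer operand
    Align Alignment    = getLoadStoreAlignment(I);       // access alignment
    unsigned AddrSpace = getLoadStoreAddressSpace(I);    // pointer address space
    (void)AccessTy; (void)Ptr; (void)Alignment; (void)AddrSpace;
  }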
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing t...
auto map_range(ContainerTy &&C, FuncTy F)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
pred_iterator pred_begin(BasicBlock *BB)
auto reverse(ContainerTy &&C)
void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
cl::opt< bool > EnableVPlanNativePath("enable-vplan-native-path", cl::Hidden, cl::desc("Enable VPlan-native vectorization path with " "support for outer loop vectorization."))
void sort(IteratorTy Start, IteratorTy End)
std::unique_ptr< VPlan > VPlanPtr
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
cl::opt< bool > EnableLoopVectorization
bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
iterator_range< filter_iterator< detail::IterOfRange< RangeT >, PredicateT > > make_filter_range(RangeT &&Range, PredicateT Pred)
Convenience function that takes a range of elements and a predicate, and return a new filter_iterator...
void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Type * ToVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
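A small sketch combining divideCeil with ToVectorTy (assuming ToVectorTy is available via llvm/Analysis/VectorUtils.h; Ctx and the function name are illustrative):

  #include "llvm/Analysis/VectorUtils.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Type.h"
  #include "llvm/Support/MathExtras.h"
  using namespace llvm;

  void widenExample(LLVMContext &Ctx) {
    // divideCeil(13, 4) == 4: three full groups of four plus one partial group.
    unsigned NumGroups = divideCeil(13u, 4u);
    // i32 widened to <vscale x 4 x i32>.
    Type *WideTy = ToVectorTy(Type::getInt32Ty(Ctx), ElementCount::getScalable(4));
    (void)NumGroups; (void)WideTy;
  }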
static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr, DebugLoc DL={})
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
RecurKind
These are the kinds of recurrences that we support.
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
void setProfileInfoAfterUnrolling(Loop *OrigLoop, Loop *UnrolledLoop, Loop *RemainderLoop, uint64_t UF)
Set weights for UnrolledLoop and RemainderLoop based on weights for OrigLoop and the following distri...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding o...
@ CM_ScalarEpilogueNotAllowedLowTripLoop
@ CM_ScalarEpilogueNotNeededUsePredicate
@ CM_ScalarEpilogueNotAllowedOptSize
@ CM_ScalarEpilogueAllowed
@ CM_ScalarEpilogueNotAllowedUsePredicate
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
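createStepForVF and getRuntimeVF are internal helpers of the vectorizer; the following is only a semantics sketch built on the public IRBuilder API (CreateElementCount), not the actual implementation:

  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  Value *materializeVFStep(IRBuilderBase &B, Type *Ty, ElementCount VF,
                           int64_t Step) {
    // CreateElementCount folds to a constant for fixed VFs and emits
    // vscale-based arithmetic for scalable VFs.
    Value *RuntimeVF = B.CreateElementCount(Ty, VF);
    return B.CreateMul(RuntimeVF, ConstantInt::get(Ty, Step));
  }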
BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
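A small example of the range-based STLExtras helpers listed nearby (is_contained, find_if, count_if, all_of); values and the function name are illustrative:

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/Support/MathExtras.h"
  using namespace llvm;

  void rangeHelpers() {
    SmallVector<int, 4> Vals = {1, 2, 4, 8};
    bool HasFour   = is_contained(Vals, 4);                               // true
    auto FirstEven = find_if(Vals, [](int V) { return V % 2 == 0; });     // -> 2
    auto NumPow2   = count_if(Vals, [](int V) { return isPowerOf2_32(V); }); // 4
    bool AllPos    = all_of(Vals, [](int V) { return V > 0; });           // true
    (void)HasFour; (void)FirstEven; (void)NumPow2; (void)AllPos;
  }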
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer list are equal or the list is empty.
@ DataAndControlFlowWithoutRuntimeCheck
Use predicate to control both data and control flow, but modify the trip count so that a runtime over...
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
unsigned getReciprocalPredBlockProb()
A helper function that returns the reciprocal of the block probability of predicated blocks.
bool hasBranchWeightMD(const Instruction &I)
Checks if an instructions has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
bool verifyVPlanIsValid(const VPlan &Plan)
Verify invariants for general VPlans.
MapVector< Instruction *, uint64_t > computeMinimumValueSizes(ArrayRef< BasicBlock * > Blocks, DemandedBits &DB, const TargetTransformInfo *TTI=nullptr)
Compute a map of integer instructions to their minimum legal type size.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
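A small example combining hash_combine and hash_combine_range (the function name is illustrative):

  #include "llvm/ADT/Hashing.h"
  #include "llvm/ADT/SmallVector.h"
  using namespace llvm;

  hash_code hashParts(unsigned Opcode, const SmallVectorImpl<unsigned> &Operands) {
    // Combine a fixed field with a variable-length sequence into one hash.
    return hash_combine(Opcode,
                        hash_combine_range(Operands.begin(), Operands.end()));
  }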
cl::opt< bool > EnableLoopInterleaving
Implement std::hash so that hash_code can be used in STL containers.
This struct is a compact representation of a valid (non-zero power of two) alignment.
A special type used by analysis passes to provide an address that identifies that particular analysis...
static void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given valu...
Encapsulate information regarding vectorization of a loop and its epilogue.
BasicBlock * SCEVSafetyCheck
BasicBlock * MemSafetyCheck
BasicBlock * MainLoopIterationCountCheck
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF)
BasicBlock * EpilogueIterationCountCheck
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
std::optional< unsigned > MaskPos
A struct that represents some properties of the register usage of a loop.
SmallMapVector< unsigned, unsigned, 4 > MaxLocalUsers
Holds the maximum number of concurrent live intervals in the loop.
SmallMapVector< unsigned, unsigned, 4 > LoopInvariantRegs
Holds the number of loop invariant values that are used in the loop.
LoopVectorizeResult runImpl(Function &F)
bool processLoop(Loop *L)
LoopAccessInfoManager * LAIs
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LoopVectorizePass(LoopVectorizeOptions Opts={})
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
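A minimal sketch of scheduling the pass in a new-pass-manager function pipeline, using the constructor listed above (analysis registration via PassBuilder is assumed to happen elsewhere; the function name is illustrative):

  #include "llvm/IR/PassManager.h"
  #include "llvm/Transforms/Vectorize/LoopVectorize.h"
  using namespace llvm;

  void addLoopVectorize(FunctionPassManager &FPM) {
    LoopVectorizeOptions Opts;            // default options
    FPM.addPass(LoopVectorizePass(Opts));
  }

The same pass can also be exercised from the command line with opt's -passes=loop-vectorize pipeline.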
Storage for information about made changes.
A CRTP mix-in to automatically provide informational APIs needed for passes.
A MapVector that performs no allocations if smaller than a certain size.
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
Struct to hold various analysis needed for cost computations.
LoopVectorizationCostModel & CM
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of al...
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A recipe for handling first-order recurrence phis.
VPIteration represents a single point in the iteration space of the output (vectorized and/or unrolle...
bool isFirstIteration() const
void execute(VPTransformState &State) override
Generate the wide load or gather.
VPValue * getEVL() const
Return the EVL operand.
A recipe for widening load operations, using the address to load from and an optional mask.
A recipe for widening select instructions.
VPValue * getStoredValue() const
Return the value stored by this recipe.
void execute(VPTransformState &State) override
Generate the wide store or scatter.
VPValue * getEVL() const
Return the EVL operand.
A recipe for widening store operations, using the stored value, the address to store to and an option...
TODO: The following VectorizationFactor was pulled out of LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g. due to runtime checks.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
static bool HoistRuntimeChecks