#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME
173 "llvm.loop.vectorize.followup_vectorized";
175 "llvm.loop.vectorize.followup_epilogue";
STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
    cl::desc("Enable vectorization of epilogue loops."));

    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

    cl::desc("The maximum allowed number of runtime memory checks"));
223 "prefer-predicate-over-epilogue",
226 cl::desc(
"Tail-folding and predication preferences over creating a scalar "
230 "Don't tail-predicate loops, create scalar epilogue"),
232 "predicate-else-scalar-epilogue",
233 "prefer tail-folding, create scalar epilogue if tail "
236 "predicate-dont-vectorize",
237 "prefers tail-folding, don't attempt vectorization if "
238 "tail-folding fails.")));
241 "force-tail-folding-style",
cl::desc(
"Force the tail folding style"),
244 clEnumValN(TailFoldingStyle::None,
"none",
"Disable tail folding"),
246 TailFoldingStyle::Data,
"data",
247 "Create lane mask for data only, using active.lane.mask intrinsic"),
248 clEnumValN(TailFoldingStyle::DataWithoutLaneMask,
249 "data-without-lane-mask",
250 "Create lane mask with compare/stepvector"),
251 clEnumValN(TailFoldingStyle::DataAndControlFlow,
"data-and-control",
252 "Create lane mask using active.lane.mask intrinsic, and use "
253 "it for both data and control flow"),
254 clEnumValN(TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck,
255 "data-and-control-without-rt-check",
256 "Similar to data-and-control, but remove the runtime check"),
257 clEnumValN(TailFoldingStyle::DataWithEVL,
"data-with-evl",
258 "Use predicated EVL instructions for tail folding. If EVL "
259 "is unsupported, fallback to data-without-lane-mask.")));
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

    cl::desc(
        "Enable vectorization on masked interleaved memory accesses in a loop"));

    cl::desc("A flag that overrides the target's number of scalar registers."));

    cl::desc("A flag that overrides the target's number of vector registers."));

    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

    cl::desc("Pretend that scalable vectors are supported, even if the target "
             "does not support them. This flag should only be used for "
             "testing."));

    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

    cl::desc("Max number of stores to be predicated behind an if."));

    cl::desc("Count the induction variable only once when interleaving"));

    cl::desc("Enable if predication of stores during vectorization."));

    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

    cl::desc("Prefer in-loop vector reductions, "
             "overriding the targets preference."));

    cl::desc("Enable the vectorisation of loops with in-order (strict) "
             "FP reductions"));

    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

    cl::desc("Enable loop interleaving in Loop vectorization passes"));

    cl::desc("Run the Loop vectorization passes"));

    cl::desc("Use dot format instead of plain text when dumping VPlans"));

    "force-widen-divrem-via-safe-divisor", cl::Hidden,
    cl::desc(
        "Override cost based safe divisor widening for div/rem instructions"));

    "vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true),
    cl::desc("Try wider VFs if they enable the use of vector variants"));
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
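// Example of the distinction checked above: i1 has a type size of 1 bit but
// an alloc (store) size of 8 bits, so it is "irregular" in this sense, while
// i32 has matching 32-bit sizes and a vector of it can be cast to and from a
// wide integer without padding concerns.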
class GeneratedRTChecks;

  this->MinProfitableTripCount = VecWidth;

  virtual std::pair<BasicBlock *, Value *>

      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

      const SCEV2ValueTy &ExpandedSCEVs,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

         "A high UF for the epilogue loop is likely not beneficial.");
                          GeneratedRTChecks &Checks)
                            EPI.MainLoopVF, EPI.MainLoopVF, EPI.MainLoopUF, LVL,

                          const SCEV2ValueTy &ExpandedSCEVs) final {

  virtual std::pair<BasicBlock *, Value *>

                          GeneratedRTChecks &Check)

  std::pair<BasicBlock *, Value *>

                          GeneratedRTChecks &Checks)

  std::pair<BasicBlock *, Value *>
  if (I->getDebugLoc() != Empty)
    return I->getDebugLoc();

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst->getDebugLoc();
  }

  return I->getDebugLoc();
  dbgs() << "LV: " << Prefix << DebugMsg;

    CodeRegion = I->getParent();
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  return B.CreateElementCount(Ty, VF);
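// For a fixed VF the element count above folds to a plain constant (e.g.
// i64 4); for a scalable VF it expands to roughly the following IR (shown
// only as an illustration of the shape, not the exact output):
//   %vscale = call i64 @llvm.vscale.i64()
//   %ec     = mul i64 %vscale, 4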
  assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && "Invalid loop count");
                    << "loop not vectorized: " << OREMsg);

      "Vectorizing: ", TheLoop->isInnermost() ? "innermost loop" : "outer loop",

           << "vectorized " << LoopType << "loop (vectorization width: "
           << ore::NV("VectorizationFactor", VF)
           << ", interleaved count: " << ore::NV("InterleaveCount", IC) << ")";
1114 "Profitable to scalarize relevant only for VF > 1.");
1117 "cost-model should not be used for outer loops (in VPlan-native path)");
1119 auto Scalars = InstsToScalarize.find(VF);
1120 assert(Scalars != InstsToScalarize.end() &&
1121 "VF not yet analyzed for scalarization profitability");
1122 return Scalars->second.contains(
I);
1129 "cost-model should not be used for outer loops (in VPlan-native path)");
1133 if (isa<PseudoProbeInst>(
I))
1139 auto UniformsPerVF = Uniforms.find(VF);
1140 assert(UniformsPerVF != Uniforms.end() &&
1141 "VF not yet analyzed for uniformity");
1142 return UniformsPerVF->second.count(
I);
1149 "cost-model should not be used for outer loops (in VPlan-native path)");
1153 auto ScalarsPerVF = Scalars.find(VF);
1154 assert(ScalarsPerVF != Scalars.end() &&
1155 "Scalar values are not calculated for VF");
1156 return ScalarsPerVF->second.count(
I);
    return VF.isVector() && MinBWs.contains(I) &&

    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);

    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
        WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);

           "cost-model should not be used for outer loops (in VPlan-native path)");
    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
    return Itr->second.first;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
                               std::optional<unsigned> MaskPos,
    CallWideningDecisions[std::make_pair(CI, VF)] = {Kind, Variant, IID,

    return CallWideningDecisions.at(std::make_pair(CI, VF));

    auto *Trunc = dyn_cast<TruncInst>(I);
    Value *Op = Trunc->getOperand(0);

    if (VF.isScalar() || Uniforms.contains(VF))
    collectLoopUniforms(VF);
    collectLoopScalars(VF);

    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);

      const RecurrenceDescriptor &RdxDesc = Reduction.second;
      return TTI.isLegalToVectorizeReduction(RdxDesc, VF);

    return ScalarCost < SafeDivisorCost;
  std::pair<InstructionCost, InstructionCost>

      LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");

      LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: multiple exits\n");

                           "interleaved group requires scalar epilogue\n");

      LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");

    auto RequiresScalarEpilogue = [this](ElementCount VF) {
    bool IsRequired = all_of(Range, RequiresScalarEpilogue);
           (IsRequired || none_of(Range, RequiresScalarEpilogue)) &&
           "all VFs in range must agree on whether a scalar epilogue is required");
    if (!ChosenTailFoldingStyle)
    return IVUpdateMayOverflow ? ChosenTailFoldingStyle->first
                               : ChosenTailFoldingStyle->second;

    assert(!ChosenTailFoldingStyle && "Tail folding must not be selected yet.");
      ChosenTailFoldingStyle =
    ChosenTailFoldingStyle = std::make_pair(

        IsScalableVF && UserIC <= 1 &&
      ChosenTailFoldingStyle =
                 << "LV: Preference for VP intrinsics indicated. Will "
                    "not try to generate VP Intrinsics "
                        ? "since interleave count specified is greater than 1.\n"
                        : "due to non-interleaving reasons.\n"));

    return InLoopReductions.contains(Phi);

    WideningDecisions.clear();
    CallWideningDecisions.clear();
  std::optional<InstructionCost>

  unsigned NumPredStores = 0;

                                     bool FoldTailByMasking);

  ElementCount getMaximizedVFForTarget(unsigned MaxTripCount,
                                       unsigned SmallestType,
                                       unsigned WidestType,
                                       bool FoldTailByMasking);

  bool isScalableVectorizationAllowed();

  ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);

      PredicatedBBsAfterVectorization;

  std::optional<std::pair<TailFoldingStyle, TailFoldingStyle>>
      ChosenTailFoldingStyle;

  std::optional<bool> IsScalableVectorizationAllowed;

                                   ScalarCostsTy &ScalarCosts,

                          std::pair<InstWidening, InstructionCost>>;
  DecisionList WideningDecisions;

  using CallDecisionList =
  CallDecisionList CallWideningDecisions;

        Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
class GeneratedRTChecks {
  Value *SCEVCheckCond = nullptr;

  Value *MemRuntimeCheckCond = nullptr;

  bool CostTooHigh = false;
  const bool AddBranchWeights;

  Loop *OuterLoop = nullptr;

                    bool AddBranchWeights)
      : DT(DT), LI(LI), TTI(TTI), SCEVExp(SE, DL, "scev.check"),
        MemCheckExp(SE, DL, "scev.check"), AddBranchWeights(AddBranchWeights) {}
                   nullptr, "vector.scevcheck");

    if (RtPtrChecking.Need) {
      auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
      MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,

      auto DiffChecks = RtPtrChecking.getDiffChecks();
        Value *RuntimeVF = nullptr;
          RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);

            MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),
      assert(MemRuntimeCheckCond &&
             "no RT checks generated although RtPtrChecking "
             "claimed checks are required");
    if (!MemCheckBlock && !SCEVCheckBlock)

    if (SCEVCheckBlock) {
    if (MemCheckBlock) {

    if (MemCheckBlock) {
    if (SCEVCheckBlock) {

    OuterLoop = L->getParentLoop();

    if (SCEVCheckBlock || MemCheckBlock)

        if (SCEVCheckBlock->getTerminator() == &I)

    if (MemCheckBlock) {
        if (MemCheckBlock->getTerminator() == &I)
      unsigned BestTripCount = 2;

        BestTripCount = SmallTC;
        BestTripCount = *EstimatedTC;

      BestTripCount = std::max(BestTripCount, 1U);

      NewMemCheckCost = std::max(*NewMemCheckCost.getValue(),

      if (BestTripCount > 1)
        LLVM_DEBUG(dbgs()
                   << "We expect runtime memory checks to be hoisted "
                   << "out of the outer loop. Cost reduced from "
                   << MemCheckCost << " to " << NewMemCheckCost << '\n');

      MemCheckCost = NewMemCheckCost;

    RTCheckCost += MemCheckCost;

    if (SCEVCheckBlock || MemCheckBlock)
      LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
  ~GeneratedRTChecks() {
      SCEVCleaner.markResultUsed();

    if (!MemRuntimeCheckCond)
      MemCheckCleaner.markResultUsed();

    if (MemRuntimeCheckCond) {
      auto &SE = *MemCheckExp.getSE();
          I.eraseFromParent();

    MemCheckCleaner.cleanup();
    SCEVCleaner.cleanup();

      SCEVCheckBlock->eraseFromParent();
    if (MemRuntimeCheckCond)
      MemCheckBlock->eraseFromParent();
    SCEVCheckCond = nullptr;
    if (auto *C = dyn_cast<ConstantInt>(Cond))

    SCEVCheckBlock->getTerminator()->eraseFromParent();
    SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,

    if (AddBranchWeights)
    return SCEVCheckBlock;

    if (!MemRuntimeCheckCond)

    MemCheckBlock->moveBefore(LoopVectorPreHeader);

    if (AddBranchWeights) {
    MemCheckBlock->getTerminator()->setDebugLoc(
        Pred->getTerminator()->getDebugLoc());

    MemRuntimeCheckCond = nullptr;
    return MemCheckBlock;
static bool useActiveLaneMask(TailFoldingStyle Style) {
  return Style == TailFoldingStyle::Data ||
         Style == TailFoldingStyle::DataAndControlFlow ||
         Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
}

static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style) {
  return Style == TailFoldingStyle::DataAndControlFlow ||
         Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
}
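// Quick reference for the two predicates above: every style accepted by
// useActiveLaneMask() masks the data path (loads/stores) with a lane mask,
// while the subset accepted by useActiveLaneMaskForControlFlow() additionally
// uses that mask in place of the canonical induction-variable compare that
// controls the latch branch.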
  LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");

  LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "

  if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {

  for (Loop *InnerL : L)
                          ? B.CreateSExtOrTrunc(Index, StepTy)
                          : B.CreateCast(Instruction::SIToFP, Index, StepTy);
  if (CastedIndex != Index) {
    Index = CastedIndex;
  }

    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
    if (auto *CY = dyn_cast<ConstantInt>(Y))
    return B.CreateAdd(X, Y);

    assert(X->getType()->getScalarType() == Y->getType() &&
           "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
    if (auto *CY = dyn_cast<ConstantInt>(Y))
    VectorType *XVTy = dyn_cast<VectorType>(X->getType());
    if (XVTy && !isa<VectorType>(Y->getType()))
      Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
    return B.CreateMul(X, Y);
  switch (InductionKind) {
           "Vector indices not supported for integer inductions yet");
           "Index type does not match StartValue type");
    if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
      return B.CreateSub(StartValue, Index);

           "Vector indices not supported for FP inductions yet");
    assert(InductionBinOp &&
           (InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub) &&
           "Original bin op should be defined for FP induction");
    return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,

  if (F.hasFnAttribute(Attribute::VScaleRange))
    return F.getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();

  return std::nullopt;
                                       ElementCount VF,
                                       std::optional<unsigned> UF = std::nullopt) {
  unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);

  Type *IdxTy = Cost->Legal->getWidestInductionType();
  APInt MaxUIntTripCount = cast<IntegerType>(IdxTy)->getMask();

  if (unsigned TC =
          Cost->PSE.getSE()->getSmallConstantMaxTripCount(Cost->TheLoop)) {
    uint64_t MaxVF = VF.getKnownMinValue();
    if (VF.isScalable()) {
      std::optional<unsigned> MaxVScale =
          getMaxVScale(*Cost->TheFunction, Cost->TTI);
      if (!MaxVScale)
        return false;
      MaxVF *= *MaxVScale;
    }
    return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
  }
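// Worked example with illustrative numbers: for an i32 induction type,
// MaxUIntTripCount is 2^32 - 1. With VF = 4, UF = 2 and a known maximum trip
// count TC = 1000, the check asks whether (2^32 - 1) - 1000 > 4 * 2, which
// holds, so the induction increment cannot wrap and the runtime overflow
// check is known to be false. Only a TC within VF * UF of the type's maximum
// value makes the predicate fail.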
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");

  if (isa<NoAliasScopeDeclInst>(Instr))

  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Cloned->setName(Instr->getName() + ".cloned");
         "inferred type and type from generated instructions do not match");

  if (auto DL = Instr->getDebugLoc())

    auto InputInstance = Instance;
    Cloned->setOperand(I.index(), State.get(Operand, InputInstance));

  State.set(RepRecipe, Cloned, Instance);

  if (auto *II = dyn_cast<AssumeInst>(Cloned))

  if (IfPredicateInstr)

  if (Cost->foldTailByMasking()) {
           "VF*UF must be a power of 2 when folding tail by masking");
  auto CreateStep = [&]() -> Value * {

  Value *MaxUIntTripCount =
      ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask());

         "TC check is expected to dominate Bypass");

  if (!SCEVCheckBlock)
         "Cannot SCEV check stride or overflow when optimizing for size");

  return SCEVCheckBlock;

         "Cannot emit memory checks when optimizing for size, unless forced "

             << "Code-size may be reduced by not forcing "
                "vectorization, or by source-code modifications "
                "eliminating the need for runtime checks "
                "(e.g., adding 'restrict').";

  return MemCheckBlock;

         "multiple exit loop without required epilogue?");

                 LI, nullptr, Twine(Prefix) + "middle.block");
                 nullptr, Twine(Prefix) + "scalar.ph");
    std::pair<BasicBlock *, Value *> AdditionalBypass) {

  Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
  if (OrigPhi == OldInduction) {

    if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
      B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());

                                 Step, II.getKind(), II.getInductionBinOp());

    if (AdditionalBypass.first) {
      B.SetInsertPoint(AdditionalBypass.first,
                       AdditionalBypass.first->getFirstInsertionPt());
      EndValueFromAdditionalBypass =
                                 Step, II.getKind(), II.getInductionBinOp());
      EndValueFromAdditionalBypass->setName("ind.end");

  if (AdditionalBypass.first)
                             EndValueFromAdditionalBypass);
                                const SCEV2ValueTy &ExpandedSCEVs) {
  const SCEV *Step = ID.getStep();
  if (auto *C = dyn_cast<SCEVConstant>(Step))
    return C->getValue();
  if (auto *U = dyn_cast<SCEVUnknown>(Step))
    return U->getValue();
  auto I = ExpandedSCEVs.find(Step);
  assert(I != ExpandedSCEVs.end() && "SCEV must be expanded at this point");

    const SCEV2ValueTy &ExpandedSCEVs,
    std::pair<BasicBlock *, Value *> AdditionalBypass) {
  assert(((AdditionalBypass.first && AdditionalBypass.second) ||
          (!AdditionalBypass.first && !AdditionalBypass.second)) &&
         "Inconsistent information about additional bypass.");

    PHINode *OrigPhi = InductionEntry.first;
std::pair<BasicBlock *, Value *>
                                          const SCEV2ValueTy &ExpandedSCEVs) {

    assert(isa<PHINode>(UI) && "Expected LCSSA form");
    MissingVals[UI] = EndValue;

    auto *UI = cast<Instruction>(U);
    assert(isa<PHINode>(UI) && "Expected LCSSA form");

      if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
        B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());

      Value *CountMinusOne = B.CreateSub(
      CountMinusOne->setName("cmo");

      assert(StepVPV && "step must have been expanded during VPlan execution");
                                  : State.get(StepVPV, {0, 0});
                                  II.getKind(), II.getInductionBinOp());
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;

  for (auto &I : MissingVals) {
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1) {
      PHI->addIncoming(I.second, MiddleBlock);
struct CSEDenseMapInfo {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);

    assert(canHandle(I) && "Unknown instruction!");
                                             I->value_op_end()));

    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
    return LHS->isIdenticalTo(RHS);

    if (!CSEDenseMapInfo::canHandle(&In))

      In.replaceAllUsesWith(V);
      In.eraseFromParent();
2890 return CallWideningDecisions.at(std::make_pair(CI, VF)).Cost;
2899 for (
auto &ArgOp : CI->
args())
2908 return std::min(ScalarCallCost, IntrinsicCost);
2910 return ScalarCallCost;
2923 assert(
ID &&
"Expected intrinsic call!");
2926 if (
auto *FPMO = dyn_cast<FPMathOperator>(CI))
2927 FMF = FPMO->getFastMathFlags();
2933 std::back_inserter(ParamTys),
2934 [&](
Type *Ty) { return MaybeVectorizeType(Ty, VF); });
2937 dyn_cast<IntrinsicInst>(CI));
2958 for (
PHINode &PN : Exit->phis())
2986 KV.second->fixPhi(Plan, State);
3026 auto isBlockOfUsePredicated = [&](
Use &U) ->
bool {
3027 auto *
I = cast<Instruction>(U.getUser());
3029 if (
auto *Phi = dyn_cast<PHINode>(
I))
3030 BB = Phi->getIncomingBlock(
3032 return BB == PredBB;
3043 Worklist.
insert(InstsToReanalyze.
begin(), InstsToReanalyze.
end());
3044 InstsToReanalyze.
clear();
3047 while (!Worklist.
empty()) {
3053 if (!
I || isa<PHINode>(
I) || !VectorLoop->contains(
I) ||
3054 I->mayHaveSideEffects() ||
I->mayReadFromMemory())
3062 if (
I->getParent() == PredBB) {
3063 Worklist.
insert(
I->op_begin(),
I->op_end());
3077 I->moveBefore(&*PredBB->getFirstInsertionPt());
3078 Worklist.
insert(
I->op_begin(),
I->op_end());
3090 for (
VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
3095 PHINode *NewPhi = cast<PHINode>(State.
get(VPPhi, 0));
3107void LoopVectorizationCostModel::collectLoopScalars(
ElementCount VF) {
3112 "This function should not be visited twice for the same VF");
3118 Scalars[VF].
insert(Uniforms[VF].begin(), Uniforms[VF].end());
3137 "Widening decision should be ready at this moment");
3138 if (
auto *Store = dyn_cast<StoreInst>(MemAccess))
3139 if (
Ptr == Store->getValueOperand())
3142 "Ptr is neither a value or pointer operand");
3148 auto isLoopVaryingBitCastOrGEP = [&](
Value *
V) {
3149 return ((isa<BitCastInst>(V) &&
V->getType()->isPointerTy()) ||
3150 isa<GetElementPtrInst>(V)) &&
3161 if (!isLoopVaryingBitCastOrGEP(
Ptr))
3166 auto *
I = cast<Instruction>(
Ptr);
3174 return isa<LoadInst>(U) || isa<StoreInst>(U);
3178 PossibleNonScalarPtrs.
insert(
I);
3196 for (
auto &
I : *BB) {
3197 if (
auto *Load = dyn_cast<LoadInst>(&
I)) {
3198 evaluatePtrUse(Load,
Load->getPointerOperand());
3199 }
else if (
auto *Store = dyn_cast<StoreInst>(&
I)) {
3200 evaluatePtrUse(Store,
Store->getPointerOperand());
3201 evaluatePtrUse(Store,
Store->getValueOperand());
3204 for (
auto *
I : ScalarPtrs)
3205 if (!PossibleNonScalarPtrs.
count(
I)) {
3213 auto ForcedScalar = ForcedScalars.
find(VF);
3214 if (ForcedScalar != ForcedScalars.
end())
3215 for (
auto *
I : ForcedScalar->second) {
3216 LLVM_DEBUG(
dbgs() <<
"LV: Found (forced) scalar instruction: " << *
I <<
"\n");
3225 while (
Idx != Worklist.
size()) {
3227 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
3229 auto *Src = cast<Instruction>(Dst->getOperand(0));
3231 auto *J = cast<Instruction>(U);
3232 return !TheLoop->contains(J) || Worklist.count(J) ||
3233 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
3234 isScalarUse(J, Src));
3237 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *Src <<
"\n");
3244 auto *Ind = Induction.first;
3245 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
3254 auto IsDirectLoadStoreFromPtrIndvar = [&](
Instruction *Indvar,
3256 return Induction.second.getKind() ==
3258 (isa<LoadInst>(
I) || isa<StoreInst>(
I)) &&
3265 auto *I = cast<Instruction>(U);
3266 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3267 IsDirectLoadStoreFromPtrIndvar(Ind, I);
3275 auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
3281 auto ScalarIndUpdate =
3283 auto *I = cast<Instruction>(U);
3284 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
3285 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
3287 if (!ScalarIndUpdate)
3292 Worklist.
insert(IndUpdate);
3293 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *Ind <<
"\n");
3294 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *IndUpdate
3308 switch(
I->getOpcode()) {
3311 case Instruction::Call:
3314 return CallWideningDecisions.at(std::make_pair(cast<CallInst>(
I), VF))
3316 case Instruction::Load:
3317 case Instruction::Store: {
3329 case Instruction::UDiv:
3330 case Instruction::SDiv:
3331 case Instruction::SRem:
3332 case Instruction::URem: {
3348 switch(
I->getOpcode()) {
3351 case Instruction::Load:
3352 case Instruction::Store: {
3365 (isa<LoadInst>(
I) ||
3366 (isa<StoreInst>(
I) &&
3372 case Instruction::UDiv:
3373 case Instruction::SDiv:
3374 case Instruction::SRem:
3375 case Instruction::URem:
3379 case Instruction::Call:
std::pair<InstructionCost, InstructionCost>
  assert(I->getOpcode() == Instruction::UDiv ||
         I->getOpcode() == Instruction::SDiv ||
         I->getOpcode() == Instruction::SRem ||
         I->getOpcode() == Instruction::URem);

    ScalarizationCost = 0;
    ScalarizationCost += getScalarizationOverhead(I, VF, CostKind);

      Instruction::Select, VecTy,

  Value *Op2 = I->getOperand(1);

      {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},

  return {ScalarizationCost, SafeDivisorCost};
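// The pair returned above feeds the comparison "ScalarCost < SafeDivisorCost"
// seen earlier in the cost model: scalarizing keeps each divide in scalar
// form behind per-lane predication, while the safe-divisor strategy selects a
// known-safe divisor (e.g. 1) for masked-off lanes and then issues a single
// vector divide, paying for the extra selects instead of the scalarization
// overhead.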
3454 "Decision should not be set yet.");
3456 assert(Group &&
"Must have a group.");
3460 auto &
DL =
I->getDataLayout();
3467 unsigned InterleaveFactor = Group->getFactor();
3468 bool ScalarNI =
DL.isNonIntegralPointerType(ScalarTy);
3469 for (
unsigned i = 0; i < InterleaveFactor; i++) {
3474 bool MemberNI =
DL.isNonIntegralPointerType(
MemberTy);
3476 if (MemberNI != ScalarNI) {
3479 }
else if (MemberNI && ScalarNI &&
3480 ScalarTy->getPointerAddressSpace() !=
3481 MemberTy->getPointerAddressSpace()) {
3491 bool PredicatedAccessRequiresMasking =
3494 bool LoadAccessWithGapsRequiresEpilogMasking =
3495 isa<LoadInst>(
I) && Group->requiresScalarEpilogue() &&
3497 bool StoreAccessWithGapsRequiresMasking =
3498 isa<StoreInst>(
I) && (Group->getNumMembers() < Group->getFactor());
3499 if (!PredicatedAccessRequiresMasking &&
3500 !LoadAccessWithGapsRequiresEpilogMasking &&
3501 !StoreAccessWithGapsRequiresMasking)
3508 "Masked interleave-groups for predicated accesses are not enabled.");
3510 if (Group->isReverse())
3522 assert((isa<LoadInst, StoreInst>(
I)) &&
"Invalid memory instruction");
3538 auto &
DL =
I->getDataLayout();
3545void LoopVectorizationCostModel::collectLoopUniforms(
ElementCount VF) {
3552 "This function should not be visited twice for the same VF");
3556 Uniforms[VF].
clear();
3564 auto isOutOfScope = [&](
Value *V) ->
bool {
3576 auto addToWorklistIfAllowed = [&](
Instruction *
I) ->
void {
3577 if (isOutOfScope(
I)) {
3584 dbgs() <<
"LV: Found not uniform due to requiring predication: " << *
I
3588 LLVM_DEBUG(
dbgs() <<
"LV: Found uniform instruction: " << *
I <<
"\n");
3598 auto *
Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
3600 addToWorklistIfAllowed(Cmp);
3609 if (PrevVF.isVector()) {
3610 auto Iter = Uniforms.
find(PrevVF);
3611 if (Iter != Uniforms.
end() && !Iter->second.contains(
I))
3616 if (isa<LoadInst>(
I))
3627 "Widening decision should be ready at this moment");
3629 if (isUniformMemOpUse(
I))
3632 return (WideningDecision ==
CM_Widen ||
3641 if (isa<StoreInst>(
I) &&
I->getOperand(0) ==
Ptr)
3657 for (
auto &
I : *BB) {
3659 switch (
II->getIntrinsicID()) {
3660 case Intrinsic::sideeffect:
3661 case Intrinsic::experimental_noalias_scope_decl:
3662 case Intrinsic::assume:
3663 case Intrinsic::lifetime_start:
3664 case Intrinsic::lifetime_end:
3666 addToWorklistIfAllowed(&
I);
3675 if (
auto *EVI = dyn_cast<ExtractValueInst>(&
I)) {
3676 assert(isOutOfScope(EVI->getAggregateOperand()) &&
3677 "Expected aggregate value to be loop invariant");
3678 addToWorklistIfAllowed(EVI);
3687 if (isUniformMemOpUse(&
I))
3688 addToWorklistIfAllowed(&
I);
3690 if (isVectorizedMemAccessUse(&
I,
Ptr))
3697 for (
auto *V : HasUniformUse) {
3698 if (isOutOfScope(V))
3700 auto *
I = cast<Instruction>(V);
3701 auto UsersAreMemAccesses =
3703 return isVectorizedMemAccessUse(cast<Instruction>(U), V);
3705 if (UsersAreMemAccesses)
3706 addToWorklistIfAllowed(
I);
3713 while (idx != Worklist.
size()) {
3716 for (
auto *OV :
I->operand_values()) {
3718 if (isOutOfScope(OV))
3722 auto *
OP = dyn_cast<PHINode>(OV);
3727 auto *OI = cast<Instruction>(OV);
3729 auto *J = cast<Instruction>(U);
3730 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
3732 addToWorklistIfAllowed(OI);
3744 auto *Ind = Induction.first;
3745 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
3750 auto *I = cast<Instruction>(U);
3751 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3752 isVectorizedMemAccessUse(I, Ind);
3759 auto UniformIndUpdate =
3761 auto *I = cast<Instruction>(U);
3762 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
3763 isVectorizedMemAccessUse(I, IndUpdate);
3765 if (!UniformIndUpdate)
3769 addToWorklistIfAllowed(Ind);
3770 addToWorklistIfAllowed(IndUpdate);
3781 "runtime pointer checks needed. Enable vectorization of this "
3782 "loop with '#pragma clang loop vectorize(enable)' when "
3783 "compiling with -Os/-Oz",
3784 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
3790 "runtime SCEV checks needed. Enable vectorization of this "
3791 "loop with '#pragma clang loop vectorize(enable)' when "
3792 "compiling with -Os/-Oz",
3793 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
3800 "runtime stride == 1 checks needed. Enable vectorization of "
3801 "this loop without such check by compiling with -Os/-Oz",
3802 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
3809bool LoopVectorizationCostModel::isScalableVectorizationAllowed() {
3810 if (IsScalableVectorizationAllowed)
3811 return *IsScalableVectorizationAllowed;
3813 IsScalableVectorizationAllowed =
false;
3819 "ScalableVectorizationDisabled",
ORE,
TheLoop);
3823 LLVM_DEBUG(
dbgs() <<
"LV: Scalable vectorization is available\n");
3826 std::numeric_limits<ElementCount::ScalarTy>::max());
3837 "Scalable vectorization not supported for the reduction "
3838 "operations found in this loop.",
3850 "for all element types found in this loop.",
3857 "for safe distance analysis.",
3862 IsScalableVectorizationAllowed =
true;
3867LoopVectorizationCostModel::getMaxLegalScalableVF(
unsigned MaxSafeElements) {
3868 if (!isScalableVectorizationAllowed())
3872 std::numeric_limits<ElementCount::ScalarTy>::max());
3874 return MaxScalableVF;
3882 "Max legal vector width too small, scalable vectorization "
3886 return MaxScalableVF;
3890 unsigned MaxTripCount,
ElementCount UserVF,
bool FoldTailByMasking) {
3892 unsigned SmallestType, WidestType;
3899 unsigned MaxSafeElements =
3903 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
3905 LLVM_DEBUG(
dbgs() <<
"LV: The max safe fixed VF is: " << MaxSafeFixedVF
3907 LLVM_DEBUG(
dbgs() <<
"LV: The max safe scalable VF is: " << MaxSafeScalableVF
3912 auto MaxSafeUserVF =
3913 UserVF.
isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
3930 <<
" is unsafe, clamping to max safe VF="
3931 << MaxSafeFixedVF <<
".\n");
3936 <<
"User-specified vectorization factor "
3937 <<
ore::NV(
"UserVectorizationFactor", UserVF)
3938 <<
" is unsafe, clamping to maximum safe vectorization factor "
3939 <<
ore::NV(
"VectorizationFactor", MaxSafeFixedVF);
3941 return MaxSafeFixedVF;
3946 <<
" is ignored because scalable vectors are not "
3952 <<
"User-specified vectorization factor "
3953 <<
ore::NV(
"UserVectorizationFactor", UserVF)
3954 <<
" is ignored because the target does not support scalable "
3955 "vectors. The compiler will pick a more suitable value.";
3959 <<
" is unsafe. Ignoring scalable UserVF.\n");
3964 <<
"User-specified vectorization factor "
3965 <<
ore::NV(
"UserVectorizationFactor", UserVF)
3966 <<
" is unsafe. Ignoring the hint to let the compiler pick a "
3967 "more suitable value.";
3972 LLVM_DEBUG(
dbgs() <<
"LV: The Smallest and Widest types: " << SmallestType
3973 <<
" / " << WidestType <<
" bits.\n");
3978 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3979 MaxSafeFixedVF, FoldTailByMasking))
3983 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3984 MaxSafeScalableVF, FoldTailByMasking))
3985 if (MaxVF.isScalable()) {
3986 Result.ScalableVF = MaxVF;
3987 LLVM_DEBUG(
dbgs() <<
"LV: Found feasible scalable VF = " << MaxVF
4000 "Not inserting runtime ptr check for divergent target",
4001 "runtime pointer checks needed. Not enabled for divergent target",
4002 "CantVersionLoopWithDivergentTarget",
ORE,
TheLoop);
4011 "loop trip count is one, irrelevant for vectorization",
4016 switch (ScalarEpilogueStatus) {
4018 return computeFeasibleMaxVF(MaxTC, UserVF,
false);
4023 dbgs() <<
"LV: vector predicate hint/switch found.\n"
4024 <<
"LV: Not allowing scalar epilogue, creating predicated "
4025 <<
"vector loop.\n");
4032 dbgs() <<
"LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
4034 LLVM_DEBUG(
dbgs() <<
"LV: Not allowing scalar epilogue due to low trip "
4053 LLVM_DEBUG(
dbgs() <<
"LV: Cannot fold tail by masking: vectorize with a "
4054 "scalar epilogue instead.\n");
4056 return computeFeasibleMaxVF(MaxTC, UserVF,
false);
4067 "No decisions should have been taken at this point");
4077 std::optional<unsigned> MaxPowerOf2RuntimeVF =
4082 MaxPowerOf2RuntimeVF = std::max<unsigned>(
4083 *MaxPowerOf2RuntimeVF,
4086 MaxPowerOf2RuntimeVF = std::nullopt;
4089 if (MaxPowerOf2RuntimeVF && *MaxPowerOf2RuntimeVF > 0) {
4091 "MaxFixedVF must be a power of 2");
4092 unsigned MaxVFtimesIC =
4093 UserIC ? *MaxPowerOf2RuntimeVF * UserIC : *MaxPowerOf2RuntimeVF;
4097 BackedgeTakenCount, SE->
getOne(BackedgeTakenCount->
getType()));
4103 LLVM_DEBUG(
dbgs() <<
"LV: No tail will remain for any chosen VF.\n");
4117 <<
"LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
4118 "try to generate VP Intrinsics with scalable vector "
4124 "Expected scalable vector factor.");
4134 LLVM_DEBUG(
dbgs() <<
"LV: Cannot fold tail by masking: vectorize with a "
4135 "scalar epilogue instead.\n");
4141 LLVM_DEBUG(
dbgs() <<
"LV: Can't fold tail by masking: don't vectorize\n");
4147 "Unable to calculate the loop count due to complex control flow",
4148 "unable to calculate the loop count due to complex control flow",
4154 "Cannot optimize for size and vectorize at the same time.",
4155 "cannot optimize for size and vectorize at the same time. "
4156 "Enable vectorization of this loop with '#pragma clang loop "
4157 "vectorize(enable)' when compiling with -Os/-Oz",
4162ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
4163 unsigned MaxTripCount,
unsigned SmallestType,
unsigned WidestType,
4165 bool ComputeScalableMaxVF = MaxSafeVF.
isScalable();
4173 "Scalable flags must match");
4181 ComputeScalableMaxVF);
4182 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
4184 << (MaxVectorElementCount * WidestType) <<
" bits.\n");
4186 if (!MaxVectorElementCount) {
4188 << (ComputeScalableMaxVF ?
"scalable" :
"fixed")
4189 <<
" vector registers.\n");
4193 unsigned WidestRegisterMinEC = MaxVectorElementCount.getKnownMinValue();
4194 if (MaxVectorElementCount.isScalable() &&
4198 WidestRegisterMinEC *= Min;
4207 if (MaxTripCount && MaxTripCount <= WidestRegisterMinEC &&
4215 LLVM_DEBUG(
dbgs() <<
"LV: Clamping the MaxVF to maximum power of two not "
4216 "exceeding the constant trip count: "
4217 << ClampedUpperTripCount <<
"\n");
4219 ClampedUpperTripCount,
4220 FoldTailByMasking ? MaxVectorElementCount.isScalable() :
false);
4233 ComputeScalableMaxVF);
4234 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
4248 for (
int I = RUs.size() - 1;
I >= 0; --
I) {
4249 const auto &MLU = RUs[
I].MaxLocalUsers;
4250 if (
all_of(MLU, [&](
decltype(MLU.front()) &LU) {
4251 return LU.second <= TTI.getNumberOfRegisters(LU.first);
4261 <<
") with target's minimum: " << MinVF <<
'\n');
4277static std::optional<unsigned>
4279 const Function *Fn = L->getHeader()->getParent();
4283 auto Max = Attr.getVScaleRangeMax();
4284 if (Max && Min == Max)
bool LoopVectorizationPlanner::isMoreProfitable(
  unsigned EstimatedWidthA = A.Width.getKnownMinValue();
  unsigned EstimatedWidthB = B.Width.getKnownMinValue();
    if (A.Width.isScalable())
      EstimatedWidthA *= *VScale;
    if (B.Width.isScalable())
      EstimatedWidthB *= *VScale;

      A.Width.isScalable() && !B.Width.isScalable();

    return CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);

  auto GetCostForTC = [MaxTripCount, this](unsigned VF,
      return VectorCost * divideCeil(MaxTripCount, VF);
    return VectorCost * (MaxTripCount / VF) + ScalarCost * (MaxTripCount % VF);

  auto RTCostA = GetCostForTC(EstimatedWidthA, CostA, A.ScalarCost);
  auto RTCostB = GetCostForTC(EstimatedWidthB, CostB, B.ScalarCost);
  return CmpFn(RTCostA, RTCostB);
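// Worked example with illustrative numbers: with no known trip count, a VF=4
// plan costing 20 beats a VF=8 plan costing 44 because 20 * 8 < 44 * 4
// (per-lane cost 5 vs 5.5, compared by cross-multiplication to avoid
// division). With MaxTripCount = 10 and no tail folding, GetCostForTC instead
// charges the vector body for the whole chunks and the scalar loop for the
// remainder, e.g. for VF=4: VectorCost * (10 / 4) + ScalarCost * (10 % 4).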
4349 if (InvalidCosts.
empty())
4356 std::map<Instruction *, unsigned> Numbering;
4358 for (
auto &Pair : InvalidCosts)
4359 if (!Numbering.count(Pair.first))
4360 Numbering[Pair.first] =
I++;
4364 if (Numbering[
A.first] != Numbering[
B.first])
4365 return Numbering[
A.first] < Numbering[
B.first];
4366 const auto &
LHS =
A.second;
4367 const auto &
RHS =
B.second;
4368 return std::make_tuple(
LHS.isScalable(),
LHS.getKnownMinValue()) <
4369 std::make_tuple(
RHS.isScalable(),
RHS.getKnownMinValue());
4381 Subset =
Tail.take_front(1);
4390 if (Subset ==
Tail ||
Tail[Subset.size()].first !=
I) {
4391 std::string OutString;
4393 assert(!Subset.empty() &&
"Unexpected empty range");
4394 OS <<
"Instruction with invalid costs prevented vectorization at VF=(";
4395 for (
const auto &Pair : Subset)
4396 OS << (Pair.second == Subset.front().second ?
"" :
", ") << Pair.second;
4398 if (
auto *CI = dyn_cast<CallInst>(
I))
4399 OS <<
" call to " << CI->getCalledFunction()->getName();
4401 OS <<
" " <<
I->getOpcodeName();
4404 Tail =
Tail.drop_front(Subset.size());
4408 Subset =
Tail.take_front(Subset.size() + 1);
4409 }
while (!
Tail.empty());
4423 for (
VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4432 switch (R.getVPDefID()) {
4433 case VPDef::VPDerivedIVSC:
4434 case VPDef::VPScalarIVStepsSC:
4435 case VPDef::VPScalarCastSC:
4436 case VPDef::VPReplicateSC:
4437 case VPDef::VPInstructionSC:
4438 case VPDef::VPCanonicalIVPHISC:
4439 case VPDef::VPVectorPointerSC:
4440 case VPDef::VPExpandSCEVSC:
4441 case VPDef::VPEVLBasedIVPHISC:
4442 case VPDef::VPPredInstPHISC:
4443 case VPDef::VPBranchOnMaskSC:
4445 case VPDef::VPReductionSC:
4446 case VPDef::VPActiveLaneMaskPHISC:
4447 case VPDef::VPWidenCallSC:
4448 case VPDef::VPWidenCanonicalIVSC:
4449 case VPDef::VPWidenCastSC:
4450 case VPDef::VPWidenGEPSC:
4451 case VPDef::VPWidenSC:
4452 case VPDef::VPWidenSelectSC:
4453 case VPDef::VPBlendSC:
4454 case VPDef::VPFirstOrderRecurrencePHISC:
4455 case VPDef::VPWidenPHISC:
4456 case VPDef::VPWidenIntOrFpInductionSC:
4457 case VPDef::VPWidenPointerInductionSC:
4458 case VPDef::VPReductionPHISC:
4459 case VPDef::VPInterleaveSC:
4460 case VPDef::VPWidenLoadEVLSC:
4461 case VPDef::VPWidenLoadSC:
4462 case VPDef::VPWidenStoreEVLSC:
4463 case VPDef::VPWidenStoreSC:
4469 auto WillWiden = [&
TTI, VF](
Type *ScalarTy) {
4487 if (R.getNumDefinedValues() == 0 &&
4488 !isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe, VPInterleaveRecipe>(
4497 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
4499 if (!Visited.
insert({ScalarTy}).second)
4501 if (WillWiden(ScalarTy))
4511 LLVM_DEBUG(
dbgs() <<
"LV: Scalar loop costs: " << ExpectedCost <<
".\n");
4512 assert(ExpectedCost.
isValid() &&
"Unexpected invalid cost for scalar loop");
4514 [](std::unique_ptr<VPlan> &
P) {
4517 "Expected Scalar VF to be a candidate");
4524 if (ForceVectorization &&
4525 (VPlans.
size() > 1 || !VPlans[0]->hasScalarVFOnly())) {
4533 for (
auto &
P : VPlans) {
4543 unsigned AssumedMinimumVscale =
4546 Candidate.Width.isScalable()
4547 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
4548 : Candidate.Width.getFixedValue();
4550 <<
" costs: " << (Candidate.Cost / Width));
4551 if (VF.isScalable())
4553 << AssumedMinimumVscale <<
")");
4560 <<
"LV: Not considering vector loop of width " << VF
4561 <<
" because it will not generate any vector instructions.\n");
4566 if (isMoreProfitable(Candidate, ScalarCost))
4567 ProfitableVFs.push_back(Candidate);
4569 if (isMoreProfitable(Candidate, ChosenFactor))
4570 ChosenFactor = Candidate;
4578 "There are conditional stores.",
4579 "store that is conditionally executed prevents vectorization",
4580 "ConditionalStore", ORE, OrigLoop);
4581 ChosenFactor = ScalarCost;
4585 !isMoreProfitable(ChosenFactor, ScalarCost))
dbgs()
4586 <<
"LV: Vectorization seems to be not beneficial, "
4587 <<
"but was forced by a user.\n");
4589 return ChosenFactor;
4592bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
4597 [&](
PHINode &Phi) { return Legal->isFixedOrderRecurrence(&Phi); }))
4607 if (!OrigLoop->
contains(cast<Instruction>(U)))
4611 if (!OrigLoop->
contains(cast<Instruction>(U)))
4640 unsigned Multiplier = 1;
4652 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization is disabled.\n");
4657 LLVM_DEBUG(
dbgs() <<
"LEV: Unable to vectorize epilogue because no "
4658 "epilogue is allowed.\n");
4664 if (!isCandidateForEpilogueVectorization(MainLoopVF)) {
4665 LLVM_DEBUG(
dbgs() <<
"LEV: Unable to vectorize epilogue because the loop "
4666 "is not a supported candidate.\n");
4671 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization factor is forced.\n");
4674 return {ForcedEC, 0, 0};
4676 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization forced factor is not "
4685 dbgs() <<
"LEV: Epilogue vectorization skipped due to opt for size.\n");
4690 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization is not profitable for "
4702 EstimatedRuntimeVF *= *VScale;
4707 const SCEV *RemainingIterations =
nullptr;
4708 for (
auto &NextVF : ProfitableVFs) {
4715 if ((!NextVF.Width.isScalable() && MainLoopVF.
isScalable() &&
4722 if (!MainLoopVF.
isScalable() && !NextVF.Width.isScalable()) {
4724 if (!RemainingIterations) {
4731 SE.
getConstant(TCType, NextVF.Width.getKnownMinValue()),
4732 RemainingIterations))
4736 if (Result.Width.isScalar() || isMoreProfitable(NextVF, Result))
4742 << Result.Width <<
"\n");
4746std::pair<unsigned, unsigned>
4748 unsigned MinWidth = -1U;
4749 unsigned MaxWidth = 8;
4762 MaxWidth = std::min<unsigned>(
4763 MaxWidth, std::min<unsigned>(
4769 MinWidth = std::min<unsigned>(
4770 MinWidth,
DL.getTypeSizeInBits(
T->getScalarType()).getFixedValue());
4771 MaxWidth = std::max<unsigned>(
4772 MaxWidth,
DL.getTypeSizeInBits(
T->getScalarType()).getFixedValue());
4775 return {MinWidth, MaxWidth};
4783 for (
Instruction &
I : BB->instructionsWithoutDebug()) {
4791 if (!isa<LoadInst>(
I) && !isa<StoreInst>(
I) && !isa<PHINode>(
I))
4796 if (
auto *PN = dyn_cast<PHINode>(&
I)) {
4810 if (
auto *ST = dyn_cast<StoreInst>(&
I))
4811 T = ST->getValueOperand()->getType();
4814 "Expected the load/store/recurrence type to be sized");
4843 LLVM_DEBUG(
dbgs() <<
"LV: Preference for VP intrinsics indicated. "
4844 "Unroll factor forced to be 1.\n");
4857 if (LoopCost == 0) {
4859 assert(LoopCost.
isValid() &&
"Expected to have chosen a VF with valid cost");
4869 for (
auto& pair : R.MaxLocalUsers) {
4870 pair.second = std::max(pair.second, 1U);
  unsigned IC = UINT_MAX;

  for (auto &pair : R.MaxLocalUsers) {
    unsigned MaxLocalUsers = pair.second;
    unsigned LoopInvariantRegs = 0;
    if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
      LoopInvariantRegs = R.LoopInvariantRegs[pair.first];

    unsigned TmpIC =
        llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
      TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
                              std::max(1U, (MaxLocalUsers - 1)));

    IC = std::min(IC, TmpIC);
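// Worked example with illustrative numbers: if a register class has 32
// target registers, 4 of them tied up by loop-invariant values, and at most 7
// values live at once inside the loop, then bit_floor((32 - 4) / 7) = 4, so
// up to 4-way interleaving fits without expected spilling in that class; IC
// is the minimum of this bound over all register classes.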
4929 EstimatedVF *= *VScale;
4931 assert(EstimatedVF >= 1 &&
"Estimated VF shouldn't be less than 1");
4937 unsigned AvailableTC =
4949 std::max(1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
4950 unsigned InterleaveCountLB =
bit_floor(std::max(
4951 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
4952 MaxInterleaveCount = InterleaveCountLB;
4954 if (InterleaveCountUB != InterleaveCountLB) {
4955 unsigned TailTripCountUB =
4956 (AvailableTC % (EstimatedVF * InterleaveCountUB));
4957 unsigned TailTripCountLB =
4958 (AvailableTC % (EstimatedVF * InterleaveCountLB));
4961 if (TailTripCountUB == TailTripCountLB)
4962 MaxInterleaveCount = InterleaveCountUB;
4964 }
else if (BestKnownTC && *BestKnownTC > 0) {
4968 ? (*BestKnownTC) - 1
4976 MaxInterleaveCount =
bit_floor(std::max(
4977 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
4980 assert(MaxInterleaveCount > 0 &&
4981 "Maximum interleave count must be greater than 0");
4985 if (IC > MaxInterleaveCount)
4986 IC = MaxInterleaveCount;
4989 IC = std::max(1u, IC);
4991 assert(IC > 0 &&
"Interleave count must be greater than 0.");
4995 if (VF.
isVector() && HasReductions) {
4996 LLVM_DEBUG(
dbgs() <<
"LV: Interleaving because of reductions.\n");
5004 bool ScalarInterleavingRequiresPredication =
5006 return Legal->blockNeedsPredication(BB);
5008 bool ScalarInterleavingRequiresRuntimePointerCheck =
5014 <<
"LV: IC is " << IC <<
'\n'
5015 <<
"LV: VF is " << VF <<
'\n');
5016 const bool AggressivelyInterleaveReductions =
5018 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
5019 !ScalarInterleavingRequiresPredication && LoopCost <
SmallLoopCost) {
5023 unsigned SmallIC = std::min(IC, (
unsigned)llvm::bit_floor<uint64_t>(
5030 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5031 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5037 bool HasSelectCmpReductions =
5040 const RecurrenceDescriptor &RdxDesc = Reduction.second;
5041 return RecurrenceDescriptor::isAnyOfRecurrenceKind(
5042 RdxDesc.getRecurrenceKind());
5044 if (HasSelectCmpReductions) {
5045 LLVM_DEBUG(
dbgs() <<
"LV: Not interleaving select-cmp reductions.\n");
5055 bool HasOrderedReductions =
5057 const RecurrenceDescriptor &RdxDesc = Reduction.second;
5058 return RdxDesc.isOrdered();
5060 if (HasOrderedReductions) {
5062 dbgs() <<
"LV: Not interleaving scalar ordered reductions.\n");
5067 SmallIC = std::min(SmallIC,
F);
5068 StoresIC = std::min(StoresIC,
F);
5069 LoadsIC = std::min(LoadsIC,
F);
5073 std::max(StoresIC, LoadsIC) > SmallIC) {
5075 dbgs() <<
"LV: Interleaving to saturate store or load ports.\n");
5076 return std::max(StoresIC, LoadsIC);
5081 if (VF.
isScalar() && AggressivelyInterleaveReductions) {
5085 return std::max(IC / 2, SmallIC);
5087 LLVM_DEBUG(
dbgs() <<
"LV: Interleaving to reduce branch cost.\n");
5094 if (AggressivelyInterleaveReductions) {
5144 for (
Instruction &
I : BB->instructionsWithoutDebug()) {
5148 for (
Value *U :
I.operands()) {
5149 auto *Instr = dyn_cast<Instruction>(U);
5160 LoopInvariants.
insert(Instr);
5165 EndPoint[Instr] = IdxToInstr.
size();
5183 LLVM_DEBUG(
dbgs() <<
"LV(REG): Calculating max register usage:\n");
5185 const auto &TTICapture =
TTI;
5192 for (
unsigned int i = 0, s = IdxToInstr.
size(); i < s; ++i) {
5196 InstrList &
List = TransposeEnds[i];
5211 for (
unsigned j = 0, e = VFs.
size(); j < e; ++j) {
5219 if (VFs[j].isScalar()) {
5220 for (
auto *Inst : OpenIntervals) {
5229 for (
auto *Inst : OpenIntervals) {
5242 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
5248 auto &Entry = MaxUsages[j][pair.first];
5249 Entry = std::max(Entry, pair.second);
5254 << OpenIntervals.
size() <<
'\n');
5260 for (
unsigned i = 0, e = VFs.
size(); i < e; ++i) {
5266 for (
auto *Inst : LoopInvariants) {
5269 bool IsScalar =
all_of(Inst->users(), [&](
User *U) {
5270 auto *I = cast<Instruction>(U);
5271 return TheLoop != LI->getLoopFor(I->getParent()) ||
5272 isScalarAfterVectorization(I, VFs[i]);
5278 Invariant[ClassID] += GetRegUsage(Inst->getType(), VF);
5282 dbgs() <<
"LV(REG): VF = " << VFs[i] <<
'\n';
5283 dbgs() <<
"LV(REG): Found max usage: " << MaxUsages[i].
size()
5285 for (
const auto &pair : MaxUsages[i]) {
5286 dbgs() <<
"LV(REG): RegisterClass: "
5290 dbgs() <<
"LV(REG): Found invariant usage: " << Invariant.
size()
5292 for (
const auto &pair : Invariant) {
5293 dbgs() <<
"LV(REG): RegisterClass: "
5307bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(
Instruction *
I,
5318 "Expecting a scalar emulated instruction");
5319 return isa<LoadInst>(
I) ||
5320 (isa<StoreInst>(
I) &&
5337 PredicatedBBsAfterVectorization[VF].
clear();
5354 !useEmulatedMaskMemRefHack(&
I, VF) &&
5355 computePredInstDiscount(&
I, ScalarCosts, VF) >= 0)
5358 PredicatedBBsAfterVectorization[VF].
insert(BB);
5360 if (Pred->getSingleSuccessor() == BB)
5361 PredicatedBBsAfterVectorization[VF].
insert(Pred);
5370 "Instruction marked uniform-after-vectorization will be predicated");
5388 if (!
I->hasOneUse() || PredInst->
getParent() !=
I->getParent() ||
5407 for (
Use &U :
I->operands())
5408 if (
auto *J = dyn_cast<Instruction>(U.get()))
5420 while (!Worklist.
empty()) {
5424 if (ScalarCosts.contains(
I))
5454 for (
Use &U :
I->operands())
5455 if (
auto *J = dyn_cast<Instruction>(
U.get())) {
5457 "Instruction has non-scalar type");
5458 if (canBeScalarized(J))
5460 else if (needsExtract(J, VF)) {
5462 cast<VectorType>(
ToVectorTy(J->getType(), VF)),
5473 Discount += VectorCost - ScalarCost;
5474 ScalarCosts[
I] = ScalarCost;
5489 for (
Instruction &
I : BB->instructionsWithoutDebug()) {
5506 LLVM_DEBUG(
dbgs() <<
"LV: Found an estimated cost of " <<
C <<
" for VF "
5507 << VF <<
" For instruction: " <<
I <<
'\n');
5535 const Loop *TheLoop) {
5537 auto *Gep = dyn_cast<GetElementPtrInst>(
Ptr);
5543 auto SE = PSE.
getSE();
5544 unsigned NumOperands = Gep->getNumOperands();
5545 for (
unsigned i = 1; i < NumOperands; ++i) {
5546 Value *Opd = Gep->getOperand(i);
5548 !
Legal->isInductionVariable(Opd))
5557LoopVectorizationCostModel::getMemInstScalarizationCost(
Instruction *
I,
5560 "Scalarization cost of instruction implies vectorization.");
5607 if (useEmulatedMaskMemRefHack(
I, VF))
5617LoopVectorizationCostModel::getConsecutiveMemOpCost(
Instruction *
I,
5620 auto *VectorTy = cast<VectorType>(
ToVectorTy(ValTy, VF));
5626 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5627 "Stride should be 1 or -1 for consecutive memory access");
5639 bool Reverse = ConsecutiveStride < 0;
5647LoopVectorizationCostModel::getUniformMemOpCost(
Instruction *
I,
5652 auto *VectorTy = cast<VectorType>(
ToVectorTy(ValTy, VF));
5656 if (isa<LoadInst>(
I)) {
5668 (isLoopInvariantStoreValue
5675LoopVectorizationCostModel::getGatherScatterCost(
Instruction *
I,
5678 auto *VectorTy = cast<VectorType>(
ToVectorTy(ValTy, VF));
5689LoopVectorizationCostModel::getInterleaveGroupCost(
Instruction *
I,
5692 auto *VectorTy = cast<VectorType>(
ToVectorTy(ValTy, VF));
5697 assert(Group &&
"Fail to get an interleaved access group.");
5699 unsigned InterleaveFactor = Group->getFactor();
5704 for (
unsigned IF = 0;
IF < InterleaveFactor;
IF++)
5705 if (Group->getMember(IF))
5709 bool UseMaskForGaps =
5711 (isa<StoreInst>(
I) && (Group->getNumMembers() < Group->getFactor()));
5713 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
5716 if (Group->isReverse()) {
5719 "Reverse masked interleaved access not supported.");
5720 Cost += Group->getNumMembers() *
5727std::optional<InstructionCost>
5733 if (InLoopReductions.
empty() || VF.
isScalar() || !isa<VectorType>(Ty))
5734 return std::nullopt;
5735 auto *VectorTy = cast<VectorType>(Ty);
5752 return std::nullopt;
5763 if (!InLoopReductionImmediateChains.
count(RetI))
5764 return std::nullopt;
5768 Instruction *LastChain = InLoopReductionImmediateChains.
at(RetI);
5770 while (!isa<PHINode>(ReductionPhi))
5771 ReductionPhi = InLoopReductionImmediateChains.
at(ReductionPhi);
5803 : dyn_cast<Instruction>(RetI->
getOperand(1));
5808 if (RedOp && RdxDesc.
getOpcode() == Instruction::Add &&
5821 bool IsUnsigned = isa<ZExtInst>(Op0);
5838 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
5839 return I == RetI ? RedCost : 0;
5843 bool IsUnsigned = isa<ZExtInst>(RedOp);
5852 if (RedCost.
isValid() && RedCost < BaseCost + ExtCost)
5853 return I == RetI ? RedCost : 0;
5854 }
else if (RedOp && RdxDesc.
getOpcode() == Instruction::Add &&
5859 bool IsUnsigned = isa<ZExtInst>(Op0);
5882 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
5883 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
5891 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
5892 return I == RetI ? RedCost : 0;
5901 if (RedCost.
isValid() && RedCost < MulCost + BaseCost)
5902 return I == RetI ? RedCost : 0;
5906 return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
5910LoopVectorizationCostModel::getMemoryInstructionCost(
Instruction *
I,
5940 if (!
RetTy->isVoidTy() &&
5962 for (
auto *V : filterExtractingOperands(Ops, VF))
5965 filterExtractingOperands(Ops, VF), Tys,
CostKind);
5987 auto isLegalToScalarize = [&]() {
6001 if (isa<LoadInst>(
I))
6006 auto &SI = cast<StoreInst>(
I);
6024 if (GatherScatterCost < ScalarizationCost)
6036 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6037 "Expected consecutive stride.");
6046 unsigned NumAccesses = 1;
6049 assert(Group &&
"Fail to get an interleaved access group.");
6055 NumAccesses = Group->getNumMembers();
6057 InterleaveCost = getInterleaveGroupCost(&
I, VF);
6062 ? getGatherScatterCost(&
I, VF) * NumAccesses
6066 getMemInstScalarizationCost(&
I, VF) * NumAccesses;
6072 if (InterleaveCost <= GatherScatterCost &&
6073 InterleaveCost < ScalarizationCost) {
6075 Cost = InterleaveCost;
6076 }
else if (GatherScatterCost < ScalarizationCost) {
6078 Cost = GatherScatterCost;
6081 Cost = ScalarizationCost;
6115 while (!Worklist.
empty()) {
6117 for (
auto &
Op :
I->operands())
6118 if (
auto *InstOp = dyn_cast<Instruction>(
Op))
6119 if ((InstOp->getParent() ==
I->getParent()) && !isa<PHINode>(InstOp) &&
6120 AddrDefs.
insert(InstOp).second)
6124 for (
auto *
I : AddrDefs) {
6125 if (isa<LoadInst>(
I)) {
6139 for (
unsigned I = 0;
I < Group->getFactor(); ++
I) {
6156 "Trying to set a vectorization decision for a scalar VF");
6175 for (
auto &ArgOp : CI->
args())
6180 for (
Type *ScalarTy : ScalarTys)
6189 std::nullopt, *RedCost);
6203 getScalarizationOverhead(CI, VF,
CostKind);
6209 bool UsesMask =
false;
6215 if (
Info.Shape.VF != VF)
6219 if (MaskRequired && !
Info.isMasked())
6223 bool ParamsOk =
true;
6225 switch (Param.ParamKind) {
6244 dyn_cast<SCEVAddRecExpr>(SE->
getSCEV(ScalarParam));
6246 if (!SAR || SAR->getLoop() !=
TheLoop) {
6252 dyn_cast<SCEVConstant>(SAR->getStepRecurrence(*SE));
6280 if (VecFunc && UsesMask && !MaskRequired)
6300 if (VectorCost <=
Cost) {
6305 if (IntrinsicCost <=
Cost) {
6306 Cost = IntrinsicCost;
6325 return InstsToScalarize[VF][
I];
6328 auto ForcedScalar = ForcedScalars.
find(VF);
6329 if (VF.
isVector() && ForcedScalar != ForcedScalars.
end()) {
6330 auto InstSet = ForcedScalar->second;
6331 if (InstSet.count(
I))
6342 auto hasSingleCopyAfterVectorization = [
this](
Instruction *
I,
6347 auto Scalarized = InstsToScalarize.
find(VF);
6348 assert(Scalarized != InstsToScalarize.
end() &&
6349 "VF not yet analyzed for scalarization profitability");
6350 return !Scalarized->second.count(
I) &&
6352 auto *UI = cast<Instruction>(U);
6353 return !Scalarized->second.count(UI);
6356 (void) hasSingleCopyAfterVectorization;
6365 assert(
I->getOpcode() == Instruction::GetElementPtr ||
6366 I->getOpcode() == Instruction::PHI ||
6367 (
I->getOpcode() == Instruction::BitCast &&
6368 I->getType()->isPointerTy()) ||
6369 hasSingleCopyAfterVectorization(
I, VF));
6379 switch (
I->getOpcode()) {
6380 case Instruction::GetElementPtr:
6386 case Instruction::Br: {
6393 bool ScalarPredicatedBB =
false;
6399 ScalarPredicatedBB =
true;
6401 if (ScalarPredicatedBB) {
6423 case Instruction::PHI: {
6424 auto *Phi = cast<PHINode>(
I);
6436 cast<VectorType>(VectorTy), Mask,
CostKind,
6444 return (Phi->getNumIncomingValues() - 1) *
6446 Instruction::Select,
ToVectorTy(Phi->getType(), VF),
6452 case Instruction::UDiv:
6453 case Instruction::SDiv:
6454 case Instruction::URem:
6455 case Instruction::SRem:
6459 ScalarCost : SafeDivisorCost;
6463 case Instruction::Add:
6464 case Instruction::FAdd:
6465 case Instruction::Sub:
6466 case Instruction::FSub:
6467 case Instruction::Mul:
6468 case Instruction::FMul:
6469 case Instruction::FDiv:
6470 case Instruction::FRem:
6471 case Instruction::Shl:
6472 case Instruction::LShr:
6473 case Instruction::AShr:
6474 case Instruction::And:
6475 case Instruction::Or:
6476 case Instruction::Xor: {
6480 if (I->getOpcode() == Instruction::Mul &&
6491 Value *Op2 = I->getOperand(1);
6500 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6503 case Instruction::FNeg: {
6506 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6507 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6508 I->getOperand(0), I);
6510 case Instruction::Select: {
6512 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6515 const Value *Op0, *Op1;
6532 Type *CondTy = SI->getCondition()->getType();
6537 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
6538 Pred = Cmp->getPredicate();
6542 case Instruction::ICmp:
6543 case Instruction::FCmp: {
6544 Type *ValTy = I->getOperand(0)->getType();
6545 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6550 cast<CmpInst>(I)->getPredicate(), CostKind,
6553 case Instruction::Store:
6554 case Instruction::Load: {
6559 "CM decision should be taken at this point");
6566 return getMemoryInstructionCost(I, VF);
6568 case Instruction::BitCast:
6569 if (I->getType()->isPointerTy())
6572 case Instruction::ZExt:
6573 case Instruction::SExt:
6574 case Instruction::FPToUI:
6575 case Instruction::FPToSI:
6576 case Instruction::FPExt:
6577 case Instruction::PtrToInt:
6578 case Instruction::IntToPtr:
6579 case Instruction::SIToFP:
6580 case Instruction::UIToFP:
6581 case Instruction::Trunc:
6582 case Instruction::FPTrunc: {
6585 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
6586 "Expected a load or a store!");
6612 unsigned Opcode = I->getOpcode();
6615 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
6617 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
6618 CCH = ComputeCCH(Store);
6621 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
6622 Opcode == Instruction::FPExt) {
6623 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
6624 CCH = ComputeCCH(Load);
6631 auto *Trunc = cast<TruncInst>(I);
6633 Trunc->getSrcTy(), CCH, CostKind, Trunc);
6640 Type *SrcScalarTy = I->getOperand(0)->getType();
6641 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6652 (I->getOpcode() == Instruction::ZExt ||
6653 I->getOpcode() == Instruction::SExt))
6659 case Instruction::Call:
6661 case Instruction::ExtractValue:
6663 case Instruction::Alloca:
6686 if ((SI = dyn_cast<StoreInst>(&I)) &&
6697 return VecValuesToIgnore.contains(U) || ValuesToIgnore.contains(U);
6706 if (Group->getInsertPos() == &I)
6709 DeadInterleavePointerOps.push_back(PointerOp);
6715 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
6716 auto *Op = dyn_cast<Instruction>(DeadInterleavePointerOps[I]);
6718 Instruction *UI = cast<Instruction>(U);
6719 return !VecValuesToIgnore.contains(U) &&
6720 (!isAccessInterleaved(UI) ||
6721 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
6725 DeadInterleavePointerOps.append(Op->op_begin(), Op->op_end());
6730 for (unsigned I = 0; I != DeadOps.size(); ++I) {
6731 auto *Op = dyn_cast<Instruction>(DeadOps[I]);
6736 return !VecValuesToIgnore.contains(U) && !ValuesToIgnore.contains(U);
6744 [this](User *U) { return ValuesToIgnore.contains(U); }))
6748 DeadOps.append(Op->op_begin(), Op->op_end());
6789 bool InLoop = !ReductionOperations.empty();
6792 InLoopReductions.insert(Phi);
6795 for (auto *I : ReductionOperations) {
6796 InLoopReductionImmediateChains[I] = LastChain;
6800 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
6801 << " reduction for phi: " << *Phi << "\n");
6809 return tryInsertInstruction(
6822 unsigned WidestType;
6831 unsigned N = RegSize.getKnownMinValue() / WidestType;
6852 <<
"overriding computed VF.\n");
6857 LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
6858 << "not supported by the target.\n");
6860 "Scalable vectorization requested but not supported by the target",
6861 "the scalable user-specified vectorization width for outer-loop "
6862 "vectorization cannot be used because the target does not support "
6863 "scalable vectors.",
6864 "ScalableVFUnfeasible", ORE, OrigLoop);
6869 "VF needs to be a power of two");
6871 << "VF " << VF << " to build VPlans.\n");
6878 return {VF, 0, 0};
6882 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6883 "VPlan-native path.\n");
6887std::optional<VectorizationFactor>
6895 return std::nullopt;
6902 <<
"LV: Invalidate all interleaved groups due to fold-tail by masking "
6903 "which requires masked-interleaved support.\n");
6917 if (!UserVF.isZero() && UserVFIsLegal) {
6919 "VF needs to be a power of two");
6925 buildVPlansWithVPRecipes(UserVF, UserVF);
6927 LLVM_DEBUG(dbgs() << "LV: No VPlan could be built for " << UserVF
6929 return std::nullopt;
6933 return {{UserVF, 0, 0}};
6936 "InvalidCost", ORE, OrigLoop);
6949 for (const auto &VF : VFCandidates) {
6964 return std::nullopt;
6966 [](std::unique_ptr<VPlan> &P) { return P->hasScalarVFOnly(); }))
6978 return std::nullopt;
7016 for (User *U : IV->users()) {
7017 auto *CI = cast<Instruction>(U);
7018 if (!CostCtx.CM.isOptimizableIVTruncate(CI, VF))
7023 if (!CostCtx.SkipCostComputation.insert(IVInst).second)
7027 dbgs() << "Cost of " << InductionCost << " for VF " << VF
7028 << ": induction instruction " << *IVInst << "\n";
7030 Cost += InductionCost;
7043 auto *Term = dyn_cast<BranchInst>(EB->getTerminator());
7046 if (auto *CondI = dyn_cast<Instruction>(Term->getOperand(0))) {
7047 ExitInstrs.insert(CondI);
7051 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
7054 !CostCtx.SkipCostComputation.insert(CondI).second)
7056 Cost += CostCtx.getLegacyCost(CondI, VF);
7058 auto *OpI = dyn_cast<Instruction>(Op);
7059 if (!OpI || any_of(OpI->users(), [&ExitInstrs, this](User *U) {
7060 return OrigLoop->contains(cast<Instruction>(U)->getParent()) &&
7061 !ExitInstrs.contains(cast<Instruction>(U));
7077 RdxDesc.getRecurrenceKind()))
7083 RdxDesc.getRecurrenceKind())) {
7085 RedPhi->users(), [](User *U) { return isa<SelectInst>(U); }));
7086 assert(!CostCtx.SkipCostComputation.contains(Select) &&
7087 "reduction op visited multiple times");
7088 CostCtx.SkipCostComputation.insert(Select);
7089 auto ReductionCost = CostCtx.getLegacyCost(Select, VF);
7090 LLVM_DEBUG(dbgs() << "Cost of " << ReductionCost << " for VF " << VF
7091 << ":\n any-of reduction " << *Select << "\n");
7092 Cost += ReductionCost;
7096 const auto &ChainOps = RdxDesc.getReductionOpChain(RedPhi, OrigLoop);
7101 for (auto *ChainOp : ChainOps) {
7102 for (Value *Op : ChainOp->operands()) {
7103 if (auto *I = dyn_cast<Instruction>(Op))
7104 ChainOpsAndOperands.insert(I);
7115 assert(!CostCtx.SkipCostComputation.contains(I) &&
7116 "reduction op visited multiple times");
7117 CostCtx.SkipCostComputation.insert(I);
7118 LLVM_DEBUG(dbgs() << "Cost of " << ReductionCost << " for VF " << VF
7119 << ":\n in-loop reduction " << *I << "\n");
7120 Cost += *ReductionCost;
7132 CostCtx.SkipCostComputation.insert(BB->getTerminator());
7133 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
7144 VPlan &FirstPlan = *VPlans[0];
7148 VPlan *BestPlan = &FirstPlan;
7151 "More than a single plan/VF w/o any plan having scalar VF");
7158 if (ForceVectorization) {
7165 for (auto &P : VPlans) {
7172 << "LV: Not considering vector loop of width " << VF
7173 << " because it will not generate any vector instructions.\n");
7179 if (isMoreProfitable(CurrentFactor, BestFactor)) {
7180 BestFactor = CurrentFactor;
7191 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7193 "Best VF has not a single VPlan.");
7195 for (const VPlanPtr &Plan : VPlans) {
7196 if (Plan->hasVF(VF))
7206 bool IsUnrollMetadata = false;
7207 MDNode *LoopID = L->getLoopID();
7210 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7211 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7213 const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7215 S && S->getString().starts_with("llvm.loop.unroll.disable");
7221 if (!IsUnrollMetadata) {
7223 LLVMContext &Context = L->getHeader()->getContext();
7226 MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7232 L->setLoopID(NewLoopID);
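// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, not the LLVM metadata API): the function
// above scans the loop-ID operands and appends the runtime-unroll disable
// hint only if no "llvm.loop.unroll.disable"-style operand is already
// present. Plain strings stand in for MDNode operands here.
#include <cstdio>
#include <string>
#include <vector>

static void addRuntimeUnrollDisable(std::vector<std::string> &LoopID) {
  for (const std::string &Op : LoopID)
    if (Op.rfind("llvm.loop.unroll.disable", 0) == 0)
      return; // unrolling is already disabled; nothing to add
  LoopID.push_back("llvm.loop.unroll.runtime.disable");
}

int main() {
  std::vector<std::string> LoopID = {"llvm.loop.vectorize.width"};
  addRuntimeUnrollDisable(LoopID);
  for (const std::string &Op : LoopID)
    std::printf("%s\n", Op.c_str());
  return 0;
}
// --- end sketch -------------------------------------------------------------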
7242 bool VectorizingEpilogue) {
7247 auto *PhiR = cast<VPReductionPHIRecipe>(RedResult->getOperand(0));
7253 dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
7256 auto *Cmp = cast<ICmpInst>(PhiR->getStartValue()->getUnderlyingValue());
7259 ResumePhi = cast<PHINode>(Cmp->getOperand(0));
7261 assert((!VectorizingEpilogue || ResumePhi) &&
7262 "when vectorizing the epilogue loop, we need a resume phi from main "
7279 BCBlockPhi->addIncoming(FinalValue, Incoming);
7281 BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
7287 auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
7291 int IncomingEdgeBlockIdx =
7293 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
7295 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
7296 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
7298 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
7300 ReductionResumeValues[&RdxDesc] = BCBlockPhi;
7303 std::pair<DenseMap<const SCEV *, Value *>,
7310 "Trying to execute plan with unsupported VF");
7312 "Trying to execute plan with unsupported UF");
7314 (IsEpilogueVectorization || !ExpandedSCEVs) &&
7315 "expanded SCEVs to reuse can only be used during epilogue vectorization");
7316 (void)IsEpilogueVectorization;
7321 << ", UF=" << BestUF << '\n');
7322 BestVPlan.setName("Final VPlan");
7339 assert(IsEpilogueVectorization && "should only re-use the existing trip "
7340 "count during epilogue vectorization");
7344 Value *CanonicalIVStartValue;
7345 std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7348 #ifdef EXPENSIVE_CHECKS
7349 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
7355 std::unique_ptr<LoopVersioning> LVer = nullptr;
7363 LVer = std::make_unique<LoopVersioning>(
7366 State.LVer = &*LVer;
7383 CanonicalIVStartValue, State);
7393 dyn_cast<VPInstruction>(&R), ReductionResumeValues, State, OrigLoop,
7402 std::optional<MDNode *> VectorizedLoopID =
7409 if (VectorizedLoopID)
7410 L->setLoopID(*VectorizedLoopID);
7433 cast<BranchInst>(State.CFG.VPBB2IRBB[ExitVPBB]->getTerminator());
7434 if (MiddleTerm->isConditional() &&
7438 assert(TripCount > 0 && "trip count should not be zero");
7439 const uint32_t Weights[] = {1, TripCount - 1};
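// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, hypothetical helper): with a known trip
// count, one successor of the conditional middle-block branch above is
// expected to be reached far more often than the other, so a 1 : TripCount-1
// weighting is attached. The helper below only mirrors that computation.
#include <cstdint>
#include <cstdio>
#include <utility>

static std::pair<uint32_t, uint32_t> middleBlockWeights(uint32_t TripCount) {
  // One side is expected roughly once per TripCount executions.
  return {1u, TripCount - 1u};
}

int main() {
  auto [Rare, Common] = middleBlockWeights(/*TripCount=*/100);
  std::printf("weights = {%u, %u}\n", Rare, Common); // {1, 99}
  return 0;
}
// --- end sketch -------------------------------------------------------------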
7446#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7448 for (
const auto &Plan : VPlans)
7462std::pair<BasicBlock *, Value *>
7464 const SCEV2ValueTy &ExpandedSCEVs) {
7504 dbgs() <<
"Create Skeleton for epilogue vectorized loop (first pass)\n"
7514 dbgs() <<
"intermediate fn:\n"
7522 assert(Bypass &&
"Expected valid bypass basic block.");
7543 TCCheckBlock->
setName(
"vector.main.loop.iter.check");
7547 DT,
LI,
nullptr,
"vector.ph");
7552 "TC check is expected to dominate Bypass");
7570 return TCCheckBlock;
7579std::pair<BasicBlock *, Value *>
7581 const SCEV2ValueTy &ExpandedSCEVs) {
7589 nullptr,
"vec.epilog.iter.check",
true);
7591 VecEpilogueIterationCountCheck);
7596 "expected this to be saved from the previous pass.");
7614 VecEpilogueIterationCountCheck,
7638 for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
7641 for (PHINode *Phi : PhisInBlock) {
7643 Phi->replaceIncomingBlockWith(
7645 VecEpilogueIterationCountCheck);
7652 return EPI.EpilogueIterationCountCheck == IncB;
7664 Type *IdxTy = Legal->getWidestInductionType();
7668 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
7679 {VecEpilogueIterationCountCheck,
7690 "Expected trip count to have been safed in the first pass.");
7694 "saved trip count does not dominate insertion point.");
7705 Value *CheckMinIters =
7709 "min.epilog.iters.check");
7715 unsigned EpilogueLoopStep =
7721 unsigned EstimatedSkipCount = std::min(MainLoopStep, EpilogueLoopStep);
7722 const uint32_t Weights[] = {EstimatedSkipCount,
7723 MainLoopStep - EstimatedSkipCount};
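// ---------------------------------------------------------------------------
// Illustrative sketch (standalone): the weighting of the epilogue's
// minimum-iteration check above, under the assumption that the remainder
// left over from the main vector loop is roughly uniform in
// [0, MainLoopStep). The concrete step values are made up for the example.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  unsigned MainLoopStep = 16;     // e.g. VF x UF of the main vector loop
  unsigned EpilogueLoopStep = 4;  // e.g. VF x UF of the epilogue loop
  unsigned EstimatedSkipCount = std::min(MainLoopStep, EpilogueLoopStep);
  const uint32_t Weights[] = {EstimatedSkipCount,
                              MainLoopStep - EstimatedSkipCount};
  std::printf("skip : enter = %u : %u\n", Weights[0], Weights[1]); // 4 : 12
  return 0;
}
// --- end sketch -------------------------------------------------------------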
7733 dbgs() <<
"Create Skeleton for epilogue vectorized loop (second pass)\n"
7747 assert(!Range.isEmpty() && "Trying to test an empty VF range.");
7748 bool PredicateAtRangeStart = Predicate(Range.Start);
7751 if (Predicate(TmpVF) != PredicateAtRangeStart) {
7756 return PredicateAtRangeStart;
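// ---------------------------------------------------------------------------
// Illustrative sketch (standalone): the range-clamping idea above. A per-VF
// predicate is evaluated at Range.Start, and Range.End is shrunk to the
// first power-of-two VF where the answer flips, so one decision covers the
// whole remaining sub-range. Plain unsigned values stand in for LLVM's
// ElementCount; the names mirror the code but are otherwise illustrative.
#include <cstdio>
#include <functional>

struct VFRange { unsigned Start; unsigned End; }; // [Start, End), powers of 2

static bool getDecisionAndClampRange(
    const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
  bool PredicateAtRangeStart = Predicate(Range.Start);
  for (unsigned VF = Range.Start * 2; VF < Range.End; VF *= 2)
    if (Predicate(VF) != PredicateAtRangeStart) {
      Range.End = VF; // everything in [Start, VF) shares one decision
      break;
    }
  return PredicateAtRangeStart;
}

int main() {
  VFRange Range = {2, 32};
  // Example predicate: "is the VF at most 8?"
  bool Decision =
      getDecisionAndClampRange([](unsigned VF) { return VF <= 8; }, Range);
  std::printf("decision=%d, clamped range=[%u, %u)\n", Decision, Range.Start,
              Range.End); // decision=1, [2, 16)
  return 0;
}
// --- end sketch -------------------------------------------------------------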
7766 auto MaxVFTimes2 = MaxVF * 2;
7768 VFRange SubRange = {VF, MaxVFTimes2};
7769 VPlans.push_back(buildVPlan(SubRange));
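// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, hypothetical): buildVPlan() above is asked
// to cover the half-open range [VF, MaxVF * 2) and may clamp its end while
// making decisions; the driver then resumes at the clamped end, so each
// power-of-two VF candidate is covered by exactly one plan. The stand-in
// below pretends every plan can only cover two VFs.
#include <cstdio>

struct PlanRange { unsigned Start; unsigned End; }; // half-open, powers of two

static void buildPlanFor(PlanRange &SubRange) {
  unsigned ClampedEnd = SubRange.Start * 4; // covers Start and Start * 2
  if (ClampedEnd < SubRange.End)
    SubRange.End = ClampedEnd;
  std::printf("plan covers VFs [%u, %u)\n", SubRange.Start, SubRange.End);
}

int main() {
  unsigned MinVF = 1, MaxVF = 16;
  auto MaxVFTimes2 = MaxVF * 2;
  for (unsigned VF = MinVF; VF < MaxVFTimes2;) {
    PlanRange SubRange = {VF, MaxVFTimes2};
    buildPlanFor(SubRange);
    VF = SubRange.End; // continue with the first VF not yet covered
  }
  return 0;
}
// --- end sketch -------------------------------------------------------------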
7777 if (
auto *
I = dyn_cast<Instruction>(
Op)) {
7778 if (
auto *R = Ingredient2Recipe.lookup(
I))
7779 return R->getVPSingleValue();
7790 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
7792 if (ECEntryIt != EdgeMaskCache.
end())
7793 return ECEntryIt->second;
7798 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
7799 assert(BI &&
"Unexpected terminator found");
7802 return EdgeMaskCache[Edge] = SrcMask;
7808 return EdgeMaskCache[Edge] = SrcMask;
7811 assert(EdgeMask &&
"No Edge Mask found for condition");
7823 return EdgeMaskCache[Edge] = EdgeMask;
7830 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
7832 assert(ECEntryIt != EdgeMaskCache.end() &&
7833 "looking up mask for edge which has not been created");
7834 return ECEntryIt->second;
7842 BlockMaskCache[Header] = nullptr;
7854 HeaderVPBB->insert(IV, NewInsertionPoint);
7861 BlockMaskCache[Header] = BlockMask;
7867 assert(BCEntryIt != BlockMaskCache.end() &&
7868 "Trying to access mask for block without one.");
7869 return BCEntryIt->second;
7873 assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
7874 assert(BlockMaskCache.count(BB) == 0 && "Mask for block already computed");
7876 "Loop header must have cached block mask");
7885 BlockMaskCache[BB] = EdgeMask;
7890 BlockMask = EdgeMask;
7894 BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
7897 BlockMaskCache[BB] = BlockMask;
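// ---------------------------------------------------------------------------
// Illustrative sketch (standalone): the masking rule behind the code above.
// Under predication a block's mask is the OR over its incoming edges, where
// each edge mask is the predecessor's block mask AND-ed with that edge's
// branch condition. Lanes are modelled with vector<bool>; all names and the
// example values are illustrative.
#include <cstdio>
#include <vector>

using Mask = std::vector<bool>;

static Mask andMask(const Mask &A, const Mask &B) {
  Mask R(A.size());
  for (size_t I = 0; I < A.size(); ++I) R[I] = A[I] && B[I];
  return R;
}

static Mask orMask(const Mask &A, const Mask &B) {
  Mask R(A.size());
  for (size_t I = 0; I < A.size(); ++I) R[I] = A[I] || B[I];
  return R;
}

int main() {
  // Two predecessors: pred0 branches here when Cond holds, pred1 always.
  Mask Pred0Mask = {1, 1, 0, 1}, Cond = {1, 0, 0, 1};
  Mask Pred1Mask = {0, 0, 1, 0}, AllTrue = {1, 1, 1, 1};
  Mask Edge0 = andMask(Pred0Mask, Cond);
  Mask Edge1 = andMask(Pred1Mask, AllTrue);
  Mask BlockMask = orMask(Edge0, Edge1);
  for (bool Lane : BlockMask) std::printf("%d ", Lane ? 1 : 0); // 1 0 1 1
  std::printf("\n");
  return 0;
}
// --- end sketch -------------------------------------------------------------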
7903 assert((isa<LoadInst>(
I) || isa<StoreInst>(
I)) &&
7904 "Must be called with either a load or store");
7910 "CM decision should be taken at this point.");
7936 auto *
GEP = dyn_cast<GetElementPtrInst>(
7937 Ptr->getUnderlyingValue()->stripPointerCasts());
7944 if (
LoadInst *Load = dyn_cast<LoadInst>(
I))
7962 "step must be loop invariant");
7966 if (
auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
7969 assert(isa<PHINode>(PhiOrTrunc) &&
"must be a phi node here");
7980 *PSE.
getSE(), *OrigLoop);
8006 auto isOptimizableIVTruncate =
8014 isOptimizableIVTruncate(
I),
Range)) {
8016 auto *
Phi = cast<PHINode>(
I->getOperand(0));
8027 unsigned NumIncoming =
Phi->getNumIncomingValues();
8038 for (
unsigned In = 0;
In < NumIncoming;
In++) {
8043 assert(In == 0 &&
"Both null and non-null edge masks found");
8045 "Distinct incoming values with one having a full mask");
8068 if (
ID && (
ID == Intrinsic::assume ||
ID == Intrinsic::lifetime_end ||
8069 ID == Intrinsic::lifetime_start ||
ID == Intrinsic::sideeffect ||
8070 ID == Intrinsic::pseudoprobe ||
8071 ID == Intrinsic::experimental_noalias_scope_decl))
8078 bool ShouldUseVectorIntrinsic =
8085 if (ShouldUseVectorIntrinsic)
8090 std::optional<unsigned> MaskPos;
8112 Variant = Decision.Variant;
8113 MaskPos = Decision.MaskPos;
8120 if (ShouldUseVectorCall) {
8121 if (MaskPos.has_value()) {
8136 Ops.insert(Ops.
begin() + *MaskPos, Mask);
8148 assert(!isa<BranchInst>(
I) && !isa<PHINode>(
I) && !isa<LoadInst>(
I) &&
8149 !isa<StoreInst>(
I) &&
"Instruction should have been handled earlier");
8164 switch (I->getOpcode()) {
8167 case Instruction::SDiv:
8168 case Instruction::UDiv:
8169 case Instruction::SRem:
8170 case Instruction::URem: {
8178 auto *SafeRHS = Builder.createSelect(Mask, Ops[1], One, I->getDebugLoc());
8184 case Instruction::Add:
8185 case Instruction::And:
8186 case Instruction::AShr:
8187 case Instruction::FAdd:
8188 case Instruction::FCmp:
8189 case Instruction::FDiv:
8190 case Instruction::FMul:
8191 case Instruction::FNeg:
8192 case Instruction::FRem:
8193 case Instruction::FSub:
8194 case Instruction::ICmp:
8195 case Instruction::LShr:
8196 case Instruction::Mul:
8197 case Instruction::Or:
8198 case Instruction::Select:
8199 case Instruction::Shl:
8200 case Instruction::Sub:
8201 case Instruction::Xor:
8202 case Instruction::Freeze:
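// ---------------------------------------------------------------------------
// Illustrative sketch (standalone): the safe-divisor idea behind the select
// created for predicated div/rem above. In lanes where the block mask is
// false the divisor is replaced by 1, so a widened integer division cannot
// trap on a masked-off lane. Plain arrays stand in for vector values.
#include <cstdio>

int main() {
  constexpr int VF = 4;
  int Dividend[VF] = {10, 20, 30, 40};
  int Divisor[VF]  = {2, 0, 5, 0};   // lanes 1 and 3 would otherwise trap
  bool Mask[VF]    = {true, false, true, false};

  int Result[VF];
  for (int Lane = 0; Lane < VF; ++Lane) {
    int SafeRHS = Mask[Lane] ? Divisor[Lane] : 1; // select(Mask, RHS, 1)
    Result[Lane] = Dividend[Lane] / SafeRHS;      // never divides by zero
  }
  for (int Lane = 0; Lane < VF; ++Lane)
    std::printf("%d ", Mask[Lane] ? Result[Lane] : 0); // masked lanes unused
  std::printf("\n");
  return 0;
}
// --- end sketch -------------------------------------------------------------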
8210 auto *PN = cast<PHINode>(R->getUnderlyingValue());
8212 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8229 if (!IsUniform &&
Range.Start.isScalable() && isa<IntrinsicInst>(
I)) {
8231 case Intrinsic::assume:
8232 case Intrinsic::lifetime_start:
8233 case Intrinsic::lifetime_end:
8255 VPValue *BlockInMask =
nullptr;
8256 if (!IsPredicated) {
8260 LLVM_DEBUG(
dbgs() <<
"LV: Scalarizing and predicating:" << *
I <<
"\n");
8271 assert((
Range.Start.isScalar() || !IsUniform || !IsPredicated ||
8272 (
Range.Start.isScalable() && isa<IntrinsicInst>(
I))) &&
8273 "Should not predicate a uniform recipe");
8275 IsUniform, BlockInMask);
8286 if (
auto Phi = dyn_cast<PHINode>(Instr)) {
8287 if (Phi->getParent() != OrigLoop->
getHeader())
8290 if ((Recipe = tryToOptimizeInductionPHI(Phi,
Operands,
Range)))
8296 "can only widen reductions and fixed-order recurrences here");
8314 PhisToFix.push_back(PhiRecipe);
8318 if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8327 if (
auto *CI = dyn_cast<CallInst>(Instr))
8330 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8333 if (!shouldWiden(Instr,
Range))
8336 if (
auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8340 if (
auto *SI = dyn_cast<SelectInst>(Instr)) {
8345 if (
auto *CI = dyn_cast<CastInst>(Instr)) {
8350 return tryToWiden(Instr,
Operands, VPBB);
8353 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8357 auto MaxVFTimes2 = MaxVF * 2;
8359 VFRange SubRange = {VF, MaxVFTimes2};
8360 if (auto Plan = tryToBuildVPlanWithVPRecipes(SubRange)) {
8372 VPlans.push_back(std::move(Plan));
8382 Value *StartIdx = ConstantInt::get(IdxTy, 0);
8389 Header->insert(CanonicalIVPHI, Header->begin());
8394 Instruction::Add, {CanonicalIVPHI, &Plan.getVFxUF()}, {HasNUW, false}, DL,
8396 CanonicalIVPHI->addOperand(CanonicalIVIncrement);
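// ---------------------------------------------------------------------------
// Illustrative sketch (standalone): what the canonical-IV recipes created
// above compute. The canonical induction variable is a phi that starts at 0
// and is bumped by VF * UF on every vector-loop iteration until the vector
// trip count is reached. Plain integers model the recipe semantics; the
// concrete VF/UF/trip-count values are made up.
#include <cstdio>

int main() {
  unsigned VF = 4, UF = 2;       // vectorization and unroll factors
  unsigned VectorTripCount = 24; // assumed already rounded down to VF * UF
  for (unsigned CanonicalIV = 0; CanonicalIV != VectorTripCount;
       CanonicalIV += VF * UF) { // the canonical-IV increment recipe
    std::printf("vector iteration covers scalar indices [%u, %u)\n",
                CanonicalIV, CanonicalIV + VF * UF);
  }
  return 0;
}
// --- end sketch -------------------------------------------------------------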
8415 Value *IncomingValue =
8416 ExitPhi.getIncomingValueForBlock(ExitingBB);
8422 if ((isa<VPWidenIntOrFpInductionRecipe>(V) &&
8423 !cast<VPWidenIntOrFpInductionRecipe>(V)->getTruncInst()) ||
8424 isa<VPWidenPointerInductionRecipe>(V))
8446 if (isa<VPIRBasicBlock>(Succ))
8448 assert(!ScalarPHVPBB &&
"Two candidates for ScalarPHVPBB?");
8449 ScalarPHVPBB = cast<VPBasicBlock>(Succ);
8454 VPBuilder ScalarPHBuilder(ScalarPHVPBB);
8458 if (
auto *Terminator = MiddleVPBB->getTerminator()) {
8459 auto *Condition = dyn_cast<VPInstruction>(Terminator->getOperand(0));
8460 assert((!Condition || Condition->getParent() == MiddleVPBB) &&
8461 "Condition expected in MiddleVPBB");
8462 MiddleBuilder.
setInsertPoint(Condition ? Condition : Terminator);
8468 auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&HeaderPhi);
8474 {FOR->getBackedgeValue(), OneVPV},
8475 {},
"vector.recur.extract");
8478 "scalar.recur.init");
8479 Plan.
addLiveOut(cast<PHINode>(FOR->getUnderlyingInstr()), ResumePhiRecipe);
8484LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
VFRange &
Range) {
8499 bool RequiresScalarEpilogueCheck =
8514 bool IVUpdateMayOverflow =
false;
8525 VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, Legal, CM, PSE, Builder);
8545 "Unsupported interleave factor for scalable vectors");
8550 InterleaveGroups.
insert(IG);
8568 bool NeedsBlends = BB != HeaderBB && !BB->phis().empty();
8569 return Legal->blockNeedsPredication(BB) || NeedsBlends;
8574 if (VPBB != HeaderVPBB)
8578 if (VPBB == HeaderVPBB)
8579 RecipeBuilder.createHeaderMask();
8580 else if (NeedsMasks)
8581 RecipeBuilder.createBlockInMask(BB);
8588 auto *
Phi = dyn_cast<PHINode>(Instr);
8589 if (Phi &&
Phi->getParent() == HeaderBB) {
8590 Operands.push_back(Plan->getOrAddLiveIn(
8593 auto OpRange = RecipeBuilder.mapToVPValues(
Instr->operands());
8594 Operands = {OpRange.begin(), OpRange.end()};
8600 if ((SI = dyn_cast<StoreInst>(&
I)) &&
8605 RecipeBuilder.tryToCreateWidenRecipe(Instr,
Operands,
Range, VPBB);
8607 Recipe = RecipeBuilder.handleReplication(Instr,
Range);
8609 RecipeBuilder.setRecipe(Instr, Recipe);
8610 if (isa<VPHeaderPHIRecipe>(Recipe)) {
8621 "unexpected recipe needs moving");
8641 assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) &&
8642 !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() &&
8643 "entry block must be set to a VPRegionBlock having a non-empty entry "
8645 RecipeBuilder.fixHeaderPhis();
8655 adjustRecipesForReductions(Plan, RecipeBuilder,
Range.Start);
8660 for (
const auto *IG : InterleaveGroups) {
8662 cast<VPWidenMemoryRecipe>(RecipeBuilder.getRecipe(IG->getInsertPos()));
8664 for (
unsigned i = 0; i < IG->getFactor(); ++i)
8665 if (
auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
8666 auto *StoreR = cast<VPWidenStoreRecipe>(RecipeBuilder.getRecipe(SI));
8667 StoredValues.
push_back(StoreR->getStoredValue());
8670 bool NeedsMaskForGaps =
8673 "masked interleaved groups are not allowed.");
8675 Recipe->getMask(), NeedsMaskForGaps);
8676 VPIG->insertBefore(Recipe);
8678 for (
unsigned i = 0; i < IG->getFactor(); ++i)
8680 VPRecipeBase *MemberR = RecipeBuilder.getRecipe(Member);
8681 if (!
Member->getType()->isVoidTy()) {
8692 Plan->setName(
"Initial VPlan");
8697 auto *StrideV = cast<SCEVUnknown>(Stride)->getValue();
8698 auto *ScevStride = dyn_cast<SCEVConstant>(PSE.getSCEV(StrideV));
8703 auto *CI = Plan->getOrAddLiveIn(
8704 ConstantInt::get(Stride->getType(), ScevStride->getAPInt()));
8705 if (VPValue *StrideVPV = Plan->getLiveIn(StrideV))
8711 if (!isa<SExtInst, ZExtInst>(U))
8713 VPValue *StrideVPV = Plan->getLiveIn(U);
8716 unsigned BW = U->getType()->getScalarSizeInBits();
8717 APInt C = isa<SExtInst>(U) ? ScevStride->getAPInt().sext(BW)
8718 : ScevStride->getAPInt().zext(BW);
8719 VPValue *CI = Plan->getOrAddLiveIn(ConstantInt::get(U->getType(), C));
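// ---------------------------------------------------------------------------
// Illustrative sketch (standalone): once the loop has been versioned on the
// stride being a known constant, uses of the stride -- including sext'ed and
// zext'ed copies -- can be replaced with that constant widened to each
// user's type, as the code above does. The two helpers model sign/zero
// extension from 32 to 64 bits for this example only.
#include <cstdint>
#include <cstdio>

static int64_t signExtendFrom32(int32_t V) { return static_cast<int64_t>(V); }
static uint64_t zeroExtendFrom32(uint32_t V) { return static_cast<uint64_t>(V); }

int main() {
  int32_t Stride = 1; // constant established by the stride runtime check
  std::printf("sext user sees %lld\n",
              static_cast<long long>(signExtendFrom32(Stride)));
  std::printf("zext user sees %llu\n",
              static_cast<unsigned long long>(zeroExtendFrom32(Stride)));
  return 0;
}
// --- end sketch -------------------------------------------------------------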
8737 bool WithoutRuntimeCheck =
8740 WithoutRuntimeCheck);
8756 *PSE.
getSE(),
true,
false, OrigLoop);
8760 HCFGBuilder.buildHierarchicalCFG();
8768 *PSE.
getSE(), *TLI);
8773 Plan->getVectorLoopRegion()->getExitingBasicBlock()->getTerminator();
8774 Term->eraseFromParent();
8798void LoopVectorizationPlanner::adjustRecipesForReductions(
8800 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
8807 if (
auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
8810 bool HasIntermediateStore =
false;
8815 auto *IS2 =
R2->getRecurrenceDescriptor().IntermediateStore;
8816 HasIntermediateStore |= IS1 || IS2;
8837 if (HasIntermediateStore && ReductionPHIList.
size() > 1)
8839 R->moveBefore(*Header, Header->getFirstNonPhi());
8842 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8843 if (!PhiR || !PhiR->isInLoop() || (MinVF.
isScalar() && !PhiR->isOrdered()))
8849 "AnyOf reductions are not allowed for in-loop reductions");
8854 for (
unsigned I = 0;
I != Worklist.
size(); ++
I) {
8857 auto *UserRecipe = dyn_cast<VPSingleDefRecipe>(U);
8859 assert(isa<VPLiveOut>(U) &&
8860 "U must either be a VPSingleDef or VPLiveOut");
8863 Worklist.
insert(UserRecipe);
8876 Instruction *CurrentLinkI = CurrentLink->getUnderlyingInstr();
8879 unsigned IndexOfFirstOperand;
8887 "Expected instruction to be a call to the llvm.fmuladd intrinsic");
8888 assert(((MinVF.
isScalar() && isa<VPReplicateRecipe>(CurrentLink)) ||
8889 isa<VPWidenCallRecipe>(CurrentLink)) &&
8890 CurrentLink->getOperand(2) == PreviousLink &&
8891 "expected a call where the previous link is the added operand");
8899 {CurrentLink->getOperand(0), CurrentLink->getOperand(1)},
8901 LinkVPBB->
insert(FMulRecipe, CurrentLink->getIterator());
8904 auto *Blend = dyn_cast<VPBlendRecipe>(CurrentLink);
8905 if (PhiR->isInLoop() && Blend) {
8906 assert(Blend->getNumIncomingValues() == 2 &&
8907 "Blend must have 2 incoming values");
8908 if (Blend->getIncomingValue(0) == PhiR)
8909 Blend->replaceAllUsesWith(Blend->getIncomingValue(1));
8911 assert(Blend->getIncomingValue(1) == PhiR &&
8912 "PhiR must be an operand of the blend");
8913 Blend->replaceAllUsesWith(Blend->getIncomingValue(0));
8919 if (isa<VPWidenRecipe>(CurrentLink)) {
8920 assert(isa<CmpInst>(CurrentLinkI) &&
8921 "need to have the compare of the select");
8924 assert(isa<VPWidenSelectRecipe>(CurrentLink) &&
8925 "must be a select recipe");
8926 IndexOfFirstOperand = 1;
8929 "Expected to replace a VPWidenSC");
8930 IndexOfFirstOperand = 0;
8935 CurrentLink->getOperand(IndexOfFirstOperand) == PreviousLink
8936 ? IndexOfFirstOperand + 1
8937 : IndexOfFirstOperand;
8938 VecOp = CurrentLink->getOperand(VecOpId);
8939 assert(VecOp != PreviousLink &&
8940 CurrentLink->getOperand(CurrentLink->getNumOperands() - 1 -
8941 (VecOpId - IndexOfFirstOperand)) ==
8943 "PreviousLink must be the operand other than VecOp");
8959 CurrentLink->replaceAllUsesWith(RedRecipe);
8960 PreviousLink = RedRecipe;
8969 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8982 return isa<VPWidenSelectRecipe>(U) ||
8983 (isa<VPReplicateRecipe>(U) &&
8984 cast<VPReplicateRecipe>(U)->getUnderlyingInstr()->getOpcode() ==
8985 Instruction::Select);
8991 for (
unsigned I = 0;
I != CmpR->getNumOperands(); ++
I)
8992 if (CmpR->getOperand(
I) == PhiR)
9000 if (
Select->getOperand(1) == PhiR)
9003 Select->getVPSingleValue()->replaceAllUsesWith(
Or);
9017 assert(OrigExitingVPV->getDefiningRecipe()->getParent() != LatchVPBB &&
9018 "reduction recipe must be defined before latch");
9020 std::optional<FastMathFlags> FMFs =
9027 return isa<VPInstruction>(&U) &&
9028 cast<VPInstruction>(&U)->getOpcode() ==
9045 assert(!PhiR->
isInLoop() &&
"Unexpected truncated inloop reduction!");
9054 Trunc->
insertAfter(NewExitingVPV->getDefiningRecipe());
9055 Extnd->insertAfter(Trunc);
9057 PhiR->
setOperand(1, Extnd->getVPSingleValue());
9058 NewExitingVPV = Extnd;
9077 OrigExitingVPV->replaceUsesWithIf(
9078 FinalReductionResult,
9087 "Not a pointer induction according to InductionDescriptor!");
9089 "Unexpected type.");
9091 "Recipe should have been replaced");
9094 PHINode *CanonicalIV = cast<PHINode>(State.
get(IVR, 0,
true));
9099 Type *ScStValueType = ScalarStartValue->
getType();
9104 NewPointerPhi->
addIncoming(ScalarStartValue, VectorPH);
9111 Value *NumUnrolledElems =
9122 NewPointerPhi->
addIncoming(InductionGEP, VectorPH);
9127 for (
unsigned Part = 0; Part < State.
UF; ++Part) {
9129 Value *StartOffsetScalar =
9131 Value *StartOffset =
9138 "scalar step must be the same across all parts");
9145 State.
set(
this,
GEP, Part);
9150 assert(!State.
Instance &&
"VPDerivedIVRecipe being replicated.");
9161 Kind, cast_if_present<BinaryOperator>(FPBinOp));
9162 DerivedIV->
setName(
"offset.idx");
9163 assert(DerivedIV != CanonicalIV &&
"IV didn't need transforming?");
9172 "uniform recipe shouldn't be predicated");
9178 if (State.
Instance->Lane.isFirstLane()) {
9192 if ((isa<LoadInst>(UI) || isa<StoreInst>(UI)) &&
9194 return Op->isDefinedOutsideVectorRegions();
9198 for (
unsigned Part = 1; Part < State.
UF; ++Part)
9207 for (
unsigned Part = 0; Part < State.
UF; ++Part)
9214 if (isa<StoreInst>(UI) &&
9225 for (
unsigned Part = 0; Part < State.
UF; ++Part)
9226 for (
unsigned Lane = 0; Lane < EndLane; ++Lane)
9238 auto &Builder = State.Builder;
9240 for (unsigned Part = 0; Part < State.UF; ++Part) {
9242 Value *Mask = nullptr;
9243 if (auto *VPMask = getMask()) {
9246 Mask = State.get(VPMask, Part);
9248 Mask = Builder.CreateVectorReverse(Mask, "reverse");
9253 NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
9254 "wide.masked.gather");
9256 NewLI = Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
9258 "wide.masked.load");
9260 NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
9265 NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
9266 State.set(this, NewLI, Part);
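// ---------------------------------------------------------------------------
// Illustrative sketch (standalone): the three load shapes generated above,
// modelled on plain arrays. A consecutive access becomes one wide load, a
// masked consecutive access reads only active lanes, and a non-consecutive
// access becomes a gather through per-lane indices. Values and names are
// illustrative only.
#include <cstdio>

int main() {
  constexpr int VF = 4;
  int Memory[16];
  for (int I = 0; I < 16; ++I) Memory[I] = I * 10;

  int Wide[VF], MaskedWide[VF], Gathered[VF];
  bool Mask[VF] = {true, false, true, true};
  int Indices[VF] = {0, 4, 8, 12}; // strided access -> gather

  for (int Lane = 0; Lane < VF; ++Lane) {
    Wide[Lane] = Memory[Lane];                        // "wide.load"
    MaskedWide[Lane] = Mask[Lane] ? Memory[Lane] : 0; // "wide.masked.load"
    Gathered[Lane] = Memory[Indices[Lane]];           // "wide.masked.gather"
  }
  for (int Lane = 0; Lane < VF; ++Lane)
    std::printf("%d %d %d\n", Wide[Lane], MaskedWide[Lane], Gathered[Lane]);
  return 0;
}
// --- end sketch -------------------------------------------------------------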
9275 Value *AllTrueMask =
9277 return Builder.
CreateIntrinsic(ValTy, Intrinsic::experimental_vp_reverse,
9278 {Operand, AllTrueMask, EVL},
nullptr,
Name);
9282 assert(State.
UF == 1 &&
"Expected only UF == 1 when vectorizing with "
9283 "explicit vector length.");
9291 auto &Builder = State.
Builder;
9296 Value *Mask =
nullptr;
9298 Mask = State.
get(VPMask, 0);
9302 Mask = Builder.CreateVectorSplat(State.
VF, Builder.getTrue());
9307 Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {
Addr, Mask, EVL},
9308 nullptr,
"wide.masked.gather");
9313 Instruction::Load, DataTy,
Addr,
"vp.op.load"));
9321 State.
set(
this, Res, 0);
9331 auto &Builder = State.
Builder;
9334 for (
unsigned Part = 0; Part < State.
UF; ++Part) {
9336 Value *Mask =
nullptr;
9337 if (
auto *VPMask =
getMask()) {
9340 Mask = State.
get(VPMask, Part);
9342 Mask = Builder.CreateVectorReverse(Mask,
"reverse");
9345 Value *StoredVal = State.
get(StoredVPValue, Part);
9349 StoredVal = Builder.CreateVectorReverse(StoredVal,
"reverse");
9355 NewSI = Builder.CreateMaskedScatter(StoredVal,
Addr, Alignment, Mask);
9357 NewSI = Builder.CreateMaskedStore(StoredVal,
Addr, Alignment, Mask);
9359 NewSI = Builder.CreateAlignedStore(StoredVal,
Addr, Alignment);
9365 assert(State.
UF == 1 &&
"Expected only UF == 1 when vectorizing with "
9366 "explicit vector length.");
9373 auto &Builder = State.
Builder;
9377 Value *StoredVal = State.
get(StoredValue, 0);
9381 Value *Mask =
nullptr;
9383 Mask = State.
get(VPMask, 0);
9387 Mask = Builder.CreateVectorSplat(State.
VF, Builder.getTrue());
9390 if (CreateScatter) {
9392 Intrinsic::vp_scatter,
9393 {StoredVal, Addr, Mask, EVL});
9399 {StoredVal, Addr}));
9468 LLVM_DEBUG(
dbgs() <<
"LV: cannot compute the outer-loop trip count\n");
9472 Function *
F = L->getHeader()->getParent();
9478 LoopVectorizationCostModel CM(
SEL, L, PSE, LI, LVL, *
TTI, TLI, DB, AC, ORE,
F,
9483 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *
TTI, LVL, CM, IAI, PSE, Hints,
9503 bool AddBranchWeights =
9505 GeneratedRTChecks Checks(*PSE.
getSE(), DT, LI,
TTI,
9506 F->getDataLayout(), AddBranchWeights);
9508 VF.
Width, 1, LVL, &CM, BFI, PSI, Checks);
9510 << L->getHeader()->getParent()->getName() <<
"\"\n");
9530 if (
auto *S = dyn_cast<StoreInst>(&Inst)) {
9531 if (S->getValueOperand()->getType()->isFloatTy())
9541 while (!Worklist.
empty()) {
9543 if (!L->contains(
I))
9545 if (!Visited.
insert(
I).second)
9552 if (isa<FPExtInst>(
I) && EmittedRemark.
insert(
I).second)
9555 I->getDebugLoc(), L->getHeader())
9556 <<
"floating point conversion changes vector width. "
9557 <<
"Mixed floating point precision requires an up/down "
9558 <<
"cast that will negatively impact performance.";
9561 for (
Use &
Op :
I->operands())
9562 if (
auto *OpI = dyn_cast<Instruction>(
Op))
9569 std::optional<unsigned> VScale, Loop *L,
9582 << "LV: Interleaving only is not profitable due to runtime checks\n");
9623 unsigned AssumedMinimumVscale = 1;
9625 AssumedMinimumVscale = *VScale;
9626 IntVF *= AssumedMinimumVscale;
9644 uint64_t MinTC = std::max(MinTC1, MinTC2);
9646 MinTC = alignTo(MinTC, IntVF);
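// ---------------------------------------------------------------------------
// Illustrative sketch (standalone): the final steps shown above. Two lower
// bounds on the trip count needed to amortize the runtime checks are
// combined with max(), then rounded up to a multiple of the vscale-adjusted
// VF. The concrete MinTC1/MinTC2/IntVF values are placeholders; only the
// max + alignTo steps mirror the code here.
#include <algorithm>
#include <cstdint>
#include <cstdio>

static uint64_t alignUpTo(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) / Align * Align; // round up to a multiple
}

int main() {
  uint64_t MinTC1 = 37; // e.g. bound derived from check cost vs. savings
  uint64_t MinTC2 = 24; // e.g. bound keeping checks a small cost fraction
  unsigned IntVF = 8;   // VF, already scaled by the assumed minimum vscale
  uint64_t MinTC = std::max(MinTC1, MinTC2);
  MinTC = alignUpTo(MinTC, IntVF);
  std::printf("minimum profitable trip count = %llu\n",
              static_cast<unsigned long long>(MinTC)); // 40
  return 0;
}
// --- end sketch -------------------------------------------------------------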
9650 dbgs() <<
"LV: Minimum required TC for runtime checks to be profitable:"
9658 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
9659 "trip count < minimum profitable VF ("
9670 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9672 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9677 "VPlan-native path is not enabled. Only process inner loops.");
9680 << L->getHeader()->getParent()->getName() <<
"' from "
9681 << L->getLocStr() <<
"\n");
9686 dbgs() <<
"LV: Loop hints:"
9697 Function *
F = L->getHeader()->getParent();
9719 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9729 if (!L->isInnermost())
9733 assert(L->isInnermost() && "Inner loop expected.");
9755 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9756 << "This loop is worth vectorizing only if no scalar "
9757 << "iteration overheads are incurred.");
9759 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9772 LLVM_DEBUG(dbgs() << " But the target considers the trip count too "
9773 "small to consider vectorizing.\n");
9775 "The trip count is below the minial threshold value.",
9776 "loop trip count is too low, avoiding vectorization",
9777 "LowTripCount", ORE, L);
9786 if (
F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9788 "Can't vectorize when the NoImplicitFloat attribute is used",
9789 "loop not vectorized due to NoImplicitFloat attribute",
9790 "NoImplicitFloat",
ORE, L);
9802 "Potentially unsafe FP op prevents vectorization",
9803 "loop not vectorized due to unsafe FP support.",
9804 "UnsafeFP",
ORE, L);
9809 bool AllowOrderedReductions;
9819 ExactFPMathInst->getDebugLoc(),
9820 ExactFPMathInst->getParent())
9821 <<
"loop not vectorized: cannot prove it is safe to reorder "
9822 "floating-point operations";
9824 LLVM_DEBUG(
dbgs() <<
"LV: loop not vectorized: cannot prove it is safe to "
9825 "reorder floating-point operations\n");
9831 LoopVectorizationCostModel CM(
SEL, L, PSE,
LI, &LVL, *
TTI,
TLI,
DB,
AC,
ORE,
9834 LoopVectorizationPlanner LVP(L,
LI,
DT,
TLI, *
TTI, &LVL, CM, IAI, PSE, Hints,
9842 std::optional<VectorizationFactor> MaybeVF = LVP.
plan(UserVF, UserIC);
9847 bool AddBranchWeights =
9850 F->getDataLayout(), AddBranchWeights);
9856 unsigned SelectedIC = std::max(IC, UserIC);
9863 bool ForceVectorization =
9865 if (!ForceVectorization &&
9867 *PSE.
getSE(),
SEL)) {
9870 DEBUG_TYPE,
"CantReorderMemOps", L->getStartLoc(),
9872 <<
"loop not vectorized: cannot prove it is safe to reorder "
9873 "memory operations";
9882 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9883 bool VectorizeLoop =
true, InterleaveLoop =
true;
9885 LLVM_DEBUG(
dbgs() <<
"LV: Vectorization is possible but not beneficial.\n");
9886 VecDiagMsg = std::make_pair(
9887 "VectorizationNotBeneficial",
9888 "the cost-model indicates that vectorization is not beneficial");
9889 VectorizeLoop =
false;
9892 if (!MaybeVF && UserIC > 1) {
9895 LLVM_DEBUG(
dbgs() <<
"LV: Ignoring UserIC, because vectorization and "
9896 "interleaving should be avoided up front\n");
9897 IntDiagMsg = std::make_pair(
9898 "InterleavingAvoided",
9899 "Ignoring UserIC, because interleaving was avoided up front");
9900 InterleaveLoop =
false;
9901 }
else if (IC == 1 && UserIC <= 1) {
9904 IntDiagMsg = std::make_pair(
9905 "InterleavingNotBeneficial",
9906 "the cost-model indicates that interleaving is not beneficial");
9907 InterleaveLoop =
false;
9909 IntDiagMsg.first =
"InterleavingNotBeneficialAndDisabled";
9910 IntDiagMsg.second +=
9911 " and is explicitly disabled or interleave count is set to 1";
9913 }
else if (IC > 1 && UserIC == 1) {
9916 dbgs() <<
"LV: Interleaving is beneficial but is explicitly disabled.");
9917 IntDiagMsg = std::make_pair(
9918 "InterleavingBeneficialButDisabled",
9919 "the cost-model indicates that interleaving is beneficial "
9920 "but is explicitly disabled or interleave count is set to 1");
9921 InterleaveLoop =
false;
9925 IC = UserIC > 0 ? UserIC : IC;
9929 if (!VectorizeLoop && !InterleaveLoop) {
9933 L->getStartLoc(), L->getHeader())
9934 << VecDiagMsg.second;
9938 L->getStartLoc(), L->getHeader())
9939 << IntDiagMsg.second;
9942 }
else if (!VectorizeLoop && InterleaveLoop) {
9946 L->getStartLoc(), L->getHeader())
9947 << VecDiagMsg.second;
9949 }
else if (VectorizeLoop && !InterleaveLoop) {
9951 <<
") in " << L->getLocStr() <<
'\n');
9954 L->getStartLoc(), L->getHeader())
9955 << IntDiagMsg.second;
9957 }
else if (VectorizeLoop && InterleaveLoop) {
9959 <<
") in " << L->getLocStr() <<
'\n');
9963 bool DisableRuntimeUnroll =
false;
9964 MDNode *OrigLoopID = L->getLoopID();
9966 using namespace ore;
9967 if (!VectorizeLoop) {
9968 assert(IC > 1 &&
"interleave count should not be 1 or 0");
9971 InnerLoopUnroller Unroller(L, PSE,
LI,
DT,
TLI,
TTI,
AC,
ORE, IC, &LVL,
9976 "VPlan cost model and legacy cost model disagreed");
9982 <<
"interleaved loop (interleaved count: "
9983 << NV(
"InterleaveCount", IC) <<
")";
9998 EPI, &LVL, &CM,
BFI,
PSI, Checks);
10000 std::unique_ptr<VPlan> BestMainPlan(
10002 const auto &[ExpandedSCEVs, ReductionResumeValues] = LVP.
executePlan(
10017 Header->setName(
"vec.epilog.vector.body");
10027 auto *ExpandR = cast<VPExpandSCEVRecipe>(&R);
10029 ExpandedSCEVs.find(ExpandR->getSCEV())->second);
10033 ExpandR->eraseFromParent();
10040 if (isa<VPCanonicalIVPHIRecipe>(&R))
10043 Value *ResumeV =
nullptr;
10045 if (
auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
10047 ReductionPhi->getRecurrenceDescriptor();
10049 ResumeV = ReductionResumeValues.find(&RdxDesc)->second;
10055 cast<Instruction>(ResumeV)->
getParent()->getFirstNonPHI());
10065 if (
auto *Ind = dyn_cast<VPWidenPointerInductionRecipe>(&R)) {
10066 IndPhi = cast<PHINode>(Ind->getUnderlyingValue());
10067 ID = &Ind->getInductionDescriptor();
10069 auto *WidenInd = cast<VPWidenIntOrFpInductionRecipe>(&R);
10070 IndPhi = WidenInd->getPHINode();
10071 ID = &WidenInd->getInductionDescriptor();
10078 assert(ResumeV &&
"Must have a resume value");
10080 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
10084 "DT not preserved correctly");
10086 DT,
true, &ExpandedSCEVs);
10087 ++LoopsEpilogueVectorized;
10090 DisableRuntimeUnroll =
true;
10094 "Plan should have a single VF");
10099 "VPlan cost model and legacy cost model disagreed");
10110 DisableRuntimeUnroll =
true;
10120 std::optional<MDNode *> RemainderLoopID =
10123 if (RemainderLoopID) {
10124 L->setLoopID(*RemainderLoopID);
10126 if (DisableRuntimeUnroll)
10165 bool Changed = false, CFGChanged = false;
10172 for (const auto &L : *LI)
10173 Changed |= CFGChanged |=
10184 LoopsAnalyzed += Worklist.size();
10187 while (!Worklist.empty()) {
10233 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AC, LAIs, ORE, PSI);
10234 if (!Result.MadeAnyChange)
10248 if (Result.MadeCFGChange) {
10264 OS, MapClassName2PassName);
10267 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10268 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
static unsigned getIntrinsicID(const SDNode *N)
AMDGPU Lower Kernel Arguments
amdgpu AMDGPU Register Bank Select
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefAnalysis InstSet & ToRemove
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define DEBUG_WITH_TYPE(TYPE, X)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
This header provides classes for managing per-loop analyses.
static const char * VerboseDebug
loop Loop Strength Reduction
This file defines the LoopVectorizationLegality class.
This file provides a LoopVectorizationPlanner class.
static void collectSupportedLoops(Loop &L, LoopInfo *LI, OptimizationRemarkEmitter *ORE, SmallVectorImpl< Loop * > &V)
static cl::opt< unsigned > EpilogueVectorizationForceVF("epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, cl::desc("When epilogue vectorization is enabled, and a value greater than " "1 is specified, forces the given VF for all applicable epilogue " "loops."))
static ElementCount determineVPlanVF(const TargetTransformInfo &TTI, LoopVectorizationCostModel &CM)
static void createAndCollectMergePhiForReduction(VPInstruction *RedResult, DenseMap< const RecurrenceDescriptor *, Value * > &ReductionResumeValues, VPTransformState &State, Loop *OrigLoop, BasicBlock *LoopMiddleBlock, bool VectorizingEpilogue)
static std::optional< unsigned > getSmallBestKnownTC(ScalarEvolution &SE, Loop *L)
Returns "best known" trip count for the specified loop L as defined by the following procedure: 1) Re...
static cl::opt< unsigned > VectorizeMemoryCheckThreshold("vectorize-memory-check-threshold", cl::init(128), cl::Hidden, cl::desc("The maximum allowed number of runtime memory checks"))
static cl::opt< unsigned > TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), cl::Hidden, cl::desc("Loops with a constant trip count that is smaller than this " "value are vectorized only if no scalar iteration overheads " "are incurred."))
Loops with a known constant trip count below this number are vectorized only if no scalar iteration o...
static Instruction * createReverseEVL(IRBuilderBase &Builder, Value *Operand, Value *EVL, const Twine &Name)
Use all-true mask for reverse rather than actual mask, as it avoids a dependence w/o affecting the re...
static void debugVectorizationMessage(const StringRef Prefix, const StringRef DebugMsg, Instruction *I)
Write a DebugMsg about vectorization to the debug output stream.
static cl::opt< bool > EnableCondStoresVectorization("enable-cond-stores-vec", cl::init(true), cl::Hidden, cl::desc("Enable if predication of stores during vectorization."))
static Value * emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue, Value *Step, InductionDescriptor::InductionKind InductionKind, const BinaryOperator *InductionBinOp)
Compute the transformed value of Index at offset StartValue using step StepValue.
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I)
Look for a meaningful debug location on the instruction or it's operands.
static void emitInvalidCostRemarks(SmallVector< InstructionVFPair > InvalidCosts, OptimizationRemarkEmitter *ORE, Loop *TheLoop)
const char LLVMLoopVectorizeFollowupAll[]
static cl::opt< bool > ForceTargetSupportsScalableVectors("force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, cl::desc("Pretend that scalable vectors are supported, even if the target does " "not support them. This flag should only be used for testing."))
static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, bool HasNUW, DebugLoc DL)
static std::optional< unsigned > getVScaleForTuning(const Loop *L, const TargetTransformInfo &TTI)
Convenience function that returns the value of vscale_range iff vscale_range.min == vscale_range....
static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style)
static constexpr uint32_t MemCheckBypassWeights[]
static Type * MaybeVectorizeType(Type *Elt, ElementCount VF)
cl::opt< unsigned > ForceTargetInstructionCost("force-target-instruction-cost", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's expected cost for " "an instruction to a single constant value. Mostly " "useful for getting consistent testing."))
std::optional< unsigned > getMaxVScale(const Function &F, const TargetTransformInfo &TTI)
static constexpr uint32_t MinItersBypassWeights[]
static cl::opt< unsigned > ForceTargetNumScalarRegs("force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers."))
static cl::opt< bool > UseWiderVFIfCallVariantsPresent("vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true), cl::Hidden, cl::desc("Try wider VFs if they enable the use of vector variants"))
static cl::opt< unsigned > SmallLoopCost("small-loop-cost", cl::init(20), cl::Hidden, cl::desc("The cost of a loop that is considered 'small' by the interleaver."))
static cl::opt< unsigned > ForceTargetNumVectorRegs("force-target-num-vector-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of vector registers."))
static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks, VectorizationFactor &VF, std::optional< unsigned > VScale, Loop *L, ScalarEvolution &SE, ScalarEpilogueLowering SEL)
static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > EnableIndVarRegisterHeur("enable-ind-var-reg-heur", cl::init(true), cl::Hidden, cl::desc("Count the induction variable only once when interleaving"))
static cl::opt< TailFoldingStyle > ForceTailFoldingStyle("force-tail-folding-style", cl::desc("Force the tail folding style"), cl::init(TailFoldingStyle::None), cl::values(clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"), clEnumValN(TailFoldingStyle::Data, "data", "Create lane mask for data only, using active.lane.mask intrinsic"), clEnumValN(TailFoldingStyle::DataWithoutLaneMask, "data-without-lane-mask", "Create lane mask with compare/stepvector"), clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), clEnumValN(TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck, "data-and-control-without-rt-check", "Similar to data-and-control, but remove the runtime check"), clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", "Use predicated EVL instructions for tail folding. If EVL " "is unsupported, fallback to data-without-lane-mask.")))
static cl::opt< bool > EnableEpilogueVectorization("enable-epilogue-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of epilogue loops."))
static ScalarEpilogueLowering getScalarEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
const char VerboseDebug[]
static cl::opt< bool > PreferPredicatedReductionSelect("prefer-predicated-reduction-select", cl::init(false), cl::Hidden, cl::desc("Prefer predicating a reduction operation over an after loop select."))
static VPWidenIntOrFpInductionRecipe * createWidenInductionRecipes(PHINode *Phi, Instruction *PhiOrTrunc, VPValue *Start, const InductionDescriptor &IndDesc, VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop)
Creates a VPWidenIntOrFpInductionRecpipe for Phi.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop, Instruction *I)
Create an analysis remark that explains why vectorization failed.
static constexpr uint32_t SCEVCheckBypassWeights[]
static cl::opt< bool > PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), cl::Hidden, cl::desc("Prefer in-loop vector reductions, " "overriding the targets preference."))
static cl::opt< unsigned > EpilogueVectorizationMinVF("epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, cl::desc("Only loops with vectorization factor equal to or larger than " "the specified value are considered for epilogue vectorization."))
const char LLVMLoopVectorizeFollowupVectorized[]
static cl::opt< bool > EnableLoadStoreRuntimeInterleave("enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, cl::desc("Enable runtime interleaving until load/store ports are saturated"))
static void addLiveOutsForFirstOrderRecurrences(VPlan &Plan)
Feed a resume value for every FOR from the vector loop to the scalar loop, if middle block branches t...
static cl::opt< bool > VPlanBuildStressTest("vplan-build-stress-test", cl::init(false), cl::Hidden, cl::desc("Build VPlan for every supported loop nest in the function and bail " "out right after the build (stress test the VPlan H-CFG construction " "in the VPlan-native vectorization path)."))
static bool hasIrregularType(Type *Ty, const DataLayout &DL)
A helper function that returns true if the given type is irregular.
static cl::opt< bool > LoopVectorizeWithBlockFrequency("loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, cl::desc("Enable the use of the block frequency analysis to access PGO " "heuristics minimizing code growth in cold regions and being more " "aggressive in hot regions."))
static Value * getExpandedStep(const InductionDescriptor &ID, const SCEV2ValueTy &ExpandedSCEVs)
Return the expanded step for ID using ExpandedSCEVs to look up SCEV expansion results.
const char LLVMLoopVectorizeFollowupEpilogue[]
static bool useActiveLaneMask(TailFoldingStyle Style)
static void addUsersInExitBlock(Loop *OrigLoop, VPRecipeBuilder &Builder, VPlan &Plan)
static bool isIndvarOverflowCheckKnownFalse(const LoopVectorizationCostModel *Cost, ElementCount VF, std::optional< unsigned > UF=std::nullopt)
For the given VF and UF and maximum trip count computed for the loop, return whether the induction va...
static cl::opt< PreferPredicateTy::Option > PreferPredicateOverEpilogue("prefer-predicate-over-epilogue", cl::init(PreferPredicateTy::ScalarEpilogue), cl::Hidden, cl::desc("Tail-folding and predication preferences over creating a scalar " "epilogue loop."), cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue, "scalar-epilogue", "Don't tail-predicate loops, create scalar epilogue"), clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue, "predicate-else-scalar-epilogue", "prefer tail-folding, create scalar epilogue if tail " "folding fails."), clEnumValN(PreferPredicateTy::PredicateOrDontVectorize, "predicate-dont-vectorize", "prefers tail-folding, don't attempt vectorization if " "tail-folding fails.")))
static cl::opt< bool > EnableInterleavedMemAccesses("enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop"))
static cl::opt< bool > EnableMaskedInterleavedMemAccesses("enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"))
An interleave-group may need masking if it resides in a block that needs predication,...
static cl::opt< bool > ForceOrderedReductions("force-ordered-reductions", cl::init(false), cl::Hidden, cl::desc("Enable the vectorisation of loops with in-order (strict) " "FP reductions"))
static void cse(BasicBlock *BB)
Perform cse of induction variable instructions.
static const SCEV * getAddressAccessSCEV(Value *Ptr, LoopVectorizationLegality *Legal, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets Address Access SCEV after verifying that the access pattern is loop invariant except the inducti...
static cl::opt< cl::boolOrDefault > ForceSafeDivisor("force-widen-divrem-via-safe-divisor", cl::Hidden, cl::desc("Override cost based safe divisor widening for div/rem instructions"))
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool processLoopInVPlanNativePath(Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements)
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
static cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static void AddRuntimeUnrollDisableMetaData(Loop *L)
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
static cl::opt< bool > PrintVPlansInDotFormat("vplan-print-in-dot-format", cl::Hidden, cl::desc("Use dot format instead of plain text when dumping VPlans"))
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI)
Check if any recipe of Plan will generate a vector value, which will be assigned a vector register.
static cl::opt< bool > MaximizeBandwidth("vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, cl::desc("Maximize bandwidth when selecting vectorization factor which " "will be determined by the smallest type in loop."))
mir Rename Register Operands
This file implements a map that provides insertion order iteration.
std::pair< uint64_t, uint64_t > Interval
Module.h This file contains the declarations for the Module class.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static BinaryOperator * CreateMul(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
This file defines the VPlanHCFGBuilder class which contains the public interface (buildHierarchicalCF...
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
static const char PassName[]
static const uint32_t IV[8]
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
int64_t getSExtValue() const
Get sign extended value.
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
static Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
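To make the BasicBlock queries above concrete, here is a hypothetical helper (not from this pass) that combines phis(), getTerminator(), and a cast to BranchInst.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"

// Counts the leading PHI nodes of BB and reports whether its terminator is a
// conditional branch. Returns the PHI count.
static unsigned countPhis(llvm::BasicBlock &BB, bool &IsCondBr) {
  unsigned NumPhis = 0;
  for (llvm::PHINode &Phi : BB.phis()) {
    (void)Phi;
    ++NumPhis;
  }
  IsCondBr = false;
  if (auto *Br = llvm::dyn_cast_or_null<llvm::BranchInst>(BB.getTerminator()))
    IsCondBr = Br->isConditional();
  return NumPhis;
}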
BinaryOps getOpcode() const
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
Represents analyses that only rely on functions' control flow.
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
This class represents a function call, abstracting a target machine's calling convention.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_ULT
unsigned less than
@ ICMP_ULE
unsigned less or equal
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
const ValueT & at(const_arg_type_t< KeyT > Val) const
at - Return the entry for the specified key, or abort if no such entry exists.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
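The DenseMap accessors above differ mainly in how they treat missing keys; a minimal standalone sketch with illustrative values, not from this file.
#include "llvm/ADT/DenseMap.h"
#include <cassert>

int main() {
  llvm::DenseMap<int, int> M;
  M.insert({1, 10});               // returns std::pair<iterator, bool>
  assert(M.contains(1));           // true/false membership test
  assert(M.count(2) == 0);         // 1 or 0
  assert(M.lookup(2) == 0);        // default-constructed value for a missing key
  assert(M.at(1) == 10);           // aborts if the key is absent
  assert(M.find(1) != M.end());
  return 0;
}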
Implements a dense probed hash-table based set.
DomTreeNodeBase * getIDom() const
Analysis pass which computes a DominatorTree.
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
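A hypothetical helper (not part of this file) showing how the node-based DominatorTree queries above fit together, assuming DT has already been computed for the enclosing function.
#include "llvm/IR/Dominators.h"

// Returns true if A strictly dominates B, i.e. A dominates B and A != B.
static bool strictlyDominates(const llvm::DominatorTree &DT,
                              const llvm::BasicBlock *A,
                              const llvm::BasicBlock *B) {
  const llvm::DomTreeNode *NA = DT.getNode(A);
  const llvm::DomTreeNode *NB = DT.getNode(B);
  return NA && NB && DT.properlyDominates(NA, NB);
}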
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getScalable(ScalarTy MinVal)
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
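A minimal sketch of the ElementCount constructors and predicates listed above; standalone, with values chosen only for illustration.
#include "llvm/Support/TypeSize.h"
#include <cassert>

int main() {
  llvm::ElementCount Fixed4 = llvm::ElementCount::getFixed(4);
  llvm::ElementCount Scal2 = llvm::ElementCount::getScalable(2); // vscale x 2
  assert(Fixed4.isVector() && !Fixed4.isScalable());
  assert(Scal2.isScalable() && Scal2.getKnownMinValue() == 2);
  assert(llvm::ElementCount::getFixed(1).isScalar()); // exactly one element
  return 0;
}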
BasicBlock * emitMinimumVectorEpilogueIterCountCheck(BasicBlock *Bypass, BasicBlock *Insert)
Emits an iteration count bypass check after the main vector loop has finished to see if there are any...
void printDebugTracesAtEnd() override
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks)
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
std::pair< BasicBlock *, Value * > createEpilogueVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs) final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (ie the ...
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
void printDebugTracesAtEnd() override
std::pair< BasicBlock *, Value * > createEpilogueVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs) final
Implements the interface for creating a vectorized skeleton using the main loop strategy (ie the firs...
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
BasicBlock * emitIterationCountCheck(BasicBlock *Bypass, bool ForEpilogue)
Emits an iteration count bypass check once for the main loop (when ForEpilogue is false) and once for...
FastMathFlags getFastMathFlags() const
Convenience function for getting all the fast-math flags.
Convenience struct for specifying and reasoning about fast-math flags.
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
FunctionType * getFunctionType() const
Returns the FunctionType for me.
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Common base class shared among various IRBuilders.
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains V broadcast to all NumElts elements.
ConstantInt * getTrue()
Get the constant value for i1 true.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
InstTy * Insert(InstTy *I, const Twine &Name="") const
Insert and return the specified instruction.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getFalse()
Get the constant value for i1 false.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateURem(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateStepVector(Type *DstType, const Twine &Name="")
Creates a vector of type DstType with the linear sequence <0, 1, ...>
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
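A small standalone sketch exercising several of the IRBuilder calls listed above (SetInsertPoint, CreateAdd, CreateICmpEQ, CreateSelect); the module and function names are hypothetical and the snippet is not taken from this pass.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("demo", Ctx);
  auto *I32 = llvm::Type::getInt32Ty(Ctx);
  auto *FnTy = llvm::FunctionType::get(I32, {I32}, /*isVarArg=*/false);
  auto *F = llvm::Function::Create(FnTy, llvm::Function::ExternalLinkage, "f", M);
  auto *BB = llvm::BasicBlock::Create(Ctx, "entry", F);

  llvm::IRBuilder<> B(Ctx);
  B.SetInsertPoint(BB);                                  // append to the end of BB
  llvm::Value *Arg = F->getArg(0);
  llvm::Value *Inc = B.CreateAdd(Arg, B.getInt32(1), "inc");
  llvm::Value *IsZero = B.CreateICmpEQ(Inc, B.getInt32(0), "iszero");
  llvm::Value *Res = B.CreateSelect(IsZero, Arg, Inc, "res");
  B.CreateRet(Res);
  return llvm::verifyFunction(*F, &llvm::errs());        // 0 if the IR is well formed
}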
A struct for saving information about induction variables.
InductionKind getKind() const
const SCEV * getStep() const
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_NoInduction
Not an induction variable.
@ IK_FpInduction
Floating point induction variable.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
const SmallVectorImpl< Instruction * > & getCastInsts() const
Returns a reference to the type cast instructions in the induction update chain, that are redundant w...
Value * getStartValue() const
An extension of the inner loop vectorizer that creates a skeleton for a vectorized loop that has its ...
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks)
virtual std::pair< BasicBlock *, Value * > createEpilogueVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs)=0
The interface for creating a vectorized skeleton using one of two different strategies,...
std::pair< BasicBlock *, Value * > createVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs) final
Create a new empty loop that will contain vectorized instructions later on, while the old loop will b...
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separ...
InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, unsigned UnrollFactor, LoopVectorizationLegality *LVL, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization ...
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
PHINode * createInductionResumeValue(PHINode *OrigPhi, const InductionDescriptor &ID, Value *Step, ArrayRef< BasicBlock * > BypassBlocks, std::pair< BasicBlock *, Value * > AdditionalBypass={nullptr, nullptr})
Create a new phi node for the induction variable OrigPhi to resume iteration count in the scalar epil...
void scalarizeInstruction(const Instruction *Instr, VPReplicateRecipe *RepRecipe, const VPIteration &Instance, VPTransformState &State)
A helper function to scalarize a single Instruction in the innermost loop.
BasicBlock * LoopScalarBody
The scalar loop body.
Value * TripCount
Trip count of the original loop.
void sinkScalarOperands(Instruction *PredInst)
Iteratively sink the scalarized operands of a predicated instruction into the block that was created ...
const TargetLibraryInfo * TLI
Target Library Info.
DenseMap< PHINode *, Value * > IVEndValues
ElementCount MinProfitableTripCount
const TargetTransformInfo * TTI
Target Transform Info.
Value * VectorTripCount
Trip count of the widened loop (TripCount - TripCount % (VF*UF))
bool areSafetyChecksAdded()
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor, LoopVectorizationLegality *LVL, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
BasicBlock * emitSCEVChecks(BasicBlock *Bypass)
Emit a bypass check to see if all of the SCEV assumptions we've had to make are correct.
LoopVectorizationCostModel * Cost
The profitablity analysis.
SmallMapVector< const RecurrenceDescriptor *, PHINode *, 4 > ReductionResumeValues
BlockFrequencyInfo * BFI
BFI and PSI are used to check for profile guided size optimizations.
Value * getTripCount() const
Returns the original loop trip count.
BasicBlock * LoopMiddleBlock
Middle Block between the vector and the scalar.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
SmallVector< Instruction *, 4 > PredicatedInstructions
Store instructions that were predicated.
BasicBlock * completeLoopSkeleton()
Complete the loop skeleton by adding debug MDs, creating appropriate conditional branches in the midd...
void createVectorLoopSkeleton(StringRef Prefix)
Emit basic blocks (prefixed with Prefix) for the iteration check, vector loop preheader,...
BasicBlock * emitMemRuntimeChecks(BasicBlock *Bypass)
Emit bypass checks to check any memory assumptions we may have made.
BasicBlock * LoopScalarPreHeader
The scalar-loop preheader.
LoopVectorizationLegality * Legal
The legality analysis.
void emitIterationCountCheck(BasicBlock *Bypass)
Emit a bypass check to see if the vector trip count is zero, including if it overflows.
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, Value *VectorTripCount, Value *EndValue, BasicBlock *MiddleBlock, BasicBlock *VectorHeader, VPlan &Plan, VPTransformState &State)
Set up the values of the IVs correctly when exiting the vector loop.
void createInductionResumeValues(const SCEV2ValueTy &ExpandedSCEVs, std::pair< BasicBlock *, Value * > AdditionalBypass={nullptr, nullptr})
Create new phi nodes for the induction variables to resume iteration count in the scalar epilogue,...
void fixNonInductionPHIs(VPlan &Plan, VPTransformState &State)
Fix the non-induction PHIs in Plan.
DominatorTree * DT
Dominator Tree.
void setTripCount(Value *TC)
Used to set the trip count after ILV's construction and after the preheader block has been executed.
bool OptForSizeBasedOnProfile
BasicBlock * LoopVectorPreHeader
The vector-loop preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
Value * getOrCreateVectorTripCount(BasicBlock *InsertBlock)
Returns (and creates if needed) the trip count of the widened loop.
IRBuilder Builder
The builder that we use.
virtual std::pair< BasicBlock *, Value * > createVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs)
Create a new empty loop that will contain vectorized instructions later on, while the old loop will b...
unsigned UF
The vectorization unroll factor to use.
void fixVectorizedLoop(VPTransformState &State, VPlan &Plan)
Fix the vectorized code, taking care of header phi's, live-outs, and more.
BasicBlock * LoopExitBlock
The unique ExitBlock of the scalar loop if one exists.
SmallVector< BasicBlock *, 4 > LoopBypassBlocks
A list of all bypass blocks. The first block is the entry of the loop.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks,...
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
std::optional< CostType > getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
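InstructionCost carries an explicit invalid state in addition to a numeric value; a minimal sketch (not from this file) of the accessors listed above.
#include "llvm/Support/InstructionCost.h"
#include <cassert>

int main() {
  llvm::InstructionCost C = 4;
  C += 2;                                            // arithmetic propagates validity
  assert(C.isValid() && *C.getValue() == 6);
  llvm::InstructionCost Bad = llvm::InstructionCost::getInvalid();
  assert(!Bad.isValid() && !Bad.getValue().has_value());
  return 0;
}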
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
void replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB)
Replace specified successor OldBB to point at the provided block.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
InstTy * getInsertPos() const
Drive the analysis of interleaved memory accesses in the loop.
InterleaveGroup< Instruction > * getInterleaveGroup(const Instruction *Instr) const
Get the interleave group that Instr belongs to.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue ...
bool isInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleave group.
bool invalidateGroups()
Invalidate groups, e.g., in case all blocks in loop will be predicated contrary to original assumptio...
iterator_range< SmallPtrSetIterator< llvm::InterleaveGroup< Instruction > * > > getInterleaveGroups()
void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
void invalidateGroupsRequiringScalarEpilogue()
Invalidate groups that require a scalar epilogue (due to gaps).
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This analysis provides dependence information for the memory accesses of a loop.
Drive the analysis of memory accesses in the loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
const DenseMap< Value *, const SCEV * > & getSymbolicStrides() const
If an access has a symbolic strides, this maps the pointer value to the stride symbol.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
void getExitBlocks(SmallVectorImpl< BlockT * > &ExitBlocks) const
Return all of the successor blocks of this loop.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase< BlockT, LoopT > &LI)
This method is used by other analyses to update loop information.
iterator_range< block_iterator > blocks() const
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
BlockT * getExitingBlock() const
If getExitingBlocks would return exactly one block, return that block.
bool isLoopExiting(const BlockT *BB) const
True if terminator in the block can branch to another block that is outside of the current loop.
BlockT * getUniqueExitBlock() const
If getUniqueExitBlocks would return exactly one block, return that block.
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is n...
LoopT * getLoopFor(const BlockT *BB) const
Return the innermost loop that BB lives in.
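A hypothetical helper (not from this pass) that combines several of the Loop queries above into a single shape test.
#include "llvm/Analysis/LoopInfo.h"

// For this sketch, a loop has a "simple shape" if it has a preheader, a single
// latch block, and a unique exit block.
static bool hasSimpleShape(const llvm::Loop *L) {
  return L->getLoopPreheader() && L->getLoopLatch() && L->getUniqueExitBlock();
}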
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
SmallPtrSet< Type *, 16 > ElementTypesInLoop
All element types found in the loop.
void collectElementTypesForWidening()
Collect all element types in the loop for which widening is needed.
bool canVectorizeReductions(ElementCount VF) const
Returns true if the target machine supports all of the reduction variables found for the given VF.
bool requiresScalarEpilogue(VFRange Range) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
bool hasPredStores() const
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
void collectInLoopReductions()
Split reductions into those that happen in the loop, and those that happen outside.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes()
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy, TTI::TargetCostKind CostKind) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI)
const Function * TheFunction
LoopVectorizationLegality * Legal
Vectorization legality.
bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const
Returns true if the target machine supports masked load operation for the given DataType and kind of ...
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
DemandedBits * DB
Demanded bits analysis.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vect...
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
Loop * TheLoop
The loop that we evaluate.
TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow=true) const
Returns the TailFoldingStyle that is best for the current loop.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and cl...
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available...
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
bool selectUserVectorizationFactor(ElementCount UserVF)
Setup cost-based decisions for user vectorization factor.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const
Returns true if the target machine supports masked store operation for the given DataType and kind of...
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
SmallVector< RegisterUsage, 8 > calculateRegisterUsage(ArrayRef< ElementCount > VFs)
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
bool isInLoopReduction(PHINode *Phi) const
Returns true if the Phi is part of an inloop reduction.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector ...
const MapVector< Instruction *, uint64_t > & getMinimalBitwidths() const
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool isLegalGatherOrScatter(Value *V, ElementCount VF)
Returns true if the target machine can represent V as a masked gather or scatter operation.
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool runtimeChecksRequired()
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail fol...
bool isEpilogueVectorizationProfitable(const ElementCount VF) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
void collectUniformsAndScalars(ElementCount VF)
Collect Uniform and Scalar values for the given VF.
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block require predication for any reason,...
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC)
Selects and saves TailFoldingStyle for 2 options - if IV update may overflow or not.
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
bool isScalarWithPredication(Instruction *I, ElementCount VF) const
Returns true if I is an instruction which requires predication and for which our chosen predication s...
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
InstructionCost expectedCost(ElementCount VF, SmallVectorImpl< InstructionVFPair > *Invalid=nullptr)
Returns the expected execution cost.
bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const
Returns true if we should use strict in-order reductions for the given RdxDesc.
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF) const
Return the costs for our two available strategies for lowering a div/rem operation which requires spe...
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
void setCostBasedWideningDecision(ElementCount VF)
Memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
bool isScalarEpilogueAllowed() const
Returns true if a scalar epilogue is not allowed due to optsize or a loop hint annotation.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
unsigned selectInterleaveCount(ElementCount VF, InstructionCost LoopCost)
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
unsigned getNumStores() const
bool hasVectorCallVariants() const
Returns true if there is at least one function call in the loop which has a vectorized variant availa...
uint64_t getMaxSafeVectorWidthInBits() const
bool isInvariantAddressOfReduction(Value *V)
Returns True if given address is invariant and is used to store recurrent expression.
bool blockNeedsPredication(BasicBlock *BB) const
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
int isConsecutivePtr(Type *AccessTy, Value *Ptr) const
Check if this pointer is consecutive when vectorizing.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
bool isReductionVariable(PHINode *PN) const
Returns True if PN is a reduction variable in this loop.
bool isFixedOrderRecurrence(const PHINode *Phi) const
Returns True if Phi is a fixed-order recurrence in this loop.
const InductionDescriptor * getPointerInductionDescriptor(PHINode *Phi) const
Returns a pointer to the induction descriptor, if Phi is pointer induction.
const InductionDescriptor * getIntOrFpInductionDescriptor(PHINode *Phi) const
Returns a pointer to the induction descriptor, if Phi is an integer or floating point induction.
bool isInductionPhi(const Value *V) const
Returns True if V is a Phi node of an induction variable in this loop.
PHINode * getPrimaryInduction()
Returns the primary induction variable.
const InductionList & getInductionVars() const
Returns the induction variables found in the loop.
bool isInvariant(Value *V) const
Returns true if V is invariant across all loop iterations according to SCEV.
const ReductionList & getReductionVars() const
Returns the reduction variables found in the loop.
bool isSafeForAnyVectorWidth() const
unsigned getNumLoads() const
bool canFoldTailByMasking() const
Return true if we can vectorize this loop while folding its tail by masking.
void prepareToFoldTailByMasking()
Mark all respective loads/stores for masking.
Type * getWidestInductionType()
Returns the widest induction type.
const LoopAccessInfo * getLAI() const
bool isUniformMemOp(Instruction &I, ElementCount VF) const
A uniform memory op is a load or store which accesses the same memory location on all VF lanes,...
bool isMaskRequired(const Instruction *I) const
Returns true if the vector representation of the instruction I requires a mask.
const RuntimePointerChecking * getRuntimePointerChecking() const
Returns the information that we collected about runtime memory check.
Planner drives the vectorization process after having passed Legality checks.
std::optional< VectorizationFactor > plan(ElementCount UserVF, unsigned UserIC)
Plan how to best vectorize, return the best VF and its cost, or std::nullopt if vectorization and int...
VectorizationFactor selectEpilogueVectorizationFactor(const ElementCount MaxVF, unsigned IC)
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
std::pair< DenseMap< const SCEV *, Value * >, DenseMap< const RecurrenceDescriptor *, Value * > > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, bool IsEpilogueVectorization, const DenseMap< const SCEV *, Value * > *ExpandedSCEVs=nullptr)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selecte...
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gath...
VPlan & getBestPlanFor(ElementCount VF) const
Return the best VPlan for VF.
VPlan & getBestPlan() const
Return the most profitable plan and fix its VF to the most profitable one.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
void printPlans(raw_ostream &O)
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
This holds vectorization requirements that must be verified late in the process.
Instruction * getExactFPInst()
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool isScalableVectorizationDisabled() const
enum ForceKind getForce() const
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
bool allowReordering() const
When loop hints that enable vectorization are provided, we allow the vectorizer to change the order of operations that ...
void emitRemarkWithHints() const
Dumps all the hint information.
bool isPotentiallyUnsafe() const
ElementCount getWidth() const
@ FK_Enabled
Forcing enabled.
@ FK_Undefined
Not selected.
@ FK_Disabled
Forcing disabled.
unsigned getPredicate() const
void setAlreadyVectorized()
Mark the loop L as already vectorized by setting the width to 1.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend t...
unsigned getInterleave() const
void prepareNoAliasMetadata()
Set up the aliasing scopes based on the memchecks.
Represents a single loop in the control flow graph.
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
MDNode * getLoopID() const
Return the llvm.loop loop id metadata node for this loop if it is present.
void replaceOperandWith(unsigned I, Metadata *New)
Replace a specific operand.
const MDOperand & getOperand(unsigned I) const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
static MDString * get(LLVMContext &Context, StringRef Str)
This class implements a map that also provides access to all stored values in a deterministic order.
iterator find(const KeyT &Key)
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
static unsigned getIncomingValueNumForOperand(unsigned i)
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
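A hypothetical helper (not from this file) using PHINode::Create and addIncoming as listed above; it assumes a recent LLVM where Create accepts an InsertPosition, and the block/value names are made up.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

// Create a two-way phi at the start of MergeBB that yields ValA when control
// arrives from BBA and ValB when it arrives from BBB.
static llvm::PHINode *makeMergePhi(llvm::BasicBlock *MergeBB,
                                   llvm::Value *ValA, llvm::BasicBlock *BBA,
                                   llvm::Value *ValB, llvm::BasicBlock *BBB) {
  llvm::PHINode *Phi = llvm::PHINode::Create(ValA->getType(),
                                             /*NumReservedValues=*/2, "merge",
                                             MergeBB->begin());
  Phi->addIncoming(ValA, BBA);
  Phi->addIncoming(ValB, BBB);
  return Phi;
}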
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void preserveSet()
Mark an analysis set as preserved.
void preserve()
Mark an analysis as preserved.
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
Instruction * getLoopExitInstr() const
static unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
unsigned getMinWidthCastToRecurrenceTypeInBits() const
Returns the minimum width used by the recurrence in bits.
TrackingVH< Value > getRecurrenceStartValue() const
SmallVector< Instruction *, 4 > getReductionOpChain(PHINode *Phi, Loop *L) const
Attempts to find a chain of operations from Phi to LoopExitInst that can be treated as a set of reduc...
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
bool isOrdered() const
Expose an ordered FP reduction to the instance users.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
bool Need
This flag indicates if we need to add the runtime check.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class represents a constant integer value.
const APInt & getAPInt() const
Helper to remove instructions inserted during SCEV expansion, unless they are marked as used.
This class uses information about analyze scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
This class represents an assumption made using SCEV expressions which can be checked at run-time.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
bool isOne() const
Return true if the expression is a constant one.
bool isZero() const
Return true if the expression is a constant zero.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
unsigned getSmallConstantMaxTripCount(const Loop *L)
Returns the upper bound of the loop trip count as a normal unsigned value.
const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may affect Scalar...
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may affect its v...
void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVMContext & getContext() const
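Two hypothetical helpers (not from this pass) showing the trip-count queries above, assuming ScalarEvolution has already been computed for the function containing L.
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"

// Returns the exact constant trip count of L if SCEV can compute one, 0 otherwise.
static unsigned exactTripCountOrZero(llvm::ScalarEvolution &SE, const llvm::Loop *L) {
  return SE.getSmallConstantTripCount(L);
}

// Returns true if SCEV can prove the trip count never exceeds N.
static bool tripCountIsAtMost(llvm::ScalarEvolution &SE, const llvm::Loop *L,
                              unsigned N) {
  unsigned Max = SE.getSmallConstantMaxTripCount(L); // 0 when unknown
  return Max != 0 && Max <= N;
}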
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
ArrayRef< value_type > getArrayRef() const
size_type size() const
Determine the number of elements in the SetVector.
iterator end()
Get an iterator to the end of the SetVector.
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
bool empty() const
Determine if the SetVector is empty or not.
iterator begin()
Get an iterator to the beginning of the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
value_type pop_back_val()
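SetVector combines set semantics with deterministic insertion-order iteration; a minimal standalone sketch with illustrative values.
#include "llvm/ADT/SetVector.h"
#include <cassert>

int main() {
  llvm::SetVector<int> S;
  S.insert(3);
  S.insert(1);
  bool Inserted = S.insert(3);        // duplicate: rejected
  assert(!Inserted && S.size() == 2 && S.count(3) == 1);
  assert(*S.begin() == 3);            // iteration follows insertion order
  assert(S.pop_back_val() == 1);      // removes and returns the last element
  return 0;
}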
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
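A minimal sketch of the SmallVector operations listed above; standalone, with illustrative values.
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <iterator>

int main() {
  llvm::SmallVector<int, 4> V;        // inline storage for 4 elements, heap beyond
  V.push_back(1);
  V.emplace_back(2);
  int More[] = {3, 4};
  V.append(std::begin(More), std::end(More));
  assert(V.size() == 4 && V.back() == 4);
  return 0;
}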
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
bool isVoidTy() const
Return true if this is 'void'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
void appendRecipe(VPRecipeBase *Recipe)
Augment the existing recipes of a VPBasicBlock with an additional Recipe as the last recipe.
RecipeListTy::iterator iterator
Instruction iterators...
void execute(VPTransformState *State) override
The method which generates the output IR instructions that correspond to this VPBasicBlock,...
iterator begin()
Recipe iterator methods.
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
void insert(VPRecipeBase *Recipe, iterator InsertPt)
A recipe for vectorizing a phi-node as a sequence of mask-based select instructions.
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
VPRegionBlock * getParent()
const VPBasicBlock * getExitingBasicBlock() const
void setName(const Twine &newName)
const VPBasicBlock * getEntryBasicBlock() const
VPBlockBase * getSingleSuccessor() const
const VPBlocksTy & getSuccessors() const
static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr)
Insert disconnected VPBlockBase NewBlock after BlockPtr.
RAII object that stores the current insertion point and restores it when the object is destroyed.
VPlan-based builder utility analogous to IRBuilder.
VPValue * createOr(VPValue *LHS, VPValue *RHS, DebugLoc DL={}, const Twine &Name="")
VPBasicBlock * getInsertBlock() const
VPValue * createICmp(CmpInst::Predicate Pred, VPValue *A, VPValue *B, DebugLoc DL={}, const Twine &Name="")
Create a new ICmp VPInstruction with predicate Pred and operands A and B.
VPInstruction * createOverflowingOp(unsigned Opcode, std::initializer_list< VPValue * > Operands, VPRecipeWithIRFlags::WrapFlagsTy WrapFlags, DebugLoc DL={}, const Twine &Name="")
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
VPValue * createNot(VPValue *Operand, DebugLoc DL={}, const Twine &Name="")
VPValue * createLogicalAnd(VPValue *LHS, VPValue *RHS, DebugLoc DL={}, const Twine &Name="")
VPValue * createSelect(VPValue *Cond, VPValue *TrueVal, VPValue *FalseVal, DebugLoc DL={}, const Twine &Name="", std::optional< FastMathFlags > FMFs=std::nullopt)
void setInsertPoint(VPBasicBlock *TheBB)
This specifies that created VPInstructions should be appended to the end of the specified block.
Canonical scalar induction phi of the vector loop.
Type * getScalarType() const
Returns the scalar type of the induction.
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
void execute(VPTransformState &State) override
Generate the transformed value of the induction at offset StartValue (1.
VPValue * getStepValue() const
VPValue * getStartValue() const
This is a concrete Recipe that models a single VPlan-level instruction.
@ ResumePhi
Creates a scalar phi in a leaf VPBB with a single predecessor in VPlan.
unsigned getOpcode() const
VPInterleaveRecipe is a recipe for transforming an interleave group of load or stores into one wide l...
static VPLane getLastLaneForVF(const ElementCount &VF)
static VPLane getFirstLane()
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
VPBasicBlock * getParent()
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
void insertAfter(VPRecipeBase *InsertPos)
Insert an unlinked Recipe into a basic block immediately after the specified Recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipes from IR instructions.
VPValue * getVPValueOrAddLiveIn(Value *V, VPlan &Plan)
VPValue * createEdgeMask(BasicBlock *Src, BasicBlock *Dst)
A helper function that computes the predicate of the edge between SRC and DST.
VPReplicateRecipe * handleReplication(Instruction *I, VFRange &Range)
Build a VPReplicationRecipe for I.
VPValue * getBlockInMask(BasicBlock *BB) const
Returns the entry mask for the block BB.
VPValue * getEdgeMask(BasicBlock *Src, BasicBlock *Dst) const
A helper that returns the previously computed predicate of the edge between SRC and DST.
iterator_range< mapped_iterator< Use *, std::function< VPValue *(Value *)> > > mapToVPValues(User::op_range Operands)
Returns a range mapping the values of the range Operands to their corresponding VPValues.
void fixHeaderPhis()
Add the incoming values from the backedge to reduction & first-order recurrence cross-iteration phis.
VPRecipeBase * tryToCreateWidenRecipe(Instruction *Instr, ArrayRef< VPValue * > Operands, VFRange &Range, VPBasicBlock *VPBB)
Create and return a widened recipe for I if one can be created within the given VF Range.
void createHeaderMask()
Create the mask for the vector loop header block.
void createBlockInMask(BasicBlock *BB)
A helper function that computes the predicate of the block BB, assuming that the header block of the ...
VPRecipeBase * getRecipe(Instruction *I)
Return the recipe created for given ingredient.
void setFlags(Instruction *I) const
Set the IR flags for I.
A recipe for handling reduction phis.
bool isInLoop() const
Returns true, if the phi is part of an in-loop reduction.
const RecurrenceDescriptor & getRecurrenceDescriptor() const
A recipe to represent inloop reduction operations, performing a reduction on a vector operand into a ...
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
const VPBlockBase * getEntry() const
bool isReplicator() const
An indicator whether this region is to generate multiple replicated instances of output IR correspond...
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
void execute(VPTransformState &State) override
Generate replicas of the desired Ingredient.
bool shouldPack() const
Returns true if the recipe is used by a widened recipe via an intervening VPPredInstPHIRecipe.
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
void setOperand(unsigned I, VPValue *New)
unsigned getNumOperands() const
VPValue * getOperand(unsigned N) const
void addOperand(VPValue *Operand)
void replaceAllUsesWith(VPValue *New)
user_iterator user_begin()
Value * getLiveInIRValue()
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
bool isLiveIn() const
Returns true if this VPValue is a live-in, i.e. defined outside the VPlan.
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplac...
A recipe to compute the pointers for widened memory accesses of IndexTy for all parts.
A recipe for widening Call instructions.
A Recipe for widening the canonical induction variable of the vector loop.
VPWidenCastRecipe is a recipe to create vector cast instructions.
A recipe for handling GEP instructions.
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
A common base class for widening memory operations.
bool Reverse
Whether the consecutive accessed addresses are in reverse order.
bool isConsecutive() const
Return whether the loaded-from / stored-to addresses are consecutive.
VPValue * getMask() const
Return the mask used by this recipe.
VPValue * getAddr() const
Return the address accessed by this recipe.
bool isReverse() const
Return whether the consecutive loaded/stored addresses are in reverse order.
A recipe for handling phis that are widened in the vector loop.
VPValue * getIncomingValue(unsigned I)
Returns the I th incoming VPValue.
VPBasicBlock * getIncomingBlock(unsigned I)
Returns the I th incoming VPBasicBlock.
bool onlyScalarsGenerated(bool IsScalable)
Returns true if only scalar values will be generated.
void execute(VPTransformState &State) override
Generate vector values for the pointer induction.
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
Main class to build the VPlan H-CFG for an incoming IR.
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient outp...
void printDOT(raw_ostream &O) const
Print this VPlan in DOT format to O.
void prepareToExecute(Value *TripCount, Value *VectorTripCount, Value *CanonicalIVStartValue, VPTransformState &State)
Prepare the plan for execution, setting up the required live-in values.
VPBasicBlock * getEntry()
VPValue & getVectorTripCount()
The vector trip count.
void setName(const Twine &newName)
VPValue & getVFxUF()
Returns VF * UF of the vector loop region.
VPValue * getTripCount() const
The trip count of the original loop.
VPValue * getOrCreateBackedgeTakenCount()
The backedge taken count of the original loop.
void removeLiveOut(PHINode *PN)
iterator_range< SmallSetVector< ElementCount, 2 >::iterator > vectorFactors() const
Returns an iterator range over all VFs of the plan.
void addLiveOut(PHINode *PN, VPValue *V)
VPBasicBlock * getPreheader()
VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
bool hasVF(ElementCount VF)
bool hasUF(unsigned UF) const
void setVF(ElementCount VF)
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
static VPlanPtr createInitialVPlan(const SCEV *TripCount, ScalarEvolution &PSE, bool RequiresScalarEpilogueCheck, bool TailFolded, Loop *TheLoop)
Create initial VPlan, having an "entry" VPBasicBlock (wrapping original scalar pre-header) which con...
VPValue * getOrAddLiveIn(Value *V)
Gets the live-in VPValue for V or adds a new live-in (if none exists yet) for V.
LLVM_DUMP_METHOD void dump() const
Dump the plan to stderr (for debugging).
bool hasScalarVFOnly() const
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the vector loop.
const MapVector< PHINode *, VPLiveOut * > & getLiveOuts() const
void print(raw_ostream &O) const
Print this VPlan to O.
VPValue * getSCEVExpansion(const SCEV *S) const
VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
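A hedged sketch of typical queries against an existing VPlan Plan during a transform; none of this is prescribed by the pass, and the fixed VF of 4 is arbitrary:
  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion(); // region of the vector loop
  VPCanonicalIVPHIRecipe *CanIV = Plan.getCanonicalIV();  // canonical induction recipe
  if (Plan.hasVF(ElementCount::getFixed(4)))
    Plan.dump();                                          // print the plan to stderr
  VPValue *TC = Plan.getTripCount();                      // trip count of the original loop
  VPlanPtr Copy(Plan.duplicate());                        // clone, remapping values/recipes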
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUser() const
Return true if there is exactly one user of this value.
void setName(const Twine &Name)
Change the name of the value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
VectorBuilder & setEVL(Value *NewExplicitVectorLength)
VectorBuilder & setMask(Value *NewMask)
Value * createVectorInstruction(unsigned Opcode, Type *ReturnTy, ArrayRef< Value * > VecOpArray, const Twine &Name=Twine())
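A minimal sketch of driving VectorBuilder to emit a vector-predicated add; Builder, Mask, EVL, VecTy, LHS and RHS are assumed to already exist with the obvious meanings:
  VectorBuilder VBuilder(Builder);     // wraps an existing IRBuilderBase
  VBuilder.setMask(Mask).setEVL(EVL);  // per-lane mask and explicit vector length
  Value *VPAdd = VBuilder.createVectorInstruction(Instruction::Add, VecTy,
                                                  {LHS, RHS}, "vp.add");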
static bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as an element type.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr bool isZero() const
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
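A short illustration of why these named helpers exist instead of plain '*' and '/': the quantity may be scaled by a runtime vscale, so only the coefficient is manipulated and comparisons are merely "known" relations:
  ElementCount VF = ElementCount::getScalable(4);       // vscale x 4 lanes
  unsigned MinLanes = VF.getKnownMinValue();            // 4, regardless of vscale
  ElementCount Doubled = VF.multiplyCoefficientBy(2);   // vscale x 8
  ElementCount Halved  = VF.divideCoefficientBy(2);     // vscale x 2
  bool Fits = ElementCount::isKnownLE(Halved, VF);      // true for any vscale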
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
A range adaptor for a pair of iterators.
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ PredicateElseScalarEpilogue
@ PredicateOrDontVectorize
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
@ C
The default llvm calling convention, compatible with C.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
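A hedged sketch of composing these matchers; V is assumed to be a Value* of interest and the matched pattern is only an example:
  using namespace llvm::PatternMatch;
  Value *A = nullptr, *B = nullptr;
  // Recognize a single-use mul(ext(A), ext(B)), capturing the narrow operands.
  if (match(V, m_OneUse(m_Mul(m_ZExtOrSExt(m_Value(A)),
                              m_ZExtOrSExt(m_Value(B))))))
    ; // A and B now point at the values feeding the extends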
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
NodeAddr< PhiNode * > Phi
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
const_iterator end(StringRef path)
Get end iterator over path.
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr, ScalarEvolution &SE)
Get or create a VPValue that corresponds to the expansion of Expr.
bool isUniformAfterVectorization(VPValue *VPV)
Returns true if VPV is uniform after vectorization.
This is an optimization pass for GlobalISel generic memory operations.
bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
void stable_sort(R &&Range)
bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Returns a loop's estimated trip count based on branch weight metadata.
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A,...
unsigned getLoadStoreAddressSpace(Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
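These load/store helpers (together with getLoadStoreType and getLoadStoreAlignment, listed further below) let code treat loads and stores uniformly; a small sketch, assuming I points at either a LoadInst or a StoreInst:
  const Value *Ptr = getLoadStorePointerOperand(I); // pointer operand of the access
  unsigned AS      = getLoadStoreAddressSpace(I);   // address space of that pointer
  Type *AccessTy   = getLoadStoreType(I);           // type loaded or stored
  Align Alignment  = getLoadStoreAlignment(I);      // alignment of the access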
const SCEV * createTripCountSCEV(Type *IdxTy, PredicatedScalarEvolution &PSE, Loop *OrigLoop)
std::pair< Instruction *, ElementCount > InstructionVFPair
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
std::optional< MDNode * > makeFollowupLoopID(MDNode *OrigLoopID, ArrayRef< StringRef > FollowupAttrs, const char *InheritOptionsAttrsPrefix="", bool AlwaysNew=false)
Create a new loop identifier for a loop created from a loop transformation.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing t...
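A sketch of walking a plan's blocks with these adaptors; Plan is an existing VPlan, and the shallow variant would stop at region boundaries instead of descending into them:
  for (VPBlockBase *Block : vp_depth_first_deep(Plan.getEntry()))
    if (auto *VPBB = dyn_cast<VPBasicBlock>(Block))
      ; // visit the recipes of VPBB here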
static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
auto map_range(ContainerTy &&C, FuncTy F)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
auto reverse(ContainerTy &&C)
void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
cl::opt< bool > EnableVPlanNativePath("enable-vplan-native-path", cl::Hidden, cl::desc("Enable VPlan-native vectorization path with " "support for outer loop vectorization."))
void sort(IteratorTy Start, IteratorTy End)
std::unique_ptr< VPlan > VPlanPtr
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
cl::opt< bool > EnableLoopVectorization
bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
Align getLoadStoreAlignment(Value *I)
A helper function that returns the alignment of load or store instruction.
iterator_range< filter_iterator< detail::IterOfRange< RangeT >, PredicateT > > make_filter_range(RangeT &&Range, PredicateT Pred)
Convenience function that takes a range of elements and a predicate, and returns a new filter_iterator...
void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Type * ToVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
RecurKind
These are the kinds of recurrences that we support.
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
void setProfileInfoAfterUnrolling(Loop *OrigLoop, Loop *UnrolledLoop, Loop *RemainderLoop, uint64_t UF)
Set weights for UnrolledLoop and RemainderLoop based on weights for OrigLoop and the following distri...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding o...
@ CM_ScalarEpilogueNotAllowedLowTripLoop
@ CM_ScalarEpilogueNotNeededUsePredicate
@ CM_ScalarEpilogueNotAllowedOptSize
@ CM_ScalarEpilogueAllowed
@ CM_ScalarEpilogueNotAllowedUsePredicate
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
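A hedged sketch of materializing VF-dependent IR with this helper and getRuntimeVF (listed above); B is an assumed IRBuilderBase, IdxTy an integer type, and VF the chosen ElementCount. For scalable VFs the result is vscale-based arithmetic, for fixed VFs a constant:
  Value *RuntimeVF = getRuntimeVF(B, IdxTy, VF);                // VF as an IR value
  Value *Step      = createStepForVF(B, IdxTy, VF, /*Step=*/2); // value for 2 * VF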
BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer list are equal or the list is empty.
@ DataAndControlFlowWithoutRuntimeCheck
Use predicate to control both data and control flow, but modify the trip count so that a runtime over...
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
unsigned getReciprocalPredBlockProb()
A helper function that returns the reciprocal of the block probability of predicated blocks.
bool hasBranchWeightMD(const Instruction &I)
Checks if an instruction has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
bool verifyVPlanIsValid(const VPlan &Plan)
Verify invariants for general VPlans.
MapVector< Instruction *, uint64_t > computeMinimumValueSizes(ArrayRef< BasicBlock * > Blocks, DemandedBits &DB, const TargetTransformInfo *TTI=nullptr)
Compute a map of integer instructions to their minimum legal type size.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
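A small illustration of the hashing helpers; Opcode and NumOperands are assumed unsigned values and Keys an assumed container of unsigned keys:
  hash_code H = hash_combine(Opcode, NumOperands);             // combine individual values
  hash_code R = hash_combine_range(Keys.begin(), Keys.end());  // hash a whole sequence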
cl::opt< bool > EnableLoopInterleaving
Implement std::hash so that hash_code can be used in STL containers.
This struct is a compact representation of a valid (non-zero power of two) alignment.
A special type used by analysis passes to provide an address that identifies that particular analysis...
static void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given valu...
Encapsulate information regarding vectorization of a loop and its epilogue.
BasicBlock * SCEVSafetyCheck
BasicBlock * MemSafetyCheck
BasicBlock * MainLoopIterationCountCheck
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF)
BasicBlock * EpilogueIterationCountCheck
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
std::optional< unsigned > MaskPos
A struct that represents some properties of the register usage of a loop.
SmallMapVector< unsigned, unsigned, 4 > MaxLocalUsers
Holds the maximum number of concurrent live intervals in the loop.
SmallMapVector< unsigned, unsigned, 4 > LoopInvariantRegs
Holds the number of loop invariant values that are used in the loop.
bool processLoop(Loop *L)
LoopAccessInfoManager * LAIs
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LoopVectorizePass(LoopVectorizeOptions Opts={})
LoopVectorizeResult runImpl(Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, DominatorTree &DT_, BlockFrequencyInfo *BFI_, TargetLibraryInfo *TLI_, DemandedBits &DB_, AssumptionCache &AC_, LoopAccessInfoManager &LAIs_, OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
Storage for information about made changes.
A CRTP mix-in to automatically provide informational APIs needed for passes.
A MapVector that performs no allocations if smaller than a certain size.
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
Struct to hold various analysis needed for cost computations.
LoopVectorizationCostModel & CM
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of al...
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A recipe for handling first-order recurrence phis.
VPIteration represents a single point in the iteration space of the output (vectorized and/or unrolle...
bool isFirstIteration() const
void execute(VPTransformState &State) override
Generate the wide load or gather.
VPValue * getEVL() const
Return the EVL operand.
A recipe for widening load operations, using the address to load from and an optional mask.
void execute(VPTransformState &State) override
Generate a wide load or gather.
A recipe for widening select instructions.
VPValue * getStoredValue() const
Return the value stored by this recipe.
void execute(VPTransformState &State) override
Generate the wide store or scatter.
VPValue * getEVL() const
Return the EVL operand.
A recipe for widening store operations, using the stored value, the address to store to and an option...
void execute(VPTransformState &State) override
Generate a wide store or scatter.
VPValue * getStoredValue() const
Return the value stored by this recipe.
TODO: The following VectorizationFactor was pulled out of the LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
static bool HoistRuntimeChecks