LoopVectorize.cpp (LLVM 23.0.0git)
1//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10// and generates target-independent LLVM-IR.
11// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12// of instructions in order to estimate the profitability of vectorization.
13//
14// The loop vectorizer combines consecutive loop iterations into a single
15// 'wide' iteration. After this transformation the index is incremented
16// by the SIMD vector width, and not by one.
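//
// For example (an illustrative sketch, not code from this file): with a vector
// width of 4, a scalar loop such as
//
//   for (i = 0; i < n; i++)
//     A[i] = B[i] + 1;
//
// is conceptually rewritten so that the induction variable advances by 4 and
// the body operates on 4-element vectors:
//
//   for (i = 0; i + 4 <= n; i += 4)
//     A[i..i+3] = B[i..i+3] + <1, 1, 1, 1>;   // remainder handled separately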
17//
18// This pass has four parts:
19// 1. The main loop pass that drives the different parts.
20// 2. LoopVectorizationLegality - A unit that checks for the legality
21// of the vectorization.
22// 3. InnerLoopVectorizer - A unit that performs the actual
23// widening of instructions.
24// 4. LoopVectorizationCostModel - A unit that checks for the profitability
25// of vectorization. It decides on the optimal vector width, which
26// can be one, if vectorization is not profitable.
27//
28// There is an ongoing development effort to migrate the loop vectorizer to the
29// VPlan infrastructure and to introduce outer loop vectorization support (see
30// docs/VectorizationPlan.rst and
31// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32// purpose, we temporarily introduced the VPlan-native vectorization path: an
33// alternative vectorization path that is natively implemented on top of the
34// VPlan infrastructure. See EnableVPlanNativePath for enabling.
35//
36//===----------------------------------------------------------------------===//
37//
38// The reduction-variable vectorization is based on the paper:
39// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40//
41// Variable uniformity checks are inspired by:
42// Karrenberg, R. and Hack, S. Whole Function Vectorization.
43//
44// The interleaved access vectorization is based on the paper:
45// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
46// Data for SIMD
47//
48// Other ideas/concepts are from:
49// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50//
51// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
52// Vectorizing Compilers.
53//
54//===----------------------------------------------------------------------===//
55
58#include "VPRecipeBuilder.h"
59#include "VPlan.h"
60#include "VPlanAnalysis.h"
61#include "VPlanCFG.h"
62#include "VPlanHelpers.h"
63#include "VPlanPatternMatch.h"
64#include "VPlanTransforms.h"
65#include "VPlanUtils.h"
66#include "VPlanVerifier.h"
67#include "llvm/ADT/APInt.h"
68#include "llvm/ADT/ArrayRef.h"
69#include "llvm/ADT/DenseMap.h"
71#include "llvm/ADT/Hashing.h"
72#include "llvm/ADT/MapVector.h"
73#include "llvm/ADT/STLExtras.h"
76#include "llvm/ADT/Statistic.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/ADT/Twine.h"
79#include "llvm/ADT/TypeSwitch.h"
84#include "llvm/Analysis/CFG.h"
101#include "llvm/IR/Attributes.h"
102#include "llvm/IR/BasicBlock.h"
103#include "llvm/IR/CFG.h"
104#include "llvm/IR/Constant.h"
105#include "llvm/IR/Constants.h"
106#include "llvm/IR/DataLayout.h"
107#include "llvm/IR/DebugInfo.h"
108#include "llvm/IR/DebugLoc.h"
109#include "llvm/IR/DerivedTypes.h"
111#include "llvm/IR/Dominators.h"
112#include "llvm/IR/Function.h"
113#include "llvm/IR/IRBuilder.h"
114#include "llvm/IR/InstrTypes.h"
115#include "llvm/IR/Instruction.h"
116#include "llvm/IR/Instructions.h"
118#include "llvm/IR/Intrinsics.h"
119#include "llvm/IR/MDBuilder.h"
120#include "llvm/IR/Metadata.h"
121#include "llvm/IR/Module.h"
122#include "llvm/IR/Operator.h"
123#include "llvm/IR/PatternMatch.h"
125#include "llvm/IR/Type.h"
126#include "llvm/IR/Use.h"
127#include "llvm/IR/User.h"
128#include "llvm/IR/Value.h"
129#include "llvm/IR/Verifier.h"
130#include "llvm/Support/Casting.h"
132#include "llvm/Support/Debug.h"
147#include <algorithm>
148#include <cassert>
149#include <cmath>
150#include <cstdint>
151#include <functional>
152#include <iterator>
153#include <limits>
154#include <memory>
155#include <string>
156#include <tuple>
157#include <utility>
158
159using namespace llvm;
160using namespace SCEVPatternMatch;
161
162#define LV_NAME "loop-vectorize"
163#define DEBUG_TYPE LV_NAME
164
165#ifndef NDEBUG
166const char VerboseDebug[] = DEBUG_TYPE "-verbose";
167#endif
168
169STATISTIC(LoopsVectorized, "Number of loops vectorized");
170STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
171STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
172STATISTIC(LoopsEarlyExitVectorized, "Number of early exit loops vectorized");
173
175 "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
176 cl::desc("Enable vectorization of epilogue loops."));
177
179 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
180 cl::desc("When epilogue vectorization is enabled, and a value greater than "
181 "1 is specified, forces the given VF for all applicable epilogue "
182 "loops."));
183
185 "epilogue-vectorization-minimum-VF", cl::Hidden,
186 cl::desc("Only loops with vectorization factor equal to or larger than "
187 "the specified value are considered for epilogue vectorization."));
188
189/// Loops with a known constant trip count below this number are vectorized only
190/// if no scalar iteration overheads are incurred.
192 "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
193 cl::desc("Loops with a constant trip count that is smaller than this "
194 "value are vectorized only if no scalar iteration overheads "
195 "are incurred."));
196
198 "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
199 cl::desc("The maximum allowed number of runtime memory checks"));
200
201/// The tail-folding-policy option indicates that an epilogue is undesired and
202/// that tail folding is preferred; it lists all options. I.e., the vectorizer
203/// will try to fold the tail loop (epilogue) into the vector body and predicate
204/// the instructions accordingly. If tail folding fails, there are different
205/// fallback strategies depending on these values:
207
209 "tail-folding-policy", cl::init(TailFoldingPolicyTy::None), cl::Hidden,
210 cl::desc("Tail-folding preferences over creating an epilogue loop."),
212 clEnumValN(TailFoldingPolicyTy::None, "dont-fold-tail",
213 "Don't tail-fold loops."),
215 "prefer tail-folding, otherwise create an epilogue when "
216 "appropriate."),
218 "always tail-fold, don't attempt vectorization if "
219 "tail-folding fails.")));
220
222 "force-tail-folding-style", cl::desc("Force the tail folding style"),
225 clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"),
228 "Create lane mask for data only, using active.lane.mask intrinsic"),
230 "data-without-lane-mask",
231 "Create lane mask with compare/stepvector"),
233 "Create lane mask using active.lane.mask intrinsic, and use "
234 "it for both data and control flow"),
236 "Use predicated EVL instructions for tail folding. If EVL "
237 "is unsupported, fallback to data-without-lane-mask.")));
238
240 "enable-wide-lane-mask", cl::init(false), cl::Hidden,
241 cl::desc("Enable use of wide lane masks when used for control flow in "
242 "tail-folded loops"));
243
245 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
246 cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
247
248/// An interleave-group may need masking if it resides in a block that needs
249/// predication, or in order to mask away gaps.
251 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
252 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
253
255 "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
256 cl::desc("A flag that overrides the target's number of scalar registers."));
257
259 "force-target-num-vector-regs", cl::init(0), cl::Hidden,
260 cl::desc("A flag that overrides the target's number of vector registers."));
261
263 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
264 cl::desc("A flag that overrides the target's max interleave factor for "
265 "scalar loops."));
266
268 "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
269 cl::desc("A flag that overrides the target's max interleave factor for "
270 "vectorized loops."));
271
273 "force-target-instruction-cost", cl::init(0), cl::Hidden,
274 cl::desc("A flag that overrides the target's expected cost for "
275 "an instruction to a single constant value. Mostly "
276 "useful for getting consistent testing."));
277
279 "small-loop-cost", cl::init(20), cl::Hidden,
280 cl::desc(
281 "The cost of a loop that is considered 'small' by the interleaver."));
282
284 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
285 cl::desc("Enable the use of the block frequency analysis to access PGO "
286 "heuristics minimizing code growth in cold regions and being more "
287 "aggressive in hot regions."));
288
289// Runtime interleave loops for load/store throughput.
291 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
292 cl::desc(
293 "Enable runtime interleaving until load/store ports are saturated"));
294
295/// The number of stores in a loop that are allowed to need predication.
297 "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
298 cl::desc("Max number of stores to be predicated behind an if."));
299
301 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
302 cl::desc("Count the induction variable only once when interleaving"));
303
305 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
306 cl::desc("The maximum interleave count to use when interleaving a scalar "
307 "reduction in a nested loop."));
308
310 "force-ordered-reductions", cl::init(false), cl::Hidden,
311 cl::desc("Enable the vectorisation of loops with in-order (strict) "
312 "FP reductions"));
313
315 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
316 cl::desc(
317 "Prefer predicating a reduction operation over an after loop select."));
318
320 "enable-vplan-native-path", cl::Hidden,
321 cl::desc("Enable VPlan-native vectorization path with "
322 "support for outer loop vectorization."));
323
325 llvm::VerifyEachVPlan("vplan-verify-each",
326#ifdef EXPENSIVE_CHECKS
327 cl::init(true),
328#else
329 cl::init(false),
330#endif
332 cl::desc("Verify VPlans after VPlan transforms."));
333
334#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
336 "vplan-print-after-all", cl::init(false), cl::Hidden,
337 cl::desc("Print VPlans after all VPlan transformations."));
338
340 "vplan-print-after", cl::Hidden,
341 cl::desc("Print VPlans after specified VPlan transformations (regexp)."));
342
344 "vplan-print-vector-region-scope", cl::init(false), cl::Hidden,
345 cl::desc("Limit VPlan printing to vector loop region in "
346 "`-vplan-print-after*` if the plan has one."));
347#endif
348
349// This flag enables the stress testing of the VPlan H-CFG construction in the
350// VPlan-native vectorization path. It must be used in conjunction with
351// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
352// verification of the H-CFGs built.
354 "vplan-build-outerloop-stress-test", cl::init(false), cl::Hidden,
355 cl::desc(
356 "Build VPlan for every supported loop nest in the function and bail "
357 "out right after the build (stress test the VPlan H-CFG construction "
358 "in the VPlan-native vectorization path)."));
359
361 "interleave-loops", cl::init(true), cl::Hidden,
362 cl::desc("Enable loop interleaving in Loop vectorization passes"));
364 "vectorize-loops", cl::init(true), cl::Hidden,
365 cl::desc("Run the Loop vectorization passes"));
366
368 ForceMaskedDivRem("force-widen-divrem-via-masked-intrinsic", cl::Hidden,
369 cl::desc("Override cost based masked intrinsic widening "
370 "for div/rem instructions"));
371
373 "enable-early-exit-vectorization", cl::init(true), cl::Hidden,
374 cl::desc(
375 "Enable vectorization of early exit loops with uncountable exits."));
376
377// Likelihood of bypassing the vectorized loop because there are zero trips left
378// after prolog. See `emitIterationCountCheck`.
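// For example (illustrative): weights of {1, 127} bias the branch so that the
// bypass path (zero vector iterations remaining) is assumed to be taken about
// 1 time in 128, i.e. the vectorized loop is expected to run in the common case.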
379static constexpr uint32_t MinItersBypassWeights[] = {1, 127};
380
381/// A helper function that returns true if the given type is irregular. The
382/// type is irregular if its allocated size doesn't equal the store size of an
383/// element of the corresponding vector type.
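/// For example (illustrative, assuming a typical data layout): i1 has a type
/// size of 1 bit but an alloc size of 8 bits, so it is irregular and an array
/// of i1 is not bitcast-compatible with <N x i1>; i32 has matching 32-bit type
/// and alloc sizes and is regular.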
384static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
385 // Determine if an array of N elements of type Ty is "bitcast compatible"
386 // with a <N x Ty> vector.
387 // This is only true if there is no padding between the array elements.
388 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
389}
390
391/// A version of ScalarEvolution::getSmallConstantTripCount that returns an
392/// ElementCount to include loops whose trip count is a function of vscale.
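/// For example (illustrative): a loop whose SCEV trip count is the expression
/// 4 * vscale (with no unsigned wrap) yields a scalable element count of
/// "vscale x 4", whereas a plain constant trip count of 100 yields a fixed
/// element count of 100.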
394 const Loop *L) {
395 if (unsigned ExpectedTC = SE->getSmallConstantTripCount(L))
396 return ElementCount::getFixed(ExpectedTC);
397
398 const SCEV *BTC = SE->getBackedgeTakenCount(L);
400 return ElementCount::getFixed(0);
401
402 const SCEV *ExitCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
403 if (isa<SCEVVScale>(ExitCount))
405
406 const APInt *Scale;
407 if (match(ExitCount, m_scev_Mul(m_scev_APInt(Scale), m_SCEVVScale())))
408 if (cast<SCEVMulExpr>(ExitCount)->hasNoUnsignedWrap())
409 if (Scale->getActiveBits() <= 32)
411
412 return ElementCount::getFixed(0);
413}
414
415/// Get the maximum trip count for \p L from the SCEV unsigned range, excluding
416/// zero from the range. Only valid when not folding the tail, as the minimum
417/// iteration count check guards against a zero trip count. Returns 0 if
418/// unknown.
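/// For example (illustrative): if SCEV proves the trip count always lies
/// between 1 and 8, this returns 8 as a refined upper bound on the trip count.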
420 Loop *L) {
421 const SCEV *BTC = PSE.getBackedgeTakenCount();
423 return 0;
424 ScalarEvolution *SE = PSE.getSE();
425 const SCEV *TripCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
426 ConstantRange TCRange = SE->getUnsignedRange(TripCount);
427 APInt MaxTCFromRange = TCRange.getUnsignedMax();
428 if (!MaxTCFromRange.isZero() && MaxTCFromRange.getActiveBits() <= 32)
429 return MaxTCFromRange.getZExtValue();
430 return 0;
431}
432
433/// Returns "best known" trip count, which is either a valid positive trip count
434/// or std::nullopt when an estimate cannot be made (including when the trip
435/// count would overflow), for the specified loop \p L as defined by the
436/// following procedure:
437/// 1) Returns exact trip count if it is known.
438/// 2) Returns expected trip count according to profile data if any.
439/// 3) Returns upper bound estimate if known, and if \p CanUseConstantMax.
440/// 4) Returns the maximum trip count from the SCEV range excluding zero,
441/// if \p CanUseConstantMax and \p CanExcludeZeroTrips.
442/// 5) Returns std::nullopt if all of the above failed.
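/// For example (illustrative): a loop with an unknown exact trip count but
/// profile data estimating roughly 1000 iterations returns a fixed count of
/// 1000 via step 2; if none of the estimates above are available, std::nullopt
/// is returned.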
443static std::optional<ElementCount>
445 bool CanUseConstantMax = true,
446 bool CanExcludeZeroTrips = false) {
447 // Check if exact trip count is known.
448 if (auto ExpectedTC = getSmallConstantTripCount(PSE.getSE(), L))
449 return ExpectedTC;
450
451 // Check if there is an expected trip count available from profile data.
453 if (auto EstimatedTC = getLoopEstimatedTripCount(L))
454 return ElementCount::getFixed(*EstimatedTC);
455
456 if (!CanUseConstantMax)
457 return std::nullopt;
458
459 // Check if upper bound estimate is known.
460 if (unsigned ExpectedTC = PSE.getSmallConstantMaxTripCount())
461 return ElementCount::getFixed(ExpectedTC);
462
463 // Get the maximum trip count from the SCEV range excluding zero. This is
464 // only safe when not folding the tail, as the minimum iteration count check
465 // prevents entering the vector loop with a zero trip count.
466 if (CanUseConstantMax && CanExcludeZeroTrips)
467 if (unsigned RefinedTC = getMaxTCFromNonZeroRange(PSE, L))
468 return ElementCount::getFixed(RefinedTC);
469
470 return std::nullopt;
471}
472
473namespace {
474// Forward declare GeneratedRTChecks.
475class GeneratedRTChecks;
476
477using SCEV2ValueTy = DenseMap<const SCEV *, Value *>;
478} // namespace
479
480namespace llvm {
481
483
484/// InnerLoopVectorizer vectorizes loops which contain only one basic
485/// block to a specified vectorization factor (VF).
486/// This class performs the widening of scalars into vectors, or multiple
487/// scalars. This class also implements the following features:
488/// * It inserts an epilogue loop for handling loops that don't have iteration
489/// counts that are known to be a multiple of the vectorization factor.
490/// * It handles the code generation for reduction variables.
491/// * Scalarization (implementation using scalars) of un-vectorizable
492/// instructions.
493/// InnerLoopVectorizer does not perform any vectorization-legality
494/// checks, and relies on the caller to check for the different legality
495/// aspects. The InnerLoopVectorizer relies on the
496/// LoopVectorizationLegality class to provide information about the induction
497/// and reduction variables that were found to a given vectorization factor.
499public:
503 ElementCount VecWidth, unsigned UnrollFactor,
505 GeneratedRTChecks &RTChecks, VPlan &Plan)
506 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TTI(TTI), AC(AC),
507 VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()),
510 Plan.getVectorLoopRegion()->getSinglePredecessor())) {}
511
512 virtual ~InnerLoopVectorizer() = default;
513
514 /// Creates a basic block for the scalar preheader. Both
515 /// EpilogueVectorizerMainLoop and EpilogueVectorizerEpilogueLoop overwrite
516 /// the method to create additional blocks and checks needed for epilogue
517 /// vectorization.
519
520 /// Fix the vectorized code, taking care of header phi's, and more.
522
523 /// Fix the non-induction PHIs in \p Plan.
525
526protected:
528
529 /// Create and return a new IR basic block for the scalar preheader whose name
530 /// is prefixed with \p Prefix.
532
533 /// Allow subclasses to override and print debug traces before/after vplan
534 /// execution, when trace information is requested.
535 virtual void printDebugTracesAtStart() {}
536 virtual void printDebugTracesAtEnd() {}
537
538 /// The original loop.
540
541 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
542 /// dynamic knowledge to simplify SCEV expressions and converts them to a
543 /// more usable form.
545
546 /// Loop Info.
548
549 /// Dominator Tree.
551
552 /// Target Transform Info.
554
555 /// Assumption Cache.
557
558 /// The vectorization SIMD factor to use. Each vector will have this many
559 /// vector elements.
561
562 /// The vectorization unroll factor to use. Each scalar is vectorized to this
563 /// many different vector instructions.
564 unsigned UF;
565
566 /// The builder that we use
568
569 // --- Vectorization state ---
570
571 /// The profitability analysis.
573
574 /// Structure to hold information about generated runtime checks, responsible
575 /// for cleaning the checks, if vectorization turns out unprofitable.
576 GeneratedRTChecks &RTChecks;
577
579
580 /// The vector preheader block of \p Plan, used as target for check blocks
581 /// introduced during skeleton creation.
583};
584
585/// Encapsulate information regarding vectorization of a loop and its epilogue.
586/// This information is meant to be updated and used across two stages of
587/// epilogue vectorization.
590 unsigned MainLoopUF = 0;
592 unsigned EpilogueUF = 0;
597
599 ElementCount EVF, unsigned EUF,
601 : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF),
603 assert(EUF == 1 &&
604 "A high UF for the epilogue loop is likely not beneficial.");
605 }
606};
607
608/// An extension of the inner loop vectorizer that creates a skeleton for a
609/// vectorized loop that has its epilogue (residual) also vectorized.
610/// The idea is to run the VPlan on a given loop twice, firstly to set up the
611/// skeleton and vectorize the main loop, and secondly to complete the skeleton
612/// from the first step and vectorize the epilogue. This is achieved by
613/// deriving two concrete strategy classes from this base class and invoking
614/// them in succession from the loop vectorizer planner.
616public:
626
627 /// Holds and updates state information required to vectorize the main loop
628 /// and its epilogue in two separate passes. This setup helps us avoid
629 /// regenerating and recomputing runtime safety checks. It also helps us to
630 /// shorten the iteration-count-check path length for the cases where the
631 /// iteration count of the loop is so small that the main vector loop is
632 /// completely skipped.
634
635protected:
637};
638
639/// A specialized derived class of inner loop vectorizer that performs
640/// vectorization of *main* loops in the process of vectorizing loops and their
641/// epilogues.
643public:
654
655protected:
656 void printDebugTracesAtStart() override;
657 void printDebugTracesAtEnd() override;
658};
659
660// A specialized derived class of inner loop vectorizer that performs
661// vectorization of *epilogue* loops in the process of vectorizing loops and
662// their epilogues.
664public:
671 GeneratedRTChecks &Checks, VPlan &Plan)
673 Checks, Plan, EPI.EpilogueVF,
674 EPI.EpilogueVF, EPI.EpilogueUF) {}
675 /// Implements the interface for creating a vectorized skeleton using the
676 /// *epilogue loop* strategy (i.e., the second pass of VPlan execution).
678
679protected:
680 void printDebugTracesAtStart() override;
681 void printDebugTracesAtEnd() override;
682};
683} // end namespace llvm
684
685/// Look for a meaningful debug location on the instruction or its operands.
687 if (!I)
688 return DebugLoc::getUnknown();
689
691 if (I->getDebugLoc() != Empty)
692 return I->getDebugLoc();
693
694 for (Use &Op : I->operands()) {
695 if (Instruction *OpInst = dyn_cast<Instruction>(Op))
696 if (OpInst->getDebugLoc() != Empty)
697 return OpInst->getDebugLoc();
698 }
699
700 return I->getDebugLoc();
701}
702
703/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
704/// is passed, the message relates to that particular instruction.
705#ifndef NDEBUG
706static void debugVectorizationMessage(const StringRef Prefix,
707 const StringRef DebugMsg,
708 Instruction *I) {
709 dbgs() << "LV: " << Prefix << DebugMsg;
710 if (I != nullptr)
711 dbgs() << " " << *I;
712 else
713 dbgs() << '.';
714 dbgs() << '\n';
715}
716#endif
717
718/// Create an analysis remark that explains why vectorization failed
719///
720/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
721/// RemarkName is the identifier for the remark. If \p I is passed it is an
722/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
723/// the location of the remark. If \p DL is passed, use it as debug location for
724/// the remark. \return the remark object that can be streamed to.
725static OptimizationRemarkAnalysis
726createLVAnalysis(const char *PassName, StringRef RemarkName,
727 const Loop *TheLoop, Instruction *I, DebugLoc DL = {}) {
728 BasicBlock *CodeRegion = I ? I->getParent() : TheLoop->getHeader();
729 // If debug location is attached to the instruction, use it. Otherwise if DL
730 // was not provided, use the loop's.
731 if (I && I->getDebugLoc())
732 DL = I->getDebugLoc();
733 else if (!DL)
734 DL = TheLoop->getStartLoc();
735
736 return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
737}
738
739namespace llvm {
740
741/// Return the runtime value for VF.
743 return B.CreateElementCount(Ty, VF);
744}
745
747 const StringRef OREMsg, const StringRef ORETag,
749 const Loop *TheLoop, Instruction *I) {
750 LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
751 LoopVectorizeHints Hints(TheLoop, false /* doesn't matter */, *ORE);
752 ORE->emit(
753 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
754 << "loop not vectorized: " << OREMsg);
755}
756
757void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
759 const Loop *TheLoop, Instruction *I, DebugLoc DL) {
761 LoopVectorizeHints Hints(TheLoop, false /* doesn't matter */, *ORE);
762 ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop,
763 I, DL)
764 << Msg);
765}
766
767/// Report successful vectorization of the loop. In case an outer loop is
768/// vectorized, prepend "outer" to the vectorization remark.
770 VectorizationFactor VF, unsigned IC) {
772 "Vectorizing: ", TheLoop->isInnermost() ? "innermost loop" : "outer loop",
773 nullptr));
774 StringRef LoopType = TheLoop->isInnermost() ? "" : "outer ";
775 ORE->emit([&]() {
776 return OptimizationRemark(LV_NAME, "Vectorized", TheLoop->getStartLoc(),
777 TheLoop->getHeader())
778 << "vectorized " << LoopType << "loop (vectorization width: "
779 << ore::NV("VectorizationFactor", VF.Width)
780 << ", interleaved count: " << ore::NV("InterleaveCount", IC) << ")";
781 });
782}
783
784} // end namespace llvm
785
786namespace llvm {
787
788// Loop vectorization cost-model hints how the epilogue/tail loop should be
789// lowered.
791
792 // The default: allowing epilogues.
794
795 // Vectorization with OptForSize: don't allow epilogues.
797
798 // A special case of vectorisation with OptForSize: loops with a very small
799 // trip count are considered for vectorization under OptForSize, thereby
800 // making sure the cost of their loop body is dominant, free of runtime
801 // guards and scalar iteration overheads.
803
804 // Loop hint indicating an epilogue is undesired, apply tail folding.
806
807 // Directive indicating we must either fold the epilogue/tail or not vectorize
809};
810
811/// LoopVectorizationCostModel - estimates the expected speedups due to
812/// vectorization.
813/// In many cases vectorization is not profitable. This can happen because of
814/// a number of reasons. In this class we mainly attempt to predict the
815/// expected speedup/slowdowns due to the supported instruction set. We use the
816/// TargetTransformInfo to query the different backends for the cost of
817/// different operations.
820
821public:
835
836 /// \return An upper bound for the vectorization factors (both fixed and
837 /// scalable). If the factors are 0, vectorization and interleaving should be
838 /// avoided up front.
839 FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
840
841 /// A memory access instruction may be vectorized in more than one way.
842 /// The form of the instruction after vectorization depends on cost.
843 /// This function takes cost-based decisions for Load/Store instructions
844 /// and collects them in a map. This decision map is used for building
845 /// the lists of loop-uniform and loop-scalar instructions.
846 /// The calculated cost is saved with widening decision in order to
847 /// avoid redundant calculations.
848 void setCostBasedWideningDecision(ElementCount VF);
849
850 /// A call may be vectorized in different ways depending on whether we have
851 /// vectorized variants available and whether the target supports masking.
852 /// This function analyzes all calls in the function at the supplied VF,
853 /// makes a decision based on the costs of available options, and stores that
854 /// decision in a map for use in planning and plan execution.
855 void setVectorizedCallDecision(ElementCount VF);
856
857 /// Collect values we want to ignore in the cost model.
858 void collectValuesToIgnore();
859
860 /// \returns True if it is more profitable to scalarize instruction \p I for
861 /// vectorization factor \p VF.
863 assert(VF.isVector() &&
864 "Profitable to scalarize relevant only for VF > 1.");
865 assert(
866 TheLoop->isInnermost() &&
867 "cost-model should not be used for outer loops (in VPlan-native path)");
868
869 auto Scalars = InstsToScalarize.find(VF);
870 assert(Scalars != InstsToScalarize.end() &&
871 "VF not yet analyzed for scalarization profitability");
872 return Scalars->second.contains(I);
873 }
874
875 /// Returns true if \p I is known to be uniform after vectorization.
877 assert(
878 TheLoop->isInnermost() &&
879 "cost-model should not be used for outer loops (in VPlan-native path)");
880
881 // If VF is scalar, then all instructions are trivially uniform.
882 if (VF.isScalar())
883 return true;
884
885 // Pseudo probes must be duplicated per vector lane so that the
886 // profiled loop trip count is not undercounted.
888 return false;
889
890 auto UniformsPerVF = Uniforms.find(VF);
891 assert(UniformsPerVF != Uniforms.end() &&
892 "VF not yet analyzed for uniformity");
893 return UniformsPerVF->second.count(I);
894 }
895
896 /// Returns true if \p I is known to be scalar after vectorization.
898 assert(
899 TheLoop->isInnermost() &&
900 "cost-model should not be used for outer loops (in VPlan-native path)");
901 if (VF.isScalar())
902 return true;
903
904 auto ScalarsPerVF = Scalars.find(VF);
905 assert(ScalarsPerVF != Scalars.end() &&
906 "Scalar values are not calculated for VF");
907 return ScalarsPerVF->second.count(I);
908 }
909
910 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
911 /// for vectorization factor \p VF.
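  /// For example (illustrative): if the minimal-bitwidth analysis has shown
  /// that the result of an i32 add always fits in 8 bits, the add can be
  /// performed on <VF x i8> instead of <VF x i32> for a vector VF, and this
  /// returns true for that instruction.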
913 const auto &MinBWs = Config.getMinimalBitwidths();
914 // Truncs must truncate at most to their destination type.
915 if (isa_and_nonnull<TruncInst>(I) && MinBWs.contains(I) &&
916 I->getType()->getScalarSizeInBits() < MinBWs.lookup(I))
917 return false;
918 return VF.isVector() && MinBWs.contains(I) &&
921 }
922
923 /// Decision that was taken during cost calculation for memory instruction.
926 CM_Widen, // For consecutive accesses with stride +1.
927 CM_Widen_Reverse, // For consecutive accesses with stride -1.
933 };
934
935 /// Save vectorization decision \p W and \p Cost taken by the cost model for
936 /// instruction \p I and vector width \p VF.
939 assert(VF.isVector() && "Expected VF >=2");
940 WideningDecisions[{I, VF}] = {W, Cost};
941 }
942
943 /// Save vectorization decision \p W and \p Cost taken by the cost model for
944 /// interleaving group \p Grp and vector width \p VF.
948 assert(VF.isVector() && "Expected VF >=2");
949 /// Broadcast this decision to all instructions inside the group.
950 /// When interleaving, the cost will only be assigned to one instruction, the
951 /// insert position. For other cases, add the appropriate fraction of the
952 /// total cost to each instruction. This ensures accurate costs are used,
953 /// even if the insert position instruction is not used.
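    /// For example (illustrative): for a 4-member group with a total cost of 8,
    /// a non-interleave decision assigns a cost of 2 to every member, whereas a
    /// CM_Interleave decision assigns the full cost of 8 to the insert position
    /// and 0 to the other members.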
954 InstructionCost InsertPosCost = Cost;
955 InstructionCost OtherMemberCost = 0;
956 if (W != CM_Interleave)
957 OtherMemberCost = InsertPosCost = Cost / Grp->getNumMembers();
959 for (auto *I : Grp->members()) {
960 if (Grp->getInsertPos() == I)
961 WideningDecisions[{I, VF}] = {W, InsertPosCost};
962 else
963 WideningDecisions[{I, VF}] = {W, OtherMemberCost};
964 }
965 }
966
967 /// Return the cost model decision for the given instruction \p I and vector
968 /// width \p VF. Return CM_Unknown if this instruction did not pass
969 /// through the cost modeling.
971 assert(VF.isVector() && "Expected VF to be a vector VF");
972 assert(
973 TheLoop->isInnermost() &&
974 "cost-model should not be used for outer loops (in VPlan-native path)");
975
976 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
977 auto Itr = WideningDecisions.find(InstOnVF);
978 if (Itr == WideningDecisions.end())
979 return CM_Unknown;
980 return Itr->second.first;
981 }
982
983 /// Return the vectorization cost for the given instruction \p I and vector
984 /// width \p VF.
986 assert(VF.isVector() && "Expected VF >=2");
987 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
988 assert(WideningDecisions.contains(InstOnVF) &&
989 "The cost is not calculated");
990 return WideningDecisions[InstOnVF].second;
991 }
992
999
1001 Function *Variant, Intrinsic::ID IID,
1003 assert(!VF.isScalar() && "Expected vector VF");
1004 CallWideningDecisions[{CI, VF}] = {Kind, Variant, IID, Cost};
1005 }
1006
1008 ElementCount VF) const {
1009 assert(!VF.isScalar() && "Expected vector VF");
1010 auto I = CallWideningDecisions.find({CI, VF});
1011 if (I == CallWideningDecisions.end())
1012 return {CM_Unknown, nullptr, Intrinsic::not_intrinsic, 0};
1013 return I->second;
1014 }
1015
1016 /// Return True if instruction \p I is an optimizable truncate whose operand
1017 /// is an induction variable. Such a truncate will be removed by adding a new
1018 /// induction variable with the destination type.
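  /// For example (illustrative): given a 64-bit induction %iv and a user
  /// "trunc i64 %iv to i32", the truncate can be optimized away by creating a
  /// new i32 induction variable, provided the operand is the primary induction
  /// or the truncate is not already free for the target.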
1020 // If the instruction is not a truncate, return false.
1021 auto *Trunc = dyn_cast<TruncInst>(I);
1022 if (!Trunc)
1023 return false;
1024
1025 // Get the source and destination types of the truncate.
1026 Type *SrcTy = toVectorTy(Trunc->getSrcTy(), VF);
1027 Type *DestTy = toVectorTy(Trunc->getDestTy(), VF);
1028
1029 // If the truncate is free for the given types, return false. Replacing a
1030 // free truncate with an induction variable would add an induction variable
1031 // update instruction to each iteration of the loop. We exclude from this
1032 // check the primary induction variable since it will need an update
1033 // instruction regardless.
1034 Value *Op = Trunc->getOperand(0);
1035 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1036 return false;
1037
1038 // If the truncated value is not an induction variable, return false.
1039 return Legal->isInductionPhi(Op);
1040 }
1041
1042 /// Collects the instructions to scalarize for each predicated instruction in
1043 /// the loop.
1044 void collectInstsToScalarize(ElementCount VF);
1045
1046 /// Collect values that will not be widened, including Uniforms, Scalars, and
1047 /// Instructions to Scalarize for the given \p VF.
1048 /// The sets depend on CM decision for Load/Store instructions
1049 /// that may be vectorized as interleave, gather-scatter or scalarized.
1050 /// Also make a decision on what to do about call instructions in the loop
1051 /// at that VF -- scalarize, call a known vector routine, or call a
1052 /// vector intrinsic.
1054 // Do the analysis once.
1055 if (VF.isScalar() || Uniforms.contains(VF))
1056 return;
1058 collectLoopUniforms(VF);
1060 collectLoopScalars(VF);
1062 }
1063
1064 /// Given costs for both strategies, return true if the scalar predication
1065 /// lowering should be used for div/rem. This incorporates an override
1066 /// option so it is not simply a cost comparison.
1068 InstructionCost MaskedCost) const {
1069 switch (ForceMaskedDivRem) {
1070 case cl::BOU_UNSET:
1071 return ScalarCost < MaskedCost;
1072 case cl::BOU_TRUE:
1073 return false;
1074 case cl::BOU_FALSE:
1075 return true;
1076 }
1077 llvm_unreachable("impossible case value");
1078 }
1079
1080 /// Returns true if \p I is an instruction which requires predication and
1081 /// for which our chosen predication strategy is scalarization (i.e. we
1082 /// don't have an alternate strategy such as masking available).
1083 /// \p VF is the vectorization factor that will be used to vectorize \p I.
1084 bool isScalarWithPredication(Instruction *I, ElementCount VF);
1085
1086 /// Wrapper function for LoopVectorizationLegality::isMaskRequired,
1087 /// that passes the Instruction \p I and if we fold tail.
1088 bool isMaskRequired(Instruction *I) const;
1089
1090 /// Returns true if \p I is an instruction that needs to be predicated
1091 /// at runtime. The result is independent of the predication mechanism.
1092 /// Superset of instructions that return true for isScalarWithPredication.
1093 bool isPredicatedInst(Instruction *I) const;
1094
1095 /// A helper function that returns how much we should divide the cost of a
1096 /// predicated block by. Typically this is the reciprocal of the block
1097 /// probability: if we return X, we are assuming the predicated block will
1098 /// execute once for every X iterations of the loop header, so the block should
1099 /// only contribute 1/X of its cost to the total cost calculation. When
1100 /// optimizing for code size it will just be 1, as code size costs don't depend
1101 /// on execution probabilities.
1102 ///
1103 /// Note that if a block wasn't originally predicated but was predicated due
1104 /// to tail folding, the divisor will still be 1 because it will execute for
1105 /// every iteration of the loop header.
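  /// For example (illustrative): an if-guarded block expected to execute on one
  /// out of every four iterations of the loop header yields a divisor of 4, so
  /// a block cost of 8 contributes only 2 to the loop's per-iteration cost;
  /// under optsize the divisor is 1 and the full cost of 8 is counted.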
1106 inline uint64_t
1107 getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind,
1108 const BasicBlock *BB);
1109
1110 /// Returns true if an artificially high cost for emulated masked memrefs
1111 /// should be used.
1112 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1113
1114 /// Return the costs for our two available strategies for lowering a
1115 /// div/rem operation which requires speculating at least one lane.
1116 /// First result is for scalarization (will be invalid for scalable
1117 /// vectors); second is for the masked intrinsic strategy.
1118 std::pair<InstructionCost, InstructionCost>
1119 getDivRemSpeculationCost(Instruction *I, ElementCount VF);
1120
1121 /// Returns true if \p I is a memory instruction with consecutive memory
1122 /// access that can be widened.
1123 bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF);
1124
1125 /// Returns true if \p I is a memory instruction in an interleaved-group
1126 /// of memory accesses that can be vectorized with wide vector loads/stores
1127 /// and shuffles.
1128 bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const;
1129
1130 /// Check if \p Instr belongs to any interleaved access group.
1132 return InterleaveInfo.isInterleaved(Instr);
1133 }
1134
1135 /// Get the interleaved access group that \p Instr belongs to.
1138 return InterleaveInfo.getInterleaveGroup(Instr);
1139 }
1140
1141 /// Returns true if we're required to use a scalar epilogue for at least
1142 /// the final iteration of the original loop.
1143 bool requiresScalarEpilogue(bool IsVectorizing) const {
1144 if (!isEpilogueAllowed()) {
1145 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1146 return false;
1147 }
1148 // If we might exit from anywhere but the latch and early exit vectorization
1149 // is disabled, we must run the exiting iteration in scalar form.
1150 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
1151 !(EnableEarlyExitVectorization && Legal->hasUncountableEarlyExit())) {
1152 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: not exiting "
1153 "from latch block\n");
1154 return true;
1155 }
1156 if (IsVectorizing && InterleaveInfo.requiresScalarEpilogue()) {
1157 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: "
1158 "interleaved group requires scalar epilogue\n");
1159 return true;
1160 }
1161 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1162 return false;
1163 }
1164
1165 /// Returns true if an epilogue is allowed (e.g., not prevented by
1166 /// optsize or a loop hint annotation).
1167 bool isEpilogueAllowed() const {
1168 return EpilogueLoweringStatus == CM_EpilogueAllowed;
1169 }
1170
1171 /// Returns true if tail-folding is preferred over an epilogue.
1173 return EpilogueLoweringStatus == CM_EpilogueNotNeededFoldTail ||
1174 EpilogueLoweringStatus == CM_EpilogueNotAllowedFoldTail;
1175 }
1176
1177 /// Returns the TailFoldingStyle that is best for the current loop.
1179 return ChosenTailFoldingStyle;
1180 }
1181
1182 /// Selects and saves TailFoldingStyle.
1183 /// \param IsScalableVF true if scalable vector factors enabled.
1184 /// \param UserIC User specific interleave count.
1185 void setTailFoldingStyle(bool IsScalableVF, unsigned UserIC) {
1186 assert(ChosenTailFoldingStyle == TailFoldingStyle::None &&
1187 "Tail folding must not be selected yet.");
1188 if (!Legal->canFoldTailByMasking()) {
1189 ChosenTailFoldingStyle = TailFoldingStyle::None;
1190 return;
1191 }
1192
1193 // Default to TTI preference, but allow command line override.
1194 ChosenTailFoldingStyle = TTI.getPreferredTailFoldingStyle();
1195 if (ForceTailFoldingStyle.getNumOccurrences())
1196 ChosenTailFoldingStyle = ForceTailFoldingStyle.getValue();
1197
1198 if (ChosenTailFoldingStyle != TailFoldingStyle::DataWithEVL)
1199 return;
1200 // Override EVL styles if needed.
1201 // FIXME: Investigate opportunity for fixed vector factor.
1202 bool EVLIsLegal = UserIC <= 1 && IsScalableVF &&
1203 TTI.hasActiveVectorLength() && !EnableVPlanNativePath;
1204 if (EVLIsLegal)
1205 return;
1206 // If for some reason EVL mode is unsupported, fallback to an epilogue
1207 // if it's allowed, or DataWithoutLaneMask otherwise.
1208 if (EpilogueLoweringStatus == CM_EpilogueAllowed ||
1209 EpilogueLoweringStatus == CM_EpilogueNotNeededFoldTail)
1210 ChosenTailFoldingStyle = TailFoldingStyle::None;
1211 else
1212 ChosenTailFoldingStyle = TailFoldingStyle::DataWithoutLaneMask;
1213
1214 LLVM_DEBUG(
1215 dbgs() << "LV: Preference for VP intrinsics indicated. Will "
1216 "not try to generate VP Intrinsics "
1217 << (UserIC > 1
1218 ? "since interleave count specified is greater than 1.\n"
1219 : "due to non-interleaving reasons.\n"));
1220 }
1221
1222 /// Returns true if all loop blocks should be masked to fold tail loop.
1223 bool foldTailByMasking() const {
1225 }
1226
1227 /// Returns true if the use of wide lane masks is requested and the loop is
1228 /// using tail-folding with a lane mask for control flow.
1231 return false;
1232
1234 }
1235
1236 /// Returns true if the instructions in this block requires predication
1237 /// for any reason, e.g. because tail folding now requires a predicate
1238 /// or because the block in the original loop was predicated.
1240 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1241 }
1242
1243 /// Returns true if VP intrinsics with explicit vector length support should
1244 /// be generated in the tail folded loop.
1248
1249 /// Returns true if the predicated reduction select should be used to set the
1250 /// incoming value for the reduction phi.
1251 bool usePredicatedReductionSelect(RecurKind RecurrenceKind) const {
1252 // Force to use predicated reduction select since the EVL of the
1253 // second-to-last iteration might not be VF*UF.
1254 if (foldTailWithEVL())
1255 return true;
1256
1257 // Note: For FindLast recurrences we prefer a predicated select to simplify
1258 // matching in handleFindLastReductions(), rather than handle multiple
1259 // cases.
1261 return true;
1262
1264 TTI.preferPredicatedReductionSelect();
1265 }
1266
1267 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1268 /// with factor VF. Return the cost of the instruction, including
1269 /// scalarization overhead if it's needed.
1270 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1271
1272 /// Estimate cost of a call instruction CI if it were vectorized with factor
1273 /// VF. Return the cost of the instruction, including scalarization overhead
1274 /// if it's needed.
1275 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const;
1276
1277 /// Invalidates decisions already taken by the cost model.
1279 WideningDecisions.clear();
1280 CallWideningDecisions.clear();
1281 Uniforms.clear();
1282 Scalars.clear();
1283 }
1284
1285 /// Returns the expected execution cost. The unit of the cost does
1286 /// not matter because we use the 'cost' units to compare different
1287 /// vector widths. The cost that is returned is *not* normalized by
1288 /// the factor width.
1289 InstructionCost expectedCost(ElementCount VF);
1290
1291 /// Returns true if epilogue vectorization is considered profitable, and
1292 /// false otherwise.
1293 /// \p VF is the vectorization factor chosen for the original loop.
1294 /// \p Multiplier is an additional scaling factor applied to VF before
1295 /// comparing to EpilogueVectorizationMinVF.
1296 bool isEpilogueVectorizationProfitable(const ElementCount VF,
1297 const unsigned IC) const;
1298
1299 /// Returns the execution time cost of an instruction for a given vector
1300 /// width. Vector width of one means scalar.
1301 InstructionCost getInstructionCost(Instruction *I, ElementCount VF);
1302
1303 /// Return the cost of instructions in an inloop reduction pattern, if I is
1304 /// part of that pattern.
1305 std::optional<InstructionCost> getReductionPatternCost(Instruction *I,
1306 ElementCount VF,
1307 Type *VectorTy) const;
1308
1309 /// Returns true if \p Op should be considered invariant and if it is
1310 /// trivially hoistable.
1311 bool shouldConsiderInvariant(Value *Op);
1312
1313 /// Returns true if \p I has been forced to be scalarized at \p VF.
1315 auto FS = ForcedScalars.find(VF);
1316 return FS != ForcedScalars.end() && FS->second.contains(I);
1317 }
1318
1319private:
1320 unsigned NumPredStores = 0;
1321
1322 /// VF selection state independent of cost-modeling decisions.
1323 VFSelectionContext &Config;
1324
1325 /// Calculate vectorization cost of memory instruction \p I.
1326 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1327
1328 /// The cost computation for scalarized memory instruction.
1329 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1330
1331 /// The cost computation for interleaving group of memory instructions.
1332 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1333
1334 /// The cost computation for Gather/Scatter instruction.
1335 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1336
1337 /// The cost computation for widening instruction \p I with consecutive
1338 /// memory access.
1339 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1340
1341 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1342 /// Load: scalar load + broadcast.
1343 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1344 /// element)
1345 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1346
1347 /// Estimate the overhead of scalarizing an instruction. This is a
1348 /// convenience wrapper for the type-based getScalarizationOverhead API.
1350 ElementCount VF) const;
1351
1352 /// A type representing the costs for instructions if they were to be
1353 /// scalarized rather than vectorized. The entries are Instruction-Cost
1354 /// pairs.
1355 using ScalarCostsTy = MapVector<Instruction *, InstructionCost>;
1356
1357 /// A set containing all BasicBlocks that are known to be present after
1358 /// vectorization as predicated blocks.
1360 PredicatedBBsAfterVectorization;
1361
1362 /// Records whether it is allowed to have the original scalar loop execute at
1363 /// least once. This may be needed as a fallback loop in case runtime
1364 /// aliasing/dependence checks fail, or to handle the tail/remainder
1365 /// iterations when the trip count is unknown or doesn't divide by the VF,
1366 /// or as a peel-loop to handle gaps in interleave-groups.
1367 /// Under optsize and when the trip count is very small we don't allow any
1368 /// iterations to execute in the scalar loop.
1369 EpilogueLowering EpilogueLoweringStatus = CM_EpilogueAllowed;
1370
1371 /// Control finally chosen tail folding style.
1372 TailFoldingStyle ChosenTailFoldingStyle = TailFoldingStyle::None;
1373
1374 /// A map holding scalar costs for different vectorization factors. The
1375 /// presence of a cost for an instruction in the mapping indicates that the
1376 /// instruction will be scalarized when vectorizing with the associated
1377 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1379
1380 /// Holds the instructions known to be uniform after vectorization.
1381 /// The data is collected per VF.
1383
1384 /// Holds the instructions known to be scalar after vectorization.
1385 /// The data is collected per VF.
1387
1388 /// Holds the instructions (address computations) that are forced to be
1389 /// scalarized.
1391
1392 /// Returns the expected difference in cost from scalarizing the expression
1393 /// feeding a predicated instruction \p PredInst. The instructions to
1394 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1395 /// non-negative return value implies the expression will be scalarized.
1396 /// Currently, only single-use chains are considered for scalarization.
1397 InstructionCost computePredInstDiscount(Instruction *PredInst,
1398 ScalarCostsTy &ScalarCosts,
1399 ElementCount VF);
1400
1401 /// Collect the instructions that are uniform after vectorization. An
1402 /// instruction is uniform if we represent it with a single scalar value in
1403 /// the vectorized loop corresponding to each vector iteration. Examples of
1404 /// uniform instructions include pointer operands of consecutive or
1405 /// interleaved memory accesses. Note that although uniformity implies an
1406 /// instruction will be scalar, the reverse is not true. In general, a
1407 /// scalarized instruction will be represented by VF scalar values in the
1408 /// vectorized loop, each corresponding to an iteration of the original
1409 /// scalar loop.
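  /// For example (illustrative): for a consecutive widened load "x = A[i]", the
  /// address computation feeding the load is uniform, since one scalar pointer
  /// (the address of the first lane) suffices per vector iteration, while the
  /// loaded value itself is widened and is neither uniform nor scalar.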
1410 void collectLoopUniforms(ElementCount VF);
1411
1412 /// Collect the instructions that are scalar after vectorization. An
1413 /// instruction is scalar if it is known to be uniform or will be scalarized
1414 /// during vectorization. collectLoopScalars should only add non-uniform nodes
1415 /// to the list if they are used by a load/store instruction that is marked as
1416 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1417 /// VF values in the vectorized loop, each corresponding to an iteration of
1418 /// the original scalar loop.
1419 void collectLoopScalars(ElementCount VF);
1420
1421 /// Keeps cost model vectorization decision and cost for instructions.
1422 /// Right now it is used for memory instructions only.
1424 std::pair<InstWidening, InstructionCost>>;
1425
1426 DecisionList WideningDecisions;
1427
1428 using CallDecisionList =
1429 DenseMap<std::pair<CallInst *, ElementCount>, CallWideningDecision>;
1430
1431 CallDecisionList CallWideningDecisions;
1432
1433 /// Returns true if \p V is expected to be vectorized and it needs to be
1434 /// extracted.
1435 bool needsExtract(Value *V, ElementCount VF) const {
1437 if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1438 TheLoop->isLoopInvariant(I) ||
1439 getWideningDecision(I, VF) == CM_Scalarize ||
1440 (isa<CallInst>(I) &&
1441 getCallWideningDecision(cast<CallInst>(I), VF).Kind == CM_Scalarize))
1442 return false;
1443
1444 // Assume we can vectorize V (and hence we need extraction) if the
1445 // scalars are not computed yet. This can happen, because it is called
1446 // via getScalarizationOverhead from setCostBasedWideningDecision, before
1447 // the scalars are collected. That should be a safe assumption in most
1448 // cases, because we check if the operands have vectorizable types
1449 // beforehand in LoopVectorizationLegality.
1450 return !Scalars.contains(VF) || !isScalarAfterVectorization(I, VF);
1451 };
1452
1453 /// Returns a range containing only operands needing to be extracted.
1454 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1455 ElementCount VF) const {
1456
1457 SmallPtrSet<const Value *, 4> UniqueOperands;
1459 for (Value *Op : Ops) {
1460 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second ||
1461 !needsExtract(Op, VF))
1462 continue;
1463 Res.push_back(Op);
1464 }
1465 return Res;
1466 }
1467
1468public:
1469 /// The loop that we evaluate.
1471
1472 /// Predicated scalar evolution analysis.
1474
1475 /// Loop Info analysis.
1477
1478 /// Vectorization legality.
1480
1481 /// Vector target information.
1483
1484 /// Target Library Info.
1486
1487 /// Assumption cache.
1489
1490 /// Interface to emit optimization remarks.
1492
1493 /// A function to lazily fetch BlockFrequencyInfo. This avoids computing it
1494 /// unless necessary, e.g. when the loop isn't legal to vectorize or when
1495 /// there is no predication.
1496 std::function<BlockFrequencyInfo &()> GetBFI;
1497 /// The BlockFrequencyInfo returned from GetBFI.
1499 /// Returns the BlockFrequencyInfo for the function if cached, otherwise
1500 /// fetches it via GetBFI. Avoids an indirect call to the std::function.
1502 if (!BFI)
1503 BFI = &GetBFI();
1504 return *BFI;
1505 }
1506
1508
1509 /// Loop Vectorize Hint.
1511
1512 /// The interleave access information contains groups of interleaved accesses
1513 /// with the same stride and close to each other.
1515
1516 /// Values to ignore in the cost model.
1518
1519 /// Values to ignore in the cost model when VF > 1.
1521};
1522} // end namespace llvm
1523
1524namespace {
1525/// Helper struct to manage generating runtime checks for vectorization.
1526///
1527/// The runtime checks are created up-front in temporary blocks, un-linked from
1528/// the existing IR, to allow a better estimate of their cost. After deciding to
1529/// vectorize, the checks are moved back. If deciding not to vectorize, the
1530/// temporary blocks are completely removed.
1531class GeneratedRTChecks {
1532 /// Basic block which contains the generated SCEV checks, if any.
1533 BasicBlock *SCEVCheckBlock = nullptr;
1534
1535 /// The value representing the result of the generated SCEV checks. If it is
1536 /// nullptr no SCEV checks have been generated.
1537 Value *SCEVCheckCond = nullptr;
1538
1539 /// Basic block which contains the generated memory runtime checks, if any.
1540 BasicBlock *MemCheckBlock = nullptr;
1541
1542 /// The value representing the result of the generated memory runtime checks.
1543 /// If it is nullptr no memory runtime checks have been generated.
1544 Value *MemRuntimeCheckCond = nullptr;
1545
1546 DominatorTree *DT;
1547 LoopInfo *LI;
1549
1550 SCEVExpander SCEVExp;
1551 SCEVExpander MemCheckExp;
1552
1553 bool CostTooHigh = false;
1554
1555 Loop *OuterLoop = nullptr;
1556
1558
1559 /// The kind of cost that we are calculating
1561
1562public:
1563 GeneratedRTChecks(PredicatedScalarEvolution &PSE, DominatorTree *DT,
1566 : DT(DT), LI(LI), TTI(TTI),
1567 SCEVExp(*PSE.getSE(), "scev.check", /*PreserveLCSSA=*/false),
1568 MemCheckExp(*PSE.getSE(), "scev.check", /*PreserveLCSSA=*/false),
1569 PSE(PSE), CostKind(CostKind) {}
1570
1571 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1572 /// accurately estimate the cost of the runtime checks. The blocks are
1573 /// un-linked from the IR and are added back during vector code generation. If
1574 /// there is no vector code generation, the check blocks are removed
1575 /// completely.
1576 void create(Loop *L, const LoopAccessInfo &LAI,
1577 const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC,
1578 OptimizationRemarkEmitter &ORE) {
1579
1580 // Hard cutoff to limit compile-time increase in case a very large number of
1581 // runtime checks needs to be generated.
1582 // TODO: Skip cutoff if the loop is guaranteed to execute, e.g. due to
1583 // profile info.
1584 CostTooHigh =
1586 if (CostTooHigh) {
1587 // Mark runtime checks as never succeeding when they exceed the threshold.
1588 MemRuntimeCheckCond = ConstantInt::getTrue(L->getHeader()->getContext());
1589 SCEVCheckCond = ConstantInt::getTrue(L->getHeader()->getContext());
1590 ORE.emit([&]() {
1591 return OptimizationRemarkAnalysisAliasing(
1592 DEBUG_TYPE, "TooManyMemoryRuntimeChecks", L->getStartLoc(),
1593 L->getHeader())
1594 << "loop not vectorized: too many memory checks needed";
1595 });
1596 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
1597 return;
1598 }
1599
1600 BasicBlock *LoopHeader = L->getHeader();
1601 BasicBlock *Preheader = L->getLoopPreheader();
1602
1603 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1604 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1605 // may be used by SCEVExpander. The blocks will be un-linked from their
1606 // predecessors and removed from LI & DT at the end of the function.
1607 if (!UnionPred.isAlwaysTrue()) {
1608 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1609 nullptr, "vector.scevcheck");
1610
1611 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1612 &UnionPred, SCEVCheckBlock->getTerminator());
1613 if (isa<Constant>(SCEVCheckCond)) {
1614 // Clean up directly after expanding the predicate to a constant, to
1615 // avoid further expansions re-using anything left over from SCEVExp.
1616 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1617 SCEVCleaner.cleanup();
1618 }
1619 }
1620
1621 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1622 if (RtPtrChecking.Need) {
1623 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1624 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1625 "vector.memcheck");
1626
1627 auto DiffChecks = RtPtrChecking.getDiffChecks();
1628 if (DiffChecks) {
1629 Value *RuntimeVF = nullptr;
1630 MemRuntimeCheckCond = addDiffRuntimeChecks(
1631 MemCheckBlock->getTerminator(), *DiffChecks, MemCheckExp,
1632 [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) {
1633 if (!RuntimeVF)
1634 RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);
1635 return RuntimeVF;
1636 },
1637 IC);
1638 } else {
1639 MemRuntimeCheckCond = addRuntimeChecks(
1640 MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),
1642 }
1643 assert(MemRuntimeCheckCond &&
1644 "no RT checks generated although RtPtrChecking "
1645 "claimed checks are required");
1646 }
1647
1648 SCEVExp.eraseDeadInstructions(SCEVCheckCond);
1649
1650 if (!MemCheckBlock && !SCEVCheckBlock)
1651 return;
1652
1653 // Unhook the temporary block with the checks, update various places
1654 // accordingly.
1655 if (SCEVCheckBlock)
1656 SCEVCheckBlock->replaceAllUsesWith(Preheader);
1657 if (MemCheckBlock)
1658 MemCheckBlock->replaceAllUsesWith(Preheader);
1659
1660 if (SCEVCheckBlock) {
1661 SCEVCheckBlock->getTerminator()->moveBefore(
1662 Preheader->getTerminator()->getIterator());
1663 auto *UI = new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1664 UI->setDebugLoc(DebugLoc::getTemporary());
1665 Preheader->getTerminator()->eraseFromParent();
1666 }
1667 if (MemCheckBlock) {
1668 MemCheckBlock->getTerminator()->moveBefore(
1669 Preheader->getTerminator()->getIterator());
1670 auto *UI = new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1671 UI->setDebugLoc(DebugLoc::getTemporary());
1672 Preheader->getTerminator()->eraseFromParent();
1673 }
1674
1675 DT->changeImmediateDominator(LoopHeader, Preheader);
1676 if (MemCheckBlock) {
1677 DT->eraseNode(MemCheckBlock);
1678 LI->removeBlock(MemCheckBlock);
1679 }
1680 if (SCEVCheckBlock) {
1681 DT->eraseNode(SCEVCheckBlock);
1682 LI->removeBlock(SCEVCheckBlock);
1683 }
1684
1685 // Outer loop is used as part of the later cost calculations.
1686 OuterLoop = L->getParentLoop();
1687 }
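  // Illustrative flow (a sketch only; the actual call sites live in the
  // planner and may differ): a client typically constructs the checks once,
  // queries their cost, and keeps the blocks only if vectorization proceeds:
  //   GeneratedRTChecks Checks(PSE, DT, LI, TTI, CostKind);
  //   Checks.create(L, *LAI, UnionPred, VF, IC, ORE);
  //   if (Checks.getCost().isValid() && Checks.hasChecks())
  //     /* re-link getSCEVChecks()/getMemRuntimeChecks() into the CFG */;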
1688
1689 InstructionCost getCost() {
1690 if (SCEVCheckBlock || MemCheckBlock)
1691 LLVM_DEBUG(dbgs() << "Calculating cost of runtime checks:\n");
1692
1693 if (CostTooHigh) {
1694 InstructionCost Cost;
1695 Cost.setInvalid();
1696 LLVM_DEBUG(dbgs() << " number of checks exceeded threshold\n");
1697 return Cost;
1698 }
1699
1700 InstructionCost RTCheckCost = 0;
1701 if (SCEVCheckBlock)
1702 for (Instruction &I : *SCEVCheckBlock) {
1703 if (SCEVCheckBlock->getTerminator() == &I)
1704 continue;
1705 InstructionCost C = TTI->getInstructionCost(&I, CostKind);
1706 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1707 RTCheckCost += C;
1708 }
1709 if (MemCheckBlock) {
1710 InstructionCost MemCheckCost = 0;
1711 for (Instruction &I : *MemCheckBlock) {
1712 if (MemCheckBlock->getTerminator() == &I)
1713 continue;
1714 InstructionCost C = TTI->getInstructionCost(&I, CostKind);
1715 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1716 MemCheckCost += C;
1717 }
1718
1719 // If the runtime memory checks are being created inside an outer loop,
1720 // determine whether the checks are invariant in that outer loop. If so,
1721 // they will likely be hoisted out, so their effective cost is reduced
1722 // according to the outer-loop trip count.
1723 if (OuterLoop) {
1724 ScalarEvolution *SE = MemCheckExp.getSE();
1725 // TODO: If profitable, we could refine this further by analysing every
1726 // individual memory check, since there could be a mixture of loop
1727 // variant and invariant checks that mean the final condition is
1728 // variant.
1729 const SCEV *Cond = SE->getSCEV(MemRuntimeCheckCond);
1730 if (SE->isLoopInvariant(Cond, OuterLoop)) {
1731 // It seems reasonable to assume that we can reduce the effective
1732 // cost of the checks even when we know nothing about the trip
1733 // count. Assume that the outer loop executes at least twice.
1734 unsigned BestTripCount = 2;
1735
1736 // Get the best known TC estimate.
1737 if (auto EstimatedTC = getSmallBestKnownTC(
1738 PSE, OuterLoop, /* CanUseConstantMax = */ false))
1739 if (EstimatedTC->isFixed())
1740 BestTripCount = EstimatedTC->getFixedValue();
1741
1742 InstructionCost NewMemCheckCost = MemCheckCost / BestTripCount;
1743
1744 // Let's ensure the cost is always at least 1.
1745 NewMemCheckCost = std::max(NewMemCheckCost.getValue(),
1746 (InstructionCost::CostType)1);
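      // For example, a memory-check cost of 8 with an estimated outer-loop
      // trip count of 4 yields an effective cost of max(8 / 4, 1) = 2.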
1747
1748 if (BestTripCount > 1)
1750 << "We expect runtime memory checks to be hoisted "
1751 << "out of the outer loop. Cost reduced from "
1752 << MemCheckCost << " to " << NewMemCheckCost << '\n');
1753
1754 MemCheckCost = NewMemCheckCost;
1755 }
1756 }
1757
1758 RTCheckCost += MemCheckCost;
1759 }
1760
1761 if (SCEVCheckBlock || MemCheckBlock)
1762 LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
1763 << "\n");
1764
1765 return RTCheckCost;
1766 }
1767
1768 /// Remove the created SCEV & memory runtime check blocks & instructions, if
1769 /// unused.
1770 ~GeneratedRTChecks() {
1771 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1772 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
1773 bool SCEVChecksUsed = !SCEVCheckBlock || !pred_empty(SCEVCheckBlock);
1774 bool MemChecksUsed = !MemCheckBlock || !pred_empty(MemCheckBlock);
1775 if (SCEVChecksUsed)
1776 SCEVCleaner.markResultUsed();
1777
1778 if (MemChecksUsed) {
1779 MemCheckCleaner.markResultUsed();
1780 } else {
1781 auto &SE = *MemCheckExp.getSE();
1782 // Memory runtime check generation creates compares that use expanded
1783 // values. Remove them before running the SCEVExpanderCleaners.
1784 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
1785 if (MemCheckExp.isInsertedInstruction(&I))
1786 continue;
1787 SE.forgetValue(&I);
1788 I.eraseFromParent();
1789 }
1790 }
1791 MemCheckCleaner.cleanup();
1792 SCEVCleaner.cleanup();
1793
1794 if (!SCEVChecksUsed)
1795 SCEVCheckBlock->eraseFromParent();
1796 if (!MemChecksUsed)
1797 MemCheckBlock->eraseFromParent();
1798 }
1799
1800 /// Retrieves the SCEVCheckCond and SCEVCheckBlock that were generated as IR
1801 /// outside VPlan.
1802 std::pair<Value *, BasicBlock *> getSCEVChecks() const {
1803 using namespace llvm::PatternMatch;
1804 if (!SCEVCheckCond || match(SCEVCheckCond, m_ZeroInt()))
1805 return {nullptr, nullptr};
1806
1807 return {SCEVCheckCond, SCEVCheckBlock};
1808 }
1809
1810 /// Retrieves the MemCheckCond and MemCheckBlock that were generated as IR
1811 /// outside VPlan.
1812 std::pair<Value *, BasicBlock *> getMemRuntimeChecks() const {
1813 using namespace llvm::PatternMatch;
1814 if (MemRuntimeCheckCond && match(MemRuntimeCheckCond, m_ZeroInt()))
1815 return {nullptr, nullptr};
1816 return {MemRuntimeCheckCond, MemCheckBlock};
1817 }
1818
1819 /// Return true if any runtime checks have been added
1820 bool hasChecks() const {
1821 return getSCEVChecks().first || getMemRuntimeChecks().first;
1822 }
1823};
1824} // namespace
1825
1827 return Style == TailFoldingStyle::Data ||
1829}
1830
1834
1835// Return true if \p OuterLp is an outer loop annotated with hints for explicit
1836// vectorization. The loop needs to be annotated with #pragma omp simd
1837// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If the
1838// vector length information is not provided, vectorization is not considered
1839// explicit. Interleave hints are not allowed either. These limitations will be
1840// relaxed in the future.
1841// Please note that we are currently forced to abuse the pragma 'clang
1842// vectorize' semantics. This pragma provides *auto-vectorization hints*
1843// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1844// provides *explicit vectorization hints* (LV can bypass legal checks and
1845// assume that vectorization is legal). However, both hints are implemented
1846// using the same metadata (llvm.loop.vectorize, processed by
1847// LoopVectorizeHints). This will be fixed in the future when the native IR
1848// representation for pragma 'omp simd' is introduced.
1849static bool isExplicitVecOuterLoop(Loop *OuterLp,
1850 OptimizationRemarkEmitter *ORE) {
1851 assert(!OuterLp->isInnermost() && "This is not an outer loop");
1852 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1853
1854 // Only outer loops with an explicit vectorization hint are supported.
1855 // Unannotated outer loops are ignored.
1856 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1857 return false;
1858
1859 Function *Fn = OuterLp->getHeader()->getParent();
1860 if (!Hints.allowVectorization(Fn, OuterLp,
1861 true /*VectorizeOnlyWhenForced*/)) {
1862 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1863 return false;
1864 }
1865
1866 if (Hints.getInterleave() > 1) {
1867 // TODO: Interleave support is future work.
1868 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1869 "outer loops.\n");
1870 Hints.emitRemarkWithHints();
1871 return false;
1872 }
1873
1874 return true;
1875}
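// For example, an outer loop annotated with
//   #pragma clang loop vectorize(enable) vectorize_width(4)
// (or '#pragma omp simd simdlen(4)') is treated as an explicit vectorization
// request and accepted by this check.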
1876
1880 // Collect inner loops and outer loops without irreducible control flow. For
1881 // now, only collect outer loops that have explicit vectorization hints. If we
1882 // are stress testing the VPlan H-CFG construction, we collect the outermost
1883 // loop of every loop nest.
1884 if (L.isInnermost() || VPlanBuildOuterloopStressTest ||
1886 LoopBlocksRPO RPOT(&L);
1887 RPOT.perform(LI);
1889 V.push_back(&L);
1890 // TODO: Collect inner loops inside marked outer loops in case
1891 // vectorization fails for the outer loop. Do not invoke
1892 // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1893 // already known to be reducible. We can use an inherited attribute for
1894 // that.
1895 return;
1896 }
1897 }
1898 for (Loop *InnerL : L)
1899 collectSupportedLoops(*InnerL, LI, ORE, V);
1900}
1901
1902//===----------------------------------------------------------------------===//
1903// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1904// LoopVectorizationCostModel and LoopVectorizationPlanner.
1905//===----------------------------------------------------------------------===//
1906
1907/// For the given VF and UF and maximum trip count computed for the loop, return
1908/// true if the induction variable cannot overflow in the vectorized loop; in
1909/// that case a runtime overflow check always evaluates to false and can be
1910/// removed.
1911 static bool isIndvarOverflowCheckKnownFalse(
1912 const LoopVectorizationCostModel *Cost,
1913 ElementCount VF, std::optional<unsigned> UF = std::nullopt) {
1914 // Always be conservative if we don't know the exact unroll factor.
1915 unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);
1916
1917 IntegerType *IdxTy = Cost->Legal->getWidestInductionType();
1918 APInt MaxUIntTripCount = IdxTy->getMask();
1919
1920 // The runtime overflow check is known to be false iff the (max) trip count
1921 // is known and (max) trip count + (VF * UF) does not overflow in the type of
1922 // the vector loop induction variable.
1923 if (unsigned TC = Cost->PSE.getSmallConstantMaxTripCount()) {
1924 uint64_t MaxVF = VF.getKnownMinValue();
1925 if (VF.isScalable()) {
1926 std::optional<unsigned> MaxVScale =
1927 getMaxVScale(*Cost->TheFunction, Cost->TTI);
1928 if (!MaxVScale)
1929 return false;
1930 MaxVF *= *MaxVScale;
1931 }
1932
1933 return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
1934 }
1935
1936 return false;
1937}
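// For example, with a 32-bit induction, a maximum trip count of 1000, a fixed
// VF of 4 and UF of 2, UINT32_MAX - 1000 is far greater than 4 * 2 = 8, so the
// overflow check is known false and can be dropped.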
1938
1939// Return whether we allow using masked interleave-groups (for dealing with
1940// strided loads/stores that reside in predicated blocks, or for dealing
1941// with gaps).
1943 // If an override option has been passed in for interleaved accesses, use it.
1944 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
1945 return EnableMaskedInterleavedMemAccesses;
1946
1947 return TTI.enableMaskedInterleavedAccessVectorization();
1948}
1949
1950/// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
1951/// VPBB are moved to the end of the newly created VPIRBasicBlock. All
1952/// predecessors and successors of VPBB, if any, are rewired to the new
1953/// VPIRBasicBlock. If \p VPBB may be unreachable, \p Plan must be passed.
1955 BasicBlock *IRBB,
1956 VPlan *Plan = nullptr) {
1957 if (!Plan)
1958 Plan = VPBB->getPlan();
1959 VPIRBasicBlock *IRVPBB = Plan->createVPIRBasicBlock(IRBB);
1960 auto IP = IRVPBB->begin();
1961 for (auto &R : make_early_inc_range(VPBB->phis()))
1962 R.moveBefore(*IRVPBB, IP);
1963
1964 for (auto &R :
1966 R.moveBefore(*IRVPBB, IRVPBB->end());
1967
1968 VPBlockUtils::reassociateBlocks(VPBB, IRVPBB);
1969 // VPBB is now dead and will be cleaned up when the plan gets destroyed.
1970 return IRVPBB;
1971}
1972
1974 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
1975 assert(VectorPH && "Invalid loop structure");
1976 assert((OrigLoop->getUniqueLatchExitBlock() ||
1977 Cost->requiresScalarEpilogue(VF.isVector())) &&
1978 "loops not exiting via the latch without required epilogue?");
1979
1980 // NOTE: The Plan's scalar preheader VPBB isn't replaced with a VPIRBasicBlock
1981 // wrapping the newly created scalar preheader here at the moment, because the
1982 // Plan's scalar preheader may be unreachable at this point. Instead it is
1983 // replaced in executePlan.
1984 return SplitBlock(VectorPH, VectorPH->getTerminator(), DT, LI, nullptr,
1985 Twine(Prefix) + "scalar.ph");
1986}
1987
1988/// Knowing that loop \p L executes a single vector iteration, add instructions
1989/// that will get simplified and thus should not have any cost to \p
1990/// InstsToIgnore.
1993 SmallPtrSetImpl<Instruction *> &InstsToIgnore) {
1994 auto *Cmp = L->getLatchCmpInst();
1995 if (Cmp)
1996 InstsToIgnore.insert(Cmp);
1997 for (const auto &KV : IL) {
1998 // Extract the key by hand so that it can be used in the lambda below. Note
1999 // that captured structured bindings are a C++20 extension.
2000 const PHINode *IV = KV.first;
2001
2002 // Get next iteration value of the induction variable.
2003 Instruction *IVInst =
2004 cast<Instruction>(IV->getIncomingValueForBlock(L->getLoopLatch()));
2005 if (all_of(IVInst->users(),
2006 [&](const User *U) { return U == IV || U == Cmp; }))
2007 InstsToIgnore.insert(IVInst);
2008 }
2009}
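// For example, if the loop runs exactly one vector iteration, an increment
// like %iv.next = add %iv, %step that only feeds the induction phi and the
// latch compare folds away together with the compare, so both are excluded
// from the cost.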
2010
2012 // Create a new IR basic block for the scalar preheader.
2013 BasicBlock *ScalarPH = createScalarPreheader("");
2014 return ScalarPH->getSinglePredecessor();
2015}
2016
2017namespace {
2018
2019struct CSEDenseMapInfo {
2020 static bool canHandle(const Instruction *I) {
2023 }
2024
2025 static inline Instruction *getEmptyKey() {
2026 return DenseMapInfo<Instruction *>::getEmptyKey();
2027 }
2028
2029 static inline Instruction *getTombstoneKey() {
2030 return DenseMapInfo<Instruction *>::getTombstoneKey();
2031 }
2032
2033 static unsigned getHashValue(const Instruction *I) {
2034 assert(canHandle(I) && "Unknown instruction!");
2035 return hash_combine(I->getOpcode(),
2036 hash_combine_range(I->operand_values()));
2037 }
2038
2039 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
2040 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
2041 LHS == getTombstoneKey() || RHS == getTombstoneKey())
2042 return LHS == RHS;
2043 return LHS->isIdenticalTo(RHS);
2044 }
2045};
2046
2047} // end anonymous namespace
2048
2049/// FIXME: This legacy common-subexpression-elimination routine is scheduled for
2050/// removal, in favor of the VPlan-based one.
2051static void legacyCSE(BasicBlock *BB) {
2052 // Perform simple CSE.
2053 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
2054 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
2055 if (!CSEDenseMapInfo::canHandle(&In))
2056 continue;
2057
2058 // Check if we can replace this instruction with any of the
2059 // visited instructions.
2060 if (Instruction *V = CSEMap.lookup(&In)) {
2061 In.replaceAllUsesWith(V);
2062 In.eraseFromParent();
2063 continue;
2064 }
2065
2066 CSEMap[&In] = &In;
2067 }
2068}
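// For example, if widening emits two identical instructions (of a kind that
// CSEDenseMapInfo::canHandle accepts) into the block, the second occurrence is
// replaced by the first and erased.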
2069
2070/// This function attempts to return a value that represents the ElementCount
2071/// at runtime. For fixed-width VFs we know this precisely at compile
2072/// time, but for scalable VFs we calculate it based on an estimate of the
2073/// vscale value.
2075 std::optional<unsigned> VScale) {
2076 unsigned EstimatedVF = VF.getKnownMinValue();
2077 if (VF.isScalable())
2078 if (VScale)
2079 EstimatedVF *= *VScale;
2080 assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");
2081 return EstimatedVF;
2082}
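// For example, a scalable VF of 'vscale x 4' with an estimated vscale of 2
// yields 8, while a fixed VF of 8 yields exactly 8.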
2083
2086 ElementCount VF) const {
2087 // We only need to calculate a cost if the VF is scalar; for actual vectors
2088 // we should already have a pre-calculated cost at each VF.
2089 if (!VF.isScalar())
2090 return getCallWideningDecision(CI, VF).Cost;
2091
2092 Type *RetTy = CI->getType();
2094 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy))
2095 return *RedCost;
2096
2097 SmallVector<Type *, 4> Tys;
2098 for (auto &ArgOp : CI->args())
2099 Tys.push_back(ArgOp->getType());
2100
2101 InstructionCost ScalarCallCost = TTI.getCallInstrCost(
2102 CI->getCalledFunction(), RetTy, Tys, Config.CostKind);
2103
2104 // If this is an intrinsic we may have a lower cost for it.
2107 return std::min(ScalarCallCost, IntrinsicCost);
2108 }
2109 return ScalarCallCost;
2110}
2111
2113 if (VF.isScalar() || !canVectorizeTy(Ty))
2114 return Ty;
2115 return toVectorizedTy(Ty, VF);
2116}
2117
2120 ElementCount VF) const {
2122 assert(ID && "Expected intrinsic call!");
2123 Type *RetTy = maybeVectorizeType(CI->getType(), VF);
2124 FastMathFlags FMF;
2125 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
2126 FMF = FPMO->getFastMathFlags();
2127
2130 SmallVector<Type *> ParamTys;
2131 std::transform(FTy->param_begin(), FTy->param_end(),
2132 std::back_inserter(ParamTys),
2133 [&](Type *Ty) { return maybeVectorizeType(Ty, VF); });
2134
2135 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
2138 return TTI.getIntrinsicInstrCost(CostAttrs, Config.CostKind);
2139}
2140
2142 // Fix widened non-induction PHIs by setting up the PHI operands.
2143 fixNonInductionPHIs(State);
2144
2145 // Don't apply optimizations below when no (vector) loop remains, as they all
2146 // require one at the moment.
2147 VPBasicBlock *HeaderVPBB =
2148 vputils::getFirstLoopHeader(*State.Plan, State.VPDT);
2149 if (!HeaderVPBB)
2150 return;
2151
2152 BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB];
2153
2154 // Remove redundant induction instructions.
2155 legacyCSE(HeaderBB);
2156}
2157
2159 auto Iter = vp_depth_first_shallow(Plan.getEntry());
2161 for (VPRecipeBase &P : VPBB->phis()) {
2163 if (!VPPhi)
2164 continue;
2165 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi));
2166 // Make sure the builder has a valid insert point.
2167 Builder.SetInsertPoint(NewPhi);
2168 for (const auto &[Inc, VPBB] : VPPhi->incoming_values_and_blocks())
2169 NewPhi->addIncoming(State.get(Inc), State.CFG.VPBB2IRBB[VPBB]);
2170 }
2171 }
2172}
2173
2174void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
2175 // We should not collect Scalars more than once per VF. Right now, this
2176 // function is called from collectUniformsAndScalars(), which already does
2177 // this check. Collecting Scalars for VF=1 does not make any sense.
2178 assert(VF.isVector() && !Scalars.contains(VF) &&
2179 "This function should not be visited twice for the same VF");
2180
2181 // This avoids any chances of creating a REPLICATE recipe during planning
2182 // since that would result in generation of scalarized code during execution,
2183 // which is not supported for scalable vectors.
2184 if (VF.isScalable()) {
2185 Scalars[VF].insert_range(Uniforms[VF]);
2186 return;
2187 }
2188
2189 SetVector<Instruction *> Worklist;
2190
2191 // These sets are used to seed the analysis with pointers used by memory
2192 // accesses that will remain scalar.
2193 SmallSetVector<Instruction *, 8> ScalarPtrs;
2194 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
2195 auto *Latch = TheLoop->getLoopLatch();
2196
2197 // A helper that returns true if the use of Ptr by MemAccess will be scalar.
2198 // The pointer operands of loads and stores will be scalar as long as the
2199 // memory access is not a gather or scatter operation. The value operand of a
2200 // store will remain scalar if the store is scalarized.
2201 auto IsScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
2202 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
2203 assert(WideningDecision != CM_Unknown &&
2204 "Widening decision should be ready at this moment");
2205 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
2206 if (Ptr == Store->getValueOperand())
2207 return WideningDecision == CM_Scalarize;
2208 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
2209 "Ptr is neither a value or pointer operand");
2210 return WideningDecision != CM_GatherScatter;
2211 };
2212
2213 // A helper that returns true if the given value is a getelementptr
2214 // instruction contained in the loop.
2215 auto IsLoopVaryingGEP = [&](Value *V) {
2216 return isa<GetElementPtrInst>(V) && !TheLoop->isLoopInvariant(V);
2217 };
2218
2219 // A helper that evaluates a memory access's use of a pointer. If the use will
2220 // be a scalar use and the pointer is only used by memory accesses, we place
2221 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
2222 // PossibleNonScalarPtrs.
2223 auto EvaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
2224 // We only care about bitcast and getelementptr instructions contained in
2225 // the loop.
2226 if (!IsLoopVaryingGEP(Ptr))
2227 return;
2228
2229 // If the pointer has already been identified as scalar (e.g., if it was
2230 // also identified as uniform), there's nothing to do.
2231 auto *I = cast<Instruction>(Ptr);
2232 if (Worklist.count(I))
2233 return;
2234
2235 // If the use of the pointer will be a scalar use, and all users of the
2236 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
2237 // place the pointer in PossibleNonScalarPtrs.
2238 if (IsScalarUse(MemAccess, Ptr) &&
2240 ScalarPtrs.insert(I);
2241 else
2242 PossibleNonScalarPtrs.insert(I);
2243 };
2244
2245 // We seed the scalars analysis with two classes of instructions: (1)
2246 // instructions marked uniform-after-vectorization and (2) bitcast,
2247 // getelementptr and (pointer) phi instructions used by memory accesses
2248 // requiring a scalar use.
2249 //
2250 // (1) Add to the worklist all instructions that have been identified as
2251 // uniform-after-vectorization.
2252 Worklist.insert_range(Uniforms[VF]);
2253
2254 // (2) Add to the worklist all bitcast and getelementptr instructions used by
2255 // memory accesses requiring a scalar use. The pointer operands of loads and
2256 // stores will be scalar unless the operation is a gather or scatter.
2257 // The value operand of a store will remain scalar if the store is scalarized.
2258 for (auto *BB : TheLoop->blocks())
2259 for (auto &I : *BB) {
2260 if (auto *Load = dyn_cast<LoadInst>(&I)) {
2261 EvaluatePtrUse(Load, Load->getPointerOperand());
2262 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
2263 EvaluatePtrUse(Store, Store->getPointerOperand());
2264 EvaluatePtrUse(Store, Store->getValueOperand());
2265 }
2266 }
2267 for (auto *I : ScalarPtrs)
2268 if (!PossibleNonScalarPtrs.count(I)) {
2269 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
2270 Worklist.insert(I);
2271 }
2272
2273 // Insert the forced scalars.
2274 // FIXME: Currently VPWidenPHIRecipe() often creates a dead vector
2275 // induction variable when the PHI user is scalarized.
2276 auto ForcedScalar = ForcedScalars.find(VF);
2277 if (ForcedScalar != ForcedScalars.end())
2278 for (auto *I : ForcedScalar->second) {
2279 LLVM_DEBUG(dbgs() << "LV: Found (forced) scalar instruction: " << *I << "\n");
2280 Worklist.insert(I);
2281 }
2282
2283 // Expand the worklist by looking through any bitcasts and getelementptr
2284 // instructions we've already identified as scalar. This is similar to the
2285 // expansion step in collectLoopUniforms(); however, here we're only
2286 // expanding to include additional bitcasts and getelementptr instructions.
2287 unsigned Idx = 0;
2288 while (Idx != Worklist.size()) {
2289 Instruction *Dst = Worklist[Idx++];
2290 if (!IsLoopVaryingGEP(Dst->getOperand(0)))
2291 continue;
2292 auto *Src = cast<Instruction>(Dst->getOperand(0));
2293 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
2294 auto *J = cast<Instruction>(U);
2295 return !TheLoop->contains(J) || Worklist.count(J) ||
2296 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
2297 IsScalarUse(J, Src));
2298 })) {
2299 Worklist.insert(Src);
2300 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
2301 }
2302 }
2303
2304 // An induction variable will remain scalar if all users of the induction
2305 // variable and induction variable update remain scalar.
2306 for (const auto &Induction : Legal->getInductionVars()) {
2307 auto *Ind = Induction.first;
2308 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
2309
2310 // If tail-folding is applied, the primary induction variable will be used
2311 // to feed a vector compare.
2312 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
2313 continue;
2314
2315 // Returns true if \p Indvar is a pointer induction that is used directly by
2316 // load/store instruction \p I.
2317 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
2318 Instruction *I) {
2319 return Induction.second.getKind() ==
2322 Indvar == getLoadStorePointerOperand(I) && IsScalarUse(I, Indvar);
2323 };
2324
2325 // Determine if all users of the induction variable are scalar after
2326 // vectorization.
2327 bool ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
2328 auto *I = cast<Instruction>(U);
2329 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2330 IsDirectLoadStoreFromPtrIndvar(Ind, I);
2331 });
2332 if (!ScalarInd)
2333 continue;
2334
2335 // If the induction variable update is a fixed-order recurrence, neither the
2336 // induction variable nor its update should be marked scalar after
2337 // vectorization.
2338 auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
2339 if (IndUpdatePhi && Legal->isFixedOrderRecurrence(IndUpdatePhi))
2340 continue;
2341
2342 // Determine if all users of the induction variable update instruction are
2343 // scalar after vectorization.
2344 bool ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
2345 auto *I = cast<Instruction>(U);
2346 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
2347 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
2348 });
2349 if (!ScalarIndUpdate)
2350 continue;
2351
2352 // The induction variable and its update instruction will remain scalar.
2353 Worklist.insert(Ind);
2354 Worklist.insert(IndUpdate);
2355 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
2356 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
2357 << "\n");
2358 }
2359
2360 Scalars[VF].insert_range(Worklist);
2361}
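// For example, a loop-varying getelementptr that is only used as the address
// of consecutive (non-gather/scatter) loads and stores remains scalar, as does
// an induction variable whose only users are its update and such addresses.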
2362
2364 ElementCount VF) {
2365 if (!isPredicatedInst(I))
2366 return false;
2367
2368 // Do we have a non-scalar lowering for this predicated
2369 // instruction? No - it is scalar with predication.
2370 switch (I->getOpcode()) {
2371 default:
2372 return true;
2373 case Instruction::Call:
2374 if (VF.isScalar())
2375 return true;
2377 case Instruction::Load:
2378 case Instruction::Store: {
2379 bool IsConsecutive = Legal->isConsecutivePtr(getLoadStoreType(I),
2380 getLoadStorePointerOperand(I));
2381 return !(IsConsecutive && Config.isLegalMaskedLoadOrStore(I, VF)) &&
2382 !Config.isLegalGatherOrScatter(I, VF);
2383 }
2384 case Instruction::UDiv:
2385 case Instruction::SDiv:
2386 case Instruction::SRem:
2387 case Instruction::URem: {
2388 // We have the option to use the llvm.masked.udiv intrinsics to avoid
2389 // predication. The cost based decision here will always select the masked
2390 // intrinsics for scalable vectors as scalarization isn't legal.
2391 const auto [ScalarCost, MaskedCost] = getDivRemSpeculationCost(I, VF);
2392 return isDivRemScalarWithPredication(ScalarCost, MaskedCost);
2393 }
2394 }
2395}
2396
2398 return Legal->isMaskRequired(I, foldTailByMasking());
2399}
2400
2401// TODO: Fold into LoopVectorizationLegality::isMaskRequired.
2403 // TODO: We can use the loop-preheader as context point here and get
2404 // context sensitive reasoning for isSafeToSpeculativelyExecute.
2408 return false;
2409
2410 // If the instruction was executed conditionally in the original scalar loop,
2411 // predication is needed with a mask whose lanes are all possibly inactive.
2412 if (Legal->blockNeedsPredication(I->getParent()))
2413 return true;
2414
2415 // If we're not folding the tail by masking, predication is unnecessary.
2416 if (!foldTailByMasking())
2417 return false;
2418
2419 // All that remain are instructions with side-effects originally executed in
2420 // the loop unconditionally, but now execute under a tail-fold mask (only)
2421 // having at least one active lane (the first). If the side-effects of the
2422 // instruction are invariant, executing it w/o (the tail-folding) mask is safe
2423 // - it will cause the same side-effects as when masked.
2424 switch (I->getOpcode()) {
2425 default:
2427 "instruction should have been considered by earlier checks");
2428 case Instruction::Call:
2429 // Side-effects of a Call are assumed to be non-invariant, needing a
2430 // (fold-tail) mask.
2432 "should have returned earlier for calls not needing a mask");
2433 return true;
2434 case Instruction::Load:
2435 // If the address is loop invariant no predication is needed.
2436 return !Legal->isInvariant(getLoadStorePointerOperand(I));
2437 case Instruction::Store: {
2438 // For stores, we need to prove not only speculation safety (which follows
2439 // from the same argument as loads), but also that the value being stored
2440 // is correct. The easiest form of the latter is to require that all values
2441 // stored are the same.
2442 return !(Legal->isInvariant(getLoadStorePointerOperand(I)) &&
2443 TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand()));
2444 }
2445 case Instruction::UDiv:
2446 case Instruction::URem:
2447 // If the divisor is loop-invariant no predication is needed.
2448 return !Legal->isInvariant(I->getOperand(1));
2449 case Instruction::SDiv:
2450 case Instruction::SRem:
2451 // Conservative for now, since masked-off lanes may be poison and could
2452 // trigger signed overflow.
2453 return true;
2454 }
2455}
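// For example, under tail folding a 'udiv' whose divisor is loop-invariant
// needs no mask: the masked-off lanes would perform the same division as the
// active lanes, so no new fault can be introduced.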
2456
2460 return 1;
2461 // If the block wasn't originally predicated then return early to avoid
2462 // computing BlockFrequencyInfo unnecessarily.
2463 if (!Legal->blockNeedsPredication(BB))
2464 return 1;
2465
2466 uint64_t HeaderFreq =
2467 getBFI().getBlockFreq(TheLoop->getHeader()).getFrequency();
2468 uint64_t BBFreq = getBFI().getBlockFreq(BB).getFrequency();
2469 assert(HeaderFreq >= BBFreq &&
2470 "Header has smaller block freq than dominated BB?");
2471 return std::round((double)HeaderFreq / BBFreq);
2472}
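// For example, if the loop header has a block frequency of 1000 and the
// predicated block a frequency of 250, the divisor is 4, i.e. the block is
// assumed to execute on roughly one in four iterations.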
2473
2475 switch (Opcode) {
2476 case Instruction::UDiv:
2477 return Intrinsic::masked_udiv;
2478 case Instruction::SDiv:
2479 return Intrinsic::masked_sdiv;
2480 case Instruction::URem:
2481 return Intrinsic::masked_urem;
2482 case Instruction::SRem:
2483 return Intrinsic::masked_srem;
2484 default:
2485 llvm_unreachable("Unexpected opcode");
2486 }
2487}
2488
2489std::pair<InstructionCost, InstructionCost>
2491 ElementCount VF) {
2492 assert(I->getOpcode() == Instruction::UDiv ||
2493 I->getOpcode() == Instruction::SDiv ||
2494 I->getOpcode() == Instruction::SRem ||
2495 I->getOpcode() == Instruction::URem);
2497
2498 // Scalarization isn't legal for scalable vector types
2499 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2500 if (!VF.isScalable()) {
2501 // Get the scalarization cost and scale this amount by the probability of
2502 // executing the predicated block. If the instruction is not predicated,
2503 // we fall through to the next case.
2504 ScalarizationCost = 0;
2505
2506 // These instructions have a non-void type, so account for the phi nodes
2507 // that we will create. This cost is likely to be zero. The phi node
2508 // cost, if any, should be scaled by the block probability because it
2509 // models a copy at the end of each predicated block.
2510 ScalarizationCost += VF.getFixedValue() *
2511 TTI.getCFInstrCost(Instruction::PHI, Config.CostKind);
2512
2513 // The cost of the non-predicated instruction.
2514 ScalarizationCost +=
2515 VF.getFixedValue() * TTI.getArithmeticInstrCost(
2516 I->getOpcode(), I->getType(), Config.CostKind);
2517
2518 // The cost of insertelement and extractelement instructions needed for
2519 // scalarization.
2520 ScalarizationCost += getScalarizationOverhead(I, VF);
2521
2522 // Scale the cost by the probability of executing the predicated blocks.
2523 // This assumes the predicated block for each vector lane is equally
2524 // likely.
2525 ScalarizationCost =
2526 ScalarizationCost /
2527 getPredBlockCostDivisor(Config.CostKind, I->getParent());
2528 }
2529
2530 auto *VecTy = toVectorTy(I->getType(), VF);
2531 auto *MaskTy = toVectorTy(Type::getInt1Ty(I->getContext()), VF);
2532 IntrinsicCostAttributes ICA(getMaskedDivRemIntrinsic(I->getOpcode()), VecTy,
2533 {VecTy, VecTy, MaskTy});
2534 InstructionCost MaskedCost = TTI.getIntrinsicInstrCost(ICA, Config.CostKind);
2535 return {ScalarizationCost, MaskedCost};
2536}
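// For example, for a predicated 'udiv' at VF = 4 this compares
// 4 * (phi + udiv) plus insert/extract overhead, scaled down by the
// predicated-block divisor, against the cost of a single llvm.masked.udiv
// call on the vector type; the caller then picks the cheaper alternative.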
2537
2539 Instruction *I, ElementCount VF) const {
2540 assert(isAccessInterleaved(I) && "Expecting interleaved access.");
2542 "Decision should not be set yet.");
2543 auto *Group = getInterleavedAccessGroup(I);
2544 assert(Group && "Must have a group.");
2545 unsigned InterleaveFactor = Group->getFactor();
2546
2547 // If the instruction's allocated size doesn't equal its type size, it
2548 // requires padding and will be scalarized.
2549 auto &DL = I->getDataLayout();
2550 auto *ScalarTy = getLoadStoreType(I);
2551 if (hasIrregularType(ScalarTy, DL))
2552 return false;
2553
2554 // For scalable vectors, the interleave factors must be <= 8 since we require
2555 // the (de)interleaveN intrinsics instead of shufflevectors.
2556 if (VF.isScalable() && InterleaveFactor > 8)
2557 return false;
2558
2559 // If the group involves a non-integral pointer, we may not be able to
2560 // losslessly cast all values to a common type.
2561 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
2562 for (Instruction *Member : Group->members()) {
2563 auto *MemberTy = getLoadStoreType(Member);
2564 bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
2565 // Don't coerce non-integral pointers to integers or vice versa.
2566 if (MemberNI != ScalarNI)
2567 // TODO: Consider adding special nullptr value case here
2568 return false;
2569 if (MemberNI && ScalarNI &&
2570 ScalarTy->getPointerAddressSpace() !=
2571 MemberTy->getPointerAddressSpace())
2572 return false;
2573 }
2574
2575 // Check if masking is required.
2576 // A Group may need masking for one of two reasons: it resides in a block that
2577 // needs predication, or it was decided to use masking to deal with gaps
2578 // (either a gap at the end of a load-access that may result in a speculative
2579 // load, or any gaps in a store-access).
2580 bool PredicatedAccessRequiresMasking =
2582 bool LoadAccessWithGapsRequiresEpilogMasking =
2583 isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
2585 bool StoreAccessWithGapsRequiresMasking =
2586 isa<StoreInst>(I) && !Group->isFull();
2587 if (!PredicatedAccessRequiresMasking &&
2588 !LoadAccessWithGapsRequiresEpilogMasking &&
2589 !StoreAccessWithGapsRequiresMasking)
2590 return true;
2591
2592 // If masked interleaving is required, we expect that the user/target had
2593 // enabled it, because otherwise it either wouldn't have been created or
2594 // it should have been invalidated by the CostModel.
2596 "Masked interleave-groups for predicated accesses are not enabled.");
2597
2598 if (Group->isReverse())
2599 return false;
2600
2601 // TODO: Support interleaved access that requires a gap mask for scalable VFs.
2602 bool NeedsMaskForGaps = LoadAccessWithGapsRequiresEpilogMasking ||
2603 StoreAccessWithGapsRequiresMasking;
2604 if (VF.isScalable() && NeedsMaskForGaps)
2605 return false;
2606
2607 return Config.isLegalMaskedLoadOrStore(I, VF);
2608}
2609
2611 Instruction *I, ElementCount VF) {
2612 // Get and ensure we have a valid memory instruction.
2613 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
2614
2615 auto *Ptr = getLoadStorePointerOperand(I);
2616 auto *ScalarTy = getLoadStoreType(I);
2617
2618 // In order to be widened, the pointer should be consecutive, first of all.
2619 if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
2620 return false;
2621
2622 // If the instruction is a store located in a predicated block, it will be
2623 // scalarized.
2624 if (isScalarWithPredication(I, VF))
2625 return false;
2626
2627 // If the instruction's allocated size doesn't equal its type size, it
2628 // requires padding and will be scalarized.
2629 auto &DL = I->getDataLayout();
2630 if (hasIrregularType(ScalarTy, DL))
2631 return false;
2632
2633 return true;
2634}
2635
2636void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
2637 // We should not collect Uniforms more than once per VF. Right now,
2638 // this function is called from collectUniformsAndScalars(), which
2639 // already does this check. Collecting Uniforms for VF=1 does not make any
2640 // sense.
2641
2642 assert(VF.isVector() && !Uniforms.contains(VF) &&
2643 "This function should not be visited twice for the same VF");
2644
2645 // Visit the list of Uniforms. Creating the entry up front ensures that, even
2646 // if no uniform value is found, Uniforms.contains(VF) holds and we won't analyze again.
2647 Uniforms[VF].clear();
2648
2649 // Now we know that the loop is vectorizable!
2650 // Collect instructions inside the loop that will remain uniform after
2651 // vectorization.
2652
2653 // Global values, params and instructions outside of current loop are out of
2654 // scope.
2655 auto IsOutOfScope = [&](Value *V) -> bool {
2657 return (!I || !TheLoop->contains(I));
2658 };
2659
2660 // Worklist containing uniform instructions demanding lane 0.
2661 SetVector<Instruction *> Worklist;
2662
2663 // Add uniform instructions demanding lane 0 to the worklist. Instructions
2664 // that require predication must not be considered uniform after
2665 // vectorization, because that would create an erroneous replicating region
2666 // where only a single instance out of VF should be formed.
2667 auto AddToWorklistIfAllowed = [&](Instruction *I) -> void {
2668 if (IsOutOfScope(I)) {
2669 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
2670 << *I << "\n");
2671 return;
2672 }
2673 if (isPredicatedInst(I)) {
2674 LLVM_DEBUG(
2675 dbgs() << "LV: Found not uniform due to requiring predication: " << *I
2676 << "\n");
2677 return;
2678 }
2679 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
2680 Worklist.insert(I);
2681 };
2682
2683 // Start with the conditional branches exiting the loop. If the branch
2684 // condition is an instruction contained in the loop that is only used by the
2685 // branch, it is uniform. Note conditions from uncountable early exits are not
2686 // uniform.
2688 TheLoop->getExitingBlocks(Exiting);
2689 for (BasicBlock *E : Exiting) {
2690 if (Legal->hasUncountableEarlyExit() && TheLoop->getLoopLatch() != E)
2691 continue;
2692 auto *Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
2693 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
2694 AddToWorklistIfAllowed(Cmp);
2695 }
2696
2697 auto PrevVF = VF.divideCoefficientBy(2);
2698 // Return true if all lanes perform the same memory operation, and we can
2699 // thus choose to execute only one.
2700 auto IsUniformMemOpUse = [&](Instruction *I) {
2701 // If the value was already known to not be uniform for the previous
2702 // (smaller VF), it cannot be uniform for the larger VF.
2703 if (PrevVF.isVector()) {
2704 auto Iter = Uniforms.find(PrevVF);
2705 if (Iter != Uniforms.end() && !Iter->second.contains(I))
2706 return false;
2707 }
2708 if (!Legal->isUniformMemOp(*I, VF))
2709 return false;
2710 if (isa<LoadInst>(I))
2711 // Loading the same address always produces the same result - at least
2712 // assuming aliasing and ordering which have already been checked.
2713 return true;
2714 // Storing the same value on every iteration.
2715 return TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand());
2716 };
2717
2718 auto IsUniformDecision = [&](Instruction *I, ElementCount VF) {
2719 InstWidening WideningDecision = getWideningDecision(I, VF);
2720 assert(WideningDecision != CM_Unknown &&
2721 "Widening decision should be ready at this moment");
2722
2723 if (IsUniformMemOpUse(I))
2724 return true;
2725
2726 return (WideningDecision == CM_Widen ||
2727 WideningDecision == CM_Widen_Reverse ||
2728 WideningDecision == CM_Interleave);
2729 };
2730
2731 // Returns true if Ptr is the pointer operand of a memory access instruction
2732 // I, I is known to not require scalarization, and the pointer is not also
2733 // stored.
2734 auto IsVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
2735 if (isa<StoreInst>(I) && I->getOperand(0) == Ptr)
2736 return false;
2737 return getLoadStorePointerOperand(I) == Ptr &&
2738 (IsUniformDecision(I, VF) || Legal->isInvariant(Ptr));
2739 };
2740
2741 // Holds a list of values which are known to have at least one uniform use.
2742 // Note that there may be other uses which aren't uniform. A "uniform use"
2743 // here is something which only demands lane 0 of the unrolled iterations;
2744 // it does not imply that all lanes produce the same value (e.g. this is not
2745 // the usual meaning of uniform)
2746 SetVector<Value *> HasUniformUse;
2747
2748 // Scan the loop for instructions which are either a) known to have only
2749 // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
2750 for (auto *BB : TheLoop->blocks())
2751 for (auto &I : *BB) {
2752 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
2753 switch (II->getIntrinsicID()) {
2754 case Intrinsic::sideeffect:
2755 case Intrinsic::experimental_noalias_scope_decl:
2756 case Intrinsic::assume:
2757 case Intrinsic::lifetime_start:
2758 case Intrinsic::lifetime_end:
2759 if (TheLoop->hasLoopInvariantOperands(&I))
2760 AddToWorklistIfAllowed(&I);
2761 break;
2762 default:
2763 break;
2764 }
2765 }
2766
2767 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
2768 if (IsOutOfScope(EVI->getAggregateOperand())) {
2769 AddToWorklistIfAllowed(EVI);
2770 continue;
2771 }
2772 // Only ExtractValue instructions where the aggregate value comes from a
2773 // call are allowed to be non-uniform.
2774 assert(isa<CallInst>(EVI->getAggregateOperand()) &&
2775 "Expected aggregate value to be call return value");
2776 }
2777
2778 // If there's no pointer operand, there's nothing to do.
2779 auto *Ptr = getLoadStorePointerOperand(&I);
2780 if (!Ptr)
2781 continue;
2782
2783 // If the pointer can be proven to be uniform, always add it to the
2784 // worklist.
2785 if (isa<Instruction>(Ptr) && Legal->isUniform(Ptr, VF))
2786 AddToWorklistIfAllowed(cast<Instruction>(Ptr));
2787
2788 if (IsUniformMemOpUse(&I))
2789 AddToWorklistIfAllowed(&I);
2790
2791 if (IsVectorizedMemAccessUse(&I, Ptr))
2792 HasUniformUse.insert(Ptr);
2793 }
2794
2795 // Add to the worklist any operands which have *only* uniform (e.g. lane 0
2796 // demanding) users. Since loops are assumed to be in LCSSA form, this
2797 // disallows uses outside the loop as well.
2798 for (auto *V : HasUniformUse) {
2799 if (IsOutOfScope(V))
2800 continue;
2801 auto *I = cast<Instruction>(V);
2802 bool UsersAreMemAccesses = all_of(I->users(), [&](User *U) -> bool {
2803 auto *UI = cast<Instruction>(U);
2804 return TheLoop->contains(UI) && IsVectorizedMemAccessUse(UI, V);
2805 });
2806 if (UsersAreMemAccesses)
2807 AddToWorklistIfAllowed(I);
2808 }
2809
2810 // Expand Worklist in topological order: whenever a new instruction
2811 // is added, its users should already be inside Worklist. This ensures
2812 // a uniform instruction will only be used by uniform instructions.
2813 unsigned Idx = 0;
2814 while (Idx != Worklist.size()) {
2815 Instruction *I = Worklist[Idx++];
2816
2817 for (auto *OV : I->operand_values()) {
2818 // isOutOfScope operands cannot be uniform instructions.
2819 if (IsOutOfScope(OV))
2820 continue;
2821 // First order recurrence Phi's should typically be considered
2822 // non-uniform.
2823 auto *OP = dyn_cast<PHINode>(OV);
2824 if (OP && Legal->isFixedOrderRecurrence(OP))
2825 continue;
2826 // If all the users of the operand are uniform, then add the
2827 // operand into the uniform worklist.
2828 auto *OI = cast<Instruction>(OV);
2829 if (llvm::all_of(OI->users(), [&](User *U) -> bool {
2830 auto *J = cast<Instruction>(U);
2831 return Worklist.count(J) || IsVectorizedMemAccessUse(J, OI);
2832 }))
2833 AddToWorklistIfAllowed(OI);
2834 }
2835 }
2836
2837 // For an instruction to be added into Worklist above, all its users inside
2838 // the loop should also be in Worklist. However, this condition cannot be
2839 // true for phi nodes that form a cyclic dependence. We must process phi
2840 // nodes separately. An induction variable will remain uniform if all users
2841 // of the induction variable and induction variable update remain uniform.
2842 // The code below handles both pointer and non-pointer induction variables.
2843 BasicBlock *Latch = TheLoop->getLoopLatch();
2844 for (const auto &Induction : Legal->getInductionVars()) {
2845 auto *Ind = Induction.first;
2846 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
2847
2848 // Determine if all users of the induction variable are uniform after
2849 // vectorization.
2850 bool UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
2851 auto *I = cast<Instruction>(U);
2852 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2853 IsVectorizedMemAccessUse(I, Ind);
2854 });
2855 if (!UniformInd)
2856 continue;
2857
2858 // Determine if all users of the induction variable update instruction are
2859 // uniform after vectorization.
2860 bool UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
2861 auto *I = cast<Instruction>(U);
2862 return I == Ind || Worklist.count(I) ||
2863 IsVectorizedMemAccessUse(I, IndUpdate);
2864 });
2865 if (!UniformIndUpdate)
2866 continue;
2867
2868 // The induction variable and its update instruction will remain uniform.
2869 AddToWorklistIfAllowed(Ind);
2870 AddToWorklistIfAllowed(IndUpdate);
2871 }
2872
2873 Uniforms[VF].insert_range(Worklist);
2874}
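// For example, a pointer that is only used as the address of a uniform memory
// operation (one executed identically for all lanes) is itself uniform, as is
// the compare feeding the latch branch when it has no other users.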
2875
2876FixedScalableVFPair
2878 // For outer loops, use simple type-based heuristic VF. No cost model or
2879 // memory dependence analysis is available.
2880 if (!TheLoop->isInnermost()) {
2881 return Config.computeVPlanOuterloopVF(UserVF);
2882 }
2883
2884 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
2885 // TODO: It may still be useful to vectorize, since the runtime checks are
2886 // likely to be dynamically uniform if the target can skip them.
2888 "Not inserting runtime ptr check for divergent target",
2889 "runtime pointer checks needed. Not enabled for divergent target",
2890 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
2892 }
2893
2894 ScalarEvolution *SE = PSE.getSE();
2896 unsigned MaxTC = PSE.getSmallConstantMaxTripCount();
2897 if (!MaxTC && EpilogueLoweringStatus == CM_EpilogueAllowed)
2899 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
2900 if (TC != ElementCount::getFixed(MaxTC))
2901 LLVM_DEBUG(dbgs() << "LV: Found maximum trip count: " << MaxTC << '\n');
2902 if (TC.isScalar()) {
2903 reportVectorizationFailure("Single iteration (non) loop",
2904 "loop trip count is one, irrelevant for vectorization",
2905 "SingleIterationLoop", ORE, TheLoop);
2907 }
2908
2909 // If BTC matches the widest induction type and is -1 then the trip count
2910 // computation will wrap to 0 and the vector trip count will be 0. Do not try
2911 // to vectorize.
2912 const SCEV *BTC = SE->getBackedgeTakenCount(TheLoop);
2913 if (!isa<SCEVCouldNotCompute>(BTC) &&
2914 BTC->getType()->getScalarSizeInBits() >=
2915 Legal->getWidestInductionType()->getScalarSizeInBits() &&
2917 SE->getMinusOne(BTC->getType()))) {
2919 "Trip count computation wrapped",
2920 "backedge-taken count is -1, loop trip count wrapped to 0",
2921 "TripCountWrapped", ORE, TheLoop);
2923 }
2924
2925 assert(WideningDecisions.empty() && CallWideningDecisions.empty() &&
2926 Uniforms.empty() && Scalars.empty() &&
2927 "No cost-modeling decisions should have been taken at this point");
2928
2929 switch (EpilogueLoweringStatus) {
2930 case CM_EpilogueAllowed:
2931 return Config.computeFeasibleMaxVF(MaxTC, UserVF, UserIC, false,
2934 [[fallthrough]];
2936 LLVM_DEBUG(dbgs() << "LV: tail-folding hint/switch found.\n"
2937 << "LV: Not allowing epilogue, creating tail-folded "
2938 << "vector loop.\n");
2939 break;
2941 // fallthrough as a special case of OptForSize
2943 if (EpilogueLoweringStatus == CM_EpilogueNotAllowedOptSize)
2944 LLVM_DEBUG(dbgs() << "LV: Not allowing epilogue due to -Os/-Oz.\n");
2945 else
2946 LLVM_DEBUG(dbgs() << "LV: Not allowing epilogue due to low trip "
2947 << "count.\n");
2948
2949 // Bail if runtime checks are required, which are not good when optimising
2950 // for size.
2951 if (Config.runtimeChecksRequired())
2953
2954 break;
2955 }
2956
2957 // Now try tail folding.
2958
2959 // Invalidate interleave groups that require an epilogue if we can't mask
2960 // the interleave-group.
2962 // Note: There is no need to invalidate any cost modeling decisions here, as
2963 // none were taken so far (see assertion above).
2964 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
2965 }
2966
2967 FixedScalableVFPair MaxFactors = Config.computeFeasibleMaxVF(
2968 MaxTC, UserVF, UserIC, true, requiresScalarEpilogue(true));
2969
2970 // Avoid tail folding if the trip count is known to be a multiple of any VF
2971 // we choose.
2972 std::optional<unsigned> MaxPowerOf2RuntimeVF =
2973 MaxFactors.FixedVF.getFixedValue();
2974 if (MaxFactors.ScalableVF) {
2975 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
2976 if (MaxVScale) {
2977 MaxPowerOf2RuntimeVF = std::max<unsigned>(
2978 *MaxPowerOf2RuntimeVF,
2979 *MaxVScale * MaxFactors.ScalableVF.getKnownMinValue());
2980 } else
2981 MaxPowerOf2RuntimeVF = std::nullopt; // Stick with tail-folding for now.
2982 }
2983
2984 auto NoScalarEpilogueNeeded = [this, &UserIC](unsigned MaxVF) {
2985 // Return false if the loop is neither a single-latch-exit loop nor an
2986 // early-exit loop as tail-folding is not supported in that case.
2987 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
2988 !Legal->hasUncountableEarlyExit())
2989 return false;
2990 unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
2991 ScalarEvolution *SE = PSE.getSE();
2992 // Calling getSymbolicMaxBackedgeTakenCount enables support for loops
2993 // with uncountable exits. For countable loops, the symbolic maximum must
2994 // remain identical to the known back-edge taken count.
2995 const SCEV *BackedgeTakenCount = PSE.getSymbolicMaxBackedgeTakenCount();
2996 assert((Legal->hasUncountableEarlyExit() ||
2997 BackedgeTakenCount == PSE.getBackedgeTakenCount()) &&
2998 "Invalid loop count");
2999 const SCEV *ExitCount = SE->getAddExpr(
3000 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3001 const SCEV *Rem = SE->getURemExpr(
3002 SE->applyLoopGuards(ExitCount, TheLoop),
3003 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
3004 return Rem->isZero();
3005 };
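  // For example, a loop with a known trip count of 64 and MaxVF * UserIC = 16
  // has remainder 0, so no scalar epilogue (and no tail folding) is needed.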
3006
3007 if (MaxPowerOf2RuntimeVF > 0u) {
3008 assert((UserVF.isNonZero() || isPowerOf2_32(*MaxPowerOf2RuntimeVF)) &&
3009 "MaxFixedVF must be a power of 2");
3010 if (NoScalarEpilogueNeeded(*MaxPowerOf2RuntimeVF)) {
3011 // Accept MaxFixedVF if we do not have a tail.
3012 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
3013 return MaxFactors;
3014 }
3015 }
3016
3017 auto ExpectedTC = getSmallBestKnownTC(PSE, TheLoop);
3018 if (ExpectedTC && ExpectedTC->isFixed() &&
3019 ExpectedTC->getFixedValue() <=
3020 TTI.getMinTripCountTailFoldingThreshold()) {
3021 if (MaxPowerOf2RuntimeVF > 0u) {
3022 // If we have a low trip count, and the fixed-width VF is known to divide
3023 // the trip count but the scalable factor does not, use the fixed-width
3024 // factor in preference to allow the generation of a non-predicated loop.
3025 if (EpilogueLoweringStatus == CM_EpilogueNotAllowedLowTripLoop &&
3026 NoScalarEpilogueNeeded(MaxFactors.FixedVF.getFixedValue())) {
3027 LLVM_DEBUG(dbgs() << "LV: Picking a fixed-width so that no tail will "
3028 "remain for any chosen VF.\n");
3029 MaxFactors.ScalableVF = ElementCount::getScalable(0);
3030 return MaxFactors;
3031 }
3032 }
3033
3035 "The trip count is below the minial threshold value.",
3036 "loop trip count is too low, avoiding vectorization", "LowTripCount",
3037 ORE, TheLoop);
3039 }
3040
3041 // If we don't know the precise trip count, or if the trip count that we
3042 // found modulo the vectorization factor is not zero, try to fold the tail
3043 // by masking.
3044 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
3045 bool ContainsScalableVF = MaxFactors.ScalableVF.isNonZero();
3046 setTailFoldingStyle(ContainsScalableVF, UserIC);
3047 if (foldTailByMasking()) {
3048 if (foldTailWithEVL()) {
3049 LLVM_DEBUG(
3050 dbgs()
3051 << "LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
3052 "try to generate VP Intrinsics with scalable vector "
3053 "factors only.\n");
3054 // Tail folded loop using VP intrinsics restricts the VF to be scalable
3055 // for now.
3056 // TODO: extend it for fixed vectors, if required.
3057 assert(ContainsScalableVF && "Expected scalable vector factor.");
3058
3059 MaxFactors.FixedVF = ElementCount::getFixed(1);
3060 }
3061 return MaxFactors;
3062 }
3063
3064 // If there was a tail-folding hint/switch, but we can't fold the tail by
3065 // masking, fall back to vectorization with an epilogue.
3066 if (EpilogueLoweringStatus == CM_EpilogueNotNeededFoldTail) {
3067 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with an "
3068 "epilogue instead.\n");
3069 EpilogueLoweringStatus = CM_EpilogueAllowed;
3070 return MaxFactors;
3071 }
3072
3073 if (EpilogueLoweringStatus == CM_EpilogueNotAllowedFoldTail) {
3074 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
3076 }
3077
3078 if (TC.isZero()) {
3080 "unable to calculate the loop count due to complex control flow",
3081 "UnknownLoopCountComplexCFG", ORE, TheLoop);
3083 }
3084
3086 "Cannot optimize for size and vectorize at the same time.",
3087 "cannot optimize for size and vectorize at the same time. "
3088 "Enable vectorization of this loop with '#pragma clang loop "
3089 "vectorize(enable)' when compiling with -Os/-Oz",
3090 "NoTailLoopWithOptForSize", ORE, TheLoop);
3092}
3093
3094bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3095 const VectorizationFactor &B,
3096 const unsigned MaxTripCount,
3097 bool HasTail,
3098 bool IsEpilogue) const {
3099 InstructionCost CostA = A.Cost;
3100 InstructionCost CostB = B.Cost;
3101
3102 // When there is a hint to always prefer scalable vectors, honour that hint.
3103 if (Hints.isScalableVectorizationAlwaysPreferred())
3104 if (A.Width.isScalable() && CostA.isValid() && !B.Width.isScalable() &&
3105 !B.Width.isScalar())
3106 return true;
3107
3108 // Improve estimate for the vector width if it is scalable.
3109 unsigned EstimatedWidthA = A.Width.getKnownMinValue();
3110 unsigned EstimatedWidthB = B.Width.getKnownMinValue();
3111 if (std::optional<unsigned> VScale = Config.getVScaleForTuning()) {
3112 if (A.Width.isScalable())
3113 EstimatedWidthA *= *VScale;
3114 if (B.Width.isScalable())
3115 EstimatedWidthB *= *VScale;
3116 }
3117
3118 // When optimizing for size, choose whichever has the smallest cost for the
3119 // whole loop. On a tie, pick the larger vector width, on the assumption
3120 // that throughput will be greater.
3121 if (Config.CostKind == TTI::TCK_CodeSize)
3122 return CostA < CostB ||
3123 (CostA == CostB && EstimatedWidthA > EstimatedWidthB);
3124
3125 // Assume vscale may be larger than 1 (or the value being tuned for),
3126 // so that scalable vectorization is slightly favorable over fixed-width
3127 // vectorization.
3128 bool PreferScalable = !TTI.preferFixedOverScalableIfEqualCost(IsEpilogue) &&
3129 A.Width.isScalable() && !B.Width.isScalable();
3130
3131 auto CmpFn = [PreferScalable](const InstructionCost &LHS,
3132 const InstructionCost &RHS) {
3133 return PreferScalable ? LHS <= RHS : LHS < RHS;
3134 };
3135
3136 // To avoid the need for FP division:
3137 // (CostA / EstimatedWidthA) < (CostB / EstimatedWidthB)
3138 // <=> (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA)
3139 bool LowerCostWithoutTC =
3140 CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);
3141 if (!MaxTripCount)
3142 return LowerCostWithoutTC;
3143
3144 auto GetCostForTC = [MaxTripCount, HasTail](unsigned VF,
3145 InstructionCost VectorCost,
3146 InstructionCost ScalarCost) {
3147 // If the trip count is a known (possibly small) constant, the trip count
3148 // will be rounded up to an integer number of iterations under
3149 // FoldTailByMasking. The total cost in that case will be
3150 // VecCost*ceil(TripCount/VF). When not folding the tail, the total
3151 // cost will be VecCost*floor(TC/VF) + ScalarCost*(TC%VF). There will be
3152 // some extra overheads, but for the purpose of comparing the costs of
3153 // different VFs we can use this to compare the total loop-body cost
3154 // expected after vectorization.
3155 if (HasTail)
3156 return VectorCost * (MaxTripCount / VF) +
3157 ScalarCost * (MaxTripCount % VF);
3158 return VectorCost * divideCeil(MaxTripCount, VF);
3159 };
3160
3161 auto RTCostA = GetCostForTC(EstimatedWidthA, CostA, A.ScalarCost);
3162 auto RTCostB = GetCostForTC(EstimatedWidthB, CostB, B.ScalarCost);
3163 bool LowerCostWithTC = CmpFn(RTCostA, RTCostB);
3164 LLVM_DEBUG(if (LowerCostWithTC != LowerCostWithoutTC) {
3165 dbgs() << "LV: VF " << (LowerCostWithTC ? A.Width : B.Width)
3166 << " has lower cost than VF "
3167 << (LowerCostWithTC ? B.Width : A.Width)
3168 << " when taking the cost of the remaining scalar loop iterations "
3169 "into consideration for a maximum trip count of "
3170 << MaxTripCount << ".\n";
3171 });
3172 return LowerCostWithTC;
3173}
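// A purely illustrative walk-through of the comparison above (all numbers are
// made up, not taken from any target): with CostA = 8 at width 4 and CostB = 6
// at width 2, the per-lane comparison avoids FP division by cross-multiplying:
//   CostA * WidthB = 8 * 2 = 16  <  CostB * WidthA = 6 * 4 = 24,
// so A wins per lane. With MaxTripCount = 10 and a scalar tail, the
// trip-count-aware totals are instead
//   RTCostA = 8 * floor(10 / 4) + ScalarCostA * (10 % 4) = 16 + 2 * ScalarCostA
//   RTCostB = 6 * floor(10 / 2) + ScalarCostB * (10 % 2) = 30
// so a small known trip count can flip the decision.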
3174
3175bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3176 const VectorizationFactor &B,
3177 bool HasTail,
3178 bool IsEpilogue) const {
3179 const unsigned MaxTripCount = PSE.getSmallConstantMaxTripCount();
3180 return LoopVectorizationPlanner::isMoreProfitable(A, B, MaxTripCount, HasTail,
3181 IsEpilogue);
3182}
3183
3186 using RecipeVFPair = std::pair<VPRecipeBase *, ElementCount>;
3187 SmallVector<RecipeVFPair> InvalidCosts;
3188 for (const auto &Plan : VPlans) {
3189 for (ElementCount VF : Plan->vectorFactors()) {
3190 // The VPlan-based cost model is designed for computing vector costs.
3191 // Querying the VPlan-based cost model with a scalar VF will cause
3192 // errors because we expect the VF to be a vector for most of the widen
3193 // recipes.
3194 if (VF.isScalar())
3195 continue;
3196
3197 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, Config.CostKind, CM.PSE,
3198 OrigLoop);
3199 precomputeCosts(*Plan, VF, CostCtx);
3200 auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry());
3201 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
3202 for (auto &R : *VPBB) {
3203 if (!R.cost(VF, CostCtx).isValid())
3204 InvalidCosts.emplace_back(&R, VF);
3205 }
3206 }
3207 }
3208 }
3209 if (InvalidCosts.empty())
3210 return;
3211
3212 // Emit a report of VFs with invalid costs in the loop.
3213
3214 // Group the remarks per recipe, keeping the recipe order from InvalidCosts.
3215 DenseMap<VPRecipeBase *, unsigned> Numbering;
3216 unsigned I = 0;
3217 for (auto &Pair : InvalidCosts)
3218 if (Numbering.try_emplace(Pair.first, I).second)
3219 ++I;
3220
3221 // Sort the list, first on recipe(number) then on VF.
3222 sort(InvalidCosts, [&Numbering](RecipeVFPair &A, RecipeVFPair &B) {
3223 unsigned NA = Numbering[A.first];
3224 unsigned NB = Numbering[B.first];
3225 if (NA != NB)
3226 return NA < NB;
3227 return ElementCount::isKnownLT(A.second, B.second);
3228 });
3229
3230 // For a list of ordered recipe-VF pairs:
3231 // [(load, VF1), (load, VF2), (store, VF1)]
3232 // group the recipes together to emit separate remarks for:
3233 // load (VF1, VF2)
3234 // store (VF1)
3235 auto Tail = ArrayRef<RecipeVFPair>(InvalidCosts);
3236 auto Subset = ArrayRef<RecipeVFPair>();
3237 do {
3238 if (Subset.empty())
3239 Subset = Tail.take_front(1);
3240
3241 VPRecipeBase *R = Subset.front().first;
3242
3243 unsigned Opcode =
3244 TypeSwitch<const VPRecipeBase *, unsigned>(R)
3245 .Case([](const VPHeaderPHIRecipe *R) { return Instruction::PHI; })
3246 .Case(
3247 [](const VPWidenStoreRecipe *R) { return Instruction::Store; })
3248 .Case([](const VPWidenLoadRecipe *R) { return Instruction::Load; })
3249 .Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
3250 [](const auto *R) { return Instruction::Call; })
3253 [](const auto *R) { return R->getOpcode(); })
3254 .Case([](const VPInterleaveRecipe *R) {
3255 return R->getStoredValues().empty() ? Instruction::Load
3256 : Instruction::Store;
3257 })
3258 .Case([](const VPReductionRecipe *R) {
3259 return RecurrenceDescriptor::getOpcode(R->getRecurrenceKind());
3260 });
3261
3262 // If the next recipe is different, or if there are no other pairs,
3263 // emit a remark for the collated subset. e.g.
3264 // [(load, VF1), (load, VF2))]
3265 // to emit:
3266 // remark: invalid costs for 'load' at VF=(VF1, VF2)
3267 if (Subset == Tail || Tail[Subset.size()].first != R) {
3268 std::string OutString;
3269 raw_string_ostream OS(OutString);
3270 assert(!Subset.empty() && "Unexpected empty range");
3271 OS << "Recipe with invalid costs prevented vectorization at VF=(";
3272 for (const auto &Pair : Subset)
3273 OS << (Pair.second == Subset.front().second ? "" : ", ") << Pair.second;
3274 OS << "):";
3275 if (Opcode == Instruction::Call) {
3276 StringRef Name = "";
3277 if (auto *Int = dyn_cast<VPWidenIntrinsicRecipe>(R)) {
3278 Name = Int->getIntrinsicName();
3279 } else {
3280 auto *WidenCall = dyn_cast<VPWidenCallRecipe>(R);
3281 Function *CalledFn =
3282 WidenCall ? WidenCall->getCalledScalarFunction()
3283 : cast<Function>(R->getOperand(R->getNumOperands() - 1)
3284 ->getLiveInIRValue());
3285 Name = CalledFn->getName();
3286 }
3287 OS << " call to " << Name;
3288 } else
3289 OS << " " << Instruction::getOpcodeName(Opcode);
3290 reportVectorizationInfo(OutString, "InvalidCost", ORE, OrigLoop, nullptr,
3291 R->getDebugLoc());
3292 Tail = Tail.drop_front(Subset.size());
3293 Subset = {};
3294 } else
3295 // Grow the subset by one element
3296 Subset = Tail.take_front(Subset.size() + 1);
3297 } while (!Tail.empty());
3298}
3299
3300 /// Check if any recipe of \p Plan will generate a vector value, which will be
3301 /// assigned a vector register.
3302 static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
3303 const TargetTransformInfo &TTI) {
3304 assert(VF.isVector() && "Checking a scalar VF?");
3305 VPTypeAnalysis TypeInfo(Plan);
3306 DenseSet<VPRecipeBase *> EphemeralRecipes;
3307 collectEphemeralRecipesForVPlan(Plan, EphemeralRecipes);
3308 // Set of already visited types.
3309 DenseSet<Type *> Visited;
3312 for (VPRecipeBase &R : *VPBB) {
3313 if (EphemeralRecipes.contains(&R))
3314 continue;
3315 // Continue early if the recipe is considered to not produce a vector
3316 // result. Note that this includes VPInstruction where some opcodes may
3317 // produce a vector, to preserve existing behavior as VPInstructions model
3318 // aspects not directly mapped to existing IR instructions.
3319 switch (R.getVPRecipeID()) {
3320 case VPRecipeBase::VPDerivedIVSC:
3321 case VPRecipeBase::VPScalarIVStepsSC:
3322 case VPRecipeBase::VPReplicateSC:
3323 case VPRecipeBase::VPInstructionSC:
3324 case VPRecipeBase::VPCurrentIterationPHISC:
3325 case VPRecipeBase::VPVectorPointerSC:
3326 case VPRecipeBase::VPVectorEndPointerSC:
3327 case VPRecipeBase::VPExpandSCEVSC:
3328 case VPRecipeBase::VPPredInstPHISC:
3329 case VPRecipeBase::VPBranchOnMaskSC:
3330 continue;
3331 case VPRecipeBase::VPReductionSC:
3332 case VPRecipeBase::VPActiveLaneMaskPHISC:
3333 case VPRecipeBase::VPWidenCallSC:
3334 case VPRecipeBase::VPWidenCanonicalIVSC:
3335 case VPRecipeBase::VPWidenCastSC:
3336 case VPRecipeBase::VPWidenGEPSC:
3337 case VPRecipeBase::VPWidenIntrinsicSC:
3338 case VPRecipeBase::VPWidenSC:
3339 case VPRecipeBase::VPBlendSC:
3340 case VPRecipeBase::VPFirstOrderRecurrencePHISC:
3341 case VPRecipeBase::VPHistogramSC:
3342 case VPRecipeBase::VPWidenPHISC:
3343 case VPRecipeBase::VPWidenIntOrFpInductionSC:
3344 case VPRecipeBase::VPWidenPointerInductionSC:
3345 case VPRecipeBase::VPReductionPHISC:
3346 case VPRecipeBase::VPInterleaveEVLSC:
3347 case VPRecipeBase::VPInterleaveSC:
3348 case VPRecipeBase::VPWidenLoadEVLSC:
3349 case VPRecipeBase::VPWidenLoadSC:
3350 case VPRecipeBase::VPWidenStoreEVLSC:
3351 case VPRecipeBase::VPWidenStoreSC:
3352 break;
3353 default:
3354 llvm_unreachable("unhandled recipe");
3355 }
3356
3357 auto WillGenerateTargetVectors = [&TTI, VF](Type *VectorTy) {
3358 unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
3359 if (!NumLegalParts)
3360 return false;
3361 if (VF.isScalable()) {
3362 // <vscale x 1 x iN> is assumed to be profitable over iN because
3363 // scalable registers are a distinct register class from scalar
3364 // ones. If we ever find a target which wants to lower scalable
3365 // vectors back to scalars, we'll need to update this code to
3366 // explicitly ask TTI about the register class uses for each part.
3367 return NumLegalParts <= VF.getKnownMinValue();
3368 }
3369 // Two or more elements that share a register - are vectorized.
3370 return NumLegalParts < VF.getFixedValue();
3371 };
3372
3373 // If the recipe defines no value and is not a store (e.g., a branch), there is no value to check; continue.
3374 if (R.getNumDefinedValues() == 0 &&
3376 continue;
3377 // For multi-def recipes (currently only interleaved loads), it suffices
3378 // to check the first def only.
3379 // For stores, check their stored value; for interleaved stores it
3380 // suffices to check the first stored value only. In all cases this is
3381 // the second operand.
3382 VPValue *ToCheck =
3383 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
3384 Type *ScalarTy = TypeInfo.inferScalarType(ToCheck);
3385 if (!Visited.insert({ScalarTy}).second)
3386 continue;
3387 Type *WideTy = toVectorizedTy(ScalarTy, VF);
3388 if (any_of(getContainedTypes(WideTy), WillGenerateTargetVectors))
3389 return true;
3390 }
3391 }
3392
3393 return false;
3394}
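// Illustration of the checks above (register widths are hypothetical): for a
// fixed VF of 8 and an i32 element, the widened type <8 x i32> legalizes to 2
// parts on a target with 128-bit vectors, and 2 < 8 means a vector register
// will be used. If instead an i64 element at VF 8 were scalarized into 8
// parts, 8 < 8 fails and the recipe is not counted as producing a vector.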
3395
3396static bool hasReplicatorRegion(VPlan &Plan) {
3398 Plan.getVectorLoopRegion()->getEntry())),
3399 [](auto *VPRB) { return VPRB->isReplicator(); });
3400}
3401
3402/// Returns true if the VPlan contains a VPReductionPHIRecipe with
3403/// FindLast recurrence kind.
3404static bool hasFindLastReductionPhi(VPlan &Plan) {
3406 [](VPRecipeBase &R) {
3407 auto *RedPhi = dyn_cast<VPReductionPHIRecipe>(&R);
3408 return RedPhi &&
3409 RecurrenceDescriptor::isFindLastRecurrenceKind(
3410 RedPhi->getRecurrenceKind());
3411 });
3412}
3413
3414 /// Returns true if the VPlan contains header phi recipes that are not
3415 /// currently supported for epilogue vectorization.
3416 static bool hasUnsupportedHeaderPhiRecipe(VPlan &Plan) {
3417 return any_of(
3419 [](VPRecipeBase &R) {
3420 switch (R.getVPRecipeID()) {
3421 case VPRecipeBase::VPFirstOrderRecurrencePHISC:
3422 // TODO: Add support for fixed-order recurrences.
3423 return true;
3424 case VPRecipeBase::VPWidenIntOrFpInductionSC:
3425 return !cast<VPWidenIntOrFpInductionRecipe>(&R)->getPHINode();
3426 case VPRecipeBase::VPReductionPHISC: {
3427 auto *RedPhi = cast<VPReductionPHIRecipe>(&R);
3428 // TODO: Support FMinNum/FMaxNum, FindLast reductions, and reductions
3429 // without underlying values.
3430 RecurKind Kind = RedPhi->getRecurrenceKind();
3431 if (RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind(Kind) ||
3432 RecurrenceDescriptor::isFindLastRecurrenceKind(Kind) ||
3433 !RedPhi->getUnderlyingValue())
3434 return true;
3435 // TODO: Add support for FindIV reductions with sunk expressions: the
3436 // resume value from the main loop is in expression domain (e.g.,
3437 // mul(ReducedIV, 3)), but the epilogue tracks raw IV values. A sunk
3438 // expression is identified by a non-VPInstruction user of
3439 // ComputeReductionResult.
3440 if (RecurrenceDescriptor::isFindIVRecurrenceKind(Kind)) {
3441 auto *RdxResult = vputils::findComputeReductionResult(RedPhi);
3442 assert(RdxResult &&
3443 "FindIV reduction must have ComputeReductionResult");
3444 return any_of(RdxResult->users(),
3445 std::not_fn(IsaPred<VPInstruction>));
3446 }
3447 return false;
3448 }
3449 default:
3450 return false;
3451 };
3452 });
3453}
3454
3455bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
3456 VPlan &MainPlan) const {
3457 // Bail out if the plan contains header phi recipes not yet supported
3458 // for epilogue vectorization.
3459 if (hasUnsupportedHeaderPhiRecipe(MainPlan))
3460 return false;
3461
3462 // Epilogue vectorization code has not been audited to ensure it handles
3463 // non-latch exits properly. It may be fine, but it needs to be audited and
3464 // tested.
3465 // TODO: Add support for loops with an early exit.
3466 if (OrigLoop->getExitingBlock() != OrigLoop->getLoopLatch())
3467 return false;
3468
3469 return true;
3470}
3471
3472 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
3473 const ElementCount VF, const unsigned IC) const {
3474 // FIXME: We need a much better cost-model to take different parameters such
3475 // as register pressure, code size increase and cost of extra branches into
3476 // account. For now we apply a very crude heuristic and only consider loops
3477 // with vectorization factors larger than a certain value.
3478
3479 // Allow the target to opt out.
3480 if (!TTI.preferEpilogueVectorization(VF * IC))
3481 return false;
3482
3483 unsigned MinVFThreshold = EpilogueVectorizationMinVF.getNumOccurrences() > 0
3484 ? EpilogueVectorizationMinVF
3485 : TTI.getEpilogueVectorizationMinVF();
3486 return estimateElementCount(VF * IC, Config.getVScaleForTuning()) >=
3487 MinVFThreshold;
3488}
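// Example of the threshold check above (a vscale-for-tuning of 2 is assumed
// purely for illustration): a main loop with VF = vscale x 4 and IC = 2 is
// estimated to process 4 * 2 * 2 = 16 elements per iteration, so epilogue
// vectorization is considered profitable only if 16 >= MinVFThreshold.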
3489
3491 VPlan &MainPlan, ElementCount MainLoopVF, unsigned IC) {
3492 if (!EnableEpilogueVectorization) {
3493 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n");
3494 return nullptr;
3495 }
3496
3497 if (!CM.isEpilogueAllowed()) {
3498 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because no "
3499 "epilogue is allowed.\n");
3500 return nullptr;
3501 }
3502
3503 // Not really a cost consideration, but check for unsupported cases here to
3504 // simplify the logic.
3505 if (!isCandidateForEpilogueVectorization(MainPlan)) {
3506 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because the loop "
3507 "is not a supported candidate.\n");
3508 return nullptr;
3509 }
3510
3513 IC * estimateElementCount(MainLoopVF, Config.getVScaleForTuning())) {
3514 // Note that the main loop leaves IC * MainLoopVF iterations iff a scalar
3515 // epilogue is required, but then the epilogue loop also requires a scalar
3516 // epilogue.
3517 LLVM_DEBUG(dbgs() << "LEV: Forced epilogue VF results in dead epilogue "
3518 "vector loop, skipping vectorizing epilogue.\n");
3519 return nullptr;
3520 }
3521
3522 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n");
3523 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
3524 if (hasPlanWithVF(ForcedEC)) {
3525 std::unique_ptr<VPlan> Clone(getPlanFor(ForcedEC).duplicate());
3526 Clone->setVF(ForcedEC);
3527 return Clone;
3528 }
3529
3530 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization forced factor is not "
3531 "viable.\n");
3532 return nullptr;
3533 }
3534
3535 if (OrigLoop->getHeader()->getParent()->hasOptSize()) {
3536 LLVM_DEBUG(
3537 dbgs() << "LEV: Epilogue vectorization skipped due to opt for size.\n");
3538 return nullptr;
3539 }
3540
3541 if (!CM.isEpilogueVectorizationProfitable(MainLoopVF, IC)) {
3542 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
3543 "this loop\n");
3544 return nullptr;
3545 }
3546
3547 // Check if a plan's vector loop processes fewer iterations than VF (e.g. when
3548 // interleave groups have been narrowed by narrowInterleaveGroups) and return
3549 // the adjusted, effective VF.
3550 using namespace VPlanPatternMatch;
3551 auto GetEffectiveVF = [](VPlan &Plan, ElementCount VF) -> ElementCount {
3552 auto *Exiting = Plan.getVectorLoopRegion()->getExitingBasicBlock();
3553 if (match(&Exiting->back(),
3554 m_BranchOnCount(m_Add(m_CanonicalIV(), m_Specific(&Plan.getUF())),
3555 m_VPValue())))
3556 return ElementCount::get(1, VF.isScalable());
3557 return VF;
3558 };
3559
3560 // Check if the main loop processes fewer than MainLoopVF elements per
3561 // iteration (e.g. due to narrowing interleave groups). Adjust MainLoopVF
3562 // as needed.
3563 MainLoopVF = GetEffectiveVF(MainPlan, MainLoopVF);
3564
3565 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
3566 // the main loop handles 8 lanes per iteration. We could still benefit from
3567 // vectorizing the epilogue loop with VF=4.
3568 ElementCount EstimatedRuntimeVF = ElementCount::getFixed(
3569 estimateElementCount(MainLoopVF, Config.getVScaleForTuning()));
3570
3571 Type *TCType = Legal->getWidestInductionType();
3572 const SCEV *RemainingIterations = nullptr;
3573 unsigned MaxTripCount = 0;
3574 const SCEV *TC = vputils::getSCEVExprForVPValue(MainPlan.getTripCount(), PSE);
3575 assert(!isa<SCEVCouldNotCompute>(TC) && "Trip count SCEV must be computable");
3576 const SCEV *KnownMinTC;
3577 bool ScalableTC = match(TC, m_scev_c_Mul(m_SCEV(KnownMinTC), m_SCEVVScale()));
3578 bool ScalableRemIter = false;
3579 ScalarEvolution &SE = *PSE.getSE();
3580 // Use versions of TC and VF in which both are either scalable or fixed.
3581 if (ScalableTC == MainLoopVF.isScalable()) {
3582 ScalableRemIter = ScalableTC;
3583 RemainingIterations =
3584 SE.getURemExpr(TC, SE.getElementCount(TCType, MainLoopVF * IC));
3585 } else if (ScalableTC) {
3586 const SCEV *EstimatedTC = SE.getMulExpr(
3587 KnownMinTC,
3588 SE.getConstant(TCType, Config.getVScaleForTuning().value_or(1)));
3589 RemainingIterations = SE.getURemExpr(
3590 EstimatedTC, SE.getElementCount(TCType, MainLoopVF * IC));
3591 } else
3592 RemainingIterations =
3593 SE.getURemExpr(TC, SE.getElementCount(TCType, EstimatedRuntimeVF * IC));
3594
3595 // No iterations left to process in the epilogue.
3596 if (RemainingIterations->isZero())
3597 return nullptr;
3598
3599 if (MainLoopVF.isFixed()) {
3600 MaxTripCount = MainLoopVF.getFixedValue() * IC - 1;
3601 if (SE.isKnownPredicate(CmpInst::ICMP_ULT, RemainingIterations,
3602 SE.getConstant(TCType, MaxTripCount))) {
3603 MaxTripCount = SE.getUnsignedRangeMax(RemainingIterations).getZExtValue();
3604 }
3605 LLVM_DEBUG(dbgs() << "LEV: Maximum Trip Count for Epilogue: "
3606 << MaxTripCount << "\n");
3607 }
3608
3609 auto SkipVF = [&](const SCEV *VF, const SCEV *RemIter) -> bool {
3610 return SE.isKnownPredicate(CmpInst::ICMP_UGT, VF, RemIter);
3611 };
3612 VectorizationFactor Result = VectorizationFactor::Disabled();
3613 VPlan *BestPlan = nullptr;
3614 for (auto &NextVF : ProfitableVFs) {
3615 // Skip candidate VFs without a corresponding VPlan.
3616 if (!hasPlanWithVF(NextVF.Width))
3617 continue;
3618
3619 VPlan &CurrentPlan = getPlanFor(NextVF.Width);
3620 ElementCount EffectiveVF = GetEffectiveVF(CurrentPlan, NextVF.Width);
3621 // Skip candidate VFs with widths >= the (estimated) runtime VF (scalable
3622 // vectors) or > the VF of the main loop (fixed vectors).
3623 if ((!EffectiveVF.isScalable() && MainLoopVF.isScalable() &&
3624 ElementCount::isKnownGE(EffectiveVF, EstimatedRuntimeVF)) ||
3625 (EffectiveVF.isScalable() &&
3626 ElementCount::isKnownGE(EffectiveVF, MainLoopVF)) ||
3627 (!EffectiveVF.isScalable() && !MainLoopVF.isScalable() &&
3628 ElementCount::isKnownGT(EffectiveVF, MainLoopVF)))
3629 continue;
3630
3631 // If EffectiveVF is greater than the number of remaining iterations, the
3632 // epilogue loop would be dead. Skip such factors. If the epilogue plan
3633 // also has narrowed interleave groups, use the effective VF since
3634 // the epilogue step will be reduced to its IC.
3635 // TODO: We should also consider comparing against a scalable
3636 // RemainingIterations once SCEV is able to evaluate non-canonical
3637 // vscale-based expressions.
3638 if (!ScalableRemIter) {
3639 // Handle the case where EffectiveVF and RemainingIterations are in
3640 // different numerical spaces.
3641 if (EffectiveVF.isScalable())
3642 EffectiveVF = ElementCount::getFixed(
3643 estimateElementCount(EffectiveVF, Config.getVScaleForTuning()));
3644 if (SkipVF(SE.getElementCount(TCType, EffectiveVF), RemainingIterations))
3645 continue;
3646 }
3647
3648 if (Result.Width.isScalar() ||
3649 isMoreProfitable(NextVF, Result, MaxTripCount, !CM.foldTailByMasking(),
3650 /*IsEpilogue*/ true)) {
3651 Result = NextVF;
3652 BestPlan = &CurrentPlan;
3653 }
3654 }
3655
3656 if (!BestPlan)
3657 return nullptr;
3658
3659 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
3660 << Result.Width << "\n");
3661 std::unique_ptr<VPlan> Clone(BestPlan->duplicate());
3662 Clone->setVF(Result.Width);
3663 return Clone;
3664}
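// Illustrative example of the remaining-iteration logic above (all values
// hypothetical): with a fixed main-loop VF of 8, IC = 2 and a known trip
// count of 37, RemainingIterations is 37 % (8 * 2) = 5, so any candidate
// epilogue VF wider than 5 lanes would leave a dead epilogue vector loop and
// is skipped. MaxTripCount starts at 8 * 2 - 1 = 15 and is tightened to 5 by
// SCEV's unsigned range before the per-candidate profitability comparison.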
3665
3666 unsigned
3667 LoopVectorizationPlanner::selectInterleaveCount(VPlan &Plan, ElementCount VF,
3668 InstructionCost LoopCost) {
3669 // -- The interleave heuristics --
3670 // We interleave the loop in order to expose ILP and reduce the loop overhead.
3671 // There are many micro-architectural considerations that we can't predict
3672 // at this level. For example, frontend pressure (on decode or fetch) due to
3673 // code size, or the number and capabilities of the execution ports.
3674 //
3675 // We use the following heuristics to select the interleave count:
3676 // 1. If the code has reductions, then we interleave to break the cross
3677 // iteration dependency.
3678 // 2. If the loop is really small, then we interleave to reduce the loop
3679 // overhead.
3680 // 3. We don't interleave if we think that we will spill registers to memory
3681 // due to the increased register pressure.
3682
3683 // Only interleave tail-folded loops if wide lane masks are requested, as the
3684 // overhead of the extra instructions needed to compute the predicate likely
3685 // outweighs the benefit. If an epilogue is not allowed for any other reason,
3686 // do not interleave.
3687 if (!CM.isEpilogueAllowed() &&
3688 !(CM.preferTailFoldedLoop() && CM.useWideActiveLaneMask()))
3689 return 1;
3690
3693 LLVM_DEBUG(dbgs() << "LV: Loop requires variable-length step. "
3694 "Unroll factor forced to be 1.\n");
3695 return 1;
3696 }
3697
3698 // We used the distance for the interleave count.
3699 if (!Legal->isSafeForAnyVectorWidth())
3700 return 1;
3701
3702 // We don't attempt to perform interleaving for loops with uncountable early
3703 // exits because the VPInstruction::AnyOf code cannot currently handle
3704 // multiple parts.
3705 if (Plan.hasEarlyExit())
3706 return 1;
3707
3708 const bool HasReductions =
3711
3712 // FIXME: implement interleaving for FindLast transform correctly.
3713 if (hasFindLastReductionPhi(Plan))
3714 return 1;
3715
3716 VPRegisterUsage R =
3717 calculateRegisterUsageForPlan(Plan, {VF}, TTI, CM.ValuesToIgnore)[0];
3718
3719 // If we did not calculate the cost for VF (because the user selected the VF)
3720 // then we calculate the cost of VF here.
3721 if (LoopCost == 0) {
3722 if (VF.isScalar())
3723 LoopCost = CM.expectedCost(VF);
3724 else
3725 LoopCost = cost(Plan, VF, &R);
3726 assert(LoopCost.isValid() && "Expected to have chosen a VF with valid cost");
3727
3728 // Loop body is free and there is no need for interleaving.
3729 if (LoopCost == 0)
3730 return 1;
3731 }
3732
3733 // We divide by these constants so assume that we have at least one
3734 // instruction that uses at least one register.
3735 for (auto &Pair : R.MaxLocalUsers) {
3736 Pair.second = std::max(Pair.second, 1U);
3737 }
3738
3739 // We calculate the interleave count using the following formula.
3740 // Subtract the number of loop invariants from the number of available
3741 // registers. These registers are used by all of the interleaved instances.
3742 // Next, divide the remaining registers by the number of registers that is
3743 // required by the loop, in order to estimate how many parallel instances
3744 // fit without causing spills. All of this is rounded down if necessary to be
3745 // a power of two. We want power of two interleave count to simplify any
3746 // addressing operations or alignment considerations.
3747 // We also want power of two interleave counts to ensure that the induction
3748 // variable of the vector loop wraps to zero, when tail is folded by masking;
3749 // this currently happens when OptForSize, in which case IC is set to 1 above.
3750 unsigned IC = UINT_MAX;
3751
3752 for (const auto &Pair : R.MaxLocalUsers) {
3753 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(Pair.first);
3754 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
3755 << " registers of "
3756 << TTI.getRegisterClassName(Pair.first)
3757 << " register class\n");
3758 if (VF.isScalar()) {
3759 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
3760 TargetNumRegisters = ForceTargetNumScalarRegs;
3761 } else {
3762 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
3763 TargetNumRegisters = ForceTargetNumVectorRegs;
3764 }
3765 unsigned MaxLocalUsers = Pair.second;
3766 unsigned LoopInvariantRegs = 0;
3767 if (R.LoopInvariantRegs.contains(Pair.first))
3768 LoopInvariantRegs = R.LoopInvariantRegs[Pair.first];
3769
3770 unsigned TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
3771 MaxLocalUsers);
3772 // Don't count the induction variable as interleaved.
3773 if (EnableIndVarRegisterHeur) {
3774 TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
3775 std::max(1U, (MaxLocalUsers - 1)));
3776 }
3777
3778 IC = std::min(IC, TmpIC);
3779 }
3780
3781 // Clamp the interleave ranges to reasonable counts.
3782 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
3783 LLVM_DEBUG(dbgs() << "LV: MaxInterleaveFactor for the target is "
3784 << MaxInterleaveCount << "\n");
3785
3786 // Check if the user has overridden the max.
3787 if (VF.isScalar()) {
3788 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
3789 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
3790 } else {
3791 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
3792 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
3793 }
3794
3795 // Try to get the exact trip count or, failing that, an estimate based on
3796 // profiling data or ConstantMax from PSE.
3797 auto BestKnownTC =
3798 getSmallBestKnownTC(PSE, OrigLoop,
3799 /*CanUseConstantMax=*/true,
3800 /*CanExcludeZeroTrips=*/CM.isEpilogueAllowed());
3801
3802 // For fixed length VFs treat a scalable trip count as unknown.
3803 if (BestKnownTC && (BestKnownTC->isFixed() || VF.isScalable())) {
3804 // Re-evaluate trip counts and VFs to be in the same numerical space.
3805 unsigned AvailableTC =
3806 estimateElementCount(*BestKnownTC, Config.getVScaleForTuning());
3807 unsigned EstimatedVF =
3808 estimateElementCount(VF, Config.getVScaleForTuning());
3809
3810 // At least one iteration must be scalar when this constraint holds. So the
3811 // maximum available iterations for interleaving is one less.
3812 if (CM.requiresScalarEpilogue(VF.isVector()))
3813 --AvailableTC;
3814
3815 unsigned InterleaveCountLB = bit_floor(std::max(
3816 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
3817
3818 if (getSmallConstantTripCount(PSE.getSE(), OrigLoop).isNonZero()) {
3819 // If the best known trip count is exact, we select between two
3820 // prospective ICs, where
3821 //
3822 // 1) the aggressive IC is capped by the trip count divided by VF
3823 // 2) the conservative IC is capped by the trip count divided by (VF * 2)
3824 //
3825 // The final IC is selected in a way that the epilogue loop trip count is
3826 // minimized while maximizing the IC itself, so that we either run the
3827 // vector loop at least once if it generates a small epilogue loop, or
3828 // else we run the vector loop at least twice.
3829
3830 unsigned InterleaveCountUB = bit_floor(std::max(
3831 1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
3832 MaxInterleaveCount = InterleaveCountLB;
3833
3834 if (InterleaveCountUB != InterleaveCountLB) {
3835 unsigned TailTripCountUB =
3836 (AvailableTC % (EstimatedVF * InterleaveCountUB));
3837 unsigned TailTripCountLB =
3838 (AvailableTC % (EstimatedVF * InterleaveCountLB));
3839 // If both produce the same scalar tail, maximize the IC to do the same work
3840 // in fewer vector loop iterations.
3841 if (TailTripCountUB == TailTripCountLB)
3842 MaxInterleaveCount = InterleaveCountUB;
3843 }
3844 } else {
3845 // If the trip count is an estimated compile-time constant, limit the
3846 // IC to be capped by the trip count divided by VF * 2, such that the
3847 // vector loop runs at least twice to make interleaving seem profitable
3848 // when there is an epilogue loop present. Since the exact trip count is
3849 // not known, we choose to be conservative in our IC estimate.
3850 MaxInterleaveCount = InterleaveCountLB;
3851 }
3852 }
3853
3854 assert(MaxInterleaveCount > 0 &&
3855 "Maximum interleave count must be greater than 0");
3856
3857 // Clamp the calculated IC to be between the 1 and the max interleave count
3858 // that the target and trip count allows.
3859 if (IC > MaxInterleaveCount)
3860 IC = MaxInterleaveCount;
3861 else
3862 // Make sure IC is greater than 0.
3863 IC = std::max(1u, IC);
3864
3865 assert(IC > 0 && "Interleave count must be greater than 0.");
3866
3867 // Interleave if we vectorized this loop and there is a reduction that could
3868 // benefit from interleaving.
3869 if (VF.isVector() && HasReductions) {
3870 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
3871 return IC;
3872 }
3873
3874 // For any scalar loop that either requires runtime checks or tail-folding,
3875 // we are better off leaving this to the unroller. Note that if we've already
3876 // vectorized the loop, we will have done the runtime check and so interleaving
3877 // won't require further checks.
3878 bool ScalarInterleavingRequiresPredication =
3879 (VF.isScalar() && any_of(OrigLoop->blocks(), [this](BasicBlock *BB) {
3880 return Legal->blockNeedsPredication(BB);
3881 }));
3882 bool ScalarInterleavingRequiresRuntimePointerCheck =
3883 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
3884
3885 // We want to interleave small loops in order to reduce the loop overhead and
3886 // potentially expose ILP opportunities.
3887 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
3888 << "LV: IC is " << IC << '\n'
3889 << "LV: VF is " << VF << '\n');
3890 const bool AggressivelyInterleave =
3891 TTI.enableAggressiveInterleaving(HasReductions);
3892 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
3893 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
3894 // We assume that the cost overhead is 1 and we use the cost model
3895 // to estimate the cost of the loop and interleave until the cost of the
3896 // loop overhead is about 5% of the cost of the loop.
3897 unsigned SmallIC = std::min(IC, (unsigned)llvm::bit_floor<uint64_t>(
3898 SmallLoopCost / LoopCost.getValue()));
3899
3900 // Interleave until store/load ports (estimated by max interleave count) are
3901 // saturated.
3902 unsigned NumStores = 0;
3903 unsigned NumLoads = 0;
3906 for (VPRecipeBase &R : *VPBB) {
3908 NumLoads++;
3909 continue;
3910 }
3912 NumStores++;
3913 continue;
3914 }
3915
3916 if (auto *InterleaveR = dyn_cast<VPInterleaveRecipe>(&R)) {
3917 if (unsigned StoreOps = InterleaveR->getNumStoreOperands())
3918 NumStores += StoreOps;
3919 else
3920 NumLoads += InterleaveR->getNumDefinedValues();
3921 continue;
3922 }
3923 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
3924 NumLoads += isa<LoadInst>(RepR->getUnderlyingInstr());
3925 NumStores += isa<StoreInst>(RepR->getUnderlyingInstr());
3926 continue;
3927 }
3928 if (isa<VPHistogramRecipe>(&R)) {
3929 NumLoads++;
3930 NumStores++;
3931 continue;
3932 }
3933 }
3934 }
3935 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
3936 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
3937
3938 // There is little point in interleaving for reductions containing selects
3939 // and compares when VF=1 since it may just create more overhead than it's
3940 // worth for loops with small trip counts. This is because we still have to
3941 // do the final reduction after the loop.
3942 bool HasSelectCmpReductions =
3943 HasReductions &&
3945 [](VPRecipeBase &R) {
3946 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
3947 return RedR && (RecurrenceDescriptor::isAnyOfRecurrenceKind(
3948 RedR->getRecurrenceKind()) ||
3949 RecurrenceDescriptor::isFindIVRecurrenceKind(
3950 RedR->getRecurrenceKind()));
3951 });
3952 if (HasSelectCmpReductions) {
3953 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
3954 return 1;
3955 }
3956
3957 // If we have a scalar reduction (vector reductions are already dealt with
3958 // by this point), we can increase the critical path length if the loop
3959 // we're interleaving is inside another loop. For tree-wise reductions
3960 // set the limit to 2, and for ordered reductions it's best to disable
3961 // interleaving entirely.
3962 if (HasReductions && OrigLoop->getLoopDepth() > 1) {
3963 bool HasOrderedReductions =
3965 [](VPRecipeBase &R) {
3966 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
3967
3968 return RedR && RedR->isOrdered();
3969 });
3970 if (HasOrderedReductions) {
3971 LLVM_DEBUG(
3972 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
3973 return 1;
3974 }
3975
3976 unsigned F = MaxNestedScalarReductionIC;
3977 SmallIC = std::min(SmallIC, F);
3978 StoresIC = std::min(StoresIC, F);
3979 LoadsIC = std::min(LoadsIC, F);
3980 }
3981
3982 if (EnableLoadStoreRuntimeInterleave &&
3983 std::max(StoresIC, LoadsIC) > SmallIC) {
3984 LLVM_DEBUG(
3985 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
3986 return std::max(StoresIC, LoadsIC);
3987 }
3988
3989 // If there are scalar reductions and TTI has enabled aggressive
3990 // interleaving for reductions, we will interleave to expose ILP.
3991 if (VF.isScalar() && AggressivelyInterleave) {
3992 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
3993 // Interleave no less than SmallIC but not as aggressive as the normal IC
3994 // to satisfy the rare situation when resources are too limited.
3995 return std::max(IC / 2, SmallIC);
3996 }
3997
3998 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
3999 return SmallIC;
4000 }
4001
4002 // Interleave if this is a large loop (small loops are already dealt with by
4003 // this point) that could benefit from interleaving.
4004 if (AggressivelyInterleave) {
4005 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4006 return IC;
4007 }
4008
4009 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
4010 return 1;
4011}
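// Worked example of the register-pressure part of the heuristic above (the
// register counts are illustrative only): with 32 registers in a class, 4
// loop-invariant values and at most 7 simultaneously live values per
// instance, the candidate count is bit_floor((32 - 4) / 7) = bit_floor(4) = 4,
// which is then clamped by the target's maximum interleave factor and by the
// trip-count-based bounds before the final IC is returned.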
4012
4013 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
4014 ElementCount VF) {
4015 // TODO: Cost model for emulated masked load/store is completely
4016 // broken. This hack guides the cost model to use an artificially
4017 // high enough value to practically disable vectorization with such
4018 // operations, except where previously deployed legality hack allowed
4019 // using very low cost values. This is to avoid regressions coming simply
4020 // from moving "masked load/store" check from legality to cost model.
4021 // Masked Load/Gather emulation was previously never allowed.
4022 // Limited number of Masked Store/Scatter emulation was allowed.
4023 assert(isPredicatedInst(I) &&
4024 "Expecting a scalar emulated instruction");
4025 return isa<LoadInst>(I) ||
4026 (isa<StoreInst>(I) &&
4027 NumPredStores > NumberOfStoresToPredicate);
4028}
4029
4030 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
4031 assert(VF.isVector() && "Expected VF >= 2");
4032
4033 // If we've already collected the instructions to scalarize or the predicated
4034 // BBs after vectorization, there's nothing to do. Collection may already have
4035 // occurred if we have a user-selected VF and are now computing the expected
4036 // cost for interleaving.
4037 if (InstsToScalarize.contains(VF) ||
4038 PredicatedBBsAfterVectorization.contains(VF))
4039 return;
4040
4041 // Initialize a mapping for VF in InstsToScalarize. If we find that it's
4042 // not profitable to scalarize any instructions, the presence of VF in the
4043 // map will indicate that we've analyzed it already.
4044 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
4045
4046 // Find all the instructions that are scalar with predication in the loop and
4047 // determine if it would be better to not if-convert the blocks they are in.
4048 // If so, we also record the instructions to scalarize.
4049 for (BasicBlock *BB : TheLoop->blocks()) {
4050 if (!blockNeedsPredicationForAnyReason(BB))
4051 continue;
4052 for (Instruction &I : *BB)
4053 if (isScalarWithPredication(&I, VF)) {
4054 ScalarCostsTy ScalarCosts;
4055 // Do not apply discount logic for:
4056 // 1. Scalars after vectorization, as there will only be a single copy
4057 // of the instruction.
4058 // 2. Scalable VF, as that would lead to invalid scalarization costs.
4059 // 3. Emulated masked memrefs, if a hacked cost is needed.
4060 if (!isScalarAfterVectorization(&I, VF) && !VF.isScalable() &&
4061 !useEmulatedMaskMemRefHack(&I, VF) &&
4062 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) {
4063 for (const auto &[I, IC] : ScalarCosts)
4064 ScalarCostsVF.insert({I, IC});
4065 // Check if we decided to scalarize a call. If so, update the widening
4066 // decision of the call to CM_Scalarize with the computed scalar cost.
4067 for (const auto &[I, Cost] : ScalarCosts) {
4068 auto *CI = dyn_cast<CallInst>(I);
4069 if (!CI || !CallWideningDecisions.contains({CI, VF}))
4070 continue;
4071 CallWideningDecisions[{CI, VF}].Kind = CM_Scalarize;
4072 CallWideningDecisions[{CI, VF}].Cost = Cost;
4073 }
4074 }
4075 // Remember that BB will remain after vectorization.
4076 PredicatedBBsAfterVectorization[VF].insert(BB);
4077 for (auto *Pred : predecessors(BB)) {
4078 if (Pred->getSingleSuccessor() == BB)
4079 PredicatedBBsAfterVectorization[VF].insert(Pred);
4080 }
4081 }
4082 }
4083}
4084
4085InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
4086 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
4087 assert(!isUniformAfterVectorization(PredInst, VF) &&
4088 "Instruction marked uniform-after-vectorization will be predicated");
4089
4090 // Initialize the discount to zero, meaning that the scalar version and the
4091 // vector version cost the same.
4092 InstructionCost Discount = 0;
4093
4094 // Holds instructions to analyze. The instructions we visit are mapped in
4095 // ScalarCosts. Those instructions are the ones that would be scalarized if
4096 // we find that the scalar version costs less.
4097 SmallVector<Instruction *, 8> Worklist;
4098
4099 // Returns true if the given instruction can be scalarized.
4100 auto CanBeScalarized = [&](Instruction *I) -> bool {
4101 // We only attempt to scalarize instructions forming a single-use chain
4102 // from the original predicated block that would otherwise be vectorized.
4103 // Although not strictly necessary, we give up on instructions we know will
4104 // already be scalar to avoid traversing chains that are unlikely to be
4105 // beneficial.
4106 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
4107 isScalarAfterVectorization(I, VF))
4108 return false;
4109
4110 // If the instruction is scalar with predication, it will be analyzed
4111 // separately. We ignore it within the context of PredInst.
4112 if (isScalarWithPredication(I, VF))
4113 return false;
4114
4115 // If any of the instruction's operands are uniform after vectorization,
4116 // the instruction cannot be scalarized. This prevents, for example, a
4117 // masked load from being scalarized.
4118 //
4119 // We assume we will only emit a value for lane zero of an instruction
4120 // marked uniform after vectorization, rather than VF identical values.
4121 // Thus, if we scalarize an instruction that uses a uniform, we would
4122 // create uses of values corresponding to the lanes we aren't emitting code
4123 // for. This behavior can be changed by allowing getScalarValue to clone
4124 // the lane zero values for uniforms rather than asserting.
4125 for (Use &U : I->operands())
4126 if (auto *J = dyn_cast<Instruction>(U.get()))
4127 if (isUniformAfterVectorization(J, VF))
4128 return false;
4129
4130 // Otherwise, we can scalarize the instruction.
4131 return true;
4132 };
4133
4134 // Compute the expected cost discount from scalarizing the entire expression
4135 // feeding the predicated instruction. We currently only consider expressions
4136 // that are single-use instruction chains.
4137 Worklist.push_back(PredInst);
4138 while (!Worklist.empty()) {
4139 Instruction *I = Worklist.pop_back_val();
4140
4141 // If we've already analyzed the instruction, there's nothing to do.
4142 if (ScalarCosts.contains(I))
4143 continue;
4144
4145 // Cannot scalarize fixed-order recurrence phis at the moment.
4146 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
4147 continue;
4148
4149 // Compute the cost of the vector instruction. Note that this cost already
4150 // includes the scalarization overhead of the predicated instruction.
4151 InstructionCost VectorCost = getInstructionCost(I, VF);
4152
4153 // Compute the cost of the scalarized instruction. This cost is the cost of
4154 // the instruction as if it wasn't if-converted and instead remained in the
4155 // predicated block. We will scale this cost by block probability after
4156 // computing the scalarization overhead.
4157 InstructionCost ScalarCost =
4158 VF.getFixedValue() * getInstructionCost(I, ElementCount::getFixed(1));
4159
4160 // Compute the scalarization overhead of needed insertelement instructions
4161 // and phi nodes.
4162 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
4163 Type *WideTy = toVectorizedTy(I->getType(), VF);
4164 for (Type *VectorTy : getContainedTypes(WideTy)) {
4165 ScalarCost += TTI.getScalarizationOverhead(
4166 cast<VectorType>(VectorTy), APInt::getAllOnes(VF.getFixedValue()),
4167 /*Insert=*/true,
4168 /*Extract=*/false, Config.CostKind);
4169 }
4170 ScalarCost += VF.getFixedValue() *
4171 TTI.getCFInstrCost(Instruction::PHI, Config.CostKind);
4172 }
4173
4174 // Compute the scalarization overhead of needed extractelement
4175 // instructions. For each of the instruction's operands, if the operand can
4176 // be scalarized, add it to the worklist; otherwise, account for the
4177 // overhead.
4178 for (Use &U : I->operands())
4179 if (auto *J = dyn_cast<Instruction>(U.get())) {
4180 assert(canVectorizeTy(J->getType()) &&
4181 "Instruction has non-scalar type");
4182 if (CanBeScalarized(J))
4183 Worklist.push_back(J);
4184 else if (needsExtract(J, VF)) {
4185 Type *WideTy = toVectorizedTy(J->getType(), VF);
4186 for (Type *VectorTy : getContainedTypes(WideTy)) {
4187 ScalarCost += TTI.getScalarizationOverhead(
4188 cast<VectorType>(VectorTy),
4189 APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
4190 /*Extract*/ true, Config.CostKind);
4191 }
4192 }
4193 }
4194
4195 // Scale the total scalar cost by block probability.
4196 ScalarCost /= getPredBlockCostDivisor(Config.CostKind, I->getParent());
4197
4198 // Compute the discount. A non-negative discount means the vector version
4199 // of the instruction costs more, and scalarizing would be beneficial.
4200 Discount += VectorCost - ScalarCost;
4201 ScalarCosts[I] = ScalarCost;
4202 }
4203
4204 return Discount;
4205}
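// Example of the discount computation above (costs are invented): if the
// vectorized form of a predicated chain costs 12 and its scalarized form
// costs 8 before scaling, a predicated-block cost divisor of 2 reduces the
// scalar estimate to 4, so the chain contributes 12 - 4 = 8 to the discount;
// a non-negative total means scalarizing the chain is the better choice.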
4206
4207 InstructionCost LoopVectorizationCostModel::expectedCost(ElementCount VF) {
4208 InstructionCost Cost;
4209 assert(VF.isScalar() && "must only be called for scalar VFs");
4210
4211 // For each block.
4212 for (BasicBlock *BB : TheLoop->blocks()) {
4213 InstructionCost BlockCost;
4214
4215 // For each instruction in the old loop.
4216 for (Instruction &I : *BB) {
4217 // Skip ignored values.
4218 if (ValuesToIgnore.count(&I) ||
4219 (VF.isVector() && VecValuesToIgnore.count(&I)))
4220 continue;
4221
4222 InstructionCost C = getInstructionCost(&I, VF);
4223
4224 // Check if we should override the cost.
4225 if (C.isValid() && ForceTargetInstructionCost.getNumOccurrences() > 0)
4226 C = InstructionCost(ForceTargetInstructionCost);
4227
4228 BlockCost += C;
4229 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF "
4230 << VF << " For instruction: " << I << '\n');
4231 }
4232
4233 // In the scalar loop, we may not always execute the predicated block, if it
4234 // is an if-else block. Thus, scale the block's cost by the probability of
4235 // executing it. getPredBlockCostDivisor will return 1 for blocks that are
4236 // only predicated by the header mask when folding the tail.
4237 Cost += BlockCost / getPredBlockCostDivisor(Config.CostKind, BB);
4238 }
4239
4240 return Cost;
4241}
4242
4243/// Gets the address access SCEV for Ptr, if it should be used for cost modeling
4244/// according to isAddressSCEVForCost.
4245///
4246/// This SCEV can be sent to the Target in order to estimate the address
4247 /// calculation cost.
4248 static const SCEV *getAddressAccessSCEV(
4249 Value *Ptr,
4250 PredicatedScalarEvolution &PSE,
4251 const Loop *TheLoop) {
4252 const SCEV *Addr = PSE.getSCEV(Ptr);
4253 return vputils::isAddressSCEVForCost(Addr, *PSE.getSE(), TheLoop) ? Addr
4254 : nullptr;
4255}
4256
4257 InstructionCost
4258 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
4259 ElementCount VF) {
4260 assert(VF.isVector() &&
4261 "Scalarization cost of instruction implies vectorization.");
4262 if (VF.isScalable())
4263 return InstructionCost::getInvalid();
4264
4265 Type *ValTy = getLoadStoreType(I);
4266 auto *SE = PSE.getSE();
4267
4268 unsigned AS = getLoadStoreAddressSpace(I);
4269 Value *Ptr = getLoadStorePointerOperand(I);
4270 Type *PtrTy = toVectorTy(Ptr->getType(), VF);
4271 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
4272 // that it is being called from this specific place.
4273
4274 // Figure out whether the access is strided and get the stride value
4275 // if it's known at compile time.
4276 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, PSE, TheLoop);
4277
4278 // Get the cost of the scalar memory instruction and address computation.
4279 InstructionCost Cost =
4280 VF.getFixedValue() *
4281 TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV, Config.CostKind);
4282
4283 // Don't pass *I here, since it is scalar but will actually be part of a
4284 // vectorized loop where the user of it is a vectorized instruction.
4285 const Align Alignment = getLoadStoreAlignment(I);
4286 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
4287 Cost += VF.getFixedValue() *
4288 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
4289 AS, Config.CostKind, OpInfo);
4290
4291 // Get the overhead of the extractelement and insertelement instructions
4292 // we might create due to scalarization.
4293 Cost += getScalarizationOverhead(I, VF);
4294
4295 // If we have a predicated load/store, it will need extra i1 extracts and
4296 // conditional branches, but may not be executed for each vector lane. Scale
4297 // the cost by the probability of executing the predicated block.
4298 if (isPredicatedInst(I)) {
4299 Cost /= getPredBlockCostDivisor(Config.CostKind, I->getParent());
4300
4301 // Add the cost of an i1 extract and a branch
4302 auto *VecI1Ty =
4303 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
4304 Cost += TTI.getScalarizationOverhead(
4305 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
4306 /*Insert=*/false, /*Extract=*/true, Config.CostKind);
4307 Cost += TTI.getCFInstrCost(Instruction::CondBr, Config.CostKind);
4308
4310 // Artificially setting to a high enough value to practically disable
4311 // vectorization with such operations.
4312 Cost = 3000000;
4313 }
4314
4315 return Cost;
4316}
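// Illustrative example (costs invented): for VF = 4, a predicated scalarized
// access whose accumulated address + memory cost is 20 is first divided by
// the predicated-block cost divisor (e.g. 2, giving 10), then charged for
// extracting the four i1 mask lanes and for a conditional branch; if the
// emulated-mask hack applies, the whole cost is pinned to 3000000 so such
// loops are effectively never vectorized.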
4317
4318 InstructionCost
4319 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
4320 ElementCount VF) {
4321 Type *ValTy = getLoadStoreType(I);
4322 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4323 Value *Ptr = getLoadStorePointerOperand(I);
4324 unsigned AS = getLoadStoreAddressSpace(I);
4325 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
4326
4327 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
4328 "Stride should be 1 or -1 for consecutive memory access");
4329 const Align Alignment = getLoadStoreAlignment(I);
4330 InstructionCost Cost = 0;
4331 if (isMaskRequired(I)) {
4332 unsigned IID = I->getOpcode() == Instruction::Load
4333 ? Intrinsic::masked_load
4334 : Intrinsic::masked_store;
4335 Cost += TTI.getMemIntrinsicInstrCost(
4336 MemIntrinsicCostAttributes(IID, VectorTy, Alignment, AS),
4337 Config.CostKind);
4338 } else {
4339 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
4340 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
4341 Config.CostKind, OpInfo, I);
4342 }
4343
4344 bool Reverse = ConsecutiveStride < 0;
4345 if (Reverse)
4346 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
4347 VectorTy, {}, Config.CostKind, 0);
4348 return Cost;
4349}
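// In short: a masked consecutive access is costed as the corresponding
// masked load/store intrinsic on the vector type, an unmasked one as a plain
// vector memory op, and a reverse (stride -1) access additionally pays for
// one SK_Reverse shuffle.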
4350
4351 InstructionCost
4352 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
4353 ElementCount VF) {
4354 assert(Legal->isUniformMemOp(*I, VF));
4355
4356 Type *ValTy = getLoadStoreType(I);
4357 Type *PtrTy = getLoadStorePointerOperand(I)->getType();
4358 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4359 const Align Alignment = getLoadStoreAlignment(I);
4360 unsigned AS = getLoadStoreAddressSpace(I);
4361 if (isa<LoadInst>(I)) {
4362 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
4363 Config.CostKind) +
4364 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
4365 Config.CostKind) +
4366 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy,
4367 VectorTy, {}, Config.CostKind);
4368 }
4369 StoreInst *SI = cast<StoreInst>(I);
4370
4371 bool IsLoopInvariantStoreValue = Legal->isInvariant(SI->getValueOperand());
4372 // TODO: We have existing tests that request the cost of extracting element
4373 // VF.getKnownMinValue() - 1 from a scalable vector. This does not represent
4374 // the actual generated code, which involves extracting the last element of
4375 // a scalable vector where the lane to extract is unknown at compile time.
4376 InstructionCost Cost =
4377 TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, Config.CostKind) +
4378 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
4379 Config.CostKind);
4380 if (!IsLoopInvariantStoreValue)
4381 Cost += TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
4382 VectorTy, Config.CostKind, 0);
4383 return Cost;
4384}
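// In short: a uniform load is modeled as one address computation, one scalar
// load and one broadcast shuffle; a uniform store is one address computation
// and one scalar store, plus an extract of the last vector element when the
// stored value is not loop-invariant.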
4385
4386 InstructionCost
4387 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
4388 ElementCount VF) {
4389 Type *ValTy = getLoadStoreType(I);
4390 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4391 const Align Alignment = getLoadStoreAlignment(I);
4392 Value *Ptr = getLoadStorePointerOperand(I);
4393 Type *PtrTy = Ptr->getType();
4394
4395 if (!Legal->isUniform(Ptr, VF))
4396 PtrTy = toVectorTy(PtrTy, VF);
4397
4398 unsigned IID = I->getOpcode() == Instruction::Load
4399 ? Intrinsic::masked_gather
4400 : Intrinsic::masked_scatter;
4401 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
4402 Config.CostKind) +
4403 TTI.getMemIntrinsicInstrCost(
4404 MemIntrinsicCostAttributes(IID, VectorTy, Ptr, isMaskRequired(I),
4405 Alignment, I),
4406 Config.CostKind);
4407}
4408
4409 InstructionCost
4410 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
4411 ElementCount VF) {
4412 const auto *Group = getInterleavedAccessGroup(I);
4413 assert(Group && "Fail to get an interleaved access group.");
4414
4415 Instruction *InsertPos = Group->getInsertPos();
4416 Type *ValTy = getLoadStoreType(InsertPos);
4417 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4418 unsigned AS = getLoadStoreAddressSpace(InsertPos);
4419
4420 unsigned InterleaveFactor = Group->getFactor();
4421 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
4422
4423 // Holds the indices of existing members in the interleaved group.
4424 SmallVector<unsigned, 4> Indices;
4425 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
4426 if (Group->getMember(IF))
4427 Indices.push_back(IF);
4428
4429 // Calculate the cost of the whole interleaved group.
4430 bool UseMaskForGaps =
4431 (Group->requiresScalarEpilogue() && !isEpilogueAllowed()) ||
4432 (isa<StoreInst>(I) && !Group->isFull());
4433 InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
4434 InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
4435 Group->getAlign(), AS, Config.CostKind, isMaskRequired(I),
4436 UseMaskForGaps);
4437
4438 if (Group->isReverse()) {
4439 // TODO: Add support for reversed masked interleaved access.
4440 assert(!isMaskRequired(I) &&
4441 "Reverse masked interleaved access not supported.");
4442 Cost += Group->getNumMembers() *
4443 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
4444 VectorTy, {}, Config.CostKind, 0);
4445 }
4446 return Cost;
4447}
4448
4449 std::optional<InstructionCost>
4450 LoopVectorizationCostModel::getReductionPatternCost(Instruction *I,
4451 ElementCount VF,
4452 Type *Ty) const {
4453 using namespace llvm::PatternMatch;
4454 // Early exit for no inloop reductions
4455 if (Config.getInLoopReductions().empty() || VF.isScalar() ||
4456 !isa<VectorType>(Ty))
4457 return std::nullopt;
4458 auto *VectorTy = cast<VectorType>(Ty);
4459
4460 // We are looking for one of the following patterns and finding the minimal acceptable cost:
4461 // reduce(mul(ext(A), ext(B))) or
4462 // reduce(mul(A, B)) or
4463 // reduce(ext(A)) or
4464 // reduce(A).
4465 // The basic idea is that we walk down the tree to do that, finding the root
4466 // reduction instruction in InLoopReductionImmediateChains. From there we find
4467 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
4468 // of the components. If the reduction cost is lower, we return it for the
4469 // reduction instruction and 0 for the other instructions in the pattern. If
4470 // it is not, we return an invalid cost, specifying that the original cost
4471 // method should be used.
4472 Instruction *RetI = I;
4473 if (match(RetI, m_ZExtOrSExt(m_Value()))) {
4474 if (!RetI->hasOneUser())
4475 return std::nullopt;
4476 RetI = RetI->user_back();
4477 }
4478
4479 if (match(RetI, m_OneUse(m_Mul(m_Value(), m_Value()))) &&
4480 RetI->user_back()->getOpcode() == Instruction::Add) {
4481 RetI = RetI->user_back();
4482 }
4483
4484 // Test if the found instruction is a reduction, and if not return an invalid
4485 // cost specifying the parent to use the original cost modelling.
4486 Instruction *LastChain = Config.getInLoopReductionImmediateChain(RetI);
4487 if (!LastChain)
4488 return std::nullopt;
4489
4490 // Find the reduction this chain is a part of and calculate the basic cost of
4491 // the reduction on its own.
4492 Instruction *ReductionPhi = LastChain;
4493 while (!isa<PHINode>(ReductionPhi))
4494 ReductionPhi = Config.getInLoopReductionImmediateChain(ReductionPhi);
4495
4496 const RecurrenceDescriptor &RdxDesc =
4497 Legal->getRecurrenceDescriptor(cast<PHINode>(ReductionPhi));
4498
4499 InstructionCost BaseCost;
4500 RecurKind RK = RdxDesc.getRecurrenceKind();
4501 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
4502 Intrinsic::ID MinMaxID = getMinMaxReductionIntrinsicOp(RK);
4503 BaseCost = TTI.getMinMaxReductionCost(
4504 MinMaxID, VectorTy, RdxDesc.getFastMathFlags(), Config.CostKind);
4505 } else {
4506 BaseCost = TTI.getArithmeticReductionCost(RdxDesc.getOpcode(), VectorTy,
4507 RdxDesc.getFastMathFlags(),
4508 Config.CostKind);
4509 }
4510
4511 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
4512 // normal fmul instruction to the cost of the fadd reduction.
4513 if (RK == RecurKind::FMulAdd)
4514 BaseCost += TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy,
4515 Config.CostKind);
4516
4517 // If we're using ordered reductions then we can just return the base cost
4518 // here, since getArithmeticReductionCost calculates the full ordered
4519 // reduction cost when FP reassociation is not allowed.
4520 if (Config.useOrderedReductions(RdxDesc))
4521 return BaseCost;
4522
4523 // Get the operand that was not the reduction chain and match it to one of the
4524 // patterns, returning the better cost if it is found.
4525 Instruction *RedOp = RetI->getOperand(1) == LastChain
4526 ? dyn_cast<Instruction>(RetI->getOperand(0))
4527 : dyn_cast<Instruction>(RetI->getOperand(1));
4528
4529 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
4530
4531 Instruction *Op0, *Op1;
4532 if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
4533 match(RedOp,
4534 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
4535 match(Op0, m_ZExtOrSExt(m_Value())) &&
4536 Op0->getOpcode() == Op1->getOpcode() &&
4537 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
4538 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
4539 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
4540
4541 // Matched reduce.add(ext(mul(ext(A), ext(B))))
4542 // Note that the extend opcodes need to all match, or if A==B they will have
4543 // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
4544 // which is equally fine.
4545 bool IsUnsigned = isa<ZExtInst>(Op0);
4546 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
4547 auto *MulType = VectorType::get(Op0->getType(), VectorTy);
4548
4549 InstructionCost ExtCost =
4550 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
4551 TTI::CastContextHint::None, Config.CostKind, Op0);
4552 InstructionCost MulCost =
4553 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, Config.CostKind);
4554 InstructionCost Ext2Cost = TTI.getCastInstrCost(
4555 RedOp->getOpcode(), VectorTy, MulType, TTI::CastContextHint::None,
4556 Config.CostKind, RedOp);
4557
4558 InstructionCost RedCost = TTI.getMulAccReductionCost(
4559 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
4560 Config.CostKind);
4561
4562 if (RedCost.isValid() &&
4563 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
4564 return I == RetI ? RedCost : 0;
4565 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
4566 !TheLoop->isLoopInvariant(RedOp)) {
4567 // Matched reduce(ext(A))
4568 bool IsUnsigned = isa<ZExtInst>(RedOp);
4569 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
4570 InstructionCost RedCost = TTI.getExtendedReductionCost(
4571 RdxDesc.getOpcode(), IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
4572 RdxDesc.getFastMathFlags(), Config.CostKind);
4573
4574 InstructionCost ExtCost = TTI.getCastInstrCost(
4575 RedOp->getOpcode(), VectorTy, ExtType, TTI::CastContextHint::None,
4576 Config.CostKind, RedOp);
4577 if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
4578 return I == RetI ? RedCost : 0;
4579 } else if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
4580 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
4581 if (match(Op0, m_ZExtOrSExt(m_Value())) &&
4582 Op0->getOpcode() == Op1->getOpcode() &&
4583 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
4584 bool IsUnsigned = isa<ZExtInst>(Op0);
4585 Type *Op0Ty = Op0->getOperand(0)->getType();
4586 Type *Op1Ty = Op1->getOperand(0)->getType();
4587 Type *LargestOpTy =
4588 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
4589 : Op0Ty;
4590 auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
4591
4592 // Matched reduce.add(mul(ext(A), ext(B))), where the two exts may be of
4593 // different sizes. We take the largest type as the ext to reduce, and add
4594 // the remaining cost as, for example, reduce(mul(ext(ext(A)), ext(B))).
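// Editor's note (illustrative, not part of the original source): e.g. with an
// i8 operand A and an i16 operand B, i16 is chosen as the ext type for the
// reduction, and the extra extend of A from i8 to i16 is costed separately via
// ExtraExtCost below.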
4595 InstructionCost ExtCost0 = TTI.getCastInstrCost(
4596 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
4597 TTI::CastContextHint::None, Config.CostKind, Op0);
4598 InstructionCost ExtCost1 = TTI.getCastInstrCost(
4599 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
4600 TTI::CastContextHint::None, Config.CostKind, Op1);
4601 InstructionCost MulCost = TTI.getArithmeticInstrCost(
4602 Instruction::Mul, VectorTy, Config.CostKind);
4603
4604 InstructionCost RedCost = TTI.getMulAccReductionCost(
4605 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
4606 Config.CostKind);
4607 InstructionCost ExtraExtCost = 0;
4608 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
4609 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
4610 ExtraExtCost = TTI.getCastInstrCost(
4611 ExtraExtOp->getOpcode(), ExtType,
4612 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
4613 TTI::CastContextHint::None, Config.CostKind, ExtraExtOp);
4614 }
4615
4616 if (RedCost.isValid() &&
4617 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
4618 return I == RetI ? RedCost : 0;
4619 } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
4620 // Matched reduce.add(mul())
4621 InstructionCost MulCost = TTI.getArithmeticInstrCost(
4622 Instruction::Mul, VectorTy, Config.CostKind);
4623
4624 InstructionCost RedCost = TTI.getMulAccReductionCost(
4625 true, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), VectorTy,
4626 Config.CostKind);
4627
4628 if (RedCost.isValid() && RedCost < MulCost + BaseCost)
4629 return I == RetI ? RedCost : 0;
4630 }
4631 }
4632
4633 return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
4634}
4635
4637LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
4638 ElementCount VF) {
4639 // Calculate scalar cost only. Vectorization cost should be ready at this
4640 // moment.
4641 if (VF.isScalar()) {
4642 Type *ValTy = getLoadStoreType(I);
4643 Type *PtrTy = getLoadStorePointerOperand(I)->getType();
4644 const Align Alignment = getLoadStoreAlignment(I);
4645 unsigned AS = getLoadStoreAddressSpace(I);
4646
4647 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
4648 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
4649 Config.CostKind) +
4650 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
4651 Config.CostKind, OpInfo, I);
4652 }
4653 return getWideningCost(I, VF);
4654}
4655
4657LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
4658 ElementCount VF) const {
4659
4660 // There is no mechanism yet to create a scalable scalarization loop,
4661 // so this is currently Invalid.
4662 if (VF.isScalable())
4663 return InstructionCost::getInvalid();
4664
4665 if (VF.isScalar())
4666 return 0;
4667
4668 InstructionCost Cost = 0;
4669 Type *RetTy = toVectorizedTy(I->getType(), VF);
4670 if (!RetTy->isVoidTy() &&
4671 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) {
4672
4674 if (isa<LoadInst>(I))
4676 else if (isa<StoreInst>(I))
4678
4679 for (Type *VectorTy : getContainedTypes(RetTy)) {
4680 Cost += TTI.getScalarizationOverhead(
4682 /*Insert=*/true, /*Extract=*/false, Config.CostKind,
4683 /*ForPoisonSrc=*/true, {}, VIC);
4684 }
4685 }
4686
4687 // Some targets keep addresses scalar.
4688 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
4689 return Cost;
4690
4691 // Some targets support efficient element stores.
4692 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
4693 return Cost;
4694
4695 // Collect operands to consider.
4696 CallInst *CI = dyn_cast<CallInst>(I);
4697 Instruction::op_range Ops = CI ? CI->args() : I->operands();
4698
4699 // Skip operands that do not require extraction/scalarization and do not incur
4700 // any overhead.
4701 SmallVector<Type *> Tys;
4702 for (auto *V : filterExtractingOperands(Ops, VF))
4703 Tys.push_back(maybeVectorizeType(V->getType(), VF));
4704
4708 return Cost +
4709 TTI.getOperandsScalarizationOverhead(Tys, Config.CostKind, OperandVIC);
4710}
4711
4712void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
4713 if (VF.isScalar())
4714 return;
4715 NumPredStores = 0;
4716 for (BasicBlock *BB : TheLoop->blocks()) {
4717 // For each instruction in the old loop.
4718 for (Instruction &I : *BB) {
4719 Value *Ptr = getLoadStorePointerOperand(&I);
4720 if (!Ptr)
4721 continue;
4722
4723 // TODO: We should generate better code and update the cost model for
4724 // predicated uniform stores. Today they are treated as any other
4725 // predicated store (see added test cases in
4726 // invariant-store-vectorization.ll).
4727 if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
4728 NumPredStores++;
4729
4730 if (Legal->isUniformMemOp(I, VF)) {
4731 auto IsLegalToScalarize = [&]() {
4732 if (!VF.isScalable())
4733 // Scalarization of fixed length vectors "just works".
4734 return true;
4735
4736 // We have dedicated lowering for unpredicated uniform loads and
4737 // stores. Note that even with tail folding we know that at least
4738 // one lane is active (i.e. generalized predication is not possible
4739 // here), and the logic below depends on this fact.
4740 if (!foldTailByMasking())
4741 return true;
4742
4743 // For scalable vectors, a uniform memop load is always
4744 // uniform-by-parts and we know how to scalarize that.
4745 if (isa<LoadInst>(I))
4746 return true;
4747
4748 // A uniform store isn't necessarily uniform-by-parts
4749 // and we can't assume scalarization.
4750 auto &SI = cast<StoreInst>(I);
4751 return TheLoop->isLoopInvariant(SI.getValueOperand());
4752 };
4753
4754 const InstructionCost GatherScatterCost =
4755 Config.isLegalGatherOrScatter(&I, VF)
4756 ? getGatherScatterCost(&I, VF)
4757 : InstructionCost::getInvalid();
4758
4759 // Load: Scalar load + broadcast
4760 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
4761 // FIXME: This cost is a significant under-estimate for tail folded
4762 // memory ops.
4763 const InstructionCost ScalarizationCost =
4764 IsLegalToScalarize() ? getUniformMemOpCost(&I, VF)
4765 : InstructionCost::getInvalid();
4766
4767 // Choose the better solution for the current VF. Note that Invalid
4768 // costs compare as maximally large. If both are invalid, the result
4769 // stays Invalid, which signals a failure and a vectorization abort.
4770 if (GatherScatterCost < ScalarizationCost)
4771 setWideningDecision(&I, VF, CM_GatherScatter, GatherScatterCost);
4772 else
4773 setWideningDecision(&I, VF, CM_Scalarize, ScalarizationCost);
4774 continue;
4775 }
4776
4777 // We assume that widening is the best solution when possible.
4778 if (memoryInstructionCanBeWidened(&I, VF)) {
4779 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
4780 int ConsecutiveStride = Legal->isConsecutivePtr(
4781 getLoadStoreType(&I), Ptr);
4782 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
4783 "Expected consecutive stride.");
4784 InstWidening Decision =
4785 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
4786 setWideningDecision(&I, VF, Decision, Cost);
4787 continue;
4788 }
4789
4790 // Choose between Interleaving, Gather/Scatter or Scalarization.
4791 InstructionCost InterleaveCost = InstructionCost::getInvalid();
4792 unsigned NumAccesses = 1;
4793 if (isAccessInterleaved(&I)) {
4794 const auto *Group = getInterleavedAccessGroup(&I);
4795 assert(Group && "Fail to get an interleaved access group.");
4796
4797 // Make one decision for the whole group.
4798 if (getWideningDecision(&I, VF) != CM_Unknown)
4799 continue;
4800
4801 NumAccesses = Group->getNumMembers();
4802 if (interleavedAccessCanBeWidened(&I, VF))
4803 InterleaveCost = getInterleaveGroupCost(&I, VF);
4804 }
4805
4806 InstructionCost GatherScatterCost =
4807 Config.isLegalGatherOrScatter(&I, VF)
4808 ? getGatherScatterCost(&I, VF) * NumAccesses
4809 : InstructionCost::getInvalid();
4810
4811 InstructionCost ScalarizationCost =
4812 getMemInstScalarizationCost(&I, VF) * NumAccesses;
4813
4814 // Choose the better solution for the current VF,
4815 // write down this decision and use it during vectorization.
4816 InstructionCost Cost;
4817 InstWidening Decision;
4818 if (InterleaveCost <= GatherScatterCost &&
4819 InterleaveCost < ScalarizationCost) {
4820 Decision = CM_Interleave;
4821 Cost = InterleaveCost;
4822 } else if (GatherScatterCost < ScalarizationCost) {
4823 Decision = CM_GatherScatter;
4824 Cost = GatherScatterCost;
4825 } else {
4826 Decision = CM_Scalarize;
4827 Cost = ScalarizationCost;
4828 }
4829 // If the instruction belongs to an interleave group, the whole group
4830 // receives the same decision. The whole group receives the cost, but
4831 // the cost will actually be assigned to one instruction.
4832 if (const auto *Group = getInterleavedAccessGroup(&I)) {
4833 if (Decision == CM_Scalarize) {
4834 for (Instruction *I : Group->members())
4835 setWideningDecision(I, VF, Decision,
4836 getMemInstScalarizationCost(I, VF));
4837 } else {
4838 setWideningDecision(Group, VF, Decision, Cost);
4839 }
4840 } else
4841 setWideningDecision(&I, VF, Decision, Cost);
4842 }
4843 }
4844
4845 // Make sure that any load of address and any other address computation
4846 // remains scalar unless there is gather/scatter support. This avoids
4847 // inevitable extracts into address registers, and also has the benefit of
4848 // activating LSR more, since that pass can't optimize vectorized
4849 // addresses.
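// Editor's note (illustrative, not part of the original source): for a loop
// like
//   for (i) Sum += *Ptrs[i];
// the load of Ptrs[i] feeds the address of the second load; without
// gather/scatter support it is kept scalar so the dependent access does not
// need per-lane extracts from a vector of pointers.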
4850 if (TTI.prefersVectorizedAddressing())
4851 return;
4852
4853 // Start with all scalar pointer uses.
4854 SmallPtrSet<Instruction *, 8> AddrDefs;
4855 for (BasicBlock *BB : TheLoop->blocks())
4856 for (Instruction &I : *BB) {
4857 Instruction *PtrDef =
4858 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
4859 if (PtrDef && TheLoop->contains(PtrDef) &&
4860 getWideningDecision(&I, VF) != CM_GatherScatter)
4861 AddrDefs.insert(PtrDef);
4862 }
4863
4864 // Add all instructions used to generate the addresses.
4865 SmallVector<Instruction *, 4> Worklist;
4866 append_range(Worklist, AddrDefs);
4867 while (!Worklist.empty()) {
4868 Instruction *I = Worklist.pop_back_val();
4869 for (auto &Op : I->operands())
4870 if (auto *InstOp = dyn_cast<Instruction>(Op))
4871 if (TheLoop->contains(InstOp) && !isa<PHINode>(InstOp) &&
4872 AddrDefs.insert(InstOp).second)
4873 Worklist.push_back(InstOp);
4874 }
4875
4876 auto UpdateMemOpUserCost = [this, VF](LoadInst *LI) {
4877 // If there are direct memory op users of the newly scalarized load,
4878 // their cost may have changed because there's no scalarization
4879 // overhead for the operand. Update it.
4880 for (User *U : LI->users()) {
4882 continue;
4884 continue;
4887 getMemInstScalarizationCost(cast<Instruction>(U), VF));
4888 }
4889 };
4890 for (auto *I : AddrDefs) {
4891 if (isa<LoadInst>(I)) {
4892 // Setting the desired widening decision should ideally be handled by
4893 // cost functions, but since this involves the task of finding out
4894 // if the loaded register is involved in an address computation, it is
4895 // instead changed here when we know this is the case.
4896 InstWidening Decision = getWideningDecision(I, VF);
4897 if (!isPredicatedInst(I) &&
4898 (Decision == CM_Widen || Decision == CM_Widen_Reverse ||
4899 (!Legal->isUniformMemOp(*I, VF) && Decision == CM_Scalarize))) {
4900 // Scalarize a widened load of address or update the cost of a scalar
4901 // load of an address.
4902 setWideningDecision(
4903 I, VF, CM_Scalarize,
4904 (VF.getKnownMinValue() *
4905 getMemoryInstructionCost(I, ElementCount::getFixed(1))));
4906 UpdateMemOpUserCost(cast<LoadInst>(I));
4907 } else if (const auto *Group = getInterleavedAccessGroup(I)) {
4908 // Scalarize all members of this interleaved group when any member
4909 // is used as an address. The address-used load skips scalarization
4910 // overhead; other members include it.
4911 for (Instruction *Member : Group->members()) {
4912 InstructionCost Cost = AddrDefs.contains(Member)
4913 ? (VF.getKnownMinValue() *
4914 getMemoryInstructionCost(
4915 Member, ElementCount::getFixed(1)))
4916 : getMemInstScalarizationCost(Member, VF);
4918 UpdateMemOpUserCost(cast<LoadInst>(Member));
4919 }
4920 }
4921 } else {
4922 // Cannot scalarize fixed-order recurrence phis at the moment.
4923 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
4924 continue;
4925
4926 // Make sure I gets scalarized and gets a cost estimate without
4927 // scalarization overhead.
4928 ForcedScalars[VF].insert(I);
4929 }
4930 }
4931}
4932
4933void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) {
4934 assert(!VF.isScalar() &&
4935 "Trying to set a vectorization decision for a scalar VF");
4936
4937 auto ForcedScalar = ForcedScalars.find(VF);
4938 for (BasicBlock *BB : TheLoop->blocks()) {
4939 // For each instruction in the old loop.
4940 for (Instruction &I : *BB) {
4941 CallInst *CI = dyn_cast<CallInst>(&I);
4942
4943 if (!CI)
4944 continue;
4945
4946 InstructionCost ScalarCost = InstructionCost::getInvalid();
4947 InstructionCost VectorCost = InstructionCost::getInvalid();
4948 InstructionCost IntrinsicCost = InstructionCost::getInvalid();
4949 Function *ScalarFunc = CI->getCalledFunction();
4950 Type *ScalarRetTy = CI->getType();
4951 SmallVector<Type *, 4> Tys, ScalarTys;
4952 for (auto &ArgOp : CI->args())
4953 ScalarTys.push_back(ArgOp->getType());
4954
4955 // Estimate cost of scalarized vector call. The source operands are
4956 // assumed to be vectors, so we need to extract individual elements from
4957 // there, execute VF scalar calls, and then gather the result into the
4958 // vector return value.
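// Editor's note (illustrative, not part of the original source): for a fixed
// VF of 4, the scalarized cost computed below is roughly
//   4 * cost(scalar call) + cost of extracting 4 argument lanes
//                         + cost of inserting 4 result lanes,
// i.e. ScalarCallCost * VF plus the scalarization overhead.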
4959 if (VF.isFixed()) {
4960 InstructionCost ScalarCallCost = TTI.getCallInstrCost(
4961 ScalarFunc, ScalarRetTy, ScalarTys, Config.CostKind);
4962
4963 // Compute costs of unpacking argument values for the scalar calls and
4964 // packing the return values to a vector.
4965 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
4966 ScalarCost = ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
4967 } else {
4968 // There is no point attempting to calculate the scalar cost for a
4969 // scalable VF as we know it will be Invalid.
4970 assert(!getScalarizationOverhead(CI, VF).isValid() &&
4971 "Unexpected valid cost for scalarizing scalable vectors");
4972 ScalarCost = InstructionCost::getInvalid();
4973 }
4974
4975 // Honor ForcedScalars and UniformAfterVectorization decisions.
4976 // TODO: For calls, it might still be more profitable to widen. Use
4977 // VPlan-based cost model to compare different options.
4978 if (VF.isVector() && ((ForcedScalar != ForcedScalars.end() &&
4979 ForcedScalar->second.contains(CI)) ||
4980 isUniformAfterVectorization(CI, VF))) {
4981 setCallWideningDecision(CI, VF, CM_Scalarize, nullptr,
4982 Intrinsic::not_intrinsic, ScalarCost);
4983 continue;
4984 }
4985
4986 bool MaskRequired = isMaskRequired(CI);
4987 // Compute corresponding vector type for return value and arguments.
4988 Type *RetTy = toVectorizedTy(ScalarRetTy, VF);
4989 for (Type *ScalarTy : ScalarTys)
4990 Tys.push_back(toVectorizedTy(ScalarTy, VF));
4991
4992 // An in-loop reduction using an fmuladd intrinsic is a special case;
4993 // we don't want the normal cost for that intrinsic.
4995 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy)) {
4998 *RedCost);
4999 continue;
5000 }
5001
5002 // Find the cost of vectorizing the call, if we can find a suitable
5003 // vector variant of the function.
5004 VFInfo FuncInfo;
5005 Function *VecFunc = nullptr;
5006 // Search through any available variants for one we can use at this VF.
5007 for (VFInfo &Info : VFDatabase::getMappings(*CI)) {
5008 // Must match requested VF.
5009 if (Info.Shape.VF != VF)
5010 continue;
5011
5012 // Must take a mask argument if one is required
5013 if (MaskRequired && !Info.isMasked())
5014 continue;
5015
5016 // Check that all parameter kinds are supported
5017 bool ParamsOk = true;
5018 for (VFParameter Param : Info.Shape.Parameters) {
5019 switch (Param.ParamKind) {
5021 break;
5023 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5024 // Make sure the scalar parameter in the loop is invariant.
5025 if (!PSE.getSE()->isSCEVable(ScalarParam->getType()) ||
5026 !PSE.getSE()->isLoopInvariant(PSE.getSCEV(ScalarParam),
5027 TheLoop))
5028 ParamsOk = false;
5029 break;
5030 }
5032 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5033 // Find the stride for the scalar parameter in this loop and see if
5034 // it matches the stride for the variant.
5035 // TODO: do we need to figure out the cost of an extract to get the
5036 // first lane? Or do we hope that it will be folded away?
5037 ScalarEvolution *SE = PSE.getSE();
5038 if (!SE->isSCEVable(ScalarParam->getType()) ||
5039 !match(SE->getSCEV(ScalarParam),
5041 m_SCEV(), m_scev_SpecificSInt(Param.LinearStepOrPos),
5043 ParamsOk = false;
5044 break;
5045 }
5047 break;
5048 default:
5049 ParamsOk = false;
5050 break;
5051 }
5052 }
5053
5054 if (!ParamsOk)
5055 continue;
5056
5057 // Found a suitable candidate, stop here.
5058 VecFunc = CI->getModule()->getFunction(Info.VectorName);
5059 FuncInfo = Info;
5060 break;
5061 }
5062
5063 if (TLI && VecFunc && !CI->isNoBuiltin())
5064 VectorCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, Config.CostKind);
5065
5066 // Find the cost of an intrinsic; some targets may have instructions that
5067 // perform the operation without needing an actual call.
5068 Intrinsic::ID IID = getVectorIntrinsicIDForCall(CI, TLI);
5069 if (IID != Intrinsic::not_intrinsic)
5070 IntrinsicCost = getVectorIntrinsicCost(CI, VF);
5071
5072 InstructionCost Cost = ScalarCost;
5073 InstWidening Decision = CM_Scalarize;
5074
5075 if (VectorCost.isValid() && VectorCost <= Cost) {
5076 Cost = VectorCost;
5077 Decision = CM_VectorCall;
5078 }
5079
5080 if (IntrinsicCost.isValid() && IntrinsicCost <= Cost) {
5081 Cost = IntrinsicCost;
5082 Decision = CM_IntrinsicCall;
5083 }
5084
5085 setCallWideningDecision(CI, VF, Decision, VecFunc, IID, Cost);
5086 }
5087 }
5088}
5089
5090bool LoopVectorizationCostModel::shouldConsiderInvariant(Value *Op) {
5091 if (!Legal->isInvariant(Op))
5092 return false;
5093 // Consider Op invariant if neither it nor its operands are predicated
5094 // instructions in the loop; otherwise it is not trivially hoistable.
5095 auto *OpI = dyn_cast<Instruction>(Op);
5096 return !OpI || !TheLoop->contains(OpI) ||
5097 (!isPredicatedInst(OpI) &&
5098 (!isa<PHINode>(OpI) || OpI->getParent() != TheLoop->getHeader()) &&
5099 all_of(OpI->operands(),
5100 [this](Value *Op) { return shouldConsiderInvariant(Op); }));
5101}
5102
5103InstructionCost
5104LoopVectorizationCostModel::getInstructionCost(Instruction *I,
5105 ElementCount VF) {
5106 // If we know that this instruction will remain uniform, check the cost of
5107 // the scalar version.
5108 if (isUniformAfterVectorization(I, VF))
5109 VF = ElementCount::getFixed(1);
5110
5111 if (VF.isVector() && isProfitableToScalarize(I, VF))
5112 return InstsToScalarize[VF][I];
5113
5114 // Forced scalars do not have any scalarization overhead.
5115 auto ForcedScalar = ForcedScalars.find(VF);
5116 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
5117 auto InstSet = ForcedScalar->second;
5118 if (InstSet.count(I))
5119 return getInstructionCost(I, ElementCount::getFixed(1)) *
5120 VF.getKnownMinValue();
5121 }
5122
5123 const auto &MinBWs = Config.getMinimalBitwidths();
5124 uint64_t InstrMinBWs = MinBWs.lookup(I);
5125 Type *RetTy = I->getType();
5126 if (canTruncateToMinimalBitwidth(I, VF))
5127 RetTy = IntegerType::get(RetTy->getContext(), InstrMinBWs);
5128 auto *SE = PSE.getSE();
5129
5130 Type *VectorTy;
5131 if (isScalarAfterVectorization(I, VF)) {
5132 [[maybe_unused]] auto HasSingleCopyAfterVectorization =
5133 [this](Instruction *I, ElementCount VF) -> bool {
5134 if (VF.isScalar())
5135 return true;
5136
5137 auto Scalarized = InstsToScalarize.find(VF);
5138 assert(Scalarized != InstsToScalarize.end() &&
5139 "VF not yet analyzed for scalarization profitability");
5140 return !Scalarized->second.count(I) &&
5141 llvm::all_of(I->users(), [&](User *U) {
5142 auto *UI = cast<Instruction>(U);
5143 return !Scalarized->second.count(UI);
5144 });
5145 };
5146
5147 // With the exception of GEPs and PHIs, after scalarization there should
5148 // only be one copy of the instruction generated in the loop. This is
5149 // because the VF is either 1, or any instructions that need scalarizing
5150 // have already been dealt with by the time we get here. As a result,
5151 // we don't have to multiply the instruction cost by VF.
5152 assert(I->getOpcode() == Instruction::GetElementPtr ||
5153 I->getOpcode() == Instruction::PHI ||
5154 (I->getOpcode() == Instruction::BitCast &&
5155 I->getType()->isPointerTy()) ||
5156 HasSingleCopyAfterVectorization(I, VF));
5157 VectorTy = RetTy;
5158 } else
5159 VectorTy = toVectorizedTy(RetTy, VF);
5160
5161 if (VF.isVector() && VectorTy->isVectorTy() &&
5162 !TTI.getNumberOfParts(VectorTy))
5163 return InstructionCost::getInvalid();
5164
5165 // TODO: We need to estimate the cost of intrinsic calls.
5166 switch (I->getOpcode()) {
5167 case Instruction::GetElementPtr:
5168 // We mark this instruction as zero-cost because the cost of GEPs in
5169 // vectorized code depends on whether the corresponding memory instruction
5170 // is scalarized or not. Therefore, we handle GEPs with the memory
5171 // instruction cost.
5172 return 0;
5173 case Instruction::UncondBr:
5174 case Instruction::CondBr: {
5175 // In cases of scalarized and predicated instructions, there will be VF
5176 // predicated blocks in the vectorized loop. Each branch around these
5177 // blocks also requires an extract of its vector compare i1 element.
5178 // Note that the conditional branch from the loop latch will be replaced by
5179 // a single branch controlling the loop, so there is no extra overhead from
5180 // scalarization.
5181 bool ScalarPredicatedBB = false;
5183 if (VF.isVector() && BI &&
5184 (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) ||
5185 PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1))) &&
5186 BI->getParent() != TheLoop->getLoopLatch())
5187 ScalarPredicatedBB = true;
5188
5189 if (ScalarPredicatedBB) {
5190 // Not possible to scalarize scalable vector with predicated instructions.
5191 if (VF.isScalable())
5192 return InstructionCost::getInvalid();
5193 // Return cost for branches around scalarized and predicated blocks.
5194 auto *VecI1Ty =
5195 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
5196 return (TTI.getScalarizationOverhead(
5197 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
5198 /*Insert*/ false, /*Extract*/ true, Config.CostKind) +
5199 (TTI.getCFInstrCost(Instruction::CondBr, Config.CostKind) *
5200 VF.getFixedValue()));
5201 }
5202
5203 if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
5204 // The back-edge branch will remain, as will all scalar branches.
5205 return TTI.getCFInstrCost(Instruction::UncondBr, Config.CostKind);
5206
5207 // This branch will be eliminated by if-conversion.
5208 return 0;
5209 // Note: We currently assume zero cost for an unconditional branch inside
5210 // a predicated block since it will become a fall-through, although we
5211 // may decide in the future to call TTI for all branches.
5212 }
5213 case Instruction::Switch: {
5214 if (VF.isScalar())
5215 return TTI.getCFInstrCost(Instruction::Switch, Config.CostKind);
5216 auto *Switch = cast<SwitchInst>(I);
5217 return Switch->getNumCases() *
5218 TTI.getCmpSelInstrCost(
5219 Instruction::ICmp,
5220 toVectorTy(Switch->getCondition()->getType(), VF),
5221 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
5222 CmpInst::ICMP_EQ, Config.CostKind);
5223 }
5224 case Instruction::PHI: {
5225 auto *Phi = cast<PHINode>(I);
5226
5227 // First-order recurrences are replaced by vector shuffles inside the loop.
5228 if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi)) {
5229 return TTI.getShuffleCost(
5231 cast<VectorType>(VectorTy), {}, Config.CostKind, -1);
5232 }
5233
5234 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
5235 // converted into select instructions. We require N - 1 selects per phi
5236 // node, where N is the number of incoming values.
5237 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) {
5238 Type *ResultTy = Phi->getType();
5239
5240 // All instructions in an Any-of reduction chain are narrowed to bool.
5241 // Check if that is the case for this phi node.
5242 auto *HeaderUser = cast_if_present<PHINode>(
5243 find_singleton<User>(Phi->users(), [this](User *U, bool) -> User * {
5244 auto *Phi = dyn_cast<PHINode>(U);
5245 if (Phi && Phi->getParent() == TheLoop->getHeader())
5246 return Phi;
5247 return nullptr;
5248 }));
5249 if (HeaderUser) {
5250 auto &ReductionVars = Legal->getReductionVars();
5251 auto Iter = ReductionVars.find(HeaderUser);
5252 if (Iter != ReductionVars.end() &&
5253 RecurrenceDescriptor::isAnyOfRecurrenceKind(
5254 Iter->second.getRecurrenceKind()))
5255 ResultTy = Type::getInt1Ty(Phi->getContext());
5256 }
5257 return (Phi->getNumIncomingValues() - 1) *
5258 TTI.getCmpSelInstrCost(
5259 Instruction::Select, toVectorTy(ResultTy, VF),
5260 toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
5261 CmpInst::BAD_ICMP_PREDICATE, Config.CostKind);
5262 }
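// Editor's note (illustrative, not part of the original source): after
// if-conversion a non-header phi such as
//   %p = phi [ %a, %then ], [ %b, %else ]
// becomes a select on the edge mask, so a phi with N incoming values is
// costed above as N - 1 vector selects.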
5263
5264 // When tail folding with EVL, if the phi is part of an out of loop
5265 // reduction then it will be transformed into a wide vp_merge.
5266 if (VF.isVector() && foldTailWithEVL() &&
5267 Legal->getReductionVars().contains(Phi) &&
5268 !Config.isInLoopReduction(Phi)) {
5269 IntrinsicCostAttributes ICA(
5270 Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
5271 {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
5272 return TTI.getIntrinsicInstrCost(ICA, Config.CostKind);
5273 }
5274
5275 return TTI.getCFInstrCost(Instruction::PHI, Config.CostKind);
5276 }
5277 case Instruction::UDiv:
5278 case Instruction::SDiv:
5279 case Instruction::URem:
5280 case Instruction::SRem:
5281 if (VF.isVector() && isPredicatedInst(I)) {
5282 const auto [ScalarCost, MaskedCost] = getDivRemSpeculationCost(I, VF);
5283 return isDivRemScalarWithPredication(ScalarCost, MaskedCost) ? ScalarCost
5284 : MaskedCost;
5285 }
5286 // We've proven all lanes safe to speculate, fall through.
5287 [[fallthrough]];
5288 case Instruction::Add:
5289 case Instruction::Sub: {
5290 auto Info = Legal->getHistogramInfo(I);
5291 if (Info && VF.isVector()) {
5292 const HistogramInfo *HGram = Info.value();
5293 // Assume that a non-constant update value (or a constant != 1) requires
5294 // a multiply, and add that into the cost.
5295 InstructionCost MulCost = TTI::TCC_Free;
5296 ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1));
5297 if (!RHS || RHS->getZExtValue() != 1)
5298 MulCost = TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy,
5299 Config.CostKind);
5300
5301 // Find the cost of the histogram operation itself.
5302 Type *PtrTy = VectorType::get(HGram->Load->getPointerOperandType(), VF);
5303 Type *ScalarTy = I->getType();
5304 Type *MaskTy = VectorType::get(Type::getInt1Ty(I->getContext()), VF);
5305 IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
5306 Type::getVoidTy(I->getContext()),
5307 {PtrTy, ScalarTy, MaskTy});
5308
5309 // Add the costs together with the add/sub operation.
5310 return TTI.getIntrinsicInstrCost(ICA, Config.CostKind) + MulCost +
5311 TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy,
5312 Config.CostKind);
5313 }
5314 [[fallthrough]];
5315 }
5316 case Instruction::FAdd:
5317 case Instruction::FSub:
5318 case Instruction::Mul:
5319 case Instruction::FMul:
5320 case Instruction::FDiv:
5321 case Instruction::FRem:
5322 case Instruction::Shl:
5323 case Instruction::LShr:
5324 case Instruction::AShr:
5325 case Instruction::And:
5326 case Instruction::Or:
5327 case Instruction::Xor: {
5328 // If we're speculating on the stride being 1, the multiplication may
5329 // fold away. We can generalize this for all operations using the notion
5330 // of neutral elements. (TODO)
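// Editor's note (illustrative, not part of the original source): when runtime
// checks guarantee Stride == 1, an index computation like i * Stride folds to
// i, which is why the multiply is treated as free below.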
5331 if (I->getOpcode() == Instruction::Mul &&
5332 ((TheLoop->isLoopInvariant(I->getOperand(0)) &&
5333 PSE.getSCEV(I->getOperand(0))->isOne()) ||
5334 (TheLoop->isLoopInvariant(I->getOperand(1)) &&
5335 PSE.getSCEV(I->getOperand(1))->isOne())))
5336 return 0;
5337
5338 // Detect reduction patterns
5339 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
5340 return *RedCost;
5341
5342 // Certain instructions can be cheaper to vectorize if they have a constant
5343 // second vector operand. One example of this is shifts on x86.
5344 Value *Op2 = I->getOperand(1);
5345 if (!isa<Constant>(Op2) && TheLoop->isLoopInvariant(Op2) &&
5346 PSE.getSE()->isSCEVable(Op2->getType()) &&
5347 isa<SCEVConstant>(PSE.getSCEV(Op2))) {
5348 Op2 = cast<SCEVConstant>(PSE.getSCEV(Op2))->getValue();
5349 }
5350 auto Op2Info = TTI.getOperandInfo(Op2);
5351 if (Op2Info.Kind == TargetTransformInfo::OK_AnyValue &&
5352 shouldConsiderInvariant(Op2))
5353 Op2Info.Kind = TargetTransformInfo::OK_UniformValue;
5354
5355 SmallVector<const Value *, 4> Operands(I->operand_values());
5356 return TTI.getArithmeticInstrCost(
5357 I->getOpcode(), VectorTy, Config.CostKind,
5358 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
5359 Op2Info, Operands, I, TLI);
5360 }
5361 case Instruction::FNeg: {
5362 return TTI.getArithmeticInstrCost(
5363 I->getOpcode(), VectorTy, Config.CostKind,
5364 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
5365 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
5366 I->getOperand(0), I);
5367 }
5368 case Instruction::Select: {
5369 SelectInst *SI = cast<SelectInst>(I);
5370 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
5371 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
5372
5373 const Value *Op0, *Op1;
5374 using namespace llvm::PatternMatch;
5375 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
5376 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
5377 // select x, y, false --> x & y
5378 // select x, true, y --> x | y
5379 const auto [Op1VK, Op1VP] = TTI::getOperandInfo(Op0);
5380 const auto [Op2VK, Op2VP] = TTI::getOperandInfo(Op1);
5381 assert(Op0->getType()->getScalarSizeInBits() == 1 &&
5382 Op1->getType()->getScalarSizeInBits() == 1);
5383
5384 return TTI.getArithmeticInstrCost(
5385 match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And,
5386 VectorTy, Config.CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, {Op0, Op1},
5387 I);
5388 }
5389
5390 Type *CondTy = SI->getCondition()->getType();
5391 if (!ScalarCond)
5392 CondTy = VectorType::get(CondTy, VF);
5393
5394 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
5395 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
5396 Pred = Cmp->getPredicate();
5397 return TTI.getCmpSelInstrCost(
5398 I->getOpcode(), VectorTy, CondTy, Pred, Config.CostKind,
5399 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, I);
5400 }
5401 case Instruction::ICmp:
5402 case Instruction::FCmp: {
5403 Type *ValTy = I->getOperand(0)->getType();
5404
5405 if (canTruncateToMinimalBitwidth(I, VF)) {
5406 [[maybe_unused]] Instruction *Op0AsInstruction =
5407 dyn_cast<Instruction>(I->getOperand(0));
5408 assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) ||
5409 InstrMinBWs == MinBWs.lookup(Op0AsInstruction)) &&
5410 "if both the operand and the compare are marked for "
5411 "truncation, they must have the same bitwidth");
5412 ValTy = IntegerType::get(ValTy->getContext(), InstrMinBWs);
5413 }
5414
5415 VectorTy = toVectorTy(ValTy, VF);
5416 return TTI.getCmpSelInstrCost(
5417 I->getOpcode(), VectorTy, CmpInst::makeCmpResultType(VectorTy),
5418 cast<CmpInst>(I)->getPredicate(), Config.CostKind,
5419 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, I);
5420 }
5421 case Instruction::Store:
5422 case Instruction::Load: {
5423 ElementCount Width = VF;
5424 if (Width.isVector()) {
5425 InstWidening Decision = getWideningDecision(I, Width);
5426 assert(Decision != CM_Unknown &&
5427 "CM decision should be taken at this point");
5428 if (getWideningCost(I, Width) == InstructionCost::getInvalid())
5429 return InstructionCost::getInvalid();
5430 if (Decision == CM_Scalarize)
5431 Width = ElementCount::getFixed(1);
5432 }
5433 VectorTy = toVectorTy(getLoadStoreType(I), Width);
5434 return getMemoryInstructionCost(I, VF);
5435 }
5436 case Instruction::BitCast:
5437 if (I->getType()->isPointerTy())
5438 return 0;
5439 [[fallthrough]];
5440 case Instruction::ZExt:
5441 case Instruction::SExt:
5442 case Instruction::FPToUI:
5443 case Instruction::FPToSI:
5444 case Instruction::FPExt:
5445 case Instruction::PtrToInt:
5446 case Instruction::IntToPtr:
5447 case Instruction::SIToFP:
5448 case Instruction::UIToFP:
5449 case Instruction::Trunc:
5450 case Instruction::FPTrunc: {
5451 // Computes the CastContextHint from a Load/Store instruction.
5452 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
5454 "Expected a load or a store!");
5455
5456 if (VF.isScalar() || !TheLoop->contains(I))
5458
5459 switch (getWideningDecision(I, VF)) {
5471 llvm_unreachable("Instr did not go through cost modelling?");
5474 llvm_unreachable_internal("Instr has invalid widening decision");
5475 }
5476
5477 llvm_unreachable("Unhandled case!");
5478 };
5479
5480 unsigned Opcode = I->getOpcode();
5481 TTI::CastContextHint CCH = TTI::CastContextHint::None;
5482 // For Trunc, the context is the only user, which must be a StoreInst.
5483 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
5484 if (I->hasOneUse())
5485 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
5486 CCH = ComputeCCH(Store);
5487 }
5488 // For Z/Sext, the context is the operand, which must be a LoadInst.
5489 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
5490 Opcode == Instruction::FPExt) {
5491 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
5492 CCH = ComputeCCH(Load);
5493 }
5494
5495 // We optimize the truncation of induction variables having constant
5496 // integer steps. The cost of these truncations is the same as the scalar
5497 // operation.
5498 if (isOptimizableIVTruncate(I, VF)) {
5499 auto *Trunc = cast<TruncInst>(I);
5500 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
5501 Trunc->getSrcTy(), CCH, Config.CostKind,
5502 Trunc);
5503 }
5504
5505 // Detect reduction patterns
5506 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
5507 return *RedCost;
5508
5509 Type *SrcScalarTy = I->getOperand(0)->getType();
5510 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
5511 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
5512 SrcScalarTy = IntegerType::get(SrcScalarTy->getContext(),
5513 MinBWs.lookup(Op0AsInstruction));
5514 Type *SrcVecTy =
5515 VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
5516
5517 if (canTruncateToMinimalBitwidth(I, VF)) {
5518 // If the result type is <= the source type, there will be no extend
5519 // after truncating the users to the minimal required bitwidth.
5520 if (VectorTy->getScalarSizeInBits() <= SrcVecTy->getScalarSizeInBits() &&
5521 (I->getOpcode() == Instruction::ZExt ||
5522 I->getOpcode() == Instruction::SExt))
5523 return 0;
5524 }
5525
5526 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH,
5527 Config.CostKind, I);
5528 }
5529 case Instruction::Call:
5530 return getVectorCallCost(cast<CallInst>(I), VF);
5531 case Instruction::ExtractValue:
5532 return TTI.getInstructionCost(I, Config.CostKind);
5533 case Instruction::Alloca:
5534 // We cannot easily widen alloca to a scalable alloca, as
5535 // the result would need to be a vector of pointers.
5536 if (VF.isScalable())
5537 return InstructionCost::getInvalid();
5538 return TTI.getArithmeticInstrCost(Instruction::Mul, RetTy, Config.CostKind);
5539 case Instruction::Freeze:
5540 return TTI::TCC_Free;
5541 default:
5542 // This opcode is unknown. Assume that it is the same as 'mul'.
5543 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy,
5544 Config.CostKind);
5545 } // end of switch.
5546}
5547
5548void LoopVectorizationCostModel::collectValuesToIgnore() {
5549 // Ignore ephemeral values.
5550 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
5551
5552 SmallVector<Value *, 4> DeadInterleavePointerOps;
5553 SmallVector<Value *, 4> DeadOps;
5554
5555 // If a scalar epilogue is required, users outside the loop won't use
5556 // live-outs from the vector loop but from the scalar epilogue. Ignore them if
5557 // that is the case.
5558 bool RequiresScalarEpilogue = requiresScalarEpilogue(true);
5559 auto IsLiveOutDead = [this, RequiresScalarEpilogue](User *U) {
5560 return RequiresScalarEpilogue &&
5561 !TheLoop->contains(cast<Instruction>(U)->getParent());
5562 };
5563
5564 LoopBlocksDFS DFS(TheLoop);
5565 DFS.perform(LI);
5566 for (BasicBlock *BB : reverse(make_range(DFS.beginRPO(), DFS.endRPO())))
5567 for (Instruction &I : reverse(*BB)) {
5568 if (VecValuesToIgnore.contains(&I) || ValuesToIgnore.contains(&I))
5569 continue;
5570
5571 // Add instructions that would be trivially dead and are only used by
5572 // values already ignored to DeadOps to seed worklist.
5573 if (wouldInstructionBeTriviallyDead(&I, TLI) &&
5574 all_of(I.users(), [this, IsLiveOutDead](User *U) {
5575 return VecValuesToIgnore.contains(U) ||
5576 ValuesToIgnore.contains(U) || IsLiveOutDead(U);
5577 }))
5578 DeadOps.push_back(&I);
5579
5580 // For interleave groups, we only create a pointer for the start of the
5581 // interleave group. Queue up addresses of group members except the insert
5582 // position for further processing.
5583 if (isAccessInterleaved(&I)) {
5584 auto *Group = getInterleavedAccessGroup(&I);
5585 if (Group->getInsertPos() == &I)
5586 continue;
5587 Value *PointerOp = getLoadStorePointerOperand(&I);
5588 DeadInterleavePointerOps.push_back(PointerOp);
5589 }
5590
5591 // Queue branches for analysis. They are dead, if their successors only
5592 // contain dead instructions.
5593 if (isa<CondBrInst>(&I))
5594 DeadOps.push_back(&I);
5595 }
5596
5597 // Mark ops feeding interleave group members as free, if they are only used
5598 // by other dead computations.
5599 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
5600 auto *Op = dyn_cast<Instruction>(DeadInterleavePointerOps[I]);
5601 if (!Op || !TheLoop->contains(Op) || any_of(Op->users(), [this](User *U) {
5602 Instruction *UI = cast<Instruction>(U);
5603 return !VecValuesToIgnore.contains(U) &&
5604 (!isAccessInterleaved(UI) ||
5605 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
5606 }))
5607 continue;
5608 VecValuesToIgnore.insert(Op);
5609 append_range(DeadInterleavePointerOps, Op->operands());
5610 }
5611
5612 // Mark ops that would be trivially dead and are only used by ignored
5613 // instructions as free.
5614 BasicBlock *Header = TheLoop->getHeader();
5615
5616 // Returns true if the block contains only dead instructions. Such blocks will
5617 // be removed by VPlan-to-VPlan transforms and won't be considered by the
5618 // VPlan-based cost model, so skip them in the legacy cost-model as well.
5619 auto IsEmptyBlock = [this](BasicBlock *BB) {
5620 return all_of(*BB, [this](Instruction &I) {
5621 return ValuesToIgnore.contains(&I) || VecValuesToIgnore.contains(&I) ||
5623 });
5624 };
5625 for (unsigned I = 0; I != DeadOps.size(); ++I) {
5626 auto *Op = dyn_cast<Instruction>(DeadOps[I]);
5627
5628 // Check if the branch should be considered dead.
5629 if (auto *Br = dyn_cast_or_null<CondBrInst>(Op)) {
5630 BasicBlock *ThenBB = Br->getSuccessor(0);
5631 BasicBlock *ElseBB = Br->getSuccessor(1);
5632 // Don't consider branches leaving the loop for simplification.
5633 if (!TheLoop->contains(ThenBB) || !TheLoop->contains(ElseBB))
5634 continue;
5635 bool ThenEmpty = IsEmptyBlock(ThenBB);
5636 bool ElseEmpty = IsEmptyBlock(ElseBB);
5637 if ((ThenEmpty && ElseEmpty) ||
5638 (ThenEmpty && ThenBB->getSingleSuccessor() == ElseBB &&
5639 ElseBB->phis().empty()) ||
5640 (ElseEmpty && ElseBB->getSingleSuccessor() == ThenBB &&
5641 ThenBB->phis().empty())) {
5642 VecValuesToIgnore.insert(Br);
5643 DeadOps.push_back(Br->getCondition());
5644 }
5645 continue;
5646 }
5647
5648 // Skip any op that shouldn't be considered dead.
5649 if (!Op || !TheLoop->contains(Op) ||
5650 (isa<PHINode>(Op) && Op->getParent() == Header) ||
5651 !wouldInstructionBeTriviallyDead(Op, TLI) ||
5652 any_of(Op->users(), [this, IsLiveOutDead](User *U) {
5653 return !VecValuesToIgnore.contains(U) &&
5654 !ValuesToIgnore.contains(U) && !IsLiveOutDead(U);
5655 }))
5656 continue;
5657
5658 // If all of Op's users are in ValuesToIgnore, add it to ValuesToIgnore
5659 // which applies for both scalar and vector versions. Otherwise it is only
5660 // dead in vector versions, so only add it to VecValuesToIgnore.
5661 if (all_of(Op->users(),
5662 [this](User *U) { return ValuesToIgnore.contains(U); }))
5663 ValuesToIgnore.insert(Op);
5664
5665 VecValuesToIgnore.insert(Op);
5666 append_range(DeadOps, Op->operands());
5667 }
5668
5669 // Ignore type-promoting instructions we identified during reduction
5670 // detection.
5671 for (const auto &Reduction : Legal->getReductionVars()) {
5672 const RecurrenceDescriptor &RedDes = Reduction.second;
5673 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
5674 VecValuesToIgnore.insert_range(Casts);
5675 }
5676 // Ignore type-casting instructions we identified during induction
5677 // detection.
5678 for (const auto &Induction : Legal->getInductionVars()) {
5679 const InductionDescriptor &IndDes = Induction.second;
5680 VecValuesToIgnore.insert_range(IndDes.getCastInsts());
5681 }
5682}
5683
5684void LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
5685 CM.collectValuesToIgnore();
5686 Config.collectElementTypesForWidening(&CM.ValuesToIgnore);
5687
5688 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
5689 if (!MaxFactors) // Cases that should not be vectorized or interleaved.
5690 return;
5691
5692 if (!OrigLoop->isInnermost()) {
5693 // For outer loops, computeMaxVF returns a single non-scalar VF; build a
5694 // plan for only that VF.
5695 ElementCount VF =
5696 MaxFactors.FixedVF ? MaxFactors.FixedVF : MaxFactors.ScalableVF;
5697 buildVPlans(VF, VF);
5699 return;
5700 }
5701
5702 // Compute the minimal bitwidths required for integer operations in the loop
5703 // for later use by the cost model.
5704 Config.computeMinimalBitwidths();
5705
5706 // Invalidate interleave groups if all blocks of loop will be predicated.
5707 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
5708 !useMaskedInterleavedAccesses(TTI)) {
5709 LLVM_DEBUG(
5710 dbgs()
5711 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
5712 "which requires masked-interleaved support.\n");
5713 if (CM.InterleaveInfo.invalidateGroups())
5714 // Invalidating interleave groups also requires invalidating all decisions
5715 // based on them, which includes widening decisions and uniform and scalar
5716 // values.
5717 CM.invalidateCostModelingDecisions();
5718 }
5719
5720 if (CM.foldTailByMasking())
5721 Legal->prepareToFoldTailByMasking();
5722
5723 ElementCount MaxUserVF =
5724 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
5725 if (UserVF) {
5726 if (!ElementCount::isKnownLE(UserVF, MaxUserVF)) {
5728 "UserVF ignored because it may be larger than the maximal safe VF",
5729 "InvalidUserVF", ORE, OrigLoop);
5730 } else {
5732 "VF needs to be a power of two");
5733 // Collect the instructions (and their associated costs) that will be more
5734 // profitable to scalarize.
5735 Config.collectInLoopReductions();
5736 CM.collectNonVectorizedAndSetWideningDecisions(UserVF);
5737 ElementCount EpilogueUserVF =
5739 if (EpilogueUserVF.isVector() &&
5740 ElementCount::isKnownLT(EpilogueUserVF, UserVF)) {
5741 CM.collectNonVectorizedAndSetWideningDecisions(EpilogueUserVF);
5742 buildVPlans(EpilogueUserVF, EpilogueUserVF);
5743 }
5744 buildVPlans(UserVF, UserVF);
5745 if (!VPlans.empty() && VPlans.back()->getSingleVF() == UserVF) {
5746 // For scalar VF, skip VPlan cost check as VPlan cost is designed for
5747 // vector VFs only.
5748 if (UserVF.isScalar() ||
5749 cost(*VPlans.back(), UserVF, /*RU=*/nullptr).isValid()) {
5750 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
5752 return;
5753 }
5754 }
5755 VPlans.clear();
5756 reportVectorizationInfo("UserVF ignored because of invalid costs.",
5757 "InvalidCost", ORE, OrigLoop);
5758 }
5759 }
5760
5761 // Collect the Vectorization Factor Candidates.
5762 SmallVector<ElementCount> VFCandidates;
5763 for (auto VF = ElementCount::getFixed(1);
5764 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
5765 VFCandidates.push_back(VF);
5766 for (auto VF = ElementCount::getScalable(1);
5767 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
5768 VFCandidates.push_back(VF);
5769
5770 Config.collectInLoopReductions();
5771 for (const auto &VF : VFCandidates) {
5772 // Collect Uniform and Scalar instructions after vectorization with VF.
5773 CM.collectNonVectorizedAndSetWideningDecisions(VF);
5774 }
5775
5776 buildVPlans(ElementCount::getFixed(1), MaxFactors.FixedVF);
5777 buildVPlans(ElementCount::getScalable(1), MaxFactors.ScalableVF);
5778
5780}
5781
5782InstructionCost VPCostContext::getLegacyCost(Instruction *UI,
5783 ElementCount VF) const {
5784 InstructionCost Cost = CM.getInstructionCost(UI, VF);
5785 if (Cost.isValid() && ForceTargetInstructionCost.getNumOccurrences())
5786 return InstructionCost(ForceTargetInstructionCost);
5787 return Cost;
5788}
5789
5790bool VPCostContext::skipCostComputation(Instruction *UI, bool IsVector) const {
5791 return CM.ValuesToIgnore.contains(UI) ||
5792 (IsVector && CM.VecValuesToIgnore.contains(UI)) ||
5793 SkipCostComputation.contains(UI);
5794}
5795
5797 return CM.getPredBlockCostDivisor(CostKind, BB);
5798}
5799
5801 return CM.isScalarWithPredication(I, VF) ||
5802 CM.isUniformAfterVectorization(I, VF) || CM.isForcedScalar(I, VF) ||
5803 (VF.isVector() && CM.isProfitableToScalarize(I, VF));
5804}
5805
5807 return CM.isMaskRequired(I);
5808}
5809
5810std::optional<VPCostContext::CallWideningKind>
5812 if (VF.isScalar())
5813 return std::nullopt;
5814 switch (CM.getCallWideningDecision(CI, VF).Kind) {
5821 default:
5822 return std::nullopt;
5823 }
5824}
5825
5827LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
5828 VPCostContext &CostCtx) const {
5829 InstructionCost Cost;
5830 // Cost modeling for inductions is inaccurate in the legacy cost model
5831 // compared to the recipes that are generated. To match here initially during
5832 // VPlan cost model bring up directly use the induction costs from the legacy
5833 // cost model. Note that we do this as pre-processing; the VPlan may not have
5834 // any recipes associated with the original induction increment instruction
5835 // and may replace truncates with VPWidenIntOrFpInductionRecipe. We precompute
5836 // the cost of induction phis and increments (both that are represented by
5837 // recipes and those that are not), to avoid distinguishing between them here,
5838 // and skip all recipes that represent induction phis and increments (the
5839 // former case) later on, if they exist, to avoid counting them twice.
5840 // Similarly we pre-compute the cost of any optimized truncates.
5841 // TODO: Switch to more accurate costing based on VPlan.
5842 for (const auto &[IV, IndDesc] : Legal->getInductionVars()) {
5843 Instruction *IVInc = cast<Instruction>(
5844 IV->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
5845 SmallVector<Instruction *> IVInsts = {IVInc};
5846 for (unsigned I = 0; I != IVInsts.size(); I++) {
5847 for (Value *Op : IVInsts[I]->operands()) {
5848 auto *OpI = dyn_cast<Instruction>(Op);
5849 if (Op == IV || !OpI || !OrigLoop->contains(OpI) || !Op->hasOneUse())
5850 continue;
5851 IVInsts.push_back(OpI);
5852 }
5853 }
5854 IVInsts.push_back(IV);
5855 for (User *U : IV->users()) {
5856 auto *CI = cast<Instruction>(U);
5857 if (!CostCtx.CM.isOptimizableIVTruncate(CI, VF))
5858 continue;
5859 IVInsts.push_back(CI);
5860 }
5861
5862 // If the vector loop gets executed exactly once with the given VF, ignore
5863 // the costs of comparison and induction instructions, as they'll get
5864 // simplified away.
5865 // TODO: Remove this code after stepping away from the legacy cost model and
5866 // adding code to simplify VPlans before calculating their costs.
5867 auto TC = getSmallConstantTripCount(PSE.getSE(), OrigLoop);
5868 if (TC == VF && !CM.foldTailByMasking())
5869 addFullyUnrolledInstructionsToIgnore(OrigLoop, Legal->getInductionVars(),
5870 CostCtx.SkipCostComputation);
5871
5872 for (Instruction *IVInst : IVInsts) {
5873 if (CostCtx.skipCostComputation(IVInst, VF.isVector()))
5874 continue;
5875 InstructionCost InductionCost = CostCtx.getLegacyCost(IVInst, VF);
5876 LLVM_DEBUG({
5877 dbgs() << "Cost of " << InductionCost << " for VF " << VF
5878 << ": induction instruction " << *IVInst << "\n";
5879 });
5880 Cost += InductionCost;
5881 CostCtx.SkipCostComputation.insert(IVInst);
5882 }
5883 }
5884
5885 /// Compute the cost of all exiting conditions of the loop using the legacy
5886 /// cost model. This is to match the legacy behavior, which adds the cost of
5887 /// all exit conditions. Note that this over-estimates the cost, as there will
5888 /// be a single condition to control the vector loop.
5889 SmallVector<BasicBlock *> Exiting;
5890 CM.TheLoop->getExitingBlocks(Exiting);
5891 SetVector<Instruction *> ExitInstrs;
5892 // Collect all exit conditions.
5893 for (BasicBlock *EB : Exiting) {
5894 auto *Term = dyn_cast<CondBrInst>(EB->getTerminator());
5895 if (!Term || CostCtx.skipCostComputation(Term, VF.isVector()))
5896 continue;
5897 if (auto *CondI = dyn_cast<Instruction>(Term->getOperand(0))) {
5898 ExitInstrs.insert(CondI);
5899 }
5900 }
5901 // Compute the cost of all instructions only feeding the exit conditions.
5902 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
5903 Instruction *CondI = ExitInstrs[I];
5904 if (!OrigLoop->contains(CondI) ||
5905 !CostCtx.SkipCostComputation.insert(CondI).second)
5906 continue;
5907 InstructionCost CondICost = CostCtx.getLegacyCost(CondI, VF);
5908 LLVM_DEBUG({
5909 dbgs() << "Cost of " << CondICost << " for VF " << VF
5910 << ": exit condition instruction " << *CondI << "\n";
5911 });
5912 Cost += CondICost;
5913 for (Value *Op : CondI->operands()) {
5914 auto *OpI = dyn_cast<Instruction>(Op);
5915 if (!OpI || CostCtx.skipCostComputation(OpI, VF.isVector()) ||
5916 any_of(OpI->users(), [&ExitInstrs](User *U) {
5917 return !ExitInstrs.contains(cast<Instruction>(U));
5918 }))
5919 continue;
5920 ExitInstrs.insert(OpI);
5921 }
5922 }
5923
5924 // Pre-compute the costs for branches except for the backedge, as the number
5925 // of replicate regions in a VPlan may not directly match the number of
5926 // branches, which would lead to different decisions.
5927 // TODO: Compute cost of branches for each replicate region in the VPlan,
5928 // which is more accurate than the legacy cost model.
5929 for (BasicBlock *BB : OrigLoop->blocks()) {
5930 if (CostCtx.skipCostComputation(BB->getTerminator(), VF.isVector()))
5931 continue;
5932 CostCtx.SkipCostComputation.insert(BB->getTerminator());
5933 if (BB == OrigLoop->getLoopLatch())
5934 continue;
5935 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
5936 Cost += BranchCost;
5937 }
5938
5939 // Don't apply special costs when instruction cost is forced to make sure the
5940 // forced cost is used for each recipe.
5941 if (ForceTargetInstructionCost.getNumOccurrences())
5942 return Cost;
5943
5944 // Pre-compute costs for instructions that are forced-scalar or profitable to
5945 // scalarize. For most such instructions, their scalarization costs are
5946 // accounted for here using the legacy cost model. However, some opcodes
5947 // are excluded from these precomputed scalarization costs and are instead
5948 // modeled later by the VPlan cost model (see UseVPlanCostModel below).
5949 for (Instruction *ForcedScalar : CM.ForcedScalars[VF]) {
5950 if (CostCtx.skipCostComputation(ForcedScalar, VF.isVector()))
5951 continue;
5952 CostCtx.SkipCostComputation.insert(ForcedScalar);
5953 InstructionCost ForcedCost = CostCtx.getLegacyCost(ForcedScalar, VF);
5954 LLVM_DEBUG({
5955 dbgs() << "Cost of " << ForcedCost << " for VF " << VF
5956 << ": forced scalar " << *ForcedScalar << "\n";
5957 });
5958 Cost += ForcedCost;
5959 }
5960
5961 auto UseVPlanCostModel = [](Instruction *I) -> bool {
5962 switch (I->getOpcode()) {
5963 case Instruction::SDiv:
5964 case Instruction::UDiv:
5965 case Instruction::SRem:
5966 case Instruction::URem:
5967 return true;
5968 default:
5969 return false;
5970 }
5971 };
5972 for (const auto &[Scalarized, ScalarCost] : CM.InstsToScalarize[VF]) {
5973 if (UseVPlanCostModel(Scalarized) ||
5974 CostCtx.skipCostComputation(Scalarized, VF.isVector()))
5975 continue;
5976 CostCtx.SkipCostComputation.insert(Scalarized);
5977 LLVM_DEBUG({
5978 dbgs() << "Cost of " << ScalarCost << " for VF " << VF
5979 << ": profitable to scalarize " << *Scalarized << "\n";
5980 });
5981 Cost += ScalarCost;
5982 }
5983
5984 return Cost;
5985}
5986
5987InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan, ElementCount VF,
5988 VPRegisterUsage *RU) const {
5989 VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, Config.CostKind, PSE,
5990 OrigLoop);
5991 InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx);
5992
5993 // Now compute and add the VPlan-based cost.
5994 Cost += Plan.cost(VF, CostCtx);
5995
5996 // Add the cost of spills due to excess register usage
5997 if (RU && Config.shouldConsiderRegPressureForVF(VF))
5998 Cost += RU->spillCost(CM.TTI, Config.CostKind, ForceTargetNumVectorRegs);
5999
6000#ifndef NDEBUG
6001 unsigned EstimatedWidth =
6002 estimateElementCount(VF, Config.getVScaleForTuning());
6003 LLVM_DEBUG(dbgs() << "Cost for VF " << VF << ": " << Cost
6004 << " (Estimated cost per lane: ");
6005 if (Cost.isValid()) {
6006 double CostPerLane = double(Cost.getValue()) / EstimatedWidth;
6007 LLVM_DEBUG(dbgs() << format("%.1f", CostPerLane));
6008 } else /* No point dividing an invalid cost - it will still be invalid */
6009 LLVM_DEBUG(dbgs() << "Invalid");
6010 LLVM_DEBUG(dbgs() << ")\n");
6011#endif
6012 return Cost;
6013}
6014
6015std::pair<VectorizationFactor, VPlan *>
6017 if (VPlans.empty())
6018 return {VectorizationFactor::Disabled(), nullptr};
6019 // If there is a single VPlan with a single VF, return it directly.
6020 VPlan &FirstPlan = *VPlans[0];
6021
6022 ElementCount UserVF = Hints.getWidth();
6023 if (VPlans.size() == 1) {
6024 // For outer loops, the plan has a single vector VF determined by the
6025 // heuristic.
6026 assert((FirstPlan.hasScalarVFOnly() || hasPlanWithVF(UserVF) ||
6027 FirstPlan.isOuterLoop()) &&
6028 "must have a single scalar VF, UserVF or an outer loop");
6029 return {VectorizationFactor(FirstPlan.getSingleVF(), 0, 0), &FirstPlan};
6030 }
6031
6032 if (hasPlanWithVF(UserVF) && EpilogueVectorizationForceVF > 1) {
6033 assert(VPlans.size() == 2 && "Must have exactly 2 VPlans built");
6034 assert(VPlans[0]->getSingleVF() ==
6036 "expected first plan to be for the forced epilogue VF");
6037 assert(VPlans[1]->getSingleVF() == UserVF &&
6038 "expected second plan to be for the forced UserVF");
6039 return {VectorizationFactor(UserVF, 0, 0), VPlans[1].get()};
6040 }
6041
6042 LLVM_DEBUG(dbgs() << "LV: Computing best VF using cost kind: "
6043 << (Config.CostKind == TTI::TCK_RecipThroughput
6044 ? "Reciprocal Throughput\n"
6045 : Config.CostKind == TTI::TCK_Latency
6046 ? "Instruction Latency\n"
6047 : Config.CostKind == TTI::TCK_CodeSize ? "Code Size\n"
6048 : Config.CostKind == TTI::TCK_SizeAndLatency
6049 ? "Code Size and Latency\n"
6050 : "Unknown\n"));
6051
6052 ElementCount ScalarVF = ElementCount::getFixed(1);
6053 assert(FirstPlan.hasVF(ScalarVF) &&
6054 "More than a single plan/VF w/o any plan having scalar VF");
6055
6056 // TODO: Compute scalar cost using VPlan-based cost model.
6057 InstructionCost ScalarCost = CM.expectedCost(ScalarVF);
6058 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ScalarCost << ".\n");
6059 VectorizationFactor ScalarFactor(ScalarVF, ScalarCost, ScalarCost);
6060 VectorizationFactor BestFactor = ScalarFactor;
6061
6062 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
6063 if (ForceVectorization) {
6064 // Ignore scalar width, because the user explicitly wants vectorization.
6065 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
6066 // evaluation.
6067 BestFactor.Cost = InstructionCost::getMax();
6068 }
6069
6070 VPlan *PlanForBestVF = &FirstPlan;
6071
6072 for (auto &P : VPlans) {
6073 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
6074 P->vectorFactors().end());
6075
6077 bool ConsiderRegPressure = any_of(VFs, [this](ElementCount VF) {
6078 return Config.shouldConsiderRegPressureForVF(VF);
6079 });
6081 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
6082
6083 for (unsigned I = 0; I < VFs.size(); I++) {
6084 ElementCount VF = VFs[I];
6085 if (VF.isScalar())
6086 continue;
6087 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
6088 LLVM_DEBUG(
6089 dbgs()
6090 << "LV: Not considering vector loop of width " << VF
6091 << " because it will not generate any vector instructions.\n");
6092 continue;
6093 }
6094 if (Config.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
6095 LLVM_DEBUG(
6096 dbgs()
6097 << "LV: Not considering vector loop of width " << VF
6098 << " because it would cause replicated blocks to be generated,"
6099 << " which isn't allowed when optimizing for size.\n");
6100 continue;
6101 }
6102
6103 InstructionCost Cost =
6104 cost(*P, VF, ConsiderRegPressure ? &RUs[I] : nullptr);
6105 VectorizationFactor CurrentFactor(VF, Cost, ScalarCost);
6106
6107 if (isMoreProfitable(CurrentFactor, BestFactor, P->hasScalarTail())) {
6108 BestFactor = CurrentFactor;
6109 PlanForBestVF = P.get();
6110 }
6111
6112 // If profitable add it to ProfitableVF list.
6113 if (isMoreProfitable(CurrentFactor, ScalarFactor, P->hasScalarTail()))
6114 ProfitableVFs.push_back(CurrentFactor);
6115 }
6116 }
6117
6118 VPlan &BestPlan = *PlanForBestVF;
6119
6120 assert((BestFactor.Width.isScalar() || BestFactor.ScalarCost > 0) &&
6121 "when vectorizing, the scalar cost must be computed.");
6122
6123 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << BestFactor.Width << ".\n");
6124 return {BestFactor, &BestPlan};
6125}
6126
6128 ElementCount BestVF, unsigned BestUF, VPlan &BestVPlan,
6130 EpilogueVectorizationKind EpilogueVecKind) {
6131 assert(BestVPlan.hasVF(BestVF) &&
6132 "Trying to execute plan with unsupported VF");
6133 assert(BestVPlan.hasUF(BestUF) &&
6134 "Trying to execute plan with unsupported UF");
6135 if (BestVPlan.hasEarlyExit())
6136 ++LoopsEarlyExitVectorized;
6137
6139 BestVPlan, *PSE.getSE(), CM.TTI, Config.CostKind, BestVF, BestUF,
6140 CM.ValuesToIgnore);
6141 // TODO: Move to VPlan transform stage once the transition to the VPlan-based
6142 // cost model is complete for better cost estimates.
6143 RUN_VPLAN_PASS(VPlanTransforms::unrollByUF, BestVPlan, BestUF);
6147 bool HasBranchWeights =
6148 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator());
6149 if (HasBranchWeights) {
6150 std::optional<unsigned> VScale = Config.getVScaleForTuning();
6152 BestVPlan, BestVF, VScale);
6153 }
6154
6155 // Retrieve VectorPH now, while it's easier, i.e. while the VPlan still has Regions.
6156 VPBasicBlock *VectorPH = cast<VPBasicBlock>(BestVPlan.getVectorPreheader());
6157
6159 BestVF, BestUF, PSE);
6160 RUN_VPLAN_PASS(VPlanTransforms::optimizeForVFAndUF, BestVPlan, BestVF, BestUF,
6161 PSE);
6163 if (EpilogueVecKind == EpilogueVectorizationKind::None)
6165 /*OnlyLatches=*/false);
6166 if (BestVPlan.getEntry()->getSingleSuccessor() ==
6167 BestVPlan.getScalarPreheader()) {
6168 // TODO: The vector loop would be dead, should not even try to vectorize.
6169 ORE->emit([&]() {
6170 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationDead",
6171 OrigLoop->getStartLoc(),
6172 OrigLoop->getHeader())
6173 << "Created vector loop never executes due to insufficient trip "
6174 "count.";
6175 });
6177 }
6178
6180
6182 // Convert the exit condition to AVLNext == 0 for EVL tail folded loops.
6184 // Regions are dissolved after optimizing for VF and UF, which completely
6185 // removes unneeded loop regions first.
6187 // Expand BranchOnTwoConds after dissolution, when latch has direct access to
6188 // its successors.
6190 // Convert loops with variable-length stepping after regions are dissolved.
6192 // Remove dead back-edges for single-iteration loops with BranchOnCond(true).
6193 // Only process loop latches to avoid removing edges from the middle block,
6194 // which may be needed for epilogue vectorization.
6195 VPlanTransforms::removeBranchOnConst(BestVPlan, /*OnlyLatches=*/true);
6197 std::optional<uint64_t> MaxRuntimeStep;
6198 if (auto MaxVScale = getMaxVScale(*CM.TheFunction, CM.TTI))
6199 MaxRuntimeStep = uint64_t(*MaxVScale) * BestVF.getKnownMinValue() * BestUF;
6201 BestVPlan, VectorPH, CM.foldTailByMasking(),
6202 CM.requiresScalarEpilogue(BestVF.isVector()), &BestVPlan.getVFxUF(),
6203 MaxRuntimeStep);
6204 VPlanTransforms::materializeFactors(BestVPlan, VectorPH, BestVF);
6205 VPlanTransforms::cse(BestVPlan);
6207 VPlanTransforms::simplifyKnownEVL(BestVPlan, BestVF, PSE);
6208
6209 // 0. Generate SCEV-dependent code in the entry, including TripCount, before
6210 // making any changes to the CFG.
6211 DenseMap<const SCEV *, Value *> ExpandedSCEVs =
6212 VPlanTransforms::expandSCEVs(BestVPlan, *PSE.getSE());
6213
6214 // Perform the actual loop transformation.
6215 VPTransformState State(&TTI, BestVF, LI, DT, ILV.AC, ILV.Builder, &BestVPlan,
6216 OrigLoop->getParentLoop(),
6217 Legal->getWidestInductionType());
6218
6219#ifdef EXPENSIVE_CHECKS
6220 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
6221#endif
6222
6223 // 1. Set up the skeleton for vectorization, including vector pre-header and
6224 // middle block. The vector loop is created during VPlan execution.
6225 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
6226 if (VPBasicBlock *ScalarPH = BestVPlan.getScalarPreheader())
6227 replaceVPBBWithIRVPBB(ScalarPH, State.CFG.PrevBB->getSingleSuccessor(),
6228 &BestVPlan);
6230
6231 assert(verifyVPlanIsValid(BestVPlan) && "final VPlan is invalid");
6232
6233 // After vectorization, the exit blocks of the original loop will have
6234 // additional predecessors. Invalidate SCEVs for the exit phis in case SE
6235 // looked through single-entry phis.
6236 ScalarEvolution &SE = *PSE.getSE();
6237 for (VPIRBasicBlock *Exit : BestVPlan.getExitBlocks()) {
6238 if (!Exit->hasPredecessors())
6239 continue;
6240 for (VPRecipeBase &PhiR : Exit->phis())
6242 &cast<VPIRPhi>(PhiR).getIRPhi());
6243 }
6244 // Forget the original loop and block dispositions.
6245 SE.forgetLoop(OrigLoop);
6247
6249
6250 //===------------------------------------------------===//
6251 //
6252 // Notice: any optimization or new instruction that go
6253 // into the code below should also be implemented in
6254 // the cost-model.
6255 //
6256 //===------------------------------------------------===//
6257
6258 // Retrieve loop information before executing the plan, which may remove the
6259 // original loop, if it becomes unreachable.
6260 MDNode *LID = OrigLoop->getLoopID();
6261 unsigned OrigLoopInvocationWeight = 0;
6262 std::optional<unsigned> OrigAverageTripCount =
6263 getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
6264
6265 BestVPlan.execute(&State);
6266
6267 // 2.6. Maintain Loop Hints
6268 // Keep all loop hints from the original loop on the vector loop (we'll
6269 // replace the vectorizer-specific hints below).
6270 VPBasicBlock *HeaderVPBB = vputils::getFirstLoopHeader(BestVPlan, State.VPDT);
6271 // Add metadata to disable runtime unrolling a scalar loop when there
6272 // are no runtime checks about strides and memory. A scalar loop that is
6273 // rarely used is not worth unrolling.
6274 bool DisableRuntimeUnroll = !ILV.RTChecks.hasChecks() && !BestVF.isScalar();
6276 HeaderVPBB ? LI->getLoopFor(State.CFG.VPBB2IRBB.lookup(HeaderVPBB))
6277 : nullptr,
6278 HeaderVPBB, BestVPlan,
6279 EpilogueVecKind == EpilogueVectorizationKind::Epilogue, LID,
6280 OrigAverageTripCount, OrigLoopInvocationWeight,
6281 estimateElementCount(BestVF * BestUF, Config.getVScaleForTuning()),
6282 DisableRuntimeUnroll);
6283
6284 // 3. Fix the vectorized code: take care of header phi's, live-outs,
6285 // predication, updating analyses.
6286 ILV.fixVectorizedLoop(State);
6287
6289
6290 return ExpandedSCEVs;
6291}
6292
6293//===--------------------------------------------------------------------===//
6294// EpilogueVectorizerMainLoop
6295//===--------------------------------------------------------------------===//
6296
6298 LLVM_DEBUG({
6299 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
6300 << "Main Loop VF:" << EPI.MainLoopVF
6301 << ", Main Loop UF:" << EPI.MainLoopUF
6302 << ", Epilogue Loop VF:" << EPI.EpilogueVF
6303 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
6304 });
6305}
6306
6309 dbgs() << "intermediate fn:\n"
6310 << *OrigLoop->getHeader()->getParent() << "\n";
6311 });
6312}
6313
6314//===--------------------------------------------------------------------===//
6315// EpilogueVectorizerEpilogueLoop
6316//===--------------------------------------------------------------------===//
6317
6318/// This function creates a new scalar preheader, using the previous one as
6319 /// entry block to the epilogue VPlan. The minimum iteration check is
6320 /// represented in VPlan.
6322 BasicBlock *NewScalarPH = createScalarPreheader("vec.epilog.");
6323 BasicBlock *OriginalScalarPH = NewScalarPH->getSinglePredecessor();
6324 OriginalScalarPH->setName("vec.epilog.iter.check");
6325 VPIRBasicBlock *NewEntry = Plan.createVPIRBasicBlock(OriginalScalarPH);
6326 VPBasicBlock *OldEntry = Plan.getEntry();
6327 for (auto &R : make_early_inc_range(*OldEntry)) {
6328 // Skip moving VPIRInstructions (including VPIRPhis), which are unmovable
6329 // by definition.
6330 if (isa<VPIRInstruction>(&R))
6331 continue;
6332 R.moveBefore(*NewEntry, NewEntry->end());
6333 }
6334
6335 VPBlockUtils::reassociateBlocks(OldEntry, NewEntry);
6336 Plan.setEntry(NewEntry);
6337 // OldEntry is now dead and will be cleaned up when the plan gets destroyed.
6338
6339 return OriginalScalarPH;
6340}
6341
6343 LLVM_DEBUG({
6344 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
6345 << "Epilogue Loop VF:" << EPI.EpilogueVF
6346 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
6347 });
6348}
6349
6352 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
6353 });
6354}
6355
6357 VFRange &Range) {
6358 assert((VPI->getOpcode() == Instruction::Load ||
6359 VPI->getOpcode() == Instruction::Store) &&
6360 "Must be called with either a load or store");
6362
6363 auto WillWiden = [&](ElementCount VF) -> bool {
6365 CM.getWideningDecision(I, VF);
6367 "CM decision should be taken at this point.");
6369 return true;
6370 if (CM.isScalarAfterVectorization(I, VF) ||
6371 CM.isProfitableToScalarize(I, VF))
6372 return false;
6374 };
6375
6377 return nullptr;
6378
6379 // If a mask is not required, drop it - use unmasked version for safe loads.
6380 // TODO: Determine if mask is needed in VPlan.
6381 VPValue *Mask = CM.isMaskRequired(I) ? VPI->getMask() : nullptr;
6382
6383 // Determine if the pointer operand of the access is either consecutive or
6384 // reverse consecutive.
6386 CM.getWideningDecision(I, Range.Start);
6388 bool Consecutive =
6390
6391 VPValue *Ptr = VPI->getOpcode() == Instruction::Load ? VPI->getOperand(0)
6392 : VPI->getOperand(1);
6393 if (Consecutive) {
6395 VPSingleDefRecipe *VectorPtr;
6396 if (Reverse) {
6397 // When folding the tail, we may compute an address that we don't compute in
6398 // the original scalar loop: drop the GEP no-wrap flags in this case.
6399 // Otherwise preserve existing flags without no-unsigned-wrap, as we will
6400 // emit negative indices.
6401 GEPNoWrapFlags ReverseFlags = CM.foldTailByMasking()
6403 : Flags.withoutNoUnsignedWrap();
6404 VectorPtr = new VPVectorEndPointerRecipe(
6405 Ptr, &Plan.getVF(), getLoadStoreType(I),
6406 /*Stride*/ -1, ReverseFlags, VPI->getDebugLoc());
6407 } else {
6408 VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I), Flags,
6409 VPI->getDebugLoc());
6410 }
6411 Builder.setInsertPoint(VPI);
6412 Builder.insert(VectorPtr);
6413 Ptr = VectorPtr;
6414 }
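// For illustration (simplified): a reverse-consecutive access such as
//   for (i = n - 1; i >= 0; --i) sum += a[i];
// is handled above by forming a pointer to the end of each vector chunk
// (stride -1 via VPVectorEndPointerRecipe), loading VF contiguous elements,
// and reversing the result below so the lanes match the scalar iteration
// order.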
6415
6416 if (Reverse && Mask)
6417 Mask = Builder.createNaryOp(VPInstruction::Reverse, Mask, I->getDebugLoc());
6418
6419 if (VPI->getOpcode() == Instruction::Load) {
6420 auto *Load = cast<LoadInst>(I);
6421 auto *LoadR = new VPWidenLoadRecipe(*Load, Ptr, Mask, Consecutive, *VPI,
6422 Load->getDebugLoc());
6423 if (Reverse) {
6424 Builder.insert(LoadR);
6425 return new VPInstruction(VPInstruction::Reverse, LoadR, {}, {},
6426 LoadR->getDebugLoc());
6427 }
6428 return LoadR;
6429 }
6430
6431 StoreInst *Store = cast<StoreInst>(I);
6432 VPValue *StoredVal = VPI->getOperand(0);
6433 if (Reverse)
6434 StoredVal = Builder.createNaryOp(VPInstruction::Reverse, StoredVal,
6435 Store->getDebugLoc());
6436 return new VPWidenStoreRecipe(*Store, Ptr, StoredVal, Mask, Consecutive, *VPI,
6437 Store->getDebugLoc());
6438}
6439
6441VPRecipeBuilder::tryToOptimizeInductionTruncate(VPInstruction *VPI,
6442 VFRange &Range) {
6443 auto *I = cast<TruncInst>(VPI->getUnderlyingInstr());
6444 // Optimize the special case where the source is a constant integer
6445 // induction variable. Notice that we can only optimize the 'trunc' case
6446 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
6447 // (c) other casts depend on pointer size.
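// For example (simplified IR), a pattern like
//   %iv = phi i64 [ 0, %ph ], [ %iv.next, %latch ]
//   %t  = trunc i64 %iv to i32
// can be widened directly as an i32 induction, instead of widening the i64
// induction and truncating every lane.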
6448
6449 // Determine whether \p K is a truncation based on an induction variable that
6450 // can be optimized.
6453 I),
6454 Range))
6455 return nullptr;
6456
6458 VPI->getOperand(0)->getDefiningRecipe());
6459 PHINode *Phi = WidenIV->getPHINode();
6460 VPIRValue *Start = WidenIV->getStartValue();
6461 const InductionDescriptor &IndDesc = WidenIV->getInductionDescriptor();
6462
6463 // Wrap flags from the original induction do not apply to the truncated type,
6464 // so do not propagate them.
6465 VPIRFlags Flags = VPIRFlags::WrapFlagsTy(false, false);
6466 VPValue *Step =
6469 Phi, Start, Step, &Plan.getVF(), IndDesc, I, Flags, VPI->getDebugLoc());
6470}
6471
6472bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
6474 "Instruction should have been handled earlier");
6475 // Instruction should be widened, unless it is scalar after vectorization,
6476 // scalarization is profitable or it is predicated.
6477 auto WillScalarize = [this, I](ElementCount VF) -> bool {
6478 return CM.isScalarAfterVectorization(I, VF) ||
6479 CM.isProfitableToScalarize(I, VF) ||
6480 CM.isScalarWithPredication(I, VF);
6481 };
6483 Range);
6484}
6485
6486VPRecipeWithIRFlags *VPRecipeBuilder::tryToWiden(VPInstruction *VPI) {
6487 auto *I = VPI->getUnderlyingInstr();
6488 switch (VPI->getOpcode()) {
6489 default:
6490 return nullptr;
6491 case Instruction::SDiv:
6492 case Instruction::UDiv:
6493 case Instruction::SRem:
6494 case Instruction::URem:
6495 // If not provably safe, use a masked intrinsic.
6496 if (CM.isPredicatedInst(I))
6497 return new VPWidenIntrinsicRecipe(
6499 I->getType(), {}, {}, VPI->getDebugLoc());
6500 [[fallthrough]];
6501 case Instruction::Add:
6502 case Instruction::And:
6503 case Instruction::AShr:
6504 case Instruction::FAdd:
6505 case Instruction::FCmp:
6506 case Instruction::FDiv:
6507 case Instruction::FMul:
6508 case Instruction::FNeg:
6509 case Instruction::FRem:
6510 case Instruction::FSub:
6511 case Instruction::ICmp:
6512 case Instruction::LShr:
6513 case Instruction::Mul:
6514 case Instruction::Or:
6515 case Instruction::Select:
6516 case Instruction::Shl:
6517 case Instruction::Sub:
6518 case Instruction::Xor:
6519 case Instruction::Freeze:
6520 return new VPWidenRecipe(*I, VPI->operandsWithoutMask(), *VPI, *VPI,
6521 VPI->getDebugLoc());
6522 case Instruction::ExtractValue: {
6524 auto *EVI = cast<ExtractValueInst>(I);
6525 assert(EVI->getNumIndices() == 1 && "Expected one extractvalue index");
6526 unsigned Idx = EVI->getIndices()[0];
6527 NewOps.push_back(Plan.getConstantInt(32, Idx));
6528 return new VPWidenRecipe(*I, NewOps, *VPI, *VPI, VPI->getDebugLoc());
6529 }
6530 };
6531}
6532
6534 if (VPI->getOpcode() != Instruction::Store)
6535 return nullptr;
6536
6537 auto HistInfo =
6538 Legal->getHistogramInfo(cast<StoreInst>(VPI->getUnderlyingInstr()));
6539 if (!HistInfo)
6540 return nullptr;
6541
6542 const HistogramInfo *HI = *HistInfo;
6543 // FIXME: Support other operations.
6544 unsigned Opcode = HI->Update->getOpcode();
6545 assert((Opcode == Instruction::Add || Opcode == Instruction::Sub) &&
6546 "Histogram update operation must be an Add or Sub");
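// For illustration (names made up), the kind of loop this recipe targets is
// roughly:
//   for (i = 0; i < n; ++i)
//     buckets[indices[i]] += 1;
// Plain widening of the load/add/store chain would be unsafe when the same
// index repeats within a vector, so the update is lowered to a histogram
// recipe that resolves such conflicts.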
6547
6549 // Bucket address.
6550 HGramOps.push_back(VPI->getOperand(1));
6551 // Increment value.
6552 HGramOps.push_back(Plan.getOrAddLiveIn(HI->Update->getOperand(1)));
6553
6554 // In case of predicated execution (due to tail-folding, or conditional
6555 // execution, or both), pass the relevant mask.
6556 if (CM.isMaskRequired(HI->Store))
6557 HGramOps.push_back(VPI->getMask());
6558
6559 return new VPHistogramRecipe(Opcode, HGramOps, VPI->getDebugLoc());
6560}
6561
6563 VPInstruction *VPI, VPBuilder &FinalRedStoresBuilder) {
6564 StoreInst *SI;
6565 if ((SI = dyn_cast<StoreInst>(VPI->getUnderlyingInstr())) &&
6566 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
6567 // Only create recipe for the final invariant store of the reduction.
6568 if (Legal->isInvariantStoreOfReduction(SI)) {
6569 VPValue *Val = VPI->getOperand(0);
6570 VPValue *Addr = VPI->getOperand(1);
6571 // We need to store the exiting value of the reduction, so use the blend
6572 // if tail folded.
6573 if (auto *Blend = vputils::findUserOf<VPBlendRecipe>(Val))
6574 Val = Blend;
6575 assert(
6576 vputils::findUserOf<VPReductionPHIRecipe>(Val)->getBackedgeValue() ==
6577 Val &&
6578 "Store isn't backedge value?");
6579 auto *Recipe = new VPReplicateRecipe(
6580 SI, {Val, Addr}, true /* IsUniform */, nullptr /*Mask*/, *VPI, *VPI,
6581 VPI->getDebugLoc());
6582 FinalRedStoresBuilder.insert(Recipe);
6583 }
6584 VPI->eraseFromParent();
6585 return true;
6586 }
6587
6588 return false;
6589}
6590
6592 VFRange &Range) {
6593 auto *I = VPI->getUnderlyingInstr();
6595 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
6596 Range);
6597
6598 bool IsPredicated = CM.isPredicatedInst(I);
6599
6600 // Even if the instruction is not marked as uniform, there are certain
6601 // intrinsic calls that can be effectively treated as such, so we check for
6602 // them here. Conservatively, we only do this for scalable vectors, since
6603 // for fixed-width VFs we can always fall back on full scalarization.
6604 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
6605 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
6606 case Intrinsic::assume:
6607 case Intrinsic::lifetime_start:
6608 case Intrinsic::lifetime_end:
6609 // For scalable vectors if one of the operands is variant then we still
6610 // want to mark as uniform, which will generate one instruction for just
6611 // the first lane of the vector. We can't scalarize the call in the same
6612 // way as for fixed-width vectors because we don't know how many lanes
6613 // there are.
6614 //
6615 // The reasons for doing it this way for scalable vectors are:
6616 // 1. For the assume intrinsic generating the instruction for the first
6617 // lane is still better than not generating any at all. For
6618 // example, the input may be a splat across all lanes.
6619 // 2. For the lifetime start/end intrinsics the pointer operand only
6620 // does anything useful when the input comes from a stack object,
6621 // which suggests it should always be uniform. For non-stack objects
6622 // the effect is to poison the object, which still allows us to
6623 // remove the call.
6624 IsUniform = true;
6625 break;
6626 default:
6627 break;
6628 }
6629 }
6630 VPValue *BlockInMask = nullptr;
6631 if (!IsPredicated) {
6632 // Finalize the recipe for Instr, first if it is not predicated.
6633 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
6634 } else {
6635 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
6636 // Instructions marked for predication are replicated and a mask operand is
6637 // added initially. Masked replicate recipes will later be placed under an
6638 // if-then construct to prevent side-effects. Generate recipes to compute
6639 // the block mask for this region.
6640 BlockInMask = VPI->getMask();
6641 }
6642
6643 // Note that there is some custom logic to mark some intrinsics as uniform
6644 // manually above for scalable vectors, which this assert needs to account for
6645 // as well.
6646 assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
6647 (Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
6648 "Should not predicate a uniform recipe");
6649 auto *Recipe =
6650 new VPReplicateRecipe(I, VPI->operandsWithoutMask(), IsUniform,
6651 BlockInMask, *VPI, *VPI, VPI->getDebugLoc());
6652 return Recipe;
6653}
6654
6657 VFRange &Range) {
6658 assert(!R->isPhi() && "phis must be handled earlier");
6659 // First, check for specific widening recipes that deal with optimizing
6660 // truncates and memory operations.
6661 auto *VPI = cast<VPInstruction>(R);
6662 assert(VPI->getOpcode() != Instruction::Call &&
6663 "Call should have been handled by makeCallWideningDecisions");
6664
6665 VPRecipeBase *Recipe;
6666 if (VPI->getOpcode() == Instruction::Trunc &&
6667 (Recipe = tryToOptimizeInductionTruncate(VPI, Range)))
6668 return Recipe;
6669
6670 // All widen recipes below deal only with VF > 1.
6672 [&](ElementCount VF) { return VF.isScalar(); }, Range))
6673 return nullptr;
6674
6675 Instruction *Instr = R->getUnderlyingInstr();
6676 assert(!is_contained({Instruction::Load, Instruction::Store},
6677 VPI->getOpcode()) &&
6678 "Should have been handled prior to this!");
6679
6680 if (!shouldWiden(Instr, Range))
6681 return nullptr;
6682
6683 if (VPI->getOpcode() == Instruction::GetElementPtr)
6684 return new VPWidenGEPRecipe(cast<GetElementPtrInst>(Instr),
6685 VPI->operandsWithoutMask(), *VPI,
6686 VPI->getDebugLoc());
6687
6688 if (Instruction::isCast(VPI->getOpcode())) {
6689 auto *CI = cast<CastInst>(Instr);
6690 auto *CastR = cast<VPInstructionWithType>(VPI);
6691 return new VPWidenCastRecipe(CI->getOpcode(), VPI->getOperand(0),
6692 CastR->getResultType(), CI, *VPI, *VPI,
6693 VPI->getDebugLoc());
6694 }
6695
6696 return tryToWiden(VPI);
6697}
6698
6699// To allow RUN_VPLAN_PASS to print the VPlan after VF/UF independent
6700// optimizations.
6702
6703void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
6704 ElementCount MaxVF) {
6705 if (ElementCount::isKnownGT(MinVF, MaxVF))
6706 return;
6707
6708 bool IsInnerLoop = OrigLoop->isInnermost();
6709
6710 // Set up loop versioning for inner loops with memory runtime checks.
6711 // Outer loops don't have LoopAccessInfo since canVectorizeMemory() is not
6712 // called for them.
6713 std::optional<LoopVersioning> LVer;
6714 if (IsInnerLoop) {
6715 const LoopAccessInfo *LAI = Legal->getLAI();
6716 LVer.emplace(*LAI, LAI->getRuntimePointerChecking()->getChecks(), OrigLoop,
6717 LI, DT, PSE.getSE());
6718 if (!LAI->getRuntimePointerChecking()->getChecks().empty() &&
6720 // Only use noalias metadata when using memory checks guaranteeing no
6721 // overlap across all iterations.
6722 LVer->prepareNoAliasMetadata();
6723 }
6724 }
6725
6726 // Create initial base VPlan0, to serve as common starting point for all
6727 // candidates built later for specific VF ranges.
6728 auto VPlan0 = VPlanTransforms::buildVPlan0(
6729 OrigLoop, *LI, Legal->getWidestInductionType(),
6730 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE,
6731 LVer ? &*LVer : nullptr);
6732
6733 // Create recipes for header phis. For outer loops, reductions, recurrences
6734 // and in-loop reductions are empty since legality doesn't detect them.
6736 *OrigLoop, Legal->getInductionVars(),
6737 Legal->getReductionVars(),
6738 Legal->getFixedOrderRecurrences(),
6739 Config.getInLoopReductions(), Hints.allowReordering()))
6740 return;
6741
6744 // If we're vectorizing a loop with an uncountable exit, make sure that the
6745 // recipes are safe to handle.
6746 // TODO: Remove this once we can properly check the VPlan itself for both
6747 // the presence of an uncountable exit and the presence of stores in
6748 // the loop inside handleEarlyExits itself.
6750 if (Legal->hasUncountableEarlyExit())
6751 EEStyle = Legal->hasUncountableExitWithSideEffects()
6754
6756 OrigLoop, PSE, *DT, Legal->getAssumptionCache()))
6757 return;
6758
6760 CM.foldTailByMasking());
6762 if (CM.foldTailByMasking())
6765
6766 auto MaxVFTimes2 = MaxVF * 2;
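// For illustration (assuming fixed-width VFs): with MinVF = 2 and MaxVF = 8,
// the loop below starts from the range [2, 16); each tryToBuildVPlan call may
// clamp SubRange.End at the first VF where a widening decision changes, so
// the resulting plans might, e.g., cover [2, 8) and [8, 16).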
6767 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
6768 VFRange SubRange = {VF, MaxVFTimes2};
6769 auto Plan =
6770 tryToBuildVPlan(std::unique_ptr<VPlan>(VPlan0->duplicate()), SubRange);
6771 VF = SubRange.End;
6772
6773 if (!Plan)
6774 continue;
6775
6776 // Now optimize the initial VPlan.
6780 Config.getMinimalBitwidths());
6782 // TODO: try to put addExplicitVectorLength close to addActiveLaneMask
6783 if (CM.foldTailWithEVL()) {
6785 Config.getMaxSafeElements());
6787 }
6788
6789 if (auto P = VPlanTransforms::narrowInterleaveGroups(*Plan, TTI))
6790 VPlans.push_back(std::move(P));
6791
6793 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
6794 VPlans.push_back(std::move(Plan));
6795 }
6796}
6797
6798VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VPlanPtr Plan,
6799 VFRange &Range) {
6800
6801 // For outer loops, the plan only needs basic recipe conversion and induction
6802 // live-out optimization; the full inner-loop recipe building below does not
6803 // apply (no widening decisions, interleave groups, reductions, etc.).
6804 if (Plan->isOuterLoop()) {
6805 for (ElementCount VF : Range)
6806 Plan->addVF(VF);
6808 return nullptr;
6810 /*FoldTail=*/false);
6811 return Plan;
6812 }
6813
6814 using namespace llvm::VPlanPatternMatch;
6815 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
6816
6817 // ---------------------------------------------------------------------------
6818 // Build initial VPlan: Scan the body of the loop in a topological order to
6819 // visit each basic block after having visited its predecessor basic blocks.
6820 // ---------------------------------------------------------------------------
6821
6822 bool RequiresScalarEpilogueCheck =
6824 [this](ElementCount VF) {
6825 return !CM.requiresScalarEpilogue(VF.isVector());
6826 },
6827 Range);
6828 // Update the branch in the middle block if a scalar epilogue is required.
6829 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
6830 if (!RequiresScalarEpilogueCheck && MiddleVPBB->getNumSuccessors() == 2) {
6831 auto *BranchOnCond = cast<VPInstruction>(MiddleVPBB->getTerminator());
6832 assert(MiddleVPBB->getSuccessors()[1] == Plan->getScalarPreheader() &&
6833 "second successor must be scalar preheader");
6834 BranchOnCond->setOperand(0, Plan->getFalse());
6835 }
6836
6837 // Don't use getDecisionAndClampRange here, because we don't know the UF, so
6838 // it is better for this function to be conservative rather than to split it
6839 // up into different VPlans.
6840 // TODO: Consider using getDecisionAndClampRange here to split up VPlans.
6841 bool IVUpdateMayOverflow = false;
6842 for (ElementCount VF : Range)
6843 IVUpdateMayOverflow |= !isIndvarOverflowCheckKnownFalse(&CM, VF);
6844
6845 TailFoldingStyle Style = CM.getTailFoldingStyle();
6846 // Use NUW for the induction increment if we proved that it won't overflow in
6847 // the vector loop or when not folding the tail. In the latter case, we know
6848 // that the canonical induction increment will not overflow as the vector trip
6849 // count is >= increment and a multiple of the increment.
6850 VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
6851 bool HasNUW = !IVUpdateMayOverflow || Style == TailFoldingStyle::None;
6852 if (!HasNUW) {
6853 auto *IVInc =
6854 LoopRegion->getExitingBasicBlock()->getTerminator()->getOperand(0);
6855 assert(match(IVInc,
6856 m_VPInstruction<Instruction::Add>(
6857 m_Specific(LoopRegion->getCanonicalIV()), m_VPValue())) &&
6858 "Did not find the canonical IV increment");
6859 LoopRegion->clearCanonicalIVNUW(cast<VPInstruction>(IVInc));
6860 }
6861
6862 // ---------------------------------------------------------------------------
6863 // Pre-construction: record ingredients whose recipes we'll need to further
6864 // process after constructing the initial VPlan.
6865 // ---------------------------------------------------------------------------
6866
6867 // For each interleave group which is relevant for this (possibly trimmed)
6868 // Range, add it to the set of groups to be later applied to the VPlan and add
6869 // placeholders for its members' Recipes which we'll be replacing with a
6870 // single VPInterleaveRecipe.
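// For example (simplified): two strided loads a[2*i] and a[2*i+1] form an
// interleave group with factor 2; their individual widened loads are later
// replaced by a single wide load of 2*VF consecutive elements plus a
// de-interleave of the even and odd lanes.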
6871 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
6872 auto ApplyIG = [IG, this](ElementCount VF) -> bool {
6873 bool Result = (VF.isVector() && // Query is illegal for VF == 1
6874 CM.getWideningDecision(IG->getInsertPos(), VF) ==
6876 // For scalable vectors, the interleave factors must be <= 8 since we
6877 // require the (de)interleaveN intrinsics instead of shufflevectors.
6878 assert((!Result || !VF.isScalable() || IG->getFactor() <= 8) &&
6879 "Unsupported interleave factor for scalable vectors");
6880 return Result;
6881 };
6882 if (!getDecisionAndClampRange(ApplyIG, Range))
6883 continue;
6884 InterleaveGroups.insert(IG);
6885 }
6886
6887 // ---------------------------------------------------------------------------
6888 // Construct wide recipes and apply predication for original scalar
6889 // VPInstructions in the loop.
6890 // ---------------------------------------------------------------------------
6891 VPRecipeBuilder RecipeBuilder(*Plan, Legal, CM, Builder);
6892
6893 // Scan the body of the loop in a topological order to visit each basic block
6894 // after having visited its predecessor basic blocks.
6895 VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
6896 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
6897 HeaderVPBB);
6898
6900 Range.Start);
6901
6902 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, Config.CostKind, CM.PSE,
6903 OrigLoop);
6904
6906 RecipeBuilder);
6907
6909
6911 RecipeBuilder, CostCtx);
6912
6913 // Now process all other blocks and instructions.
6914 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
6915 // Convert input VPInstructions to widened recipes.
6916 for (VPRecipeBase &R : make_early_inc_range(
6917 make_range(VPBB->getFirstNonPhi(), VPBB->end()))) {
6918 // Skip recipes that do not need transforming or have already been
6919 // transformed.
6920 if (isa<VPWidenCanonicalIVRecipe, VPBlendRecipe, VPReductionRecipe,
6921 VPReplicateRecipe, VPWidenLoadRecipe, VPWidenStoreRecipe,
6922 VPWidenCallRecipe, VPWidenIntrinsicRecipe, VPVectorPointerRecipe,
6923 VPVectorEndPointerRecipe, VPHistogramRecipe>(&R))
6924 continue;
6925 auto *VPI = cast<VPInstruction>(&R);
6926 if (!VPI->getUnderlyingValue())
6927 continue;
6928
6929 // TODO: Gradually replace uses of underlying instruction by analyses on
6930 // VPlan. Migrate the recipe construction below away from relying on the
6931 // underlying instruction from VPlan0.
6933 Builder.setInsertPoint(VPI);
6934
6935 VPRecipeBase *Recipe =
6936 RecipeBuilder.tryToCreateWidenNonPhiRecipe(VPI, Range);
6937 if (!Recipe)
6938 Recipe =
6939 RecipeBuilder.handleReplication(cast<VPInstruction>(VPI), Range);
6940
6941 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && isa<TruncInst>(Instr)) {
6942 // Optimized a truncate to VPWidenIntOrFpInductionRecipe. It needs to be
6943 // moved to the phi section in the header.
6944 Recipe->insertBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
6945 } else {
6946 Builder.insert(Recipe);
6947 }
6948 if (Recipe->getNumDefinedValues() == 1) {
6949 VPI->replaceAllUsesWith(Recipe->getVPSingleValue());
6950 } else {
6951 assert(Recipe->getNumDefinedValues() == 0 &&
6952 "Unexpected multidef recipe");
6953 }
6954 R.eraseFromParent();
6955 }
6956 }
6957
6958 assert(isa<VPRegionBlock>(LoopRegion) &&
6959 !LoopRegion->getEntryBasicBlock()->empty() &&
6960 "entry block must be set to a VPRegionBlock having a non-empty entry "
6961 "VPBasicBlock");
6962
6964 Range);
6965
6966 // ---------------------------------------------------------------------------
6967 // Transform initial VPlan: Apply previously taken decisions, in order, to
6968 // bring the VPlan to its final state.
6969 // ---------------------------------------------------------------------------
6970
6971 addReductionResultComputation(Plan, RecipeBuilder, Range.Start);
6972
6973 // Optimize FindIV reductions to use sentinel-based approach when possible.
6975 *OrigLoop);
6977 CM.foldTailByMasking());
6978
6979 // Apply mandatory transformation to handle reductions with multiple in-loop
6980 // uses if possible, bail out otherwise.
6982 OrigLoop))
6983 return nullptr;
6984 // Apply mandatory transformation to handle FP maxnum/minnum reduction with
6985 // NaNs if possible, bail out otherwise.
6987 return nullptr;
6988
6989 // Create whole-vector selects for find-last recurrences.
6991 return nullptr;
6992
6994
6995 // Create partial reduction recipes for scaled reductions and transform
6996 // recipes to abstract recipes if it is legal and beneficial and clamp the
6997 // range for better cost estimation.
6998 // TODO: Enable following transform when the EVL-version of extended-reduction
6999 // and mulacc-reduction are implemented.
7000 if (!CM.foldTailWithEVL()) {
7001 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, Config.CostKind, CM.PSE,
7002 OrigLoop);
7004 Range);
7006 Range);
7007 }
7008
7009 // Ensure scalar VF plans only contain VF=1, as required by hasScalarVFOnly.
7010 if (Range.Start.isScalar())
7011 Range.End = Range.Start * 2;
7012
7013 for (ElementCount VF : Range)
7014 Plan->addVF(VF);
7015 Plan->setName("Initial VPlan");
7016
7017 // Interleave memory: for each Interleave Group we marked earlier as relevant
7018 // for this VPlan, replace the Recipes widening its memory instructions with a
7019 // single VPInterleaveRecipe at its insertion point.
7021 InterleaveGroups, CM.isEpilogueAllowed());
7022
7023 // Replace VPValues for known constant strides.
7025 Legal->getLAI()->getSymbolicStrides());
7026
7028
7029 if (useActiveLaneMask(Style)) {
7030 // TODO: Move checks to VPlanTransforms::addActiveLaneMask once
7031 // TailFoldingStyle is visible there.
7032 bool ForControlFlow = useActiveLaneMaskForControlFlow(Style);
7033 RUN_VPLAN_PASS(VPlanTransforms::addActiveLaneMask, *Plan, ForControlFlow);
7034 }
7035
7036 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
7037 return Plan;
7038}
7039
7040void LoopVectorizationPlanner::addReductionResultComputation(
7041 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) {
7042 using namespace VPlanPatternMatch;
7043 VPTypeAnalysis TypeInfo(*Plan);
7044 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
7045 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
7047 VPBasicBlock *LatchVPBB = VectorLoopRegion->getExitingBasicBlock();
7048 Builder.setInsertPoint(&*std::prev(std::prev(LatchVPBB->end())));
7049 VPBasicBlock::iterator IP = MiddleVPBB->getFirstNonPhi();
7050 VPValue *HeaderMask = vputils::findHeaderMask(*Plan);
7051 for (VPRecipeBase &R :
7052 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
7053 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
7054 if (!PhiR)
7055 continue;
7056
7057 RecurKind RecurrenceKind = PhiR->getRecurrenceKind();
7058 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(
7060 Type *PhiTy = TypeInfo.inferScalarType(PhiR);
7061
7062 // Convert a VPBlendRecipe backedge to a select.
7063 if (auto *Blend = dyn_cast<VPBlendRecipe>(PhiR->getBackedgeValue())) {
7064 if (Blend->getNumIncomingValues() == 2 &&
7065 Blend->getMask(0) == HeaderMask) {
7066 auto *Sel = VPBuilder(Blend).createSelect(
7067 Blend->getMask(0), Blend->getIncomingValue(0),
7068 Blend->getIncomingValue(1), {}, "", *Blend);
7069 Blend->replaceAllUsesWith(Sel);
7070 Blend->eraseFromParent();
7071 }
7072 }
7073
7074 auto *OrigExitingVPV = PhiR->getBackedgeValue();
7075 auto *NewExitingVPV = PhiR->getBackedgeValue();
7076
7077 // Remove the predicated select if the target doesn't want it.
7078 VPValue *V;
7079 if (!CM.usePredicatedReductionSelect(RecurrenceKind) &&
7080 match(PhiR->getBackedgeValue(),
7081 m_Select(m_Specific(HeaderMask), m_VPValue(V), m_Specific(PhiR))))
7082 PhiR->setBackedgeValue(V);
7083
7084 // We want code in the middle block to appear to execute on the location of
7085 // the scalar loop's latch terminator because: (a) it is all compiler
7086 // generated, (b) these instructions are always executed after evaluating
7087 // the latch conditional branch, and (c) other passes may add new
7088 // predecessors which terminate on this line. This is the easiest way to
7089 // ensure we don't accidentally cause an extra step back into the loop while
7090 // debugging.
7091 DebugLoc ExitDL = OrigLoop->getLoopLatch()->getTerminator()->getDebugLoc();
7092
7093 // TODO: At the moment ComputeReductionResult also drives creation of the
7094 // bc.merge.rdx phi nodes, hence it needs to be created unconditionally here
7095 // even for in-loop reductions, until the reduction resume value handling is
7096 // also modeled in VPlan.
7097 VPInstruction *FinalReductionResult;
7098 VPBuilder::InsertPointGuard Guard(Builder);
7099 Builder.setInsertPoint(MiddleVPBB, IP);
7100 // For AnyOf reductions, find the select among PhiR's users and convert
7101 // the reduction phi to operate on bools before creating the final
7102 // reduction result.
7103 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
7104 auto *AnyOfSelect =
7105 cast<VPSingleDefRecipe>(*find_if(PhiR->users(), [](VPUser *U) {
7106 return match(U, m_Select(m_VPValue(), m_VPValue(), m_VPValue()));
7107 }));
7108 VPValue *Start = PhiR->getStartValue();
7109 bool TrueValIsPhi = AnyOfSelect->getOperand(1) == PhiR;
7110 // NewVal is the non-phi operand of the select.
7111 VPValue *NewVal = TrueValIsPhi ? AnyOfSelect->getOperand(2)
7112 : AnyOfSelect->getOperand(1);
7113
7114 // Adjust AnyOf reductions; replace the reduction phi for the selected
7115 // value with a boolean reduction phi node to check if the condition is
7116 // true in any iteration. The final value is selected by the final
7117 // ComputeReductionResult.
7118 VPValue *Cmp = AnyOfSelect->getOperand(0);
7119 // If the compare is checking the reduction PHI node, adjust it to check
7120 // the start value.
7121 if (VPRecipeBase *CmpR = Cmp->getDefiningRecipe())
7122 CmpR->replaceUsesOfWith(PhiR, PhiR->getStartValue());
7123 Builder.setInsertPoint(AnyOfSelect);
7124
7125 // If the true value of the select is the reduction phi, the new value
7126 // is selected if the negated condition is true in any iteration.
7127 if (TrueValIsPhi)
7128 Cmp = Builder.createNot(Cmp);
7129 VPValue *Or = Builder.createOr(PhiR, Cmp);
7130 // Only replace uses inside the vector region with Or. External uses
7131 // (e.g. scalar preheader resume phis) must be replaced by the user
7132 // update loop below with FinalReductionResult.
7133 AnyOfSelect->replaceUsesWithIf(Or, [](VPUser &U, unsigned) {
7134 return cast<VPRecipeBase>(&U)->getRegion();
7135 });
7136 ToDelete.push_back(AnyOfSelect);
7137
7138 // Convert the reduction phi to operate on bools.
7139 PhiR->setOperand(0, Plan->getFalse());
7140
7141 // Update NewExitingVPV if it was pointing to the now-replaced select.
7142 if (NewExitingVPV == AnyOfSelect)
7143 NewExitingVPV = Or;
7144
7145 Builder.setInsertPoint(MiddleVPBB, IP);
7146
7147 FinalReductionResult =
7148 Builder.createAnyOfReduction(NewExitingVPV, NewVal, Start, ExitDL);
7149 } else {
7150 VPIRFlags Flags(RecurrenceKind, PhiR->isOrdered(), PhiR->isInLoop(),
7151 PhiR->getFastMathFlags());
7152 FinalReductionResult =
7153 Builder.createNaryOp(VPInstruction::ComputeReductionResult,
7154 {NewExitingVPV}, Flags, ExitDL);
7155 }
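// As a rough sketch of the AnyOf rewrite above: a scalar recurrence like
//   r = phi [ start, ph ], [ r.next, loop ]
//   r.next = cmp ? val : r
// becomes a boolean recurrence
//   b = phi [ false, ph ], [ b.next, loop ]
//   b.next = or b, cmp
// and the middle block then selects between 'val' and 'start' depending on
// whether any lane of the final 'b.next' is true.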
7156 // If the vector reduction can be performed in a smaller type, we truncate
7157 // then extend the loop exit value to enable InstCombine to evaluate the
7158 // entire expression in the smaller type.
7159 if (MinVF.isVector() && PhiTy != RdxDesc.getRecurrenceType() &&
7161 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
7163 "Unexpected truncated min-max recurrence!");
7164 Type *RdxTy = RdxDesc.getRecurrenceType();
7165 VPWidenCastRecipe *Trunc;
7166 Instruction::CastOps ExtendOpc =
7167 RdxDesc.isSigned() ? Instruction::SExt : Instruction::ZExt;
7168 VPWidenCastRecipe *Extnd;
7169 {
7170 VPBuilder::InsertPointGuard Guard(Builder);
7171 Builder.setInsertPoint(
7172 NewExitingVPV->getDefiningRecipe()->getParent(),
7173 std::next(NewExitingVPV->getDefiningRecipe()->getIterator()));
7174 Trunc =
7175 Builder.createWidenCast(Instruction::Trunc, NewExitingVPV, RdxTy);
7176 Extnd = Builder.createWidenCast(ExtendOpc, Trunc, PhiTy);
7177 }
7178 if (PhiR->getOperand(1) == NewExitingVPV)
7179 PhiR->setOperand(1, Extnd->getVPSingleValue());
7180
7181 // Update ComputeReductionResult with the truncated exiting value and
7182 // extend its result. Operand 0 provides the values to be reduced.
7183 FinalReductionResult->setOperand(0, Trunc);
7184 FinalReductionResult =
7185 Builder.createScalarCast(ExtendOpc, FinalReductionResult, PhiTy, {});
7186 }
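// E.g. (simplified): if the phi type is i32 but the reduction values are
// known to fit in i8, the exiting value is truncated to i8, the reduction is
// computed in i8, and only the final result is extended back to i32, letting
// InstCombine evaluate the whole expression in the narrow type.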
7187
7188 // Update all users outside the vector region. Also replace redundant
7189 // extracts.
7190 for (auto *U : to_vector(OrigExitingVPV->users())) {
7191 auto *Parent = cast<VPRecipeBase>(U)->getParent();
7192 if (FinalReductionResult == U || Parent->getParent())
7193 continue;
7194 // Skip ComputeReductionResult and FindIV reductions when they are not the
7195 // final result.
7196 if (match(U, m_VPInstruction<VPInstruction::ComputeReductionResult>()) ||
7198 match(U, m_VPInstruction<Instruction::ICmp>())))
7199 continue;
7200 U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult);
7201
7202 // Look through ExtractLastPart.
7204 U = cast<VPInstruction>(U)->getSingleUser();
7205
7208 cast<VPInstruction>(U)->replaceAllUsesWith(FinalReductionResult);
7209 }
7210
7211 RecurKind RK = PhiR->getRecurrenceKind();
7216 VPBuilder PHBuilder(Plan->getVectorPreheader());
7217 VPValue *Iden = Plan->getOrAddLiveIn(
7218 getRecurrenceIdentity(RK, PhiTy, PhiR->getFastMathFlags()));
7219 auto *ScaleFactorVPV = Plan->getConstantInt(32, 1);
7220 VPValue *StartV = PHBuilder.createNaryOp(
7222 {PhiR->getStartValue(), Iden, ScaleFactorVPV}, *PhiR);
7223 PhiR->setOperand(0, StartV);
7224 }
7225 }
7226 for (VPRecipeBase *R : ToDelete)
7227 R->eraseFromParent();
7228
7230}
7231
7233 VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const {
7234 const auto &[SCEVCheckCond, SCEVCheckBlock] = RTChecks.getSCEVChecks();
7235 if (SCEVCheckBlock && SCEVCheckBlock->hasNPredecessors(0)) {
7236 assert((!Config.OptForSize ||
7237 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled) &&
7238 "Cannot SCEV check stride or overflow when optimizing for size");
7240 SCEVCheckBlock, HasBranchWeights);
7241 }
7242 const auto &[MemCheckCond, MemCheckBlock] = RTChecks.getMemRuntimeChecks();
7243 if (MemCheckBlock && MemCheckBlock->hasNPredecessors(0)) {
7244 // VPlan-native path does not do any analysis for runtime checks
7245 // currently.
7247 "Runtime checks are not supported for outer loops yet");
7248
7249 if (Config.OptForSize) {
7250 assert(
7251 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
7252 "Cannot emit memory checks when optimizing for size, unless forced "
7253 "to vectorize.");
7254 ORE->emit([&]() {
7255 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
7256 OrigLoop->getStartLoc(),
7257 OrigLoop->getHeader())
7258 << "Code-size may be reduced by not forcing "
7259 "vectorization, or by source-code modifications "
7260 "eliminating the need for runtime checks "
7261 "(e.g., adding 'restrict').";
7262 });
7263 }
7265 MemCheckBlock, HasBranchWeights);
7266 }
7267}
7268
7270 VPlan &Plan, ElementCount VF, unsigned UF,
7271 ElementCount MinProfitableTripCount) const {
7272 const uint32_t *BranchWeights =
7273 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator())
7275 : nullptr;
7277 MinProfitableTripCount,
7278 CM.requiresScalarEpilogue(VF.isVector()),
7279 CM.foldTailByMasking(), OrigLoop, BranchWeights,
7280 OrigLoop->getLoopPredecessor()->getTerminator()->getDebugLoc(),
7281 PSE, /*CheckBlock=*/nullptr);
7282}
7283
7284// Determine how to lower the epilogue, which depends on 1) optimising
7285// for minimum code-size, 2) tail-folding compiler options, 3) loop
7286// hints forcing tail-folding, and 4) a TTI hook that analyses whether the loop
7287// is suitable for tail-folding.
7288static EpilogueLowering
7290 bool OptForSize, TargetTransformInfo *TTI,
7292 InterleavedAccessInfo *IAI) {
7293 // 1) OptSize takes precedence over all other options, i.e. if this is set,
7294 // don't look at hints or options, and don't request an epilogue.
7295 if (F->hasOptSize() ||
7296 (OptForSize && Hints.getForce() != LoopVectorizeHints::FK_Enabled))
7298
7299 // 2) If set, obey the directives
7300 if (TailFoldingPolicy.getNumOccurrences()) {
7301 switch (TailFoldingPolicy) {
7303 return CM_EpilogueAllowed;
7308 };
7309 }
7310
7311 // 3) If set, obey the hints
7312 switch (Hints.getPredicate()) {
7316 return CM_EpilogueAllowed;
7317 };
7318
7319 // 4) if the TTI hook indicates this is profitable, request tail-folding.
7320 TailFoldingInfo TFI(TLI, &LVL, IAI);
7321 if (TTI->preferTailFoldingOverEpilogue(&TFI))
7323
7324 return CM_EpilogueAllowed;
7325}
7326
7327// Emit a remark if there are stores to floats that required a floating point
7328 // extension. If the vectorized loop was generated with double precision there
7329 // will be a performance penalty from the conversion overhead and the change in
7330 // the vector width.
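// For example (simplified C, names made up), a remark would be emitted for
//   float a[N]; double d;
//   for (i) a[i] = (float)(d * a[i]);
// because each a[i] is extended to double, computed on, and truncated back,
// doubling the element width of the vector operations.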
7333 for (BasicBlock *BB : L->getBlocks()) {
7334 for (Instruction &Inst : *BB) {
7335 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
7336 if (S->getValueOperand()->getType()->isFloatTy())
7337 Worklist.push_back(S);
7338 }
7339 }
7340 }
7341
7342 // Traverse the floating point stores upwards, searching for floating point
7343 // conversions.
7346 while (!Worklist.empty()) {
7347 auto *I = Worklist.pop_back_val();
7348 if (!L->contains(I))
7349 continue;
7350 if (!Visited.insert(I).second)
7351 continue;
7352
7353 // Emit a remark if the floating point store required a floating
7354 // point conversion.
7355 // TODO: More work could be done to identify the root cause such as a
7356 // constant or a function return type and point the user to it.
7357 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
7358 ORE->emit([&]() {
7359 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
7360 I->getDebugLoc(), L->getHeader())
7361 << "floating point conversion changes vector width. "
7362 << "Mixed floating point precision requires an up/down "
7363 << "cast that will negatively impact performance.";
7364 });
7365
7366 for (Use &Op : I->operands())
7367 if (auto *OpI = dyn_cast<Instruction>(Op))
7368 Worklist.push_back(OpI);
7369 }
7370}
7371
7372/// For loops with uncountable early exits, find the cost of doing work when
7373/// exiting the loop early, such as calculating the final exit values of
7374/// variables used outside the loop.
7375/// TODO: This is currently overly pessimistic because the loop may not take
7376/// the early exit, but better to keep this conservative for now. In future,
7377/// it might be possible to relax this by using branch probabilities.
7379 VPlan &Plan, ElementCount VF) {
7380 InstructionCost Cost = 0;
7381 for (auto *ExitVPBB : Plan.getExitBlocks()) {
7382 for (auto *PredVPBB : ExitVPBB->getPredecessors()) {
7383 // If the predecessor is not the middle.block, then it must be the
7384 // vector.early.exit block, which may contain work to calculate the exit
7385 // values of variables used outside the loop.
7386 if (PredVPBB != Plan.getMiddleBlock()) {
7387 LLVM_DEBUG(dbgs() << "Calculating cost of work in exit block "
7388 << PredVPBB->getName() << ":\n");
7389 Cost += PredVPBB->cost(VF, CostCtx);
7390 }
7391 }
7392 }
7393 return Cost;
7394}
7395
7396/// This function determines whether or not it's still profitable to vectorize
7397/// the loop given the extra work we have to do outside of the loop:
7398/// 1. Perform the runtime checks before entering the loop to ensure it's safe
7399/// to vectorize.
7400/// 2. In the case of loops with uncountable early exits, we may have to do
7401/// extra work when exiting the loop early, such as calculating the final
7402/// exit values of variables used outside the loop.
7403/// 3. The middle block.
7404static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks,
7405 VectorizationFactor &VF, Loop *L,
7407 VPCostContext &CostCtx, VPlan &Plan,
7408 EpilogueLowering SEL,
7409 std::optional<unsigned> VScale) {
7410 InstructionCost RtC = Checks.getCost();
7411 if (!RtC.isValid())
7412 return false;
7413
7414 // When interleaving only, the scalar and vector costs will be equal, which in
7415 // turn would lead to a divide by 0. Fall back to a hard threshold.
7416 if (VF.Width.isScalar()) {
7417 // TODO: Should we rename VectorizeMemoryCheckThreshold?
7419 LLVM_DEBUG(
7420 dbgs()
7421 << "LV: Interleaving only is not profitable due to runtime checks\n");
7422 return false;
7423 }
7424 return true;
7425 }
7426
7427 // The scalar cost should only be 0 when vectorizing with a user specified
7428 // VF/IC. In those cases, runtime checks should always be generated.
7429 uint64_t ScalarC = VF.ScalarCost.getValue();
7430 if (ScalarC == 0)
7431 return true;
7432
7433 InstructionCost TotalCost = RtC;
7434 // Add on the cost of any work required in the vector early exit block, if
7435 // one exists.
7436 TotalCost += calculateEarlyExitCost(CostCtx, Plan, VF.Width);
7437 TotalCost += Plan.getMiddleBlock()->cost(VF.Width, CostCtx);
7438
7439 // First, compute the minimum iteration count required so that the vector
7440 // loop outperforms the scalar loop.
7441 // The total cost of the scalar loop is
7442 // ScalarC * TC
7443 // where
7444 // * TC is the actual trip count of the loop.
7445 // * ScalarC is the cost of a single scalar iteration.
7446 //
7447 // The total cost of the vector loop is
7448 // TotalCost + VecC * (TC / VF) + EpiC
7449 // where
7450 // * TotalCost is the sum of the costs of
7451 // - the generated runtime checks, i.e. RtC
7452 // - performing any additional work in the vector.early.exit block for
7453 // loops with uncountable early exits.
7454 // - the middle block, if ExpectedTC <= VF.Width.
7455 // * VecC is the cost of a single vector iteration.
7456 // * TC is the actual trip count of the loop
7457 // * VF is the vectorization factor
7458 // * EpiC is the cost of the generated epilogue, including the cost
7459 // of the remaining scalar operations.
7460 //
7461 // Vectorization is profitable once the total vector cost is less than the
7462 // total scalar cost:
7463 // TotalCost + VecC * (TC / VF) + EpiC < ScalarC * TC
7464 //
7465 // Now we can compute the minimum required trip count TC as
7466 // VF * (TotalCost + EpiC) / (ScalarC * VF - VecC) < TC
7467 //
7468 // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
7469 // the computations are performed on doubles, not integers and the result
7470 // is rounded up, hence we get an upper estimate of the TC.
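//
// For illustration, with made-up numbers: if ScalarC = 4, VecC = 10,
// TotalCost = 20 and VF = 4, then ScalarC * VF - VecC = 6 and the bound below
// becomes MinTC1 = ceil(20 * 4 / 6) = 14 iterations before the vector loop
// plus its checks beats the scalar loop.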
7471 unsigned IntVF = estimateElementCount(VF.Width, VScale);
7472 uint64_t Div = ScalarC * IntVF - VF.Cost.getValue();
7473 uint64_t MinTC1 =
7474 Div == 0 ? 0 : divideCeil(TotalCost.getValue() * IntVF, Div);
7475
7476 // Second, compute a minimum iteration count so that the cost of the
7477 // runtime checks is only a fraction of the total scalar loop cost. This
7478 // adds a loop-dependent bound on the overhead incurred if the runtime
7479 // checks fail. In case the runtime checks fail, the cost is RtC + ScalarC
7480 // * TC. To bound the runtime check to be a fraction 1/X of the scalar
7481 // cost, compute
7482 // RtC < ScalarC * TC * (1 / X) ==> RtC * X / ScalarC < TC
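// E.g., continuing the made-up numbers above with X = 10: RtC = 20 (assuming
// no extra early-exit or middle-block cost) and ScalarC = 4 give
// MinTC2 = ceil(20 * 10 / 4) = 50.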
7483 uint64_t MinTC2 = divideCeil(RtC.getValue() * 10, ScalarC);
7484
7485 // Now pick the larger minimum. If it is not a multiple of VF and an epilogue
7486 // is allowed, choose the next closest multiple of VF. This should partly
7487 // compensate for ignoring the epilogue cost.
7488 uint64_t MinTC = std::max(MinTC1, MinTC2);
7489 if (SEL == CM_EpilogueAllowed)
7490 MinTC = alignTo(MinTC, IntVF);
7492
7493 LLVM_DEBUG(
7494 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
7495 << VF.MinProfitableTripCount << "\n");
7496
7497 // Skip vectorization if the expected trip count is less than the minimum
7498 // required trip count.
7499 if (auto ExpectedTC = getSmallBestKnownTC(PSE, L)) {
7500 if (ElementCount::isKnownLT(*ExpectedTC, VF.MinProfitableTripCount)) {
7501 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
7502 "trip count < minimum profitable VF ("
7503 << *ExpectedTC << " < " << VF.MinProfitableTripCount
7504 << ")\n");
7505
7506 return false;
7507 }
7508 }
7509 return true;
7510}
7511
7513 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
7515 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
7517
7518/// Prepare \p MainPlan for vectorizing the main vector loop during epilogue
7519/// vectorization.
7522 using namespace VPlanPatternMatch;
7523 // When vectorizing the epilogue, FindFirstIV & FindLastIV reductions can
7524 // introduce multiple uses of undef/poison. If the reduction start value may
7525 // be undef or poison it needs to be frozen and the frozen start has to be
7526 // used when computing the reduction result. We also need to use the frozen
7527 // value in the resume phi generated by the main vector loop, as this is also
7528 // used to compute the reduction result after the epilogue vector loop.
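// E.g. (simplified): for a FindLastIV reduction whose start value %s may be
// poison, both the final reduction-result computation in the middle block and
// the resume phi feeding the epilogue must use the same frozen copy of %s;
// otherwise each use of the poison %s could independently observe a different
// value.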
7529 auto AddFreezeForFindLastIVReductions = [](VPlan &Plan,
7530 bool UpdateResumePhis) {
7531 VPBuilder Builder(Plan.getEntry());
7532 for (VPRecipeBase &R : *Plan.getMiddleBlock()) {
7533 auto *VPI = dyn_cast<VPInstruction>(&R);
7534 if (!VPI)
7535 continue;
7536 VPValue *OrigStart;
7537 if (!matchFindIVResult(VPI, m_VPValue(), m_VPValue(OrigStart)))
7538 continue;
7540 continue;
7541 VPInstruction *Freeze =
7542 Builder.createNaryOp(Instruction::Freeze, {OrigStart}, {}, "fr");
7543 VPI->setOperand(2, Freeze);
7544 if (UpdateResumePhis)
7545 OrigStart->replaceUsesWithIf(Freeze, [Freeze](VPUser &U, unsigned) {
7546 return Freeze != &U && isa<VPPhi>(&U);
7547 });
7548 }
7549 };
7550 AddFreezeForFindLastIVReductions(MainPlan, true);
7551 AddFreezeForFindLastIVReductions(EpiPlan, false);
7552
7553 VPValue *VectorTC = nullptr;
7554 auto *Term =
7556 [[maybe_unused]] bool MatchedTC =
7557 match(Term, m_BranchOnCount(m_VPValue(), m_VPValue(VectorTC)));
7558 assert(MatchedTC && "must match vector trip count");
7559
7560 // If there is a suitable resume value for the canonical induction in the
7561 // scalar (which will become vector) epilogue loop, use it and move it to the
7562 // beginning of the scalar preheader. Otherwise create it below.
7563 VPBasicBlock *MainScalarPH = MainPlan.getScalarPreheader();
7564 auto ResumePhiIter =
7565 find_if(MainScalarPH->phis(), [VectorTC](VPRecipeBase &R) {
7566 return match(&R, m_VPInstruction<Instruction::PHI>(m_Specific(VectorTC),
7567 m_ZeroInt()));
7568 });
7569 VPPhi *ResumePhi = nullptr;
7570 if (ResumePhiIter == MainScalarPH->phis().end()) {
7571 Type *Ty = VPTypeAnalysis(MainPlan).inferScalarType(VectorTC);
7572 VPBuilder ScalarPHBuilder(MainScalarPH, MainScalarPH->begin());
7573 ResumePhi = ScalarPHBuilder.createScalarPhi(
7574 {VectorTC, MainPlan.getZero(Ty)}, {}, "vec.epilog.resume.val");
7575 } else {
7576 ResumePhi = cast<VPPhi>(&*ResumePhiIter);
7577 ResumePhi->setName("vec.epilog.resume.val");
7578 if (&MainScalarPH->front() != ResumePhi)
7579 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->begin());
7580 }
7581
7582 // Create a ResumeForEpilogue for the canonical IV resume as the
7583 // first non-phi, to keep it alive for the epilogue.
7584 VPBuilder ResumeBuilder(MainScalarPH);
7585 ResumeBuilder.createNaryOp(VPInstruction::ResumeForEpilogue, ResumePhi);
7586
7587 // Create ResumeForEpilogue instructions for the resume phis of the
7588 // VPIRPhis in the scalar header of the main plan and return them so they can
7589 // be used as resume values when vectorizing the epilogue.
7590 return to_vector(
7591 map_range(MainPlan.getScalarHeader()->phis(), [&](VPRecipeBase &R) {
7592 assert(isa<VPIRPhi>(R) &&
7593 "only VPIRPhis expected in the scalar header");
7594 return ResumeBuilder.createNaryOp(VPInstruction::ResumeForEpilogue,
7595 R.getOperand(0));
7596 }));
7597}
7598
7599/// Prepare \p Plan for vectorizing the epilogue loop. That is, re-use expanded
7600/// SCEVs from \p ExpandedSCEVs and set resume values for header recipes. Some
7601/// reductions require creating new instructions to compute the resume values.
7602 /// They are collected in a vector and returned. They must be moved to the
7603 /// preheader of the vector epilogue loop once it has been created by executing
7604 /// \p Plan.
7606 VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs,
7608 VFSelectionContext &Config, ScalarEvolution &SE) {
7609 VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
7610 VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
7611 Header->setName("vec.epilog.vector.body");
7612
7613 VPValue *IV = VectorLoop->getCanonicalIV();
7614 // When vectorizing the epilogue loop, the canonical induction needs to start
7615 // at the resume value from the main vector loop. Find the resume value
7616 // created during execution of the main VPlan. It must be the first phi in the
7617 // loop preheader. Add this resume value as an offset to the canonical IV of
7618 // the epilogue loop.
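// E.g. (made-up numbers): if the main vector loop left a resume value of 96,
// the epilogue loop's canonical IV still starts at 0, but every user sees
// IV + 96, so the epilogue continues from iteration 96 of the original loop.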
7619 using namespace llvm::PatternMatch;
7620 PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
7621 for (Value *Inc : EPResumeVal->incoming_values()) {
7622 if (match(Inc, m_SpecificInt(0)))
7623 continue;
7624 assert(!EPI.VectorTripCount &&
7625 "Must only have a single non-zero incoming value");
7626 EPI.VectorTripCount = Inc;
7627 }
7628 // If we didn't find a non-zero vector trip count, all incoming values
7629 // must be zero, which also means the vector trip count is zero. Pick the
7630 // first zero as vector trip count.
7631 // TODO: We should not choose VF * UF so the main vector loop is known to
7632 // be dead.
7633 if (!EPI.VectorTripCount) {
7634 assert(EPResumeVal->getNumIncomingValues() > 0 &&
7635 all_of(EPResumeVal->incoming_values(), match_fn(m_SpecificInt(0))) &&
7636 "all incoming values must be 0");
7637 EPI.VectorTripCount = EPResumeVal->getOperand(0);
7638 }
7639 VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal);
7640 assert(all_of(IV->users(),
7641 [](const VPUser *U) {
7642 return isa<VPScalarIVStepsRecipe>(U) ||
7643 isa<VPDerivedIVRecipe>(U) ||
7644 cast<VPRecipeBase>(U)->isScalarCast() ||
7645 cast<VPInstruction>(U)->getOpcode() ==
7646 Instruction::Add;
7647 }) &&
7648 "the canonical IV should only be used by its increment or "
7649 "ScalarIVSteps when resetting the start value");
7650 VPBuilder Builder(Header, Header->getFirstNonPhi());
7651 VPInstruction *Add = Builder.createAdd(IV, VPV);
7652 // Replace all users of the canonical IV and its increment with the offset
7653 // version, except for the Add itself and the canonical IV increment.
7655 assert(Increment && "Must have a canonical IV increment at this point");
7656 IV->replaceUsesWithIf(Add, [Add, Increment](VPUser &U, unsigned) {
7657 return &U != Add && &U != Increment;
7658 });
7659 VPInstruction *OffsetIVInc =
7661 Increment->replaceAllUsesWith(OffsetIVInc);
7662 OffsetIVInc->setOperand(0, Increment);
7663 
7664 DenseMap<Value *, Value *> ToFrozen;
7665 SmallVector<Instruction *> InstsToMove;
7666 // Ensure that the start values for all header phi recipes are updated before
7667 // vectorizing the epilogue loop.
7668 for (VPRecipeBase &R : Header->phis()) {
7669 Value *ResumeV = nullptr;
7670 // TODO: Move setting of resume values to prepareToExecute.
7671 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
7672 // Find the reduction result by searching users of the phi or its backedge
7673 // value.
7674 auto IsReductionResult = [](VPRecipeBase *R) {
7675 auto *VPI = dyn_cast<VPInstruction>(R);
7676 return VPI && VPI->getOpcode() == VPInstruction::ComputeReductionResult;
7677 };
7678 auto *RdxResult = cast<VPInstruction>(
7679 vputils::findRecipe(ReductionPhi->getBackedgeValue(), IsReductionResult));
7680 assert(RdxResult && "expected to find reduction result");
7681
7682 ResumeV = cast<PHINode>(ReductionPhi->getUnderlyingInstr())
7683 ->getIncomingValueForBlock(L->getLoopPreheader());
7684
7685 // Check for FindIV pattern by looking for icmp user of RdxResult.
7686 // The pattern is: select(icmp ne RdxResult, Sentinel), RdxResult, Start
7687 using namespace VPlanPatternMatch;
7688 VPValue *SentinelVPV = nullptr;
7689 bool IsFindIV = any_of(RdxResult->users(), [&](VPUser *U) {
7690 return match(U, VPlanPatternMatch::m_SpecificICmp(
7691 ICmpInst::ICMP_NE, m_Specific(RdxResult),
7692 m_VPValue(SentinelVPV)));
7693 });
7694
7695 RecurKind RK = ReductionPhi->getRecurrenceKind();
7696 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK) || IsFindIV) {
7697 auto *ResumePhi = cast<PHINode>(ResumeV);
7698 Value *StartV = ResumePhi->getIncomingValueForBlock(
7700 IRBuilder<> Builder(ResumePhi->getParent(),
7701 ResumePhi->getParent()->getFirstNonPHIIt());
7702
7703 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK)) {
7704 // VPReductionPHIRecipes for AnyOf reductions expect a boolean as
7705 // start value; compare the final value from the main vector loop
7706 // to the start value.
7707 ResumeV = Builder.CreateICmpNE(ResumeV, StartV);
7708 if (auto *I = dyn_cast<Instruction>(ResumeV))
7709 InstsToMove.push_back(I);
7710 } else {
7711 assert(SentinelVPV && "expected to find icmp using RdxResult");
7712 if (auto *FreezeI = dyn_cast<FreezeInst>(StartV))
7713 ToFrozen[FreezeI->getOperand(0)] = StartV;
7714
7715 // Adjust resume: select(icmp eq ResumeV, StartV), Sentinel, ResumeV
7716 Value *Cmp = Builder.CreateICmpEQ(ResumeV, StartV);
7717 if (auto *I = dyn_cast<Instruction>(Cmp))
7718 InstsToMove.push_back(I);
7719 ResumeV = Builder.CreateSelect(Cmp, SentinelVPV->getLiveInIRValue(),
7720 ResumeV);
7721 if (auto *I = dyn_cast<Instruction>(ResumeV))
7722 InstsToMove.push_back(I);
7723 }
7724 } else {
7725 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
7726 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
7727 if (auto *VPI = dyn_cast<VPInstruction>(PhiR->getStartValue())) {
7729 "unexpected start value");
7730 // Partial sub-reductions always start at 0 and account for the
7731 // reduction start value in a final subtraction. Update it to use the
7732 // resume value from the main vector loop.
7733 if (PhiR->getVFScaleFactor() > 1 &&
7735 PhiR->getRecurrenceKind())) {
7736 auto *Sub = cast<VPInstruction>(RdxResult->getSingleUser());
7737 assert((Sub->getOpcode() == Instruction::Sub ||
7738 Sub->getOpcode() == Instruction::FSub) &&
7739 "Unexpected opcode");
7740 assert(isa<VPIRValue>(Sub->getOperand(0)) &&
7741 "Expected operand to match the original start value of the "
7742 "reduction");
7743 // For integer sub-reductions, verify start value is zero.
7744 // For FP sub-reductions, verify start value is negative zero.
7745 [[maybe_unused]] auto StartValueIsIdentity = [&] {
7746 Value *IdentityValue = getRecurrenceIdentity(
7747 PhiR->getRecurrenceKind(), ResumeV->getType(),
7748 PhiR->getFastMathFlags());
7749 auto *StartValue = dyn_cast<VPIRValue>(VPI->getOperand(0));
7750 return StartValue && StartValue->getValue() == IdentityValue;
7751 };
7752 assert(StartValueIsIdentity() &&
7753 "Expected start value for partial sub-reduction to be zero "
7754 "(or negative zero)");
7755
7756 Sub->setOperand(0, StartVal);
7757 } else
7758 VPI->setOperand(0, StartVal);
7759 continue;
7760 }
7761 }
7762 } else {
7763 // Retrieve the induction resume values for wide inductions from
7764 // their original phi nodes in the scalar loop.
7765 PHINode *IndPhi = cast<VPWidenInductionRecipe>(&R)->getPHINode();
7766 // Hook up to the PHINode generated by a ResumePhi recipe of main
7767 // loop VPlan, which feeds the scalar loop.
7768 ResumeV = IndPhi->getIncomingValueForBlock(L->getLoopPreheader());
7769 }
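// Install the resume value computed above as the start value of the header
// phi recipe in the epilogue plan.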
7770 assert(ResumeV && "Must have a resume value");
7771 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
7772 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
7773 }
7774
7775 // For some VPValues in the epilogue plan we must re-use the generated IR
7776 // values from the main plan. Replace them with live-in VPValues.
7777 // TODO: This is a workaround needed for epilogue vectorization and it
7778 // should be removed once induction resume value creation is done
7779 // directly in VPlan.
7780 for (auto &R : make_early_inc_range(*Plan.getEntry())) {
7781 // Re-use frozen values from the main plan for Freeze VPInstructions in the
7782 // epilogue plan. This ensures all users use the same frozen value.
7783 auto *VPI = dyn_cast<VPInstruction>(&R);
7784 if (VPI && VPI->getOpcode() == Instruction::Freeze) {
7785 VPI->replaceAllUsesWith(Plan.getOrAddLiveIn(
7786 ToFrozen.lookup(VPI->getOperand(0)->getLiveInIRValue())));
7787 continue;
7788 }
7789
7790 // Re-use the trip count and steps expanded for the main loop, as
7791 // skeleton creation needs it as a value that dominates both the scalar
7792 // and vector epilogue loops
7793 auto *ExpandR = dyn_cast<VPExpandSCEVRecipe>(&R);
7794 if (!ExpandR)
7795 continue;
7796 VPValue *ExpandedVal =
7797 Plan.getOrAddLiveIn(ExpandedSCEVs.lookup(ExpandR->getSCEV()));
7798 ExpandR->replaceAllUsesWith(ExpandedVal);
7799 if (Plan.getTripCount() == ExpandR)
7800 Plan.resetTripCount(ExpandedVal);
7801 ExpandR->eraseFromParent();
7802 }
7803
7804 auto VScale = Config.getVScaleForTuning();
7805 unsigned MainLoopStep =
7806 estimateElementCount(EPI.MainLoopVF * EPI.MainLoopUF, VScale);
7807 unsigned EpilogueLoopStep =
7808 estimateElementCount(EPI.EpilogueVF * EPI.EpilogueUF, VScale);
7812 EPI.EpilogueVF, EPI.EpilogueUF, MainLoopStep, EpilogueLoopStep, SE);
7813
7814 return InstsToMove;
7815}
7816
7817 static void
7818 fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L,
7819 VPlan &BestEpiPlan,
7820 ArrayRef<VPInstruction *> ResumeValues) {
7821 // Fix resume values from the additional bypass block.
7822 BasicBlock *PH = L->getLoopPreheader();
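// Phis in the scalar preheader must have an incoming value for every
// predecessor; predecessors not yet covered take the value flowing in from
// the bypass block.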
7823 for (auto *Pred : predecessors(PH)) {
7824 for (PHINode &Phi : PH->phis()) {
7825 if (Phi.getBasicBlockIndex(Pred) != -1)
7826 continue;
7827 Phi.addIncoming(Phi.getIncomingValueForBlock(BypassBlock), Pred);
7828 }
7829 }
7830 auto *ScalarPH = cast<VPIRBasicBlock>(BestEpiPlan.getScalarPreheader());
7831 if (ScalarPH->hasPredecessors()) {
7832 // Fix resume values for inductions and reductions from the additional
7833 // bypass block using the incoming values from the main loop's resume phis.
7834 // ResumeValues correspond 1:1 with the scalar loop header phis.
7835 for (auto [ResumeV, HeaderPhi] :
7836 zip(ResumeValues, BestEpiPlan.getScalarHeader()->phis())) {
7837 auto *HeaderPhiR = cast<VPIRPhi>(&HeaderPhi);
7838 auto *EpiResumePhi =
7839 cast<PHINode>(HeaderPhiR->getIRPhi().getIncomingValueForBlock(PH));
7840 if (EpiResumePhi->getBasicBlockIndex(BypassBlock) == -1)
7841 continue;
7842 auto *MainResumePhi = cast<PHINode>(ResumeV->getUnderlyingValue());
7843 EpiResumePhi->setIncomingValueForBlock(
7844 BypassBlock, MainResumePhi->getIncomingValueForBlock(BypassBlock));
7845 }
7846 }
7847}
7848
7849/// Connect the epilogue vector loop generated for \p EpiPlan to the main vector
7850 /// loop, after both plans have executed, updating the branches from the iteration
7851 /// count and runtime checks of the main loop, as well as various phis. \p
7852/// InstsToMove contains instructions that need to be moved to the preheader of
7853/// the epilogue vector loop.
7854static void connectEpilogueVectorLoop(VPlan &EpiPlan, Loop *L,
7855 EpilogueLoopVectorizationInfo &EPI,
7856 DominatorTree *DT,
7857 GeneratedRTChecks &Checks,
7858 ArrayRef<Instruction *> InstsToMove,
7859 ArrayRef<VPInstruction *> ResumeValues) {
7860 BasicBlock *VecEpilogueIterationCountCheck =
7861 cast<VPIRBasicBlock>(EpiPlan.getEntry())->getIRBasicBlock();
7862
7863 BasicBlock *VecEpiloguePreHeader =
7864 cast<CondBrInst>(VecEpilogueIterationCountCheck->getTerminator())
7865 ->getSuccessor(1);
7866 // Adjust the control flow taking the state info from the main loop
7867 // vectorization into account.
7869 "expected this to be saved from the previous pass.");
7870 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
7872 VecEpilogueIterationCountCheck, VecEpiloguePreHeader);
7873
7875 VecEpilogueIterationCountCheck},
7877 VecEpiloguePreHeader}});
7878
7879 BasicBlock *ScalarPH =
7880 cast<VPIRBasicBlock>(EpiPlan.getScalarPreheader())->getIRBasicBlock();
7882 VecEpilogueIterationCountCheck, ScalarPH);
7883 DTU.applyUpdates(
7885 VecEpilogueIterationCountCheck},
7887
7888 // Adjust the terminators of runtime check blocks and phis using them.
7889 BasicBlock *SCEVCheckBlock = Checks.getSCEVChecks().second;
7890 BasicBlock *MemCheckBlock = Checks.getMemRuntimeChecks().second;
7891 if (SCEVCheckBlock) {
7892 SCEVCheckBlock->getTerminator()->replaceUsesOfWith(
7893 VecEpilogueIterationCountCheck, ScalarPH);
7894 DTU.applyUpdates({{DominatorTree::Delete, SCEVCheckBlock,
7895 VecEpilogueIterationCountCheck},
7896 {DominatorTree::Insert, SCEVCheckBlock, ScalarPH}});
7897 }
7898 if (MemCheckBlock) {
7899 MemCheckBlock->getTerminator()->replaceUsesOfWith(
7900 VecEpilogueIterationCountCheck, ScalarPH);
7901 DTU.applyUpdates(
7902 {{DominatorTree::Delete, MemCheckBlock, VecEpilogueIterationCountCheck},
7903 {DominatorTree::Insert, MemCheckBlock, ScalarPH}});
7904 }
7905
7906 // The vec.epilog.iter.check block may contain Phi nodes from inductions
7907 // or reductions which merge control-flow from the latch block and the
7908 // middle block. Update the incoming values here and move the Phi into the
7909 // preheader.
7910 SmallVector<PHINode *, 4> PhisInBlock(
7911 llvm::make_pointer_range(VecEpilogueIterationCountCheck->phis()));
7912
7913 for (PHINode *Phi : PhisInBlock) {
7914 Phi->moveBefore(VecEpiloguePreHeader->getFirstNonPHIIt());
7915 Phi->replaceIncomingBlockWith(
7916 VecEpilogueIterationCountCheck->getSinglePredecessor(),
7917 VecEpilogueIterationCountCheck);
7918
7919 // If the phi doesn't have an incoming value from the
7920 // EpilogueIterationCountCheck, we are done. Otherwise remove the
7921 // incoming value and also those from other check blocks. This is needed
7922 // for reduction phis only.
7923 if (none_of(Phi->blocks(), [&](BasicBlock *IncB) {
7924 return EPI.EpilogueIterationCountCheck == IncB;
7925 }))
7926 continue;
7927 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
7928 if (SCEVCheckBlock)
7929 Phi->removeIncomingValue(SCEVCheckBlock);
7930 if (MemCheckBlock)
7931 Phi->removeIncomingValue(MemCheckBlock);
7932 }
7933
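// Move the resume-value computations created while preparing the epilogue
// plan into the epilogue vector preheader so they dominate their users there.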
7934 auto IP = VecEpiloguePreHeader->getFirstNonPHIIt();
7935 for (auto *I : InstsToMove)
7936 I->moveBefore(IP);
7937
7938 // VecEpilogueIterationCountCheck conditionally skips over the epilogue loop
7939 // after executing the main loop. We need to update the resume values of
7940 // inductions and reductions during epilogue vectorization.
7941 fixScalarResumeValuesFromBypass(VecEpilogueIterationCountCheck, L, EpiPlan,
7942 ResumeValues);
7943
7944 // Remove dead phis that were moved to the epilogue preheader but are unused
7945 // (e.g., resume phis for inductions not widened in the epilogue vector loop).
7946 for (PHINode &Phi : make_early_inc_range(VecEpiloguePreHeader->phis()))
7947 if (Phi.use_empty())
7948 Phi.eraseFromParent();
7949}
7950
7951 bool LoopVectorizePass::processLoop(Loop *L) {
7952 assert((EnableVPlanNativePath || L->isInnermost()) &&
7953 "VPlan-native path is not enabled. Only process inner loops.");
7954
7955 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
7956 << L->getHeader()->getParent()->getName() << "' from "
7957 << L->getLocStr() << "\n");
7958
7959 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
7960
7961 LLVM_DEBUG(
7962 dbgs() << "LV: Loop hints:"
7963 << " force="
7964 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
7965 ? "disabled"
7966 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
7967 ? "enabled"
7968 : "?"))
7969 << " width=" << Hints.getWidth()
7970 << " interleave=" << Hints.getInterleave() << "\n");
7971
7972 // Function containing loop
7973 Function *F = L->getHeader()->getParent();
7974
7975 // Looking at the diagnostic output is the only way to determine if a loop
7976 // was vectorized (other than looking at the IR or machine code), so it
7977 // is important to generate an optimization remark for each loop. Most of
7978 // these messages are generated as OptimizationRemarkAnalysis. Remarks
7979 // generated as OptimizationRemark and OptimizationRemarkMissed are
7980 // less verbose reporting vectorized loops and unvectorized loops that may
7981 // benefit from vectorization, respectively.
7982
7983 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
7984 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
7985 return false;
7986 }
7987
7988 PredicatedScalarEvolution PSE(*SE, *L);
7989
7990 // Query this against the original loop and save it here because the profile
7991 // of the original loop header may change as the transformation happens.
7992 bool OptForSize = llvm::shouldOptimizeForSize(
7993 L->getHeader(), PSI,
7994 PSI && PSI->hasProfileSummary() ? &GetBFI() : nullptr,
7995 PGSOQueryType::IRPass);
7996 
7997 // Check if it is legal to vectorize the loop.
7998 LoopVectorizationRequirements Requirements;
7999 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, F, *LAIs, LI, ORE,
8000 &Requirements, &Hints, DB, AC,
8001 /*AllowRuntimeSCEVChecks=*/!OptForSize, AA);
8002 if (!LVL.canVectorize(EnableVPlanNativePath)) {
8003 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
8004 Hints.emitRemarkWithHints();
8005 return false;
8006 }
8007
8008 bool IsInnerLoop = L->isInnermost();
8009
8010 // Outer loops require a computable trip count.
8011 if (!IsInnerLoop && isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
8012 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
8013 return false;
8014 }
8015
8016 if (LVL.hasUncountableEarlyExit()) {
8017 if (!EnableEarlyExitVectorization) {
8018 reportVectorizationFailure("Auto-vectorization of loops with uncountable "
8019 "early exit is not enabled",
8020 "UncountableEarlyExitLoopsDisabled", ORE, L);
8021 return false;
8022 }
8023 }
8024
8025 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
8026 bool UseInterleaved =
8027 IsInnerLoop && TTI->enableInterleavedAccessVectorization();
8028
8029 // If an override option has been passed in for interleaved accesses, use it.
8030 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
8031 UseInterleaved = IsInnerLoop && EnableInterleavedMemAccesses;
8032
8033 // Analyze interleaved memory accesses.
8034 if (UseInterleaved)
8035 IAI.analyzeInterleavedAccesses(useMaskedInterleavedAccesses(*TTI));
8036 
8037 if (LVL.hasUncountableEarlyExit()) {
8038 BasicBlock *LoopLatch = L->getLoopLatch();
8039 if (IAI.requiresScalarEpilogue() ||
8040 any_of(LVL.getCountableExitingBlocks(), not_equal_to(LoopLatch))) {
8041 reportVectorizationFailure("Auto-vectorization of early exit loops "
8042 "requiring a scalar epilogue is unsupported",
8043 "UncountableEarlyExitUnsupported", ORE, L);
8044 return false;
8045 }
8046 }
8047
8048 // Check the function attributes and profiles to find out if this function
8049 // should be optimized for size.
8050 EpilogueLowering SEL =
8051 getEpilogueLowering(F, L, Hints, OptForSize, TTI, TLI, LVL, &IAI);
8052
8053 // Check the loop for a trip count threshold: vectorize loops with a tiny trip
8054 // count by optimizing for size, to minimize overheads.
8055 auto ExpectedTC = getSmallBestKnownTC(PSE, L);
8056 if (ExpectedTC && ExpectedTC->isFixed() &&
8057 ExpectedTC->getFixedValue() < TinyTripCountVectorThreshold) {
8058 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
8059 << "This loop is worth vectorizing only if no scalar "
8060 << "iteration overheads are incurred.");
8061 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
8062 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
8063 else {
8064 LLVM_DEBUG(dbgs() << "\n");
8065 // Tail-folded loops are efficient even when the loop
8066 // iteration count is low. However, setting the epilogue policy to
8067 // `CM_EpilogueNotAllowedLowTripLoop` prevents vectorizing loops
8068 // with runtime checks. It's more effective to let
8069 // `isOutsideLoopWorkProfitable` determine if vectorization is
8070 // beneficial for the loop.
8071 if (SEL != CM_EpilogueNotNeededUsePredicate)
8072 SEL = CM_EpilogueNotAllowedLowTripLoop;
8073 }
8074 }
8075
8076 // Check the function attributes to see if implicit floats or vectors are
8077 // allowed.
8078 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
8080 "Can't vectorize when the NoImplicitFloat attribute is used",
8081 "loop not vectorized due to NoImplicitFloat attribute",
8082 "NoImplicitFloat", ORE, L);
8083 Hints.emitRemarkWithHints();
8084 return false;
8085 }
8086
8087 // Check if the target supports potentially unsafe FP vectorization.
8088 // FIXME: Add a check for the type of safety issue (denormal, signaling)
8089 // for the target we're vectorizing for, to make sure none of the
8090 // additional fp-math flags can help.
8091 if (Hints.isPotentiallyUnsafe() &&
8092 TTI->isFPVectorizationPotentiallyUnsafe()) {
8094 "Potentially unsafe FP op prevents vectorization",
8095 "loop not vectorized due to unsafe FP support.",
8096 "UnsafeFP", ORE, L);
8097 Hints.emitRemarkWithHints();
8098 return false;
8099 }
8100
8101 bool AllowOrderedReductions;
8102 // If the flag is set, use that instead and override the TTI behaviour.
8103 if (ForceOrderedReductions.getNumOccurrences() > 0)
8104 AllowOrderedReductions = ForceOrderedReductions;
8105 else
8106 AllowOrderedReductions = TTI->enableOrderedReductions();
8107 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
8108 ORE->emit([&]() {
8109 auto *ExactFPMathInst = Requirements.getExactFPInst();
8110 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
8111 ExactFPMathInst->getDebugLoc(),
8112 ExactFPMathInst->getParent())
8113 << "loop not vectorized: cannot prove it is safe to reorder "
8114 "floating-point operations";
8115 });
8116 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
8117 "reorder floating-point operations\n");
8118 Hints.emitRemarkWithHints();
8119 return false;
8120 }
8121
8122 // Use the cost model.
8123 VFSelectionContext Config(*TTI, &LVL, L, *F, PSE, DB, ORE, &Hints,
8124 OptForSize);
8125 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, AC, ORE,
8126 GetBFI, F, &Hints, IAI, Config);
8127 // Use the planner for vectorization.
8128 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, Config, IAI, PSE,
8129 Hints, ORE);
8130
8131 // Get user vectorization factor and interleave count.
8132 ElementCount UserVF = Hints.getWidth();
8133 unsigned UserIC = Hints.getInterleave();
8134 // Outer loops don't have LoopAccessInfo, so skip the safety check and reset
8135 // UserIC (interleaving is not supported for outer loops).
8136 if (!IsInnerLoop)
8137 UserIC = 0;
8138 else if (UserIC > 1 && !LVL.isSafeForAnyVectorWidth())
8139 UserIC = 1;
8140
8141 // Plan how to best vectorize.
8142 LVP.plan(UserVF, UserIC);
8143 auto [VF, BestPlanPtr] = LVP.computeBestVF();
8144 unsigned IC = 1;
8145
8146 // For VPlan build stress testing of outer loops, bail after plan
8147 // construction.
8148 if (!IsInnerLoop && VPlanBuildOuterloopStressTest)
8149 return false;
8150
8151 if (IsInnerLoop && ORE->allowExtraAnalysis(LV_NAME))
8152 LVP.emitInvalidCostRemarks(ORE);
8153 
8154 GeneratedRTChecks Checks(PSE, DT, LI, TTI, Config.CostKind);
8155 if (IsInnerLoop && LVP.hasPlanWithVF(VF.Width)) {
8156 // Select the interleave count.
8157 IC = LVP.selectInterleaveCount(*BestPlanPtr, VF.Width, VF.Cost);
8158
8159 unsigned SelectedIC = std::max(IC, UserIC);
8160 // Optimistically generate runtime checks if they are needed. Drop them if
8161 // they turn out to not be profitable.
8162 if (VF.Width.isVector() || SelectedIC > 1) {
8163 Checks.create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC,
8164 *ORE);
8165
8166 // Bail out early if either the SCEV or memory runtime checks are known to
8167 // fail. In that case, the vector loop would never execute.
8168 using namespace llvm::PatternMatch;
8169 if (Checks.getSCEVChecks().first &&
8170 match(Checks.getSCEVChecks().first, m_One()))
8171 return false;
8172 if (Checks.getMemRuntimeChecks().first &&
8173 match(Checks.getMemRuntimeChecks().first, m_One()))
8174 return false;
8175 }
8176
8177 // Check if it is profitable to vectorize with runtime checks.
8178 bool ForceVectorization =
8179 Hints.getForce() == LoopVectorizeHints::FK_Enabled;
8180 VPCostContext CostCtx(CM.TTI, *CM.TLI, *BestPlanPtr, CM, Config.CostKind,
8181 CM.PSE, L);
8182 if (!ForceVectorization &&
8183 !isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx, *BestPlanPtr,
8184 SEL, Config.getVScaleForTuning())) {
8185 ORE->emit([&]() {
8186 return OptimizationRemarkAnalysisAliasing(
8187 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
8188 L->getHeader())
8189 << "loop not vectorized: cannot prove it is safe to reorder "
8190 "memory operations";
8191 });
8192 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
8193 Hints.emitRemarkWithHints();
8194 return false;
8195 }
8196 }
8197
8198 // Identify the diagnostic messages that should be produced.
8199 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
8200 bool VectorizeLoop = true, InterleaveLoop = true;
8201 if (VF.Width.isScalar()) {
8202 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
8203 VecDiagMsg = {
8204 "VectorizationNotBeneficial",
8205 "the cost-model indicates that vectorization is not beneficial"};
8206 VectorizeLoop = false;
8207 }
8208
8209 if (UserIC == 1 && Hints.getInterleave() > 1) {
8211 "UserIC should only be ignored due to unsafe dependencies");
8212 LLVM_DEBUG(dbgs() << "LV: Ignoring user-specified interleave count.\n");
8213 IntDiagMsg = {"InterleavingUnsafe",
8214 "Ignoring user-specified interleave count due to possibly "
8215 "unsafe dependencies in the loop."};
8216 InterleaveLoop = false;
8217 } else if (!LVP.hasPlanWithVF(VF.Width) && UserIC > 1) {
8218 // Tell the user interleaving was avoided up-front, despite being explicitly
8219 // requested.
8220 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
8221 "interleaving should be avoided up front\n");
8222 IntDiagMsg = {"InterleavingAvoided",
8223 "Ignoring UserIC, because interleaving was avoided up front"};
8224 InterleaveLoop = false;
8225 } else if (IC == 1 && UserIC <= 1) {
8226 // Tell the user interleaving is not beneficial.
8227 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
8228 IntDiagMsg = {
8229 "InterleavingNotBeneficial",
8230 "the cost-model indicates that interleaving is not beneficial"};
8231 InterleaveLoop = false;
8232 if (UserIC == 1) {
8233 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
8234 IntDiagMsg.second +=
8235 " and is explicitly disabled or interleave count is set to 1";
8236 }
8237 } else if (IC > 1 && UserIC == 1) {
8238 // Tell the user interleaving is beneficial, but it is explicitly disabled.
8239 LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
8240 "disabled.\n");
8241 IntDiagMsg = {"InterleavingBeneficialButDisabled",
8242 "the cost-model indicates that interleaving is beneficial "
8243 "but is explicitly disabled or interleave count is set to 1"};
8244 InterleaveLoop = false;
8245 }
8246
8247 // If there is a histogram in the loop, do not just interleave without
8248 // vectorizing. The order of operations will be incorrect without the
8249 // histogram intrinsics, which are only used for recipes with VF > 1.
8250 if (!VectorizeLoop && InterleaveLoop && LVL.hasHistograms()) {
8251 LLVM_DEBUG(dbgs() << "LV: Not interleaving without vectorization due "
8252 << "to histogram operations.\n");
8253 IntDiagMsg = {
8254 "HistogramPreventsScalarInterleaving",
8255 "Unable to interleave without vectorization due to constraints on "
8256 "the order of histogram operations"};
8257 InterleaveLoop = false;
8258 }
8259
8260 // Override IC if user provided an interleave count.
8261 IC = UserIC > 0 ? UserIC : IC;
8262
8263 // Emit diagnostic messages, if any.
8264 const char *VAPassName = Hints.vectorizeAnalysisPassName();
8265 if (!VectorizeLoop && !InterleaveLoop) {
8266 // Do not vectorize or interleave the loop.
8267 ORE->emit([&]() {
8268 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
8269 L->getStartLoc(), L->getHeader())
8270 << VecDiagMsg.second;
8271 });
8272 ORE->emit([&]() {
8273 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
8274 L->getStartLoc(), L->getHeader())
8275 << IntDiagMsg.second;
8276 });
8277 return false;
8278 }
8279
8280 if (!VectorizeLoop && InterleaveLoop) {
8281 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
8282 ORE->emit([&]() {
8283 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
8284 L->getStartLoc(), L->getHeader())
8285 << VecDiagMsg.second;
8286 });
8287 } else if (VectorizeLoop && !InterleaveLoop) {
8288 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
8289 << ") in " << L->getLocStr() << '\n');
8290 ORE->emit([&]() {
8291 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
8292 L->getStartLoc(), L->getHeader())
8293 << IntDiagMsg.second;
8294 });
8295 } else if (VectorizeLoop && InterleaveLoop) {
8296 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
8297 << ") in " << L->getLocStr() << '\n');
8298 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
8299 }
8300
8301 // Report the vectorization decision.
8302 if (VF.Width.isScalar()) {
8303 using namespace ore;
8304 assert(IC > 1);
8305 ORE->emit([&]() {
8306 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
8307 L->getHeader())
8308 << "interleaved loop (interleaved count: "
8309 << NV("InterleaveCount", IC) << ")";
8310 });
8311 } else {
8312 // Report the vectorization decision.
8313 reportVectorization(ORE, L, VF, IC);
8314 }
8315 if (ORE->allowExtraAnalysis(LV_NAME))
8316 checkMixedPrecision(L, ORE);
8317 
8318 // If we decided that it is *legal* to interleave or vectorize the loop, then
8319 // do it.
8320
8321 VPlan &BestPlan = *BestPlanPtr;
8322 // Consider vectorizing the epilogue too if it's profitable.
8323 std::unique_ptr<VPlan> EpiPlan =
8324 LVP.selectBestEpiloguePlan(BestPlan, VF.Width, IC);
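// Branch weights are only attached to the newly created checks when the
// original loop latch already carries profile metadata.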
8325 bool HasBranchWeights =
8326 hasBranchWeightMD(*L->getLoopLatch()->getTerminator());
8327 if (EpiPlan) {
8328 VPlan &BestEpiPlan = *EpiPlan;
8329 VPlan &BestMainPlan = BestPlan;
8330 ElementCount EpilogueVF = BestEpiPlan.getSingleVF();
8331
8332 // The first pass vectorizes the main loop and creates a scalar epilogue
8333 // to be vectorized by executing the plan (potentially with a different
8334 // factor) again shortly afterwards.
8335 BestEpiPlan.getMiddleBlock()->setName("vec.epilog.middle.block");
8336 BestEpiPlan.getVectorPreheader()->setName("vec.epilog.ph");
8337 SmallVector<VPInstruction *> ResumeValues =
8338 preparePlanForMainVectorLoop(BestMainPlan, BestEpiPlan);
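// ResumeValues holds the ResumeForEpilogue instructions created in the main
// plan's scalar preheader; they correspond 1:1 with the scalar header phis
// and later seed the resume phis of the epilogue loop.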
8339 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF, 1, BestEpiPlan);
8340
8341 // Add minimum iteration check for the epilogue plan, followed by runtime
8342 // checks for the main plan.
8343 LVP.addMinimumIterationCheck(BestMainPlan, EPI.EpilogueVF, EPI.EpilogueUF,
8345 LVP.attachRuntimeChecks(BestMainPlan, Checks, HasBranchWeights);
8347 EPI.MainLoopVF, EPI.MainLoopUF,
8349 HasBranchWeights ? MinItersBypassWeights : nullptr,
8350 L->getLoopPredecessor()->getTerminator()->getDebugLoc(),
8351 PSE);
8352
8353 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
8354 Checks, BestMainPlan);
8355 auto ExpandedSCEVs = LVP.executePlan(
8356 EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV, DT,
8358 ++LoopsVectorized;
8359
8360 // Derive EPI fields from VPlan-generated IR.
8361 BasicBlock *EntryBB =
8362 cast<VPIRBasicBlock>(BestMainPlan.getEntry())->getIRBasicBlock();
8363 EntryBB->setName("iter.check");
8364 EPI.EpilogueIterationCountCheck = EntryBB;
8365 // The check chain is: Entry -> [SCEV] -> [Mem] -> MainCheck -> VecPH.
8366 // MainCheck is the non-bypass successor of the last runtime check block
8367 // (or Entry if there are no runtime checks).
8368 BasicBlock *LastCheck = EntryBB;
8369 if (BasicBlock *MemBB = Checks.getMemRuntimeChecks().second)
8370 LastCheck = MemBB;
8371 else if (BasicBlock *SCEVBB = Checks.getSCEVChecks().second)
8372 LastCheck = SCEVBB;
8373 BasicBlock *ScalarPH = L->getLoopPreheader();
8374 auto *BI = cast<CondBrInst>(LastCheck->getTerminator());
8375 EPI.MainLoopIterationCountCheck =
8376 BI->getSuccessor(BI->getSuccessor(0) == ScalarPH);
8377
8378 // Second pass vectorizes the epilogue and adjusts the control flow
8379 // edges from the first pass.
8380 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
8381 Checks, BestEpiPlan);
8382 SmallVector<Instruction *> InstsToMove = preparePlanForEpilogueVectorLoop(
8383 BestEpiPlan, L, ExpandedSCEVs, EPI, CM, Config, *PSE.getSE());
8384 LVP.attachRuntimeChecks(BestEpiPlan, Checks, HasBranchWeights);
8385 LVP.executePlan(
8386 EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, DT,
8388 connectEpilogueVectorLoop(BestEpiPlan, L, EPI, DT, Checks, InstsToMove,
8389 ResumeValues);
8390 ++LoopsEpilogueVectorized;
8391 } else {
8392 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, IC, &CM, Checks,
8393 BestPlan);
8394 LVP.addMinimumIterationCheck(BestPlan, VF.Width, IC,
8395 VF.MinProfitableTripCount);
8396 LVP.attachRuntimeChecks(BestPlan, Checks, HasBranchWeights);
8397
8398 if (!IsInnerLoop)
8399 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" << F->getName()
8400 << "\"\n");
8401 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
8402 ++LoopsVectorized;
8403 }
8404
8405 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
8406 "DT not preserved correctly");
8407 assert(!verifyFunction(*F, &dbgs()));
8408
8409 return true;
8410}
8411
8412 LoopVectorizeResult LoopVectorizePass::runImpl(Function &F) {
8413 
8414 // Don't attempt if
8415 // 1. the target claims to have no vector registers, and
8416 // 2. interleaving won't help ILP.
8417 //
8418 // The second condition is necessary because, even if the target has no
8419 // vector registers, loop vectorization may still enable scalar
8420 // interleaving.
8421 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
8422 TTI->getMaxInterleaveFactor(ElementCount::getFixed(1)) < 2)
8423 return LoopVectorizeResult(false, false);
8424
8425 bool Changed = false, CFGChanged = false;
8426
8427 // The vectorizer requires loops to be in simplified form.
8428 // Since simplification may add new inner loops, it has to run before the
8429 // legality and profitability checks. This means running the loop vectorizer
8430 // will simplify all loops, regardless of whether anything ends up being
8431 // vectorized.
8432 for (const auto &L : *LI)
8433 Changed |= CFGChanged |=
8434 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
8435
8436 // Build up a worklist of inner-loops to vectorize. This is necessary as
8437 // the act of vectorizing or partially unrolling a loop creates new loops
8438 // and can invalidate iterators across the loops.
8439 SmallVector<Loop *, 8> Worklist;
8440
8441 for (Loop *L : *LI)
8442 collectSupportedLoops(*L, LI, ORE, Worklist);
8443
8444 LoopsAnalyzed += Worklist.size();
8445
8446 // Now walk the identified inner loops.
8447 while (!Worklist.empty()) {
8448 Loop *L = Worklist.pop_back_val();
8449
8450 // For the inner loops we actually process, form LCSSA to simplify the
8451 // transform.
8452 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
8453
8454 Changed |= CFGChanged |= processLoop(L);
8455
8456 if (Changed) {
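// The transformation may have invalidated the cached loop access info for
// the remaining loops; drop it so it gets recomputed.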
8457 LAIs->clear();
8458
8459#ifndef NDEBUG
8460 if (VerifySCEV)
8461 SE->verify();
8462#endif
8463 }
8464 }
8465
8466 // Process each loop nest in the function.
8467 return LoopVectorizeResult(Changed, CFGChanged);
8468}
8469
8470 PreservedAnalyses LoopVectorizePass::run(Function &F,
8471 FunctionAnalysisManager &AM) {
8472 LI = &AM.getResult<LoopAnalysis>(F);
8473 // There are no loops in the function. Return before computing other
8474 // expensive analyses.
8475 if (LI->empty())
8476 return PreservedAnalyses::all();
8477 SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
8478 TTI = &AM.getResult<TargetIRAnalysis>(F);
8479 DT = &AM.getResult<DominatorTreeAnalysis>(F);
8480 TLI = &AM.getResult<TargetLibraryAnalysis>(F);
8481 AC = &AM.getResult<AssumptionAnalysis>(F);
8482 DB = &AM.getResult<DemandedBitsAnalysis>(F);
8483 ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
8484 LAIs = &AM.getResult<LoopAccessAnalysis>(F);
8485 AA = &AM.getResult<AAManager>(F);
8486
8487 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
8488 PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
8489 GetBFI = [&AM, &F]() -> BlockFrequencyInfo & {
8490 return AM.getResult<BlockFrequencyAnalysis>(F);
8491 };
8492 LoopVectorizeResult Result = runImpl(F);
8493 if (!Result.MadeAnyChange)
8494 return PreservedAnalyses::all();
8495 PreservedAnalyses PA;
8496 
8497 if (isAssignmentTrackingEnabled(*F.getParent())) {
8498 for (auto &BB : F)
8499 RemoveRedundantDbgInstrs(&BB);
8500 }
8501
8502 PA.preserve<LoopAnalysis>();
8503 PA.preserve<DominatorTreeAnalysis>();
8504 PA.preserve<ScalarEvolutionAnalysis>();
8505 PA.preserve<LoopAccessAnalysis>();
8506 
8507 if (Result.MadeCFGChange) {
8508 // Making CFG changes likely means a loop got vectorized. Indicate that
8509 // extra simplification passes should be run.
8510 // TODO: MadeCFGChanges is not a perfect proxy. Extra passes should only
8511 // be run if runtime checks have been added.
8512 AM.getResult<ShouldRunExtraVectorPasses>(F);
8513 PA.preserve<ShouldRunExtraVectorPasses>();
8514 } else {
8515 PA.preserveSet<CFGAnalyses>();
8516 }
8517 return PA;
8518}
8519
8520 void LoopVectorizePass::printPipeline(
8521 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
8522 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
8523 OS, MapClassName2PassName);
8524
8525 OS << '<';
8526 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
8527 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
8528 OS << '>';
8529}
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static bool IsEmptyBlock(MachineBasicBlock *MBB)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static cl::opt< IntrinsicCostStrategy > IntrinsicCost("intrinsic-cost-strategy", cl::desc("Costing strategy for intrinsic instructions"), cl::init(IntrinsicCostStrategy::InstructionCost), cl::values(clEnumValN(IntrinsicCostStrategy::InstructionCost, "instruction-cost", "Use TargetTransformInfo::getInstructionCost"), clEnumValN(IntrinsicCostStrategy::IntrinsicCost, "intrinsic-cost", "Use TargetTransformInfo::getIntrinsicInstrCost"), clEnumValN(IntrinsicCostStrategy::TypeBasedIntrinsicCost, "type-based-intrinsic-cost", "Calculate the intrinsic cost based only on argument types")))
static InstructionCost getCost(Instruction &Inst, TTI::TargetCostKind CostKind, TargetTransformInfo &TTI)
Definition CostModel.cpp:73
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
static bool hasNoUnsignedWrap(BinaryOperator &I)
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
This header provides classes for managing per-loop analyses.
static const char * VerboseDebug
#define LV_NAME
This file defines the LoopVectorizationLegality class.
cl::opt< bool > VPlanBuildOuterloopStressTest
static cl::opt< bool > ConsiderRegPressure("vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden, cl::desc("Discard VFs if their register pressure is too high."))
This file provides a LoopVectorizationPlanner class.
static void collectSupportedLoops(Loop &L, LoopInfo *LI, OptimizationRemarkEmitter *ORE, SmallVectorImpl< Loop * > &V)
static cl::opt< unsigned > EpilogueVectorizationMinVF("epilogue-vectorization-minimum-VF", cl::Hidden, cl::desc("Only loops with vectorization factor equal to or larger than " "the specified value are considered for epilogue vectorization."))
static cl::opt< unsigned > EpilogueVectorizationForceVF("epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, cl::desc("When epilogue vectorization is enabled, and a value greater than " "1 is specified, forces the given VF for all applicable epilogue " "loops."))
static unsigned getMaxTCFromNonZeroRange(PredicatedScalarEvolution &PSE, Loop *L)
Get the maximum trip count for L from the SCEV unsigned range, excluding zero from the range.
static Type * maybeVectorizeType(Type *Ty, ElementCount VF)
static ElementCount getSmallConstantTripCount(ScalarEvolution *SE, const Loop *L)
A version of ScalarEvolution::getSmallConstantTripCount that returns an ElementCount to include loops...
static bool hasUnsupportedHeaderPhiRecipe(VPlan &Plan)
Returns true if the VPlan contains header phi recipes that are not currently supported for epilogue v...
static cl::opt< unsigned > VectorizeMemoryCheckThreshold("vectorize-memory-check-threshold", cl::init(128), cl::Hidden, cl::desc("The maximum allowed number of runtime memory checks"))
static void connectEpilogueVectorLoop(VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI, DominatorTree *DT, GeneratedRTChecks &Checks, ArrayRef< Instruction * > InstsToMove, ArrayRef< VPInstruction * > ResumeValues)
Connect the epilogue vector loop generated for EpiPlan to the main vector loop, after both plans have...
static cl::opt< unsigned > TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), cl::Hidden, cl::desc("Loops with a constant trip count that is smaller than this " "value are vectorized only if no scalar iteration overheads " "are incurred."))
Loops with a known constant trip count below this number are vectorized only if no scalar iteration o...
static void debugVectorizationMessage(const StringRef Prefix, const StringRef DebugMsg, Instruction *I)
Write a DebugMsg about vectorization to the debug output stream.
static cl::opt< cl::boolOrDefault > ForceMaskedDivRem("force-widen-divrem-via-masked-intrinsic", cl::Hidden, cl::desc("Override cost based masked intrinsic widening " "for div/rem instructions"))
static void legacyCSE(BasicBlock *BB)
FIXME: This legacy common-subexpression-elimination routine is scheduled for removal,...
static VPIRBasicBlock * replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB, VPlan *Plan=nullptr)
Replace VPBB with a VPIRBasicBlock wrapping IRBB.
static Intrinsic::ID getMaskedDivRemIntrinsic(unsigned Opcode)
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I)
Look for a meaningful debug location on the instruction or its operands.
TailFoldingPolicyTy
Option tail-folding-policy indicates that an epilogue is undesired, that tail folding is preferred,...
static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style)
static cl::opt< bool > EnableEarlyExitVectorization("enable-early-exit-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of early exit loops with uncountable exits."))
static unsigned estimateElementCount(ElementCount VF, std::optional< unsigned > VScale)
This function attempts to return a value that represents the ElementCount at runtime.
static constexpr uint32_t MinItersBypassWeights[]
static cl::opt< unsigned > ForceTargetNumScalarRegs("force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers."))
static SmallVector< VPInstruction * > preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan)
Prepare MainPlan for vectorizing the main vector loop during epilogue vectorization.
static cl::opt< unsigned > SmallLoopCost("small-loop-cost", cl::init(20), cl::Hidden, cl::desc("The cost of a loop that is considered 'small' by the interleaver."))
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, const Loop *TheLoop, Instruction *I, DebugLoc DL={})
Create an analysis remark that explains why vectorization failed.
static cl::opt< unsigned > ForceTargetNumVectorRegs("force-target-num-vector-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of vector registers."))
static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > EnableIndVarRegisterHeur("enable-ind-var-reg-heur", cl::init(true), cl::Hidden, cl::desc("Count the induction variable only once when interleaving"))
static cl::opt< TailFoldingStyle > ForceTailFoldingStyle("force-tail-folding-style", cl::desc("Force the tail folding style"), cl::init(TailFoldingStyle::None), cl::values(clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"), clEnumValN(TailFoldingStyle::Data, "data", "Create lane mask for data only, using active.lane.mask intrinsic"), clEnumValN(TailFoldingStyle::DataWithoutLaneMask, "data-without-lane-mask", "Create lane mask with compare/stepvector"), clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", "Use predicated EVL instructions for tail folding. If EVL " "is unsupported, fallback to data-without-lane-mask.")))
static void printOptimizedVPlan(VPlan &)
static SmallVector< Instruction * > preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel &CM, VFSelectionContext &Config, ScalarEvolution &SE)
Prepare Plan for vectorizing the epilogue loop.
static cl::opt< bool > EnableEpilogueVectorization("enable-epilogue-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of epilogue loops."))
static cl::opt< bool > PreferPredicatedReductionSelect("prefer-predicated-reduction-select", cl::init(false), cl::Hidden, cl::desc("Prefer predicating a reduction operation over an after loop select."))
static std::optional< ElementCount > getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L, bool CanUseConstantMax=true, bool CanExcludeZeroTrips=false)
Returns "best known" trip count, which is either a valid positive trip count or std::nullopt when an ...
static const SCEV * getAddressAccessSCEV(Value *Ptr, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets the address access SCEV for Ptr, if it should be used for cost modeling according to isAddressSC...
static cl::opt< bool > EnableLoadStoreRuntimeInterleave("enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, cl::desc("Enable runtime interleaving until load/store ports are saturated"))
static bool hasIrregularType(Type *Ty, const DataLayout &DL)
A helper function that returns true if the given type is irregular.
static cl::opt< bool > LoopVectorizeWithBlockFrequency("loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, cl::desc("Enable the use of the block frequency analysis to access PGO " "heuristics minimizing code growth in cold regions and being more " "aggressive in hot regions."))
static bool useActiveLaneMask(TailFoldingStyle Style)
static bool hasReplicatorRegion(VPlan &Plan)
static bool isIndvarOverflowCheckKnownFalse(const LoopVectorizationCostModel *Cost, ElementCount VF, std::optional< unsigned > UF=std::nullopt)
For the given VF and UF and maximum trip count computed for the loop, return whether the induction va...
static void addFullyUnrolledInstructionsToIgnore(Loop *L, const LoopVectorizationLegality::InductionList &IL, SmallPtrSetImpl< Instruction * > &InstsToIgnore)
Knowing that loop L executes a single vector iteration, add instructions that will get simplified and...
static bool hasFindLastReductionPhi(VPlan &Plan)
Returns true if the VPlan contains a VPReductionPHIRecipe with FindLast recurrence kind.
static cl::opt< bool > EnableInterleavedMemAccesses("enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop"))
static cl::opt< bool > EnableMaskedInterleavedMemAccesses("enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"))
An interleave-group may need masking if it resides in a block that needs predication,...
static cl::opt< bool > ForceOrderedReductions("force-ordered-reductions", cl::init(false), cl::Hidden, cl::desc("Enable the vectorisation of loops with in-order (strict) " "FP reductions"))
static cl::opt< TailFoldingPolicyTy > TailFoldingPolicy("tail-folding-policy", cl::init(TailFoldingPolicyTy::None), cl::Hidden, cl::desc("Tail-folding preferences over creating an epilogue loop."), cl::values(clEnumValN(TailFoldingPolicyTy::None, "dont-fold-tail", "Don't tail-fold loops."), clEnumValN(TailFoldingPolicyTy::PreferFoldTail, "prefer-fold-tail", "prefer tail-folding, otherwise create an epilogue when " "appropriate."), clEnumValN(TailFoldingPolicyTy::MustFoldTail, "must-fold-tail", "always tail-fold, don't attempt vectorization if " "tail-folding fails.")))
static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks, VectorizationFactor &VF, Loop *L, PredicatedScalarEvolution &PSE, VPCostContext &CostCtx, VPlan &Plan, EpilogueLowering SEL, std::optional< unsigned > VScale)
This function determines whether or not it's still profitable to vectorize the loop given the extra w...
static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx, VPlan &Plan, ElementCount VF)
For loops with uncountable early exits, find the cost of doing work when exiting the loop early,...
cl::opt< bool > VPlanBuildOuterloopStressTest("vplan-build-outerloop-stress-test", cl::init(false), cl::Hidden, cl::desc("Build VPlan for every supported loop nest in the function and bail " "out right after the build (stress test the VPlan H-CFG construction " "in the VPlan-native vectorization path)."))
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static EpilogueLowering getEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, bool OptForSize, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L, VPlan &BestEpiPlan, ArrayRef< VPInstruction * > ResumeValues)
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI)
Check if any recipe of Plan will generate a vector value, which will be assigned a vector register.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file implements a map that provides insertion order iteration.
This file contains the declarations for metadata subclasses.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static InstructionCost getScalarizationOverhead(const TargetTransformInfo &TTI, Type *ScalarTy, VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None)
This is similar to TargetTransformInfo::getScalarizationOverhead, but if ScalarTy is a FixedVectorTyp...
This file contains some templates that are useful if you are working with the STL at all.
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:119
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition Debug.h:72
This pass exposes codegen information to IR-level passes.
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type ...
This file contains the declarations of different VPlan-related auxiliary helpers.
This file provides utility VPlan to VPlan transformations.
#define RUN_VPLAN_PASS(PASS,...)
#define RUN_VPLAN_PASS_NO_VERIFY(PASS,...)
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
static const char PassName[]
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
A manager for alias analyses.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1563
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1535
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
Get the array size.
Definition ArrayRef.h:141
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:986
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
Conditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
This class represents a range of values.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
static DebugLoc getTemporary()
Definition DebugLoc.h:160
static DebugLoc getUnknown()
Definition DebugLoc.h:161
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:254
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition DenseMap.h:292
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:324
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:312
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
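A short illustration (assumed usage, not from this file) of how ElementCount encodes fixed and scalable vectorization factors.
#include "llvm/Support/TypeSize.h"
using namespace llvm;
void elementCountDemo() {
  ElementCount Fixed4 = ElementCount::getFixed(4);    // e.g. <4 x i32>
  ElementCount Scal4 = ElementCount::getScalable(4);  // e.g. <vscale x 4 x i32>
  bool V = Fixed4.isVector();     // true: more than one element
  bool S = Scal4.isScalable();    // true: multiplied by vscale at runtime
  (void)V; (void)S;
}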
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan)
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (i....
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Check, VPlan &Plan)
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags none()
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2858
A struct for saving information about induction variables.
const SCEV * getStep() const
ArrayRef< Instruction * > getCastInsts() const
Returns an ArrayRef to the type cast instructions in the induction update chain, that are redundant w...
@ IK_PtrInduction
Pointer induction var. Step = C.
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor)
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separ...
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization ...
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
const TargetTransformInfo * TTI
Target Transform Info.
LoopVectorizationCostModel * Cost
The profitability analysis.
friend class LoopVectorizationPlanner
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, ElementCount VecWidth, unsigned UnrollFactor, LoopVectorizationCostModel *CM, GeneratedRTChecks &RTChecks, VPlan &Plan)
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
LoopInfo * LI
Loop Info.
DominatorTree * DT
Dominator Tree.
void fixVectorizedLoop(VPTransformState &State)
Fix the vectorized code, taking care of header phi's, and more.
virtual BasicBlock * createVectorizedLoopSkeleton()
Creates a basic block for the scalar preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
IRBuilder Builder
The builder that we use.
void fixNonInductionPHIs(VPTransformState &State)
Fix the non-induction PHIs in Plan.
VPBasicBlock * VectorPHVPBB
The vector preheader block of Plan, used as target for check blocks introduced during skeleton creati...
unsigned UF
The vectorization unroll factor to use.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks,...
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
BasicBlock * createScalarPreheader(StringRef Prefix)
Create and return a new IR basic block for the scalar preheader whose name is prefixed with Prefix.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
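Hedged sketch of guarding InstructionCost::getValue() behind a validity check, as the note above suggests; the comparison helper itself is an assumption.
#include "llvm/Support/InstructionCost.h"
using namespace llvm;
bool isCheaper(InstructionCost A, InstructionCost B) {
  if (!A.isValid() || !B.isValid())
    return false;                      // treat invalid costs as "not cheaper"
  return A.getValue() < B.getValue();  // getValue() is only meaningful when valid
}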
bool isCast() const
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
LLVM_ABI APInt getMask() const
For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
Definition Type.cpp:378
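Sketch of constructing an IntegerType and querying its mask, per the entries above (illustrative; the function name is an assumption).
#include "llvm/ADT/APInt.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;
APInt maskForWidth(LLVMContext &Ctx, unsigned Bits) {
  IntegerType *Ty = IntegerType::get(Ctx, Bits); // e.g. i16 for Bits == 16
  return Ty->getMask();                          // 0xFFFF for i16, 0xFF for i8, ...
}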
The group of interleaved loads/stores sharing the same stride and close to each other.
auto members() const
Return an iterator range over the non-null members of this group, in index order.
InstTy * getInsertPos() const
uint32_t getNumMembers() const
Drive the analysis of interleaved memory accesses in the loop.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue ...
LLVM_ABI void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
An instruction for reading from memory.
Type * getPointerOperandType() const
This analysis provides dependence information for the memory accesses of a loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:587
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
iterator_range< block_iterator > blocks() const
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
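A minimal sketch (assumed usage) of visiting a loop's blocks in reverse post-order via the LoopBlocksRPO wrapper described above.
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
using namespace llvm;
void visitLoopInRPO(Loop *L, LoopInfo *LI) {
  LoopBlocksRPO RPOT(L);
  RPOT.perform(LI);                // traverse the loop blocks and cache the order
  for (BasicBlock *BB : RPOT) {    // reverse post-order over the loop body
    (void)BB;                      // ... process each block here
  }
}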
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is n...
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
bool isEpilogueVectorizationProfitable(const ElementCount VF, const unsigned IC) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
bool useWideActiveLaneMask() const
Returns true if the use of wide lane masks is requested and the loop is using tail-folding with a lan...
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
BlockFrequencyInfo * BFI
The BlockFrequencyInfo returned from GetBFI.
BlockFrequencyInfo & getBFI()
Returns the BlockFrequencyInfo for the function if cached, otherwise fetches it via GetBFI.
bool isForcedScalar(Instruction *I, ElementCount VF) const
Returns true if I has been forced to be scalarized at VF.
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
bool preferTailFoldedLoop() const
Returns true if tail-folding is preferred over an epilogue.
bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF)
Returns true if an artificially high cost for emulated masked memrefs should be used.
void collectNonVectorizedAndSetWideningDecisions(ElementCount VF)
Collect values that will not be widened, including Uniforms, Scalars, and Instructions to Scalarize f...
bool isMaskRequired(Instruction *I) const
Wrapper function for LoopVectorizationLegality::isMaskRequired, that passes the Instruction I and if ...
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationLegality * Legal
Vectorization legality.
uint64_t getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind, const BasicBlock *BB)
A helper function that returns how much we should divide the cost of a predicated block by.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vect...
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
Loop * TheLoop
The loop that we evaluate.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and cl...
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, InstructionCost Cost)
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available...
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
void setTailFoldingStyle(bool IsScalableVF, unsigned UserIC)
Selects and saves TailFoldingStyle.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector ...
bool isEpilogueAllowed() const
Returns true if an epilogue is allowed (e.g., not prevented by optsize or a loop hint annotation).
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool shouldConsiderInvariant(Value *Op)
Returns true if Op should be considered invariant and if it is trivially hoistable.
bool foldTailByMasking() const
Returns true if all loop blocks should be masked in order to fold the loop tail.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail fol...
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block require predication for any reason,...
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
bool usePredicatedReductionSelect(RecurKind RecurrenceKind) const
Returns true if the predicated reduction select should be used to set the incoming value for the redu...
LoopVectorizationCostModel(EpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, std::function< BlockFrequencyInfo &()> GetBFI, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI, VFSelectionContext &Config)
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF)
Return the costs for our two available strategies for lowering a div/rem operation which requires spe...
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
bool isScalarWithPredication(Instruction *I, ElementCount VF)
Returns true if I is an instruction which requires predication and for which our chosen predication s...
std::function< BlockFrequencyInfo &()> GetBFI
A function to lazily fetch BlockFrequencyInfo.
InstructionCost expectedCost(ElementCount VF)
Returns the expected execution cost.
void setCostBasedWideningDecision(ElementCount VF)
A memory access instruction may be vectorized in more than one way.
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost MaskedCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
TailFoldingStyle getTailFoldingStyle() const
Returns the TailFoldingStyle that is best for the current loop.
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
MapVector< PHINode *, InductionDescriptor > InductionList
InductionList saves induction variables and maps them to the induction descriptor.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
const SmallVector< BasicBlock *, 4 > & getCountableExitingBlocks() const
Returns all exiting blocks with a countable exit, i.e.
bool hasUncountableEarlyExit() const
Returns true if the loop has uncountable early exits, i.e.
bool hasHistograms() const
Returns true if the loop contains any known histogram operations.
const LoopAccessInfo * getLAI() const
Planner drives the vectorization process after having passed Legality checks.
DenseMap< const SCEV *, Value * > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, EpilogueVectorizationKind EpilogueVecKind=EpilogueVectorizationKind::None)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selecte...
EpilogueVectorizationKind
@ MainLoop
Vectorizing the main loop of epilogue vectorization.
VPlan & getPlanFor(ElementCount VF) const
Return the VPlan for VF.
Definition VPlan.cpp:1679
void updateLoopMetadataAndProfileInfo(Loop *VectorLoop, VPBasicBlock *HeaderVPBB, const VPlan &Plan, bool VectorizingEpilogue, MDNode *OrigLoopID, std::optional< unsigned > OrigAverageTripCount, unsigned OrigLoopInvocationWeight, unsigned EstimatedVFxUF, bool DisableRuntimeUnroll)
Update loop metadata and profile info for both the scalar remainder loop and VectorLoop,...
Definition VPlan.cpp:1730
void attachRuntimeChecks(VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const
Attach the runtime checks of RTChecks to Plan.
unsigned selectInterleaveCount(VPlan &Plan, ElementCount VF, InstructionCost LoopCost)
void emitInvalidCostRemarks(OptimizationRemarkEmitter *ORE)
Emit remarks for recipes with invalid costs in the available VPlans.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
Definition VPlan.cpp:1665
void printPlans(raw_ostream &O)
Definition VPlan.cpp:1836
void plan(ElementCount UserVF, unsigned UserIC)
Build VPlans for the specified UserVF and UserIC if they are non-zero or all applicable candidate VFs...
std::unique_ptr< VPlan > selectBestEpiloguePlan(VPlan &MainPlan, ElementCount MainLoopVF, unsigned IC)
void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount) const
Create a check to Plan to see if the vector loop should be executed based on its trip count.
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
std::pair< VectorizationFactor, VPlan * > computeBestVF()
Compute and return the most profitable vectorization factor and the corresponding best VPlan.
This holds vectorization requirements that must be verified late in the process.
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
void emitRemarkWithHints() const
Dumps all the hint information.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend t...
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition LoopInfo.cpp:73
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition LoopInfo.cpp:659
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition LoopInfo.cpp:67
Metadata node.
Definition Metadata.h:1080
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:38
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:126
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Definition Module.cpp:235
Diagnostic information for optimization analysis remarks related to pointer aliasing.
Diagnostic information for optimization analysis remarks related to floating-point non-commutativity.
Diagnostic information for optimization analysis remarks.
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
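Illustrative use of the PHINode incoming-edge API listed above; the helper name and arguments are assumptions.
#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;
void addPhiEdge(PHINode *Phi, Value *V, BasicBlock *Pred) {
  Phi->addIncoming(V, Pred);                        // append one (value, block) pair
  assert(Phi->getIncomingValueForBlock(Pred) == V); // retrievable by predecessor block
  assert(Phi->getNumIncomingValues() >= 1);         // one edge per incoming block
}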
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
static bool isFindLastRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static LLVM_ABI bool isSubRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is for a sub operation.
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class uses information about analyzed scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * getURemExpr(SCEVUse LHS, SCEVUse RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may affect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may affect its value...
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
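Hedged sketch combining a few of the ScalarEvolution queries above: deriving a trip-count SCEV from the backedge-taken count (the helper name is an assumption).
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;
const SCEV *tripCountSCEV(ScalarEvolution &SE, const Loop *L) {
  const SCEV *BTC = SE.getBackedgeTakenCount(L);   // SCEVCouldNotCompute if unknown
  if (isa<SCEVCouldNotCompute>(BTC) || !SE.isLoopInvariant(BTC, L))
    return nullptr;
  return SE.getTripCountFromExitCount(BTC);        // backedge count + 1, widened safely
}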
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition SetVector.h:57
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
void insert_range(Range &&R)
Definition SetVector.h:176
size_type count(const_arg_type key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:262
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
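A small sketch of SetVector's set-plus-insertion-order semantics (values are illustrative, not from this file).
#include "llvm/ADT/SetVector.h"
using namespace llvm;
unsigned setVectorDemo() {
  SetVector<int> S;
  bool Inserted = S.insert(3);   // true: newly inserted
  Inserted = S.insert(3);        // false: duplicates are ignored
  S.insert(7);
  (void)Inserted;
  return S.size();               // 2; iteration yields 3 then 7, in insertion order
}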
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
VectorInstrContext
Represents a hint about the context in which an insert/extract is used.
@ None
The insert/extract is not used with a load/store.
@ Load
The value being inserted comes from a load (InsertElement only).
@ Store
The extracted value is stored (ExtractElement only).
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCK_Latency
The latency of the instruction.
@ TCC_Free
Expected to fold away in lowering.
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
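Hedged sketch of querying the cost interface above for reciprocal throughput; the wrapper function itself is an assumption.
#include "llvm/Analysis/TargetTransformInfo.h"
using namespace llvm;
InstructionCost throughputCost(const TargetTransformInfo &TTI, const User *U,
                               ArrayRef<const Value *> Operands) {
  return TTI.getInstructionCost(U, Operands,
                                TargetTransformInfo::TCK_RecipThroughput);
}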
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_Reverse
Reverse the order of the vector.
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionalit...
Definition TypeSwitch.h:89
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
Definition TypeSwitch.h:98
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:130
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:310
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
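Brief illustration (assumed usage) of the Type queries listed above: for a vector type, the scalar type and its bit width refer to the element type.
#include "llvm/IR/Type.h"
using namespace llvm;
unsigned elementBitWidth(Type *Ty) {
  // For <4 x i32> both calls below refer to i32; for plain i32 they are no-ops.
  Type *Scalar = Ty->getScalarType();
  return Scalar->getScalarSizeInBits();   // 32 for i32 and for <4 x i32>
}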
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:267
iterator_range< op_iterator > op_range
Definition User.h:256
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:207
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition VectorUtils.h:76
Holds state needed to make cost decisions before computing costs per-VF, including the maximum VFs.
const TTI::TargetCostKind CostKind
The kind of cost that we are calculating.
std::optional< unsigned > getVScaleForTuning() const
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition VPlan.h:4148
RecipeListTy::iterator iterator
Instruction iterators...
Definition VPlan.h:4175
iterator end()
Definition VPlan.h:4185
iterator begin()
Recipe iterator methods.
Definition VPlan.h:4183
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
Definition VPlan.h:4236
InstructionCost cost(ElementCount VF, VPCostContext &Ctx) override
Return the cost of this VPBasicBlock.
Definition VPlan.cpp:745
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
Definition VPlan.cpp:233
const VPRecipeBase & front() const
Definition VPlan.h:4195
VPRecipeBase * getTerminator()
If the block has multiple successors, return the branch recipe terminating the block.
Definition VPlan.cpp:628
bool empty() const
Definition VPlan.h:4194
const VPBasicBlock * getExitingBasicBlock() const
Definition VPlan.cpp:203
void setName(const Twine &newName)
Definition VPlan.h:178
VPlan * getPlan()
Definition VPlan.cpp:178
const VPBasicBlock * getEntryBasicBlock() const
Definition VPlan.cpp:183
VPBlockBase * getSingleSuccessor() const
Definition VPlan.h:226
static void reassociateBlocks(VPBlockBase *Old, VPBlockBase *New)
Reassociate all the blocks connected to Old so that they now point to New.
Definition VPlanUtils.h:267
static auto blocksOnly(T &&Range)
Return an iterator range over Range which only includes BlockTy blocks.
Definition VPlanUtils.h:295
VPlan-based builder utility analogous to IRBuilder.
VPInstruction * createAdd(VPValue *LHS, VPValue *RHS, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="", VPRecipeWithIRFlags::WrapFlagsTy WrapFlags={false, false})
T * insert(T *R)
Insert R at the current insertion point. Returns R unchanged.
static VPBuilder getToInsertAfter(VPRecipeBase *R)
Create a VPBuilder to insert after R.
VPPhi * createScalarPhi(ArrayRef< VPValue * > IncomingValues, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="", const VPIRFlags &Flags={})
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition VPlanValue.h:504
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition VPlanValue.h:477
A pure virtual base class for all recipes modeling header phis, including phis for first order recurr...
Definition VPlan.h:2305
virtual VPValue * getBackedgeValue()
Returns the incoming value from the loop backedge.
Definition VPlan.h:2347
void setBackedgeValue(VPValue *V)
Update the incoming value from the loop backedge.
Definition VPlan.h:2352
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition VPlan.h:2336
A recipe representing a sequence of load -> update -> store as part of a histogram operation.
Definition VPlan.h:2050
A special type of VPBasicBlock that wraps an existing IR basic block.
Definition VPlan.h:4301
Class to record and manage LLVM IR flags.
Definition VPlan.h:685
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const
This is a concrete Recipe that models a single VPlan-level instruction.
Definition VPlan.h:1220
iterator_range< operand_iterator > operandsWithoutMask()
Returns an iterator range over the operands excluding the mask operand if present.
Definition VPlan.h:1466
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
Definition VPlan.h:1320
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
Definition VPlan.h:1311
@ ComputeReductionResult
Reduce the operands to the final reduction result using the operation specified via the operation's V...
Definition VPlan.h:1263
unsigned getOpcode() const
Definition VPlan.h:1395
void setName(StringRef NewName)
Set the symbolic name for the VPInstruction.
Definition VPlan.h:1494
VPValue * getMask() const
Returns the mask for the VPInstruction.
Definition VPlan.h:1460
VPInterleaveRecipe is a recipe for transforming an interleave group of load or stores into one wide l...
Definition VPlan.h:2953
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition VPlan.h:1623
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:401
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition VPlan.h:553
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipes from IR instructions.
VPRecipeBase * tryToCreateWidenNonPhiRecipe(VPSingleDefRecipe *R, VFRange &Range)
Create and return a widened recipe for a non-phi recipe R if one can be created within the given VF R...
VPHistogramRecipe * widenIfHistogram(VPInstruction *VPI)
If VPI represents a histogram operation (as determined by LoopVectorizationLegality) make that safe f...
VPRecipeBase * tryToWidenMemory(VPInstruction *VPI, VFRange &Range)
Check if the load or store instruction VPI should widened for Range.Start and potentially masked.
bool replaceWithFinalIfReductionStore(VPInstruction *VPI, VPBuilder &FinalRedStoresBuilder)
If VPI is a store of a reduction into an invariant address, delete it.
VPReplicateRecipe * handleReplication(VPInstruction *VPI, VFRange &Range)
Build a VPReplicateRecipe for VPI.
bool isOrdered() const
Returns true, if the phi is part of an ordered reduction.
Definition VPlan.h:2752
unsigned getVFScaleFactor() const
Get the factor that the VF of this recipe's output should be scaled by, or 1 if it isn't scaled.
Definition VPlan.h:2731
bool isInLoop() const
Returns true if the phi is part of an in-loop reduction.
Definition VPlan.h:2755
RecurKind getRecurrenceKind() const
Returns the recurrence kind of the reduction.
Definition VPlan.h:2749
A recipe to represent inloop, ordered or partial reduction operations.
Definition VPlan.h:3046
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
Definition VPlan.h:4358
const VPBlockBase * getEntry() const
Definition VPlan.h:4402
void clearCanonicalIVNUW(VPInstruction *Increment)
Unsets NUW for the canonical IV increment Increment, for loop regions.
Definition VPlan.h:4486
VPRegionValue * getCanonicalIV()
Return the canonical induction variable of the region, null for replicating regions.
Definition VPlan.h:4470
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
Definition VPlan.h:3200
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Definition VPlan.h:605
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition VPlan.h:670
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
Definition VPlanValue.h:335
operand_range operands()
Definition VPlanValue.h:403
void setOperand(unsigned I, VPValue *New)
Definition VPlanValue.h:379
VPValue * getOperand(unsigned N) const
Definition VPlanValue.h:374
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into,...
Definition VPlanValue.h:49
Value * getLiveInIRValue() const
Return the underlying IR value for a VPIRValue.
Definition VPlan.cpp:138
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Definition VPlan.cpp:128
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition VPlanValue.h:74
void replaceAllUsesWith(VPValue *New)
Definition VPlan.cpp:1478
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplac...
Definition VPlan.cpp:1484
user_range users()
Definition VPlanValue.h:155
A recipe to compute a pointer to the last element of each part of a widened memory access for widened...
Definition VPlan.h:2156
A recipe to compute the pointers for widened memory accesses of SourceElementTy.
Definition VPlan.h:2229
VPWidenCastRecipe is a recipe to create vector cast instructions.
Definition VPlan.h:1830
A recipe for handling GEP instructions.
Definition VPlan.h:2092
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
Definition VPlan.h:2453
A recipe for widened phis.
Definition VPlan.h:2589
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
Definition VPlan.h:1774
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient outp...
Definition VPlan.h:4506
bool hasVF(ElementCount VF) const
Definition VPlan.h:4729
ElementCount getSingleVF() const
Returns the single VF of the plan, asserting that the plan has exactly one VF.
Definition VPlan.h:4742
VPBasicBlock * getEntry()
Definition VPlan.h:4602
VPValue * getTripCount() const
The trip count of the original loop.
Definition VPlan.h:4665
VPSymbolicValue & getVFxUF()
Returns VF * UF of the vector loop region.
Definition VPlan.h:4705
bool hasUF(unsigned UF) const
Definition VPlan.h:4754
ArrayRef< VPIRBasicBlock * > getExitBlocks() const
Return an ArrayRef containing VPIRBasicBlocks wrapping the exit blocks of the original scalar loop.
Definition VPlan.h:4655
VPIRValue * getOrAddLiveIn(Value *V)
Gets the live-in VPIRValue for V or adds a new live-in (if none exists yet) for V.
Definition VPlan.h:4779
VPIRValue * getZero(Type *Ty)
Return a VPIRValue wrapping the null value of type Ty.
Definition VPlan.h:4805
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition VPlan.cpp:1065
bool hasEarlyExit() const
Returns true if the VPlan is based on a loop with an early exit.
Definition VPlan.h:4902
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
Definition VPlan.cpp:1047
LLVM_ABI_FOR_TEST bool isOuterLoop() const
Returns true if this VPlan is for an outer loop, i.e., its vector loop region contains a nested loop ...
Definition VPlan.cpp:1080
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
Definition VPlan.h:4679
VPBasicBlock * getMiddleBlock()
Returns the 'middle' block of the plan, that is the block that selects whether to execute the scalar ...
Definition VPlan.h:4631
VPBasicBlock * getVectorPreheader() const
Returns the preheader of the vector loop region, if one exists, or null otherwise.
Definition VPlan.h:4607
VPSymbolicValue & getUF()
Returns the UF of the vector loop region.
Definition VPlan.h:4702
bool hasScalarVFOnly() const
Definition VPlan.h:4747
VPBasicBlock * getScalarPreheader() const
Return the VPBasicBlock for the preheader of the scalar loop.
Definition VPlan.h:4645
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
Definition VPlan.cpp:917
VPIRBasicBlock * getScalarHeader() const
Return the VPIRBasicBlock wrapping the header of the scalar loop.
Definition VPlan.h:4651
VPSymbolicValue & getVF()
Returns the VF of the vector loop region.
Definition VPlan.h:4698
LLVM_ABI_FOR_TEST VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
Definition VPlan.cpp:1221
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:162
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:393
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:230
constexpr bool isNonZero() const
Definition TypeSize.h:155
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
constexpr bool isZero() const
Definition TypeSize.h:153
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
IteratorT end() const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
match_bind< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
auto match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
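Illustrative (hedged) use of the IR pattern matchers listed above; the helper names are assumptions.
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;
// Matches "add X, X" for a specific, already-known value X.
bool isAddOfValueToItself(Value *V, Value *X) {
  return match(V, m_Add(m_Specific(X), m_Specific(X)));
}
// Matches "select %cond, 1, 1" and captures the condition.
bool isSelectBetweenOnes(Value *V, Value *&Cond) {
  return match(V, m_Select(m_Value(Cond), m_One(), m_One()));
}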
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
specificloop_ty m_SpecificLoop(const Loop *L)
cst_pred_ty< is_specific_signed_cst > m_scev_SpecificSInt(int64_t V)
Match an SCEV constant with a plain signed integer (sign-extended value will be matched)
match_bind< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
bool match(const SCEV *S, const Pattern &P)
SCEVAffineAddRec_match< Op0_t, Op1_t, match_isa< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
bool matchFindIVResult(VPInstruction *VPI, Op0_t ReducedIV, Op1_t Start)
Match FindIV result pattern: select(icmp ne ComputeReductionResult(ReducedIV), Sentinel),...
VPInstruction_match< VPInstruction::ExtractLastLane, Op0_t > m_ExtractLastLane(const Op0_t &Op0)
VPInstruction_match< VPInstruction::BranchOnCount > m_BranchOnCount()
auto m_VPValue()
Match an arbitrary VPValue and ignore it.
VPInstruction_match< VPInstruction::ExtractLastPart, Op0_t > m_ExtractLastPart(const Op0_t &Op0)
VPInstruction_match< VPInstruction::ExtractLane, Op0_t, Op1_t > m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
Definition RDFGraph.h:389
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr)
Get or create a VPValue that corresponds to the expansion of Expr.
VPBasicBlock * getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT)
Returns the header block of the first, top-level loop, or null if none exist.
bool isAddressSCEVForCost(const SCEV *Addr, ScalarEvolution &SE, const Loop *L)
Returns true if Addr is an address SCEV that can be passed to TTI::getAddressComputationCost,...
VPInstruction * findCanonicalIVIncrement(VPlan &Plan)
Find the canonical IV increment of Plan's vector loop region.
VPRecipeBase * findRecipe(VPValue *Start, PredT Pred)
Search Start's users for a recipe satisfying Pred, looking through recipes with definitions.
Definition VPlanUtils.h:116
VPSingleDefRecipe * findHeaderMask(VPlan &Plan)
Collect the header mask with the pattern: (ICMP_ULE, WideCanonicalIV, backedge-taken-count) TODO: Int...
static VPRecipeBase * findUserOf(VPValue *V, const MatchT &P)
If V is used by a recipe matching pattern P, return it.
Definition VPlanUtils.h:137
GEPNoWrapFlags getGEPFlagsForPtr(VPValue *Ptr)
Returns the GEP nowrap flags for Ptr, looking through pointer casts mirroring Value::stripPointerCast...
const SCEV * getSCEVExprForVPValue(const VPValue *V, PredicatedScalarEvolution &PSE, const Loop *L=nullptr)
Return the SCEV expression for V.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, const Loop *TheLoop, Instruction *I=nullptr, DebugLoc DL={})
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:830
constexpr auto not_equal_to(T &&Arg)
Functor variant of std::not_equal_to that can be used as a UnaryPredicate in functional algorithms li...
Definition STLExtras.h:2179
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
auto cast_if_present(const Y &Val)
cast_if_present<X> - Functionally identical to cast, except that a null value is accepted.
Definition Casting.h:683
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
LLVM_ABI_FOR_TEST cl::opt< bool > VerifyEachVPlan
LLVM_ABI std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Return either:
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
InstructionCost Cost
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:633
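A minimal sketch (over an arbitrary BasicBlock, not this pass's code) of how make_early_inc_range lets the loop body erase the current element without invalidating iteration:
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
// Erase unused, side-effect-free, non-terminator instructions in BB.
void dropUnusedInstructions(llvm::BasicBlock &BB) {
  for (llvm::Instruction &I : llvm::make_early_inc_range(BB))
    if (I.use_empty() && !I.mayHaveSideEffects() && !I.isTerminator())
      I.eraseFromParent();
}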
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of a load or store instruction.
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
Definition VPlanCFG.h:253
LLVM_ABI bool VerifySCEV
LLVM_ABI_FOR_TEST cl::opt< bool > VPlanPrintAfterAll
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing t...
Definition VPlanCFG.h:283
SmallVector< VPRegisterUsage, 8 > calculateRegisterUsageForPlan(VPlan &Plan, ArrayRef< ElementCount > VFs, const TargetTransformInfo &TTI, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Estimate the register usage for Plan and vectorization factors in VFs by calculating the highest numb...
auto map_range(ContainerTy &&C, FuncTy F)
Return a range that applies F to the elements of C.
Definition STLExtras.h:365
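A sketch of map_range lazily applying a callable to each element of a range (the names are illustrative):
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
// Sum the doubled elements without materializing an intermediate container.
int sumDoubled(const llvm::SmallVector<int, 4> &Vals) {
  int Sum = 0;
  for (int V : llvm::map_range(Vals, [](int X) { return 2 * X; }))
    Sum += V;
  return Sum;
}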
constexpr auto bind_front(FnT &&Fn, BindArgsT &&...BindArgs)
C++20 bind_front.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:407
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:154
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1635
LLVM_ABI_FOR_TEST cl::opt< bool > EnableWideActiveLaneMask
UncountableExitStyle
Different methods of handling early exits.
Definition VPlan.h:78
@ ReadOnly
No side effects to worry about, so we can process any uncountable exits in the loop and branch either...
Definition VPlan.h:83
@ MaskedHandleExitInScalarLoop
All memory operations other than the load(s) required to determine whether an uncountable exit occurr...
Definition VPlan.h:88
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:209
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1752
LLVM_ABI cl::opt< bool > EnableLoopVectorization
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
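A worked sketch of alignTo with illustrative values: alignTo(13, Align(8)) yields 16, the smallest multiple of 8 that can hold 13 bytes.
#include "llvm/Support/Alignment.h"
#include <cstdint>
// Round a byte size up to the given power-of-two alignment.
uint64_t paddedSize(uint64_t SizeInBytes, uint64_t Alignment) {
  return llvm::alignTo(SizeInBytes, llvm::Align(Alignment));
}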
LLVM_ABI_FOR_TEST cl::list< std::string > VPlanPrintAfterPasses
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:422
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
LLVM_ABI void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
T * find_singleton(R &&Range, Predicate P, bool AllowRepeats=false)
Return the single value in Range that satisfies P(<member of Range> *, AllowRepeats)->T * returning n...
Definition STLExtras.h:1836
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
std::optional< unsigned > getMaxVScale(const Function &F, const TargetTransformInfo &TTI)
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Definition Format.h:129
LLVM_ABI void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, const Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding o...
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
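A short sketch of divideCeil with illustrative numbers: divideCeil(10, 4) == 3, i.e. three 4-element chunks are needed to cover 10 elements.
#include "llvm/Support/MathExtras.h"
// Number of fixed-size chunks required to cover TotalElems elements.
unsigned chunksNeeded(unsigned TotalElems, unsigned ChunkSize) {
  return llvm::divideCeil(TotalElems, ChunkSize);
}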
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elem...
TargetTransformInfo TTI
@ CM_EpilogueNotAllowedLowTripLoop
@ CM_EpilogueNotNeededFoldTail
@ CM_EpilogueNotAllowedFoldTail
@ CM_EpilogueNotAllowedOptSize
@ CM_EpilogueAllowed
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
RecurKind
These are the kinds of recurrences that we support.
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity for the @llvm.vector....
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the specified block at the specified instruction.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1771
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:368
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
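A minimal sketch of is_contained over an arbitrary range (the names are hypothetical):
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
// True when the candidate width W appears in the list of allowed widths.
bool widthIsAllowed(const llvm::SmallVector<unsigned, 4> &AllowedWidths,
                    unsigned W) {
  return llvm::is_contained(AllowedWidths, W);
}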
cl::opt< bool > EnableVPlanNativePath
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
LLVM_ABI Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataAndControlFlow
Use predicate to control both data and control flow.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
@ Data
Use predicate only to mask operations on data in the loop.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool hasBranchWeightMD(const Instruction &I)
Checks if an instruction has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:325
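A sketch of hash_combine mixing several values into a single hash_code (the key being hashed is invented for the example):
#include "llvm/ADT/Hashing.h"
// Combine a vectorization factor and an interleave count into one hash value.
llvm::hash_code hashFactors(unsigned VF, unsigned UF) {
  return llvm::hash_combine(VF, UF);
}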
@ Increment
Incrementally increasing token ID.
Definition AllocToken.h:26
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition bit.h:347
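A short sketch of bit_floor, which mirrors C++20 std::bit_floor: bit_floor(10u) == 8 and bit_floor(0u) == 0.
#include "llvm/ADT/bit.h"
// Largest power of two that does not exceed N (0 for N == 0).
unsigned roundDownToPow2(unsigned N) { return llvm::bit_floor(N); }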
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
std::unique_ptr< VPlan > VPlanPtr
Definition VPlan.h:73
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
LLVM_ABI_FOR_TEST bool verifyVPlanIsValid(const VPlan &Plan)
Verify invariants for general VPlans.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:305
LLVM_ABI_FOR_TEST cl::opt< bool > VPlanPrintVectorRegionScope
LLVM_ABI cl::opt< bool > EnableLoopInterleaving
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given valu...
Encapsulate information regarding vectorization of a loop and its epilogue.
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF, VPlan &EpiloguePlan)
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
This holds details about a histogram operation – a load -> update -> store sequence where each lane i...
TargetLibraryInfo * TLI
LLVM_ABI LoopVectorizeResult runImpl(Function &F)
LLVM_ABI bool processLoop(Loop *L)
ProfileSummaryInfo * PSI
LoopAccessInfoManager * LAIs
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI LoopVectorizePass(LoopVectorizeOptions Opts={})
ScalarEvolution * SE
AssumptionCache * AC
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
std::function< BlockFrequencyInfo &()> GetBFI
TargetTransformInfo * TTI
Storage for information about made changes.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:89
A marker analysis to determine if extra passes should be run after loop vectorization.
static LLVM_ABI AnalysisKey Key
Holds the VFShape for a specific scalar to vector function mapping.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
ElementCount End
Struct to hold various analysis needed for cost computations.
LoopVectorizationCostModel & CM
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of al...
bool isMaskRequired(Instruction *I) const
Forwards to LoopVectorizationCostModel::isMaskRequired.
bool willBeScalarized(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalarized at VF.
uint64_t getPredBlockCostDivisor(BasicBlock *BB) const
TargetTransformInfo::TargetCostKind CostKind
std::optional< CallWideningKind > getLegacyCallKind(CallInst *CI, ElementCount VF) const
Returns the legacy call widening decision for CI at VF, or std::nullopt if none was recorded.
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A VPValue representing a live-in from the input IR or a constant.
Definition VPlanValue.h:240
A pure-virtual common base class for recipes defining a single VPValue and using IR flags.
Definition VPlan.h:1107
A struct that represents some properties of the register usage of a loop.
InstructionCost spillCost(const TargetTransformInfo &TTI, TargetTransformInfo::TargetCostKind CostKind, unsigned OverrideMaxNumRegs=0) const
Calculate the estimated cost of any spills due to using more registers than the number available for ...
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the ou...
A recipe for widening load operations, using the address to load from and an optional mask.
Definition VPlan.h:3567
A recipe for widening store operations, using the stored value, the address to store to and an option...
Definition VPlan.h:3663
static LLVM_ABI_FOR_TEST bool tryToConvertVPInstructionsToVPRecipes(VPlan &Plan, const TargetLibraryInfo &TLI)
Replaces the VPInstructions in Plan with corresponding widen recipes.
static void makeMemOpWideningDecisions(VPlan &Plan, VFRange &Range, VPRecipeBuilder &RecipeBuilder)
Convert load/store VPInstructions in Plan into widened or replicate recipes.
static void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount, bool RequiresScalarEpilogue, bool TailFolded, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, PredicatedScalarEvolution &PSE, VPBasicBlock *CheckBlock=nullptr)
static bool createHeaderPhiRecipes(VPlan &Plan, PredicatedScalarEvolution &PSE, Loop &OrigLoop, const MapVector< PHINode *, InductionDescriptor > &Inductions, const MapVector< PHINode *, RecurrenceDescriptor > &Reductions, const SmallPtrSetImpl< const PHINode * > &FixedOrderRecurrences, const SmallPtrSetImpl< PHINode * > &InLoopReductions, bool AllowReordering)
Replace VPPhi recipes in Plan's header with corresponding VPHeaderPHIRecipe subclasses for inductions...
static void materializeBroadcasts(VPlan &Plan)
Add explicit broadcasts for live-ins and VPValues defined in Plan's entry block if they are used as v...
static void materializePacksAndUnpacks(VPlan &Plan)
Add explicit Build[Struct]Vector recipes to Pack multiple scalar values into vectors and Unpack recip...
static void createInterleaveGroups(VPlan &Plan, const SmallPtrSetImpl< const InterleaveGroup< Instruction > * > &InterleaveGroups, const bool &EpilogueAllowed)
static LLVM_ABI_FOR_TEST std::unique_ptr< VPlan > buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy, DebugLoc IVDL, PredicatedScalarEvolution &PSE, LoopVersioning *LVer=nullptr)
Create a base VPlan0, serving as the common starting point for all later candidates.
static bool simplifyKnownEVL(VPlan &Plan, ElementCount VF, PredicatedScalarEvolution &PSE)
Try to simplify VPInstruction::ExplicitVectorLength recipes when the AVL is known to be <= VF,...
static void removeBranchOnConst(VPlan &Plan, bool OnlyLatches=false)
Remove BranchOnCond recipes with true or false conditions together with removing dead edges to their ...
static void introduceMasksAndLinearize(VPlan &Plan)
Predicate and linearize the control-flow in the only loop region of Plan.
static void materializeFactors(VPlan &Plan, VPBasicBlock *VectorPH, ElementCount VF)
Materialize UF, VF and VFxUF to be computed explicitly using VPInstructions.
static void foldTailByMasking(VPlan &Plan)
Adapts the vector loop region for tail folding by introducing a header mask and conditionally executi...
static void materializeBackedgeTakenCount(VPlan &Plan, VPBasicBlock *VectorPH)
Materialize the backedge-taken count to be computed explicitly using VPInstructions.
static void addMinimumVectorEpilogueIterationCheck(VPlan &Plan, Value *VectorTripCount, bool RequiresScalarEpilogue, ElementCount EpilogueVF, unsigned EpilogueUF, unsigned MainLoopStep, unsigned EpilogueLoopStep, ScalarEvolution &SE)
Add a check to Plan to see if the epilogue vector loop should be executed.
static void addActiveLaneMask(VPlan &Plan, bool UseActiveLaneMaskForControlFlow)
Replace (ICMP_ULE, wide canonical IV, backedge-taken-count) checks with an (active-lane-mask recipe,...
static bool handleMultiUseReductions(VPlan &Plan, OptimizationRemarkEmitter *ORE, Loop *TheLoop)
Try to legalize reductions with multiple in-loop uses.
static void replaceWideCanonicalIVWithWideIV(VPlan &Plan, ScalarEvolution &SE, const TargetTransformInfo &TTI, TargetTransformInfo::TargetCostKind CostKind, ElementCount VF, unsigned UF, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Replace a VPWidenCanonicalIVRecipe if it is present in Plan, with a VPWidenIntOrFpInductionRecipe,...
static void convertToVariableLengthStep(VPlan &Plan)
Transform loops with variable-length stepping after region dissolution.
static void addBranchWeightToMiddleTerminator(VPlan &Plan, ElementCount VF, std::optional< unsigned > VScaleForTuning)
Add branch weight metadata, if the Plan's middle block is terminated by a BranchOnCond recipe.
static std::unique_ptr< VPlan > narrowInterleaveGroups(VPlan &Plan, const TargetTransformInfo &TTI)
Try to find a single VF among Plan's VFs for which all interleave groups (with known minimum VF eleme...
static bool handleFindLastReductions(VPlan &Plan)
Check if Plan contains any FindLast reductions.
static void createInLoopReductionRecipes(VPlan &Plan, ElementCount MinVF)
Create VPReductionRecipes for in-loop reductions.
static void unrollByUF(VPlan &Plan, unsigned UF)
Explicitly unroll Plan by UF.
static DenseMap< const SCEV *, Value * > expandSCEVs(VPlan &Plan, ScalarEvolution &SE)
Expand VPExpandSCEVRecipes in Plan's entry block.
static void convertToConcreteRecipes(VPlan &Plan)
Lower abstract recipes to concrete ones, that can be codegen'd.
static void expandBranchOnTwoConds(VPlan &Plan)
Expand BranchOnTwoConds instructions into explicit CFG with BranchOnCond instructions.
static void materializeVectorTripCount(VPlan &Plan, VPBasicBlock *VectorPHVPBB, bool TailByMasking, bool RequiresScalarEpilogue, VPValue *Step, std::optional< uint64_t > MaxRuntimeStep=std::nullopt)
Materialize vector trip count computations to a set of VPInstructions.
static void hoistPredicatedLoads(VPlan &Plan, PredicatedScalarEvolution &PSE, const Loop *L)
Hoist predicated loads from the same address to the loop entry block, if they are guaranteed to execu...
static void optimizeFindIVReductions(VPlan &Plan, PredicatedScalarEvolution &PSE, Loop &L)
Optimize FindLast reductions selecting IVs (or expressions of IVs) by converting them to FindIV reduc...
static void convertToAbstractRecipes(VPlan &Plan, VPCostContext &Ctx, VFRange &Range)
This function converts initial recipes to the abstract recipes and clamps Range based on cost model f...
static void materializeConstantVectorTripCount(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
static void makeScalarizationDecisions(VPlan &Plan, VFRange &Range)
Make VPlan-based scalarization decisions prior to delegating to the ones made by the legacy CM.
static void addExplicitVectorLength(VPlan &Plan, const std::optional< unsigned > &MaxEVLSafeElements)
Add a VPCurrentIterationPHIRecipe and related recipes to Plan and replaces all uses of the canonical ...
static void makeCallWideningDecisions(VPlan &Plan, VFRange &Range, VPRecipeBuilder &RecipeBuilder, VPCostContext &CostCtx)
Convert call VPInstructions in Plan into widened call, vector intrinsic or replicate recipes based on...
static void adjustFirstOrderRecurrenceMiddleUsers(VPlan &Plan, VFRange &Range)
Adjust first-order recurrence users in the middle block: create penultimate element extracts for LCSS...
static void optimizeEVLMasks(VPlan &Plan)
Optimize recipes which use an EVL-based header mask to VP intrinsics, for example:
static LLVM_ABI_FOR_TEST bool handleEarlyExits(VPlan &Plan, UncountableExitStyle Style, Loop *TheLoop, PredicatedScalarEvolution &PSE, DominatorTree &DT, AssumptionCache *AC)
Update Plan to account for all early exits.
static void replaceSymbolicStrides(VPlan &Plan, PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap)
Replace symbolic strides from StridesMap in Plan with constants when possible.
static bool handleMaxMinNumReductions(VPlan &Plan)
Check if Plan contains any FMaxNum or FMinNum reductions.
static LLVM_ABI_FOR_TEST void createLoopRegions(VPlan &Plan)
Replace loops in Plan's flat CFG with VPRegionBlocks, turning Plan's flat CFG into a hierarchical CFG...
static void removeDeadRecipes(VPlan &Plan)
Remove dead recipes from Plan.
static void attachCheckBlock(VPlan &Plan, Value *Cond, BasicBlock *CheckBlock, bool AddBranchWeights)
Wrap runtime check block CheckBlock in a VPIRBB and Cond in a VPValue and connect the block to Plan,...
static void simplifyRecipes(VPlan &Plan)
Perform instcombine-like simplifications on recipes in Plan.
static void sinkPredicatedStores(VPlan &Plan, PredicatedScalarEvolution &PSE, const Loop *L)
Sink predicated stores to the same address with complementary predicates (P and NOT P) to an uncondit...
static void replicateByVF(VPlan &Plan, ElementCount VF)
Replace replicating VPReplicateRecipe, VPScalarIVStepsRecipe and VPInstruction in Plan with VF single...
static void addIterationCountCheckBlock(VPlan &Plan, ElementCount VF, unsigned UF, bool RequiresScalarEpilogue, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, PredicatedScalarEvolution &PSE)
Add a new check block before the vector preheader to Plan to check if the main vector loop should be ...
static void clearReductionWrapFlags(VPlan &Plan)
Clear NSW/NUW flags from reduction instructions if necessary.
static void optimizeInductionLiveOutUsers(VPlan &Plan, PredicatedScalarEvolution &PSE, bool FoldTail)
If there's a single exit block, optimize its phi recipes that use exiting IV values by feeding them p...
static void createPartialReductions(VPlan &Plan, VPCostContext &CostCtx, VFRange &Range)
Detect and create partial reduction recipes for scaled reductions in Plan.
static void cse(VPlan &Plan)
Perform common-subexpression-elimination on Plan.
static LLVM_ABI_FOR_TEST void optimize(VPlan &Plan)
Apply VPlan-to-VPlan optimizations to Plan, including induction recipe optimizations,...
static void dissolveLoopRegions(VPlan &Plan)
Replace loop regions with explicit CFG.
static void truncateToMinimalBitwidths(VPlan &Plan, const MapVector< Instruction *, uint64_t > &MinBWs)
Insert truncates and extends for any truncated recipe.
static void dropPoisonGeneratingRecipes(VPlan &Plan)
Drop poison flags from recipes that may generate a poison value that is used after vectorization,...
static void optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
Optimize Plan based on BestVF and BestUF.
static void convertEVLExitCond(VPlan &Plan)
Replaces the exit condition from (branch-on-cond eq CanonicalIVInc, VectorTripCount) to (branch-on-co...
static LLVM_ABI_FOR_TEST void addMiddleCheck(VPlan &Plan, bool TailFolded)
If a check is needed to guard executing the scalar epilogue loop, it will be added to the middle bloc...
TODO: The following VectorizationFactor was pulled out of LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
static LLVM_ABI bool HoistRuntimeChecks