1//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10// and generates target-independent LLVM-IR.
11// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12// of instructions in order to estimate the profitability of vectorization.
13//
14// The loop vectorizer combines consecutive loop iterations into a single
15// 'wide' iteration. After this transformation the index is incremented
16// by the SIMD vector width, and not by one.
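//
// Illustrative sketch (not part of the original header; names are made up for
// the example): with a vector width of 4, a loop such as
//
//   void addOne(float *A, const float *B, int N) {
//     for (int I = 0; I < N; ++I)
//       A[I] = B[I] + 1.0f;
//   }
//
// is conceptually rewritten so that one 'wide' iteration handles four
// elements at once (a single <4 x float> add), with the leftover iterations
// handled by a scalar epilogue loop or by tail folding:
//
//   int I = 0;
//   for (; I + 3 < N; I += 4)
//     /* A[I..I+3] = B[I..I+3] + <1,1,1,1> as one vector operation */;
//   for (; I < N; ++I)   // scalar epilogue for the remainder
//     A[I] = B[I] + 1.0f;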
17//
18// This pass has four parts:
19// 1. The main loop pass that drives the different parts.
20// 2. LoopVectorizationLegality - A unit that checks for the legality
21// of the vectorization.
22// 3. InnerLoopVectorizer - A unit that performs the actual
23// widening of instructions.
24// 4. LoopVectorizationCostModel - A unit that checks for the profitability
25// of vectorization. It decides on the optimal vector width, which
26// can be one, if vectorization is not profitable.
27//
28// There is a development effort going on to migrate the loop vectorizer to the
29// VPlan infrastructure and to introduce outer loop vectorization support (see
30// docs/VectorizationPlan.rst and
31// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32// purpose, we temporarily introduced the VPlan-native vectorization path: an
33// alternative vectorization path that is natively implemented on top of the
34// VPlan infrastructure. See EnableVPlanNativePath for enabling.
35//
36//===----------------------------------------------------------------------===//
37//
38// The reduction-variable vectorization is based on the paper:
39// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40//
41// Variable uniformity checks are inspired by:
42// Karrenberg, R. and Hack, S. Whole Function Vectorization.
43//
44// The interleaved access vectorization is based on the paper:
45// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
46// Data for SIMD
47//
48// Other ideas/concepts are from:
49// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50//
51// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
52// Vectorizing Compilers.
53//
54//===----------------------------------------------------------------------===//
55
58#include "VPRecipeBuilder.h"
59#include "VPlan.h"
60#include "VPlanAnalysis.h"
61#include "VPlanCFG.h"
62#include "VPlanHelpers.h"
63#include "VPlanPatternMatch.h"
64#include "VPlanTransforms.h"
65#include "VPlanUtils.h"
66#include "VPlanVerifier.h"
67#include "llvm/ADT/APInt.h"
68#include "llvm/ADT/ArrayRef.h"
69#include "llvm/ADT/DenseMap.h"
71#include "llvm/ADT/Hashing.h"
72#include "llvm/ADT/MapVector.h"
73#include "llvm/ADT/STLExtras.h"
76#include "llvm/ADT/Statistic.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/ADT/Twine.h"
79#include "llvm/ADT/TypeSwitch.h"
84#include "llvm/Analysis/CFG.h"
101#include "llvm/IR/Attributes.h"
102#include "llvm/IR/BasicBlock.h"
103#include "llvm/IR/CFG.h"
104#include "llvm/IR/Constant.h"
105#include "llvm/IR/Constants.h"
106#include "llvm/IR/DataLayout.h"
107#include "llvm/IR/DebugInfo.h"
108#include "llvm/IR/DebugLoc.h"
109#include "llvm/IR/DerivedTypes.h"
111#include "llvm/IR/Dominators.h"
112#include "llvm/IR/Function.h"
113#include "llvm/IR/IRBuilder.h"
114#include "llvm/IR/InstrTypes.h"
115#include "llvm/IR/Instruction.h"
116#include "llvm/IR/Instructions.h"
118#include "llvm/IR/Intrinsics.h"
119#include "llvm/IR/MDBuilder.h"
120#include "llvm/IR/Metadata.h"
121#include "llvm/IR/Module.h"
122#include "llvm/IR/Operator.h"
123#include "llvm/IR/PatternMatch.h"
125#include "llvm/IR/Type.h"
126#include "llvm/IR/Use.h"
127#include "llvm/IR/User.h"
128#include "llvm/IR/Value.h"
129#include "llvm/IR/Verifier.h"
130#include "llvm/Support/Casting.h"
132#include "llvm/Support/Debug.h"
147#include <algorithm>
148#include <cassert>
149#include <cmath>
150#include <cstdint>
151#include <functional>
152#include <iterator>
153#include <limits>
154#include <memory>
155#include <string>
156#include <tuple>
157#include <utility>
158
159using namespace llvm;
160using namespace SCEVPatternMatch;
161
162#define LV_NAME "loop-vectorize"
163#define DEBUG_TYPE LV_NAME
164
165#ifndef NDEBUG
166const char VerboseDebug[] = DEBUG_TYPE "-verbose";
167#endif
168
169STATISTIC(LoopsVectorized, "Number of loops vectorized");
170STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
171STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
172STATISTIC(LoopsEarlyExitVectorized, "Number of early exit loops vectorized");
173
175 "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
176 cl::desc("Enable vectorization of epilogue loops."));
177
179 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
180 cl::desc("When epilogue vectorization is enabled, and a value greater than "
181 "1 is specified, forces the given VF for all applicable epilogue "
182 "loops."));
183
185 "epilogue-vectorization-minimum-VF", cl::Hidden,
186 cl::desc("Only loops with vectorization factor equal to or larger than "
187 "the specified value are considered for epilogue vectorization."));
188
189/// Loops with a known constant trip count below this number are vectorized only
190/// if no scalar iteration overheads are incurred.
192 "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
193 cl::desc("Loops with a constant trip count that is smaller than this "
194 "value are vectorized only if no scalar iteration overheads "
195 "are incurred."));
196
198 "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
199 cl::desc("The maximum allowed number of runtime memory checks"));
200
201// Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
202// that predication is preferred, and this lists all options. I.e., the
203// vectorizer will try to fold the tail-loop (epilogue) into the vector body
204// and predicate the instructions accordingly. If tail-folding fails, there are
205// different fallback strategies depending on these values:
212} // namespace PreferPredicateTy
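// Tail folding, illustrated (a sketch, not taken from the source): instead of
// running leftover iterations in a scalar epilogue loop, every vector
// iteration executes under a lane mask. For VF = 4, lane L of the iteration
// starting at index I is active iff (I + L) < TripCount, so the final,
// partial iteration is simply masked off.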
213
215 "prefer-predicate-over-epilogue",
218 cl::desc("Tail-folding and predication preferences over creating a scalar "
219 "epilogue loop."),
221 "scalar-epilogue",
222 "Don't tail-predicate loops, create scalar epilogue"),
224 "predicate-else-scalar-epilogue",
225 "prefer tail-folding, create scalar epilogue if tail "
226 "folding fails."),
228 "predicate-dont-vectorize",
229 "prefers tail-folding, don't attempt vectorization if "
230 "tail-folding fails.")));
231
233 "force-tail-folding-style", cl::desc("Force the tail folding style"),
236 clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"),
239 "Create lane mask for data only, using active.lane.mask intrinsic"),
241 "data-without-lane-mask",
242 "Create lane mask with compare/stepvector"),
244 "Create lane mask using active.lane.mask intrinsic, and use "
245 "it for both data and control flow"),
247 "data-and-control-without-rt-check",
248 "Similar to data-and-control, but remove the runtime check"),
250 "Use predicated EVL instructions for tail folding. If EVL "
251 "is unsupported, fallback to data-without-lane-mask.")));
252
254 "enable-wide-lane-mask", cl::init(false), cl::Hidden,
255 cl::desc("Enable use of wide lane masks when used for control flow in "
256 "tail-folded loops"));
257
259 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
260 cl::desc("Maximize bandwidth when selecting vectorization factor which "
261 "will be determined by the smallest type in loop."));
262
264 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
265 cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
266
267/// An interleave-group may need masking if it resides in a block that needs
268/// predication, or in order to mask away gaps.
270 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
271 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
272
274 "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
275 cl::desc("A flag that overrides the target's number of scalar registers."));
276
278 "force-target-num-vector-regs", cl::init(0), cl::Hidden,
279 cl::desc("A flag that overrides the target's number of vector registers."));
280
282 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
283 cl::desc("A flag that overrides the target's max interleave factor for "
284 "scalar loops."));
285
287 "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
288 cl::desc("A flag that overrides the target's max interleave factor for "
289 "vectorized loops."));
290
292 "force-target-instruction-cost", cl::init(0), cl::Hidden,
293 cl::desc("A flag that overrides the target's expected cost for "
294 "an instruction to a single constant value. Mostly "
295 "useful for getting consistent testing."));
296
298 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
299 cl::desc(
300 "Pretend that scalable vectors are supported, even if the target does "
301 "not support them. This flag should only be used for testing."));
302
304 "small-loop-cost", cl::init(20), cl::Hidden,
305 cl::desc(
306 "The cost of a loop that is considered 'small' by the interleaver."));
307
309 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
310 cl::desc("Enable the use of the block frequency analysis to access PGO "
311 "heuristics minimizing code growth in cold regions and being more "
312 "aggressive in hot regions."));
313
314// Runtime interleave loops for load/store throughput.
316 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
317 cl::desc(
318 "Enable runtime interleaving until load/store ports are saturated"));
319
320/// The number of stores in a loop that are allowed to need predication.
322 "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
323 cl::desc("Max number of stores to be predicated behind an if."));
324
326 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
327 cl::desc("Count the induction variable only once when interleaving"));
328
330 "enable-cond-stores-vec", cl::init(true), cl::Hidden,
331 cl::desc("Enable if predication of stores during vectorization."));
332
334 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
335 cl::desc("The maximum interleave count to use when interleaving a scalar "
336 "reduction in a nested loop."));
337
338static cl::opt<bool>
339 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
341 cl::desc("Prefer in-loop vector reductions, "
342 "overriding the targets preference."));
343
345 "force-ordered-reductions", cl::init(false), cl::Hidden,
346 cl::desc("Enable the vectorisation of loops with in-order (strict) "
347 "FP reductions"));
348
350 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
351 cl::desc(
352 "Prefer predicating a reduction operation over an after loop select."));
353
355 "enable-vplan-native-path", cl::Hidden,
356 cl::desc("Enable VPlan-native vectorization path with "
357 "support for outer loop vectorization."));
358
360 llvm::VerifyEachVPlan("vplan-verify-each",
361#ifdef EXPENSIVE_CHECKS
362 cl::init(true),
363#else
364 cl::init(false),
365#endif
367 cl::desc("Verfiy VPlans after VPlan transforms."));
368
369#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
371 "vplan-print-after-all", cl::init(false), cl::Hidden,
372 cl::desc("Print after each VPlanTransforms::runPass."));
373#endif
374
375// This flag enables the stress testing of the VPlan H-CFG construction in the
376 // VPlan-native vectorization path. It must be used in conjunction with
377// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
378// verification of the H-CFGs built.
380 "vplan-build-stress-test", cl::init(false), cl::Hidden,
381 cl::desc(
382 "Build VPlan for every supported loop nest in the function and bail "
383 "out right after the build (stress test the VPlan H-CFG construction "
384 "in the VPlan-native vectorization path)."));
385
387 "interleave-loops", cl::init(true), cl::Hidden,
388 cl::desc("Enable loop interleaving in Loop vectorization passes"));
390 "vectorize-loops", cl::init(true), cl::Hidden,
391 cl::desc("Run the Loop vectorization passes"));
392
394 "force-widen-divrem-via-safe-divisor", cl::Hidden,
395 cl::desc(
396 "Override cost based safe divisor widening for div/rem instructions"));
397
399 "vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true),
401 cl::desc("Try wider VFs if they enable the use of vector variants"));
402
404 "enable-early-exit-vectorization", cl::init(true), cl::Hidden,
405 cl::desc(
406 "Enable vectorization of early exit loops with uncountable exits."));
407
409 "vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden,
410 cl::desc("Discard VFs if their register pressure is too high."));
411
412// Likelihood of bypassing the vectorized loop because there are zero trips left
413// after prolog. See `emitIterationCountCheck`.
414static constexpr uint32_t MinItersBypassWeights[] = {1, 127};
415
416/// A helper function that returns true if the given type is irregular. The
417/// type is irregular if its allocated size doesn't equal the store size of an
418/// element of the corresponding vector type.
419static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
420 // Determine if an array of N elements of type Ty is "bitcast compatible"
421 // with a <N x Ty> vector.
422 // This is only true if there is no padding between the array elements.
423 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
424}
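// For example (a hedged illustration, with Ctx and DL standing for a context
// and a typical 64-bit DataLayout):
//   hasIrregularType(Type::getInt1Ty(Ctx), DL);  // true: 1-bit size, 8-bit alloc size
//   hasIrregularType(Type::getInt32Ty(Ctx), DL); // false: both sizes are 32 bits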
425
426/// A version of ScalarEvolution::getSmallConstantTripCount that returns an
427/// ElementCount to include loops whose trip count is a function of vscale.
429 const Loop *L) {
430 if (unsigned ExpectedTC = SE->getSmallConstantTripCount(L))
431 return ElementCount::getFixed(ExpectedTC);
432
433 const SCEV *BTC = SE->getBackedgeTakenCount(L);
435 return ElementCount::getFixed(0);
436
437 const SCEV *ExitCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
438 if (isa<SCEVVScale>(ExitCount))
440
441 const APInt *Scale;
442 if (match(ExitCount, m_scev_Mul(m_scev_APInt(Scale), m_SCEVVScale())))
443 if (cast<SCEVMulExpr>(ExitCount)->hasNoUnsignedWrap())
444 if (Scale->getActiveBits() <= 32)
446
447 return ElementCount::getFixed(0);
448}
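// Worked example (illustrative, not from the source): for a loop whose
// backedge-taken count is (4 * vscale) - 1, the exit count derived from it is
// (4 * vscale); if that multiply carries the NUW flag and the scale fits in
// 32 bits, the helper reports a trip count of ElementCount::getScalable(4).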
449
450/// Returns "best known" trip count, which is either a valid positive trip count
451/// or std::nullopt when an estimate cannot be made (including when the trip
452/// count would overflow), for the specified loop \p L as defined by the
453/// following procedure:
454/// 1) Returns exact trip count if it is known.
455/// 2) Returns expected trip count according to profile data if any.
456/// 3) Returns upper bound estimate if known, and if \p CanUseConstantMax.
457/// 4) Returns std::nullopt if all of the above failed.
458static std::optional<ElementCount>
460 bool CanUseConstantMax = true) {
461 // Check if exact trip count is known.
462 if (auto ExpectedTC = getSmallConstantTripCount(PSE.getSE(), L))
463 return ExpectedTC;
464
465 // Check if there is an expected trip count available from profile data.
467 if (auto EstimatedTC = getLoopEstimatedTripCount(L))
468 return ElementCount::getFixed(*EstimatedTC);
469
470 if (!CanUseConstantMax)
471 return std::nullopt;
472
473 // Check if upper bound estimate is known.
474 if (unsigned ExpectedTC = PSE.getSmallConstantMaxTripCount())
475 return ElementCount::getFixed(ExpectedTC);
476
477 return std::nullopt;
478}
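// For instance (illustrative): a loop with a symbolic trip count but branch
// profile metadata estimating roughly 300 iterations falls through step (1)
// and returns ElementCount::getFixed(300) from step (2); with no profile and
// no known constant upper bound, the result is std::nullopt.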
479
480namespace {
481// Forward declare GeneratedRTChecks.
482class GeneratedRTChecks;
483
484using SCEV2ValueTy = DenseMap<const SCEV *, Value *>;
485} // namespace
486
487namespace llvm {
488
490
491/// InnerLoopVectorizer vectorizes loops which contain only one basic
492/// block to a specified vectorization factor (VF).
493/// This class performs the widening of scalars into vectors, or multiple
494/// scalars. This class also implements the following features:
495/// * It inserts an epilogue loop for handling loops that don't have iteration
496/// counts that are known to be a multiple of the vectorization factor.
497/// * It handles the code generation for reduction variables.
498/// * Scalarization (implementation using scalars) of un-vectorizable
499/// instructions.
500/// InnerLoopVectorizer does not perform any vectorization-legality
501/// checks, and relies on the caller to check for the different legality
502/// aspects. The InnerLoopVectorizer relies on the
503/// LoopVectorizationLegality class to provide information about the induction
504/// and reduction variables that were found to a given vectorization factor.
506public:
510 ElementCount VecWidth, unsigned UnrollFactor,
512 GeneratedRTChecks &RTChecks, VPlan &Plan)
513 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TTI(TTI), AC(AC),
514 VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()),
517 Plan.getVectorLoopRegion()->getSinglePredecessor())) {}
518
519 virtual ~InnerLoopVectorizer() = default;
520
521 /// Creates a basic block for the scalar preheader. Both
522 /// EpilogueVectorizerMainLoop and EpilogueVectorizerEpilogueLoop override
523 /// the method to create additional blocks and checks needed for epilogue
524 /// vectorization.
526
527 /// Fix the vectorized code, taking care of header phi's, and more.
529
530 /// Fix the non-induction PHIs in \p Plan.
532
533 /// Returns the original loop trip count.
534 Value *getTripCount() const { return TripCount; }
535
536 /// Used to set the trip count after ILV's construction and after the
537 /// preheader block has been executed. Note that this always holds the trip
538 /// count of the original loop for both main loop and epilogue vectorization.
539 void setTripCount(Value *TC) { TripCount = TC; }
540
541protected:
543
544 /// Create and return a new IR basic block for the scalar preheader whose name
545 /// is prefixed with \p Prefix.
547
548 /// Allow subclasses to override and print debug traces before/after vplan
549 /// execution, when trace information is requested.
550 virtual void printDebugTracesAtStart() {}
551 virtual void printDebugTracesAtEnd() {}
552
553 /// The original loop.
555
556 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
557 /// dynamic knowledge to simplify SCEV expressions and converts them to a
558 /// more usable form.
560
561 /// Loop Info.
563
564 /// Dominator Tree.
566
567 /// Target Transform Info.
569
570 /// Assumption Cache.
572
573 /// The vectorization SIMD factor to use. Each vector will have this many
574 /// vector elements.
576
577 /// The vectorization unroll factor to use. Each scalar is vectorized to this
578 /// many different vector instructions.
579 unsigned UF;
580
581 /// The builder that we use
583
584 // --- Vectorization state ---
585
586 /// Trip count of the original loop.
587 Value *TripCount = nullptr;
588
589 /// The profitability analysis.
591
592 /// Structure to hold information about generated runtime checks, responsible
593 /// for cleaning the checks, if vectorization turns out unprofitable.
594 GeneratedRTChecks &RTChecks;
595
597
598 /// The vector preheader block of \p Plan, used as target for check blocks
599 /// introduced during skeleton creation.
601};
602
603/// Encapsulate information regarding vectorization of a loop and its epilogue.
604/// This information is meant to be updated and used across two stages of
605/// epilogue vectorization.
608 unsigned MainLoopUF = 0;
610 unsigned EpilogueUF = 0;
613 Value *TripCount = nullptr;
616
618 ElementCount EVF, unsigned EUF,
620 : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF),
622 assert(EUF == 1 &&
623 "A high UF for the epilogue loop is likely not beneficial.");
624 }
625};
626
627/// An extension of the inner loop vectorizer that creates a skeleton for a
628/// vectorized loop that has its epilogue (residual) also vectorized.
629 /// The idea is to run the VPlan on a given loop twice: first to set up the
630 /// skeleton and vectorize the main loop, and second to complete the skeleton
631/// from the first step and vectorize the epilogue. This is achieved by
632/// deriving two concrete strategy classes from this base class and invoking
633/// them in succession from the loop vectorizer planner.
635public:
645
646 /// Holds and updates state information required to vectorize the main loop
647 /// and its epilogue in two separate passes. This setup helps us avoid
648 /// regenerating and recomputing runtime safety checks. It also helps us to
649 /// shorten the iteration-count-check path length for the cases where the
650 /// iteration count of the loop is so small that the main vector loop is
651 /// completely skipped.
653
654protected:
656};
657
658/// A specialized derived class of inner loop vectorizer that performs
659/// vectorization of *main* loops in the process of vectorizing loops and their
660/// epilogues.
662public:
673 /// Implements the interface for creating a vectorized skeleton using the
674 /// *main loop* strategy (i.e., the first pass of VPlan execution).
676
677protected:
678 /// Introduces a new VPIRBasicBlock for \p CheckIRBB to Plan between the
679 /// vector preheader and its predecessor, also connecting the new block to the
680 /// scalar preheader.
681 void introduceCheckBlockInVPlan(BasicBlock *CheckIRBB);
682
683 // Create a check to see if the main vector loop should be executed
685 unsigned UF) const;
686
687 /// Emits an iteration count bypass check once for the main loop (when \p
688 /// ForEpilogue is false) and once for the epilogue loop (when \p
689 /// ForEpilogue is true).
691 bool ForEpilogue);
692 void printDebugTracesAtStart() override;
693 void printDebugTracesAtEnd() override;
694};
695
696// A specialized derived class of inner loop vectorizer that performs
697// vectorization of *epilogue* loops in the process of vectorizing loops and
698// their epilogues.
700public:
707 GeneratedRTChecks &Checks, VPlan &Plan)
709 Checks, Plan, EPI.EpilogueVF,
710 EPI.EpilogueVF, EPI.EpilogueUF) {}
711 /// Implements the interface for creating a vectorized skeleton using the
712 /// *epilogue loop* strategy (i.e., the second pass of VPlan execution).
714
715protected:
716 void printDebugTracesAtStart() override;
717 void printDebugTracesAtEnd() override;
718};
719} // end namespace llvm
720
721/// Look for a meaningful debug location on the instruction or its operands.
723 if (!I)
724 return DebugLoc::getUnknown();
725
727 if (I->getDebugLoc() != Empty)
728 return I->getDebugLoc();
729
730 for (Use &Op : I->operands()) {
731 if (Instruction *OpInst = dyn_cast<Instruction>(Op))
732 if (OpInst->getDebugLoc() != Empty)
733 return OpInst->getDebugLoc();
734 }
735
736 return I->getDebugLoc();
737}
738
739/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
740/// is passed, the message relates to that particular instruction.
741#ifndef NDEBUG
742static void debugVectorizationMessage(const StringRef Prefix,
743 const StringRef DebugMsg,
744 Instruction *I) {
745 dbgs() << "LV: " << Prefix << DebugMsg;
746 if (I != nullptr)
747 dbgs() << " " << *I;
748 else
749 dbgs() << '.';
750 dbgs() << '\n';
751}
752#endif
753
754/// Create an analysis remark that explains why vectorization failed
755///
756/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
757/// RemarkName is the identifier for the remark. If \p I is passed it is an
758/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
759/// the location of the remark. If \p DL is passed, use it as debug location for
760/// the remark. \return the remark object that can be streamed to.
761static OptimizationRemarkAnalysis
762createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
763 Instruction *I, DebugLoc DL = {}) {
764 BasicBlock *CodeRegion = I ? I->getParent() : TheLoop->getHeader();
765 // If debug location is attached to the instruction, use it. Otherwise if DL
766 // was not provided, use the loop's.
767 if (I && I->getDebugLoc())
768 DL = I->getDebugLoc();
769 else if (!DL)
770 DL = TheLoop->getStartLoc();
771
772 return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
773}
774
775namespace llvm {
776
777/// Return a value for Step multiplied by VF.
779 int64_t Step) {
780 assert(Ty->isIntegerTy() && "Expected an integer step");
781 ElementCount VFxStep = VF.multiplyCoefficientBy(Step);
782 assert(isPowerOf2_64(VF.getKnownMinValue()) && "must pass power-of-2 VF");
783 if (VF.isScalable() && isPowerOf2_64(Step)) {
784 return B.CreateShl(
785 B.CreateVScale(Ty),
786 ConstantInt::get(Ty, Log2_64(VFxStep.getKnownMinValue())), "", true);
787 }
788 return B.CreateElementCount(Ty, VFxStep);
789}
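// For instance (illustrative only): with Ty == i64, VF == <vscale x 4> and
// Step == 2, VFxStep is vscale x 8; since Step is a power of two the helper
// emits vscale << 3 (i.e. vscale * 8) rather than a runtime multiply.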
790
791/// Return the runtime value for VF.
793 return B.CreateElementCount(Ty, VF);
794}
795
797 const StringRef OREMsg, const StringRef ORETag,
798 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
799 Instruction *I) {
800 LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
801 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
802 ORE->emit(
803 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
804 << "loop not vectorized: " << OREMsg);
805}
806
807/// Reports an informative message: print \p Msg for debugging purposes as well
808/// as an optimization remark. Uses either \p I as location of the remark, or
809/// otherwise \p TheLoop. If \p DL is passed, use it as debug location for the
810 /// remark.
811static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
813 Loop *TheLoop, Instruction *I = nullptr,
814 DebugLoc DL = {}) {
816 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
817 ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop,
818 I, DL)
819 << Msg);
820}
821
822/// Report successful vectorization of the loop. In case an outer loop is
823/// vectorized, prepend "outer" to the vectorization remark.
825 VectorizationFactor VF, unsigned IC) {
827 "Vectorizing: ", TheLoop->isInnermost() ? "innermost loop" : "outer loop",
828 nullptr));
829 StringRef LoopType = TheLoop->isInnermost() ? "" : "outer ";
830 ORE->emit([&]() {
831 return OptimizationRemark(LV_NAME, "Vectorized", TheLoop->getStartLoc(),
832 TheLoop->getHeader())
833 << "vectorized " << LoopType << "loop (vectorization width: "
834 << ore::NV("VectorizationFactor", VF.Width)
835 << ", interleaved count: " << ore::NV("InterleaveCount", IC) << ")";
836 });
837}
838
839} // end namespace llvm
840
841namespace llvm {
842
843// Loop vectorization cost-model hints how the scalar epilogue loop should be
844// lowered.
846
847 // The default: allowing scalar epilogues.
849
850 // Vectorization with OptForSize: don't allow epilogues.
852
853 // A special case of vectorization with OptForSize: loops with a very small
854 // trip count are considered for vectorization under OptForSize, thereby
855 // making sure the cost of their loop body is dominant, free of runtime
856 // guards and scalar iteration overheads.
858
859 // Loop hint predicate indicating an epilogue is undesired.
861
862 // Directive indicating we must either tail fold or not vectorize
864};
865
866/// LoopVectorizationCostModel - estimates the expected speedups due to
867/// vectorization.
868/// In many cases vectorization is not profitable. This can happen because of
869/// a number of reasons. In this class we mainly attempt to predict the
870/// expected speedup/slowdowns due to the supported instruction set. We use the
871/// TargetTransformInfo to query the different backends for the cost of
872/// different operations.
875
876public:
884 std::function<BlockFrequencyInfo &()> GetBFI,
885 const Function *F, const LoopVectorizeHints *Hints,
887 : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
888 TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), GetBFI(GetBFI),
891 if (TTI.supportsScalableVectors() || ForceTargetSupportsScalableVectors)
892 initializeVScaleForTuning();
894 }
895
896 /// \return An upper bound for the vectorization factors (both fixed and
897 /// scalable). If the factors are 0, vectorization and interleaving should be
898 /// avoided up front.
899 FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
900
901 /// \return True if runtime checks are required for vectorization, and false
902 /// otherwise.
903 bool runtimeChecksRequired();
904
905 /// Setup cost-based decisions for user vectorization factor.
906 /// \return true if the UserVF is a feasible VF to be chosen.
909 return expectedCost(UserVF).isValid();
910 }
911
912 /// \return True if maximizing vector bandwidth is enabled by the target or
913 /// user options, for the given register kind.
914 bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind);
915
916 /// \return True if register pressure should be considered for the given VF.
917 bool shouldConsiderRegPressureForVF(ElementCount VF);
918
919 /// \return The size (in bits) of the smallest and widest types in the code
920 /// that needs to be vectorized. We ignore values that remain scalar such as
921 /// 64 bit loop indices.
922 std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
923
924 /// Memory access instruction may be vectorized in more than one way.
925 /// Form of instruction after vectorization depends on cost.
926 /// This function takes cost-based decisions for Load/Store instructions
927 /// and collects them in a map. This decisions map is used for building
928 /// the lists of loop-uniform and loop-scalar instructions.
929 /// The calculated cost is saved with widening decision in order to
930 /// avoid redundant calculations.
931 void setCostBasedWideningDecision(ElementCount VF);
932
933 /// A call may be vectorized in different ways depending on whether we have
934 /// vectorized variants available and whether the target supports masking.
935 /// This function analyzes all calls in the function at the supplied VF,
936 /// makes a decision based on the costs of available options, and stores that
937 /// decision in a map for use in planning and plan execution.
938 void setVectorizedCallDecision(ElementCount VF);
939
940 /// Collect values we want to ignore in the cost model.
941 void collectValuesToIgnore();
942
943 /// Collect all element types in the loop for which widening is needed.
944 void collectElementTypesForWidening();
945
946 /// Split reductions into those that happen in the loop, and those that happen
947 /// outside. In-loop reductions are collected into InLoopReductions.
948 void collectInLoopReductions();
949
950 /// Returns true if we should use strict in-order reductions for the given
951 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
952 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
953 /// of FP operations.
954 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const {
955 return !Hints->allowReordering() && RdxDesc.isOrdered();
956 }
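  // Example (illustrative, not from the source): for
  //   float S = 0.0f;
  //   for (int I = 0; I < N; ++I)
  //     S += A[I];
  // compiled without FP reassociation, RdxDesc.isOrdered() is true, so unless
  // the loop hints allow reordering this returns true and the reduction is
  // vectorized as an in-order (strict) reduction instead of a tree reduction.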
957
958 /// \returns The smallest bitwidth each instruction can be represented with.
959 /// The vector equivalents of these instructions should be truncated to this
960 /// type.
962 return MinBWs;
963 }
964
965 /// \returns True if it is more profitable to scalarize instruction \p I for
966 /// vectorization factor \p VF.
968 assert(VF.isVector() &&
969 "Profitable to scalarize relevant only for VF > 1.");
970 assert(
971 TheLoop->isInnermost() &&
972 "cost-model should not be used for outer loops (in VPlan-native path)");
973
974 auto Scalars = InstsToScalarize.find(VF);
975 assert(Scalars != InstsToScalarize.end() &&
976 "VF not yet analyzed for scalarization profitability");
977 return Scalars->second.contains(I);
978 }
979
980 /// Returns true if \p I is known to be uniform after vectorization.
982 assert(
983 TheLoop->isInnermost() &&
984 "cost-model should not be used for outer loops (in VPlan-native path)");
985 // Pseudo probe needs to be duplicated for each unrolled iteration and
986 // vector lane so that profiled loop trip count can be accurately
987 // accumulated instead of being undercounted.
989 return false;
990
991 if (VF.isScalar())
992 return true;
993
994 auto UniformsPerVF = Uniforms.find(VF);
995 assert(UniformsPerVF != Uniforms.end() &&
996 "VF not yet analyzed for uniformity");
997 return UniformsPerVF->second.count(I);
998 }
999
1000 /// Returns true if \p I is known to be scalar after vectorization.
1002 assert(
1003 TheLoop->isInnermost() &&
1004 "cost-model should not be used for outer loops (in VPlan-native path)");
1005 if (VF.isScalar())
1006 return true;
1007
1008 auto ScalarsPerVF = Scalars.find(VF);
1009 assert(ScalarsPerVF != Scalars.end() &&
1010 "Scalar values are not calculated for VF");
1011 return ScalarsPerVF->second.count(I);
1012 }
1013
1014 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1015 /// for vectorization factor \p VF.
1017 // Truncs must truncate at most to their destination type.
1018 if (isa_and_nonnull<TruncInst>(I) && MinBWs.contains(I) &&
1019 I->getType()->getScalarSizeInBits() < MinBWs.lookup(I))
1020 return false;
1021 return VF.isVector() && MinBWs.contains(I) &&
1022 !isProfitableToScalarize(I, VF) &&
1024 }
1025
1026 /// Decision that was taken during cost calculation for memory instruction.
1029 CM_Widen, // For consecutive accesses with stride +1.
1030 CM_Widen_Reverse, // For consecutive accesses with stride -1.
1036 };
1037
1038 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1039 /// instruction \p I and vector width \p VF.
1042 assert(VF.isVector() && "Expected VF >=2");
1043 WideningDecisions[{I, VF}] = {W, Cost};
1044 }
1045
1046 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1047 /// interleaving group \p Grp and vector width \p VF.
1051 assert(VF.isVector() && "Expected VF >=2");
1052 /// Broadcast this decision to all instructions inside the group.
1053 /// When interleaving, the cost will only be assigned to one instruction, the
1054 /// insert position. For other cases, add the appropriate fraction of the
1055 /// total cost to each instruction. This ensures accurate costs are used,
1056 /// even if the insert position instruction is not used.
1057 InstructionCost InsertPosCost = Cost;
1058 InstructionCost OtherMemberCost = 0;
1059 if (W != CM_Interleave)
1060 OtherMemberCost = InsertPosCost = Cost / Grp->getNumMembers();
1062 for (unsigned Idx = 0; Idx < Grp->getFactor(); ++Idx) {
1063 if (auto *I = Grp->getMember(Idx)) {
1064 if (Grp->getInsertPos() == I)
1065 WideningDecisions[{I, VF}] = {W, InsertPosCost};
1066 else
1067 WideningDecisions[{I, VF}] = {W, OtherMemberCost};
1068 }
1069 }
1070 }
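  // Worked example (illustrative): for a 4-member group with a total cost of
  // 8, a CM_Widen decision assigns a cost of 2 to every member, whereas a
  // CM_Interleave decision assigns the full cost of 8 to the insert position
  // and 0 to the remaining members.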
1071
1072 /// Return the cost model decision for the given instruction \p I and vector
1073 /// width \p VF. Return CM_Unknown if this instruction did not pass
1074 /// through the cost modeling.
1076 assert(VF.isVector() && "Expected VF to be a vector VF");
1077 assert(
1078 TheLoop->isInnermost() &&
1079 "cost-model should not be used for outer loops (in VPlan-native path)");
1080
1081 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
1082 auto Itr = WideningDecisions.find(InstOnVF);
1083 if (Itr == WideningDecisions.end())
1084 return CM_Unknown;
1085 return Itr->second.first;
1086 }
1087
1088 /// Return the vectorization cost for the given instruction \p I and vector
1089 /// width \p VF.
1091 assert(VF.isVector() && "Expected VF >=2");
1092 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
1093 assert(WideningDecisions.contains(InstOnVF) &&
1094 "The cost is not calculated");
1095 return WideningDecisions[InstOnVF].second;
1096 }
1097
1105
1107 Function *Variant, Intrinsic::ID IID,
1108 std::optional<unsigned> MaskPos,
1110 assert(!VF.isScalar() && "Expected vector VF");
1111 CallWideningDecisions[{CI, VF}] = {Kind, Variant, IID, MaskPos, Cost};
1112 }
1113
1115 ElementCount VF) const {
1116 assert(!VF.isScalar() && "Expected vector VF");
1117 auto I = CallWideningDecisions.find({CI, VF});
1118 if (I == CallWideningDecisions.end())
1119 return {CM_Unknown, nullptr, Intrinsic::not_intrinsic, std::nullopt, 0};
1120 return I->second;
1121 }
1122
1123 /// Return True if instruction \p I is an optimizable truncate whose operand
1124 /// is an induction variable. Such a truncate will be removed by adding a new
1125 /// induction variable with the destination type.
1127 // If the instruction is not a truncate, return false.
1128 auto *Trunc = dyn_cast<TruncInst>(I);
1129 if (!Trunc)
1130 return false;
1131
1132 // Get the source and destination types of the truncate.
1133 Type *SrcTy = toVectorTy(Trunc->getSrcTy(), VF);
1134 Type *DestTy = toVectorTy(Trunc->getDestTy(), VF);
1135
1136 // If the truncate is free for the given types, return false. Replacing a
1137 // free truncate with an induction variable would add an induction variable
1138 // update instruction to each iteration of the loop. We exclude from this
1139 // check the primary induction variable since it will need an update
1140 // instruction regardless.
1141 Value *Op = Trunc->getOperand(0);
1142 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1143 return false;
1144
1145 // If the truncated value is not an induction variable, return false.
1146 return Legal->isInductionPhi(Op);
1147 }
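  // Example (illustrative): with a primary i64 induction %iv, a use such as
  //   %idx = trunc i64 %iv to i32
  // is optimizable: rather than widening the trunc, the vectorizer introduces
  // a new i32 induction with the destination type and the truncate goes away.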
1148
1149 /// Collects the instructions to scalarize for each predicated instruction in
1150 /// the loop.
1151 void collectInstsToScalarize(ElementCount VF);
1152
1153 /// Collect values that will not be widened, including Uniforms, Scalars, and
1154 /// Instructions to Scalarize for the given \p VF.
1155 /// The sets depend on CM decision for Load/Store instructions
1156 /// that may be vectorized as interleave, gather-scatter or scalarized.
1157 /// Also make a decision on what to do about call instructions in the loop
1158 /// at that VF -- scalarize, call a known vector routine, or call a
1159 /// vector intrinsic.
1161 // Do the analysis once.
1162 if (VF.isScalar() || Uniforms.contains(VF))
1163 return;
1165 collectLoopUniforms(VF);
1167 collectLoopScalars(VF);
1169 }
1170
1171 /// Returns true if the target machine supports masked store operation
1172 /// for the given \p DataType and kind of access to \p Ptr.
1173 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment,
1174 unsigned AddressSpace) const {
1175 return Legal->isConsecutivePtr(DataType, Ptr) &&
1176 TTI.isLegalMaskedStore(DataType, Alignment, AddressSpace);
1177 }
1178
1179 /// Returns true if the target machine supports masked load operation
1180 /// for the given \p DataType and kind of access to \p Ptr.
1181 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment,
1182 unsigned AddressSpace) const {
1183 return Legal->isConsecutivePtr(DataType, Ptr) &&
1184 TTI.isLegalMaskedLoad(DataType, Alignment, AddressSpace);
1185 }
1186
1187 /// Returns true if the target machine can represent \p V as a masked gather
1188 /// or scatter operation.
1190 bool LI = isa<LoadInst>(V);
1191 bool SI = isa<StoreInst>(V);
1192 if (!LI && !SI)
1193 return false;
1194 auto *Ty = getLoadStoreType(V);
1196 if (VF.isVector())
1197 Ty = VectorType::get(Ty, VF);
1198 return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1199 (SI && TTI.isLegalMaskedScatter(Ty, Align));
1200 }
1201
1202 /// Returns true if the target machine supports all of the reduction
1203 /// variables found for the given VF.
1205 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1206 const RecurrenceDescriptor &RdxDesc = Reduction.second;
1207 return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1208 }));
1209 }
1210
1211 /// Given costs for both strategies, return true if the scalar predication
1212 /// lowering should be used for div/rem. This incorporates an override
1213 /// option so it is not simply a cost comparison.
1215 InstructionCost SafeDivisorCost) const {
1216 switch (ForceSafeDivisor) {
1217 case cl::BOU_UNSET:
1218 return ScalarCost < SafeDivisorCost;
1219 case cl::BOU_TRUE:
1220 return false;
1221 case cl::BOU_FALSE:
1222 return true;
1223 }
1224 llvm_unreachable("impossible case value");
1225 }
1226
1227 /// Returns true if \p I is an instruction which requires predication and
1228 /// for which our chosen predication strategy is scalarization (i.e. we
1229 /// don't have an alternate strategy such as masking available).
1230 /// \p VF is the vectorization factor that will be used to vectorize \p I.
1231 bool isScalarWithPredication(Instruction *I, ElementCount VF);
1232
1233 /// Returns true if \p I is an instruction that needs to be predicated
1234 /// at runtime. The result is independent of the predication mechanism.
1235 /// Superset of instructions that return true for isScalarWithPredication.
1236 bool isPredicatedInst(Instruction *I) const;
1237
1238 /// A helper function that returns how much we should divide the cost of a
1239 /// predicated block by. Typically this is the reciprocal of the block
1240 /// probability, i.e. if we return X we are assuming the predicated block will
1241 /// execute once for every X iterations of the loop header so the block should
1242 /// only contribute 1/X of its cost to the total cost calculation, but when
1243 /// optimizing for code size it will just be 1 as code size costs don't depend
1244 /// on execution probabilities.
1245 ///
1246 /// Note that if a block wasn't originally predicated but was predicated due
1247 /// to tail folding, the divisor will still be 1 because it will execute for
1248 /// every iteration of the loop header.
1249 inline uint64_t
1250 getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind,
1251 const BasicBlock *BB);
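  // Worked example (an illustration of the comment above): if a predicated
  // block is expected to execute once every 4 iterations of the loop header,
  // the divisor is 4 and the block contributes Cost/4 to the total; for
  // code-size cost kinds the divisor stays 1.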
1252
1253 /// Return the costs for our two available strategies for lowering a
1254 /// div/rem operation which requires speculating at least one lane.
1255 /// First result is for scalarization (will be invalid for scalable
1256 /// vectors); second is for the safe-divisor strategy.
1257 std::pair<InstructionCost, InstructionCost>
1258 getDivRemSpeculationCost(Instruction *I, ElementCount VF);
1259
1260 /// Returns true if \p I is a memory instruction with consecutive memory
1261 /// access that can be widened.
1262 bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF);
1263
1264 /// Returns true if \p I is a memory instruction in an interleaved-group
1265 /// of memory accesses that can be vectorized with wide vector loads/stores
1266 /// and shuffles.
1267 bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const;
1268
1269 /// Check if \p Instr belongs to any interleaved access group.
1271 return InterleaveInfo.isInterleaved(Instr);
1272 }
1273
1274 /// Get the interleaved access group that \p Instr belongs to.
1277 return InterleaveInfo.getInterleaveGroup(Instr);
1278 }
1279
1280 /// Returns true if we're required to use a scalar epilogue for at least
1281 /// the final iteration of the original loop.
1282 bool requiresScalarEpilogue(bool IsVectorizing) const {
1283 if (!isScalarEpilogueAllowed()) {
1284 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1285 return false;
1286 }
1287 // If we might exit from anywhere but the latch and early exit vectorization
1288 // is disabled, we must run the exiting iteration in scalar form.
1289 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
1290 !(EnableEarlyExitVectorization && Legal->hasUncountableEarlyExit())) {
1291 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: not exiting "
1292 "from latch block\n");
1293 return true;
1294 }
1295 if (IsVectorizing && InterleaveInfo.requiresScalarEpilogue()) {
1296 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: "
1297 "interleaved group requires scalar epilogue\n");
1298 return true;
1299 }
1300 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1301 return false;
1302 }
1303
1304 /// Returns true if a scalar epilogue is not allowed due to optsize or a
1305 /// loop hint annotation.
1307 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1308 }
1309
1310 /// Returns true if tail-folding is preferred over a scalar epilogue.
1312 return ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate ||
1313 ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate;
1314 }
1315
1316 /// Returns the TailFoldingStyle that is best for the current loop.
1317 TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow = true) const {
1318 if (!ChosenTailFoldingStyle)
1320 return IVUpdateMayOverflow ? ChosenTailFoldingStyle->first
1321 : ChosenTailFoldingStyle->second;
1322 }
1323
1324 /// Selects and saves TailFoldingStyle for 2 options - if IV update may
1325 /// overflow or not.
1326 /// \param IsScalableVF true if scalable vector factors enabled.
1327 /// \param UserIC User specific interleave count.
1328 void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC) {
1329 assert(!ChosenTailFoldingStyle && "Tail folding must not be selected yet.");
1330 if (!Legal->canFoldTailByMasking()) {
1331 ChosenTailFoldingStyle = {TailFoldingStyle::None, TailFoldingStyle::None};
1332 return;
1333 }
1334
1335 // Default to TTI preference, but allow command line override.
1336 ChosenTailFoldingStyle = {
1337 TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/true),
1338 TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/false)};
1339 if (ForceTailFoldingStyle.getNumOccurrences())
1340 ChosenTailFoldingStyle = {ForceTailFoldingStyle.getValue(),
1341 ForceTailFoldingStyle.getValue()};
1342
1343 if (ChosenTailFoldingStyle->first != TailFoldingStyle::DataWithEVL &&
1344 ChosenTailFoldingStyle->second != TailFoldingStyle::DataWithEVL)
1345 return;
1346 // Override EVL styles if needed.
1347 // FIXME: Investigate opportunity for fixed vector factor.
1348 bool EVLIsLegal = UserIC <= 1 && IsScalableVF &&
1349 TTI.hasActiveVectorLength() && !EnableVPlanNativePath;
1350 if (EVLIsLegal)
1351 return;
1352 // If for some reason EVL mode is unsupported, fallback to a scalar epilogue
1353 // if it's allowed, or DataWithoutLaneMask otherwise.
1354 if (ScalarEpilogueStatus == CM_ScalarEpilogueAllowed ||
1355 ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate)
1356 ChosenTailFoldingStyle = {TailFoldingStyle::None, TailFoldingStyle::None};
1357 else
1358 ChosenTailFoldingStyle = {TailFoldingStyle::DataWithoutLaneMask,
1360
1361 LLVM_DEBUG(
1362 dbgs() << "LV: Preference for VP intrinsics indicated. Will "
1363 "not try to generate VP Intrinsics "
1364 << (UserIC > 1
1365 ? "since interleave count specified is greater than 1.\n"
1366 : "due to non-interleaving reasons.\n"));
1367 }
1368
1369 /// Returns true if all loop blocks should be masked to fold tail loop.
1370 bool foldTailByMasking() const {
1371 // TODO: check if it is possible to check for None style independent of
1372 // IVUpdateMayOverflow flag in getTailFoldingStyle.
1374 }
1375
1376 /// Returns true if the use of wide lane masks is requested and the loop is
1377 /// using tail-folding with a lane mask for control flow.
1386
1387 /// Return maximum safe number of elements to be processed per vector
1388 /// iteration, which do not prevent store-load forwarding and are safe with
1389 /// regard to the memory dependencies. Required for EVL-based VPlans to
1390 /// correctly calculate AVL (application vector length) as min(remaining AVL,
1391 /// MaxSafeElements).
1392 /// TODO: need to consider adjusting cost model to use this value as a
1393 /// vectorization factor for EVL-based vectorization.
1394 std::optional<unsigned> getMaxSafeElements() const { return MaxSafeElements; }
1395
1396 /// Returns true if the instructions in this block requires predication
1397 /// for any reason, e.g. because tail folding now requires a predicate
1398 /// or because the block in the original loop was predicated.
1400 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1401 }
1402
1403 /// Returns true if VP intrinsics with explicit vector length support should
1404 /// be generated in the tail folded loop.
1408
1409 /// Returns true if the Phi is part of an inloop reduction.
1410 bool isInLoopReduction(PHINode *Phi) const {
1411 return InLoopReductions.contains(Phi);
1412 }
1413
1414 /// Returns the set of in-loop reduction PHIs.
1416 return InLoopReductions;
1417 }
1418
1419 /// Returns true if the predicated reduction select should be used to set the
1420 /// incoming value for the reduction phi.
1422 // Force to use predicated reduction select since the EVL of the
1423 // second-to-last iteration might not be VF*UF.
1424 if (foldTailWithEVL())
1425 return true;
1427 TTI.preferPredicatedReductionSelect();
1428 }
1429
1430 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1431 /// with factor VF. Return the cost of the instruction, including
1432 /// scalarization overhead if it's needed.
1433 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1434
1435 /// Estimate cost of a call instruction CI if it were vectorized with factor
1436 /// VF. Return the cost of the instruction, including scalarization overhead
1437 /// if it's needed.
1438 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const;
1439
1440 /// Invalidates decisions already taken by the cost model.
1442 WideningDecisions.clear();
1443 CallWideningDecisions.clear();
1444 Uniforms.clear();
1445 Scalars.clear();
1446 }
1447
1448 /// Returns the expected execution cost. The unit of the cost does
1449 /// not matter because we use the 'cost' units to compare different
1450 /// vector widths. The cost that is returned is *not* normalized by
1451 /// the factor width.
1452 InstructionCost expectedCost(ElementCount VF);
1453
1454 bool hasPredStores() const { return NumPredStores > 0; }
1455
1456 /// Returns true if epilogue vectorization is considered profitable, and
1457 /// false otherwise.
1458 /// \p VF is the vectorization factor chosen for the original loop.
1459 /// \p Multiplier is an additional scaling factor applied to VF before
1460 /// comparing to EpilogueVectorizationMinVF.
1461 bool isEpilogueVectorizationProfitable(const ElementCount VF,
1462 const unsigned IC) const;
1463
1464 /// Returns the execution time cost of an instruction for a given vector
1465 /// width. Vector width of one means scalar.
1466 InstructionCost getInstructionCost(Instruction *I, ElementCount VF);
1467
1468 /// Return the cost of instructions in an inloop reduction pattern, if I is
1469 /// part of that pattern.
1470 std::optional<InstructionCost> getReductionPatternCost(Instruction *I,
1471 ElementCount VF,
1472 Type *VectorTy) const;
1473
1474 /// Returns true if \p Op should be considered invariant and if it is
1475 /// trivially hoistable.
1476 bool shouldConsiderInvariant(Value *Op);
1477
1478 /// Return the value of vscale used for tuning the cost model.
1479 std::optional<unsigned> getVScaleForTuning() const { return VScaleForTuning; }
1480
1481private:
1482 unsigned NumPredStores = 0;
1483
1484 /// Used to store the value of vscale used for tuning the cost model. It is
1485 /// initialized during object construction.
1486 std::optional<unsigned> VScaleForTuning;
1487
1488 /// Initializes the value of vscale used for tuning the cost model. If
1489 /// vscale_range.min == vscale_range.max then return vscale_range.max, else
1490 /// return the value returned by the corresponding TTI method.
1491 void initializeVScaleForTuning() {
1492 const Function *Fn = TheLoop->getHeader()->getParent();
1493 if (Fn->hasFnAttribute(Attribute::VScaleRange)) {
1494 auto Attr = Fn->getFnAttribute(Attribute::VScaleRange);
1495 auto Min = Attr.getVScaleRangeMin();
1496 auto Max = Attr.getVScaleRangeMax();
1497 if (Max && Min == Max) {
1498 VScaleForTuning = Max;
1499 return;
1500 }
1501 }
1502
1503 VScaleForTuning = TTI.getVScaleForTuning();
1504 }
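  // For example (illustrative): a function carrying vscale_range(2,2) pins
  // VScaleForTuning to 2; with vscale_range(1,16) the bounds differ, so the
  // value falls back to TTI.getVScaleForTuning().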
1505
1506 /// \return An upper bound for the vectorization factors for both
1507 /// fixed and scalable vectorization, where the minimum-known number of
1508 /// elements is a power-of-2 larger than zero. If scalable vectorization is
1509 /// disabled or unsupported, then the scalable part will be equal to
1510 /// ElementCount::getScalable(0).
1511 FixedScalableVFPair computeFeasibleMaxVF(unsigned MaxTripCount,
1512 ElementCount UserVF, unsigned UserIC,
1513 bool FoldTailByMasking);
1514
1515 /// If \p VF * \p UserIC > MaxTripcount, clamps VF to the next lower VF that
1516 /// results in VF * UserIC <= MaxTripCount.
1517 ElementCount clampVFByMaxTripCount(ElementCount VF, unsigned MaxTripCount,
1518 unsigned UserIC,
1519 bool FoldTailByMasking) const;
1520
1521 /// \return the maximized element count based on the targets vector
1522 /// registers and the loop trip-count, but limited to a maximum safe VF.
1523 /// This is a helper function of computeFeasibleMaxVF.
1524 ElementCount getMaximizedVFForTarget(unsigned MaxTripCount,
1525 unsigned SmallestType,
1526 unsigned WidestType,
1527 ElementCount MaxSafeVF, unsigned UserIC,
1528 bool FoldTailByMasking);
1529
1530 /// Checks if scalable vectorization is supported and enabled. Caches the
1531 /// result to avoid repeated debug dumps for repeated queries.
1532 bool isScalableVectorizationAllowed();
1533
1534 /// \return the maximum legal scalable VF, based on the safe max number
1535 /// of elements.
1536 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1537
1538 /// Calculate vectorization cost of memory instruction \p I.
1539 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1540
1541 /// The cost computation for scalarized memory instruction.
1542 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1543
1544 /// The cost computation for interleaving group of memory instructions.
1545 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1546
1547 /// The cost computation for Gather/Scatter instruction.
1548 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1549
1550 /// The cost computation for widening instruction \p I with consecutive
1551 /// memory access.
1552 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1553
1554 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1555 /// Load: scalar load + broadcast.
1556 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1557 /// element)
1558 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1559
1560 /// Estimate the overhead of scalarizing an instruction. This is a
1561 /// convenience wrapper for the type-based getScalarizationOverhead API.
1563 ElementCount VF) const;
1564
1565 /// Returns true if an artificially high cost for emulated masked memrefs
1566 /// should be used.
1567 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1568
1569 /// Map of scalar integer values to the smallest bitwidth they can be legally
1570 /// represented as. The vector equivalents of these values should be truncated
1571 /// to this type.
1572 MapVector<Instruction *, uint64_t> MinBWs;
1573
1574 /// A type representing the costs for instructions if they were to be
1575 /// scalarized rather than vectorized. The entries are Instruction-Cost
1576 /// pairs.
1577 using ScalarCostsTy = MapVector<Instruction *, InstructionCost>;
1578
1579 /// A set containing all BasicBlocks that are known to be present after
1580 /// vectorization as predicated blocks.
1581 DenseMap<ElementCount, SmallPtrSet<BasicBlock *, 4>>
1582 PredicatedBBsAfterVectorization;
1583
1584 /// Records whether it is allowed to have the original scalar loop execute at
1585 /// least once. This may be needed as a fallback loop in case runtime
1586 /// aliasing/dependence checks fail, or to handle the tail/remainder
1587 /// iterations when the trip count is unknown or doesn't divide by the VF,
1588 /// or as a peel-loop to handle gaps in interleave-groups.
1589 /// Under optsize and when the trip count is very small we don't allow any
1590 /// iterations to execute in the scalar loop.
1591 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1592
1593 /// Controls the finally chosen tail folding style. The first element is used if
1594 /// the IV update may overflow, the second if it does not.
1595 std::optional<std::pair<TailFoldingStyle, TailFoldingStyle>>
1596 ChosenTailFoldingStyle;
1597
1598 /// true if scalable vectorization is supported and enabled.
1599 std::optional<bool> IsScalableVectorizationAllowed;
1600
1601 /// Maximum safe number of elements to be processed per vector iteration,
1602 /// which do not prevent store-load forwarding and are safe with regard to the
1603 /// memory dependencies. Required for EVL-based vectorization, where this
1604 /// value is used as the upper bound of the safe AVL.
1605 std::optional<unsigned> MaxSafeElements;
1606
1607 /// A map holding scalar costs for different vectorization factors. The
1608 /// presence of a cost for an instruction in the mapping indicates that the
1609 /// instruction will be scalarized when vectorizing with the associated
1610 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1611 MapVector<ElementCount, ScalarCostsTy> InstsToScalarize;
1612
1613 /// Holds the instructions known to be uniform after vectorization.
1614 /// The data is collected per VF.
1615 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1616
1617 /// Holds the instructions known to be scalar after vectorization.
1618 /// The data is collected per VF.
1619 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1620
1621 /// Holds the instructions (address computations) that are forced to be
1622 /// scalarized.
1623 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1624
1625 /// PHINodes of the reductions that should be expanded in-loop.
1626 SmallPtrSet<PHINode *, 4> InLoopReductions;
1627
1628 /// A Map of inloop reduction operations and their immediate chain operand.
1629 /// FIXME: This can be removed once reductions can be costed correctly in
1630 /// VPlan. This was added to allow quick lookup of the inloop operations.
1631 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1632
1633 /// Returns the expected difference in cost from scalarizing the expression
1634 /// feeding a predicated instruction \p PredInst. The instructions to
1635 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1636 /// non-negative return value implies the expression will be scalarized.
1637 /// Currently, only single-use chains are considered for scalarization.
1638 InstructionCost computePredInstDiscount(Instruction *PredInst,
1639 ScalarCostsTy &ScalarCosts,
1640 ElementCount VF);
1641
1642 /// Collect the instructions that are uniform after vectorization. An
1643 /// instruction is uniform if we represent it with a single scalar value in
1644 /// the vectorized loop corresponding to each vector iteration. Examples of
1645 /// uniform instructions include pointer operands of consecutive or
1646 /// interleaved memory accesses. Note that although uniformity implies an
1647 /// instruction will be scalar, the reverse is not true. In general, a
1648 /// scalarized instruction will be represented by VF scalar values in the
1649 /// vectorized loop, each corresponding to an iteration of the original
1650 /// scalar loop.
1651 void collectLoopUniforms(ElementCount VF);
1652
1653 /// Collect the instructions that are scalar after vectorization. An
1654 /// instruction is scalar if it is known to be uniform or will be scalarized
1655 /// during vectorization. collectLoopScalars should only add non-uniform nodes
1656 /// to the list if they are used by a load/store instruction that is marked as
1657 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1658 /// VF values in the vectorized loop, each corresponding to an iteration of
1659 /// the original scalar loop.
1660 void collectLoopScalars(ElementCount VF);
1661
1662 /// Keeps cost model vectorization decision and cost for instructions.
1663 /// Right now it is used for memory instructions only.
1664 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1665 std::pair<InstWidening, InstructionCost>>;
1666
1667 DecisionList WideningDecisions;
1668
1669 using CallDecisionList =
1670 DenseMap<std::pair<CallInst *, ElementCount>, CallWideningDecision>;
1671
1672 CallDecisionList CallWideningDecisions;
1673
1674 /// Returns true if \p V is expected to be vectorized and it needs to be
1675 /// extracted.
1676 bool needsExtract(Value *V, ElementCount VF) const {
1678 if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1679 TheLoop->isLoopInvariant(I) ||
1680 getWideningDecision(I, VF) == CM_Scalarize ||
1681 (isa<CallInst>(I) &&
1682 getCallWideningDecision(cast<CallInst>(I), VF).Kind == CM_Scalarize))
1683 return false;
1684
1685 // Assume we can vectorize V (and hence we need extraction) if the
1686 // scalars are not computed yet. This can happen, because it is called
1687 // via getScalarizationOverhead from setCostBasedWideningDecision, before
1688 // the scalars are collected. That should be a safe assumption in most
1689 // cases, because we check if the operands have vectorizable types
1690 // beforehand in LoopVectorizationLegality.
1691 return !Scalars.contains(VF) || !isScalarAfterVectorization(I, VF);
1692 };
1693
1694 /// Returns a range containing only operands needing to be extracted.
1695 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1696 ElementCount VF) const {
1697
1698 SmallPtrSet<const Value *, 4> UniqueOperands;
1699 SmallVector<Value *, 4> Res;
1700 for (Value *Op : Ops) {
1701 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second ||
1702 !needsExtract(Op, VF))
1703 continue;
1704 Res.push_back(Op);
1705 }
1706 return Res;
1707 }
1708
1709public:
1710 /// The loop that we evaluate.
1712
1713 /// Predicated scalar evolution analysis.
1715
1716 /// Loop Info analysis.
1718
1719 /// Vectorization legality.
1721
1722 /// Vector target information.
1724
1725 /// Target Library Info.
1727
1728 /// Demanded bits analysis.
1730
1731 /// Assumption cache.
1733
1734 /// Interface to emit optimization remarks.
1736
1737 /// A function to lazily fetch BlockFrequencyInfo. This avoids computing it
1738 /// unless necessary, e.g. when the loop isn't legal to vectorize or when
1739 /// there is no predication.
1740 std::function<BlockFrequencyInfo &()> GetBFI;
1741 /// The BlockFrequencyInfo returned from GetBFI.
1743 /// Returns the BlockFrequencyInfo for the function if cached, otherwise
1744 /// fetches it via GetBFI. Avoids an indirect call to the std::function.
1746 if (!BFI)
1747 BFI = &GetBFI();
1748 return *BFI;
1749 }
1750
1752
1753 /// Loop Vectorize Hint.
1755
1756 /// The interleave access information contains groups of interleaved accesses
1757 /// with the same stride and close to each other.
1759
1760 /// Values to ignore in the cost model.
1762
1763 /// Values to ignore in the cost model when VF > 1.
1765
1766 /// All element types found in the loop.
1768
1769 /// The kind of cost that we are calculating
1771
1772 /// Whether this loop should be optimized for size based on function attribute
1773 /// or profile information.
1775
1776 /// The highest VF possible for this loop, without using MaxBandwidth.
1778};
1779} // end namespace llvm
1780
1781namespace {
1782/// Helper struct to manage generating runtime checks for vectorization.
1783///
1784 /// The runtime checks are created up-front in temporary blocks, un-linked from
1785 /// the existing IR, so that their cost can be estimated accurately. After deciding to
1786/// vectorize, the checks are moved back. If deciding not to vectorize, the
1787/// temporary blocks are completely removed.
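/// An illustrative lifecycle: the checks are expanded into detached blocks,
/// their cost is queried so the planner can fold it into the profitability
/// decision, and then they are either re-linked into the IR when vectorizing
/// or erased by the destructor if unused.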
1788class GeneratedRTChecks {
1789 /// Basic block which contains the generated SCEV checks, if any.
1790 BasicBlock *SCEVCheckBlock = nullptr;
1791
1792 /// The value representing the result of the generated SCEV checks. If it is
1793 /// nullptr no SCEV checks have been generated.
1794 Value *SCEVCheckCond = nullptr;
1795
1796 /// Basic block which contains the generated memory runtime checks, if any.
1797 BasicBlock *MemCheckBlock = nullptr;
1798
1799 /// The value representing the result of the generated memory runtime checks.
1800 /// If it is nullptr no memory runtime checks have been generated.
1801 Value *MemRuntimeCheckCond = nullptr;
1802
1803 DominatorTree *DT;
1804 LoopInfo *LI;
1806
1807 SCEVExpander SCEVExp;
1808 SCEVExpander MemCheckExp;
1809
1810 bool CostTooHigh = false;
1811
1812 Loop *OuterLoop = nullptr;
1813
1815
1816 /// The kind of cost that we are calculating
1818
1819public:
1820 GeneratedRTChecks(PredicatedScalarEvolution &PSE, DominatorTree *DT,
1823 : DT(DT), LI(LI), TTI(TTI),
1824 SCEVExp(*PSE.getSE(), "scev.check", /*PreserveLCSSA=*/false),
1825 MemCheckExp(*PSE.getSE(), "scev.check", /*PreserveLCSSA=*/false),
1826 PSE(PSE), CostKind(CostKind) {}
1827
1828 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1829 /// accurately estimate the cost of the runtime checks. The blocks are
1830 /// un-linked from the IR and are added back during vector code generation. If
1831 /// there is no vector code generation, the check blocks are removed
1832 /// completely.
1833 void create(Loop *L, const LoopAccessInfo &LAI,
1834 const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC,
1835 OptimizationRemarkEmitter &ORE) {
1836
1837 // Hard cutoff to limit compile-time increase in case a very large number of
1838 // runtime checks needs to be generated.
1839 // TODO: Skip cutoff if the loop is guaranteed to execute, e.g. due to
1840 // profile info.
1841 CostTooHigh =
1843 if (CostTooHigh) {
1844 // Mark runtime checks as never succeeding when they exceed the threshold.
1845 MemRuntimeCheckCond = ConstantInt::getTrue(L->getHeader()->getContext());
1846 SCEVCheckCond = ConstantInt::getTrue(L->getHeader()->getContext());
1847 ORE.emit([&]() {
1848 return OptimizationRemarkAnalysisAliasing(
1849 DEBUG_TYPE, "TooManyMemoryRuntimeChecks", L->getStartLoc(),
1850 L->getHeader())
1851 << "loop not vectorized: too many memory checks needed";
1852 });
1853 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
1854 return;
1855 }
1856
1857 BasicBlock *LoopHeader = L->getHeader();
1858 BasicBlock *Preheader = L->getLoopPreheader();
1859
1860 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1861 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1862 // may be used by SCEVExpander. The blocks will be un-linked from their
1863 // predecessors and removed from LI & DT at the end of the function.
1864 if (!UnionPred.isAlwaysTrue()) {
1865 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1866 nullptr, "vector.scevcheck");
1867
1868 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1869 &UnionPred, SCEVCheckBlock->getTerminator());
1870 if (isa<Constant>(SCEVCheckCond)) {
1871 // Clean up directly after expanding the predicate to a constant, to
1872 // avoid further expansions re-using anything left over from SCEVExp.
1873 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1874 SCEVCleaner.cleanup();
1875 }
1876 }
1877
1878 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1879 if (RtPtrChecking.Need) {
1880 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1881 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1882 "vector.memcheck");
1883
1884 auto DiffChecks = RtPtrChecking.getDiffChecks();
1885 if (DiffChecks) {
1886 Value *RuntimeVF = nullptr;
1887 MemRuntimeCheckCond = addDiffRuntimeChecks(
1888 MemCheckBlock->getTerminator(), *DiffChecks, MemCheckExp,
1889 [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) {
1890 if (!RuntimeVF)
1891 RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);
1892 return RuntimeVF;
1893 },
1894 IC);
1895 } else {
1896 MemRuntimeCheckCond = addRuntimeChecks(
1897 MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),
1899 }
1900 assert(MemRuntimeCheckCond &&
1901 "no RT checks generated although RtPtrChecking "
1902 "claimed checks are required");
1903 }
1904
1905 SCEVExp.eraseDeadInstructions(SCEVCheckCond);
1906
1907 if (!MemCheckBlock && !SCEVCheckBlock)
1908 return;
1909
1910 // Unhook the temporary block with the checks, update various places
1911 // accordingly.
1912 if (SCEVCheckBlock)
1913 SCEVCheckBlock->replaceAllUsesWith(Preheader);
1914 if (MemCheckBlock)
1915 MemCheckBlock->replaceAllUsesWith(Preheader);
1916
1917 if (SCEVCheckBlock) {
1918 SCEVCheckBlock->getTerminator()->moveBefore(
1919 Preheader->getTerminator()->getIterator());
1920 auto *UI = new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1921 UI->setDebugLoc(DebugLoc::getTemporary());
1922 Preheader->getTerminator()->eraseFromParent();
1923 }
1924 if (MemCheckBlock) {
1925 MemCheckBlock->getTerminator()->moveBefore(
1926 Preheader->getTerminator()->getIterator());
1927 auto *UI = new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1928 UI->setDebugLoc(DebugLoc::getTemporary());
1929 Preheader->getTerminator()->eraseFromParent();
1930 }
1931
1932 DT->changeImmediateDominator(LoopHeader, Preheader);
1933 if (MemCheckBlock) {
1934 DT->eraseNode(MemCheckBlock);
1935 LI->removeBlock(MemCheckBlock);
1936 }
1937 if (SCEVCheckBlock) {
1938 DT->eraseNode(SCEVCheckBlock);
1939 LI->removeBlock(SCEVCheckBlock);
1940 }
1941
1942 // Outer loop is used as part of the later cost calculations.
1943 OuterLoop = L->getParentLoop();
1944 }
1945
1947 if (SCEVCheckBlock || MemCheckBlock)
1948 LLVM_DEBUG(dbgs() << "Calculating cost of runtime checks:\n");
1949
1950 if (CostTooHigh) {
1952 Cost.setInvalid();
1953 LLVM_DEBUG(dbgs() << " number of checks exceeded threshold\n");
1954 return Cost;
1955 }
1956
1957 InstructionCost RTCheckCost = 0;
1958 if (SCEVCheckBlock)
1959 for (Instruction &I : *SCEVCheckBlock) {
1960 if (SCEVCheckBlock->getTerminator() == &I)
1961 continue;
1963 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1964 RTCheckCost += C;
1965 }
1966 if (MemCheckBlock) {
1967 InstructionCost MemCheckCost = 0;
1968 for (Instruction &I : *MemCheckBlock) {
1969 if (MemCheckBlock->getTerminator() == &I)
1970 continue;
1972 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1973 MemCheckCost += C;
1974 }
1975
1976 // If the runtime memory checks are being created inside an outer loop
1977 // we should find out if these checks are outer loop invariant. If so,
1978 // the checks will likely be hoisted out and so the effective cost will
1979 // reduce according to the outer loop trip count.
1980 if (OuterLoop) {
1981 ScalarEvolution *SE = MemCheckExp.getSE();
1982 // TODO: If profitable, we could refine this further by analysing every
1983 // individual memory check, since there could be a mixture of loop
1984 // variant and invariant checks that mean the final condition is
1985 // variant.
1986 const SCEV *Cond = SE->getSCEV(MemRuntimeCheckCond);
1987 if (SE->isLoopInvariant(Cond, OuterLoop)) {
1988 // It seems reasonable to assume that we can reduce the effective
1989 // cost of the checks even when we know nothing about the trip
1990 // count. Assume that the outer loop executes at least twice.
1991 unsigned BestTripCount = 2;
1992
1993 // Get the best known TC estimate.
1994 if (auto EstimatedTC = getSmallBestKnownTC(
1995 PSE, OuterLoop, /* CanUseConstantMax = */ false))
1996 if (EstimatedTC->isFixed())
1997 BestTripCount = EstimatedTC->getFixedValue();
1998
1999 InstructionCost NewMemCheckCost = MemCheckCost / BestTripCount;
2000
2001 // Let's ensure the cost is always at least 1.
2002 NewMemCheckCost = std::max(NewMemCheckCost.getValue(),
2003 (InstructionCost::CostType)1);
2004
2005 if (BestTripCount > 1)
2007 << "We expect runtime memory checks to be hoisted "
2008 << "out of the outer loop. Cost reduced from "
2009 << MemCheckCost << " to " << NewMemCheckCost << '\n');
2010
2011 MemCheckCost = NewMemCheckCost;
2012 }
2013 }
2014
2015 RTCheckCost += MemCheckCost;
2016 }
2017
2018 if (SCEVCheckBlock || MemCheckBlock)
2019 LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
2020 << "\n");
2021
2022 return RTCheckCost;
2023 }
2024
2025 /// Remove the created SCEV & memory runtime check blocks & instructions, if
2026 /// unused.
2027 ~GeneratedRTChecks() {
2028 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2029 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2030 bool SCEVChecksUsed = !SCEVCheckBlock || !pred_empty(SCEVCheckBlock);
2031 bool MemChecksUsed = !MemCheckBlock || !pred_empty(MemCheckBlock);
2032 if (SCEVChecksUsed)
2033 SCEVCleaner.markResultUsed();
2034
2035 if (MemChecksUsed) {
2036 MemCheckCleaner.markResultUsed();
2037 } else {
2038 auto &SE = *MemCheckExp.getSE();
2039 // Memory runtime check generation creates compares that use expanded
2040 // values. Remove them before running the SCEVExpanderCleaners.
2041 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2042 if (MemCheckExp.isInsertedInstruction(&I))
2043 continue;
2044 SE.forgetValue(&I);
2045 I.eraseFromParent();
2046 }
2047 }
2048 MemCheckCleaner.cleanup();
2049 SCEVCleaner.cleanup();
2050
2051 if (!SCEVChecksUsed)
2052 SCEVCheckBlock->eraseFromParent();
2053 if (!MemChecksUsed)
2054 MemCheckBlock->eraseFromParent();
2055 }
2056
2057 /// Retrieves the SCEVCheckCond and SCEVCheckBlock that were generated as IR
2058 /// outside VPlan.
2059 std::pair<Value *, BasicBlock *> getSCEVChecks() const {
2060 using namespace llvm::PatternMatch;
2061 if (!SCEVCheckCond || match(SCEVCheckCond, m_ZeroInt()))
2062 return {nullptr, nullptr};
2063
2064 return {SCEVCheckCond, SCEVCheckBlock};
2065 }
2066
2067 /// Retrieves the MemCheckCond and MemCheckBlock that were generated as IR
2068 /// outside VPlan.
2069 std::pair<Value *, BasicBlock *> getMemRuntimeChecks() const {
2070 using namespace llvm::PatternMatch;
2071 if (MemRuntimeCheckCond && match(MemRuntimeCheckCond, m_ZeroInt()))
2072 return {nullptr, nullptr};
2073 return {MemRuntimeCheckCond, MemCheckBlock};
2074 }
2075
2076 /// Return true if any runtime checks have been added
2077 bool hasChecks() const {
2078 return getSCEVChecks().first || getMemRuntimeChecks().first;
2079 }
2080};
2081} // namespace
2082
2088
2093
2094// Return true if \p OuterLp is an outer loop annotated with hints for explicit
2095// vectorization. The loop needs to be annotated with #pragma omp simd
2096// simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
2097// vector length information is not provided, vectorization is not considered
2098// explicit. Interleave hints are not allowed either. These limitations will be
2099// relaxed in the future.
2100 // Please note that we are currently forced to abuse the pragma 'clang
2101// vectorize' semantics. This pragma provides *auto-vectorization hints*
2102// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2103// provides *explicit vectorization hints* (LV can bypass legal checks and
2104// assume that vectorization is legal). However, both hints are implemented
2105// using the same metadata (llvm.loop.vectorize, processed by
2106// LoopVectorizeHints). This will be fixed in the future when the native IR
2107// representation for pragma 'omp simd' is introduced.
2108static bool isExplicitVecOuterLoop(Loop *OuterLp,
2110 assert(!OuterLp->isInnermost() && "This is not an outer loop");
2111 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2112
2113 // Only outer loops with an explicit vectorization hint are supported.
2114 // Unannotated outer loops are ignored.
2116 return false;
2117
2118 Function *Fn = OuterLp->getHeader()->getParent();
2119 if (!Hints.allowVectorization(Fn, OuterLp,
2120 true /*VectorizeOnlyWhenForced*/)) {
2121 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2122 return false;
2123 }
2124
2125 if (Hints.getInterleave() > 1) {
2126 // TODO: Interleave support is future work.
2127 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2128 "outer loops.\n");
2129 Hints.emitRemarkWithHints();
2130 return false;
2131 }
2132
2133 return true;
2134}
2135
2139 // Collect inner loops and outer loops without irreducible control flow. For
2140 // now, only collect outer loops that have explicit vectorization hints. If we
2141 // are stress testing the VPlan H-CFG construction, we collect the outermost
2142 // loop of every loop nest.
2143 if (L.isInnermost() || VPlanBuildStressTest ||
2145 LoopBlocksRPO RPOT(&L);
2146 RPOT.perform(LI);
2148 V.push_back(&L);
2149 // TODO: Collect inner loops inside marked outer loops in case
2150 // vectorization fails for the outer loop. Do not invoke
2151 // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2152 // already known to be reducible. We can use an inherited attribute for
2153 // that.
2154 return;
2155 }
2156 }
2157 for (Loop *InnerL : L)
2158 collectSupportedLoops(*InnerL, LI, ORE, V);
2159}
2160
2161//===----------------------------------------------------------------------===//
2162// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2163// LoopVectorizationCostModel and LoopVectorizationPlanner.
2164//===----------------------------------------------------------------------===//
2165
2166/// FIXME: The newly created binary instructions should contain nsw/nuw
2167/// flags, which can be found from the original scalar operations.
2168Value *
2170 Value *Step,
2172 const BinaryOperator *InductionBinOp) {
2173 using namespace llvm::PatternMatch;
2174 Type *StepTy = Step->getType();
2175 Value *CastedIndex = StepTy->isIntegerTy()
2176 ? B.CreateSExtOrTrunc(Index, StepTy)
2177 : B.CreateCast(Instruction::SIToFP, Index, StepTy);
2178 if (CastedIndex != Index) {
2179 CastedIndex->setName(CastedIndex->getName() + ".cast");
2180 Index = CastedIndex;
2181 }
2182
2183 // Note: the IR at this point is broken. We cannot use SE to create any new
2184 // SCEV and then expand it, hoping that SCEV's simplification will give us
2185 // more optimal code. Unfortunately, attempting to do so on invalid IR may
2186 // lead to various SCEV crashes. So all we can do is use the builder and rely
2187 // on InstCombine for future simplifications. Here we handle some trivial
2188 // cases only.
2189 auto CreateAdd = [&B](Value *X, Value *Y) {
2190 assert(X->getType() == Y->getType() && "Types don't match!");
2191 if (match(X, m_ZeroInt()))
2192 return Y;
2193 if (match(Y, m_ZeroInt()))
2194 return X;
2195 return B.CreateAdd(X, Y);
2196 };
2197
2198 // We allow X to be a vector type, in which case Y will potentially be
2199 // splatted into a vector with the same element count.
2200 auto CreateMul = [&B](Value *X, Value *Y) {
2201 assert(X->getType()->getScalarType() == Y->getType() &&
2202 "Types don't match!");
2203 if (match(X, m_One()))
2204 return Y;
2205 if (match(Y, m_One()))
2206 return X;
2207 VectorType *XVTy = dyn_cast<VectorType>(X->getType());
2208 if (XVTy && !isa<VectorType>(Y->getType()))
2209 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
2210 return B.CreateMul(X, Y);
2211 };
2212
2213 switch (InductionKind) {
2215 assert(!isa<VectorType>(Index->getType()) &&
2216 "Vector indices not supported for integer inductions yet");
2217 assert(Index->getType() == StartValue->getType() &&
2218 "Index type does not match StartValue type");
2219 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2220 return B.CreateSub(StartValue, Index);
2221 auto *Offset = CreateMul(Index, Step);
2222 return CreateAdd(StartValue, Offset);
2223 }
2225 return B.CreatePtrAdd(StartValue, CreateMul(Index, Step));
2227 assert(!isa<VectorType>(Index->getType()) &&
2228 "Vector indices not supported for FP inductions yet");
2229 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2230 assert(InductionBinOp &&
2231 (InductionBinOp->getOpcode() == Instruction::FAdd ||
2232 InductionBinOp->getOpcode() == Instruction::FSub) &&
2233 "Original bin op should be defined for FP induction");
2234
2235 Value *MulExp = B.CreateFMul(Step, Index);
2236 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2237 "induction");
2238 }
2240 return nullptr;
2241 }
2242 llvm_unreachable("invalid enum");
2243}
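// For example, for an integer induction with start value S and step C, index I
// is transformed to S + I * C (or S - I when the step is -1); for a pointer
// induction the result is a ptradd of S by I * C; and for an FP induction the
// original fadd/fsub is applied to S and (C * I).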
2244
2245static std::optional<unsigned> getMaxVScale(const Function &F,
2246 const TargetTransformInfo &TTI) {
2247 if (std::optional<unsigned> MaxVScale = TTI.getMaxVScale())
2248 return MaxVScale;
2249
2250 if (F.hasFnAttribute(Attribute::VScaleRange))
2251 return F.getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
2252
2253 return std::nullopt;
2254}
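// For example, if the target reports no maximum vscale but the function is
// annotated with vscale_range(1,16), the maximum vscale is taken to be 16, so
// a scalable VF of vscale x 4 covers at most 64 elements per vector iteration.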
2255
2256/// For the given VF and UF and maximum trip count computed for the loop, return
2257/// whether the induction variable might overflow in the vectorized loop. If not,
2258/// then we know a runtime overflow check always evaluates to false and can be
2259/// removed.
2261 const LoopVectorizationCostModel *Cost,
2262 ElementCount VF, std::optional<unsigned> UF = std::nullopt) {
2263 // Always be conservative if we don't know the exact unroll factor.
2264 unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);
2265
2266 IntegerType *IdxTy = Cost->Legal->getWidestInductionType();
2267 APInt MaxUIntTripCount = IdxTy->getMask();
2268
2269 // We know the runtime overflow check is known false iff the (max) trip-count
2270 // is known and (max) trip-count + (VF * UF) does not overflow in the type of
2271 // the vector loop induction variable.
2272 if (unsigned TC = Cost->PSE.getSmallConstantMaxTripCount()) {
2273 uint64_t MaxVF = VF.getKnownMinValue();
2274 if (VF.isScalable()) {
2275 std::optional<unsigned> MaxVScale =
2276 getMaxVScale(*Cost->TheFunction, Cost->TTI);
2277 if (!MaxVScale)
2278 return false;
2279 MaxVF *= *MaxVScale;
2280 }
2281
2282 return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
2283 }
2284
2285 return false;
2286}
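// For example, with an i8 widest induction type, a known maximum trip count of
// 250 and VF * UF = 8, MaxUIntTripCount - TC is 255 - 250 = 5, which is not
// greater than 8, so the overflow check cannot be elided; with an i32
// induction type and the same trip count the check is known to be false.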
2287
2288// Return whether we allow using masked interleave-groups (for dealing with
2289// strided loads/stores that reside in predicated blocks, or for dealing
2290// with gaps).
2292 // If an override option has been passed in for interleaved accesses, use it.
2293 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2295
2296 return TTI.enableMaskedInterleavedAccessVectorization();
2297}
2298
2300 BasicBlock *CheckIRBB) {
2301 // Note: The block with the minimum trip-count check is already connected
2302 // during earlier VPlan construction.
2303 VPBlockBase *ScalarPH = Plan.getScalarPreheader();
2304 VPBlockBase *PreVectorPH = VectorPHVPBB->getSinglePredecessor();
2305 assert(PreVectorPH->getNumSuccessors() == 2 && "Expected 2 successors");
2306 assert(PreVectorPH->getSuccessors()[0] == ScalarPH && "Unexpected successor");
2307 VPIRBasicBlock *CheckVPIRBB = Plan.createVPIRBasicBlock(CheckIRBB);
2308 VPBlockUtils::insertOnEdge(PreVectorPH, VectorPHVPBB, CheckVPIRBB);
2309 PreVectorPH = CheckVPIRBB;
2310 VPBlockUtils::connectBlocks(PreVectorPH, ScalarPH);
2311 PreVectorPH->swapSuccessors();
2312
2313 // We just connected a new block to the scalar preheader. Update all
2314 // VPPhis by adding an incoming value for it, replicating the last value.
2315 unsigned NumPredecessors = ScalarPH->getNumPredecessors();
2316 for (VPRecipeBase &R : cast<VPBasicBlock>(ScalarPH)->phis()) {
2317 assert(isa<VPPhi>(&R) && "Phi expected to be VPPhi");
2318 assert(cast<VPPhi>(&R)->getNumIncoming() == NumPredecessors - 1 &&
2319 "must have incoming values for all operands");
2320 R.addOperand(R.getOperand(NumPredecessors - 2));
2321 }
2322}
2323
2325 BasicBlock *VectorPH, ElementCount VF, unsigned UF) const {
2326 // Generate code to check if the loop's trip count is less than VF * UF, or
2327 // equal to it in case a scalar epilogue is required; this implies that the
2328 // vector trip count is zero. This check also covers the case where adding one
2329 // to the backedge-taken count overflowed leading to an incorrect trip count
2330 // of zero. In this case we will also jump to the scalar loop.
2331 auto P = Cost->requiresScalarEpilogue(VF.isVector()) ? ICmpInst::ICMP_ULE
2333
2334 // Reuse existing vector loop preheader for TC checks.
2335 // Note that new preheader block is generated for vector loop.
2336 BasicBlock *const TCCheckBlock = VectorPH;
2338 TCCheckBlock->getContext(),
2339 InstSimplifyFolder(TCCheckBlock->getDataLayout()));
2340 Builder.SetInsertPoint(TCCheckBlock->getTerminator());
2341
2342 // If tail is to be folded, vector loop takes care of all iterations.
2344 Type *CountTy = Count->getType();
2345 Value *CheckMinIters = Builder.getFalse();
2346 auto CreateStep = [&]() -> Value * {
2347 // Create step with max(MinProTripCount, UF * VF).
2348 if (UF * VF.getKnownMinValue() >= MinProfitableTripCount.getKnownMinValue())
2349 return createStepForVF(Builder, CountTy, VF, UF);
2350
2351 Value *MinProfTC =
2352 Builder.CreateElementCount(CountTy, MinProfitableTripCount);
2353 if (!VF.isScalable())
2354 return MinProfTC;
2355 return Builder.CreateBinaryIntrinsic(
2356 Intrinsic::umax, MinProfTC, createStepForVF(Builder, CountTy, VF, UF));
2357 };
2358
2359 TailFoldingStyle Style = Cost->getTailFoldingStyle();
2360 if (Style == TailFoldingStyle::None) {
2361 Value *Step = CreateStep();
2362 ScalarEvolution &SE = *PSE.getSE();
2363 // TODO: Emit unconditional branch to vector preheader instead of
2364 // conditional branch with known condition.
2365 const SCEV *TripCountSCEV = SE.applyLoopGuards(SE.getSCEV(Count), OrigLoop);
2366 // Check if the trip count is < the step.
2367 if (SE.isKnownPredicate(P, TripCountSCEV, SE.getSCEV(Step))) {
2368 // TODO: Ensure step is at most the trip count when determining max VF and
2369 // UF, w/o tail folding.
2370 CheckMinIters = Builder.getTrue();
2372 TripCountSCEV, SE.getSCEV(Step))) {
2373 // Generate the minimum iteration check only if we cannot prove the
2374 // check is known to be true, or known to be false.
2375 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
2376 } // else step known to be < trip count, use CheckMinIters preset to false.
2377 } else if (VF.isScalable() && !TTI->isVScaleKnownToBeAPowerOfTwo() &&
2380 // vscale is not necessarily a power-of-2, which means we cannot guarantee
2381 // an overflow to zero when updating induction variables and so an
2382 // additional overflow check is required before entering the vector loop.
2383
2384 // Get the maximum unsigned value for the type.
2385 Value *MaxUIntTripCount =
2386 ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask());
2387 Value *LHS = Builder.CreateSub(MaxUIntTripCount, Count);
2388
2389 // Don't execute the vector loop if (UMax - n) < (VF * UF).
2390 CheckMinIters = Builder.CreateICmp(ICmpInst::ICMP_ULT, LHS, CreateStep());
2391 }
2392 return CheckMinIters;
2393}
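// For example (illustrative), with VF = 4, UF = 2, no tail folding and a
// minimum profitable trip count of at most 8, the check emitted here is
// conceptually 'trip-count u< 8' (u<= when a scalar epilogue is required), and
// a true result means the vector loop is skipped in favor of the scalar loop.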
2394
2395/// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
2396/// VPBB are moved to the end of the newly created VPIRBasicBlock. All
2397/// predecessors and successors of VPBB, if any, are rewired to the new
2398/// VPIRBasicBlock. If \p VPBB may be unreachable, \p Plan must be passed.
2400 BasicBlock *IRBB,
2401 VPlan *Plan = nullptr) {
2402 if (!Plan)
2403 Plan = VPBB->getPlan();
2404 VPIRBasicBlock *IRVPBB = Plan->createVPIRBasicBlock(IRBB);
2405 auto IP = IRVPBB->begin();
2406 for (auto &R : make_early_inc_range(VPBB->phis()))
2407 R.moveBefore(*IRVPBB, IP);
2408
2409 for (auto &R :
2411 R.moveBefore(*IRVPBB, IRVPBB->end());
2412
2413 VPBlockUtils::reassociateBlocks(VPBB, IRVPBB);
2414 // VPBB is now dead and will be cleaned up when the plan gets destroyed.
2415 return IRVPBB;
2416}
2417
2419 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
2420 assert(VectorPH && "Invalid loop structure");
2421 assert((OrigLoop->getUniqueLatchExitBlock() ||
2422 Cost->requiresScalarEpilogue(VF.isVector())) &&
2423 "loops not exiting via the latch without required epilogue?");
2424
2425 // NOTE: The Plan's scalar preheader VPBB isn't replaced with a VPIRBasicBlock
2426 // wrapping the newly created scalar preheader here at the moment, because the
2427 // Plan's scalar preheader may be unreachable at this point. Instead it is
2428 // replaced in executePlan.
2429 return SplitBlock(VectorPH, VectorPH->getTerminator(), DT, LI, nullptr,
2430 Twine(Prefix) + "scalar.ph");
2431}
2432
2433/// Return the expanded step for \p ID using \p ExpandedSCEVs to look up SCEV
2434/// expansion results.
2436 const SCEV2ValueTy &ExpandedSCEVs) {
2437 const SCEV *Step = ID.getStep();
2438 if (auto *C = dyn_cast<SCEVConstant>(Step))
2439 return C->getValue();
2440 if (auto *U = dyn_cast<SCEVUnknown>(Step))
2441 return U->getValue();
2442 Value *V = ExpandedSCEVs.lookup(Step);
2443 assert(V && "SCEV must be expanded at this point");
2444 return V;
2445}
2446
2447/// Knowing that loop \p L executes a single vector iteration, add instructions
2448/// that will get simplified and thus should not have any cost to \p
2449/// InstsToIgnore.
2452 SmallPtrSetImpl<Instruction *> &InstsToIgnore) {
2453 auto *Cmp = L->getLatchCmpInst();
2454 if (Cmp)
2455 InstsToIgnore.insert(Cmp);
2456 for (const auto &KV : IL) {
2457 // Extract the key by hand so that it can be used in the lambda below. Note
2458 // that captured structured bindings are a C++20 extension.
2459 const PHINode *IV = KV.first;
2460
2461 // Get next iteration value of the induction variable.
2462 Instruction *IVInst =
2463 cast<Instruction>(IV->getIncomingValueForBlock(L->getLoopLatch()));
2464 if (all_of(IVInst->users(),
2465 [&](const User *U) { return U == IV || U == Cmp; }))
2466 InstsToIgnore.insert(IVInst);
2467 }
2468}
2469
2471 // Create a new IR basic block for the scalar preheader.
2472 BasicBlock *ScalarPH = createScalarPreheader("");
2473 return ScalarPH->getSinglePredecessor();
2474}
2475
2476namespace {
2477
2478struct CSEDenseMapInfo {
2479 static bool canHandle(const Instruction *I) {
2482 }
2483
2484 static inline Instruction *getEmptyKey() {
2486 }
2487
2488 static inline Instruction *getTombstoneKey() {
2489 return DenseMapInfo<Instruction *>::getTombstoneKey();
2490 }
2491
2492 static unsigned getHashValue(const Instruction *I) {
2493 assert(canHandle(I) && "Unknown instruction!");
2494 return hash_combine(I->getOpcode(),
2495 hash_combine_range(I->operand_values()));
2496 }
2497
2498 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
2499 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
2500 LHS == getTombstoneKey() || RHS == getTombstoneKey())
2501 return LHS == RHS;
2502 return LHS->isIdenticalTo(RHS);
2503 }
2504};
2505
2506} // end anonymous namespace
2507
2508/// FIXME: This legacy common-subexpression-elimination routine is scheduled for
2509/// removal, in favor of the VPlan-based one.
2510static void legacyCSE(BasicBlock *BB) {
2511 // Perform simple cse.
2513 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
2514 if (!CSEDenseMapInfo::canHandle(&In))
2515 continue;
2516
2517 // Check if we can replace this instruction with any of the
2518 // visited instructions.
2519 if (Instruction *V = CSEMap.lookup(&In)) {
2520 In.replaceAllUsesWith(V);
2521 In.eraseFromParent();
2522 continue;
2523 }
2524
2525 CSEMap[&In] = &In;
2526 }
2527}
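// For example, two identical instructions in the block, such as
//   %a = add i32 %x, %y
//   %b = add i32 %x, %y
// are folded by replacing all uses of %b with %a and erasing %b.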
2528
2529/// This function attempts to return a value that represents the ElementCount
2530/// at runtime. For fixed-width VFs we know this precisely at compile
2531/// time, but for scalable VFs we calculate it based on an estimate of the
2532/// vscale value.
2534 std::optional<unsigned> VScale) {
2535 unsigned EstimatedVF = VF.getKnownMinValue();
2536 if (VF.isScalable())
2537 if (VScale)
2538 EstimatedVF *= *VScale;
2539 assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");
2540 return EstimatedVF;
2541}
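// For example, a fixed VF of 8 is returned as-is, while a scalable VF of
// vscale x 4 with an estimated vscale of 2 yields an estimated runtime VF of 8.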
2542
2545 ElementCount VF) const {
2546 // We only need to calculate a cost if the VF is scalar; for actual vectors
2547 // we should already have a pre-calculated cost at each VF.
2548 if (!VF.isScalar())
2549 return getCallWideningDecision(CI, VF).Cost;
2550
2551 Type *RetTy = CI->getType();
2553 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy))
2554 return *RedCost;
2555
2557 for (auto &ArgOp : CI->args())
2558 Tys.push_back(ArgOp->getType());
2559
2560 InstructionCost ScalarCallCost =
2561 TTI.getCallInstrCost(CI->getCalledFunction(), RetTy, Tys, CostKind);
2562
2563 // If this is an intrinsic we may have a lower cost for it.
2566 return std::min(ScalarCallCost, IntrinsicCost);
2567 }
2568 return ScalarCallCost;
2569}
2570
2572 if (VF.isScalar() || !canVectorizeTy(Ty))
2573 return Ty;
2574 return toVectorizedTy(Ty, VF);
2575}
2576
2579 ElementCount VF) const {
2581 assert(ID && "Expected intrinsic call!");
2582 Type *RetTy = maybeVectorizeType(CI->getType(), VF);
2583 FastMathFlags FMF;
2584 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
2585 FMF = FPMO->getFastMathFlags();
2586
2589 SmallVector<Type *> ParamTys;
2590 std::transform(FTy->param_begin(), FTy->param_end(),
2591 std::back_inserter(ParamTys),
2592 [&](Type *Ty) { return maybeVectorizeType(Ty, VF); });
2593
2594 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
2597 return TTI.getIntrinsicInstrCost(CostAttrs, CostKind);
2598}
2599
2601 // Fix widened non-induction PHIs by setting up the PHI operands.
2602 fixNonInductionPHIs(State);
2603
2604 // Don't apply optimizations below when no (vector) loop remains, as they all
2605 // require one at the moment.
2606 VPBasicBlock *HeaderVPBB =
2607 vputils::getFirstLoopHeader(*State.Plan, State.VPDT);
2608 if (!HeaderVPBB)
2609 return;
2610
2611 BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB];
2612
2613 // Remove redundant induction instructions.
2614 legacyCSE(HeaderBB);
2615}
2616
2618 auto Iter = vp_depth_first_shallow(Plan.getEntry());
2620 for (VPRecipeBase &P : VPBB->phis()) {
2622 if (!VPPhi)
2623 continue;
2624 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi));
2625 // Make sure the builder has a valid insert point.
2626 Builder.SetInsertPoint(NewPhi);
2627 for (const auto &[Inc, VPBB] : VPPhi->incoming_values_and_blocks())
2628 NewPhi->addIncoming(State.get(Inc), State.CFG.VPBB2IRBB[VPBB]);
2629 }
2630 }
2631}
2632
2633void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
2634 // We should not collect Scalars more than once per VF. Right now, this
2635 // function is called from collectUniformsAndScalars(), which already does
2636 // this check. Collecting Scalars for VF=1 does not make any sense.
2637 assert(VF.isVector() && !Scalars.contains(VF) &&
2638 "This function should not be visited twice for the same VF");
2639
2640 // This avoids any chances of creating a REPLICATE recipe during planning
2641 // since that would result in generation of scalarized code during execution,
2642 // which is not supported for scalable vectors.
2643 if (VF.isScalable()) {
2644 Scalars[VF].insert_range(Uniforms[VF]);
2645 return;
2646 }
2647
2649
2650 // These sets are used to seed the analysis with pointers used by memory
2651 // accesses that will remain scalar.
2653 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
2654 auto *Latch = TheLoop->getLoopLatch();
2655
2656 // A helper that returns true if the use of Ptr by MemAccess will be scalar.
2657 // The pointer operands of loads and stores will be scalar as long as the
2658 // memory access is not a gather or scatter operation. The value operand of a
2659 // store will remain scalar if the store is scalarized.
2660 auto IsScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
2661 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
2662 assert(WideningDecision != CM_Unknown &&
2663 "Widening decision should be ready at this moment");
2664 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
2665 if (Ptr == Store->getValueOperand())
2666 return WideningDecision == CM_Scalarize;
2667 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
2668 "Ptr is neither a value or pointer operand");
2669 return WideningDecision != CM_GatherScatter;
2670 };
2671
2672 // A helper that returns true if the given value is a getelementptr
2673 // instruction that is not invariant in the loop.
2674 auto IsLoopVaryingGEP = [&](Value *V) {
2675 return isa<GetElementPtrInst>(V) && !TheLoop->isLoopInvariant(V);
2676 };
2677
2678 // A helper that evaluates a memory access's use of a pointer. If the use will
2679 // be a scalar use and the pointer is only used by memory accesses, we place
2680 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
2681 // PossibleNonScalarPtrs.
2682 auto EvaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
2683 // We only care about getelementptr instructions contained in
2684 // the loop.
2685 if (!IsLoopVaryingGEP(Ptr))
2686 return;
2687
2688 // If the pointer has already been identified as scalar (e.g., if it was
2689 // also identified as uniform), there's nothing to do.
2690 auto *I = cast<Instruction>(Ptr);
2691 if (Worklist.count(I))
2692 return;
2693
2694 // If the use of the pointer will be a scalar use, and all users of the
2695 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
2696 // place the pointer in PossibleNonScalarPtrs.
2697 if (IsScalarUse(MemAccess, Ptr) &&
2699 ScalarPtrs.insert(I);
2700 else
2701 PossibleNonScalarPtrs.insert(I);
2702 };
2703
2704 // We seed the scalars analysis with two classes of instructions: (1)
2705 // instructions marked uniform-after-vectorization and (2) bitcast,
2706 // getelementptr and (pointer) phi instructions used by memory accesses
2707 // requiring a scalar use.
2708 //
2709 // (1) Add to the worklist all instructions that have been identified as
2710 // uniform-after-vectorization.
2711 Worklist.insert_range(Uniforms[VF]);
2712
2713 // (2) Add to the worklist all bitcast and getelementptr instructions used by
2714 // memory accesses requiring a scalar use. The pointer operands of loads and
2715 // stores will be scalar unless the operation is a gather or scatter.
2716 // The value operand of a store will remain scalar if the store is scalarized.
2717 for (auto *BB : TheLoop->blocks())
2718 for (auto &I : *BB) {
2719 if (auto *Load = dyn_cast<LoadInst>(&I)) {
2720 EvaluatePtrUse(Load, Load->getPointerOperand());
2721 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
2722 EvaluatePtrUse(Store, Store->getPointerOperand());
2723 EvaluatePtrUse(Store, Store->getValueOperand());
2724 }
2725 }
2726 for (auto *I : ScalarPtrs)
2727 if (!PossibleNonScalarPtrs.count(I)) {
2728 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
2729 Worklist.insert(I);
2730 }
2731
2732 // Insert the forced scalars.
2733 // FIXME: Currently VPWidenPHIRecipe() often creates a dead vector
2734 // induction variable when the PHI user is scalarized.
2735 auto ForcedScalar = ForcedScalars.find(VF);
2736 if (ForcedScalar != ForcedScalars.end())
2737 for (auto *I : ForcedScalar->second) {
2738 LLVM_DEBUG(dbgs() << "LV: Found (forced) scalar instruction: " << *I << "\n");
2739 Worklist.insert(I);
2740 }
2741
2742 // Expand the worklist by looking through any bitcasts and getelementptr
2743 // instructions we've already identified as scalar. This is similar to the
2744 // expansion step in collectLoopUniforms(); however, here we're only
2745 // expanding to include additional bitcasts and getelementptr instructions.
2746 unsigned Idx = 0;
2747 while (Idx != Worklist.size()) {
2748 Instruction *Dst = Worklist[Idx++];
2749 if (!IsLoopVaryingGEP(Dst->getOperand(0)))
2750 continue;
2751 auto *Src = cast<Instruction>(Dst->getOperand(0));
2752 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
2753 auto *J = cast<Instruction>(U);
2754 return !TheLoop->contains(J) || Worklist.count(J) ||
2755 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
2756 IsScalarUse(J, Src));
2757 })) {
2758 Worklist.insert(Src);
2759 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
2760 }
2761 }
2762
2763 // An induction variable will remain scalar if all users of the induction
2764 // variable and induction variable update remain scalar.
2765 for (const auto &Induction : Legal->getInductionVars()) {
2766 auto *Ind = Induction.first;
2767 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
2768
2769 // If tail-folding is applied, the primary induction variable will be used
2770 // to feed a vector compare.
2771 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
2772 continue;
2773
2774 // Returns true if \p Indvar is a pointer induction that is used directly by
2775 // load/store instruction \p I.
2776 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
2777 Instruction *I) {
2778 return Induction.second.getKind() ==
2781 Indvar == getLoadStorePointerOperand(I) && IsScalarUse(I, Indvar);
2782 };
2783
2784 // Determine if all users of the induction variable are scalar after
2785 // vectorization.
2786 bool ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
2787 auto *I = cast<Instruction>(U);
2788 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2789 IsDirectLoadStoreFromPtrIndvar(Ind, I);
2790 });
2791 if (!ScalarInd)
2792 continue;
2793
2794 // If the induction variable update is a fixed-order recurrence, neither the
2795 // induction variable nor its update should be marked scalar after
2796 // vectorization.
2797 auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
2798 if (IndUpdatePhi && Legal->isFixedOrderRecurrence(IndUpdatePhi))
2799 continue;
2800
2801 // Determine if all users of the induction variable update instruction are
2802 // scalar after vectorization.
2803 bool ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
2804 auto *I = cast<Instruction>(U);
2805 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
2806 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
2807 });
2808 if (!ScalarIndUpdate)
2809 continue;
2810
2811 // The induction variable and its update instruction will remain scalar.
2812 Worklist.insert(Ind);
2813 Worklist.insert(IndUpdate);
2814 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
2815 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
2816 << "\n");
2817 }
2818
2819 Scalars[VF].insert_range(Worklist);
2820}
2821
2823 ElementCount VF) {
2824 if (!isPredicatedInst(I))
2825 return false;
2826
2827 // Do we have a non-scalar lowering for this predicated
2828 // instruction? No - it is scalar with predication.
2829 switch(I->getOpcode()) {
2830 default:
2831 return true;
2832 case Instruction::Call:
2833 if (VF.isScalar())
2834 return true;
2836 case Instruction::Load:
2837 case Instruction::Store: {
2838 auto *Ptr = getLoadStorePointerOperand(I);
2839 auto *Ty = getLoadStoreType(I);
2840 unsigned AS = getLoadStoreAddressSpace(I);
2841 Type *VTy = Ty;
2842 if (VF.isVector())
2843 VTy = VectorType::get(Ty, VF);
2844 const Align Alignment = getLoadStoreAlignment(I);
2845 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment, AS) ||
2846 TTI.isLegalMaskedGather(VTy, Alignment))
2847 : !(isLegalMaskedStore(Ty, Ptr, Alignment, AS) ||
2848 TTI.isLegalMaskedScatter(VTy, Alignment));
2849 }
2850 case Instruction::UDiv:
2851 case Instruction::SDiv:
2852 case Instruction::SRem:
2853 case Instruction::URem: {
2854 // We have the option to use the safe-divisor idiom to avoid predication.
2855 // The cost based decision here will always select safe-divisor for
2856 // scalable vectors as scalarization isn't legal.
2857 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
2858 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost);
2859 }
2860 }
2861}
2862
2863// TODO: Fold into LoopVectorizationLegality::isMaskRequired.
2865 // TODO: We can use the loop-preheader as context point here and get
2866 // context sensitive reasoning for isSafeToSpeculativelyExecute.
2868 (isa<LoadInst, StoreInst, CallInst>(I) && !Legal->isMaskRequired(I)) ||
2870 return false;
2871
2872 // If the instruction was executed conditionally in the original scalar loop,
2873 // predication is needed with a mask whose lanes are all possibly inactive.
2874 if (Legal->blockNeedsPredication(I->getParent()))
2875 return true;
2876
2877 // If we're not folding the tail by masking, predication is unnecessary.
2878 if (!foldTailByMasking())
2879 return false;
2880
2881 // All that remain are instructions with side-effects originally executed in
2882 // the loop unconditionally, but now execute under a tail-fold mask (only)
2883 // having at least one active lane (the first). If the side-effects of the
2884 // instruction are invariant, executing it w/o (the tail-folding) mask is safe
2885 // - it will cause the same side-effects as when masked.
2886 switch(I->getOpcode()) {
2887 default:
2889 "instruction should have been considered by earlier checks");
2890 case Instruction::Call:
2891 // Side-effects of a Call are assumed to be non-invariant, needing a
2892 // (fold-tail) mask.
2893 assert(Legal->isMaskRequired(I) &&
2894 "should have returned earlier for calls not needing a mask");
2895 return true;
2896 case Instruction::Load:
2897 // If the address is loop invariant no predication is needed.
2898 return !Legal->isInvariant(getLoadStorePointerOperand(I));
2899 case Instruction::Store: {
2900 // For stores, we need to prove both speculation safety (which follows from
2901 // the same argument as loads) and that the value being stored
2902 // is correct. The easiest form of the latter is to require that all values
2903 // stored are the same.
2904 return !(Legal->isInvariant(getLoadStorePointerOperand(I)) &&
2905 TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand()));
2906 }
2907 case Instruction::UDiv:
2908 case Instruction::URem:
2909 // If the divisor is loop-invariant no predication is needed.
2910 return !Legal->isInvariant(I->getOperand(1));
2911 case Instruction::SDiv:
2912 case Instruction::SRem:
2913 // Conservative for now, since masked-off lanes may be poison and could
2914 // trigger signed overflow.
2915 return true;
2916 }
2917}
2918
2922 return 1;
2923 // If the block wasn't originally predicated then return early to avoid
2924 // computing BlockFrequencyInfo unnecessarily.
2925 if (!Legal->blockNeedsPredication(BB))
2926 return 1;
2927
2928 uint64_t HeaderFreq =
2929 getBFI().getBlockFreq(TheLoop->getHeader()).getFrequency();
2930 uint64_t BBFreq = getBFI().getBlockFreq(BB).getFrequency();
2931 assert(HeaderFreq >= BBFreq &&
2932 "Header has smaller block freq than dominated BB?");
2933 return std::round((double)HeaderFreq / BBFreq);
2934}
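// For example, if block frequencies show the loop header executing 100 times
// for every 25 executions of the predicated block, the divisor is
// round(100 / 25) = 4, and callers scale the block's cost down accordingly.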
2935
2936std::pair<InstructionCost, InstructionCost>
2938 ElementCount VF) {
2939 assert(I->getOpcode() == Instruction::UDiv ||
2940 I->getOpcode() == Instruction::SDiv ||
2941 I->getOpcode() == Instruction::SRem ||
2942 I->getOpcode() == Instruction::URem);
2944
2945 // Scalarization isn't legal for scalable vector types
2946 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2947 if (!VF.isScalable()) {
2948 // Get the scalarization cost and scale this amount by the probability of
2949 // executing the predicated block. If the instruction is not predicated,
2950 // we fall through to the next case.
2951 ScalarizationCost = 0;
2952
2953 // These instructions have a non-void type, so account for the phi nodes
2954 // that we will create. This cost is likely to be zero. The phi node
2955 // cost, if any, should be scaled by the block probability because it
2956 // models a copy at the end of each predicated block.
2957 ScalarizationCost +=
2958 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
2959
2960 // The cost of the non-predicated instruction.
2961 ScalarizationCost +=
2962 VF.getFixedValue() *
2963 TTI.getArithmeticInstrCost(I->getOpcode(), I->getType(), CostKind);
2964
2965 // The cost of insertelement and extractelement instructions needed for
2966 // scalarization.
2967 ScalarizationCost += getScalarizationOverhead(I, VF);
2968
2969 // Scale the cost by the probability of executing the predicated blocks.
2970 // This assumes the predicated block for each vector lane is equally
2971 // likely.
2972 ScalarizationCost =
2973 ScalarizationCost / getPredBlockCostDivisor(CostKind, I->getParent());
2974 }
2975
2976 InstructionCost SafeDivisorCost = 0;
2977 auto *VecTy = toVectorTy(I->getType(), VF);
2978 // The cost of the select guard to ensure all lanes are well defined
2979 // after we speculate above any internal control flow.
2980 SafeDivisorCost +=
2981 TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
2982 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
2984
2985 SmallVector<const Value *, 4> Operands(I->operand_values());
2986 SafeDivisorCost += TTI.getArithmeticInstrCost(
2987 I->getOpcode(), VecTy, CostKind,
2988 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2989 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2990 Operands, I);
2991 return {ScalarizationCost, SafeDivisorCost};
2992}
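// For example (illustrative), a predicated udiv can either be scalarized into
// VF predicated scalar divides, or widened with a safe divisor, conceptually:
//   %safe.d = select %mask, %d, splat(1)
//   %res    = udiv %x, %safe.d
// The two costs computed above let the caller pick the cheaper form.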
2993
2995 Instruction *I, ElementCount VF) const {
2996 assert(isAccessInterleaved(I) && "Expecting interleaved access.");
2998 "Decision should not be set yet.");
2999 auto *Group = getInterleavedAccessGroup(I);
3000 assert(Group && "Must have a group.");
3001 unsigned InterleaveFactor = Group->getFactor();
3002
3003 // If the instruction's allocated size doesn't equal its type size, it
3004 // requires padding and will be scalarized.
3005 auto &DL = I->getDataLayout();
3006 auto *ScalarTy = getLoadStoreType(I);
3007 if (hasIrregularType(ScalarTy, DL))
3008 return false;
3009
3010 // For scalable vectors, the interleave factors must be <= 8 since we require
3011 // the (de)interleaveN intrinsics instead of shufflevectors.
3012 if (VF.isScalable() && InterleaveFactor > 8)
3013 return false;
3014
3015 // If the group involves a non-integral pointer, we may not be able to
3016 // losslessly cast all values to a common type.
3017 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
3018 for (unsigned Idx = 0; Idx < InterleaveFactor; Idx++) {
3019 Instruction *Member = Group->getMember(Idx);
3020 if (!Member)
3021 continue;
3022 auto *MemberTy = getLoadStoreType(Member);
3023 bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
3024 // Don't coerce non-integral pointers to integers or vice versa.
3025 if (MemberNI != ScalarNI)
3026 // TODO: Consider adding special nullptr value case here
3027 return false;
3028 if (MemberNI && ScalarNI &&
3029 ScalarTy->getPointerAddressSpace() !=
3030 MemberTy->getPointerAddressSpace())
3031 return false;
3032 }
3033
3034 // Check if masking is required.
3035 // A Group may need masking for one of two reasons: it resides in a block that
3036 // needs predication, or it was decided to use masking to deal with gaps
3037 // (either a gap at the end of a load-access that may result in a speculative
3038 // load, or any gaps in a store-access).
3039 bool PredicatedAccessRequiresMasking =
3040 blockNeedsPredicationForAnyReason(I->getParent()) &&
3041 Legal->isMaskRequired(I);
3042 bool LoadAccessWithGapsRequiresEpilogMasking =
3043 isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
3045 bool StoreAccessWithGapsRequiresMasking =
3046 isa<StoreInst>(I) && !Group->isFull();
3047 if (!PredicatedAccessRequiresMasking &&
3048 !LoadAccessWithGapsRequiresEpilogMasking &&
3049 !StoreAccessWithGapsRequiresMasking)
3050 return true;
3051
3052 // If masked interleaving is required, we expect that the user/target had
3053 // enabled it, because otherwise it either wouldn't have been created or
3054 // it should have been invalidated by the CostModel.
3056 "Masked interleave-groups for predicated accesses are not enabled.");
3057
3058 if (Group->isReverse())
3059 return false;
3060
3061 // TODO: Support interleaved access that requires a gap mask for scalable VFs.
3062 bool NeedsMaskForGaps = LoadAccessWithGapsRequiresEpilogMasking ||
3063 StoreAccessWithGapsRequiresMasking;
3064 if (VF.isScalable() && NeedsMaskForGaps)
3065 return false;
3066
3067 auto *Ty = getLoadStoreType(I);
3068 const Align Alignment = getLoadStoreAlignment(I);
3069 unsigned AS = getLoadStoreAddressSpace(I);
3070 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment, AS)
3071 : TTI.isLegalMaskedStore(Ty, Alignment, AS);
3072}
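// For example, a stride-2 group accessing both a[2*i] and a[2*i+1] is full and
// needs no gap masking, whereas a group with only a[2*i] has a gap: widening
// the load may speculatively touch a[2*i+1], so an epilogue iteration or a gap
// mask is required, and a store group with gaps likewise needs masking.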
3073
3075 Instruction *I, ElementCount VF) {
3076 // Get and ensure we have a valid memory instruction.
3077 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
3078
3079 auto *Ptr = getLoadStorePointerOperand(I);
3080 auto *ScalarTy = getLoadStoreType(I);
3081
3082 // In order to be widened, the pointer should be consecutive, first of all.
3083 if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
3084 return false;
3085
3086 // If the instruction is a store located in a predicated block, it will be
3087 // scalarized.
3088 if (isScalarWithPredication(I, VF))
3089 return false;
3090
3091 // If the instruction's allocated size doesn't equal its type size, it
3092 // requires padding and will be scalarized.
3093 auto &DL = I->getDataLayout();
3094 if (hasIrregularType(ScalarTy, DL))
3095 return false;
3096
3097 return true;
3098}
3099
3100void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
3101 // We should not collect Uniforms more than once per VF. Right now,
3102 // this function is called from collectUniformsAndScalars(), which
3103 // already does this check. Collecting Uniforms for VF=1 does not make any
3104 // sense.
3105
3106 assert(VF.isVector() && !Uniforms.contains(VF) &&
3107 "This function should not be visited twice for the same VF");
3108
3109 // Visit the list of Uniforms. If we find no uniform value, we won't
3110 // analyze again. Uniforms.count(VF) will return 1.
3111 Uniforms[VF].clear();
3112
3113 // Now we know that the loop is vectorizable!
3114 // Collect instructions inside the loop that will remain uniform after
3115 // vectorization.
3116
3117 // Global values, params and instructions outside of current loop are out of
3118 // scope.
3119 auto IsOutOfScope = [&](Value *V) -> bool {
3120     Instruction *I = dyn_cast<Instruction>(V);
3121     return (!I || !TheLoop->contains(I));
3122 };
3123
3124 // Worklist containing uniform instructions demanding lane 0.
3125 SetVector<Instruction *> Worklist;
3126
3127 // Add uniform instructions demanding lane 0 to the worklist. Instructions
3128 // that require predication must not be considered uniform after
3129 // vectorization, because that would create an erroneous replicating region
3130 // where only a single instance out of VF should be formed.
3131 auto AddToWorklistIfAllowed = [&](Instruction *I) -> void {
3132 if (IsOutOfScope(I)) {
3133 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
3134 << *I << "\n");
3135 return;
3136 }
3137 if (isPredicatedInst(I)) {
3138 LLVM_DEBUG(
3139 dbgs() << "LV: Found not uniform due to requiring predication: " << *I
3140 << "\n");
3141 return;
3142 }
3143 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
3144 Worklist.insert(I);
3145 };
3146
3147 // Start with the conditional branches exiting the loop. If the branch
3148 // condition is an instruction contained in the loop that is only used by the
3149 // branch, it is uniform. Note conditions from uncountable early exits are not
3150 // uniform.
3151   SmallVector<BasicBlock *> Exiting;
3152   TheLoop->getExitingBlocks(Exiting);
3153 for (BasicBlock *E : Exiting) {
3154 if (Legal->hasUncountableEarlyExit() && TheLoop->getLoopLatch() != E)
3155 continue;
3156 auto *Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
3157 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
3158 AddToWorklistIfAllowed(Cmp);
3159 }
3160
3161 auto PrevVF = VF.divideCoefficientBy(2);
3162 // Return true if all lanes perform the same memory operation, and we can
3163 // thus choose to execute only one.
3164 auto IsUniformMemOpUse = [&](Instruction *I) {
3165 // If the value was already known to not be uniform for the previous
3166 // (smaller VF), it cannot be uniform for the larger VF.
3167 if (PrevVF.isVector()) {
3168 auto Iter = Uniforms.find(PrevVF);
3169 if (Iter != Uniforms.end() && !Iter->second.contains(I))
3170 return false;
3171 }
3172 if (!Legal->isUniformMemOp(*I, VF))
3173 return false;
3174 if (isa<LoadInst>(I))
3175 // Loading the same address always produces the same result - at least
3176 // assuming aliasing and ordering which have already been checked.
3177 return true;
3178 // Storing the same value on every iteration.
3179 return TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand());
3180 };
3181
3182 auto IsUniformDecision = [&](Instruction *I, ElementCount VF) {
3183 InstWidening WideningDecision = getWideningDecision(I, VF);
3184 assert(WideningDecision != CM_Unknown &&
3185 "Widening decision should be ready at this moment");
3186
3187 if (IsUniformMemOpUse(I))
3188 return true;
3189
3190 return (WideningDecision == CM_Widen ||
3191 WideningDecision == CM_Widen_Reverse ||
3192 WideningDecision == CM_Interleave);
3193 };
3194
3195 // Returns true if Ptr is the pointer operand of a memory access instruction
3196 // I, I is known to not require scalarization, and the pointer is not also
3197 // stored.
3198 auto IsVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
3199 if (isa<StoreInst>(I) && I->getOperand(0) == Ptr)
3200 return false;
3201 return getLoadStorePointerOperand(I) == Ptr &&
3202 (IsUniformDecision(I, VF) || Legal->isInvariant(Ptr));
3203 };
3204
3205 // Holds a list of values which are known to have at least one uniform use.
3206 // Note that there may be other uses which aren't uniform. A "uniform use"
3207 // here is something which only demands lane 0 of the unrolled iterations;
3208 // it does not imply that all lanes produce the same value (e.g. this is not
3209 // the usual meaning of uniform)
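  // For instance, the address feeding a uniform load only needs its lane-0
  // value for that use, even if another user of the same address (say, a
  // widened pointer comparison) demands every lane.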
3210 SetVector<Value *> HasUniformUse;
3211
3212 // Scan the loop for instructions which are either a) known to have only
3213 // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
3214 for (auto *BB : TheLoop->blocks())
3215 for (auto &I : *BB) {
3216 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
3217 switch (II->getIntrinsicID()) {
3218 case Intrinsic::sideeffect:
3219 case Intrinsic::experimental_noalias_scope_decl:
3220 case Intrinsic::assume:
3221 case Intrinsic::lifetime_start:
3222 case Intrinsic::lifetime_end:
3223 if (TheLoop->hasLoopInvariantOperands(&I))
3224 AddToWorklistIfAllowed(&I);
3225 break;
3226 default:
3227 break;
3228 }
3229 }
3230
3231 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
3232 if (IsOutOfScope(EVI->getAggregateOperand())) {
3233 AddToWorklistIfAllowed(EVI);
3234 continue;
3235 }
3236 // Only ExtractValue instructions where the aggregate value comes from a
3237 // call are allowed to be non-uniform.
3238 assert(isa<CallInst>(EVI->getAggregateOperand()) &&
3239 "Expected aggregate value to be call return value");
3240 }
3241
3242 // If there's no pointer operand, there's nothing to do.
3243 auto *Ptr = getLoadStorePointerOperand(&I);
3244 if (!Ptr)
3245 continue;
3246
3247 // If the pointer can be proven to be uniform, always add it to the
3248 // worklist.
3249 if (isa<Instruction>(Ptr) && Legal->isUniform(Ptr, VF))
3250 AddToWorklistIfAllowed(cast<Instruction>(Ptr));
3251
3252 if (IsUniformMemOpUse(&I))
3253 AddToWorklistIfAllowed(&I);
3254
3255 if (IsVectorizedMemAccessUse(&I, Ptr))
3256 HasUniformUse.insert(Ptr);
3257 }
3258
3259 // Add to the worklist any operands which have *only* uniform (e.g. lane 0
3260 // demanding) users. Since loops are assumed to be in LCSSA form, this
3261 // disallows uses outside the loop as well.
3262 for (auto *V : HasUniformUse) {
3263 if (IsOutOfScope(V))
3264 continue;
3265 auto *I = cast<Instruction>(V);
3266 bool UsersAreMemAccesses = all_of(I->users(), [&](User *U) -> bool {
3267 auto *UI = cast<Instruction>(U);
3268 return TheLoop->contains(UI) && IsVectorizedMemAccessUse(UI, V);
3269 });
3270 if (UsersAreMemAccesses)
3271 AddToWorklistIfAllowed(I);
3272 }
3273
3274   // Expand Worklist in topological order: whenever a new instruction is
3275   // added, its users should already be inside Worklist. This ensures that a
3276   // uniform instruction will only be used by uniform instructions.
3277 unsigned Idx = 0;
3278 while (Idx != Worklist.size()) {
3279 Instruction *I = Worklist[Idx++];
3280
3281 for (auto *OV : I->operand_values()) {
3282 // isOutOfScope operands cannot be uniform instructions.
3283 if (IsOutOfScope(OV))
3284 continue;
3285       // First-order recurrence phis should typically be considered
3286       // non-uniform.
3287 auto *OP = dyn_cast<PHINode>(OV);
3288 if (OP && Legal->isFixedOrderRecurrence(OP))
3289 continue;
3290 // If all the users of the operand are uniform, then add the
3291 // operand into the uniform worklist.
3292 auto *OI = cast<Instruction>(OV);
3293 if (llvm::all_of(OI->users(), [&](User *U) -> bool {
3294 auto *J = cast<Instruction>(U);
3295 return Worklist.count(J) || IsVectorizedMemAccessUse(J, OI);
3296 }))
3297 AddToWorklistIfAllowed(OI);
3298 }
3299 }
3300
3301 // For an instruction to be added into Worklist above, all its users inside
3302 // the loop should also be in Worklist. However, this condition cannot be
3303 // true for phi nodes that form a cyclic dependence. We must process phi
3304 // nodes separately. An induction variable will remain uniform if all users
3305 // of the induction variable and induction variable update remain uniform.
3306 // The code below handles both pointer and non-pointer induction variables.
3307 BasicBlock *Latch = TheLoop->getLoopLatch();
3308 for (const auto &Induction : Legal->getInductionVars()) {
3309 auto *Ind = Induction.first;
3310 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
3311
3312 // Determine if all users of the induction variable are uniform after
3313 // vectorization.
3314 bool UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
3315 auto *I = cast<Instruction>(U);
3316 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3317 IsVectorizedMemAccessUse(I, Ind);
3318 });
3319 if (!UniformInd)
3320 continue;
3321
3322 // Determine if all users of the induction variable update instruction are
3323 // uniform after vectorization.
3324 bool UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
3325 auto *I = cast<Instruction>(U);
3326 return I == Ind || Worklist.count(I) ||
3327 IsVectorizedMemAccessUse(I, IndUpdate);
3328 });
3329 if (!UniformIndUpdate)
3330 continue;
3331
3332 // The induction variable and its update instruction will remain uniform.
3333 AddToWorklistIfAllowed(Ind);
3334 AddToWorklistIfAllowed(IndUpdate);
3335 }
3336
3337 Uniforms[VF].insert_range(Worklist);
3338}
3339
3340 bool LoopVectorizationCostModel::runtimeChecksRequired() {
3341   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
3342
3343 if (Legal->getRuntimePointerChecking()->Need) {
3344 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
3345 "runtime pointer checks needed. Enable vectorization of this "
3346 "loop with '#pragma clang loop vectorize(enable)' when "
3347 "compiling with -Os/-Oz",
3348 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3349 return true;
3350 }
3351
3352 if (!PSE.getPredicate().isAlwaysTrue()) {
3353 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
3354 "runtime SCEV checks needed. Enable vectorization of this "
3355 "loop with '#pragma clang loop vectorize(enable)' when "
3356 "compiling with -Os/-Oz",
3357 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3358 return true;
3359 }
3360
3361 // FIXME: Avoid specializing for stride==1 instead of bailing out.
3362 if (!Legal->getLAI()->getSymbolicStrides().empty()) {
3363 reportVectorizationFailure("Runtime stride check for small trip count",
3364 "runtime stride == 1 checks needed. Enable vectorization of "
3365 "this loop without such check by compiling with -Os/-Oz",
3366 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3367 return true;
3368 }
3369
3370 return false;
3371}
3372
3373bool LoopVectorizationCostModel::isScalableVectorizationAllowed() {
3374 if (IsScalableVectorizationAllowed)
3375 return *IsScalableVectorizationAllowed;
3376
3377 IsScalableVectorizationAllowed = false;
3378 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
3379 return false;
3380
3381 if (Hints->isScalableVectorizationDisabled()) {
3382 reportVectorizationInfo("Scalable vectorization is explicitly disabled",
3383 "ScalableVectorizationDisabled", ORE, TheLoop);
3384 return false;
3385 }
3386
3387 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
3388
3389 auto MaxScalableVF = ElementCount::getScalable(
3390 std::numeric_limits<ElementCount::ScalarTy>::max());
3391
3392 // Test that the loop-vectorizer can legalize all operations for this MaxVF.
3393 // FIXME: While for scalable vectors this is currently sufficient, this should
3394 // be replaced by a more detailed mechanism that filters out specific VFs,
3395 // instead of invalidating vectorization for a whole set of VFs based on the
3396 // MaxVF.
3397
3398 // Disable scalable vectorization if the loop contains unsupported reductions.
3399 if (!canVectorizeReductions(MaxScalableVF)) {
3400     reportVectorizationInfo(
3401         "Scalable vectorization not supported for the reduction "
3402 "operations found in this loop.",
3403 "ScalableVFUnfeasible", ORE, TheLoop);
3404 return false;
3405 }
3406
3407 // Disable scalable vectorization if the loop contains any instructions
3408 // with element types not supported for scalable vectors.
3409 if (any_of(ElementTypesInLoop, [&](Type *Ty) {
3410 return !Ty->isVoidTy() &&
3411                !TTI.isElementTypeLegalForScalableVector(Ty);
3412       })) {
3413 reportVectorizationInfo("Scalable vectorization is not supported "
3414 "for all element types found in this loop.",
3415 "ScalableVFUnfeasible", ORE, TheLoop);
3416 return false;
3417 }
3418
3419 if (!Legal->isSafeForAnyVectorWidth() && !getMaxVScale(*TheFunction, TTI)) {
3420 reportVectorizationInfo("The target does not provide maximum vscale value "
3421 "for safe distance analysis.",
3422 "ScalableVFUnfeasible", ORE, TheLoop);
3423 return false;
3424 }
3425
3426 IsScalableVectorizationAllowed = true;
3427 return true;
3428}
3429
3430ElementCount
3431LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
3432 if (!isScalableVectorizationAllowed())
3433 return ElementCount::getScalable(0);
3434
3435 auto MaxScalableVF = ElementCount::getScalable(
3436 std::numeric_limits<ElementCount::ScalarTy>::max());
3437 if (Legal->isSafeForAnyVectorWidth())
3438 return MaxScalableVF;
3439
3440 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3441 // Limit MaxScalableVF by the maximum safe dependence distance.
3442 MaxScalableVF = ElementCount::getScalable(MaxSafeElements / *MaxVScale);
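  // For example, with MaxSafeElements = 32 and a maximum vscale of 16, the
  // largest safe scalable VF is vscale x 2.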
3443
3444 if (!MaxScalableVF)
3445     reportVectorizationInfo(
3446         "Max legal vector width too small, scalable vectorization "
3447 "unfeasible.",
3448 "ScalableVFUnfeasible", ORE, TheLoop);
3449
3450 return MaxScalableVF;
3451}
3452
3453FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
3454 unsigned MaxTripCount, ElementCount UserVF, unsigned UserIC,
3455 bool FoldTailByMasking) {
3456 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
3457 unsigned SmallestType, WidestType;
3458 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
3459
3460 // Get the maximum safe dependence distance in bits computed by LAA.
3461 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
3462   // the memory access that is most restrictive (involved in the smallest
3463 // dependence distance).
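  // For example, if LAA reports a 256-bit maximum safe dependence distance and
  // the widest accessed type is 32 bits, at most 256 / 32 = 8 elements may be
  // processed per vector iteration; bit_floor keeps the bound a power of two.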
3464 unsigned MaxSafeElementsPowerOf2 =
3465 bit_floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
3466 if (!Legal->isSafeForAnyStoreLoadForwardDistances()) {
3467 unsigned SLDist = Legal->getMaxStoreLoadForwardSafeDistanceInBits();
3468 MaxSafeElementsPowerOf2 =
3469 std::min(MaxSafeElementsPowerOf2, SLDist / WidestType);
3470 }
3471 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElementsPowerOf2);
3472 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElementsPowerOf2);
3473
3474 if (!Legal->isSafeForAnyVectorWidth())
3475 this->MaxSafeElements = MaxSafeElementsPowerOf2;
3476
3477 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
3478 << ".\n");
3479 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
3480 << ".\n");
3481
3482 // First analyze the UserVF, fall back if the UserVF should be ignored.
3483 if (UserVF) {
3484 auto MaxSafeUserVF =
3485 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
3486
3487 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
3488 // If `VF=vscale x N` is safe, then so is `VF=N`
3489 if (UserVF.isScalable())
3490 return FixedScalableVFPair(
3491 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
3492
3493 return UserVF;
3494 }
3495
3496 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
3497
3498 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
3499 // is better to ignore the hint and let the compiler choose a suitable VF.
3500 if (!UserVF.isScalable()) {
3501 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3502 << " is unsafe, clamping to max safe VF="
3503 << MaxSafeFixedVF << ".\n");
3504 ORE->emit([&]() {
3505 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3506 TheLoop->getStartLoc(),
3507 TheLoop->getHeader())
3508 << "User-specified vectorization factor "
3509 << ore::NV("UserVectorizationFactor", UserVF)
3510 << " is unsafe, clamping to maximum safe vectorization factor "
3511 << ore::NV("VectorizationFactor", MaxSafeFixedVF);
3512 });
3513 return MaxSafeFixedVF;
3514 }
3515
3516     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
3517       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3518 << " is ignored because scalable vectors are not "
3519 "available.\n");
3520 ORE->emit([&]() {
3521 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3522 TheLoop->getStartLoc(),
3523 TheLoop->getHeader())
3524 << "User-specified vectorization factor "
3525 << ore::NV("UserVectorizationFactor", UserVF)
3526 << " is ignored because the target does not support scalable "
3527 "vectors. The compiler will pick a more suitable value.";
3528 });
3529 } else {
3530 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3531 << " is unsafe. Ignoring scalable UserVF.\n");
3532 ORE->emit([&]() {
3533 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3534 TheLoop->getStartLoc(),
3535 TheLoop->getHeader())
3536 << "User-specified vectorization factor "
3537 << ore::NV("UserVectorizationFactor", UserVF)
3538 << " is unsafe. Ignoring the hint to let the compiler pick a "
3539 "more suitable value.";
3540 });
3541 }
3542 }
3543
3544 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
3545 << " / " << WidestType << " bits.\n");
3546
3547   FixedScalableVFPair Result(ElementCount::getFixed(1),
3548                              ElementCount::getScalable(0));
3549 if (auto MaxVF =
3550 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3551 MaxSafeFixedVF, UserIC, FoldTailByMasking))
3552 Result.FixedVF = MaxVF;
3553
3554 if (auto MaxVF =
3555 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3556 MaxSafeScalableVF, UserIC, FoldTailByMasking))
3557 if (MaxVF.isScalable()) {
3558 Result.ScalableVF = MaxVF;
3559 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
3560 << "\n");
3561 }
3562
3563 return Result;
3564}
3565
3566 FixedScalableVFPair
3567 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
3568 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
3569     // TODO: It may be useful to allow this, since the check is still likely
3570     // to be dynamically uniform if the target can skip it.
3571     reportVectorizationFailure(
3572         "Not inserting runtime ptr check for divergent target",
3573 "runtime pointer checks needed. Not enabled for divergent target",
3574 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
3575     return FixedScalableVFPair::getNone();
3576   }
3577
3578 ScalarEvolution *SE = PSE.getSE();
3579   ElementCount TC = getSmallConstantTripCount(SE, TheLoop);
3580   unsigned MaxTC = PSE.getSmallConstantMaxTripCount();
3581 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
3582 if (TC != ElementCount::getFixed(MaxTC))
3583 LLVM_DEBUG(dbgs() << "LV: Found maximum trip count: " << MaxTC << '\n');
3584 if (TC.isScalar()) {
3585 reportVectorizationFailure("Single iteration (non) loop",
3586 "loop trip count is one, irrelevant for vectorization",
3587 "SingleIterationLoop", ORE, TheLoop);
3588     return FixedScalableVFPair::getNone();
3589   }
3590
3591 // If BTC matches the widest induction type and is -1 then the trip count
3592 // computation will wrap to 0 and the vector trip count will be 0. Do not try
3593 // to vectorize.
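  // For example, with an i8 widest induction type, a backedge-taken count of
  // 255 (i.e. -1) would give a trip count of 255 + 1 == 0 after wrapping.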
3594 const SCEV *BTC = SE->getBackedgeTakenCount(TheLoop);
3595 if (!isa<SCEVCouldNotCompute>(BTC) &&
3596 BTC->getType()->getScalarSizeInBits() >=
3597 Legal->getWidestInductionType()->getScalarSizeInBits() &&
3598       SE->isKnownPredicate(CmpInst::ICMP_EQ, BTC,
3599                            SE->getMinusOne(BTC->getType()))) {
3600     reportVectorizationFailure(
3601         "Trip count computation wrapped",
3602 "backedge-taken count is -1, loop trip count wrapped to 0",
3603 "TripCountWrapped", ORE, TheLoop);
3604     return FixedScalableVFPair::getNone();
3605   }
3606
3607 switch (ScalarEpilogueStatus) {
3608   case CM_ScalarEpilogueAllowed:
3609     return computeFeasibleMaxVF(MaxTC, UserVF, UserIC, false);
3610   case CM_ScalarEpilogueNotAllowedUsePredicate:
3611     [[fallthrough]];
3612   case CM_ScalarEpilogueNotNeededUsePredicate:
3613     LLVM_DEBUG(
3614 dbgs() << "LV: vector predicate hint/switch found.\n"
3615 << "LV: Not allowing scalar epilogue, creating predicated "
3616 << "vector loop.\n");
3617 break;
3618   case CM_ScalarEpilogueNotAllowedLowTripLoop:
3619     // fallthrough as a special case of OptForSize
3620   case CM_ScalarEpilogueNotAllowedOptSize:
3621     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
3622 LLVM_DEBUG(
3623 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
3624 else
3625 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
3626 << "count.\n");
3627
3628 // Bail if runtime checks are required, which are not good when optimising
3629     // for size.
3630     if (runtimeChecksRequired())
3631       return FixedScalableVFPair::getNone();
3632
3633 break;
3634 }
3635
3636 // Now try the tail folding
3637
3638 // Invalidate interleave groups that require an epilogue if we can't mask
3639   // the interleave-group.
3640   if (!useMaskedInterleavedAccesses(TTI)) {
3641 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
3642 "No decisions should have been taken at this point");
3643 // Note: There is no need to invalidate any cost modeling decisions here, as
3644 // none were taken so far.
3645 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
3646 }
3647
3648 FixedScalableVFPair MaxFactors =
3649 computeFeasibleMaxVF(MaxTC, UserVF, UserIC, true);
3650
3651 // Avoid tail folding if the trip count is known to be a multiple of any VF
3652 // we choose.
3653 std::optional<unsigned> MaxPowerOf2RuntimeVF =
3654 MaxFactors.FixedVF.getFixedValue();
3655 if (MaxFactors.ScalableVF) {
3656 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3657 if (MaxVScale && TTI.isVScaleKnownToBeAPowerOfTwo()) {
3658 MaxPowerOf2RuntimeVF = std::max<unsigned>(
3659 *MaxPowerOf2RuntimeVF,
3660 *MaxVScale * MaxFactors.ScalableVF.getKnownMinValue());
3661 } else
3662 MaxPowerOf2RuntimeVF = std::nullopt; // Stick with tail-folding for now.
3663 }
3664
3665 auto NoScalarEpilogueNeeded = [this, &UserIC](unsigned MaxVF) {
3666 // Return false if the loop is neither a single-latch-exit loop nor an
3667 // early-exit loop as tail-folding is not supported in that case.
3668 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
3669 !Legal->hasUncountableEarlyExit())
3670 return false;
3671 unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
3672 ScalarEvolution *SE = PSE.getSE();
3673 // Calling getSymbolicMaxBackedgeTakenCount enables support for loops
3674 // with uncountable exits. For countable loops, the symbolic maximum must
3675 // remain identical to the known back-edge taken count.
3676 const SCEV *BackedgeTakenCount = PSE.getSymbolicMaxBackedgeTakenCount();
3677 assert((Legal->hasUncountableEarlyExit() ||
3678 BackedgeTakenCount == PSE.getBackedgeTakenCount()) &&
3679 "Invalid loop count");
3680 const SCEV *ExitCount = SE->getAddExpr(
3681 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3682 const SCEV *Rem = SE->getURemExpr(
3683 SE->applyLoopGuards(ExitCount, TheLoop),
3684 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
3685 return Rem->isZero();
3686 };
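  // For example, with a backedge-taken count of 1023 (trip count 1024),
  // MaxVF = 8 and UserIC = 2, the remainder 1024 % 16 is 0, so no tail remains
  // and no scalar epilogue is needed.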
3687
3688 if (MaxPowerOf2RuntimeVF > 0u) {
3689 assert((UserVF.isNonZero() || isPowerOf2_32(*MaxPowerOf2RuntimeVF)) &&
3690 "MaxFixedVF must be a power of 2");
3691 if (NoScalarEpilogueNeeded(*MaxPowerOf2RuntimeVF)) {
3692 // Accept MaxFixedVF if we do not have a tail.
3693 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
3694 return MaxFactors;
3695 }
3696 }
3697
3698 auto ExpectedTC = getSmallBestKnownTC(PSE, TheLoop);
3699 if (ExpectedTC && ExpectedTC->isFixed() &&
3700 ExpectedTC->getFixedValue() <=
3701 TTI.getMinTripCountTailFoldingThreshold()) {
3702 if (MaxPowerOf2RuntimeVF > 0u) {
3703 // If we have a low-trip-count, and the fixed-width VF is known to divide
3704 // the trip count but the scalable factor does not, use the fixed-width
3705 // factor in preference to allow the generation of a non-predicated loop.
3706 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedLowTripLoop &&
3707 NoScalarEpilogueNeeded(MaxFactors.FixedVF.getFixedValue())) {
3708 LLVM_DEBUG(dbgs() << "LV: Picking a fixed-width so that no tail will "
3709 "remain for any chosen VF.\n");
3710 MaxFactors.ScalableVF = ElementCount::getScalable(0);
3711 return MaxFactors;
3712 }
3713 }
3714
3715     reportVectorizationFailure(
3716         "The trip count is below the minimal threshold value.",
3717 "loop trip count is too low, avoiding vectorization", "LowTripCount",
3718 ORE, TheLoop);
3719     return FixedScalableVFPair::getNone();
3720   }
3721
3722 // If we don't know the precise trip count, or if the trip count that we
3723 // found modulo the vectorization factor is not zero, try to fold the tail
3724 // by masking.
3725 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
3726 bool ContainsScalableVF = MaxFactors.ScalableVF.isNonZero();
3727 setTailFoldingStyles(ContainsScalableVF, UserIC);
3728 if (foldTailByMasking()) {
3729 if (foldTailWithEVL()) {
3730 LLVM_DEBUG(
3731 dbgs()
3732 << "LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
3733 "try to generate VP Intrinsics with scalable vector "
3734 "factors only.\n");
3735 // Tail folded loop using VP intrinsics restricts the VF to be scalable
3736 // for now.
3737 // TODO: extend it for fixed vectors, if required.
3738 assert(ContainsScalableVF && "Expected scalable vector factor.");
3739
3740 MaxFactors.FixedVF = ElementCount::getFixed(1);
3741 }
3742 return MaxFactors;
3743 }
3744
3745 // If there was a tail-folding hint/switch, but we can't fold the tail by
3746   // masking, fall back to vectorization with a scalar epilogue.
3747 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
3748 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
3749 "scalar epilogue instead.\n");
3750 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
3751 return MaxFactors;
3752 }
3753
3754 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
3755 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
3756     return FixedScalableVFPair::getNone();
3757   }
3758
3759 if (TC.isZero()) {
3760     reportVectorizationFailure(
3761         "unable to calculate the loop count due to complex control flow",
3762         "UnknownLoopCountComplexCFG", ORE, TheLoop);
3763     return FixedScalableVFPair::getNone();
3764   }
3765
3766   reportVectorizationFailure(
3767       "Cannot optimize for size and vectorize at the same time.",
3768 "cannot optimize for size and vectorize at the same time. "
3769 "Enable vectorization of this loop with '#pragma clang loop "
3770 "vectorize(enable)' when compiling with -Os/-Oz",
3771 "NoTailLoopWithOptForSize", ORE, TheLoop);
3772   return FixedScalableVFPair::getNone();
3773 }
3774
3775 bool LoopVectorizationCostModel::shouldConsiderRegPressureForVF(
3776     ElementCount VF) {
3777 if (ConsiderRegPressure.getNumOccurrences())
3778 return ConsiderRegPressure;
3779
3780 // TODO: We should eventually consider register pressure for all targets. The
3781 // TTI hook is temporary whilst target-specific issues are being fixed.
3782 if (TTI.shouldConsiderVectorizationRegPressure())
3783 return true;
3784
3785   if (!useMaxBandwidth(VF.isScalable()
3786                            ? TargetTransformInfo::RGK_ScalableVector
3787                            : TargetTransformInfo::RGK_FixedWidthVector))
3788     return false;
3789   // Only calculate register pressure for VFs enabled by MaxBandwidth.
3790   return ElementCount::isKnownLE(
3791       VF, VF.isScalable() ? MaxPermissibleVFWithoutMaxBW.ScalableVF
3792                           : MaxPermissibleVFWithoutMaxBW.FixedVF);
3793 }
3794
3795 bool LoopVectorizationCostModel::useMaxBandwidth(
3796     TargetTransformInfo::RegisterKind RegKind) {
3797   return MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
3798                                (TTI.shouldMaximizeVectorBandwidth(RegKind) ||
3799                                 (UseWiderVFIfCallVariantsPresent &&
3800                                  Legal->hasVectorCallVariants())));
3801}
3802
3803ElementCount LoopVectorizationCostModel::clampVFByMaxTripCount(
3804 ElementCount VF, unsigned MaxTripCount, unsigned UserIC,
3805 bool FoldTailByMasking) const {
3806 unsigned EstimatedVF = VF.getKnownMinValue();
3807 if (VF.isScalable() && TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
3808 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
3809 auto Min = Attr.getVScaleRangeMin();
3810 EstimatedVF *= Min;
3811 }
3812
3813 // When a scalar epilogue is required, at least one iteration of the scalar
3814 // loop has to execute. Adjust MaxTripCount accordingly to avoid picking a
3815 // max VF that results in a dead vector loop.
3816 if (MaxTripCount > 0 && requiresScalarEpilogue(true))
3817 MaxTripCount -= 1;
3818
3819 // When the user specifies an interleave count, we need to ensure that
3820 // VF * UserIC <= MaxTripCount to avoid a dead vector loop.
3821 unsigned IC = UserIC > 0 ? UserIC : 1;
3822 unsigned EstimatedVFTimesIC = EstimatedVF * IC;
3823
3824 if (MaxTripCount && MaxTripCount <= EstimatedVFTimesIC &&
3825 (!FoldTailByMasking || isPowerOf2_32(MaxTripCount))) {
3826 // If upper bound loop trip count (TC) is known at compile time there is no
3827 // point in choosing VF greater than TC / IC (as done in the loop below).
3828 // Select maximum power of two which doesn't exceed TC / IC. If VF is
3829 // scalable, we only fall back on a fixed VF when the TC is less than or
3830 // equal to the known number of lanes.
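    // For example, if the constant upper bound on the trip count is 20 and
    // UserIC is 2, the clamped bound is bit_floor(20 / 2) = 8.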
3831 auto ClampedUpperTripCount = llvm::bit_floor(MaxTripCount / IC);
3832 if (ClampedUpperTripCount == 0)
3833 ClampedUpperTripCount = 1;
3834 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
3835 "exceeding the constant trip count"
3836 << (UserIC > 0 ? " divided by UserIC" : "") << ": "
3837 << ClampedUpperTripCount << "\n");
3838 return ElementCount::get(ClampedUpperTripCount,
3839 FoldTailByMasking ? VF.isScalable() : false);
3840 }
3841 return VF;
3842}
3843
3844ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
3845 unsigned MaxTripCount, unsigned SmallestType, unsigned WidestType,
3846 ElementCount MaxSafeVF, unsigned UserIC, bool FoldTailByMasking) {
3847 bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
3848 const TypeSize WidestRegister = TTI.getRegisterBitWidth(
3849 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3850                            : TargetTransformInfo::RGK_FixedWidthVector);
3851
3852 // Convenience function to return the minimum of two ElementCounts.
3853 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
3854 assert((LHS.isScalable() == RHS.isScalable()) &&
3855 "Scalable flags must match");
3856 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
3857 };
3858
3859 // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
3860   // Note that both WidestRegister and WidestType may not be powers of 2.
3861 auto MaxVectorElementCount = ElementCount::get(
3862 llvm::bit_floor(WidestRegister.getKnownMinValue() / WidestType),
3863 ComputeScalableMaxVF);
3864 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
3865 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
3866 << (MaxVectorElementCount * WidestType) << " bits.\n");
3867
3868 if (!MaxVectorElementCount) {
3869 LLVM_DEBUG(dbgs() << "LV: The target has no "
3870 << (ComputeScalableMaxVF ? "scalable" : "fixed")
3871 << " vector registers.\n");
3872 return ElementCount::getFixed(1);
3873 }
3874
3875 ElementCount MaxVF = clampVFByMaxTripCount(
3876 MaxVectorElementCount, MaxTripCount, UserIC, FoldTailByMasking);
3877 // If the MaxVF was already clamped, there's no point in trying to pick a
3878 // larger one.
3879 if (MaxVF != MaxVectorElementCount)
3880 return MaxVF;
3881
3882   TargetTransformInfo::RegisterKind RegKind =
3883       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3884                            : TargetTransformInfo::RGK_FixedWidthVector;
3885
3886 if (MaxVF.isScalable())
3887 MaxPermissibleVFWithoutMaxBW.ScalableVF = MaxVF;
3888 else
3889 MaxPermissibleVFWithoutMaxBW.FixedVF = MaxVF;
3890
3891 if (useMaxBandwidth(RegKind)) {
3892 auto MaxVectorElementCountMaxBW = ElementCount::get(
3893 llvm::bit_floor(WidestRegister.getKnownMinValue() / SmallestType),
3894 ComputeScalableMaxVF);
3895 MaxVF = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
3896
3897 if (ElementCount MinVF =
3898 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
3899 if (ElementCount::isKnownLT(MaxVF, MinVF)) {
3900 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
3901 << ") with target's minimum: " << MinVF << '\n');
3902 MaxVF = MinVF;
3903 }
3904 }
3905
3906 MaxVF =
3907 clampVFByMaxTripCount(MaxVF, MaxTripCount, UserIC, FoldTailByMasking);
3908
3909 if (MaxVectorElementCount != MaxVF) {
3910 // Invalidate any widening decisions we might have made, in case the loop
3911       // requires predication (decided later), but we have already made some
3912 // load/store widening decisions.
3913 invalidateCostModelingDecisions();
3914 }
3915 }
3916 return MaxVF;
3917}
3918
3919bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3920 const VectorizationFactor &B,
3921 const unsigned MaxTripCount,
3922 bool HasTail,
3923 bool IsEpilogue) const {
3924 InstructionCost CostA = A.Cost;
3925 InstructionCost CostB = B.Cost;
3926
3927 // Improve estimate for the vector width if it is scalable.
3928 unsigned EstimatedWidthA = A.Width.getKnownMinValue();
3929 unsigned EstimatedWidthB = B.Width.getKnownMinValue();
3930 if (std::optional<unsigned> VScale = CM.getVScaleForTuning()) {
3931 if (A.Width.isScalable())
3932 EstimatedWidthA *= *VScale;
3933 if (B.Width.isScalable())
3934 EstimatedWidthB *= *VScale;
3935 }
3936
3937 // When optimizing for size choose whichever is smallest, which will be the
3938 // one with the smallest cost for the whole loop. On a tie pick the larger
3939 // vector width, on the assumption that throughput will be greater.
3940 if (CM.CostKind == TTI::TCK_CodeSize)
3941 return CostA < CostB ||
3942 (CostA == CostB && EstimatedWidthA > EstimatedWidthB);
3943
3944 // Assume vscale may be larger than 1 (or the value being tuned for),
3945 // so that scalable vectorization is slightly favorable over fixed-width
3946 // vectorization.
3947 bool PreferScalable = !TTI.preferFixedOverScalableIfEqualCost(IsEpilogue) &&
3948 A.Width.isScalable() && !B.Width.isScalable();
3949
3950 auto CmpFn = [PreferScalable](const InstructionCost &LHS,
3951 const InstructionCost &RHS) {
3952 return PreferScalable ? LHS <= RHS : LHS < RHS;
3953 };
3954
3955 // To avoid the need for FP division:
3956 // (CostA / EstimatedWidthA) < (CostB / EstimatedWidthB)
3957 // <=> (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA)
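  // For example, CostA = 10 at estimated width 4 (2.5 per lane) versus
  // CostB = 18 at estimated width 8 (2.25 per lane): 10 * 8 = 80 is not less
  // than 18 * 4 = 72, so A is not considered more profitable than B.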
3958 if (!MaxTripCount)
3959 return CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);
3960
3961 auto GetCostForTC = [MaxTripCount, HasTail](unsigned VF,
3962 InstructionCost VectorCost,
3963 InstructionCost ScalarCost) {
3964 // If the trip count is a known (possibly small) constant, the trip count
3965 // will be rounded up to an integer number of iterations under
3966 // FoldTailByMasking. The total cost in that case will be
3967 // VecCost*ceil(TripCount/VF). When not folding the tail, the total
3968 // cost will be VecCost*floor(TC/VF) + ScalarCost*(TC%VF). There will be
3969 // some extra overheads, but for the purpose of comparing the costs of
3970 // different VFs we can use this to compare the total loop-body cost
3971 // expected after vectorization.
3972 if (HasTail)
3973 return VectorCost * (MaxTripCount / VF) +
3974 ScalarCost * (MaxTripCount % VF);
3975 return VectorCost * divideCeil(MaxTripCount, VF);
3976 };
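  // For example, with MaxTripCount = 10, an estimated width of 4, a vector
  // body cost of 8 and a scalar body cost of 3: with a tail the estimate is
  // 8 * (10 / 4) + 3 * (10 % 4) = 16 + 6 = 22, while with tail folding it is
  // 8 * ceil(10 / 4) = 24.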
3977
3978 auto RTCostA = GetCostForTC(EstimatedWidthA, CostA, A.ScalarCost);
3979 auto RTCostB = GetCostForTC(EstimatedWidthB, CostB, B.ScalarCost);
3980 return CmpFn(RTCostA, RTCostB);
3981}
3982
3983bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3984 const VectorizationFactor &B,
3985 bool HasTail,
3986 bool IsEpilogue) const {
3987 const unsigned MaxTripCount = PSE.getSmallConstantMaxTripCount();
3988 return LoopVectorizationPlanner::isMoreProfitable(A, B, MaxTripCount, HasTail,
3989 IsEpilogue);
3990}
3991
3992 void LoopVectorizationPlanner::emitInvalidCostRemarks(
3993     OptimizationRemarkEmitter *ORE) {
3994   using RecipeVFPair = std::pair<VPRecipeBase *, ElementCount>;
3995 SmallVector<RecipeVFPair> InvalidCosts;
3996 for (const auto &Plan : VPlans) {
3997 for (ElementCount VF : Plan->vectorFactors()) {
3998 // The VPlan-based cost model is designed for computing vector cost.
3999       // Querying the VPlan-based cost model with a scalar VF will cause
4000       // errors, because most of the widen recipes expect the VF to be a
4001       // vector.
4002 if (VF.isScalar())
4003 continue;
4004
4005 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, CM.PSE,
4006 OrigLoop);
4007 precomputeCosts(*Plan, VF, CostCtx);
4008 auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry());
4009       for (auto *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
4010         for (auto &R : *VPBB) {
4011 if (!R.cost(VF, CostCtx).isValid())
4012 InvalidCosts.emplace_back(&R, VF);
4013 }
4014 }
4015 }
4016 }
4017 if (InvalidCosts.empty())
4018 return;
4019
4020 // Emit a report of VFs with invalid costs in the loop.
4021
4022 // Group the remarks per recipe, keeping the recipe order from InvalidCosts.
4023   DenseMap<VPRecipeBase *, unsigned> Numbering;
4024   unsigned I = 0;
4025 for (auto &Pair : InvalidCosts)
4026 if (Numbering.try_emplace(Pair.first, I).second)
4027 ++I;
4028
4029 // Sort the list, first on recipe(number) then on VF.
4030 sort(InvalidCosts, [&Numbering](RecipeVFPair &A, RecipeVFPair &B) {
4031 unsigned NA = Numbering[A.first];
4032 unsigned NB = Numbering[B.first];
4033 if (NA != NB)
4034 return NA < NB;
4035 return ElementCount::isKnownLT(A.second, B.second);
4036 });
4037
4038 // For a list of ordered recipe-VF pairs:
4039 // [(load, VF1), (load, VF2), (store, VF1)]
4040 // group the recipes together to emit separate remarks for:
4041 // load (VF1, VF2)
4042 // store (VF1)
4043 auto Tail = ArrayRef<RecipeVFPair>(InvalidCosts);
4044 auto Subset = ArrayRef<RecipeVFPair>();
4045 do {
4046 if (Subset.empty())
4047 Subset = Tail.take_front(1);
4048
4049 VPRecipeBase *R = Subset.front().first;
4050
4051 unsigned Opcode =
4052         TypeSwitch<const VPRecipeBase *, unsigned>(R)
4053             .Case([](const VPHeaderPHIRecipe *R) { return Instruction::PHI; })
4054 .Case(
4055 [](const VPWidenStoreRecipe *R) { return Instruction::Store; })
4056 .Case([](const VPWidenLoadRecipe *R) { return Instruction::Load; })
4057 .Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
4058 [](const auto *R) { return Instruction::Call; })
4059             .Case<VPInstruction, VPWidenRecipe, VPReplicateRecipe,
4060                   VPWidenCastRecipe>(
4061                 [](const auto *R) { return R->getOpcode(); })
4062 .Case([](const VPInterleaveRecipe *R) {
4063 return R->getStoredValues().empty() ? Instruction::Load
4064 : Instruction::Store;
4065 })
4066 .Case([](const VPReductionRecipe *R) {
4067 return RecurrenceDescriptor::getOpcode(R->getRecurrenceKind());
4068 });
4069
4070 // If the next recipe is different, or if there are no other pairs,
4071 // emit a remark for the collated subset. e.g.
4072 // [(load, VF1), (load, VF2))]
4073 // to emit:
4074 // remark: invalid costs for 'load' at VF=(VF1, VF2)
4075 if (Subset == Tail || Tail[Subset.size()].first != R) {
4076 std::string OutString;
4077 raw_string_ostream OS(OutString);
4078 assert(!Subset.empty() && "Unexpected empty range");
4079 OS << "Recipe with invalid costs prevented vectorization at VF=(";
4080 for (const auto &Pair : Subset)
4081 OS << (Pair.second == Subset.front().second ? "" : ", ") << Pair.second;
4082 OS << "):";
4083 if (Opcode == Instruction::Call) {
4084 StringRef Name = "";
4085 if (auto *Int = dyn_cast<VPWidenIntrinsicRecipe>(R)) {
4086 Name = Int->getIntrinsicName();
4087 } else {
4088 auto *WidenCall = dyn_cast<VPWidenCallRecipe>(R);
4089 Function *CalledFn =
4090 WidenCall ? WidenCall->getCalledScalarFunction()
4091 : cast<Function>(R->getOperand(R->getNumOperands() - 1)
4092 ->getLiveInIRValue());
4093 Name = CalledFn->getName();
4094 }
4095 OS << " call to " << Name;
4096 } else
4097 OS << " " << Instruction::getOpcodeName(Opcode);
4098 reportVectorizationInfo(OutString, "InvalidCost", ORE, OrigLoop, nullptr,
4099 R->getDebugLoc());
4100 Tail = Tail.drop_front(Subset.size());
4101 Subset = {};
4102 } else
4103 // Grow the subset by one element
4104 Subset = Tail.take_front(Subset.size() + 1);
4105 } while (!Tail.empty());
4106}
4107
4108/// Check if any recipe of \p Plan will generate a vector value, which will be
4109/// assigned a vector register.
4110 static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
4111                                 const TargetTransformInfo &TTI) {
4112 assert(VF.isVector() && "Checking a scalar VF?");
4113 VPTypeAnalysis TypeInfo(Plan);
4114 DenseSet<VPRecipeBase *> EphemeralRecipes;
4115 collectEphemeralRecipesForVPlan(Plan, EphemeralRecipes);
4116 // Set of already visited types.
4117 DenseSet<Type *> Visited;
4118   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4119            vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
4120     for (VPRecipeBase &R : *VPBB) {
4121 if (EphemeralRecipes.contains(&R))
4122 continue;
4123 // Continue early if the recipe is considered to not produce a vector
4124 // result. Note that this includes VPInstruction where some opcodes may
4125 // produce a vector, to preserve existing behavior as VPInstructions model
4126 // aspects not directly mapped to existing IR instructions.
4127 switch (R.getVPRecipeID()) {
4128 case VPRecipeBase::VPDerivedIVSC:
4129 case VPRecipeBase::VPScalarIVStepsSC:
4130 case VPRecipeBase::VPReplicateSC:
4131 case VPRecipeBase::VPInstructionSC:
4132 case VPRecipeBase::VPCanonicalIVPHISC:
4133 case VPRecipeBase::VPVectorPointerSC:
4134 case VPRecipeBase::VPVectorEndPointerSC:
4135 case VPRecipeBase::VPExpandSCEVSC:
4136 case VPRecipeBase::VPEVLBasedIVPHISC:
4137 case VPRecipeBase::VPPredInstPHISC:
4138 case VPRecipeBase::VPBranchOnMaskSC:
4139 continue;
4140 case VPRecipeBase::VPReductionSC:
4141 case VPRecipeBase::VPActiveLaneMaskPHISC:
4142 case VPRecipeBase::VPWidenCallSC:
4143 case VPRecipeBase::VPWidenCanonicalIVSC:
4144 case VPRecipeBase::VPWidenCastSC:
4145 case VPRecipeBase::VPWidenGEPSC:
4146 case VPRecipeBase::VPWidenIntrinsicSC:
4147 case VPRecipeBase::VPWidenSC:
4148 case VPRecipeBase::VPBlendSC:
4149 case VPRecipeBase::VPFirstOrderRecurrencePHISC:
4150 case VPRecipeBase::VPHistogramSC:
4151 case VPRecipeBase::VPWidenPHISC:
4152 case VPRecipeBase::VPWidenIntOrFpInductionSC:
4153 case VPRecipeBase::VPWidenPointerInductionSC:
4154 case VPRecipeBase::VPReductionPHISC:
4155 case VPRecipeBase::VPInterleaveEVLSC:
4156 case VPRecipeBase::VPInterleaveSC:
4157 case VPRecipeBase::VPWidenLoadEVLSC:
4158 case VPRecipeBase::VPWidenLoadSC:
4159 case VPRecipeBase::VPWidenStoreEVLSC:
4160 case VPRecipeBase::VPWidenStoreSC:
4161 break;
4162 default:
4163 llvm_unreachable("unhandled recipe");
4164 }
4165
4166 auto WillGenerateTargetVectors = [&TTI, VF](Type *VectorTy) {
4167 unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
4168 if (!NumLegalParts)
4169 return false;
4170 if (VF.isScalable()) {
4171 // <vscale x 1 x iN> is assumed to be profitable over iN because
4172 // scalable registers are a distinct register class from scalar
4173 // ones. If we ever find a target which wants to lower scalable
4174 // vectors back to scalars, we'll need to update this code to
4175 // explicitly ask TTI about the register class uses for each part.
4176 return NumLegalParts <= VF.getKnownMinValue();
4177 }
4178         // Two or more elements sharing a register are vectorized.
4179 return NumLegalParts < VF.getFixedValue();
4180 };
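      // For example, assuming 128-bit vector registers: a VF of 4 with i32
      // needs one <4 x i32> part (1 < 4, vectorized); a VF of 8 with i64
      // splits into four <2 x i64> parts (4 < 8, still vectorized); but if
      // each element already fills a whole register, the number of parts
      // equals VF and the result is treated as scalarized.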
4181
4182       // If the recipe has no defs and is not a store (e.g., a branch), continue - there is no value to check.
4183 if (R.getNumDefinedValues() == 0 &&
4185 continue;
4186       // For multi-def recipes (currently only interleaved loads), it
4187       // suffices to check the first def only.
4188       // For stores, check the stored value; for interleaved stores it
4189       // suffices to check the first stored value only. In all cases this is
4190       // the second operand.
4191 VPValue *ToCheck =
4192 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
4193 Type *ScalarTy = TypeInfo.inferScalarType(ToCheck);
4194 if (!Visited.insert({ScalarTy}).second)
4195 continue;
4196 Type *WideTy = toVectorizedTy(ScalarTy, VF);
4197 if (any_of(getContainedTypes(WideTy), WillGenerateTargetVectors))
4198 return true;
4199 }
4200 }
4201
4202 return false;
4203}
4204
4205static bool hasReplicatorRegion(VPlan &Plan) {
4206   return any_of(VPBlockUtils::blocksOnly<VPRegionBlock>(vp_depth_first_shallow(
4207                     Plan.getVectorLoopRegion()->getEntry())),
4208 [](auto *VPRB) { return VPRB->isReplicator(); });
4209}
4210
4211#ifndef NDEBUG
4212VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() {
4213 InstructionCost ExpectedCost = CM.expectedCost(ElementCount::getFixed(1));
4214 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
4215 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
4216 assert(
4217 any_of(VPlans,
4218 [](std::unique_ptr<VPlan> &P) { return P->hasScalarVFOnly(); }) &&
4219 "Expected Scalar VF to be a candidate");
4220
4221 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost,
4222 ExpectedCost);
4223 VectorizationFactor ChosenFactor = ScalarCost;
4224
4225 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
4226 if (ForceVectorization &&
4227 (VPlans.size() > 1 || !VPlans[0]->hasScalarVFOnly())) {
4228 // Ignore scalar width, because the user explicitly wants vectorization.
4229 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
4230 // evaluation.
4231 ChosenFactor.Cost = InstructionCost::getMax();
4232 }
4233
4234 for (auto &P : VPlans) {
4235 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
4236 P->vectorFactors().end());
4237
4238     SmallVector<VPRegisterUsage, 8> RUs;
4239     if (any_of(VFs, [this](ElementCount VF) {
4240 return CM.shouldConsiderRegPressureForVF(VF);
4241 }))
4242 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
4243
4244 for (unsigned I = 0; I < VFs.size(); I++) {
4245 ElementCount VF = VFs[I];
4246 // The cost for scalar VF=1 is already calculated, so ignore it.
4247 if (VF.isScalar())
4248 continue;
4249
4250 /// If the register pressure needs to be considered for VF,
4251 /// don't consider the VF as valid if it exceeds the number
4252 /// of registers for the target.
4253 if (CM.shouldConsiderRegPressureForVF(VF) &&
4254 RUs[I].exceedsMaxNumRegs(TTI, ForceTargetNumVectorRegs))
4255 continue;
4256
4257 InstructionCost C = CM.expectedCost(VF);
4258
4259 // Add on other costs that are modelled in VPlan, but not in the legacy
4260 // cost model.
4261 VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind, CM.PSE,
4262 OrigLoop);
4263 VPRegionBlock *VectorRegion = P->getVectorLoopRegion();
4264 assert(VectorRegion && "Expected to have a vector region!");
4265 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4266 vp_depth_first_shallow(VectorRegion->getEntry()))) {
4267 for (VPRecipeBase &R : *VPBB) {
4268 auto *VPI = dyn_cast<VPInstruction>(&R);
4269 if (!VPI)
4270 continue;
4271 switch (VPI->getOpcode()) {
4272 // Selects are only modelled in the legacy cost model for safe
4273 // divisors.
4274 case Instruction::Select: {
4275 if (auto *WR =
4276 dyn_cast_or_null<VPWidenRecipe>(VPI->getSingleUser())) {
4277 switch (WR->getOpcode()) {
4278 case Instruction::UDiv:
4279 case Instruction::SDiv:
4280 case Instruction::URem:
4281 case Instruction::SRem:
4282 continue;
4283 default:
4284 break;
4285 }
4286 }
4287 C += VPI->cost(VF, CostCtx);
4288 break;
4289 }
4291 unsigned Multiplier =
4292 cast<VPConstantInt>(VPI->getOperand(2))->getZExtValue();
4293 C += VPI->cost(VF * Multiplier, CostCtx);
4294 break;
4295 }
4297 C += VPI->cost(VF, CostCtx);
4298 break;
4299 default:
4300 break;
4301 }
4302 }
4303 }
4304
4305 VectorizationFactor Candidate(VF, C, ScalarCost.ScalarCost);
4306 unsigned Width =
4307 estimateElementCount(Candidate.Width, CM.getVScaleForTuning());
4308 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << VF
4309 << " costs: " << (Candidate.Cost / Width));
4310 if (VF.isScalable())
4311 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
4312 << CM.getVScaleForTuning().value_or(1) << ")");
4313 LLVM_DEBUG(dbgs() << ".\n");
4314
4315 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
4316 LLVM_DEBUG(
4317 dbgs()
4318 << "LV: Not considering vector loop of width " << VF
4319 << " because it will not generate any vector instructions.\n");
4320 continue;
4321 }
4322
4323 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
4324 LLVM_DEBUG(
4325 dbgs()
4326 << "LV: Not considering vector loop of width " << VF
4327 << " because it would cause replicated blocks to be generated,"
4328 << " which isn't allowed when optimizing for size.\n");
4329 continue;
4330 }
4331
4332 if (isMoreProfitable(Candidate, ChosenFactor, P->hasScalarTail()))
4333 ChosenFactor = Candidate;
4334 }
4335 }
4336
4337 if (!EnableCondStoresVectorization && CM.hasPredStores()) {
4338     reportVectorizationFailure(
4339         "There are conditional stores.",
4340 "store that is conditionally executed prevents vectorization",
4341 "ConditionalStore", ORE, OrigLoop);
4342 ChosenFactor = ScalarCost;
4343 }
4344
4345 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
4346 !isMoreProfitable(ChosenFactor, ScalarCost,
4347 !CM.foldTailByMasking())) dbgs()
4348 << "LV: Vectorization seems to be not beneficial, "
4349 << "but was forced by a user.\n");
4350 return ChosenFactor;
4351}
4352#endif
4353
4354/// Returns true if the VPlan contains a VPReductionPHIRecipe with
4355/// FindLast recurrence kind.
4356static bool hasFindLastReductionPhi(VPlan &Plan) {
4357   return any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4358                 [](VPRecipeBase &R) {
4359 auto *RedPhi = dyn_cast<VPReductionPHIRecipe>(&R);
4360 return RedPhi &&
4361 RecurrenceDescriptor::isFindLastRecurrenceKind(
4362 RedPhi->getRecurrenceKind());
4363 });
4364}
4365
4366bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
4367 ElementCount VF) const {
4368 // Cross iteration phis such as fixed-order recurrences and FMaxNum/FMinNum
4369 // reductions need special handling and are currently unsupported.
4370 if (any_of(OrigLoop->getHeader()->phis(), [&](PHINode &Phi) {
4371 if (!Legal->isReductionVariable(&Phi))
4372 return Legal->isFixedOrderRecurrence(&Phi);
4373 RecurKind Kind =
4374 Legal->getRecurrenceDescriptor(&Phi).getRecurrenceKind();
4375 return RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind(Kind);
4376 }))
4377 return false;
4378
4379 // FindLast reductions require special handling for the synthesized mask PHI
4380 // and are currently unsupported for epilogue vectorization.
4381 if (hasFindLastReductionPhi(getPlanFor(VF)))
4382 return false;
4383
4384 // Phis with uses outside of the loop require special handling and are
4385 // currently unsupported.
4386 for (const auto &Entry : Legal->getInductionVars()) {
4387 // Look for uses of the value of the induction at the last iteration.
4388 Value *PostInc =
4389 Entry.first->getIncomingValueForBlock(OrigLoop->getLoopLatch());
4390 for (User *U : PostInc->users())
4391 if (!OrigLoop->contains(cast<Instruction>(U)))
4392 return false;
4393 // Look for uses of penultimate value of the induction.
4394 for (User *U : Entry.first->users())
4395 if (!OrigLoop->contains(cast<Instruction>(U)))
4396 return false;
4397 }
4398
4399   // Epilogue vectorization code has not been audited to ensure it handles
4400   // non-latch exits properly. It may be fine, but it needs to be audited and
4401   // tested.
4402 // TODO: Add support for loops with an early exit.
4403 if (OrigLoop->getExitingBlock() != OrigLoop->getLoopLatch())
4404 return false;
4405
4406 return true;
4407}
4408
4409 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
4410     const ElementCount VF, const unsigned IC) const {
4411 // FIXME: We need a much better cost-model to take different parameters such
4412 // as register pressure, code size increase and cost of extra branches into
4413 // account. For now we apply a very crude heuristic and only consider loops
4414 // with vectorization factors larger than a certain value.
4415
4416 // Allow the target to opt out entirely.
4417 if (!TTI.preferEpilogueVectorization())
4418 return false;
4419
4420 // We also consider epilogue vectorization unprofitable for targets that don't
4421   // consider interleaving beneficial (e.g., MVE).
4422 if (TTI.getMaxInterleaveFactor(VF) <= 1)
4423 return false;
4424
4425 unsigned MinVFThreshold = EpilogueVectorizationMinVF.getNumOccurrences() > 0
4426                                 ? EpilogueVectorizationMinVF
4427                                 : TTI.getEpilogueVectorizationMinVF();
4428 return estimateElementCount(VF * IC, VScaleForTuning) >= MinVFThreshold;
4429}
4430
4431 VectorizationFactor LoopVectorizationPlanner::selectEpilogueVectorizationFactor(
4432     const ElementCount MainLoopVF, unsigned IC) {
4433   VectorizationFactor Result = VectorizationFactor::Disabled();
4434   if (!EnableEpilogueVectorization) {
4435     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n");
4436 return Result;
4437 }
4438
4439 if (!CM.isScalarEpilogueAllowed()) {
4440 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because no "
4441 "epilogue is allowed.\n");
4442 return Result;
4443 }
4444
4445 // Not really a cost consideration, but check for unsupported cases here to
4446 // simplify the logic.
4447 if (!isCandidateForEpilogueVectorization(MainLoopVF)) {
4448 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because the loop "
4449 "is not a supported candidate.\n");
4450 return Result;
4451 }
4452
4453   if (EpilogueVectorizationForceVF > 1) {
4454     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n");
4455     ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
4456     if (hasPlanWithVF(ForcedEC))
4457 return {ForcedEC, 0, 0};
4458
4459 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization forced factor is not "
4460 "viable.\n");
4461 return Result;
4462 }
4463
4464 if (OrigLoop->getHeader()->getParent()->hasOptSize()) {
4465 LLVM_DEBUG(
4466 dbgs() << "LEV: Epilogue vectorization skipped due to opt for size.\n");
4467 return Result;
4468 }
4469
4470 if (!CM.isEpilogueVectorizationProfitable(MainLoopVF, IC)) {
4471 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
4472 "this loop\n");
4473 return Result;
4474 }
4475
4476 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
4477 // the main loop handles 8 lanes per iteration. We could still benefit from
4478 // vectorizing the epilogue loop with VF=4.
4479 ElementCount EstimatedRuntimeVF = ElementCount::getFixed(
4480 estimateElementCount(MainLoopVF, CM.getVScaleForTuning()));
4481
4482 Type *TCType = Legal->getWidestInductionType();
4483 const SCEV *RemainingIterations = nullptr;
4484 unsigned MaxTripCount = 0;
4485   const SCEV *TC = vputils::getSCEVExprForVPValue(
4486       getPlanFor(MainLoopVF).getTripCount(), PSE);
4487 assert(!isa<SCEVCouldNotCompute>(TC) && "Trip count SCEV must be computable");
4488 const SCEV *KnownMinTC;
4489 bool ScalableTC = match(TC, m_scev_c_Mul(m_SCEV(KnownMinTC), m_SCEVVScale()));
4490 bool ScalableRemIter = false;
4491 ScalarEvolution &SE = *PSE.getSE();
4492 // Use versions of TC and VF in which both are either scalable or fixed.
4493 if (ScalableTC == MainLoopVF.isScalable()) {
4494 ScalableRemIter = ScalableTC;
4495 RemainingIterations =
4496 SE.getURemExpr(TC, SE.getElementCount(TCType, MainLoopVF * IC));
4497 } else if (ScalableTC) {
4498 const SCEV *EstimatedTC = SE.getMulExpr(
4499 KnownMinTC,
4500 SE.getConstant(TCType, CM.getVScaleForTuning().value_or(1)));
4501 RemainingIterations = SE.getURemExpr(
4502 EstimatedTC, SE.getElementCount(TCType, MainLoopVF * IC));
4503 } else
4504 RemainingIterations =
4505 SE.getURemExpr(TC, SE.getElementCount(TCType, EstimatedRuntimeVF * IC));
4506
4507 // No iterations left to process in the epilogue.
4508 if (RemainingIterations->isZero())
4509 return Result;
4510
4511 if (MainLoopVF.isFixed()) {
4512 MaxTripCount = MainLoopVF.getFixedValue() * IC - 1;
4513 if (SE.isKnownPredicate(CmpInst::ICMP_ULT, RemainingIterations,
4514 SE.getConstant(TCType, MaxTripCount))) {
4515 MaxTripCount = SE.getUnsignedRangeMax(RemainingIterations).getZExtValue();
4516 }
4517 LLVM_DEBUG(dbgs() << "LEV: Maximum Trip Count for Epilogue: "
4518 << MaxTripCount << "\n");
4519 }
4520
4521 auto SkipVF = [&](const SCEV *VF, const SCEV *RemIter) -> bool {
4522 return SE.isKnownPredicate(CmpInst::ICMP_UGT, VF, RemIter);
4523 };
4524 for (auto &NextVF : ProfitableVFs) {
4525 // Skip candidate VFs without a corresponding VPlan.
4526 if (!hasPlanWithVF(NextVF.Width))
4527 continue;
4528
4529 // Skip candidate VFs with widths >= the (estimated) runtime VF (scalable
4530 // vectors) or > the VF of the main loop (fixed vectors).
4531 if ((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
4532 ElementCount::isKnownGE(NextVF.Width, EstimatedRuntimeVF)) ||
4533 (NextVF.Width.isScalable() &&
4534 ElementCount::isKnownGE(NextVF.Width, MainLoopVF)) ||
4535 (!NextVF.Width.isScalable() && !MainLoopVF.isScalable() &&
4536 ElementCount::isKnownGT(NextVF.Width, MainLoopVF)))
4537 continue;
4538
4539 // If NextVF is greater than the number of remaining iterations, the
4540 // epilogue loop would be dead. Skip such factors.
4541 // TODO: We should also consider comparing against a scalable
4542 // RemainingIterations when SCEV be able to evaluate non-canonical
4543 // vscale-based expressions.
4544 if (!ScalableRemIter) {
4545 // Handle the case where NextVF and RemainingIterations are in different
4546 // numerical spaces.
4547 ElementCount EC = NextVF.Width;
4548 if (NextVF.Width.isScalable())
4549         EC = ElementCount::getFixed(
4550             estimateElementCount(NextVF.Width, CM.getVScaleForTuning()));
4551 if (SkipVF(SE.getElementCount(TCType, EC), RemainingIterations))
4552 continue;
4553 }
4554
4555 if (Result.Width.isScalar() ||
4556 isMoreProfitable(NextVF, Result, MaxTripCount, !CM.foldTailByMasking(),
4557 /*IsEpilogue*/ true))
4558 Result = NextVF;
4559 }
4560
4561 if (Result != VectorizationFactor::Disabled())
4562 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
4563 << Result.Width << "\n");
4564 return Result;
4565}
4566
4567std::pair<unsigned, unsigned>
4568 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
4569   unsigned MinWidth = -1U;
4570 unsigned MaxWidth = 8;
4571 const DataLayout &DL = TheFunction->getDataLayout();
4572 // For in-loop reductions, no element types are added to ElementTypesInLoop
4573 // if there are no loads/stores in the loop. In this case, check through the
4574 // reduction variables to determine the maximum width.
4575 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
4576 for (const auto &PhiDescriptorPair : Legal->getReductionVars()) {
4577 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
4578 // When finding the min width used by the recurrence we need to account
4579 // for casts on the input operands of the recurrence.
4580 MinWidth = std::min(
4581 MinWidth,
4582 std::min(RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
4583 RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
4584 MaxWidth = std::max(MaxWidth,
4585 RdxDesc.getRecurrenceType()->getScalarSizeInBits());
4586 }
4587 } else {
4588 for (Type *T : ElementTypesInLoop) {
4589 MinWidth = std::min<unsigned>(
4590 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4591 MaxWidth = std::max<unsigned>(
4592 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4593 }
4594 }
4595 return {MinWidth, MaxWidth};
4596}
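// ---- Editorial sketch (not part of LoopVectorize.cpp) ----------------------
// The width computation above reduces to taking the smallest and largest
// scalar bit width seen in the loop, with 8 bits as the floor for the maximum
// and -1U as the "nothing seen yet" sentinel for the minimum. Standard C++
// only; the helper name is hypothetical.
#include <algorithm>
#include <utility>
#include <vector>

inline std::pair<unsigned, unsigned>
smallestAndWidestBits(const std::vector<unsigned> &ElementBitWidths) {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  for (unsigned Bits : ElementBitWidths) {
    MinWidth = std::min(MinWidth, Bits);
    MaxWidth = std::max(MaxWidth, Bits);
  }
  return {MinWidth, MaxWidth};
}
// ----------------------------------------------------------------------------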
4597
4598void LoopVectorizationCostModel::collectElementTypesForWidening() {
4599 ElementTypesInLoop.clear();
4600 // For each block.
4601 for (BasicBlock *BB : TheLoop->blocks()) {
4602 // For each instruction in the loop.
4603 for (Instruction &I : BB->instructionsWithoutDebug()) {
4604 Type *T = I.getType();
4605
4606 // Skip ignored values.
4607 if (ValuesToIgnore.count(&I))
4608 continue;
4609
4610 // Only examine Loads, Stores and PHINodes.
4611 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
4612 continue;
4613
4614 // Examine PHI nodes that are reduction variables. Update the type to
4615 // account for the recurrence type.
4616 if (auto *PN = dyn_cast<PHINode>(&I)) {
4617 if (!Legal->isReductionVariable(PN))
4618 continue;
4619 const RecurrenceDescriptor &RdxDesc =
4620 Legal->getRecurrenceDescriptor(PN);
4621 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
4622 TTI.preferInLoopReduction(RdxDesc.getRecurrenceKind(),
4623 RdxDesc.getRecurrenceType()))
4624 continue;
4625 T = RdxDesc.getRecurrenceType();
4626 }
4627
4628 // Examine the stored values.
4629 if (auto *ST = dyn_cast<StoreInst>(&I))
4630 T = ST->getValueOperand()->getType();
4631
4632 assert(T->isSized() &&
4633 "Expected the load/store/recurrence type to be sized");
4634
4635 ElementTypesInLoop.insert(T);
4636 }
4637 }
4638}
4639
4640unsigned
4641LoopVectorizationPlanner::selectInterleaveCount(VPlan &Plan, ElementCount VF,
4642 InstructionCost LoopCost) {
4643 // -- The interleave heuristics --
4644 // We interleave the loop in order to expose ILP and reduce the loop overhead.
4645 // There are many micro-architectural considerations that we can't predict
4646 // at this level. For example, frontend pressure (on decode or fetch) due to
4647 // code size, or the number and capabilities of the execution ports.
4648 //
4649 // We use the following heuristics to select the interleave count:
4650 // 1. If the code has reductions, then we interleave to break the cross
4651 // iteration dependency.
4652 // 2. If the loop is really small, then we interleave to reduce the loop
4653 // overhead.
4654 // 3. We don't interleave if we think that we will spill registers to memory
4655 // due to the increased register pressure.
4656
4657 // Only interleave tail-folded loops if wide lane masks are requested, as the
4658 // overhead of multiple instructions to calculate the predicate is likely
4659 // not beneficial. If a scalar epilogue is not allowed for any other reason,
4660 // do not interleave.
4661 if (!CM.isScalarEpilogueAllowed() &&
4662 !(CM.preferPredicatedLoop() && CM.useWideActiveLaneMask()))
4663 return 1;
4664
4667 LLVM_DEBUG(dbgs() << "LV: Preference for VP intrinsics indicated. "
4668 "Unroll factor forced to be 1.\n");
4669 return 1;
4670 }
4671
4672 // A maximum safe dependence distance also limits VF * IC, so do not interleave.
4673 if (!Legal->isSafeForAnyVectorWidth())
4674 return 1;
4675
4676 // We don't attempt to perform interleaving for loops with uncountable early
4677 // exits because the VPInstruction::AnyOf code cannot currently handle
4678 // multiple parts.
4679 if (Plan.hasEarlyExit())
4680 return 1;
4681
4682 const bool HasReductions =
4685
4686 // FIXME: implement interleaving for FindLast transform correctly.
4687 if (hasFindLastReductionPhi(Plan))
4688 return 1;
4689
4690 // If we did not calculate the cost for VF (because the user selected the VF)
4691 // then we calculate the cost of VF here.
4692 if (LoopCost == 0) {
4693 if (VF.isScalar())
4694 LoopCost = CM.expectedCost(VF);
4695 else
4696 LoopCost = cost(Plan, VF);
4697 assert(LoopCost.isValid() && "Expected to have chosen a VF with valid cost");
4698
4699 // Loop body is free and there is no need for interleaving.
4700 if (LoopCost == 0)
4701 return 1;
4702 }
4703
4704 VPRegisterUsage R =
4705 calculateRegisterUsageForPlan(Plan, {VF}, TTI, CM.ValuesToIgnore)[0];
4706 // We divide by these counts, so assume that we have at least one
4707 // instruction that uses at least one register.
4708 for (auto &Pair : R.MaxLocalUsers) {
4709 Pair.second = std::max(Pair.second, 1U);
4710 }
4711
4712 // We calculate the interleave count using the following formula.
4713 // Subtract the number of loop invariants from the number of available
4714 // registers. These registers are used by all of the interleaved instances.
4715 // Next, divide the remaining registers by the number of registers that is
4716 // required by the loop, in order to estimate how many parallel instances
4717 // fit without causing spills. All of this is rounded down if necessary to be
4718 // a power of two. We want power of two interleave count to simplify any
4719 // addressing operations or alignment considerations.
4720 // We also want power of two interleave counts to ensure that the induction
4721 // variable of the vector loop wraps to zero, when tail is folded by masking;
4722 // this currently happens when OptForSize, in which case IC is set to 1 above.
4723 unsigned IC = UINT_MAX;
4724
4725 for (const auto &Pair : R.MaxLocalUsers) {
4726 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(Pair.first);
4727 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
4728 << " registers of "
4729 << TTI.getRegisterClassName(Pair.first)
4730 << " register class\n");
4731 if (VF.isScalar()) {
4732 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
4733 TargetNumRegisters = ForceTargetNumScalarRegs;
4734 } else {
4735 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
4736 TargetNumRegisters = ForceTargetNumVectorRegs;
4737 }
4738 unsigned MaxLocalUsers = Pair.second;
4739 unsigned LoopInvariantRegs = 0;
4740 if (R.LoopInvariantRegs.contains(Pair.first))
4741 LoopInvariantRegs = R.LoopInvariantRegs[Pair.first];
4742
4743 unsigned TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
4744 MaxLocalUsers);
4745 // Don't count the induction variable as interleaved.
4746 if (EnableIndVarRegisterHeur) {
4747 TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
4748 std::max(1U, (MaxLocalUsers - 1)));
4749 }
4750
4751 IC = std::min(IC, TmpIC);
4752 }
4753
4754 // Clamp the interleave ranges to reasonable counts.
4755 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
4756
4757 // Check if the user has overridden the max.
4758 if (VF.isScalar()) {
4759 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
4760 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
4761 } else {
4762 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
4763 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
4764 }
4765
4766 // Try to get the exact trip count, or an estimate based on profiling data or
4767 // ConstantMax from PSE, failing that.
4768 auto BestKnownTC = getSmallBestKnownTC(PSE, OrigLoop);
4769
4770 // For fixed length VFs treat a scalable trip count as unknown.
4771 if (BestKnownTC && (BestKnownTC->isFixed() || VF.isScalable())) {
4772 // Re-evaluate trip counts and VFs to be in the same numerical space.
4773 unsigned AvailableTC =
4774 estimateElementCount(*BestKnownTC, CM.getVScaleForTuning());
4775 unsigned EstimatedVF = estimateElementCount(VF, CM.getVScaleForTuning());
4776
4777 // At least one iteration must be scalar when this constraint holds. So the
4778 // maximum available iterations for interleaving is one less.
4779 if (CM.requiresScalarEpilogue(VF.isVector()))
4780 --AvailableTC;
4781
4782 unsigned InterleaveCountLB = bit_floor(std::max(
4783 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
4784
4785 if (getSmallConstantTripCount(PSE.getSE(), OrigLoop).isNonZero()) {
4786 // If the best known trip count is exact, we select between two
4787 // prospective ICs, where
4788 //
4789 // 1) the aggressive IC is capped by the trip count divided by VF
4790 // 2) the conservative IC is capped by the trip count divided by (VF * 2)
4791 //
4792 // The final IC is selected in a way that the epilogue loop trip count is
4793 // minimized while maximizing the IC itself, so that we either run the
4794 // vector loop at least once if it generates a small epilogue loop, or
4795 // else we run the vector loop at least twice.
4796
4797 unsigned InterleaveCountUB = bit_floor(std::max(
4798 1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
4799 MaxInterleaveCount = InterleaveCountLB;
4800
4801 if (InterleaveCountUB != InterleaveCountLB) {
4802 unsigned TailTripCountUB =
4803 (AvailableTC % (EstimatedVF * InterleaveCountUB));
4804 unsigned TailTripCountLB =
4805 (AvailableTC % (EstimatedVF * InterleaveCountLB));
4806 // If both produce the same scalar tail, maximize the IC to do the same work
4807 // in fewer vector loop iterations.
4808 if (TailTripCountUB == TailTripCountLB)
4809 MaxInterleaveCount = InterleaveCountUB;
4810 }
4811 } else {
4812 // If the trip count is only an estimated compile-time constant, cap the
4813 // IC by the trip count divided by VF * 2, so that the vector loop runs at
4814 // least twice; this makes interleaving seem profitable when there is an
4815 // epilogue loop present. Since the exact trip count is not known, we choose
4816 // to be conservative in our IC estimate.
4817 MaxInterleaveCount = InterleaveCountLB;
4818 }
4819 }
4820
4821 assert(MaxInterleaveCount > 0 &&
4822 "Maximum interleave count must be greater than 0");
4823
4824 // Clamp the calculated IC to be between the 1 and the max interleave count
4825 // that the target and trip count allows.
4826 if (IC > MaxInterleaveCount)
4827 IC = MaxInterleaveCount;
4828 else
4829 // Make sure IC is greater than 0.
4830 IC = std::max(1u, IC);
4831
4832 assert(IC > 0 && "Interleave count must be greater than 0.");
4833
4834 // Interleave if we vectorized this loop and there is a reduction that could
4835 // benefit from interleaving.
4836 if (VF.isVector() && HasReductions) {
4837 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
4838 return IC;
4839 }
4840
4841 // For any scalar loop that either requires runtime checks or predication we
4842 // are better off leaving this to the unroller. Note that if we've already
4843 // vectorized the loop we will have done the runtime check and so interleaving
4844 // won't require further checks.
4845 bool ScalarInterleavingRequiresPredication =
4846 (VF.isScalar() && any_of(OrigLoop->blocks(), [this](BasicBlock *BB) {
4847 return Legal->blockNeedsPredication(BB);
4848 }));
4849 bool ScalarInterleavingRequiresRuntimePointerCheck =
4850 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
4851
4852 // We want to interleave small loops in order to reduce the loop overhead and
4853 // potentially expose ILP opportunities.
4854 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
4855 << "LV: IC is " << IC << '\n'
4856 << "LV: VF is " << VF << '\n');
4857 const bool AggressivelyInterleaveReductions =
4858 TTI.enableAggressiveInterleaving(HasReductions);
4859 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
4860 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
4861 // We assume that the cost overhead is 1 and we use the cost model
4862 // to estimate the cost of the loop and interleave until the cost of the
4863 // loop overhead is about 5% of the cost of the loop.
4864 unsigned SmallIC = std::min(IC, (unsigned)llvm::bit_floor<uint64_t>(
4865 SmallLoopCost / LoopCost.getValue()));
4866
4867 // Interleave until store/load ports (estimated by max interleave count) are
4868 // saturated.
4869 unsigned NumStores = 0;
4870 unsigned NumLoads = 0;
4873 for (VPRecipeBase &R : *VPBB) {
4875 NumLoads++;
4876 continue;
4877 }
4879 NumStores++;
4880 continue;
4881 }
4882
4883 if (auto *InterleaveR = dyn_cast<VPInterleaveRecipe>(&R)) {
4884 if (unsigned StoreOps = InterleaveR->getNumStoreOperands())
4885 NumStores += StoreOps;
4886 else
4887 NumLoads += InterleaveR->getNumDefinedValues();
4888 continue;
4889 }
4890 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
4891 NumLoads += isa<LoadInst>(RepR->getUnderlyingInstr());
4892 NumStores += isa<StoreInst>(RepR->getUnderlyingInstr());
4893 continue;
4894 }
4895 if (isa<VPHistogramRecipe>(&R)) {
4896 NumLoads++;
4897 NumStores++;
4898 continue;
4899 }
4900 }
4901 }
4902 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
4903 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
4904
4905 // There is little point in interleaving for reductions containing selects
4906 // and compares when VF=1 since it may just create more overhead than it's
4907 // worth for loops with small trip counts. This is because we still have to
4908 // do the final reduction after the loop.
4909 bool HasSelectCmpReductions =
4910 HasReductions &&
4912 [](VPRecipeBase &R) {
4913 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4914 return RedR && (RecurrenceDescriptor::isAnyOfRecurrenceKind(
4915 RedR->getRecurrenceKind()) ||
4916 RecurrenceDescriptor::isFindIVRecurrenceKind(
4917 RedR->getRecurrenceKind()));
4918 });
4919 if (HasSelectCmpReductions) {
4920 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
4921 return 1;
4922 }
4923
4924 // If we have a scalar reduction (vector reductions are already dealt with
4925 // by this point), we can increase the critical path length if the loop
4926 // we're interleaving is inside another loop. For tree-wise reductions
4927 // set the limit to 2, and for ordered reductions it's best to disable
4928 // interleaving entirely.
4929 if (HasReductions && OrigLoop->getLoopDepth() > 1) {
4930 bool HasOrderedReductions =
4932 [](VPRecipeBase &R) {
4933 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4934
4935 return RedR && RedR->isOrdered();
4936 });
4937 if (HasOrderedReductions) {
4938 LLVM_DEBUG(
4939 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
4940 return 1;
4941 }
4942
4943 unsigned F = MaxNestedScalarReductionIC;
4944 SmallIC = std::min(SmallIC, F);
4945 StoresIC = std::min(StoresIC, F);
4946 LoadsIC = std::min(LoadsIC, F);
4947 }
4948
4949 if (EnableLoadStoreRuntimeInterleave &&
4950 std::max(StoresIC, LoadsIC) > SmallIC) {
4951 LLVM_DEBUG(
4952 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
4953 return std::max(StoresIC, LoadsIC);
4954 }
4955
4956 // If there are scalar reductions and TTI has enabled aggressive
4957 // interleaving for reductions, we will interleave to expose ILP.
4958 if (VF.isScalar() && AggressivelyInterleaveReductions) {
4959 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4960 // Interleave no less than SmallIC but not as aggressive as the normal IC
4961 // to satisfy the rare situation when resources are too limited.
4962 return std::max(IC / 2, SmallIC);
4963 }
4964
4965 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
4966 return SmallIC;
4967 }
4968
4969 // Interleave if this is a large loop (small loops are already dealt with by
4970 // this point) that could benefit from interleaving.
4971 if (AggressivelyInterleaveReductions) {
4972 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4973 return IC;
4974 }
4975
4976 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
4977 return 1;
4978}
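// ---- Editorial sketch (not part of LoopVectorize.cpp) ----------------------
// The core of the interleave heuristic above, reduced to the register-pressure
// formula and the clamp against the target maximum. Names are illustrative,
// std::bit_floor stands in for llvm::bit_floor, and C++20 <bit> is assumed.
#include <algorithm>
#include <bit>

inline unsigned interleaveFromRegisterPressure(unsigned TargetNumRegisters,
                                               unsigned LoopInvariantRegs,
                                               unsigned MaxLocalUsers,
                                               unsigned MaxInterleaveCount) {
  MaxLocalUsers = std::max(MaxLocalUsers, 1u);
  // Registers left after the loop-invariant values.
  unsigned Avail = TargetNumRegisters > LoopInvariantRegs
                       ? TargetNumRegisters - LoopInvariantRegs
                       : 0;
  // Divide by the per-copy demand and round down to a power of two.
  unsigned IC = std::bit_floor(Avail / MaxLocalUsers);
  // Clamp between 1 and the target / trip-count derived maximum.
  return std::clamp(IC, 1u, std::max(MaxInterleaveCount, 1u));
}
// ----------------------------------------------------------------------------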
4979
4980bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
4981 ElementCount VF) {
4982 // TODO: Cost model for emulated masked load/store is completely
4983 // broken. This hack guides the cost model to use an artificially
4984 // high enough value to practically disable vectorization with such
4985 // operations, except where previously deployed legality hack allowed
4986 // using very low cost values. This is to avoid regressions coming simply
4987 // from moving "masked load/store" check from legality to cost model.
4988 // Masked Load/Gather emulation was previously never allowed.
4989 // Limited number of Masked Store/Scatter emulation was allowed.
4990 assert((isPredicatedInst(I)) &&
4991 "Expecting a scalar emulated instruction");
4992 return isa<LoadInst>(I) ||
4993 (isa<StoreInst>(I) &&
4994 NumPredStores > NumberOfStoresToPredicate);
4995}
4996
4997void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
4998 assert(VF.isVector() && "Expected VF >= 2");
4999
5000 // If we've already collected the instructions to scalarize or the predicated
5001 // BBs after vectorization, there's nothing to do. Collection may already have
5002 // occurred if we have a user-selected VF and are now computing the expected
5003 // cost for interleaving.
5004 if (InstsToScalarize.contains(VF) ||
5005 PredicatedBBsAfterVectorization.contains(VF))
5006 return;
5007
5008 // Initialize a mapping for VF in InstsToScalarize. If we find that it's
5009 // not profitable to scalarize any instructions, the presence of VF in the
5010 // map will indicate that we've analyzed it already.
5011 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
5012
5013 // Find all the instructions that are scalar with predication in the loop and
5014 // determine if it would be better to not if-convert the blocks they are in.
5015 // If so, we also record the instructions to scalarize.
5016 for (BasicBlock *BB : TheLoop->blocks()) {
5017 if (!blockNeedsPredicationForAnyReason(BB))
5018 continue;
5019 for (Instruction &I : *BB)
5020 if (isScalarWithPredication(&I, VF)) {
5021 ScalarCostsTy ScalarCosts;
5022 // Do not apply discount logic for:
5023 // 1. Scalars after vectorization, as there will only be a single copy
5024 // of the instruction.
5025 // 2. Scalable VF, as that would lead to invalid scalarization costs.
5026 // 3. Emulated masked memrefs, if a hacked cost is needed.
5027 if (!isScalarAfterVectorization(&I, VF) && !VF.isScalable() &&
5028 !useEmulatedMaskMemRefHack(&I, VF) &&
5029 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) {
5030 for (const auto &[I, IC] : ScalarCosts)
5031 ScalarCostsVF.insert({I, IC});
5032 // Check if we decided to scalarize a call. If so, update the widening
5033 // decision of the call to CM_Scalarize with the computed scalar cost.
5034 for (const auto &[I, Cost] : ScalarCosts) {
5035 auto *CI = dyn_cast<CallInst>(I);
5036 if (!CI || !CallWideningDecisions.contains({CI, VF}))
5037 continue;
5038 CallWideningDecisions[{CI, VF}].Kind = CM_Scalarize;
5039 CallWideningDecisions[{CI, VF}].Cost = Cost;
5040 }
5041 }
5042 // Remember that BB will remain after vectorization.
5043 PredicatedBBsAfterVectorization[VF].insert(BB);
5044 for (auto *Pred : predecessors(BB)) {
5045 if (Pred->getSingleSuccessor() == BB)
5046 PredicatedBBsAfterVectorization[VF].insert(Pred);
5047 }
5048 }
5049 }
5050}
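// ---- Editorial sketch (not part of LoopVectorize.cpp) ----------------------
// The gate used above before recording scalarization candidates for a
// predicated instruction, written out as a plain predicate. The parameter
// names are hypothetical summaries of the checks in the loop above.
#include <cstdint>

inline bool shouldRecordScalarization(bool ScalarAfterVectorization,
                                      bool ScalableVF,
                                      bool NeedsEmulatedMaskHack,
                                      int64_t Discount) {
  return !ScalarAfterVectorization && !ScalableVF && !NeedsEmulatedMaskHack &&
         Discount >= 0;
}
// ----------------------------------------------------------------------------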
5051
5052InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
5053 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
5054 assert(!isUniformAfterVectorization(PredInst, VF) &&
5055 "Instruction marked uniform-after-vectorization will be predicated");
5056
5057 // Initialize the discount to zero, meaning that the scalar version and the
5058 // vector version cost the same.
5059 InstructionCost Discount = 0;
5060
5061 // Holds instructions to analyze. The instructions we visit are mapped in
5062 // ScalarCosts. Those instructions are the ones that would be scalarized if
5063 // we find that the scalar version costs less.
5064 SmallVector<Instruction *, 8> Worklist;
5065
5066 // Returns true if the given instruction can be scalarized.
5067 auto CanBeScalarized = [&](Instruction *I) -> bool {
5068 // We only attempt to scalarize instructions forming a single-use chain
5069 // from the original predicated block that would otherwise be vectorized.
5070 // Although not strictly necessary, we give up on instructions we know will
5071 // already be scalar to avoid traversing chains that are unlikely to be
5072 // beneficial.
5073 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
5074 isScalarAfterVectorization(I, VF))
5075 return false;
5076
5077 // If the instruction is scalar with predication, it will be analyzed
5078 // separately. We ignore it within the context of PredInst.
5079 if (isScalarWithPredication(I, VF))
5080 return false;
5081
5082 // If any of the instruction's operands are uniform after vectorization,
5083 // the instruction cannot be scalarized. This prevents, for example, a
5084 // masked load from being scalarized.
5085 //
5086 // We assume we will only emit a value for lane zero of an instruction
5087 // marked uniform after vectorization, rather than VF identical values.
5088 // Thus, if we scalarize an instruction that uses a uniform, we would
5089 // create uses of values corresponding to the lanes we aren't emitting code
5090 // for. This behavior can be changed by allowing getScalarValue to clone
5091 // the lane zero values for uniforms rather than asserting.
5092 for (Use &U : I->operands())
5093 if (auto *J = dyn_cast<Instruction>(U.get()))
5094 if (isUniformAfterVectorization(J, VF))
5095 return false;
5096
5097 // Otherwise, we can scalarize the instruction.
5098 return true;
5099 };
5100
5101 // Compute the expected cost discount from scalarizing the entire expression
5102 // feeding the predicated instruction. We currently only consider expressions
5103 // that are single-use instruction chains.
5104 Worklist.push_back(PredInst);
5105 while (!Worklist.empty()) {
5106 Instruction *I = Worklist.pop_back_val();
5107
5108 // If we've already analyzed the instruction, there's nothing to do.
5109 if (ScalarCosts.contains(I))
5110 continue;
5111
5112 // Cannot scalarize fixed-order recurrence phis at the moment.
5113 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
5114 continue;
5115
5116 // Compute the cost of the vector instruction. Note that this cost already
5117 // includes the scalarization overhead of the predicated instruction.
5118 InstructionCost VectorCost = getInstructionCost(I, VF);
5119
5120 // Compute the cost of the scalarized instruction. This cost is the cost of
5121 // the instruction as if it wasn't if-converted and instead remained in the
5122 // predicated block. We will scale this cost by block probability after
5123 // computing the scalarization overhead.
5124 InstructionCost ScalarCost =
5125 VF.getFixedValue() * getInstructionCost(I, ElementCount::getFixed(1));
5126
5127 // Compute the scalarization overhead of needed insertelement instructions
5128 // and phi nodes.
5129 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
5130 Type *WideTy = toVectorizedTy(I->getType(), VF);
5131 for (Type *VectorTy : getContainedTypes(WideTy)) {
5132 ScalarCost += TTI.getScalarizationOverhead(
5133 cast<VectorType>(VectorTy), APInt::getAllOnes(VF.getFixedValue()),
5134 /*Insert=*/true,
5135 /*Extract=*/false, CostKind);
5136 }
5137 ScalarCost +=
5138 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
5139 }
5140
5141 // Compute the scalarization overhead of needed extractelement
5142 // instructions. For each of the instruction's operands, if the operand can
5143 // be scalarized, add it to the worklist; otherwise, account for the
5144 // overhead.
5145 for (Use &U : I->operands())
5146 if (auto *J = dyn_cast<Instruction>(U.get())) {
5147 assert(canVectorizeTy(J->getType()) &&
5148 "Instruction has non-scalar type");
5149 if (CanBeScalarized(J))
5150 Worklist.push_back(J);
5151 else if (needsExtract(J, VF)) {
5152 Type *WideTy = toVectorizedTy(J->getType(), VF);
5153 for (Type *VectorTy : getContainedTypes(WideTy)) {
5154 ScalarCost += TTI.getScalarizationOverhead(
5155 cast<VectorType>(VectorTy),
5156 APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
5157 /*Extract*/ true, CostKind);
5158 }
5159 }
5160 }
5161
5162 // Scale the total scalar cost by block probability.
5163 ScalarCost /= getPredBlockCostDivisor(CostKind, I->getParent());
5164
5165 // Compute the discount. A non-negative discount means the vector version
5166 // of the instruction costs more, and scalarizing would be beneficial.
5167 Discount += VectorCost - ScalarCost;
5168 ScalarCosts[I] = ScalarCost;
5169 }
5170
5171 return Discount;
5172}
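// ---- Editorial sketch (not part of LoopVectorize.cpp) ----------------------
// The discount above boils down to comparing the if-converted vector cost
// against the scalar cost kept inside a predicated block, with the scalar
// cost scaled down because the block only runs for some iterations. The
// divisor parameter models getPredBlockCostDivisor(); everything here is
// illustrative, integer-valued, and ignores the extract/insert overhead terms.
#include <cstdint>

inline int64_t scalarizationDiscount(int64_t VectorCost,
                                     int64_t ScalarCostPerLane, unsigned VF,
                                     unsigned PredBlockCostDivisor) {
  int64_t ScalarCost =
      (ScalarCostPerLane * static_cast<int64_t>(VF)) / PredBlockCostDivisor;
  // A non-negative result means the vector form is at least as expensive, so
  // scalarizing (and keeping the predicated block) is worthwhile.
  return VectorCost - ScalarCost;
}
// ----------------------------------------------------------------------------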
5173
5174InstructionCost LoopVectorizationCostModel::expectedCost(ElementCount VF) {
5175 InstructionCost Cost;
5176
5177 // If the vector loop gets executed exactly once with the given VF, ignore the
5178 // costs of comparison and induction instructions, as they'll get simplified
5179 // away.
5180 SmallPtrSet<Instruction *, 2> ValuesToIgnoreForVF;
5181 auto TC = getSmallConstantTripCount(PSE.getSE(), TheLoop);
5182 if (TC == VF && !foldTailByMasking())
5184 ValuesToIgnoreForVF);
5185
5186 // For each block.
5187 for (BasicBlock *BB : TheLoop->blocks()) {
5188 InstructionCost BlockCost;
5189
5190 // For each instruction in the old loop.
5191 for (Instruction &I : BB->instructionsWithoutDebug()) {
5192 // Skip ignored values.
5193 if (ValuesToIgnore.count(&I) || ValuesToIgnoreForVF.count(&I) ||
5194 (VF.isVector() && VecValuesToIgnore.count(&I)))
5195 continue;
5196
5197 InstructionCost C = getInstructionCost(&I, VF);
5198
5199 // Check if we should override the cost.
5200 if (C.isValid() && ForceTargetInstructionCost.getNumOccurrences() > 0) {
5201 // For interleave groups, use ForceTargetInstructionCost once for the
5202 // whole group.
5203 if (VF.isVector() && getWideningDecision(&I, VF) == CM_Interleave) {
5204 if (getInterleavedAccessGroup(&I)->getInsertPos() == &I)
5205 C = InstructionCost(ForceTargetInstructionCost);
5206 else
5207 C = InstructionCost(0);
5208 } else {
5209 C = InstructionCost(ForceTargetInstructionCost);
5210 }
5211 }
5212
5213 BlockCost += C;
5214 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF "
5215 << VF << " For instruction: " << I << '\n');
5216 }
5217
5218 // If we are vectorizing a predicated block, it will have been
5219 // if-converted. This means that the block's instructions (aside from
5220 // stores and instructions that may divide by zero) will now be
5221 // unconditionally executed. For the scalar case, we may not always execute
5222 // the predicated block, if it is an if-else block. Thus, scale the block's
5223 // cost by the probability of executing it.
5224 // getPredBlockCostDivisor will return 1 for blocks that are only predicated
5225 // by the header mask when folding the tail.
5226 if (VF.isScalar())
5227 BlockCost /= getPredBlockCostDivisor(CostKind, BB);
5228
5229 Cost += BlockCost;
5230 }
5231
5232 return Cost;
5233}
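// ---- Editorial sketch (not part of LoopVectorize.cpp) ----------------------
// expectedCost() above is a sum over blocks where, for the scalar VF, the
// cost of a predicated block is divided by a probability-derived divisor
// (1 for unpredicated blocks). Hypothetical standalone helper.
#include <cstdint>
#include <vector>

struct BlockCostInfo {
  int64_t Cost;         // Sum of per-instruction costs in the block.
  unsigned CostDivisor; // 1 for unpredicated blocks, > 1 for predicated ones.
};

inline int64_t
expectedScalarLoopCost(const std::vector<BlockCostInfo> &Blocks) {
  int64_t Total = 0;
  for (const BlockCostInfo &B : Blocks)
    Total += B.Cost / B.CostDivisor;
  return Total;
}
// ----------------------------------------------------------------------------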
5234
5235/// Gets the address access SCEV for Ptr, if it should be used for cost modeling
5236/// according to isAddressSCEVForCost.
5237///
5238/// This SCEV can be sent to the Target in order to estimate the address
5239/// calculation cost.
5241 Value *Ptr,
5243 const Loop *TheLoop) {
5244 const SCEV *Addr = PSE.getSCEV(Ptr);
5245 return vputils::isAddressSCEVForCost(Addr, *PSE.getSE(), TheLoop) ? Addr
5246 : nullptr;
5247}
5248
5250LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5251 ElementCount VF) {
5252 assert(VF.isVector() &&
5253 "Scalarization cost of instruction implies vectorization.");
5254 if (VF.isScalable())
5255 return InstructionCost::getInvalid();
5256
5257 Type *ValTy = getLoadStoreType(I);
5258 auto *SE = PSE.getSE();
5259
5260 unsigned AS = getLoadStoreAddressSpace(I);
5262 Type *PtrTy = toVectorTy(Ptr->getType(), VF);
5263 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
5264 // that it is being called from this specific place.
5265
5266 // Figure out whether the access is strided and get the stride value
5267 // if it's known at compile time.
5268 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, PSE, TheLoop);
5269
5270 // Get the cost of the scalar memory instruction and address computation.
5272 PtrTy, SE, PtrSCEV, CostKind);
5273
5274 // Don't pass *I here, since it is scalar but will actually be part of a
5275 // vectorized loop where the user of it is a vectorized instruction.
5276 const Align Alignment = getLoadStoreAlignment(I);
5277 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5278 Cost += VF.getFixedValue() *
5279 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
5280 AS, CostKind, OpInfo);
5281
5282 // Get the overhead of the extractelement and insertelement instructions
5283 // we might create due to scalarization.
5285
5286 // If we have a predicated load/store, it will need extra i1 extracts and
5287 // conditional branches, but may not be executed for each vector lane. Scale
5288 // the cost by the probability of executing the predicated block.
5289 if (isPredicatedInst(I)) {
5290 Cost /= getPredBlockCostDivisor(CostKind, I->getParent());
5291
5292 // Add the cost of an i1 extract and a branch
5293 auto *VecI1Ty =
5294 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
5296 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
5297 /*Insert=*/false, /*Extract=*/true, CostKind);
5298 Cost += TTI.getCFInstrCost(Instruction::Br, CostKind);
5299
5300 if (useEmulatedMaskMemRefHack(I, VF))
5301 // Artificially setting to a high enough value to practically disable
5302 // vectorization with such operations.
5303 Cost = 3000000;
5304 }
5305
5306 return Cost;
5307}
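// ---- Editorial sketch (not part of LoopVectorize.cpp) ----------------------
// A simplified shape of the scalarized memory-op cost above: VF scalar
// accesses plus scalarization overhead, scaled down when the access is
// predicated, with the same large constant used above to effectively forbid
// emulated masked accesses. Parameters are hypothetical aggregates of the
// individual TTI queries.
#include <cstdint>

inline int64_t scalarizedMemOpCost(unsigned VF, int64_t ScalarAccessCost,
                                   int64_t ScalarizationOverhead,
                                   bool Predicated, unsigned PredDivisor,
                                   bool EmulatedMaskedHack) {
  int64_t Cost =
      static_cast<int64_t>(VF) * ScalarAccessCost + ScalarizationOverhead;
  if (Predicated) {
    Cost /= PredDivisor; // Not every lane executes the access.
    if (EmulatedMaskedHack)
      Cost = 3000000; // Mirrors the artificial cost used above.
  }
  return Cost;
}
// ----------------------------------------------------------------------------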
5308
5310LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5311 ElementCount VF) {
5312 Type *ValTy = getLoadStoreType(I);
5313 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5315 unsigned AS = getLoadStoreAddressSpace(I);
5316 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
5317
5318 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5319 "Stride should be 1 or -1 for consecutive memory access");
5320 const Align Alignment = getLoadStoreAlignment(I);
5322 if (Legal->isMaskRequired(I)) {
5323 unsigned IID = I->getOpcode() == Instruction::Load
5324 ? Intrinsic::masked_load
5325 : Intrinsic::masked_store;
5327 MemIntrinsicCostAttributes(IID, VectorTy, Alignment, AS), CostKind);
5328 } else {
5329 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5330 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5331 CostKind, OpInfo, I);
5332 }
5333
5334 bool Reverse = ConsecutiveStride < 0;
5335 if (Reverse)
5337 VectorTy, {}, CostKind, 0);
5338 return Cost;
5339}
5340
5342LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5343 ElementCount VF) {
5344 assert(Legal->isUniformMemOp(*I, VF));
5345
5346 Type *ValTy = getLoadStoreType(I);
5348 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5349 const Align Alignment = getLoadStoreAlignment(I);
5350 unsigned AS = getLoadStoreAddressSpace(I);
5351 if (isa<LoadInst>(I)) {
5352 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5353 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
5354 CostKind) +
5356 VectorTy, {}, CostKind);
5357 }
5358 StoreInst *SI = cast<StoreInst>(I);
5359
5360 bool IsLoopInvariantStoreValue = Legal->isInvariant(SI->getValueOperand());
5361 // TODO: We have existing tests that request the cost of extracting element
5362 // VF.getKnownMinValue() - 1 from a scalable vector. This does not represent
5363 // the actual generated code, which involves extracting the last element of
5364 // a scalable vector where the lane to extract is unknown at compile time.
5366 TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5367 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, CostKind);
5368 if (!IsLoopInvariantStoreValue)
5369 Cost += TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
5370 VectorTy, CostKind, 0);
5371 return Cost;
5372}
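// ---- Editorial sketch (not part of LoopVectorize.cpp) ----------------------
// The uniform memory-op cost above is one scalar access plus either a
// broadcast (loads) or, for stores of non-invariant values, an extract of the
// live lane. Names and the flat cost parameters are illustrative only.
#include <cstdint>

inline int64_t uniformMemOpCost(bool IsLoad, int64_t ScalarAccessCost,
                                int64_t BroadcastCost, int64_t ExtractCost,
                                bool StoreValueIsInvariant) {
  if (IsLoad)
    return ScalarAccessCost + BroadcastCost;
  return ScalarAccessCost + (StoreValueIsInvariant ? 0 : ExtractCost);
}
// ----------------------------------------------------------------------------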
5373
5375LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5376 ElementCount VF) {
5377 Type *ValTy = getLoadStoreType(I);
5378 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5379 const Align Alignment = getLoadStoreAlignment(I);
5381 Type *PtrTy = Ptr->getType();
5382
5383 if (!Legal->isUniform(Ptr, VF))
5384 PtrTy = toVectorTy(PtrTy, VF);
5385
5386 unsigned IID = I->getOpcode() == Instruction::Load
5387 ? Intrinsic::masked_gather
5388 : Intrinsic::masked_scatter;
5389 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5391 MemIntrinsicCostAttributes(IID, VectorTy, Ptr,
5392 Legal->isMaskRequired(I), Alignment, I),
5393 CostKind);
5394}
5395
5397LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5398 ElementCount VF) {
5399 const auto *Group = getInterleavedAccessGroup(I);
5400 assert(Group && "Fail to get an interleaved access group.");
5401
5402 Instruction *InsertPos = Group->getInsertPos();
5403 Type *ValTy = getLoadStoreType(InsertPos);
5404 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5405 unsigned AS = getLoadStoreAddressSpace(InsertPos);
5406
5407 unsigned InterleaveFactor = Group->getFactor();
5408 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
5409
5410 // Holds the indices of existing members in the interleaved group.
5411 SmallVector<unsigned, 4> Indices;
5412 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
5413 if (Group->getMember(IF))
5414 Indices.push_back(IF);
5415
5416 // Calculate the cost of the whole interleaved group.
5417 bool UseMaskForGaps =
5418 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
5419 (isa<StoreInst>(I) && !Group->isFull());
5421 InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5422 Group->getAlign(), AS, CostKind, Legal->isMaskRequired(I),
5423 UseMaskForGaps);
5424
5425 if (Group->isReverse()) {
5426 // TODO: Add support for reversed masked interleaved access.
5427 assert(!Legal->isMaskRequired(I) &&
5428 "Reverse masked interleaved access not supported.");
5429 Cost += Group->getNumMembers() *
5431 VectorTy, {}, CostKind, 0);
5432 }
5433 return Cost;
5434}
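// ---- Editorial sketch (not part of LoopVectorize.cpp) ----------------------
// The interleaved-group cost above charges one wide access covering
// VF * Factor elements for the member indices that actually exist, and a
// reverse group additionally pays one shuffle per member. Illustrative only.
#include <cstdint>
#include <vector>

// Collect the indices of group members that are present, as done above.
inline std::vector<unsigned>
presentMemberIndices(const std::vector<bool> &HasMember) {
  std::vector<unsigned> Indices;
  for (unsigned IF = 0; IF < HasMember.size(); ++IF)
    if (HasMember[IF])
      Indices.push_back(IF);
  return Indices;
}

inline int64_t interleaveGroupCost(int64_t WideAccessCost, unsigned NumMembers,
                                   int64_t PerMemberReverseShuffleCost,
                                   bool Reverse) {
  int64_t Cost = WideAccessCost;
  if (Reverse)
    Cost += static_cast<int64_t>(NumMembers) * PerMemberReverseShuffleCost;
  return Cost;
}
// ----------------------------------------------------------------------------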
5435
5436std::optional<InstructionCost>
5438 ElementCount VF,
5439 Type *Ty) const {
5440 using namespace llvm::PatternMatch;
5441 // Early exit if there are no in-loop reductions.
5442 if (InLoopReductions.empty() || VF.isScalar() || !isa<VectorType>(Ty))
5443 return std::nullopt;
5444 auto *VectorTy = cast<VectorType>(Ty);
5445
5446 // We are looking for a pattern of, and finding the minimal acceptable cost:
5447 // reduce(mul(ext(A), ext(B))) or
5448 // reduce(mul(A, B)) or
5449 // reduce(ext(A)) or
5450 // reduce(A).
5451 // The basic idea is that we walk down the tree to do that, finding the root
5452 // reduction instruction in InLoopReductionImmediateChains. From there we find
5453 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
5454 // of the components. If the reduction cost is lower, then we return it for the
5455 // reduction instruction and 0 for the other instructions in the pattern. If
5456 // it is not, we return an invalid cost specifying that the original cost method
5457 // should be used.
5458 Instruction *RetI = I;
5459 if (match(RetI, m_ZExtOrSExt(m_Value()))) {
5460 if (!RetI->hasOneUser())
5461 return std::nullopt;
5462 RetI = RetI->user_back();
5463 }
5464
5465 if (match(RetI, m_OneUse(m_Mul(m_Value(), m_Value()))) &&
5466 RetI->user_back()->getOpcode() == Instruction::Add) {
5467 RetI = RetI->user_back();
5468 }
5469
5470 // Test if the found instruction is a reduction, and if not return an invalid
5471 // cost specifying the parent to use the original cost modelling.
5472 Instruction *LastChain = InLoopReductionImmediateChains.lookup(RetI);
5473 if (!LastChain)
5474 return std::nullopt;
5475
5476 // Find the reduction this chain is a part of and calculate the basic cost of
5477 // the reduction on its own.
5478 Instruction *ReductionPhi = LastChain;
5479 while (!isa<PHINode>(ReductionPhi))
5480 ReductionPhi = InLoopReductionImmediateChains.at(ReductionPhi);
5481
5482 const RecurrenceDescriptor &RdxDesc =
5483 Legal->getRecurrenceDescriptor(cast<PHINode>(ReductionPhi));
5484
5485 InstructionCost BaseCost;
5486 RecurKind RK = RdxDesc.getRecurrenceKind();
5489 BaseCost = TTI.getMinMaxReductionCost(MinMaxID, VectorTy,
5490 RdxDesc.getFastMathFlags(), CostKind);
5491 } else {
5492 BaseCost = TTI.getArithmeticReductionCost(
5493 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
5494 }
5495
5496 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
5497 // normal fmul instruction to the cost of the fadd reduction.
5498 if (RK == RecurKind::FMulAdd)
5499 BaseCost +=
5500 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
5501
5502 // If we're using ordered reductions then we can just return the base cost
5503 // here, since getArithmeticReductionCost calculates the full ordered
5504 // reduction cost when FP reassociation is not allowed.
5505 if (useOrderedReductions(RdxDesc))
5506 return BaseCost;
5507
5508 // Get the operand that was not the reduction chain and match it to one of the
5509 // patterns, returning the better cost if it is found.
5510 Instruction *RedOp = RetI->getOperand(1) == LastChain
5511 ? dyn_cast<Instruction>(RetI->getOperand(0))
5512 : dyn_cast<Instruction>(RetI->getOperand(1));
5513
5514 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
5515
5516 Instruction *Op0, *Op1;
5517 if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5518 match(RedOp,
5520 match(Op0, m_ZExtOrSExt(m_Value())) &&
5521 Op0->getOpcode() == Op1->getOpcode() &&
5522 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
5523 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
5524 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
5525
5526 // Matched reduce.add(ext(mul(ext(A), ext(B)))
5527 // Note that the extend opcodes need to all match, or if A==B they will have
5528 // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
5529 // which is equally fine.
5530 bool IsUnsigned = isa<ZExtInst>(Op0);
5531 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
5532 auto *MulType = VectorType::get(Op0->getType(), VectorTy);
5533
5534 InstructionCost ExtCost =
5535 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
5537 InstructionCost MulCost =
5538 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
5539 InstructionCost Ext2Cost =
5540 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
5542
5543 InstructionCost RedCost = TTI.getMulAccReductionCost(
5544 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5545 CostKind);
5546
5547 if (RedCost.isValid() &&
5548 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
5549 return I == RetI ? RedCost : 0;
5550 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
5551 !TheLoop->isLoopInvariant(RedOp)) {
5552 // Matched reduce(ext(A))
5553 bool IsUnsigned = isa<ZExtInst>(RedOp);
5554 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
5555 InstructionCost RedCost = TTI.getExtendedReductionCost(
5556 RdxDesc.getOpcode(), IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
5557 RdxDesc.getFastMathFlags(), CostKind);
5558
5559 InstructionCost ExtCost =
5560 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
5562 if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
5563 return I == RetI ? RedCost : 0;
5564 } else if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5565 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
5566 if (match(Op0, m_ZExtOrSExt(m_Value())) &&
5567 Op0->getOpcode() == Op1->getOpcode() &&
5568 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
5569 bool IsUnsigned = isa<ZExtInst>(Op0);
5570 Type *Op0Ty = Op0->getOperand(0)->getType();
5571 Type *Op1Ty = Op1->getOperand(0)->getType();
5572 Type *LargestOpTy =
5573 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
5574 : Op0Ty;
5575 auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
5576
5577 // Matched reduce.add(mul(ext(A), ext(B))), where the two ext may be of
5578 // different sizes. We take the largest type as the ext to reduce, and add
5579 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
5580 InstructionCost ExtCost0 = TTI.getCastInstrCost(
5581 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
5583 InstructionCost ExtCost1 = TTI.getCastInstrCost(
5584 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
5586 InstructionCost MulCost =
5587 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5588
5589 InstructionCost RedCost = TTI.getMulAccReductionCost(
5590 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5591 CostKind);
5592 InstructionCost ExtraExtCost = 0;
5593 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
5594 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
5595 ExtraExtCost = TTI.getCastInstrCost(
5596 ExtraExtOp->getOpcode(), ExtType,
5597 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
5599 }
5600
5601 if (RedCost.isValid() &&
5602 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
5603 return I == RetI ? RedCost : 0;
5604 } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
5605 // Matched reduce.add(mul())
5606 InstructionCost MulCost =
5607 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5608
5609 InstructionCost RedCost = TTI.getMulAccReductionCost(
5610 true, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), VectorTy,
5611 CostKind);
5612
5613 if (RedCost.isValid() && RedCost < MulCost + BaseCost)
5614 return I == RetI ? RedCost : 0;
5615 }
5616 }
5617
5618 return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
5619}
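// ---- Editorial sketch (not part of LoopVectorize.cpp) ----------------------
// The pattern test above, e.g. for reduce.add(ext(mul(ext(A), ext(B)))), only
// keeps the fused reduction cost when it beats the sum of the separately
// costed components; otherwise the ordinary per-instruction costing is used.
// Hypothetical helper mirroring the comparison
//   RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost.
#include <cstdint>

inline bool preferFusedMulAccReduction(int64_t FusedRedCost, int64_t ExtCost,
                                       int64_t MulCost, int64_t OuterExtCost,
                                       int64_t BaseReductionCost) {
  return FusedRedCost <
         2 * ExtCost + MulCost + OuterExtCost + BaseReductionCost;
}
// ----------------------------------------------------------------------------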
5620
5622LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5623 ElementCount VF) {
5624 // Calculate scalar cost only. Vectorization cost should be ready at this
5625 // moment.
5626 if (VF.isScalar()) {
5627 Type *ValTy = getLoadStoreType(I);
5629 const Align Alignment = getLoadStoreAlignment(I);
5630 unsigned AS = getLoadStoreAddressSpace(I);
5631
5632 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5633 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5634 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, CostKind,
5635 OpInfo, I);
5636 }
5637 return getWideningCost(I, VF);
5638}
5639
5641LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
5642 ElementCount VF) const {
5643
5644 // There is no mechanism yet to create a scalable scalarization loop,
5645 // so this is currently Invalid.
5646 if (VF.isScalable())
5647 return InstructionCost::getInvalid();
5648
5649 if (VF.isScalar())
5650 return 0;
5651
5653 Type *RetTy = toVectorizedTy(I->getType(), VF);
5654 if (!RetTy->isVoidTy() &&
5656
5658 if (isa<LoadInst>(I))
5660 else if (isa<StoreInst>(I))
5662
5663 for (Type *VectorTy : getContainedTypes(RetTy)) {
5666 /*Insert=*/true, /*Extract=*/false, CostKind,
5667 /*ForPoisonSrc=*/true, {}, VIC);
5668 }
5669 }
5670
5671 // Some targets keep addresses scalar.
5673 return Cost;
5674
5675 // Some targets support efficient element stores.
5677 return Cost;
5678
5679 // Collect operands to consider.
5680 CallInst *CI = dyn_cast<CallInst>(I);
5681 Instruction::op_range Ops = CI ? CI->args() : I->operands();
5682
5683 // Skip operands that do not require extraction/scalarization and do not incur
5684 // any overhead.
5686 for (auto *V : filterExtractingOperands(Ops, VF))
5687 Tys.push_back(maybeVectorizeType(V->getType(), VF));
5688
5692 return Cost + TTI.getOperandsScalarizationOverhead(Tys, CostKind, OperandVIC);
5693}
5694
5696 if (VF.isScalar())
5697 return;
5698 NumPredStores = 0;
5699 for (BasicBlock *BB : TheLoop->blocks()) {
5700 // For each instruction in the old loop.
5701 for (Instruction &I : *BB) {
5703 if (!Ptr)
5704 continue;
5705
5706 // TODO: We should generate better code and update the cost model for
5707 // predicated uniform stores. Today they are treated as any other
5708 // predicated store (see added test cases in
5709 // invariant-store-vectorization.ll).
5711 NumPredStores++;
5712
5713 if (Legal->isUniformMemOp(I, VF)) {
5714 auto IsLegalToScalarize = [&]() {
5715 if (!VF.isScalable())
5716 // Scalarization of fixed length vectors "just works".
5717 return true;
5718
5719 // We have dedicated lowering for unpredicated uniform loads and
5720 // stores. Note that even with tail folding we know that at least
5721 // one lane is active (i.e. generalized predication is not possible
5722 // here), and the logic below depends on this fact.
5723 if (!foldTailByMasking())
5724 return true;
5725
5726 // For scalable vectors, a uniform memop load is always
5727 // uniform-by-parts and we know how to scalarize that.
5728 if (isa<LoadInst>(I))
5729 return true;
5730
5731 // A uniform store isn't necessarily uniform-by-parts,
5732 // so we can't assume scalarization.
5733 auto &SI = cast<StoreInst>(I);
5734 return TheLoop->isLoopInvariant(SI.getValueOperand());
5735 };
5736
5737 const InstructionCost GatherScatterCost =
5739 getGatherScatterCost(&I, VF) : InstructionCost::getInvalid();
5740
5741 // Load: Scalar load + broadcast
5742 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
5743 // FIXME: This cost is a significant under-estimate for tail folded
5744 // memory ops.
5745 const InstructionCost ScalarizationCost =
5746 IsLegalToScalarize() ? getUniformMemOpCost(&I, VF)
5748
5749 // Choose the better solution for the current VF. Note that Invalid
5750 // costs compare as maximally large; if both are invalid, the resulting
5751 // invalid cost signals a failure and vectorization is aborted.
5752 if (GatherScatterCost < ScalarizationCost)
5753 setWideningDecision(&I, VF, CM_GatherScatter, GatherScatterCost);
5754 else
5755 setWideningDecision(&I, VF, CM_Scalarize, ScalarizationCost);
5756 continue;
5757 }
5758
5759 // We assume that widening is the best solution when possible.
5760 if (memoryInstructionCanBeWidened(&I, VF)) {
5761 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
5762 int ConsecutiveStride = Legal->isConsecutivePtr(
5764 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5765 "Expected consecutive stride.");
5766 InstWidening Decision =
5767 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
5768 setWideningDecision(&I, VF, Decision, Cost);
5769 continue;
5770 }
5771
5772 // Choose between Interleaving, Gather/Scatter or Scalarization.
5774 unsigned NumAccesses = 1;
5775 if (isAccessInterleaved(&I)) {
5776 const auto *Group = getInterleavedAccessGroup(&I);
5777 assert(Group && "Fail to get an interleaved access group.");
5778
5779 // Make one decision for the whole group.
5780 if (getWideningDecision(&I, VF) != CM_Unknown)
5781 continue;
5782
5783 NumAccesses = Group->getNumMembers();
5785 InterleaveCost = getInterleaveGroupCost(&I, VF);
5786 }
5787
5788 InstructionCost GatherScatterCost =
5790 ? getGatherScatterCost(&I, VF) * NumAccesses
5792
5793 InstructionCost ScalarizationCost =
5794 getMemInstScalarizationCost(&I, VF) * NumAccesses;
5795
5796 // Choose the better solution for the current VF, record the decision,
5797 // and use it during vectorization.
5799 InstWidening Decision;
5800 if (InterleaveCost <= GatherScatterCost &&
5801 InterleaveCost < ScalarizationCost) {
5802 Decision = CM_Interleave;
5803 Cost = InterleaveCost;
5804 } else if (GatherScatterCost < ScalarizationCost) {
5805 Decision = CM_GatherScatter;
5806 Cost = GatherScatterCost;
5807 } else {
5808 Decision = CM_Scalarize;
5809 Cost = ScalarizationCost;
5810 }
5811 // If the instruction belongs to an interleave group, the whole group
5812 // receives the same decision. The whole group is charged the cost, but
5813 // the cost will actually be assigned to one instruction.
5814 if (const auto *Group = getInterleavedAccessGroup(&I)) {
5815 if (Decision == CM_Scalarize) {
5816 for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
5817 if (auto *I = Group->getMember(Idx)) {
5818 setWideningDecision(I, VF, Decision,
5819 getMemInstScalarizationCost(I, VF));
5820 }
5821 }
5822 } else {
5823 setWideningDecision(Group, VF, Decision, Cost);
5824 }
5825 } else
5826 setWideningDecision(&I, VF, Decision, Cost);
5827 }
5828 }
5829
5830 // Make sure that any load of an address and any other address computation
5831 // remains scalar unless there is gather/scatter support. This avoids
5832 // inevitable extracts into address registers, and also has the benefit of
5833 // activating LSR more, since that pass can't optimize vectorized
5834 // addresses.
5835 if (TTI.prefersVectorizedAddressing())
5836 return;
5837
5838 // Start with all scalar pointer uses.
5840 for (BasicBlock *BB : TheLoop->blocks())
5841 for (Instruction &I : *BB) {
5842 Instruction *PtrDef =
5844 if (PtrDef && TheLoop->contains(PtrDef) &&
5846 AddrDefs.insert(PtrDef);
5847 }
5848
5849 // Add all instructions used to generate the addresses.
5851 append_range(Worklist, AddrDefs);
5852 while (!Worklist.empty()) {
5853 Instruction *I = Worklist.pop_back_val();
5854 for (auto &Op : I->operands())
5855 if (auto *InstOp = dyn_cast<Instruction>(Op))
5856 if (TheLoop->contains(InstOp) && !isa<PHINode>(InstOp) &&
5857 AddrDefs.insert(InstOp).second)
5858 Worklist.push_back(InstOp);
5859 }
5860
5861 auto UpdateMemOpUserCost = [this, VF](LoadInst *LI) {
5862 // If there are direct memory op users of the newly scalarized load,
5863 // their cost may have changed because there's no scalarization
5864 // overhead for the operand. Update it.
5865 for (User *U : LI->users()) {
5867 continue;
5869 continue;
5872 getMemInstScalarizationCost(cast<Instruction>(U), VF));
5873 }
5874 };
5875 for (auto *I : AddrDefs) {
5876 if (isa<LoadInst>(I)) {
5877 // Setting the desired widening decision should ideally be handled
5878 // by cost functions, but since this involves the task of finding out
5879 // if the loaded register is involved in an address computation, it is
5880 // instead changed here when we know this is the case.
5881 InstWidening Decision = getWideningDecision(I, VF);
5882 if (Decision == CM_Widen || Decision == CM_Widen_Reverse ||
5883 (!isPredicatedInst(I) && !Legal->isUniformMemOp(*I, VF) &&
5884 Decision == CM_Scalarize)) {
5885 // Scalarize a widened load of address or update the cost of a scalar
5886 // load of an address.
5888 I, VF, CM_Scalarize,
5889 (VF.getKnownMinValue() *
5890 getMemoryInstructionCost(I, ElementCount::getFixed(1))));
5891 UpdateMemOpUserCost(cast<LoadInst>(I));
5892 } else if (const auto *Group = getInterleavedAccessGroup(I)) {
5893 // Scalarize all members of this interleaved group when any member
5894 // is used as an address. The address-used load skips scalarization
5895 // overhead, other members include it.
5896 for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
5897 if (Instruction *Member = Group->getMember(Idx)) {
5899 AddrDefs.contains(Member)
5900 ? (VF.getKnownMinValue() *
5901 getMemoryInstructionCost(Member,
5903 : getMemInstScalarizationCost(Member, VF);
5905 UpdateMemOpUserCost(cast<LoadInst>(Member));
5906 }
5907 }
5908 }
5909 } else {
5910 // Cannot scalarize fixed-order recurrence phis at the moment.
5911 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
5912 continue;
5913
5914 // Make sure I gets scalarized and receives a cost estimate without
5915 // scalarization overhead.
5916 ForcedScalars[VF].insert(I);
5917 }
5918 }
5919}
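// ---- Editorial sketch (not part of LoopVectorize.cpp) ----------------------
// The per-access decision above reduces to picking the cheapest of
// interleaving, gather/scatter and scalarization, with ties broken in that
// order exactly as in the code. The enum and helper are illustrative only and
// ignore the invalid-cost handling.
#include <cstdint>

enum class WideningKind { Interleave, GatherScatter, Scalarize };

inline WideningKind pickWidening(int64_t InterleaveCost,
                                 int64_t GatherScatterCost,
                                 int64_t ScalarizationCost) {
  if (InterleaveCost <= GatherScatterCost &&
      InterleaveCost < ScalarizationCost)
    return WideningKind::Interleave;
  if (GatherScatterCost < ScalarizationCost)
    return WideningKind::GatherScatter;
  return WideningKind::Scalarize;
}
// ----------------------------------------------------------------------------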
5920
5922 assert(!VF.isScalar() &&
5923 "Trying to set a vectorization decision for a scalar VF");
5924
5925 auto ForcedScalar = ForcedScalars.find(VF);
5926 for (BasicBlock *BB : TheLoop->blocks()) {
5927 // For each instruction in the old loop.
5928 for (Instruction &I : *BB) {
5930
5931 if (!CI)
5932 continue;
5933
5937 Function *ScalarFunc = CI->getCalledFunction();
5938 Type *ScalarRetTy = CI->getType();
5939 SmallVector<Type *, 4> Tys, ScalarTys;
5940 for (auto &ArgOp : CI->args())
5941 ScalarTys.push_back(ArgOp->getType());
5942
5943 // Estimate cost of scalarized vector call. The source operands are
5944 // assumed to be vectors, so we need to extract individual elements from
5945 // there, execute VF scalar calls, and then gather the result into the
5946 // vector return value.
5947 if (VF.isFixed()) {
5948 InstructionCost ScalarCallCost =
5949 TTI.getCallInstrCost(ScalarFunc, ScalarRetTy, ScalarTys, CostKind);
5950
5951 // Compute costs of unpacking argument values for the scalar calls and
5952 // packing the return values to a vector.
5953 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
5954 ScalarCost = ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
5955 } else {
5956 // There is no point attempting to calculate the scalar cost for a
5957 // scalable VF as we know it will be Invalid.
5959 "Unexpected valid cost for scalarizing scalable vectors");
5960 ScalarCost = InstructionCost::getInvalid();
5961 }
5962
5963 // Honor ForcedScalars and UniformAfterVectorization decisions.
5964 // TODO: For calls, it might still be more profitable to widen. Use
5965 // VPlan-based cost model to compare different options.
5966 if (VF.isVector() && ((ForcedScalar != ForcedScalars.end() &&
5967 ForcedScalar->second.contains(CI)) ||
5968 isUniformAfterVectorization(CI, VF))) {
5969 setCallWideningDecision(CI, VF, CM_Scalarize, nullptr,
5970 Intrinsic::not_intrinsic, std::nullopt,
5971 ScalarCost);
5972 continue;
5973 }
5974
5975 bool MaskRequired = Legal->isMaskRequired(CI);
5976 // Compute corresponding vector type for return value and arguments.
5977 Type *RetTy = toVectorizedTy(ScalarRetTy, VF);
5978 for (Type *ScalarTy : ScalarTys)
5979 Tys.push_back(toVectorizedTy(ScalarTy, VF));
5980
5981 // An in-loop reduction using an fmuladd intrinsic is a special case;
5982 // we don't want the normal cost for that intrinsic.
5984 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy)) {
5987 std::nullopt, *RedCost);
5988 continue;
5989 }
5990
5991 // Find the cost of vectorizing the call, if we can find a suitable
5992 // vector variant of the function.
5993 VFInfo FuncInfo;
5994 Function *VecFunc = nullptr;
5995 // Search through any available variants for one we can use at this VF.
5996 for (VFInfo &Info : VFDatabase::getMappings(*CI)) {
5997 // Must match requested VF.
5998 if (Info.Shape.VF != VF)
5999 continue;
6000
6001 // Must take a mask argument if one is required
6002 if (MaskRequired && !Info.isMasked())
6003 continue;
6004
6005 // Check that all parameter kinds are supported
6006 bool ParamsOk = true;
6007 for (VFParameter Param : Info.Shape.Parameters) {
6008 switch (Param.ParamKind) {
6010 break;
6012 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
6013 // Make sure the scalar parameter in the loop is invariant.
6014 if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(ScalarParam),
6015 TheLoop))
6016 ParamsOk = false;
6017 break;
6018 }
6020 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
6021 // Find the stride for the scalar parameter in this loop and see if
6022 // it matches the stride for the variant.
6023 // TODO: do we need to figure out the cost of an extract to get the
6024 // first lane? Or do we hope that it will be folded away?
6025 ScalarEvolution *SE = PSE.getSE();
6026 if (!match(SE->getSCEV(ScalarParam),
6028 m_SCEV(), m_scev_SpecificSInt(Param.LinearStepOrPos),
6030 ParamsOk = false;
6031 break;
6032 }
6034 break;
6035 default:
6036 ParamsOk = false;
6037 break;
6038 }
6039 }
6040
6041 if (!ParamsOk)
6042 continue;
6043
6044 // Found a suitable candidate, stop here.
6045 VecFunc = CI->getModule()->getFunction(Info.VectorName);
6046 FuncInfo = Info;
6047 break;
6048 }
6049
6050 if (TLI && VecFunc && !CI->isNoBuiltin())
6051 VectorCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, CostKind);
6052
6053 // Find the cost of an intrinsic; some targets may have instructions that
6054 // perform the operation without needing an actual call.
6056 if (IID != Intrinsic::not_intrinsic)
6058
6059 InstructionCost Cost = ScalarCost;
6060 InstWidening Decision = CM_Scalarize;
6061
6062 if (VectorCost.isValid() && VectorCost <= Cost) {
6063 Cost = VectorCost;
6064 Decision = CM_VectorCall;
6065 }
6066
6067 if (IntrinsicCost.isValid() && IntrinsicCost <= Cost) {
6069 Decision = CM_IntrinsicCall;
6070 }
6071
6072 setCallWideningDecision(CI, VF, Decision, VecFunc, IID,
6074 }
6075 }
6076}
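// ---- Editorial sketch (not part of LoopVectorize.cpp) ----------------------
// The call-widening choice above prefers a vector library call over
// scalarization when it is no more expensive, and an intrinsic over both
// under the same rule. Validity checks on the costs are omitted; names are
// illustrative only.
#include <cstdint>

enum class CallWidening { Scalarize, VectorCall, IntrinsicCall };

inline CallWidening pickCallWidening(int64_t ScalarCost, int64_t VectorCost,
                                     int64_t IntrinsicCost) {
  int64_t Cost = ScalarCost;
  CallWidening Decision = CallWidening::Scalarize;
  if (VectorCost <= Cost) {
    Cost = VectorCost;
    Decision = CallWidening::VectorCall;
  }
  if (IntrinsicCost <= Cost)
    Decision = CallWidening::IntrinsicCall;
  return Decision;
}
// ----------------------------------------------------------------------------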
6077
6079 if (!Legal->isInvariant(Op))
6080 return false;
6081 // Consider Op invariant, if it or its operands aren't predicated
6082 // instruction in the loop. In that case, it is not trivially hoistable.
6083 auto *OpI = dyn_cast<Instruction>(Op);
6084 return !OpI || !TheLoop->contains(OpI) ||
6085 (!isPredicatedInst(OpI) &&
6086 (!isa<PHINode>(OpI) || OpI->getParent() != TheLoop->getHeader()) &&
6087 all_of(OpI->operands(),
6088 [this](Value *Op) { return shouldConsiderInvariant(Op); }));
6089}
6090
6093 ElementCount VF) {
6094 // If we know that this instruction will remain uniform, check the cost of
6095 // the scalar version.
6097 VF = ElementCount::getFixed(1);
6098
6099 if (VF.isVector() && isProfitableToScalarize(I, VF))
6100 return InstsToScalarize[VF][I];
6101
6102 // Forced scalars do not have any scalarization overhead.
6103 auto ForcedScalar = ForcedScalars.find(VF);
6104 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
6105 auto InstSet = ForcedScalar->second;
6106 if (InstSet.count(I))
6108 VF.getKnownMinValue();
6109 }
6110
6111 Type *RetTy = I->getType();
6113 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6114 auto *SE = PSE.getSE();
6115
6116 Type *VectorTy;
6117 if (isScalarAfterVectorization(I, VF)) {
6118 [[maybe_unused]] auto HasSingleCopyAfterVectorization =
6119 [this](Instruction *I, ElementCount VF) -> bool {
6120 if (VF.isScalar())
6121 return true;
6122
6123 auto Scalarized = InstsToScalarize.find(VF);
6124 assert(Scalarized != InstsToScalarize.end() &&
6125 "VF not yet analyzed for scalarization profitability");
6126 return !Scalarized->second.count(I) &&
6127 llvm::all_of(I->users(), [&](User *U) {
6128 auto *UI = cast<Instruction>(U);
6129 return !Scalarized->second.count(UI);
6130 });
6131 };
6132
6133 // With the exception of GEPs and PHIs, after scalarization there should
6134 // only be one copy of the instruction generated in the loop. This is
6135 // because the VF is either 1, or any instructions that need scalarizing
6136 // have already been dealt with by the time we get here. As a result,
6137 // we don't have to multiply the instruction cost by VF.
6138 assert(I->getOpcode() == Instruction::GetElementPtr ||
6139 I->getOpcode() == Instruction::PHI ||
6140 (I->getOpcode() == Instruction::BitCast &&
6141 I->getType()->isPointerTy()) ||
6142 HasSingleCopyAfterVectorization(I, VF));
6143 VectorTy = RetTy;
6144 } else
6145 VectorTy = toVectorizedTy(RetTy, VF);
6146
6147 if (VF.isVector() && VectorTy->isVectorTy() &&
6148 !TTI.getNumberOfParts(VectorTy))
6149 return InstructionCost::getInvalid();
6150
6151 // TODO: We need to estimate the cost of intrinsic calls.
6152 switch (I->getOpcode()) {
6153 case Instruction::GetElementPtr:
6154 // We mark this instruction as zero-cost because the cost of GEPs in
6155 // vectorized code depends on whether the corresponding memory instruction
6156 // is scalarized or not. Therefore, we handle GEPs with the memory
6157 // instruction cost.
6158 return 0;
6159 case Instruction::Br: {
6160 // In cases of scalarized and predicated instructions, there will be VF
6161 // predicated blocks in the vectorized loop. Each branch around these
6162 // blocks also requires an extract of its vector compare i1 element.
6163 // Note that the conditional branch from the loop latch will be replaced by
6164 // a single branch controlling the loop, so there is no extra overhead from
6165 // scalarization.
6166 bool ScalarPredicatedBB = false;
6168 if (VF.isVector() && BI->isConditional() &&
6169 (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) ||
6170 PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1))) &&
6171 BI->getParent() != TheLoop->getLoopLatch())
6172 ScalarPredicatedBB = true;
6173
6174 if (ScalarPredicatedBB) {
6175 // Not possible to scalarize scalable vector with predicated instructions.
6176 if (VF.isScalable())
6177 return InstructionCost::getInvalid();
6178 // Return cost for branches around scalarized and predicated blocks.
6179 auto *VecI1Ty =
6181 return (
6182 TTI.getScalarizationOverhead(
6183 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
6184 /*Insert*/ false, /*Extract*/ true, CostKind) +
6185 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
6186 }
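// Worked example (hypothetical TTI costs): at VF = 4, a branch around a
// scalarized, predicated block is costed as extracting all four i1 lanes of
// the vector compare plus four scalar branches. With an extract cost of 1 and
// a branch cost of 1 this yields roughly 4 + 4 * 1 = 8.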
6187
6188 if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
6189 // The back-edge branch will remain, as will all scalar branches.
6190 return TTI.getCFInstrCost(Instruction::Br, CostKind);
6191
6192 // This branch will be eliminated by if-conversion.
6193 return 0;
6194 // Note: We currently assume zero cost for an unconditional branch inside
6195 // a predicated block since it will become a fall-through, although we
6196 // may decide in the future to call TTI for all branches.
6197 }
6198 case Instruction::Switch: {
6199 if (VF.isScalar())
6200 return TTI.getCFInstrCost(Instruction::Switch, CostKind);
6201 auto *Switch = cast<SwitchInst>(I);
6202 return Switch->getNumCases() *
6203 TTI.getCmpSelInstrCost(
6204 Instruction::ICmp,
6205 toVectorTy(Switch->getCondition()->getType(), VF),
6206 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
6208 }
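// Example: a vectorized switch with 3 cases at VF = 4 is approximated as 3
// vector compares of the widened condition, each costed as an ICmp from
// <4 x iN> to <4 x i1>, so the result is 3 * getCmpSelInstrCost(ICmp, ...).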
6209 case Instruction::PHI: {
6210 auto *Phi = cast<PHINode>(I);
6211
6212 // First-order recurrences are replaced by vector shuffles inside the loop.
6213 if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi)) {
6215 std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
6216 return TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
6217 cast<VectorType>(VectorTy),
6218 cast<VectorType>(VectorTy), Mask, CostKind,
6219 VF.getKnownMinValue() - 1);
6220 }
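// Example: for a fixed VF = 4 the mask built above is {3, 4, 5, 6}, i.e. an
// SK_Splice that takes the last lane of the previous iteration's vector
// followed by the first three lanes of the current one, which is the shuffle
// emitted for a first-order recurrence.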
6221
6222 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6223 // converted into select instructions. We require N - 1 selects per phi
6224 // node, where N is the number of incoming values.
6225 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) {
6226 Type *ResultTy = Phi->getType();
6227
6228 // All instructions in an Any-of reduction chain are narrowed to bool.
6229 // Check if that is the case for this phi node.
6230 auto *HeaderUser = cast_if_present<PHINode>(
6231 find_singleton<User>(Phi->users(), [this](User *U, bool) -> User * {
6232 auto *Phi = dyn_cast<PHINode>(U);
6233 if (Phi && Phi->getParent() == TheLoop->getHeader())
6234 return Phi;
6235 return nullptr;
6236 }));
6237 if (HeaderUser) {
6238 auto &ReductionVars = Legal->getReductionVars();
6239 auto Iter = ReductionVars.find(HeaderUser);
6240 if (Iter != ReductionVars.end() &&
6242 Iter->second.getRecurrenceKind()))
6243 ResultTy = Type::getInt1Ty(Phi->getContext());
6244 }
6245 return (Phi->getNumIncomingValues() - 1) *
6246 TTI.getCmpSelInstrCost(
6247 Instruction::Select, toVectorTy(ResultTy, VF),
6248 toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
6250 }
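// Example: a non-header phi with 3 incoming values is flattened by
// if-conversion into 2 vector selects, so its cost is
// 2 * getCmpSelInstrCost(Select, <VF x Ty>, <VF x i1>); for a phi in an any-of
// reduction chain, Ty is first narrowed to i1.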
6251
6252 // When tail folding with EVL, if the phi is part of an out of loop
6253 // reduction then it will be transformed into a wide vp_merge.
6254 if (VF.isVector() && foldTailWithEVL() &&
6255 Legal->getReductionVars().contains(Phi) && !isInLoopReduction(Phi)) {
6257 Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
6258 {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
6259 return TTI.getIntrinsicInstrCost(ICA, CostKind);
6260 }
6261
6262 return TTI.getCFInstrCost(Instruction::PHI, CostKind);
6263 }
6264 case Instruction::UDiv:
6265 case Instruction::SDiv:
6266 case Instruction::URem:
6267 case Instruction::SRem:
6268 if (VF.isVector() && isPredicatedInst(I)) {
6269 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
6270 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost) ?
6271 ScalarCost : SafeDivisorCost;
6272 }
6273 // We've proven all lanes safe to speculate, fall through.
6274 [[fallthrough]];
6275 case Instruction::Add:
6276 case Instruction::Sub: {
6277 auto Info = Legal->getHistogramInfo(I);
6278 if (Info && VF.isVector()) {
6279 const HistogramInfo *HGram = Info.value();
6280 // Assume that a non-constant update value (or a constant != 1) requires
6281 // a multiply, and add that into the cost.
6283 ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1));
6284 if (!RHS || RHS->getZExtValue() != 1)
6285 MulCost =
6286 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6287
6288 // Find the cost of the histogram operation itself.
6289 Type *PtrTy = VectorType::get(HGram->Load->getPointerOperandType(), VF);
6290 Type *ScalarTy = I->getType();
6291 Type *MaskTy = VectorType::get(Type::getInt1Ty(I->getContext()), VF);
6292 IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
6293 Type::getVoidTy(I->getContext()),
6294 {PtrTy, ScalarTy, MaskTy});
6295
6296 // Add the costs together with the add/sub operation.
6297 return TTI.getIntrinsicInstrCost(ICA, CostKind) + MulCost +
6298 TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, CostKind);
6299 }
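// Illustrative example: for a histogram update like "buckets[idx[i]] += 2" at
// VF = 4, the returned cost is the cost of the vector histogram intrinsic on
// <4 x ptr> bucket addresses with a <4 x i1> mask, plus a vector multiply for
// the non-unit increment, plus the widened add itself; with an increment of 1
// the multiply term is omitted.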
6300 [[fallthrough]];
6301 }
6302 case Instruction::FAdd:
6303 case Instruction::FSub:
6304 case Instruction::Mul:
6305 case Instruction::FMul:
6306 case Instruction::FDiv:
6307 case Instruction::FRem:
6308 case Instruction::Shl:
6309 case Instruction::LShr:
6310 case Instruction::AShr:
6311 case Instruction::And:
6312 case Instruction::Or:
6313 case Instruction::Xor: {
6314 // If we're speculating on the stride being 1, the multiplication may
6315 // fold away. We can generalize this for all operations using the notion
6316 // of neutral elements. (TODO)
6317 if (I->getOpcode() == Instruction::Mul &&
6318 ((TheLoop->isLoopInvariant(I->getOperand(0)) &&
6319 PSE.getSCEV(I->getOperand(0))->isOne()) ||
6320 (TheLoop->isLoopInvariant(I->getOperand(1)) &&
6321 PSE.getSCEV(I->getOperand(1))->isOne())))
6322 return 0;
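// Example: for "%off = mul i64 %i, %stride" where the predicated SCEV assumes
// %stride == 1 (guarded by a runtime check), the multiply is expected to fold
// away in the vectorized code and is therefore costed as free here.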
6323
6324 // Detect reduction patterns
6325 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6326 return *RedCost;
6327
6328 // Certain instructions can be cheaper to vectorize if they have a constant
6329 // second vector operand. One example of this are shifts on x86.
6330 Value *Op2 = I->getOperand(1);
6331 if (!isa<Constant>(Op2) && TheLoop->isLoopInvariant(Op2) &&
6332 PSE.getSE()->isSCEVable(Op2->getType()) &&
6333 isa<SCEVConstant>(PSE.getSCEV(Op2))) {
6334 Op2 = cast<SCEVConstant>(PSE.getSCEV(Op2))->getValue();
6335 }
6336 auto Op2Info = TTI.getOperandInfo(Op2);
6337 if (Op2Info.Kind == TargetTransformInfo::OK_AnyValue &&
6340
6341 SmallVector<const Value *, 4> Operands(I->operand_values());
6342 return TTI.getArithmeticInstrCost(
6343 I->getOpcode(), VectorTy, CostKind,
6344 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6345 Op2Info, Operands, I, TLI);
6346 }
6347 case Instruction::FNeg: {
6348 return TTI.getArithmeticInstrCost(
6349 I->getOpcode(), VectorTy, CostKind,
6350 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6351 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6352 I->getOperand(0), I);
6353 }
6354 case Instruction::Select: {
6356 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6357 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6358
6359 const Value *Op0, *Op1;
6360 using namespace llvm::PatternMatch;
6361 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
6362 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
6363 // select x, y, false --> x & y
6364 // select x, true, y --> x | y
6365 const auto [Op1VK, Op1VP] = TTI::getOperandInfo(Op0);
6366 const auto [Op2VK, Op2VP] = TTI::getOperandInfo(Op1);
6367 assert(Op0->getType()->getScalarSizeInBits() == 1 &&
6368 Op1->getType()->getScalarSizeInBits() == 1);
6369
6370 return TTI.getArithmeticInstrCost(
6371 match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And,
6372 VectorTy, CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, {Op0, Op1}, I);
6373 }
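// Example: "select i1 %x, i1 true, i1 %y" is costed as a vector Or and
// "select i1 %x, i1 %y, i1 false" as a vector And, matching the i1 folds noted
// above, instead of as a select on a widened condition.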
6374
6375 Type *CondTy = SI->getCondition()->getType();
6376 if (!ScalarCond)
6377 CondTy = VectorType::get(CondTy, VF);
6378
6380 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
6381 Pred = Cmp->getPredicate();
6382 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
6383 CostKind, {TTI::OK_AnyValue, TTI::OP_None},
6384 {TTI::OK_AnyValue, TTI::OP_None}, I);
6385 }
6386 case Instruction::ICmp:
6387 case Instruction::FCmp: {
6388 Type *ValTy = I->getOperand(0)->getType();
6389
6391 [[maybe_unused]] Instruction *Op0AsInstruction =
6392 dyn_cast<Instruction>(I->getOperand(0));
6393 assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) ||
6394 MinBWs[I] == MinBWs[Op0AsInstruction]) &&
6395 "if both the operand and the compare are marked for "
6396 "truncation, they must have the same bitwidth");
6397 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[I]);
6398 }
6399
6400 VectorTy = toVectorTy(ValTy, VF);
6401 return TTI.getCmpSelInstrCost(
6402 I->getOpcode(), VectorTy, CmpInst::makeCmpResultType(VectorTy),
6403 cast<CmpInst>(I)->getPredicate(), CostKind,
6404 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, I);
6405 }
6406 case Instruction::Store:
6407 case Instruction::Load: {
6408 ElementCount Width = VF;
6409 if (Width.isVector()) {
6410 InstWidening Decision = getWideningDecision(I, Width);
6411 assert(Decision != CM_Unknown &&
6412 "CM decision should be taken at this point");
6415 if (Decision == CM_Scalarize)
6416 Width = ElementCount::getFixed(1);
6417 }
6418 VectorTy = toVectorTy(getLoadStoreType(I), Width);
6419 return getMemoryInstructionCost(I, VF);
6420 }
6421 case Instruction::BitCast:
6422 if (I->getType()->isPointerTy())
6423 return 0;
6424 [[fallthrough]];
6425 case Instruction::ZExt:
6426 case Instruction::SExt:
6427 case Instruction::FPToUI:
6428 case Instruction::FPToSI:
6429 case Instruction::FPExt:
6430 case Instruction::PtrToInt:
6431 case Instruction::IntToPtr:
6432 case Instruction::SIToFP:
6433 case Instruction::UIToFP:
6434 case Instruction::Trunc:
6435 case Instruction::FPTrunc: {
6436 // Computes the CastContextHint from a Load/Store instruction.
6437 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
6439 "Expected a load or a store!");
6440
6441 if (VF.isScalar() || !TheLoop->contains(I))
6443
6444 switch (getWideningDecision(I, VF)) {
6456 llvm_unreachable("Instr did not go through cost modelling?");
6459 llvm_unreachable_internal("Instr has invalid widening decision");
6460 }
6461
6462 llvm_unreachable("Unhandled case!");
6463 };
6464
6465 unsigned Opcode = I->getOpcode();
6467 // For Trunc, the context is the only user, which must be a StoreInst.
6468 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
6469 if (I->hasOneUse())
6470 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
6471 CCH = ComputeCCH(Store);
6472 }
6473 // For Z/Sext, the context is the operand, which must be a LoadInst.
6474 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
6475 Opcode == Instruction::FPExt) {
6476 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
6477 CCH = ComputeCCH(Load);
6478 }
6479
6480 // We optimize the truncation of induction variables having constant
6481 // integer steps. The cost of these truncations is the same as the scalar
6482 // operation.
6483 if (isOptimizableIVTruncate(I, VF)) {
6484 auto *Trunc = cast<TruncInst>(I);
6485 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6486 Trunc->getSrcTy(), CCH, CostKind, Trunc);
6487 }
6488
6489 // Detect reduction patterns
6490 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6491 return *RedCost;
6492
6493 Type *SrcScalarTy = I->getOperand(0)->getType();
6494 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6495 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6496 SrcScalarTy =
6497 IntegerType::get(SrcScalarTy->getContext(), MinBWs[Op0AsInstruction]);
6498 Type *SrcVecTy =
6499 VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6500
6502 // If the result type is <= the source type, there will be no extend
6503 // after truncating the users to the minimal required bitwidth.
6504 if (VectorTy->getScalarSizeInBits() <= SrcVecTy->getScalarSizeInBits() &&
6505 (I->getOpcode() == Instruction::ZExt ||
6506 I->getOpcode() == Instruction::SExt))
6507 return 0;
6508 }
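// Example: if MinBWs records that an i32 value zero-extended from an i8 load
// only needs 8 bits, the extend's result is narrowed back to i8; the result is
// then no wider than its source, no extend remains after truncating the users,
// and the cast is costed as free.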
6509
6510 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
6511 }
6512 case Instruction::Call:
6513 return getVectorCallCost(cast<CallInst>(I), VF);
6514 case Instruction::ExtractValue:
6515 return TTI.getInstructionCost(I, CostKind);
6516 case Instruction::Alloca:
6517 // We cannot easily widen alloca to a scalable alloca, as
6518 // the result would need to be a vector of pointers.
6519 if (VF.isScalable())
6520 return InstructionCost::getInvalid();
6521 return TTI.getArithmeticInstrCost(Instruction::Mul, RetTy, CostKind);
6522 default:
6523 // This opcode is unknown. Assume that it is the same as 'mul'.
6524 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6525 } // end of switch.
6526}
6527
6529 // Ignore ephemeral values.
6531
6532 SmallVector<Value *, 4> DeadInterleavePointerOps;
6534
6535 // If a scalar epilogue is required, users outside the loop will use
6536 // live-outs from the scalar epilogue rather than from the vector loop.
6537 // Ignore such users in that case.
6538 bool RequiresScalarEpilogue = requiresScalarEpilogue(true);
6539 auto IsLiveOutDead = [this, RequiresScalarEpilogue](User *U) {
6540 return RequiresScalarEpilogue &&
6541 !TheLoop->contains(cast<Instruction>(U)->getParent());
6542 };
6543
6545 DFS.perform(LI);
6546 for (BasicBlock *BB : reverse(make_range(DFS.beginRPO(), DFS.endRPO())))
6547 for (Instruction &I : reverse(*BB)) {
6548 if (VecValuesToIgnore.contains(&I) || ValuesToIgnore.contains(&I))
6549 continue;
6550
6551 // Add instructions that would be trivially dead and are only used by
6552 // already-ignored values to DeadOps, to seed the worklist.
6554 all_of(I.users(), [this, IsLiveOutDead](User *U) {
6555 return VecValuesToIgnore.contains(U) ||
6556 ValuesToIgnore.contains(U) || IsLiveOutDead(U);
6557 }))
6558 DeadOps.push_back(&I);
6559
6560 // For interleave groups, we only create a pointer for the start of the
6561 // interleave group. Queue up addresses of group members except the insert
6562 // position for further processing.
6563 if (isAccessInterleaved(&I)) {
6564 auto *Group = getInterleavedAccessGroup(&I);
6565 if (Group->getInsertPos() == &I)
6566 continue;
6567 Value *PointerOp = getLoadStorePointerOperand(&I);
6568 DeadInterleavePointerOps.push_back(PointerOp);
6569 }
6570
6571 // Queue branches for analysis. They are dead if their successors only
6572 // contain dead instructions.
6573 if (auto *Br = dyn_cast<BranchInst>(&I)) {
6574 if (Br->isConditional())
6575 DeadOps.push_back(&I);
6576 }
6577 }
6578
6579 // Mark ops feeding interleave group members as free, if they are only used
6580 // by other dead computations.
6581 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
6582 auto *Op = dyn_cast<Instruction>(DeadInterleavePointerOps[I]);
6583 if (!Op || !TheLoop->contains(Op) || any_of(Op->users(), [this](User *U) {
6584 Instruction *UI = cast<Instruction>(U);
6585 return !VecValuesToIgnore.contains(U) &&
6586 (!isAccessInterleaved(UI) ||
6587 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
6588 }))
6589 continue;
6590 VecValuesToIgnore.insert(Op);
6591 append_range(DeadInterleavePointerOps, Op->operands());
6592 }
6593
6594 // Mark ops that would be trivially dead and are only used by ignored
6595 // instructions as free.
6596 BasicBlock *Header = TheLoop->getHeader();
6597
6598 // Returns true if the block contains only dead instructions. Such blocks will
6599 // be removed by VPlan-to-VPlan transforms and won't be considered by the
6600 // VPlan-based cost model, so skip them in the legacy cost-model as well.
6601 auto IsEmptyBlock = [this](BasicBlock *BB) {
6602 return all_of(*BB, [this](Instruction &I) {
6603 return ValuesToIgnore.contains(&I) || VecValuesToIgnore.contains(&I) ||
6604 (isa<BranchInst>(&I) && !cast<BranchInst>(&I)->isConditional());
6605 });
6606 };
6607 for (unsigned I = 0; I != DeadOps.size(); ++I) {
6608 auto *Op = dyn_cast<Instruction>(DeadOps[I]);
6609
6610 // Check if the branch should be considered dead.
6611 if (auto *Br = dyn_cast_or_null<BranchInst>(Op)) {
6612 BasicBlock *ThenBB = Br->getSuccessor(0);
6613 BasicBlock *ElseBB = Br->getSuccessor(1);
6614 // Don't consider branches leaving the loop for simplification.
6615 if (!TheLoop->contains(ThenBB) || !TheLoop->contains(ElseBB))
6616 continue;
6617 bool ThenEmpty = IsEmptyBlock(ThenBB);
6618 bool ElseEmpty = IsEmptyBlock(ElseBB);
6619 if ((ThenEmpty && ElseEmpty) ||
6620 (ThenEmpty && ThenBB->getSingleSuccessor() == ElseBB &&
6621 ElseBB->phis().empty()) ||
6622 (ElseEmpty && ElseBB->getSingleSuccessor() == ThenBB &&
6623 ThenBB->phis().empty())) {
6624 VecValuesToIgnore.insert(Br);
6625 DeadOps.push_back(Br->getCondition());
6626 }
6627 continue;
6628 }
6629
6630 // Skip any op that shouldn't be considered dead.
6631 if (!Op || !TheLoop->contains(Op) ||
6632 (isa<PHINode>(Op) && Op->getParent() == Header) ||
6634 any_of(Op->users(), [this, IsLiveOutDead](User *U) {
6635 return !VecValuesToIgnore.contains(U) &&
6636 !ValuesToIgnore.contains(U) && !IsLiveOutDead(U);
6637 }))
6638 continue;
6639
6640 // If all of Op's users are in ValuesToIgnore, add it to ValuesToIgnore
6641 // which applies for both scalar and vector versions. Otherwise it is only
6642 // dead in vector versions, so only add it to VecValuesToIgnore.
6643 if (all_of(Op->users(),
6644 [this](User *U) { return ValuesToIgnore.contains(U); }))
6645 ValuesToIgnore.insert(Op);
6646
6647 VecValuesToIgnore.insert(Op);
6648 append_range(DeadOps, Op->operands());
6649 }
6650
6651 // Ignore type-promoting instructions we identified during reduction
6652 // detection.
6653 for (const auto &Reduction : Legal->getReductionVars()) {
6654 const RecurrenceDescriptor &RedDes = Reduction.second;
6655 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6656 VecValuesToIgnore.insert_range(Casts);
6657 }
6658 // Ignore type-casting instructions we identified during induction
6659 // detection.
6660 for (const auto &Induction : Legal->getInductionVars()) {
6661 const InductionDescriptor &IndDes = Induction.second;
6662 VecValuesToIgnore.insert_range(IndDes.getCastInsts());
6663 }
6664}
6665
6667 // Avoid duplicating work finding in-loop reductions.
6668 if (!InLoopReductions.empty())
6669 return;
6670
6671 for (const auto &Reduction : Legal->getReductionVars()) {
6672 PHINode *Phi = Reduction.first;
6673 const RecurrenceDescriptor &RdxDesc = Reduction.second;
6674
6675 // Multi-use reductions (e.g., used in FindLastIV patterns) are handled
6676 // separately and should not be considered for in-loop reductions.
6677 if (RdxDesc.hasUsesOutsideReductionChain())
6678 continue;
6679
6680 // We don't collect reductions that are type promoted (yet).
6681 if (RdxDesc.getRecurrenceType() != Phi->getType())
6682 continue;
6683
6684 // In-loop AnyOf and FindIV reductions are not yet supported.
6685 RecurKind Kind = RdxDesc.getRecurrenceKind();
6689 continue;
6690
6691 // If the target would prefer this reduction to happen "in-loop", then we
6692 // want to record it as such.
6693 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
6694 !TTI.preferInLoopReduction(Kind, Phi->getType()))
6695 continue;
6696
6697 // Check that we can correctly put the reductions into the loop, by
6698 // finding the chain of operations that leads from the phi to the loop
6699 // exit value.
6700 SmallVector<Instruction *, 4> ReductionOperations =
6701 RdxDesc.getReductionOpChain(Phi, TheLoop);
6702 bool InLoop = !ReductionOperations.empty();
6703
6704 if (InLoop) {
6705 InLoopReductions.insert(Phi);
6706 // Add the elements to InLoopReductionImmediateChains for cost modelling.
6707 Instruction *LastChain = Phi;
6708 for (auto *I : ReductionOperations) {
6709 InLoopReductionImmediateChains[I] = LastChain;
6710 LastChain = I;
6711 }
6712 }
6713 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
6714 << " reduction for phi: " << *Phi << "\n");
6715 }
6716}
6717
6718// This function will select a scalable VF if the target supports scalable
6719// vectors and a fixed one otherwise.
6720// TODO: we could return a pair of values that specify the max VF and
6721// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
6722// `buildVPlans(VF, VF)`. We cannot do this yet because VPlan currently
6723// doesn't have a cost model that can choose which plan to execute if
6724// more than one is generated.
6727 unsigned WidestType;
6728 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
6729
6731 TTI.enableScalableVectorization()
6734
6735 TypeSize RegSize = TTI.getRegisterBitWidth(RegKind);
6736 unsigned N = RegSize.getKnownMinValue() / WidestType;
6737 return ElementCount::get(N, RegSize.isScalable());
6738}
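// Worked example (hypothetical target): with 128-bit vector registers and a
// widest loop type of 32 bits this returns a fixed VF of 128 / 32 = 4; if the
// target enables scalable vectorization and reports a known minimum register
// size of 128 bits, the result is the scalable VF "vscale x 4" instead.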
6739
6742 ElementCount VF = UserVF;
6743 // Outer loop handling: outer loops may require CFG and instruction-level
6744 // transformations before even evaluating whether vectorization is profitable.
6745 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6746 // the vectorization pipeline.
6747 if (!OrigLoop->isInnermost()) {
6748 // If the user doesn't provide a vectorization factor, determine a
6749 // reasonable one.
6750 if (UserVF.isZero()) {
6751 VF = determineVPlanVF(TTI, CM);
6752 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6753
6754 // Make sure we have a VF > 1 for stress testing.
6755 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
6756 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6757 << "overriding computed VF.\n");
6758 VF = ElementCount::getFixed(4);
6759 }
6760 } else if (UserVF.isScalable() && !TTI.supportsScalableVectors() &&
6762 LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
6763 << "not supported by the target.\n");
6765 "Scalable vectorization requested but not supported by the target",
6766 "the scalable user-specified vectorization width for outer-loop "
6767 "vectorization cannot be used because the target does not support "
6768 "scalable vectors.",
6769 "ScalableVFUnfeasible", ORE, OrigLoop);
6771 }
6772 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6773 assert(isPowerOf2_32(VF.getKnownMinValue()) &&
6774 "VF needs to be a power of two");
6775 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
6776 << "VF " << VF << " to build VPlans.\n");
6777 buildVPlans(VF, VF);
6778
6779 if (VPlans.empty())
6781
6782 // For VPlan build stress testing, we bail out after VPlan construction.
6785
6786 return {VF, 0 /*Cost*/, 0 /* ScalarCost */};
6787 }
6788
6789 LLVM_DEBUG(
6790 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6791 "VPlan-native path.\n");
6793}
6794
6795void LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
6796 assert(OrigLoop->isInnermost() && "Inner loop expected.");
6797 CM.collectValuesToIgnore();
6798 CM.collectElementTypesForWidening();
6799
6800 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
6801 if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
6802 return;
6803
6804 // Invalidate interleave groups if all blocks of the loop will be predicated.
6805 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
6807 LLVM_DEBUG(
6808 dbgs()
6809 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6810 "which requires masked-interleaved support.\n");
6811 if (CM.InterleaveInfo.invalidateGroups())
6812 // Invalidating interleave groups also requires invalidating all decisions
6813 // based on them, which includes widening decisions and uniform and scalar
6814 // values.
6815 CM.invalidateCostModelingDecisions();
6816 }
6817
6818 if (CM.foldTailByMasking())
6819 Legal->prepareToFoldTailByMasking();
6820
6821 ElementCount MaxUserVF =
6822 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
6823 if (UserVF) {
6824 if (!ElementCount::isKnownLE(UserVF, MaxUserVF)) {
6826 "UserVF ignored because it may be larger than the maximal safe VF",
6827 "InvalidUserVF", ORE, OrigLoop);
6828 } else {
6829 assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
6830 "VF needs to be a power of two");
6831 // Collect the instructions (and their associated costs) that will be more
6832 // profitable to scalarize.
6833 CM.collectInLoopReductions();
6834 if (CM.selectUserVectorizationFactor(UserVF)) {
6835 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6836 buildVPlansWithVPRecipes(UserVF, UserVF);
6838 return;
6839 }
6840 reportVectorizationInfo("UserVF ignored because of invalid costs.",
6841 "InvalidCost", ORE, OrigLoop);
6842 }
6843 }
6844
6845 // Collect the Vectorization Factor Candidates.
6846 SmallVector<ElementCount> VFCandidates;
6847 for (auto VF = ElementCount::getFixed(1);
6848 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
6849 VFCandidates.push_back(VF);
6850 for (auto VF = ElementCount::getScalable(1);
6851 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
6852 VFCandidates.push_back(VF);
6853
6854 CM.collectInLoopReductions();
6855 for (const auto &VF : VFCandidates) {
6856 // Collect Uniform and Scalar instructions after vectorization with VF.
6857 CM.collectNonVectorizedAndSetWideningDecisions(VF);
6858 }
6859
6860 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
6861 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
6862
6864}
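// Example: with MaxFactors.FixedVF = 8 and MaxFactors.ScalableVF = vscale x 4,
// the candidates considered above are {1, 2, 4, 8} and
// {vscale x 1, vscale x 2, vscale x 4}, and VPlans are then built to cover the
// fixed and scalable ranges separately.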
6865
6867 ElementCount VF) const {
6868 InstructionCost Cost = CM.getInstructionCost(UI, VF);
6869 if (Cost.isValid() && ForceTargetInstructionCost.getNumOccurrences())
6871 return Cost;
6872}
6873
6875 ElementCount VF) const {
6876 return CM.isUniformAfterVectorization(I, VF);
6877}
6878
6879bool VPCostContext::skipCostComputation(Instruction *UI, bool IsVector) const {
6880 return CM.ValuesToIgnore.contains(UI) ||
6881 (IsVector && CM.VecValuesToIgnore.contains(UI)) ||
6882 SkipCostComputation.contains(UI);
6883}
6884
6886 return CM.getPredBlockCostDivisor(CostKind, BB);
6887}
6888
6890LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
6891 VPCostContext &CostCtx) const {
6893 // Cost modeling for inductions is inaccurate in the legacy cost model
6894 // compared to the recipes that are generated. To match it initially during
6895 // VPlan cost-model bring-up, directly use the induction costs from the legacy
6896 // cost model. Note that we do this as pre-processing; the VPlan may not have
6897 // any recipes associated with the original induction increment instruction
6898 // and may replace truncates with VPWidenIntOrFpInductionRecipe. We precompute
6899 // the cost of induction phis and increments (both that are represented by
6900 // recipes and those that are not), to avoid distinguishing between them here,
6901 // and skip all recipes that represent induction phis and increments (the
6902 // former case) later on, if they exist, to avoid counting them twice.
6903 // Similarly we pre-compute the cost of any optimized truncates.
6904 // TODO: Switch to more accurate costing based on VPlan.
6905 for (const auto &[IV, IndDesc] : Legal->getInductionVars()) {
6907 IV->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
6908 SmallVector<Instruction *> IVInsts = {IVInc};
6909 for (unsigned I = 0; I != IVInsts.size(); I++) {
6910 for (Value *Op : IVInsts[I]->operands()) {
6911 auto *OpI = dyn_cast<Instruction>(Op);
6912 if (Op == IV || !OpI || !OrigLoop->contains(OpI) || !Op->hasOneUse())
6913 continue;
6914 IVInsts.push_back(OpI);
6915 }
6916 }
6917 IVInsts.push_back(IV);
6918 for (User *U : IV->users()) {
6919 auto *CI = cast<Instruction>(U);
6920 if (!CostCtx.CM.isOptimizableIVTruncate(CI, VF))
6921 continue;
6922 IVInsts.push_back(CI);
6923 }
6924
6925 // If the vector loop gets executed exactly once with the given VF, ignore
6926 // the costs of comparison and induction instructions, as they'll get
6927 // simplified away.
6928 // TODO: Remove this code after stepping away from the legacy cost model and
6929 // adding code to simplify VPlans before calculating their costs.
6930 auto TC = getSmallConstantTripCount(PSE.getSE(), OrigLoop);
6931 if (TC == VF && !CM.foldTailByMasking())
6932 addFullyUnrolledInstructionsToIgnore(OrigLoop, Legal->getInductionVars(),
6933 CostCtx.SkipCostComputation);
6934
6935 for (Instruction *IVInst : IVInsts) {
6936 if (CostCtx.skipCostComputation(IVInst, VF.isVector()))
6937 continue;
6938 InstructionCost InductionCost = CostCtx.getLegacyCost(IVInst, VF);
6939 LLVM_DEBUG({
6940 dbgs() << "Cost of " << InductionCost << " for VF " << VF
6941 << ": induction instruction " << *IVInst << "\n";
6942 });
6943 Cost += InductionCost;
6944 CostCtx.SkipCostComputation.insert(IVInst);
6945 }
6946 }
6947
6948 /// Compute the cost of all exiting conditions of the loop using the legacy
6949 /// cost model. This is to match the legacy behavior, which adds the cost of
6950 /// all exit conditions. Note that this over-estimates the cost, as there will
6951 /// be a single condition to control the vector loop.
6953 CM.TheLoop->getExitingBlocks(Exiting);
6954 SetVector<Instruction *> ExitInstrs;
6955 // Collect all exit conditions.
6956 for (BasicBlock *EB : Exiting) {
6957 auto *Term = dyn_cast<BranchInst>(EB->getTerminator());
6958 if (!Term || CostCtx.skipCostComputation(Term, VF.isVector()))
6959 continue;
6960 if (auto *CondI = dyn_cast<Instruction>(Term->getOperand(0))) {
6961 ExitInstrs.insert(CondI);
6962 }
6963 }
6964 // Compute the cost of all instructions only feeding the exit conditions.
6965 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
6966 Instruction *CondI = ExitInstrs[I];
6967 if (!OrigLoop->contains(CondI) ||
6968 !CostCtx.SkipCostComputation.insert(CondI).second)
6969 continue;
6970 InstructionCost CondICost = CostCtx.getLegacyCost(CondI, VF);
6971 LLVM_DEBUG({
6972 dbgs() << "Cost of " << CondICost << " for VF " << VF
6973 << ": exit condition instruction " << *CondI << "\n";
6974 });
6975 Cost += CondICost;
6976 for (Value *Op : CondI->operands()) {
6977 auto *OpI = dyn_cast<Instruction>(Op);
6978 if (!OpI || CostCtx.skipCostComputation(OpI, VF.isVector()) ||
6979 any_of(OpI->users(), [&ExitInstrs](User *U) {
6980 return !ExitInstrs.contains(cast<Instruction>(U));
6981 }))
6982 continue;
6983 ExitInstrs.insert(OpI);
6984 }
6985 }
6986
6987 // Pre-compute the costs for branches except for the backedge, as the number
6988 // of replicate regions in a VPlan may not directly match the number of
6989 // branches, which would lead to different decisions.
6990 // TODO: Compute cost of branches for each replicate region in the VPlan,
6991 // which is more accurate than the legacy cost model.
6992 for (BasicBlock *BB : OrigLoop->blocks()) {
6993 if (CostCtx.skipCostComputation(BB->getTerminator(), VF.isVector()))
6994 continue;
6995 CostCtx.SkipCostComputation.insert(BB->getTerminator());
6996 if (BB == OrigLoop->getLoopLatch())
6997 continue;
6998 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
6999 Cost += BranchCost;
7000 }
7001
7002 // Pre-compute costs for instructions that are forced-scalar or profitable to
7003 // scalarize. Their costs will be computed separately in the legacy cost
7004 // model.
7005 for (Instruction *ForcedScalar : CM.ForcedScalars[VF]) {
7006 if (CostCtx.skipCostComputation(ForcedScalar, VF.isVector()))
7007 continue;
7008 CostCtx.SkipCostComputation.insert(ForcedScalar);
7009 InstructionCost ForcedCost = CostCtx.getLegacyCost(ForcedScalar, VF);
7010 LLVM_DEBUG({
7011 dbgs() << "Cost of " << ForcedCost << " for VF " << VF
7012 << ": forced scalar " << *ForcedScalar << "\n";
7013 });
7014 Cost += ForcedCost;
7015 }
7016 for (const auto &[Scalarized, ScalarCost] : CM.InstsToScalarize[VF]) {
7017 if (CostCtx.skipCostComputation(Scalarized, VF.isVector()))
7018 continue;
7019 CostCtx.SkipCostComputation.insert(Scalarized);
7020 LLVM_DEBUG({
7021 dbgs() << "Cost of " << ScalarCost << " for VF " << VF
7022 << ": profitable to scalarize " << *Scalarized << "\n";
7023 });
7024 Cost += ScalarCost;
7025 }
7026
7027 return Cost;
7028}
7029
7030InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan,
7031 ElementCount VF) const {
7032 VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind, PSE, OrigLoop);
7033 InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx);
7034
7035 // Now compute and add the VPlan-based cost.
7036 Cost += Plan.cost(VF, CostCtx);
7037#ifndef NDEBUG
7038 unsigned EstimatedWidth = estimateElementCount(VF, CM.getVScaleForTuning());
7039 LLVM_DEBUG(dbgs() << "Cost for VF " << VF << ": " << Cost
7040 << " (Estimated cost per lane: ");
7041 if (Cost.isValid()) {
7042 double CostPerLane = double(Cost.getValue()) / EstimatedWidth;
7043 LLVM_DEBUG(dbgs() << format("%.1f", CostPerLane));
7044 } else /* No point dividing an invalid cost - it will still be invalid */
7045 LLVM_DEBUG(dbgs() << "Invalid");
7046 LLVM_DEBUG(dbgs() << ")\n");
7047#endif
7048 return Cost;
7049}
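// Example of the per-lane estimate printed above: for VF = vscale x 4 with a
// tuning vscale of 2 the estimated element count is 8, so a total VPlan cost
// of 24 is reported as 3.0 per lane.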
7050
7051#ifndef NDEBUG
7052/// Return true if the original loop \p TheLoop contains any instructions that do
7053/// not have corresponding recipes in \p Plan and are not marked to be ignored
7054/// in \p CostCtx. This means the VPlan contains simplifications that the legacy
7055/// cost-model did not account for.
7057 VPCostContext &CostCtx,
7058 Loop *TheLoop,
7059 ElementCount VF) {
7060 // First collect all instructions for the recipes in Plan.
7061 auto GetInstructionForCost = [](const VPRecipeBase *R) -> Instruction * {
7062 if (auto *S = dyn_cast<VPSingleDefRecipe>(R))
7063 return dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
7064 if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(R))
7065 return &WidenMem->getIngredient();
7066 return nullptr;
7067 };
7068
7069 // Check if a select for a safe divisor was hoisted to the pre-header. If so,
7070 // the select doesn't need to be considered for the vector loop cost; go with
7071 // the more accurate VPlan-based cost model.
7072 for (VPRecipeBase &R : *Plan.getVectorPreheader()) {
7073 auto *VPI = dyn_cast<VPInstruction>(&R);
7074 if (!VPI || VPI->getOpcode() != Instruction::Select)
7075 continue;
7076
7077 if (auto *WR = dyn_cast_or_null<VPWidenRecipe>(VPI->getSingleUser())) {
7078 switch (WR->getOpcode()) {
7079 case Instruction::UDiv:
7080 case Instruction::SDiv:
7081 case Instruction::URem:
7082 case Instruction::SRem:
7083 return true;
7084 default:
7085 break;
7086 }
7087 }
7088 }
7089
7090 DenseSet<Instruction *> SeenInstrs;
7091 auto Iter = vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry());
7093 for (VPRecipeBase &R : *VPBB) {
7094 if (auto *IR = dyn_cast<VPInterleaveRecipe>(&R)) {
7095 auto *IG = IR->getInterleaveGroup();
7096 unsigned NumMembers = IG->getNumMembers();
7097 for (unsigned I = 0; I != NumMembers; ++I) {
7098 if (Instruction *M = IG->getMember(I))
7099 SeenInstrs.insert(M);
7100 }
7101 continue;
7102 }
7103 // Unused FOR splices are removed by VPlan transforms, so the VPlan-based
7104 // cost model won't cost them while the legacy model will.
7105 if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) {
7106 using namespace VPlanPatternMatch;
7107 if (none_of(FOR->users(),
7108 match_fn(m_VPInstruction<
7110 return true;
7111 }
7112 // The VPlan-based cost model is more accurate for partial reductions and
7113 // comparing against the legacy cost isn't desirable.
7114 if (auto *VPR = dyn_cast<VPReductionRecipe>(&R))
7115 if (VPR->isPartialReduction())
7116 return true;
7117
7118 // The VPlan-based cost model can analyze if recipes are scalar
7119 // recursively, but the legacy cost model cannot.
7120 if (auto *WidenMemR = dyn_cast<VPWidenMemoryRecipe>(&R)) {
7121 auto *AddrI = dyn_cast<Instruction>(
7122 getLoadStorePointerOperand(&WidenMemR->getIngredient()));
7123 if (AddrI && vputils::isSingleScalar(WidenMemR->getAddr()) !=
7124 CostCtx.isLegacyUniformAfterVectorization(AddrI, VF))
7125 return true;
7126
7127 if (WidenMemR->isReverse()) {
7128 // If the stored value of a reverse store is invariant, LICM will
7129 // hoist the reverse operation to the preheader. In this case, the
7130 // result of the VPlan-based cost model will diverge from that of
7131 // the legacy model.
7132 if (auto *StoreR = dyn_cast<VPWidenStoreRecipe>(WidenMemR))
7133 if (StoreR->getStoredValue()->isDefinedOutsideLoopRegions())
7134 return true;
7135
7136 if (auto *StoreR = dyn_cast<VPWidenStoreEVLRecipe>(WidenMemR))
7137 if (StoreR->getStoredValue()->isDefinedOutsideLoopRegions())
7138 return true;
7139 }
7140 }
7141
7142 // The legacy cost model costs non-header phis with a scalar VF as a phi,
7143 // but scalar unrolled VPlans will have VPBlendRecipes which emit selects.
7144 if (isa<VPBlendRecipe>(&R) &&
7145 vputils::onlyFirstLaneUsed(R.getVPSingleValue()))
7146 return true;
7147
7148 /// If a VPlan transform folded a recipe to one producing a single-scalar,
7149 /// but the original instruction wasn't uniform-after-vectorization in the
7150 /// legacy cost model, the legacy cost overestimates the actual cost.
7151 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
7152 if (RepR->isSingleScalar() &&
7154 RepR->getUnderlyingInstr(), VF))
7155 return true;
7156 }
7157 if (Instruction *UI = GetInstructionForCost(&R)) {
7158 // If we adjusted the predicate of the recipe, the cost in the legacy
7159 // cost model may be different.
7160 using namespace VPlanPatternMatch;
7161 CmpPredicate Pred;
7162 if (match(&R, m_Cmp(Pred, m_VPValue(), m_VPValue())) &&
7163 cast<VPRecipeWithIRFlags>(R).getPredicate() !=
7164 cast<CmpInst>(UI)->getPredicate())
7165 return true;
7166 SeenInstrs.insert(UI);
7167 }
7168 }
7169 }
7170
7171 // Return true if the loop contains any instructions that are not also part of
7172 // the VPlan or are skipped for VPlan-based cost computations. This indicates
7173 // that the VPlan contains extra simplifications.
7174 return any_of(TheLoop->blocks(), [&SeenInstrs, &CostCtx,
7175 TheLoop](BasicBlock *BB) {
7176 return any_of(*BB, [&SeenInstrs, &CostCtx, TheLoop, BB](Instruction &I) {
7177 // Skip induction phis when checking for simplifications, as they may not
7178 // be lowered directly to a corresponding PHI recipe.
7179 if (isa<PHINode>(&I) && BB == TheLoop->getHeader() &&
7180 CostCtx.CM.Legal->isInductionPhi(cast<PHINode>(&I)))
7181 return false;
7182 return !SeenInstrs.contains(&I) && !CostCtx.skipCostComputation(&I, true);
7183 });
7184 });
7185}
7186#endif
7187
7189 if (VPlans.empty())
7191 // If there is a single VPlan with a single VF, return it directly.
7192 VPlan &FirstPlan = *VPlans[0];
7193 if (VPlans.size() == 1 && size(FirstPlan.vectorFactors()) == 1)
7194 return {*FirstPlan.vectorFactors().begin(), 0, 0};
7195
7196 LLVM_DEBUG(dbgs() << "LV: Computing best VF using cost kind: "
7197 << (CM.CostKind == TTI::TCK_RecipThroughput
7198 ? "Reciprocal Throughput\n"
7199 : CM.CostKind == TTI::TCK_Latency
7200 ? "Instruction Latency\n"
7201 : CM.CostKind == TTI::TCK_CodeSize ? "Code Size\n"
7202 : CM.CostKind == TTI::TCK_SizeAndLatency
7203 ? "Code Size and Latency\n"
7204 : "Unknown\n"));
7205
7207 assert(hasPlanWithVF(ScalarVF) &&
7208 "More than a single plan/VF w/o any plan having scalar VF");
7209
7210 // TODO: Compute scalar cost using VPlan-based cost model.
7211 InstructionCost ScalarCost = CM.expectedCost(ScalarVF);
7212 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ScalarCost << ".\n");
7213 VectorizationFactor ScalarFactor(ScalarVF, ScalarCost, ScalarCost);
7214 VectorizationFactor BestFactor = ScalarFactor;
7215
7216 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
7217 if (ForceVectorization) {
7218 // Ignore scalar width, because the user explicitly wants vectorization.
7219 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
7220 // evaluation.
7221 BestFactor.Cost = InstructionCost::getMax();
7222 }
7223
7224 for (auto &P : VPlans) {
7225 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
7226 P->vectorFactors().end());
7227
7229 if (any_of(VFs, [this](ElementCount VF) {
7230 return CM.shouldConsiderRegPressureForVF(VF);
7231 }))
7232 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
7233
7234 for (unsigned I = 0; I < VFs.size(); I++) {
7235 ElementCount VF = VFs[I];
7236 if (VF.isScalar())
7237 continue;
7238 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
7239 LLVM_DEBUG(
7240 dbgs()
7241 << "LV: Not considering vector loop of width " << VF
7242 << " because it will not generate any vector instructions.\n");
7243 continue;
7244 }
7245 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
7246 LLVM_DEBUG(
7247 dbgs()
7248 << "LV: Not considering vector loop of width " << VF
7249 << " because it would cause replicated blocks to be generated,"
7250 << " which isn't allowed when optimizing for size.\n");
7251 continue;
7252 }
7253
7254 InstructionCost Cost = cost(*P, VF);
7255 VectorizationFactor CurrentFactor(VF, Cost, ScalarCost);
7256
7257 if (CM.shouldConsiderRegPressureForVF(VF) &&
7258 RUs[I].exceedsMaxNumRegs(TTI, ForceTargetNumVectorRegs)) {
7259 LLVM_DEBUG(dbgs() << "LV(REG): Not considering vector loop of width "
7260 << VF << " because it uses too many registers\n");
7261 continue;
7262 }
7263
7264 if (isMoreProfitable(CurrentFactor, BestFactor, P->hasScalarTail()))
7265 BestFactor = CurrentFactor;
7266
7267 // If profitable, add it to the ProfitableVFs list.
7268 if (isMoreProfitable(CurrentFactor, ScalarFactor, P->hasScalarTail()))
7269 ProfitableVFs.push_back(CurrentFactor);
7270 }
7271 }
7272
7273#ifndef NDEBUG
7274 // Select the optimal vectorization factor according to the legacy cost-model.
7275 // This is now only used to verify the decisions by the new VPlan-based
7276 // cost-model and will be retired once the VPlan-based cost-model is
7277 // stabilized.
7278 VectorizationFactor LegacyVF = selectVectorizationFactor();
7279 VPlan &BestPlan = getPlanFor(BestFactor.Width);
7280
7281 // Pre-compute the cost and use it to check if BestPlan contains any
7282 // simplifications not accounted for in the legacy cost model. If that's the
7283 // case, don't trigger the assertion, as the extra simplifications may cause a
7284 // different VF to be picked by the VPlan-based cost model.
7285 VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind, CM.PSE,
7286 OrigLoop);
7287 precomputeCosts(BestPlan, BestFactor.Width, CostCtx);
7288 // Verify that the VPlan-based and legacy cost models agree, except for
7289 // * VPlans with early exits,
7290 // * VPlans with additional VPlan simplifications,
7291 // * EVL-based VPlans with gather/scatters (the VPlan-based cost model uses
7292 // vp_scatter/vp_gather).
7293 // The legacy cost model doesn't properly model costs for such loops.
7294 bool UsesEVLGatherScatter =
7296 BestPlan.getVectorLoopRegion()->getEntry())),
7297 [](VPBasicBlock *VPBB) {
7298 return any_of(*VPBB, [](VPRecipeBase &R) {
7299 return isa<VPWidenLoadEVLRecipe, VPWidenStoreEVLRecipe>(&R) &&
7300 !cast<VPWidenMemoryRecipe>(&R)->isConsecutive();
7301 });
7302 });
7303 assert(
7304 (BestFactor.Width == LegacyVF.Width || BestPlan.hasEarlyExit() ||
7305 !Legal->getLAI()->getSymbolicStrides().empty() || UsesEVLGatherScatter ||
7307 getPlanFor(BestFactor.Width), CostCtx, OrigLoop, BestFactor.Width) ||
7309 getPlanFor(LegacyVF.Width), CostCtx, OrigLoop, LegacyVF.Width)) &&
7310 " VPlan cost model and legacy cost model disagreed");
7311 assert((BestFactor.Width.isScalar() || BestFactor.ScalarCost > 0) &&
7312 "when vectorizing, the scalar cost must be computed.");
7313#endif
7314
7315 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << BestFactor.Width << ".\n");
7316 return BestFactor;
7317}
7318
7319// If \p EpiResumePhiR is the resume VPPhi for a reduction when vectorizing the
7320// epilogue loop, fix the reduction's scalar PHI node by adding the incoming
7321// value from the main vector loop.
7323 VPPhi *EpiResumePhiR, PHINode &EpiResumePhi, BasicBlock *BypassBlock) {
7324 using namespace VPlanPatternMatch;
7325 // Get the VPInstruction computing the reduction result in the middle block.
7326 // The first operand may not be from the middle block if it is not connected
7327 // to the scalar preheader. In that case, there's nothing to fix.
7328 VPValue *Incoming = EpiResumePhiR->getOperand(0);
7331 auto *EpiRedResult = dyn_cast<VPInstruction>(Incoming);
7332 if (!EpiRedResult)
7333 return;
7334
7335 VPValue *BackedgeVal;
7336 bool IsFindIV = false;
7337 if (EpiRedResult->getOpcode() == VPInstruction::ComputeAnyOfResult ||
7338 EpiRedResult->getOpcode() == VPInstruction::ComputeReductionResult)
7339 BackedgeVal = EpiRedResult->getOperand(EpiRedResult->getNumOperands() - 1);
7340 else if (matchFindIVResult(EpiRedResult, m_VPValue(BackedgeVal), m_VPValue()))
7341 IsFindIV = true;
7342 else
7343 return;
7344
7345 auto *EpiRedHeaderPhi = cast_if_present<VPReductionPHIRecipe>(
7347 if (!EpiRedHeaderPhi) {
7348 match(BackedgeVal,
7350 VPlanPatternMatch::m_VPValue(BackedgeVal),
7352 EpiRedHeaderPhi = cast<VPReductionPHIRecipe>(
7354 }
7355
7356 Value *MainResumeValue;
7357 if (auto *VPI = dyn_cast<VPInstruction>(EpiRedHeaderPhi->getStartValue())) {
7358 assert((VPI->getOpcode() == VPInstruction::Broadcast ||
7359 VPI->getOpcode() == VPInstruction::ReductionStartVector) &&
7360 "unexpected start recipe");
7361 MainResumeValue = VPI->getOperand(0)->getUnderlyingValue();
7362 } else
7363 MainResumeValue = EpiRedHeaderPhi->getStartValue()->getUnderlyingValue();
7364 if (EpiRedResult->getOpcode() == VPInstruction::ComputeAnyOfResult) {
7365 [[maybe_unused]] Value *StartV =
7366 EpiRedResult->getOperand(0)->getLiveInIRValue();
7367 auto *Cmp = cast<ICmpInst>(MainResumeValue);
7368 assert(Cmp->getPredicate() == CmpInst::ICMP_NE &&
7369 "AnyOf expected to start with ICMP_NE");
7370 assert(Cmp->getOperand(1) == StartV &&
7371 "AnyOf expected to start by comparing main resume value to original "
7372 "start value");
7373 MainResumeValue = Cmp->getOperand(0);
7374 } else if (IsFindIV) {
7375 MainResumeValue = cast<SelectInst>(MainResumeValue)->getFalseValue();
7376 }
7377 PHINode *MainResumePhi = cast<PHINode>(MainResumeValue);
7378
7379 // When fixing reductions in the epilogue loop we should already have
7380 // created a bc.merge.rdx Phi after the main vector body. Ensure that we carry
7381 // over the incoming values correctly.
7382 EpiResumePhi.setIncomingValueForBlock(
7383 BypassBlock, MainResumePhi->getIncomingValueForBlock(BypassBlock));
7384}
7385
7387 ElementCount BestVF, unsigned BestUF, VPlan &BestVPlan,
7388 InnerLoopVectorizer &ILV, DominatorTree *DT, bool VectorizingEpilogue) {
7389 assert(BestVPlan.hasVF(BestVF) &&
7390 "Trying to execute plan with unsupported VF");
7391 assert(BestVPlan.hasUF(BestUF) &&
7392 "Trying to execute plan with unsupported UF");
7393 if (BestVPlan.hasEarlyExit())
7394 ++LoopsEarlyExitVectorized;
7395 // TODO: Move to VPlan transform stage once the transition to the VPlan-based
7396 // cost model is complete for better cost estimates.
7397 RUN_VPLAN_PASS(VPlanTransforms::unrollByUF, BestVPlan, BestUF);
7401 bool HasBranchWeights =
7402 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator());
7403 if (HasBranchWeights) {
7404 std::optional<unsigned> VScale = CM.getVScaleForTuning();
7406 BestVPlan, BestVF, VScale);
7407 }
7408
7409 // Checks are the same for all VPlans, added to BestVPlan only for
7410 // compactness.
7411 attachRuntimeChecks(BestVPlan, ILV.RTChecks, HasBranchWeights);
7412
7413 // Retrieve VectorPH now, while it is easier and VPlan still has Regions.
7414 VPBasicBlock *VectorPH = cast<VPBasicBlock>(BestVPlan.getVectorPreheader());
7415
7416 VPlanTransforms::optimizeForVFAndUF(BestVPlan, BestVF, BestUF, PSE);
7419 if (BestVPlan.getEntry()->getSingleSuccessor() ==
7420 BestVPlan.getScalarPreheader()) {
7421 // TODO: The vector loop would be dead, should not even try to vectorize.
7422 ORE->emit([&]() {
7423 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationDead",
7424 OrigLoop->getStartLoc(),
7425 OrigLoop->getHeader())
7426 << "Created vector loop never executes due to insufficient trip "
7427 "count.";
7428 });
7430 }
7431
7433 BestVPlan, BestVF,
7434 TTI.getRegisterBitWidth(BestVF.isScalable()
7438
7440 // Regions are dissolved after optimizing for VF and UF, which completely
7441 // removes unneeded loop regions first.
7443 // Expand BranchOnTwoConds after dissolution, when latch has direct access to
7444 // its successors.
7446 // Canonicalize EVL loops after regions are dissolved.
7450 BestVPlan, VectorPH, CM.foldTailByMasking(),
7451 CM.requiresScalarEpilogue(BestVF.isVector()));
7452 VPlanTransforms::materializeVFAndVFxUF(BestVPlan, VectorPH, BestVF);
7453 VPlanTransforms::cse(BestVPlan);
7455
7456 // 0. Generate SCEV-dependent code in the entry, including TripCount, before
7457 // making any changes to the CFG.
7458 DenseMap<const SCEV *, Value *> ExpandedSCEVs =
7459 VPlanTransforms::expandSCEVs(BestVPlan, *PSE.getSE());
7460 if (!ILV.getTripCount()) {
7461 ILV.setTripCount(BestVPlan.getTripCount()->getLiveInIRValue());
7462 } else {
7463 assert(VectorizingEpilogue && "should only re-use the existing trip "
7464 "count during epilogue vectorization");
7465 }
7466
7467 // Perform the actual loop transformation.
7468 VPTransformState State(&TTI, BestVF, LI, DT, ILV.AC, ILV.Builder, &BestVPlan,
7469 OrigLoop->getParentLoop(),
7470 Legal->getWidestInductionType());
7471
7472#ifdef EXPENSIVE_CHECKS
7473 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
7474#endif
7475
7476 // 1. Set up the skeleton for vectorization, including vector pre-header and
7477 // middle block. The vector loop is created during VPlan execution.
7478 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7480 State.CFG.PrevBB->getSingleSuccessor(), &BestVPlan);
7482
7483 assert(verifyVPlanIsValid(BestVPlan, true /*VerifyLate*/) &&
7484 "final VPlan is invalid");
7485
7486 // After vectorization, the exit blocks of the original loop will have
7487 // additional predecessors. Invalidate SCEVs for the exit phis in case SE
7488 // looked through single-entry phis.
7489 ScalarEvolution &SE = *PSE.getSE();
7490 for (VPIRBasicBlock *Exit : BestVPlan.getExitBlocks()) {
7491 if (!Exit->hasPredecessors())
7492 continue;
7493 for (VPRecipeBase &PhiR : Exit->phis())
7495 &cast<VPIRPhi>(PhiR).getIRPhi());
7496 }
7497 // Forget the original loop and block dispositions.
7498 SE.forgetLoop(OrigLoop);
7500
7502
7503 //===------------------------------------------------===//
7504 //
7505 // Notice: any optimization or new instruction that goes
7506 // into the code below should also be implemented in
7507 // the cost-model.
7508 //
7509 //===------------------------------------------------===//
7510
7511 // Retrieve loop information before executing the plan, which may remove the
7512 // original loop, if it becomes unreachable.
7513 MDNode *LID = OrigLoop->getLoopID();
7514 unsigned OrigLoopInvocationWeight = 0;
7515 std::optional<unsigned> OrigAverageTripCount =
7516 getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
7517
7518 BestVPlan.execute(&State);
7519
7520 // 2.6. Maintain Loop Hints
7521 // Keep all loop hints from the original loop on the vector loop (we'll
7522 // replace the vectorizer-specific hints below).
7523 VPBasicBlock *HeaderVPBB = vputils::getFirstLoopHeader(BestVPlan, State.VPDT);
7524 // Add metadata to disable runtime unrolling of the scalar loop when there
7525 // are no runtime checks about strides and memory. A scalar loop that is
7526 // rarely used is not worth unrolling.
7527 bool DisableRuntimeUnroll = !ILV.RTChecks.hasChecks() && !BestVF.isScalar();
7529 HeaderVPBB ? LI->getLoopFor(State.CFG.VPBB2IRBB.lookup(HeaderVPBB))
7530 : nullptr,
7531 HeaderVPBB, BestVPlan, VectorizingEpilogue, LID, OrigAverageTripCount,
7532 OrigLoopInvocationWeight,
7533 estimateElementCount(BestVF * BestUF, CM.getVScaleForTuning()),
7534 DisableRuntimeUnroll);
7535
7536 // 3. Fix the vectorized code: take care of header phi's, live-outs,
7537 // predication, updating analyses.
7538 ILV.fixVectorizedLoop(State);
7539
7541
7542 return ExpandedSCEVs;
7543}
7544
7545//===--------------------------------------------------------------------===//
7546// EpilogueVectorizerMainLoop
7547//===--------------------------------------------------------------------===//
7548
7549/// This function is partially responsible for generating the control flow
7550/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7552 BasicBlock *ScalarPH = createScalarPreheader("");
7553 BasicBlock *VectorPH = ScalarPH->getSinglePredecessor();
7554
7555 // Generate the code to check the minimum iteration count of the vector
7556 // epilogue (see below).
7557 EPI.EpilogueIterationCountCheck =
7558 emitIterationCountCheck(VectorPH, ScalarPH, true);
7559 EPI.EpilogueIterationCountCheck->setName("iter.check");
7560
7561 VectorPH = cast<BranchInst>(EPI.EpilogueIterationCountCheck->getTerminator())
7562 ->getSuccessor(1);
7563 // Generate the iteration count check for the main loop, *after* the check
7565 // for the epilogue loop, so that the path length is shorter for the case
7566 // that goes directly through the vector epilogue. The longer path length for
7567 // the main loop is compensated for by the gain from vectorizing the larger
7567 // trip count. Note: the branch will get updated later on when we vectorize
7568 // the epilogue.
7569 EPI.MainLoopIterationCountCheck =
7570 emitIterationCountCheck(VectorPH, ScalarPH, false);
7571
7572 return cast<BranchInst>(EPI.MainLoopIterationCountCheck->getTerminator())
7573 ->getSuccessor(1);
7574}
7575
7577 LLVM_DEBUG({
7578 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7579 << "Main Loop VF:" << EPI.MainLoopVF
7580 << ", Main Loop UF:" << EPI.MainLoopUF
7581 << ", Epilogue Loop VF:" << EPI.EpilogueVF
7582 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7583 });
7584}
7585
7588 dbgs() << "intermediate fn:\n"
7589 << *OrigLoop->getHeader()->getParent() << "\n";
7590 });
7591}
7592
7594 BasicBlock *VectorPH, BasicBlock *Bypass, bool ForEpilogue) {
7595 assert(Bypass && "Expected valid bypass basic block.");
7598 Value *CheckMinIters = createIterationCountCheck(
7599 VectorPH, ForEpilogue ? EPI.EpilogueVF : EPI.MainLoopVF,
7600 ForEpilogue ? EPI.EpilogueUF : EPI.MainLoopUF);
7601
7602 BasicBlock *const TCCheckBlock = VectorPH;
7603 if (!ForEpilogue)
7604 TCCheckBlock->setName("vector.main.loop.iter.check");
7605
7606 // Create new preheader for vector loop.
7607 VectorPH = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
7608 static_cast<DominatorTree *>(nullptr), LI, nullptr,
7609 "vector.ph");
7610 if (ForEpilogue) {
7611 // Save the trip count so we don't have to regenerate it in the
7612 // vec.epilog.iter.check. This is safe to do because the trip count
7613 // generated here dominates the vector epilog iter check.
7614 EPI.TripCount = Count;
7615 } else {
7617 }
7618
7619 BranchInst &BI = *BranchInst::Create(Bypass, VectorPH, CheckMinIters);
7620 if (hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator()))
7621 setBranchWeights(BI, MinItersBypassWeights, /*IsExpected=*/false);
7622 ReplaceInstWithInst(TCCheckBlock->getTerminator(), &BI);
7623
7624 // When vectorizing the main loop, its trip-count check is placed in a new
7625 // block, whereas the overall trip-count check is placed in the VPlan entry
7626 // block. When vectorizing the epilogue loop, its trip-count check is placed
7627 // in the VPlan entry block.
7628 if (!ForEpilogue)
7629 introduceCheckBlockInVPlan(TCCheckBlock);
7630 return TCCheckBlock;
7631}
7632
7633//===--------------------------------------------------------------------===//
7634// EpilogueVectorizerEpilogueLoop
7635//===--------------------------------------------------------------------===//
7636
7637 /// This function creates a new scalar preheader, using the previous one as the
7638 /// entry block to the epilogue VPlan. The minimum iteration check is
7639 /// represented in VPlan.
7641 BasicBlock *NewScalarPH = createScalarPreheader("vec.epilog.");
7642 BasicBlock *OriginalScalarPH = NewScalarPH->getSinglePredecessor();
7643 OriginalScalarPH->setName("vec.epilog.iter.check");
7644 VPIRBasicBlock *NewEntry = Plan.createVPIRBasicBlock(OriginalScalarPH);
7645 VPBasicBlock *OldEntry = Plan.getEntry();
7646 for (auto &R : make_early_inc_range(*OldEntry)) {
7647 // Skip moving VPIRInstructions (including VPIRPhis), which are unmovable by
7648 // definition.
7649 if (isa<VPIRInstruction>(&R))
7650 continue;
7651 R.moveBefore(*NewEntry, NewEntry->end());
7652 }
7653
7654 VPBlockUtils::reassociateBlocks(OldEntry, NewEntry);
7655 Plan.setEntry(NewEntry);
7656 // OldEntry is now dead and will be cleaned up when the plan gets destroyed.
7657
7658 return OriginalScalarPH;
7659}
7660
7662 LLVM_DEBUG({
7663 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7664 << "Epilogue Loop VF:" << EPI.EpilogueVF
7665 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7666 });
7667}
7668
7671 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
7672 });
7673}
7674
7675VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(VPInstruction *VPI,
7676 VFRange &Range) {
7677 assert((VPI->getOpcode() == Instruction::Load ||
7678 VPI->getOpcode() == Instruction::Store) &&
7679 "Must be called with either a load or store");
7681
7682 auto WillWiden = [&](ElementCount VF) -> bool {
7684 CM.getWideningDecision(I, VF);
7686 "CM decision should be taken at this point.");
7688 return true;
7689 if (CM.isScalarAfterVectorization(I, VF) ||
7690 CM.isProfitableToScalarize(I, VF))
7691 return false;
7693 };
7694
7696 return nullptr;
7697
7698 VPValue *Mask = nullptr;
7699 if (Legal->isMaskRequired(I))
7700 Mask = getBlockInMask(Builder.getInsertBlock());
7701
7702 // Determine if the pointer operand of the access is either consecutive or
7703 // reverse consecutive.
7705 CM.getWideningDecision(I, Range.Start);
7707 bool Consecutive =
7709
7710 VPValue *Ptr = VPI->getOpcode() == Instruction::Load ? VPI->getOperand(0)
7711 : VPI->getOperand(1);
7712 if (Consecutive) {
7715 VPSingleDefRecipe *VectorPtr;
7716 if (Reverse) {
7717 // When folding the tail, we may compute an address that we don't compute in
7718 // the original scalar loop: drop the GEP no-wrap flags in this case.
7719 // Otherwise preserve existing flags without no-unsigned-wrap, as we will
7720 // emit negative indices.
7721 GEPNoWrapFlags Flags =
7722 CM.foldTailByMasking() || !GEP
7724 : GEP->getNoWrapFlags().withoutNoUnsignedWrap();
7725 VectorPtr = new VPVectorEndPointerRecipe(
7726 Ptr, &Plan.getVF(), getLoadStoreType(I),
7727 /*Stride*/ -1, Flags, VPI->getDebugLoc());
7728 } else {
7729 VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I),
7730 GEP ? GEP->getNoWrapFlags()
7732 VPI->getDebugLoc());
7733 }
7734 Builder.insert(VectorPtr);
7735 Ptr = VectorPtr;
7736 }
7737
7738 if (VPI->getOpcode() == Instruction::Load) {
7739 auto *Load = cast<LoadInst>(I);
7740 auto *LoadR = new VPWidenLoadRecipe(*Load, Ptr, Mask, Consecutive, Reverse,
7741 *VPI, Load->getDebugLoc());
7742 if (Reverse) {
7743 Builder.insert(LoadR);
7744 return new VPInstruction(VPInstruction::Reverse, LoadR, {}, {},
7745 LoadR->getDebugLoc());
7746 }
7747 return LoadR;
7748 }
7749
7750 StoreInst *Store = cast<StoreInst>(I);
7751 VPValue *StoredVal = VPI->getOperand(0);
7752 if (Reverse)
7753 StoredVal = Builder.createNaryOp(VPInstruction::Reverse, StoredVal,
7754 Store->getDebugLoc());
7755 return new VPWidenStoreRecipe(*Store, Ptr, StoredVal, Mask, Consecutive,
7756 Reverse, *VPI, Store->getDebugLoc());
7757}
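// Editor's sketch (illustrative only, not part of this file): what the
// Consecutive and Reverse cases handled above mean for a simple copy loop,
// written in plain C++ with a hypothetical fixed width VF = 4. For a reverse
// access the widened pointer must address the end of the chunk (stride -1),
// which is what VPVectorEndPointerRecipe models, and the loaded lanes are
// then reversed, which is what the Reverse VPInstruction models.
static void toyReverseCopyScalar(const int *A, int *B, int N) {
  for (int i = 0; i < N; ++i)
    B[i] = A[N - 1 - i];
}
static void toyReverseCopyWidened(const int *A, int *B, int N) {
  const int VF = 4;
  int i = 0;
  for (; i + VF <= N; i += VF) {
    int Lanes[VF];
    // Lane 0 of this iteration reads A[N - 1 - i]; the chunk feeding all VF
    // lanes is A[N - VF - i .. N - 1 - i], addressed from its end pointer.
    const int *End = &A[N - 1 - i];
    for (int L = 0; L < VF; ++L)
      Lanes[L] = End[L - (VF - 1)]; // one wide, consecutive load
    for (int L = 0; L < VF; ++L)
      B[i + L] = Lanes[VF - 1 - L]; // reverse the lanes
  }
  for (; i < N; ++i) // scalar remainder
    B[i] = A[N - 1 - i];
}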
7758
7760VPRecipeBuilder::tryToOptimizeInductionTruncate(VPInstruction *VPI,
7761 VFRange &Range) {
7762 auto *I = cast<TruncInst>(VPI->getUnderlyingInstr());
7763 // Optimize the special case where the source is a constant integer
7764 // induction variable. Notice that we can only optimize the 'trunc' case
7765 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
7766 // (c) other casts depend on pointer size.
7767
7768 // Determine whether \p K is a truncation based on an induction variable that
7769 // can be optimized.
7770 auto IsOptimizableIVTruncate =
7771 [&](Instruction *K) -> std::function<bool(ElementCount)> {
7772 return [=](ElementCount VF) -> bool {
7773 return CM.isOptimizableIVTruncate(K, VF);
7774 };
7775 };
7776
7778 IsOptimizableIVTruncate(I), Range))
7779 return nullptr;
7780
7782 VPI->getOperand(0)->getDefiningRecipe());
7783 PHINode *Phi = WidenIV->getPHINode();
7784 VPIRValue *Start = WidenIV->getStartValue();
7785 const InductionDescriptor &IndDesc = WidenIV->getInductionDescriptor();
7786
7787 // It is always safe to copy over the NoWrap and FastMath flags. In
7788 // particular, when folding tail by masking, the masked-off lanes are never
7789 // used, so it is safe.
7790 VPIRFlags Flags = vputils::getFlagsFromIndDesc(IndDesc);
7791 VPValue *Step =
7793 return new VPWidenIntOrFpInductionRecipe(
7794 Phi, Start, Step, &Plan.getVF(), IndDesc, I, Flags, VPI->getDebugLoc());
7795}
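// Editor's sketch (illustrative only, not part of this file): the assumed
// behaviour of getDecisionAndClampRange as used above -- evaluate the
// predicate at Range.Start and shrink Range.End so that the returned answer
// holds for the whole half-open range. Toy types; real VFs are ElementCounts
// and may be scalable. For example, with a range {2, 32} and a predicate that
// is true only for VF >= 8, this returns false and clamps the end to 8.
struct ToyVFRange {
  unsigned Start; // inclusive, a power of two
  unsigned End;   // exclusive
};
template <typename PredicateT>
static bool toyDecisionAndClampRange(PredicateT Predicate, ToyVFRange &Range) {
  bool DecisionAtStart = Predicate(Range.Start);
  for (unsigned VF = Range.Start * 2; VF < Range.End; VF *= 2) {
    if (Predicate(VF) != DecisionAtStart) {
      Range.End = VF; // the first VF that answers differently ends the range
      break;
    }
  }
  return DecisionAtStart;
}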
7796
7797VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(VPInstruction *VPI,
7798 VFRange &Range) {
7799 CallInst *CI = cast<CallInst>(VPI->getUnderlyingInstr());
7801 [this, CI](ElementCount VF) {
7802 return CM.isScalarWithPredication(CI, VF);
7803 },
7804 Range);
7805
7806 if (IsPredicated)
7807 return nullptr;
7808
7810 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
7811 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
7812 ID == Intrinsic::pseudoprobe ||
7813 ID == Intrinsic::experimental_noalias_scope_decl))
7814 return nullptr;
7815
7817 VPI->op_begin() + CI->arg_size());
7818
7819 // Is it beneficial to perform an intrinsic call compared to a lib call?
7820 bool ShouldUseVectorIntrinsic =
7822 [&](ElementCount VF) -> bool {
7823 return CM.getCallWideningDecision(CI, VF).Kind ==
7825 },
7826 Range);
7827 if (ShouldUseVectorIntrinsic)
7828 return new VPWidenIntrinsicRecipe(*CI, ID, Ops, CI->getType(), *VPI, *VPI,
7829 VPI->getDebugLoc());
7830
7831 Function *Variant = nullptr;
7832 std::optional<unsigned> MaskPos;
7833 // Is it better to call a vectorized version of the function than to scalarize
7834 // the call?
7835 auto ShouldUseVectorCall = LoopVectorizationPlanner::getDecisionAndClampRange(
7836 [&](ElementCount VF) -> bool {
7837 // The following case may be scalarized depending on the VF.
7838 // The flag shows whether we can use a usual Call for the vectorized
7839 // version of the instruction.
7840
7841 // If we've found a variant at a previous VF, then stop looking. A
7842 // vectorized variant of a function expects input in a certain shape
7843 // -- basically the number of input registers, the number of lanes
7844 // per register, and whether there's a mask required.
7845 // We store a pointer to the variant in the VPWidenCallRecipe, so
7846 // once we have an appropriate variant it's only valid for that VF.
7847 // This will force a different vplan to be generated for each VF that
7848 // finds a valid variant.
7849 if (Variant)
7850 return false;
7851 LoopVectorizationCostModel::CallWideningDecision Decision =
7852 CM.getCallWideningDecision(CI, VF);
7854 Variant = Decision.Variant;
7855 MaskPos = Decision.MaskPos;
7856 return true;
7857 }
7858
7859 return false;
7860 },
7861 Range);
7862 if (ShouldUseVectorCall) {
7863 if (MaskPos.has_value()) {
7864 // We have 2 cases that would require a mask:
7865 // 1) The block needs to be predicated, either due to a conditional
7866 // in the scalar loop or use of an active lane mask with
7867 // tail-folding, and we use the appropriate mask for the block.
7868 // 2) No mask is required for the block, but the only available
7869 // vector variant at this VF requires a mask, so we synthesize an
7870 // all-true mask.
7871 VPValue *Mask = Legal->isMaskRequired(CI)
7872 ? getBlockInMask(Builder.getInsertBlock())
7873 : Plan.getTrue();
7874
7875 Ops.insert(Ops.begin() + *MaskPos, Mask);
7876 }
7877
7878 Ops.push_back(VPI->getOperand(VPI->getNumOperands() - 1));
7879 return new VPWidenCallRecipe(CI, Variant, Ops, *VPI, *VPI,
7880 VPI->getDebugLoc());
7881 }
7882
7883 return nullptr;
7884}
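// Editor's sketch (illustrative only, not part of this file): the two masking
// cases described above, in plain C++. "toyVF4Masked" stands in for a
// hypothetical vector library variant that only exists in masked form, so an
// unpredicated caller has to synthesize an all-true mask.
static void toyVF4Masked(const float *X, float *Out, const bool *Mask) {
  for (int L = 0; L < 4; ++L)
    if (Mask[L])
      Out[L] = X[L] * 2.0f; // stand-in for the widened scalar call
}
static void toyCallWidened(const float *X, float *Out, const bool *BlockMask) {
  // Case 1: the block is predicated, so pass its mask.
  // Case 2: no mask is required, but the only available variant takes one,
  //         so pass an all-true mask instead.
  static const bool AllTrue[4] = {true, true, true, true};
  toyVF4Masked(X, Out, BlockMask ? BlockMask : AllTrue);
}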
7885
7886bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
7888 !isa<StoreInst>(I) && "Instruction should have been handled earlier");
7889 // Instruction should be widened, unless it is scalar after vectorization,
7890 // scalarization is profitable or it is predicated.
7891 auto WillScalarize = [this, I](ElementCount VF) -> bool {
7892 return CM.isScalarAfterVectorization(I, VF) ||
7893 CM.isProfitableToScalarize(I, VF) ||
7894 CM.isScalarWithPredication(I, VF);
7895 };
7897 Range);
7898}
7899
7900VPWidenRecipe *VPRecipeBuilder::tryToWiden(VPInstruction *VPI) {
7901 auto *I = VPI->getUnderlyingInstr();
7902 switch (VPI->getOpcode()) {
7903 default:
7904 return nullptr;
7905 case Instruction::SDiv:
7906 case Instruction::UDiv:
7907 case Instruction::SRem:
7908 case Instruction::URem: {
7909 // If not provably safe, use a select to form a safe divisor before widening the
7910 // div/rem operation itself. Otherwise fall through to general handling below.
7911 if (CM.isPredicatedInst(I)) {
7913 VPValue *Mask = getBlockInMask(Builder.getInsertBlock());
7914 VPValue *One = Plan.getConstantInt(I->getType(), 1u);
7915 auto *SafeRHS =
7916 Builder.createSelect(Mask, Ops[1], One, VPI->getDebugLoc());
7917 Ops[1] = SafeRHS;
7918 return new VPWidenRecipe(*I, Ops, *VPI, *VPI, VPI->getDebugLoc());
7919 }
7920 [[fallthrough]];
7921 }
7922 case Instruction::Add:
7923 case Instruction::And:
7924 case Instruction::AShr:
7925 case Instruction::FAdd:
7926 case Instruction::FCmp:
7927 case Instruction::FDiv:
7928 case Instruction::FMul:
7929 case Instruction::FNeg:
7930 case Instruction::FRem:
7931 case Instruction::FSub:
7932 case Instruction::ICmp:
7933 case Instruction::LShr:
7934 case Instruction::Mul:
7935 case Instruction::Or:
7936 case Instruction::Select:
7937 case Instruction::Shl:
7938 case Instruction::Sub:
7939 case Instruction::Xor:
7940 case Instruction::Freeze:
7941 return new VPWidenRecipe(*I, VPI->operands(), *VPI, *VPI,
7942 VPI->getDebugLoc());
7943 case Instruction::ExtractValue: {
7944 SmallVector<VPValue *> NewOps(VPI->operands());
7945 auto *EVI = cast<ExtractValueInst>(I);
7946 assert(EVI->getNumIndices() == 1 && "Expected one extractvalue index");
7947 unsigned Idx = EVI->getIndices()[0];
7948 NewOps.push_back(Plan.getConstantInt(32, Idx));
7949 return new VPWidenRecipe(*I, NewOps, *VPI, *VPI, VPI->getDebugLoc());
7950 }
7951 };
7952}
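// Editor's sketch (illustrative only, not part of this file): the
// safe-divisor transform applied above for predicated div/rem, written for a
// single lane. Inactive lanes divide by 1 instead of a possibly trapping
// divisor, so the division can execute unconditionally, and their results are
// simply never used.
static void toyGuardedDivScalar(const int *A, const int *B, int *Out, int N) {
  for (int i = 0; i < N; ++i)
    if (B[i] != 0) // the block mask in the original loop
      Out[i] = A[i] / B[i];
}
static void toyGuardedDivLane(int A, int B, bool LaneActive, int &Out) {
  int SafeB = LaneActive ? B : 1; // select a safe divisor for inactive lanes
  int Quotient = A / SafeB;       // now unconditionally well-defined
  if (LaneActive)
    Out = Quotient;               // only active lanes keep their result
}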
7953
7954VPHistogramRecipe *VPRecipeBuilder::tryToWidenHistogram(const HistogramInfo *HI,
7955 VPInstruction *VPI) {
7956 // FIXME: Support other operations.
7957 unsigned Opcode = HI->Update->getOpcode();
7958 assert((Opcode == Instruction::Add || Opcode == Instruction::Sub) &&
7959 "Histogram update operation must be an Add or Sub");
7960
7962 // Bucket address.
7963 HGramOps.push_back(VPI->getOperand(1));
7964 // Increment value.
7965 HGramOps.push_back(getVPValueOrAddLiveIn(HI->Update->getOperand(1)));
7966
7967 // In case of predicated execution (due to tail-folding, or conditional
7968 // execution, or both), pass the relevant mask.
7969 if (Legal->isMaskRequired(HI->Store))
7970 HGramOps.push_back(getBlockInMask(Builder.getInsertBlock()));
7971
7972 return new VPHistogramRecipe(Opcode, HGramOps, VPI->getDebugLoc());
7973}
7974
7976 VFRange &Range) {
7977 auto *I = VPI->getUnderlyingInstr();
7979 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
7980 Range);
7981
7982 bool IsPredicated = CM.isPredicatedInst(I);
7983
7984 // Even if the instruction is not marked as uniform, there are certain
7985 // intrinsic calls that can be effectively treated as such, so we check for
7986 // them here. Conservatively, we only do this for scalable vectors, since
7987 // for fixed-width VFs we can always fall back on full scalarization.
7988 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
7989 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
7990 case Intrinsic::assume:
7991 case Intrinsic::lifetime_start:
7992 case Intrinsic::lifetime_end:
7993 // For scalable vectors if one of the operands is variant then we still
7994 // want to mark as uniform, which will generate one instruction for just
7995 // the first lane of the vector. We can't scalarize the call in the same
7996 // way as for fixed-width vectors because we don't know how many lanes
7997 // there are.
7998 //
7999 // The reasons for doing it this way for scalable vectors are:
8000 // 1. For the assume intrinsic, generating the instruction for the first
8001 // lane is still better than not generating any at all. For
8002 // example, the input may be a splat across all lanes.
8003 // 2. For the lifetime start/end intrinsics the pointer operand only
8004 // does anything useful when the input comes from a stack object,
8005 // which suggests it should always be uniform. For non-stack objects
8006 // the effect is to poison the object, which still allows us to
8007 // remove the call.
8008 IsUniform = true;
8009 break;
8010 default:
8011 break;
8012 }
8013 }
8014 VPValue *BlockInMask = nullptr;
8015 if (!IsPredicated) {
8016 // Finalize the recipe for Instr, first if it is not predicated.
8017 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8018 } else {
8019 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8020 // Instructions marked for predication are replicated and a mask operand is
8021 // added initially. Masked replicate recipes will later be placed under an
8022 // if-then construct to prevent side-effects. Generate recipes to compute
8023 // the block mask for this region.
8024 BlockInMask = getBlockInMask(Builder.getInsertBlock());
8025 }
8026
8027 // Note that there is some custom logic to mark some intrinsics as uniform
8028 // manually above for scalable vectors, which this assert needs to account for
8029 // as well.
8030 assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
8031 (Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
8032 "Should not predicate a uniform recipe");
8033 auto *Recipe =
8034 new VPReplicateRecipe(I, VPI->operands(), IsUniform, BlockInMask, *VPI,
8035 *VPI, VPI->getDebugLoc());
8036 return Recipe;
8037}
8038
8041 VFRange &Range) {
8042 assert(!R->isPhi() && "phis must be handled earlier");
8043 // First, check for specific widening recipes that deal with optimizing
8044 // truncates, calls and memory operations.
8045
8046 VPRecipeBase *Recipe;
8047 auto *VPI = cast<VPInstruction>(R);
8048 if (VPI->getOpcode() == Instruction::Trunc &&
8049 (Recipe = tryToOptimizeInductionTruncate(VPI, Range)))
8050 return Recipe;
8051
8052 // All widen recipes below deal only with VF > 1.
8054 [&](ElementCount VF) { return VF.isScalar(); }, Range))
8055 return nullptr;
8056
8057 if (VPI->getOpcode() == Instruction::Call)
8058 return tryToWidenCall(VPI, Range);
8059
8060 Instruction *Instr = R->getUnderlyingInstr();
8061 if (VPI->getOpcode() == Instruction::Store)
8062 if (auto HistInfo = Legal->getHistogramInfo(cast<StoreInst>(Instr)))
8063 return tryToWidenHistogram(*HistInfo, VPI);
8064
8065 if (VPI->getOpcode() == Instruction::Load ||
8066 VPI->getOpcode() == Instruction::Store)
8067 return tryToWidenMemory(VPI, Range);
8068
8069 if (!shouldWiden(Instr, Range))
8070 return nullptr;
8071
8072 if (VPI->getOpcode() == Instruction::GetElementPtr)
8073 return new VPWidenGEPRecipe(cast<GetElementPtrInst>(Instr), R->operands(),
8074 *VPI, VPI->getDebugLoc());
8075
8076 if (Instruction::isCast(VPI->getOpcode())) {
8077 auto *CI = cast<CastInst>(Instr);
8078 auto *CastR = cast<VPInstructionWithType>(VPI);
8079 return new VPWidenCastRecipe(CI->getOpcode(), VPI->getOperand(0),
8080 CastR->getResultType(), CI, *VPI, *VPI,
8081 VPI->getDebugLoc());
8082 }
8083
8084 return tryToWiden(VPI);
8085}
8086
8087void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8088 ElementCount MaxVF) {
8089 if (ElementCount::isKnownGT(MinVF, MaxVF))
8090 return;
8091
8092 assert(OrigLoop->isInnermost() && "Inner loop expected.");
8093
8094 const LoopAccessInfo *LAI = Legal->getLAI();
8096 OrigLoop, LI, DT, PSE.getSE());
8097 if (!LAI->getRuntimePointerChecking()->getChecks().empty() &&
8099 // Only use noalias metadata when using memory checks guaranteeing no
8100 // overlap across all iterations.
8101 LVer.prepareNoAliasMetadata();
8102 }
8103
8104 // Create initial base VPlan0, to serve as common starting point for all
8105 // candidates built later for specific VF ranges.
8106 auto VPlan0 = VPlanTransforms::buildVPlan0(
8107 OrigLoop, *LI, Legal->getWidestInductionType(),
8108 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE, &LVer);
8109
8110 // Create recipes for header phis.
8112 *VPlan0, PSE, *OrigLoop, Legal->getInductionVars(),
8113 Legal->getReductionVars(), Legal->getFixedOrderRecurrences(),
8114 CM.getInLoopReductions(), Hints.allowReordering());
8115
8116 auto MaxVFTimes2 = MaxVF * 2;
8117 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
8118 VFRange SubRange = {VF, MaxVFTimes2};
8119 if (auto Plan = tryToBuildVPlanWithVPRecipes(
8120 std::unique_ptr<VPlan>(VPlan0->duplicate()), SubRange, &LVer)) {
8121 // Now optimize the initial VPlan.
8122 VPlanTransforms::hoistPredicatedLoads(*Plan, PSE, OrigLoop);
8123 VPlanTransforms::sinkPredicatedStores(*Plan, PSE, OrigLoop);
8125 CM.getMinimalBitwidths());
8127 // TODO: try to put addExplicitVectorLength close to addActiveLaneMask
8128 if (CM.foldTailWithEVL()) {
8130 CM.getMaxSafeElements());
8132 }
8133 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8134 VPlans.push_back(std::move(Plan));
8135 }
8136 VF = SubRange.End;
8137 }
8138}
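// Editor's sketch (illustrative only, not part of this file): how the VF loop
// in buildVPlansWithVPRecipes above partitions [MinVF, MaxVF] into subranges,
// building one candidate plan per subrange. The builder may shrink the end of
// the current subrange; the next plan then starts at the first VF it did not
// cover. For example, with MinVF = 2, MaxVF = 16 and a builder that clamps
// the first range at 8, this produces one plan for VFs {2, 4} and one for
// VFs {8, 16}.
template <typename BuildPlanT>
static unsigned toyBuildPlansForSubranges(unsigned MinVF, unsigned MaxVF,
                                          BuildPlanT BuildPlan) {
  unsigned NumPlans = 0;
  unsigned MaxVFTimes2 = MaxVF * 2;
  for (unsigned VF = MinVF; VF < MaxVFTimes2;) {
    unsigned SubRangeEnd = MaxVFTimes2;
    BuildPlan(VF, SubRangeEnd); // may shrink SubRangeEnd while clamping ranges
    ++NumPlans;
    VF = SubRangeEnd;           // continue with the first uncovered VF
  }
  return NumPlans;
}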
8139
8140VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
8141 VPlanPtr Plan, VFRange &Range, LoopVersioning *LVer) {
8142
8143 using namespace llvm::VPlanPatternMatch;
8144 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8145
8146 // ---------------------------------------------------------------------------
8147 // Build initial VPlan: Scan the body of the loop in a topological order to
8148 // visit each basic block after having visited its predecessor basic blocks.
8149 // ---------------------------------------------------------------------------
8150
8151 bool RequiresScalarEpilogueCheck =
8153 [this](ElementCount VF) {
8154 return !CM.requiresScalarEpilogue(VF.isVector());
8155 },
8156 Range);
8157 VPlanTransforms::handleEarlyExits(*Plan, Legal->hasUncountableEarlyExit());
8158 VPlanTransforms::addMiddleCheck(*Plan, RequiresScalarEpilogueCheck,
8159 CM.foldTailByMasking());
8160
8162
8163 // Don't use getDecisionAndClampRange here, because we don't know the UF,
8164 // so it is better for this function to be conservative rather than to split
8165 // it up into different VPlans.
8166 // TODO: Consider using getDecisionAndClampRange here to split up VPlans.
8167 bool IVUpdateMayOverflow = false;
8168 for (ElementCount VF : Range)
8169 IVUpdateMayOverflow |= !isIndvarOverflowCheckKnownFalse(&CM, VF);
8170
8171 TailFoldingStyle Style = CM.getTailFoldingStyle(IVUpdateMayOverflow);
8172 // Use NUW for the induction increment if we proved that it won't overflow in
8173 // the vector loop or when not folding the tail. In the latter case, we know
8174 // that the canonical induction increment will not overflow as the vector trip
8175 // count is >= increment and a multiple of the increment.
8176 VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
8177 bool HasNUW = !IVUpdateMayOverflow || Style == TailFoldingStyle::None;
8178 if (!HasNUW) {
8179 auto *IVInc =
8180 LoopRegion->getExitingBasicBlock()->getTerminator()->getOperand(0);
8181 assert(match(IVInc,
8182 m_VPInstruction<Instruction::Add>(
8183 m_Specific(LoopRegion->getCanonicalIV()), m_VPValue())) &&
8184 "Did not find the canonical IV increment");
8185 cast<VPRecipeWithIRFlags>(IVInc)->dropPoisonGeneratingFlags();
8186 }
8187
8188 // ---------------------------------------------------------------------------
8189 // Pre-construction: record ingredients whose recipes we'll need to further
8190 // process after constructing the initial VPlan.
8191 // ---------------------------------------------------------------------------
8192
8193 // For each interleave group which is relevant for this (possibly trimmed)
8194 // Range, add it to the set of groups to be later applied to the VPlan and add
8195 // placeholders for its members' Recipes which we'll be replacing with a
8196 // single VPInterleaveRecipe.
8197 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8198 auto ApplyIG = [IG, this](ElementCount VF) -> bool {
8199 bool Result = (VF.isVector() && // Query is illegal for VF == 1
8200 CM.getWideningDecision(IG->getInsertPos(), VF) ==
8202 // For scalable vectors, the interleave factors must be <= 8 since we
8203 // require the (de)interleaveN intrinsics instead of shufflevectors.
8204 assert((!Result || !VF.isScalable() || IG->getFactor() <= 8) &&
8205 "Unsupported interleave factor for scalable vectors");
8206 return Result;
8207 };
8208 if (!getDecisionAndClampRange(ApplyIG, Range))
8209 continue;
8210 InterleaveGroups.insert(IG);
8211 }
8212
8213 // ---------------------------------------------------------------------------
8214 // Predicate and linearize the top-level loop region.
8215 // ---------------------------------------------------------------------------
8216 auto BlockMaskCache = VPlanTransforms::introduceMasksAndLinearize(
8217 *Plan, CM.foldTailByMasking());
8218
8219 // ---------------------------------------------------------------------------
8220 // Construct wide recipes and apply predication for original scalar
8221 // VPInstructions in the loop.
8222 // ---------------------------------------------------------------------------
8223 VPRecipeBuilder RecipeBuilder(*Plan, TLI, Legal, CM, Builder, BlockMaskCache);
8224
8225 // Scan the body of the loop in a topological order to visit each basic block
8226 // after having visited its predecessor basic blocks.
8227 VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
8228 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
8229 HeaderVPBB);
8230
8231 auto *MiddleVPBB = Plan->getMiddleBlock();
8232 VPBasicBlock::iterator MBIP = MiddleVPBB->getFirstNonPhi();
8233 // Mapping from VPValues in the initial plan to their widened VPValues. Needed
8234 // temporarily to update created block masks.
8235 DenseMap<VPValue *, VPValue *> Old2New;
8236
8237 // Collect blocks that need predication for in-loop reduction recipes.
8238 DenseSet<BasicBlock *> BlocksNeedingPredication;
8239 for (BasicBlock *BB : OrigLoop->blocks())
8240 if (CM.blockNeedsPredicationForAnyReason(BB))
8241 BlocksNeedingPredication.insert(BB);
8242
8244 *Plan, BlockMaskCache, BlocksNeedingPredication, Range.Start);
8245
8246 // Now process all other blocks and instructions.
8247 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
8248 // Convert input VPInstructions to widened recipes.
8249 for (VPRecipeBase &R : make_early_inc_range(
8250 make_range(VPBB->getFirstNonPhi(), VPBB->end()))) {
8251 // Skip recipes that do not need transforming.
8253 continue;
8254 auto *VPI = cast<VPInstruction>(&R);
8255 if (!VPI->getUnderlyingValue())
8256 continue;
8257
8258 // TODO: Gradually replace uses of the underlying instruction by analyses on
8259 // VPlan. Migrate the code below, which relies on the underlying instruction
8260 // from VPlan0 to construct recipes, so that it no longer uses it.
8262 Builder.setInsertPoint(VPI);
8263
8264 // The stores with invariant address inside the loop will be deleted, and
8265 // in the exit block, a uniform store recipe will be created for the final
8266 // invariant store of the reduction.
8267 StoreInst *SI;
8268 if ((SI = dyn_cast<StoreInst>(Instr)) &&
8269 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
8270 // Only create recipe for the final invariant store of the reduction.
8271 if (Legal->isInvariantStoreOfReduction(SI)) {
8272 auto *Recipe = new VPReplicateRecipe(
8273 SI, R.operands(), true /* IsUniform */, nullptr /*Mask*/, *VPI,
8274 *VPI, VPI->getDebugLoc());
8275 Recipe->insertBefore(*MiddleVPBB, MBIP);
8276 }
8277 R.eraseFromParent();
8278 continue;
8279 }
8280
8281 VPRecipeBase *Recipe =
8282 RecipeBuilder.tryToCreateWidenNonPhiRecipe(VPI, Range);
8283 if (!Recipe)
8284 Recipe =
8285 RecipeBuilder.handleReplication(cast<VPInstruction>(VPI), Range);
8286
8287 RecipeBuilder.setRecipe(Instr, Recipe);
8288 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && isa<TruncInst>(Instr)) {
8289 // Optimized a truncate to VPWidenIntOrFpInductionRecipe. It needs to be
8290 // moved to the phi section in the header.
8291 Recipe->insertBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
8292 } else {
8293 Builder.insert(Recipe);
8294 }
8295 if (Recipe->getNumDefinedValues() == 1) {
8296 VPI->replaceAllUsesWith(Recipe->getVPSingleValue());
8297 Old2New[VPI] = Recipe->getVPSingleValue();
8298 } else {
8299 assert(Recipe->getNumDefinedValues() == 0 &&
8300 "Unexpected multidef recipe");
8301 R.eraseFromParent();
8302 }
8303 }
8304 }
8305
8306 // replaceAllUsesWith above may invalidate the block masks. Update them here.
8307 // TODO: Include the masks as operands in the predicated VPlan directly
8308 // to remove the need to keep a map of masks beyond the predication
8309 // transform.
8310 RecipeBuilder.updateBlockMaskCache(Old2New);
8311 for (VPValue *Old : Old2New.keys())
8312 Old->getDefiningRecipe()->eraseFromParent();
8313
8314 assert(isa<VPRegionBlock>(LoopRegion) &&
8315 !LoopRegion->getEntryBasicBlock()->empty() &&
8316 "entry block must be set to a VPRegionBlock having a non-empty entry "
8317 "VPBasicBlock");
8318
8319 // TODO: We can't call runPass on these transforms yet, due to verifier
8320 // failures.
8322 DenseMap<VPValue *, VPValue *> IVEndValues;
8323 VPlanTransforms::updateScalarResumePhis(*Plan, IVEndValues);
8324
8325 // ---------------------------------------------------------------------------
8326 // Transform initial VPlan: Apply previously taken decisions, in order, to
8327 // bring the VPlan to its final state.
8328 // ---------------------------------------------------------------------------
8329
8330 addReductionResultComputation(Plan, RecipeBuilder, Range.Start);
8331
8332 // Apply mandatory transformation to handle reductions with multiple in-loop
8333 // uses if possible, bail out otherwise.
8335 return nullptr;
8336 // Apply mandatory transformation to handle FP maxnum/minnum reduction with
8337 // NaNs if possible, bail out otherwise.
8339 return nullptr;
8340
8341 // Create whole-vector selects for find-last recurrences.
8343 return nullptr;
8344
8345 // Create partial reduction recipes for scaled reductions and transform
8346 // recipes to abstract recipes if it is legal and beneficial and clamp the
8347 // range for better cost estimation.
8348 // TODO: Enable following transform when the EVL-version of extended-reduction
8349 // and mulacc-reduction are implemented.
8350 if (!CM.foldTailWithEVL()) {
8351 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, CM.PSE,
8352 OrigLoop);
8354 Range);
8356 Range);
8357 }
8358
8359 for (ElementCount VF : Range)
8360 Plan->addVF(VF);
8361 Plan->setName("Initial VPlan");
8362
8363 // Interleave memory: for each Interleave Group we marked earlier as relevant
8364 // for this VPlan, replace the Recipes widening its memory instructions with a
8365 // single VPInterleaveRecipe at its insertion point.
8367 InterleaveGroups, RecipeBuilder, CM.isScalarEpilogueAllowed());
8368
8369 // Replace VPValues for known constant strides.
8371 Legal->getLAI()->getSymbolicStrides());
8372
8373 auto BlockNeedsPredication = [this](BasicBlock *BB) {
8374 return Legal->blockNeedsPredication(BB);
8375 };
8377 BlockNeedsPredication);
8378
8379 // Sink users of fixed-order recurrence past the recipe defining the previous
8380 // value and introduce FirstOrderRecurrenceSplice VPInstructions.
8382 Builder))
8383 return nullptr;
8384
8385 if (useActiveLaneMask(Style)) {
8386 // TODO: Move checks to VPlanTransforms::addActiveLaneMask once
8387 // TailFoldingStyle is visible there.
8388 bool ForControlFlow = useActiveLaneMaskForControlFlow(Style);
8389 bool WithoutRuntimeCheck =
8391 VPlanTransforms::addActiveLaneMask(*Plan, ForControlFlow,
8392 WithoutRuntimeCheck);
8393 }
8394 VPlanTransforms::optimizeInductionExitUsers(*Plan, IVEndValues, PSE);
8395
8396 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8397 return Plan;
8398}
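// Editor's sketch (illustrative only, not part of this file): what replacing
// an interleave group's member accesses with a single wide access (the
// VPInterleaveRecipe created for the groups recorded above) means for a
// factor-2 load group, written as plain C++ with a hypothetical VF = 4. One
// wide load covers both members and a de-interleave step recovers the lanes
// of each member.
static void toyInterleavedScalar(const int *AoS, int *Even, int *Odd, int N) {
  for (int i = 0; i < N; ++i) {
    Even[i] = AoS[2 * i];    // member 0 of the group
    Odd[i] = AoS[2 * i + 1]; // member 1 of the group
  }
}
static void toyInterleavedWidened(const int *AoS, int *Even, int *Odd, int N) {
  const int VF = 4;
  int i = 0;
  for (; i + VF <= N; i += VF) {
    int Wide[2 * VF];
    for (int L = 0; L < 2 * VF; ++L) // one wide load of 2 * VF elements
      Wide[L] = AoS[2 * i + L];
    for (int L = 0; L < VF; ++L) {   // de-interleave into the two members
      Even[i + L] = Wide[2 * L];
      Odd[i + L] = Wide[2 * L + 1];
    }
  }
  for (; i < N; ++i) {               // scalar remainder
    Even[i] = AoS[2 * i];
    Odd[i] = AoS[2 * i + 1];
  }
}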
8399
8400VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
8401 // Outer loop handling: outer loops may require CFG and instruction level
8402 // transformations before even evaluating whether vectorization is profitable.
8403 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
8404 // the vectorization pipeline.
8405 assert(!OrigLoop->isInnermost());
8406 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8407
8408 auto Plan = VPlanTransforms::buildVPlan0(
8409 OrigLoop, *LI, Legal->getWidestInductionType(),
8410 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
8411
8413 *Plan, PSE, *OrigLoop, Legal->getInductionVars(),
8414 MapVector<PHINode *, RecurrenceDescriptor>(),
8415 SmallPtrSet<const PHINode *, 1>(), SmallPtrSet<PHINode *, 1>(),
8416 /*AllowReordering=*/false);
8418 /*HasUncountableExit*/ false);
8419 VPlanTransforms::addMiddleCheck(*Plan, /*RequiresScalarEpilogue*/ true,
8420 /*TailFolded*/ false);
8421
8423
8424 for (ElementCount VF : Range)
8425 Plan->addVF(VF);
8426
8428 return nullptr;
8429
8430 // TODO: IVEndValues are not used yet in the native path, to optimize exit
8431 // values.
8432 // TODO: We can't call runPass on the transform yet, due to verifier
8433 // failures.
8434 DenseMap<VPValue *, VPValue *> IVEndValues;
8435 VPlanTransforms::updateScalarResumePhis(*Plan, IVEndValues);
8436
8437 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8438 return Plan;
8439}
8440
8441void LoopVectorizationPlanner::addReductionResultComputation(
8442 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) {
8443 using namespace VPlanPatternMatch;
8444 VPTypeAnalysis TypeInfo(*Plan);
8445 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
8446 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
8448 VPBasicBlock *LatchVPBB = VectorLoopRegion->getExitingBasicBlock();
8449 Builder.setInsertPoint(&*std::prev(std::prev(LatchVPBB->end())));
8450 VPBasicBlock::iterator IP = MiddleVPBB->getFirstNonPhi();
8451 for (VPRecipeBase &R :
8452 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8453 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8454 if (!PhiR)
8455 continue;
8456
8457 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(
8459 Type *PhiTy = TypeInfo.inferScalarType(PhiR);
8460 // If tail is folded by masking, introduce selects between the phi
8461 // and the users outside the vector region of each reduction, at the
8462 // beginning of the dedicated latch block.
8463 auto *OrigExitingVPV = PhiR->getBackedgeValue();
8464 auto *NewExitingVPV = PhiR->getBackedgeValue();
8465 // Don't output selects for partial reductions because they have an output
8466 // with fewer lanes than the VF. So the operands of the select would have
8467 // different numbers of lanes. Partial reductions mask the input instead.
8468 auto *RR = dyn_cast<VPReductionRecipe>(OrigExitingVPV->getDefiningRecipe());
8469 if (!PhiR->isInLoop() && CM.foldTailByMasking() &&
8470 (!RR || !RR->isPartialReduction())) {
8471 VPValue *Cond = RecipeBuilder.getBlockInMask(PhiR->getParent());
8472 std::optional<FastMathFlags> FMFs =
8473 PhiTy->isFloatingPointTy()
8474 ? std::make_optional(RdxDesc.getFastMathFlags())
8475 : std::nullopt;
8476 NewExitingVPV =
8477 Builder.createSelect(Cond, OrigExitingVPV, PhiR, {}, "", FMFs);
8478 OrigExitingVPV->replaceUsesWithIf(NewExitingVPV, [](VPUser &U, unsigned) {
8479 using namespace VPlanPatternMatch;
8480 return match(
8481 &U, m_CombineOr(
8482 m_VPInstruction<VPInstruction::ComputeAnyOfResult>(),
8483 m_VPInstruction<VPInstruction::ComputeReductionResult>()));
8484 });
8485 if (CM.usePredicatedReductionSelect())
8486 PhiR->setOperand(1, NewExitingVPV);
8487 }
8488
8489 // We want code in the middle block to appear to execute on the location of
8490 // the scalar loop's latch terminator because: (a) it is all compiler
8491 // generated, (b) these instructions are always executed after evaluating
8492 // the latch conditional branch, and (c) other passes may add new
8493 // predecessors which terminate on this line. This is the easiest way to
8494 // ensure we don't accidentally cause an extra step back into the loop while
8495 // debugging.
8496 DebugLoc ExitDL = OrigLoop->getLoopLatch()->getTerminator()->getDebugLoc();
8497
8498 // TODO: At the moment ComputeReductionResult also drives creation of the
8499 // bc.merge.rdx phi nodes, hence it needs to be created unconditionally here
8500 // even for in-loop reductions, until the reduction resume value handling is
8501 // also modeled in VPlan.
8502 VPInstruction *FinalReductionResult;
8503 VPBuilder::InsertPointGuard Guard(Builder);
8504 Builder.setInsertPoint(MiddleVPBB, IP);
8505 RecurKind RecurrenceKind = PhiR->getRecurrenceKind();
8506 // For AnyOf reductions, find the select among PhiR's users. This is used
8507 // both to find NewVal for ComputeAnyOfResult and to adjust the reduction.
8508 VPRecipeBase *AnyOfSelect = nullptr;
8509 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
8510 AnyOfSelect = cast<VPRecipeBase>(*find_if(PhiR->users(), [](VPUser *U) {
8511 return match(U, m_Select(m_VPValue(), m_VPValue(), m_VPValue()));
8512 }));
8513 }
8515 VPValue *Start = PhiR->getStartValue();
8516 VPValue *Sentinel = Plan->getOrAddLiveIn(RdxDesc.getSentinelValue());
8517 RecurKind MinMaxKind;
8518 bool IsSigned =
8521 MinMaxKind = IsSigned ? RecurKind::SMax : RecurKind::UMax;
8522 else
8523 MinMaxKind = IsSigned ? RecurKind::SMin : RecurKind::UMin;
8524 VPIRFlags Flags(MinMaxKind, /*IsOrdered=*/false, /*IsInLoop=*/false,
8525 FastMathFlags());
8526 auto *ReducedIV =
8527 Builder.createNaryOp(VPInstruction::ComputeReductionResult,
8528 {NewExitingVPV}, Flags, ExitDL);
8529 auto *Cmp =
8530 Builder.createICmp(CmpInst::ICMP_NE, ReducedIV, Sentinel, ExitDL);
8531 FinalReductionResult = cast<VPInstruction>(
8532 Builder.createSelect(Cmp, ReducedIV, Start, ExitDL));
8533 } else if (AnyOfSelect) {
8534 VPValue *Start = PhiR->getStartValue();
8535 // NewVal is the non-phi operand of the select.
8536 VPValue *NewVal = AnyOfSelect->getOperand(1) == PhiR
8537 ? AnyOfSelect->getOperand(2)
8538 : AnyOfSelect->getOperand(1);
8539 FinalReductionResult =
8540 Builder.createNaryOp(VPInstruction::ComputeAnyOfResult,
8541 {Start, NewVal, NewExitingVPV}, ExitDL);
8542 } else {
8543 FastMathFlags FMFs =
8545 ? RdxDesc.getFastMathFlags()
8546 : FastMathFlags();
8547 VPIRFlags Flags(RecurrenceKind, PhiR->isOrdered(), PhiR->isInLoop(),
8548 FMFs);
8549 FinalReductionResult =
8550 Builder.createNaryOp(VPInstruction::ComputeReductionResult,
8551 {NewExitingVPV}, Flags, ExitDL);
8552 }
8553 // If the vector reduction can be performed in a smaller type, we truncate
8554 // then extend the loop exit value to enable InstCombine to evaluate the
8555 // entire expression in the smaller type.
8556 if (MinVF.isVector() && PhiTy != RdxDesc.getRecurrenceType() &&
8558 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
8560 "Unexpected truncated min-max recurrence!");
8561 Type *RdxTy = RdxDesc.getRecurrenceType();
8562 VPWidenCastRecipe *Trunc;
8563 Instruction::CastOps ExtendOpc =
8564 RdxDesc.isSigned() ? Instruction::SExt : Instruction::ZExt;
8565 VPWidenCastRecipe *Extnd;
8566 {
8567 VPBuilder::InsertPointGuard Guard(Builder);
8568 Builder.setInsertPoint(
8569 NewExitingVPV->getDefiningRecipe()->getParent(),
8570 std::next(NewExitingVPV->getDefiningRecipe()->getIterator()));
8571 Trunc =
8572 Builder.createWidenCast(Instruction::Trunc, NewExitingVPV, RdxTy);
8573 Extnd = Builder.createWidenCast(ExtendOpc, Trunc, PhiTy);
8574 }
8575 if (PhiR->getOperand(1) == NewExitingVPV)
8576 PhiR->setOperand(1, Extnd->getVPSingleValue());
8577
8578 // Update ComputeReductionResult with the truncated exiting value and
8579 // extend its result. Operand 0 provides the values to be reduced.
8580 FinalReductionResult->setOperand(0, Trunc);
8581 FinalReductionResult =
8582 Builder.createScalarCast(ExtendOpc, FinalReductionResult, PhiTy, {});
8583 }
8584
8585 // Update all users outside the vector region. Also replace redundant
8586 // extracts.
8587 for (auto *U : to_vector(OrigExitingVPV->users())) {
8588 auto *Parent = cast<VPRecipeBase>(U)->getParent();
8589 if (FinalReductionResult == U || Parent->getParent())
8590 continue;
8591 // Skip FindIV reduction chain recipes (ComputeReductionResult, icmp).
8593 match(U, m_CombineOr(
8594 m_VPInstruction<VPInstruction::ComputeReductionResult>(),
8595 m_VPInstruction<Instruction::ICmp>())))
8596 continue;
8597 U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult);
8598
8599 // Look through ExtractLastPart.
8601 U = cast<VPInstruction>(U)->getSingleUser();
8602
8605 cast<VPInstruction>(U)->replaceAllUsesWith(FinalReductionResult);
8606 }
8607
8608 // Adjust AnyOf reductions; replace the reduction phi for the selected value
8609 // with a boolean reduction phi node to check if the condition is true in
8610 // any iteration. The final value is selected by the final
8611 // ComputeReductionResult.
8612 if (AnyOfSelect) {
8613 VPValue *Cmp = AnyOfSelect->getOperand(0);
8614 // If the compare is checking the reduction PHI node, adjust it to check
8615 // the start value.
8616 if (VPRecipeBase *CmpR = Cmp->getDefiningRecipe())
8617 CmpR->replaceUsesOfWith(PhiR, PhiR->getStartValue());
8618 Builder.setInsertPoint(AnyOfSelect);
8619
8620 // If the true value of the select is the reduction phi, the new value is
8621 // selected if the negated condition is true in any iteration.
8622 if (AnyOfSelect->getOperand(1) == PhiR)
8623 Cmp = Builder.createNot(Cmp);
8624 VPValue *Or = Builder.createOr(PhiR, Cmp);
8625 AnyOfSelect->getVPSingleValue()->replaceAllUsesWith(Or);
8626 // Delete AnyOfSelect now that it has invalid types.
8627 ToDelete.push_back(AnyOfSelect);
8628
8629 // Convert the reduction phi to operate on bools.
8630 PhiR->setOperand(0, Plan->getFalse());
8631 continue;
8632 }
8633
8635 RdxDesc.getRecurrenceKind())) {
8636 // Adjust the start value for FindFirstIV/FindLastIV recurrences to use
8637 // the sentinel value after generating the ResumePhi recipe, which uses
8638 // the original start value.
8639 PhiR->setOperand(0, Plan->getOrAddLiveIn(RdxDesc.getSentinelValue()));
8640 }
8641 RecurKind RK = RdxDesc.getRecurrenceKind();
8646 VPBuilder PHBuilder(Plan->getVectorPreheader());
8647 VPValue *Iden = Plan->getOrAddLiveIn(
8648 getRecurrenceIdentity(RK, PhiTy, RdxDesc.getFastMathFlags()));
8649 auto *ScaleFactorVPV = Plan->getConstantInt(32, 1);
8650 VPValue *StartV = PHBuilder.createNaryOp(
8652 {PhiR->getStartValue(), Iden, ScaleFactorVPV},
8653 PhiTy->isFloatingPointTy() ? RdxDesc.getFastMathFlags()
8654 : FastMathFlags());
8655 PhiR->setOperand(0, StartV);
8656 }
8657 }
8658 for (VPRecipeBase *R : ToDelete)
8659 R->eraseFromParent();
8660
8662}
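// Editor's sketch (illustrative only, not part of this file): the AnyOf
// adjustment made above, in scalar form. Instead of carrying the selected
// value through the loop, carry a boolean "did the condition ever hold"
// reduction and select the final value once after the loop, which is what the
// boolean phi plus ComputeAnyOfResult model. This relies on the selected
// value being loop-invariant, which is what makes it an AnyOf reduction.
static int toyAnyOfOriginal(const int *A, int N, int Start, int NewVal) {
  int R = Start;
  for (int i = 0; i < N; ++i)
    R = (A[i] > 0) ? NewVal : R; // original select-based recurrence
  return R;
}
static int toyAnyOfAdjusted(const int *A, int N, int Start, int NewVal) {
  bool Any = false;            // boolean reduction phi, starts at false
  for (int i = 0; i < N; ++i)
    Any = Any || (A[i] > 0);   // OR in the per-iteration condition
  return Any ? NewVal : Start; // the final select after the loop
}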
8663
8664void LoopVectorizationPlanner::attachRuntimeChecks(
8665 VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const {
8666 const auto &[SCEVCheckCond, SCEVCheckBlock] = RTChecks.getSCEVChecks();
8667 if (SCEVCheckBlock && SCEVCheckBlock->hasNPredecessors(0)) {
8668 assert((!CM.OptForSize ||
8669 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled) &&
8670 "Cannot SCEV check stride or overflow when optimizing for size");
8671 VPlanTransforms::attachCheckBlock(Plan, SCEVCheckCond, SCEVCheckBlock,
8672 HasBranchWeights);
8673 }
8674 const auto &[MemCheckCond, MemCheckBlock] = RTChecks.getMemRuntimeChecks();
8675 if (MemCheckBlock && MemCheckBlock->hasNPredecessors(0)) {
8676 // VPlan-native path does not do any analysis for runtime checks
8677 // currently.
8678 assert((!EnableVPlanNativePath || OrigLoop->isInnermost()) &&
8679 "Runtime checks are not supported for outer loops yet");
8680
8681 if (CM.OptForSize) {
8682 assert(
8683 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
8684 "Cannot emit memory checks when optimizing for size, unless forced "
8685 "to vectorize.");
8686 ORE->emit([&]() {
8687 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
8688 OrigLoop->getStartLoc(),
8689 OrigLoop->getHeader())
8690 << "Code-size may be reduced by not forcing "
8691 "vectorization, or by source-code modifications "
8692 "eliminating the need for runtime checks "
8693 "(e.g., adding 'restrict').";
8694 });
8695 }
8696 VPlanTransforms::attachCheckBlock(Plan, MemCheckCond, MemCheckBlock,
8697 HasBranchWeights);
8698 }
8699}
8700
8702 VPlan &Plan, ElementCount VF, unsigned UF,
8703 ElementCount MinProfitableTripCount) const {
8704 // vscale is not necessarily a power-of-2, which means we cannot guarantee
8705 // an overflow to zero when updating induction variables and so an
8706 // additional overflow check is required before entering the vector loop.
8707 bool IsIndvarOverflowCheckNeededForVF =
8708 VF.isScalable() && !TTI.isVScaleKnownToBeAPowerOfTwo() &&
8709 !isIndvarOverflowCheckKnownFalse(&CM, VF, UF) &&
8710 CM.getTailFoldingStyle() !=
8712 const uint32_t *BranchWeights =
8713 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator())
8715 : nullptr;
8717 Plan, VF, UF, MinProfitableTripCount,
8718 CM.requiresScalarEpilogue(VF.isVector()), CM.foldTailByMasking(),
8719 IsIndvarOverflowCheckNeededForVF, OrigLoop, BranchWeights,
8720 OrigLoop->getLoopPredecessor()->getTerminator()->getDebugLoc(), PSE);
8721}
8722
8723// Determine how to lower the scalar epilogue, which depends on 1) optimising
8724// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
8725// predication, and 4) a TTI hook that analyses whether the loop is suitable
8726// for predication.
8728 Function *F, Loop *L, LoopVectorizeHints &Hints, bool OptForSize,
8731 // 1) OptSize takes precedence over all other options, i.e. if this is set,
8732 // don't look at hints or options, and don't request a scalar epilogue.
8733 if (F->hasOptSize() ||
8734 (OptForSize && Hints.getForce() != LoopVectorizeHints::FK_Enabled))
8736
8737 // 2) If set, obey the directives
8738 if (PreferPredicateOverEpilogue.getNumOccurrences()) {
8746 };
8747 }
8748
8749 // 3) If set, obey the hints
8750 switch (Hints.getPredicate()) {
8755 };
8756
8757 // 4) if the TTI hook indicates this is profitable, request predication.
8758 TailFoldingInfo TFI(TLI, &LVL, IAI);
8759 if (TTI->preferPredicateOverEpilogue(&TFI))
8761
8763}
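// Editor's sketch (illustrative only, not part of this file): the precedence
// of the four inputs described in the comment above, simplified and with
// hypothetical names. Earlier checks win; the TTI preference is consulted
// only if nothing else decided (and the real code additionally lets a forced
// vectorization hint override the size-based decision).
enum class ToyEpilogueLowering { Allowed, NotAllowedOptSize, UsePredicate };
static ToyEpilogueLowering
toyScalarEpilogueLowering(bool OptForSize, bool PredicateOptionSeen,
                          bool PredicateOptionValue, bool HintForcesPredicate,
                          bool TTIPrefersPredicate) {
  if (OptForSize)          // 1) optimizing for size dominates everything
    return ToyEpilogueLowering::NotAllowedOptSize;
  if (PredicateOptionSeen) // 2) an explicit compiler option, if given
    return PredicateOptionValue ? ToyEpilogueLowering::UsePredicate
                                : ToyEpilogueLowering::Allowed;
  if (HintForcesPredicate) // 3) a loop hint requesting predication
    return ToyEpilogueLowering::UsePredicate;
  if (TTIPrefersPredicate) // 4) the target's own preference
    return ToyEpilogueLowering::UsePredicate;
  return ToyEpilogueLowering::Allowed;
}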
8764
8765// Process the loop in the VPlan-native vectorization path. This path builds
8766 // VPlan upfront in the vectorization pipeline, which allows applying
8767// VPlan-to-VPlan transformations from the very beginning without modifying the
8768// input LLVM IR.
8774 std::function<BlockFrequencyInfo &()> GetBFI, bool OptForSize,
8775 LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements) {
8776
8778 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
8779 return false;
8780 }
8781 assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
8782 Function *F = L->getHeader()->getParent();
8783 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
8784
8786 getScalarEpilogueLowering(F, L, Hints, OptForSize, TTI, TLI, *LVL, &IAI);
8787
8788 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE,
8789 GetBFI, F, &Hints, IAI, OptForSize);
8790 // Use the planner for outer loop vectorization.
8791 // TODO: CM is not used at this point inside the planner. Turn CM into an
8792 // optional argument if we don't need it in the future.
8793 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, LVL, CM, IAI, PSE, Hints,
8794 ORE);
8795
8796 // Get user vectorization factor.
8797 ElementCount UserVF = Hints.getWidth();
8798
8800
8801 // Plan how to best vectorize, return the best VF and its cost.
8802 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
8803
8804 // If we are stress testing VPlan builds, do not attempt to generate vector
8805 // code. Masked vector code generation support will follow soon.
8806 // Also, do not attempt to vectorize if no vector code will be produced.
8808 return false;
8809
8810 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
8811
8812 {
8813 GeneratedRTChecks Checks(PSE, DT, LI, TTI, CM.CostKind);
8814 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, /*UF=*/1, &CM,
8815 Checks, BestPlan);
8816 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
8817 << L->getHeader()->getParent()->getName() << "\"\n");
8818 LVP.addMinimumIterationCheck(BestPlan, VF.Width, /*UF=*/1,
8820
8821 LVP.executePlan(VF.Width, /*UF=*/1, BestPlan, LB, DT, false);
8822 }
8823
8824 reportVectorization(ORE, L, VF, 1);
8825
8826 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
8827 return true;
8828}
8829
8830// Emit a remark if there are stores to floats that required a floating point
8831 // extension. If the vectorized loop was generated with floating point, there
8832// will be a performance penalty from the conversion overhead and the change in
8833// the vector width.
8836 for (BasicBlock *BB : L->getBlocks()) {
8837 for (Instruction &Inst : *BB) {
8838 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
8839 if (S->getValueOperand()->getType()->isFloatTy())
8840 Worklist.push_back(S);
8841 }
8842 }
8843 }
8844
8845 // Traverse the floating point stores upwards, searching for floating point
8846 // conversions.
8849 while (!Worklist.empty()) {
8850 auto *I = Worklist.pop_back_val();
8851 if (!L->contains(I))
8852 continue;
8853 if (!Visited.insert(I).second)
8854 continue;
8855
8856 // Emit a remark if the floating point store required a floating
8857 // point conversion.
8858 // TODO: More work could be done to identify the root cause such as a
8859 // constant or a function return type and point the user to it.
8860 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
8861 ORE->emit([&]() {
8862 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
8863 I->getDebugLoc(), L->getHeader())
8864 << "floating point conversion changes vector width. "
8865 << "Mixed floating point precision requires an up/down "
8866 << "cast that will negatively impact performance.";
8867 });
8868
8869 for (Use &Op : I->operands())
8870 if (auto *OpI = dyn_cast<Instruction>(Op))
8871 Worklist.push_back(OpI);
8872 }
8873}
8874
8875/// For loops with uncountable early exits, find the cost of doing work when
8876/// exiting the loop early, such as calculating the final exit values of
8877/// variables used outside the loop.
8878/// TODO: This is currently overly pessimistic because the loop may not take
8879/// the early exit, but better to keep this conservative for now. In future,
8880/// it might be possible to relax this by using branch probabilities.
8882 VPlan &Plan, ElementCount VF) {
8883 InstructionCost Cost = 0;
8884 for (auto *ExitVPBB : Plan.getExitBlocks()) {
8885 for (auto *PredVPBB : ExitVPBB->getPredecessors()) {
8886 // If the predecessor is not the middle.block, then it must be the
8887 // vector.early.exit block, which may contain work to calculate the exit
8888 // values of variables used outside the loop.
8889 if (PredVPBB != Plan.getMiddleBlock()) {
8890 LLVM_DEBUG(dbgs() << "Calculating cost of work in exit block "
8891 << PredVPBB->getName() << ":\n");
8892 Cost += PredVPBB->cost(VF, CostCtx);
8893 }
8894 }
8895 }
8896 return Cost;
8897}
8898
8899/// This function determines whether or not it's still profitable to vectorize
8900/// the loop given the extra work we have to do outside of the loop:
8901/// 1. Perform the runtime checks before entering the loop to ensure it's safe
8902/// to vectorize.
8903/// 2. In the case of loops with uncountable early exits, we may have to do
8904/// extra work when exiting the loop early, such as calculating the final
8905/// exit values of variables used outside the loop.
8906/// 3. The middle block.
8907static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks,
8908 VectorizationFactor &VF, Loop *L,
8910 VPCostContext &CostCtx, VPlan &Plan,
8912 std::optional<unsigned> VScale) {
8913 InstructionCost RtC = Checks.getCost();
8914 if (!RtC.isValid())
8915 return false;
8916
8917 // When interleaving only, the scalar and vector costs will be equal, which in
8918 // turn would lead to a divide by 0. Fall back to a hard threshold.
8919 if (VF.Width.isScalar()) {
8920 // TODO: Should we rename VectorizeMemoryCheckThreshold?
8922 LLVM_DEBUG(
8923 dbgs()
8924 << "LV: Interleaving only is not profitable due to runtime checks\n");
8925 return false;
8926 }
8927 return true;
8928 }
8929
8930 // The scalar cost should only be 0 when vectorizing with a user specified
8931 // VF/IC. In those cases, runtime checks should always be generated.
8932 uint64_t ScalarC = VF.ScalarCost.getValue();
8933 if (ScalarC == 0)
8934 return true;
8935
8936 InstructionCost TotalCost = RtC;
8937 // Add on the cost of any work required in the vector early exit block, if
8938 // one exists.
8939 TotalCost += calculateEarlyExitCost(CostCtx, Plan, VF.Width);
8940 TotalCost += Plan.getMiddleBlock()->cost(VF.Width, CostCtx);
8941
8942 // First, compute the minimum iteration count required so that the vector
8943 // loop outperforms the scalar loop.
8944 // The total cost of the scalar loop is
8945 // ScalarC * TC
8946 // where
8947 // * TC is the actual trip count of the loop.
8948 // * ScalarC is the cost of a single scalar iteration.
8949 //
8950 // The total cost of the vector loop is
8951 // TotalCost + VecC * (TC / VF) + EpiC
8952 // where
8953 // * TotalCost is the sum of the costs of
8954 // - the generated runtime checks, i.e. RtC
8955 // - performing any additional work in the vector.early.exit block for
8956 // loops with uncountable early exits.
8957 // - the middle block, if ExpectedTC <= VF.Width.
8958 // * VecC is the cost of a single vector iteration.
8959 // * TC is the actual trip count of the loop
8960 // * VF is the vectorization factor
8961 // * EpiC is the cost of the generated epilogue, including the cost
8962 // of the remaining scalar operations.
8963 //
8964 // Vectorization is profitable once the total vector cost is less than the
8965 // total scalar cost:
8966 // TotalCost + VecC * (TC / VF) + EpiC < ScalarC * TC
8967 //
8968 // Now we can compute the minimum required trip count TC as
8969 // VF * (TotalCost + EpiC) / (ScalarC * VF - VecC) < TC
8970 //
8971 // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
8972 // the computations are performed on integers and the division is rounded
8973 // up, hence we get an upper estimate of the TC.
8974 unsigned IntVF = estimateElementCount(VF.Width, VScale);
8975 uint64_t Div = ScalarC * IntVF - VF.Cost.getValue();
8976 uint64_t MinTC1 =
8977 Div == 0 ? 0 : divideCeil(TotalCost.getValue() * IntVF, Div);
8978
8979 // Second, compute a minimum iteration count so that the cost of the
8980 // runtime checks is only a fraction of the total scalar loop cost. This
8981 // adds a loop-dependent bound on the overhead incurred if the runtime
8982 // checks fail. In case the runtime checks fail, the cost is RtC + ScalarC
8983 // * TC. To bound the runtime check to be a fraction 1/X of the scalar
8984 // cost, compute
8985 // RtC < ScalarC * TC * (1 / X) ==> RtC * X / ScalarC < TC
8986 uint64_t MinTC2 = divideCeil(RtC.getValue() * 10, ScalarC);
8987
8988 // Now pick the larger minimum. If it is not a multiple of VF and a scalar
8989 // epilogue is allowed, choose the next closest multiple of VF. This should
8990 // partly compensate for ignoring the epilogue cost.
8991 uint64_t MinTC = std::max(MinTC1, MinTC2);
8992 if (SEL == CM_ScalarEpilogueAllowed)
8993 MinTC = alignTo(MinTC, IntVF);
8995
8996 LLVM_DEBUG(
8997 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
8998 << VF.MinProfitableTripCount << "\n");
8999
9000 // Skip vectorization if the expected trip count is less than the minimum
9001 // required trip count.
9002 if (auto ExpectedTC = getSmallBestKnownTC(PSE, L)) {
9003 if (ElementCount::isKnownLT(*ExpectedTC, VF.MinProfitableTripCount)) {
9004 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
9005 "trip count < minimum profitable VF ("
9006 << *ExpectedTC << " < " << VF.MinProfitableTripCount
9007 << ")\n");
9008
9009 return false;
9010 }
9011 }
9012 return true;
9013}
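// Editor's sketch (illustrative only, not part of this file): the two
// minimum-trip-count bounds derived in the comments above, with made-up
// example costs. With ScalarC = 4, VecC = 10, VF = 8, RtC = 20 and no extra
// early-exit or middle-block cost, MinTC1 = ceil(20 * 8 / (4 * 8 - 10)) = 8
// and MinTC2 = ceil(20 * 10 / 4) = 50, so the runtime checks only pay off for
// trip counts of at least 50 (56 once rounded up to a multiple of VF when a
// scalar epilogue is allowed).
static unsigned long long toyCeilDiv(unsigned long long A,
                                     unsigned long long B) {
  return (A + B - 1) / B;
}
static unsigned long long
toyMinProfitableTripCount(unsigned long long ScalarC, unsigned long long VecC,
                          unsigned long long VF, unsigned long long RtC,
                          unsigned long long ExtraCost, // early exit + middle block
                          bool ScalarEpilogueAllowed) {
  unsigned long long TotalCost = RtC + ExtraCost;
  unsigned long long Div = ScalarC * VF - VecC;
  unsigned long long MinTC1 = Div == 0 ? 0 : toyCeilDiv(TotalCost * VF, Div);
  unsigned long long MinTC2 = toyCeilDiv(RtC * 10, ScalarC); // checks <= 1/10
  unsigned long long MinTC = MinTC1 > MinTC2 ? MinTC1 : MinTC2;
  if (ScalarEpilogueAllowed)
    MinTC = toyCeilDiv(MinTC, VF) * VF; // round up to a multiple of VF
  return MinTC;
}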
9014
9016 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9018 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9020
9021/// Prepare \p MainPlan for vectorizing the main vector loop during epilogue
9022/// vectorization. Remove ResumePhis from \p MainPlan for inductions that
9023/// don't have a corresponding wide induction in \p EpiPlan.
9024static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan) {
9025 // Collect PHI nodes of widened phis in the VPlan for the epilogue. Those
9026 // will need their resume-values computed in the main vector loop. Others
9027 // can be removed from the main VPlan.
9028 SmallPtrSet<PHINode *, 2> EpiWidenedPhis;
9029 for (VPRecipeBase &R :
9032 continue;
9033 EpiWidenedPhis.insert(
9034 cast<PHINode>(R.getVPSingleValue()->getUnderlyingValue()));
9035 }
9036 for (VPRecipeBase &R :
9037 make_early_inc_range(MainPlan.getScalarHeader()->phis())) {
9038 auto *VPIRInst = cast<VPIRPhi>(&R);
9039 if (EpiWidenedPhis.contains(&VPIRInst->getIRPhi()))
9040 continue;
9041 // There is no corresponding wide induction in the epilogue plan that would
9042 // need a resume value. Remove the VPIRInst wrapping the scalar header phi
9043 // together with the corresponding ResumePhi. The resume values for the
9044 // scalar loop will be created during execution of EpiPlan.
9045 VPRecipeBase *ResumePhi = VPIRInst->getOperand(0)->getDefiningRecipe();
9046 VPIRInst->eraseFromParent();
9047 ResumePhi->eraseFromParent();
9048 }
9050
9051 using namespace VPlanPatternMatch;
9052 // When vectorizing the epilogue, FindFirstIV & FindLastIV reductions can
9053 // introduce multiple uses of undef/poison. If the reduction start value may
9054 // be undef or poison it needs to be frozen and the frozen start has to be
9055 // used when computing the reduction result. We also need to use the frozen
9056 // value in the resume phi generated by the main vector loop, as this is also
9057 // used to compute the reduction result after the epilogue vector loop.
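// Sketch of the resulting IR shape (value names are hypothetical):
//   %fr = freeze i32 %rdx.start
// with %fr then feeding both the reduction-result computation and, for the
// main plan, the resume phi used after the epilogue loop.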
9058 auto AddFreezeForFindLastIVReductions = [](VPlan &Plan,
9059 bool UpdateResumePhis) {
9060 VPBuilder Builder(Plan.getEntry());
9061 for (VPRecipeBase &R : *Plan.getMiddleBlock()) {
9062 auto *VPI = dyn_cast<VPInstruction>(&R);
9063 if (!VPI)
9064 continue;
9065 VPValue *OrigStart;
9066 if (!matchFindIVResult(VPI, m_VPValue(), m_VPValue(OrigStart)))
9067 continue;
9069 continue;
9070 VPInstruction *Freeze =
9071 Builder.createNaryOp(Instruction::Freeze, {OrigStart}, {}, "fr");
9072 VPI->setOperand(2, Freeze);
9073 if (UpdateResumePhis)
9074 OrigStart->replaceUsesWithIf(Freeze, [Freeze](VPUser &U, unsigned) {
9075 return Freeze != &U && isa<VPPhi>(&U);
9076 });
9077 }
9078 };
9079 AddFreezeForFindLastIVReductions(MainPlan, true);
9080 AddFreezeForFindLastIVReductions(EpiPlan, false);
9081
9082 VPBasicBlock *MainScalarPH = MainPlan.getScalarPreheader();
9083 VPValue *VectorTC = &MainPlan.getVectorTripCount();
9084 // If there is a suitable resume value for the canonical induction in the
9085 // scalar (which will become vector) epilogue loop, use it and move it to the
9086 // beginning of the scalar preheader. Otherwise create it below.
9087 auto ResumePhiIter =
9088 find_if(MainScalarPH->phis(), [VectorTC](VPRecipeBase &R) {
9089 return match(&R, m_VPInstruction<Instruction::PHI>(m_Specific(VectorTC),
9090 m_ZeroInt()));
9091 });
9092 VPPhi *ResumePhi = nullptr;
9093 if (ResumePhiIter == MainScalarPH->phis().end()) {
9094 VPBuilder ScalarPHBuilder(MainScalarPH, MainScalarPH->begin());
9095 ResumePhi = ScalarPHBuilder.createScalarPhi(
9096 {VectorTC,
9098 {}, "vec.epilog.resume.val");
9099 } else {
9100 ResumePhi = cast<VPPhi>(&*ResumePhiIter);
9101 if (MainScalarPH->begin() == MainScalarPH->end())
9102 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->end());
9103 else if (&*MainScalarPH->begin() != ResumePhi)
9104 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->begin());
9105 }
9106 // Add a user to make sure the resume phi won't get removed.
9107 VPBuilder(MainScalarPH)
9109}
9110
9111/// Prepare \p Plan for vectorizing the epilogue loop. That is, re-use expanded
9112/// SCEVs from \p ExpandedSCEVs and set resume values for header recipes. Some
9113/// reductions require creating new instructions to compute the resume values.
9114/// They are collected in a vector and returned. They must be moved to the
9115/// preheader of the vector epilogue loop after they have been created by the
9116/// execution of \p Plan.
9117static SmallVector<Instruction *> preparePlanForEpilogueVectorLoop(
9118 VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs,
9119 EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel &CM,
9120 ScalarEvolution &SE) {
9121 VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
9122 VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
9123 Header->setName("vec.epilog.vector.body");
9124
9125 VPCanonicalIVPHIRecipe *IV = VectorLoop->getCanonicalIV();
9126 // When vectorizing the epilogue loop, the canonical induction needs to be
9127 // adjusted by the value after the main vector loop. Find the resume value
9128 // created during execution of the main VPlan. It must be the first phi in the
9129 // loop preheader. Use the value to increment the canonical IV, and update all
9130 // users in the loop region to use the adjusted value.
9131 // FIXME: Improve modeling for canonical IV start values in the epilogue
9132 // loop.
9133 using namespace llvm::PatternMatch;
9134 PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
9135 for (Value *Inc : EPResumeVal->incoming_values()) {
9136 if (match(Inc, m_SpecificInt(0)))
9137 continue;
9138 assert(!EPI.VectorTripCount &&
9139 "Must only have a single non-zero incoming value");
9140 EPI.VectorTripCount = Inc;
9141 }
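// For example (names purely illustrative), for a resume phi like
//   %resume = phi i64 [ %n.vec, %middle.block ], [ 0, %iter.check ]
// the loop above picks %n.vec, the vector trip count of the main loop.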
9142 // If we didn't find a non-zero vector trip count, all incoming values
9143 // must be zero, which also means the vector trip count is zero. Pick the
9144 // first zero as vector trip count.
9145 // TODO: We should not choose VF * UF so the main vector loop is known to
9146 // be dead.
9147 if (!EPI.VectorTripCount) {
9148 assert(EPResumeVal->getNumIncomingValues() > 0 &&
9149 all_of(EPResumeVal->incoming_values(),
9150 [](Value *Inc) { return match(Inc, m_SpecificInt(0)); }) &&
9151 "all incoming values must be 0");
9152 EPI.VectorTripCount = EPResumeVal->getOperand(0);
9153 }
9154 VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal);
9155 assert(all_of(IV->users(),
9156 [](const VPUser *U) {
9157 return isa<VPScalarIVStepsRecipe>(U) ||
9158 isa<VPDerivedIVRecipe>(U) ||
9159 cast<VPRecipeBase>(U)->isScalarCast() ||
9160 cast<VPInstruction>(U)->getOpcode() ==
9161 Instruction::Add;
9162 }) &&
9163 "the canonical IV should only be used by its increment or "
9164 "ScalarIVSteps when resetting the start value");
9165 VPBuilder Builder(Header, Header->getFirstNonPhi());
9166 VPInstruction *Add = Builder.createNaryOp(Instruction::Add, {IV, VPV});
9167 IV->replaceAllUsesWith(Add);
9168 Add->setOperand(0, IV);
9169
9170 DenseMap<Value *, Value *> ToFrozen;
9171 SmallVector<Instruction *> InstsToMove;
9172 // Ensure that the start values for all header phi recipes are updated before
9173 // vectorizing the epilogue loop. Skip the canonical IV, which has been
9174 // handled above.
9175 for (VPRecipeBase &R : drop_begin(Header->phis())) {
9176 Value *ResumeV = nullptr;
9177 // TODO: Move setting of resume values to prepareToExecute.
9178 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
9179 // Find the reduction result by searching users of the phi or its backedge
9180 // value.
9181 auto IsReductionResult = [](VPRecipeBase *R) {
9182 auto *VPI = dyn_cast<VPInstruction>(R);
9183 if (!VPI)
9184 return false;
9187 };
9188 auto *RdxResult = cast<VPInstruction>(
9189 vputils::findRecipe(ReductionPhi->getBackedgeValue(), IsReductionResult));
9190 assert(RdxResult && "expected to find reduction result");
9191
9192 ResumeV = cast<PHINode>(ReductionPhi->getUnderlyingInstr())
9193 ->getIncomingValueForBlock(L->getLoopPreheader());
9194
9195 // Check for FindIV pattern by looking for icmp user of RdxResult.
9196 // The pattern is: select(icmp ne RdxResult, Sentinel), RdxResult, Start
9197 using namespace VPlanPatternMatch;
9198 VPValue *SentinelVPV = nullptr;
9199 bool IsFindIV = any_of(RdxResult->users(), [&](VPUser *U) {
9200 return match(U, VPlanPatternMatch::m_SpecificICmp(
9201 ICmpInst::ICMP_NE, m_Specific(RdxResult),
9202 m_VPValue(SentinelVPV)));
9203 });
9204
9205 if (RdxResult->getOpcode() == VPInstruction::ComputeAnyOfResult) {
9206 Value *StartV = RdxResult->getOperand(0)->getLiveInIRValue();
9207 // VPReductionPHIRecipes for AnyOf reductions expect a boolean as
9208 // start value; compare the final value from the main vector loop
9209 // to the start value.
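// Illustrative shape of the emitted compare (names and types hypothetical):
//   %rdx.resume = icmp ne i32 %rdx.main, %rdx.start
// i.e. the epilogue starts from "true" iff the main vector loop already
// found a match.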
9210 BasicBlock *PBB = cast<Instruction>(ResumeV)->getParent();
9211 IRBuilder<> Builder(PBB, PBB->getFirstNonPHIIt());
9212 ResumeV = Builder.CreateICmpNE(ResumeV, StartV);
9213 if (auto *I = dyn_cast<Instruction>(ResumeV))
9214 InstsToMove.push_back(I);
9215 } else if (IsFindIV) {
9216 assert(SentinelVPV && "expected to find icmp using RdxResult");
9217
9218 // Get the frozen start value from the main loop.
9219 Value *FrozenStartV = cast<PHINode>(ResumeV)->getIncomingValueForBlock(
9221 if (auto *FreezeI = dyn_cast<FreezeInst>(FrozenStartV))
9222 ToFrozen[FreezeI->getOperand(0)] = FrozenStartV;
9223
9224 // Adjust resume: select(icmp eq ResumeV, FrozenStartV), Sentinel,
9225 // ResumeV
9226 BasicBlock *ResumeBB = cast<Instruction>(ResumeV)->getParent();
9227 IRBuilder<> Builder(ResumeBB, ResumeBB->getFirstNonPHIIt());
9228 Value *Cmp = Builder.CreateICmpEQ(ResumeV, FrozenStartV);
9229 if (auto *I = dyn_cast<Instruction>(Cmp))
9230 InstsToMove.push_back(I);
9231 ResumeV =
9232 Builder.CreateSelect(Cmp, SentinelVPV->getLiveInIRValue(), ResumeV);
9233 if (auto *I = dyn_cast<Instruction>(ResumeV))
9234 InstsToMove.push_back(I);
9235 } else {
9236 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
9237 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9238 if (auto *VPI = dyn_cast<VPInstruction>(PhiR->getStartValue())) {
9240 "unexpected start value");
9241 VPI->setOperand(0, StartVal);
9242 continue;
9243 }
9244 }
9245 } else {
9246 // Retrieve the induction resume values for wide inductions from
9247 // their original phi nodes in the scalar loop.
9248 PHINode *IndPhi = cast<VPWidenInductionRecipe>(&R)->getPHINode();
9249 // Hook up to the PHINode generated by a ResumePhi recipe of main
9250 // loop VPlan, which feeds the scalar loop.
9251 ResumeV = IndPhi->getIncomingValueForBlock(L->getLoopPreheader());
9252 }
9253 assert(ResumeV && "Must have a resume value");
9254 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
9255 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
9256 }
9257
9258 // For some VPValues in the epilogue plan we must re-use the generated IR
9259 // values from the main plan. Replace them with live-in VPValues.
9260 // TODO: This is a workaround needed for epilogue vectorization and it
9261 // should be removed once induction resume value creation is done
9262 // directly in VPlan.
9263 for (auto &R : make_early_inc_range(*Plan.getEntry())) {
9264 // Re-use frozen values from the main plan for Freeze VPInstructions in the
9265 // epilogue plan. This ensures all users use the same frozen value.
9266 auto *VPI = dyn_cast<VPInstruction>(&R);
9267 if (VPI && VPI->getOpcode() == Instruction::Freeze) {
9268 VPI->replaceAllUsesWith(Plan.getOrAddLiveIn(
9269 ToFrozen.lookup(VPI->getOperand(0)->getLiveInIRValue())));
9270 continue;
9271 }
9272
9273 // Re-use the trip count and steps expanded for the main loop, as
9274 // skeleton creation needs them as values that dominate both the scalar
9275 // and vector epilogue loops.
9276 auto *ExpandR = dyn_cast<VPExpandSCEVRecipe>(&R);
9277 if (!ExpandR)
9278 continue;
9279 VPValue *ExpandedVal =
9280 Plan.getOrAddLiveIn(ExpandedSCEVs.lookup(ExpandR->getSCEV()));
9281 ExpandR->replaceAllUsesWith(ExpandedVal);
9282 if (Plan.getTripCount() == ExpandR)
9283 Plan.resetTripCount(ExpandedVal);
9284 ExpandR->eraseFromParent();
9285 }
9286
9287 auto VScale = CM.getVScaleForTuning();
9288 unsigned MainLoopStep =
9289 estimateElementCount(EPI.MainLoopVF * EPI.MainLoopUF, VScale);
9290 unsigned EpilogueLoopStep =
9291 estimateElementCount(EPI.EpilogueVF * EPI.EpilogueUF, VScale);
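// For instance (assumed numbers): MainLoopVF = vscale x 4, MainLoopUF = 2 and
// an estimated vscale of 2 yield MainLoopStep = 16, while a fixed
// EpilogueVF = 2 with EpilogueUF = 1 yields EpilogueLoopStep = 2.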
9293 Plan, EPI.TripCount, EPI.VectorTripCount,
9295 EPI.EpilogueUF, MainLoopStep, EpilogueLoopStep, SE);
9296
9297 return InstsToMove;
9298}
9299
9300// Generate bypass values from the additional bypass block. Note that when the
9301// vectorized epilogue is skipped due to iteration count check, then the
9302// resume value for the induction variable comes from the trip count of the
9303// main vector loop, passed as the second argument.
9304static Value *createInductionAdditionalBypassValues(
9305 PHINode *OrigPhi, const InductionDescriptor &II, IRBuilder<> &BypassBuilder,
9306 const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount,
9307 Instruction *OldInduction) {
9308 Value *Step = getExpandedStep(II, ExpandedSCEVs);
9309 // For the primary induction the additional bypass end value is known.
9310 // Otherwise it is computed.
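// For an integer induction with start S and step C (symbols illustrative),
// the value after MainVectorTripCount iterations is
//   S + MainVectorTripCount * C
// which is what emitTransformedIndex materializes below.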
9311 Value *EndValueFromAdditionalBypass = MainVectorTripCount;
9312 if (OrigPhi != OldInduction) {
9313 auto *BinOp = II.getInductionBinOp();
9314 // Fast-math-flags propagate from the original induction instruction.
9315 if (isa<FPMathOperator>(BinOp))
9316 BypassBuilder.setFastMathFlags(BinOp->getFastMathFlags());
9317
9318 // Compute the end value for the additional bypass.
9319 EndValueFromAdditionalBypass =
9320 emitTransformedIndex(BypassBuilder, MainVectorTripCount,
9321 II.getStartValue(), Step, II.getKind(), BinOp);
9322 EndValueFromAdditionalBypass->setName("ind.end");
9323 }
9324 return EndValueFromAdditionalBypass;
9325}
9326
9327static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L,
9328 VPlan &BestEpiPlan,
9329 LoopVectorizationLegality &LVL,
9330 const SCEV2ValueTy &ExpandedSCEVs,
9331 Value *MainVectorTripCount) {
9332 // Fix reduction resume values from the additional bypass block.
9333 BasicBlock *PH = L->getLoopPreheader();
9334 for (auto *Pred : predecessors(PH)) {
9335 for (PHINode &Phi : PH->phis()) {
9336 if (Phi.getBasicBlockIndex(Pred) != -1)
9337 continue;
9338 Phi.addIncoming(Phi.getIncomingValueForBlock(BypassBlock), Pred);
9339 }
9340 }
9341 auto *ScalarPH = cast<VPIRBasicBlock>(BestEpiPlan.getScalarPreheader());
9342 if (ScalarPH->hasPredecessors()) {
9343 // If ScalarPH has predecessors, we may need to update its reduction
9344 // resume values.
9345 for (const auto &[R, IRPhi] :
9346 zip(ScalarPH->phis(), ScalarPH->getIRBasicBlock()->phis())) {
9347 fixReductionScalarResumeWhenVectorizingEpilog(cast<VPPhi>(&R), IRPhi,
9348 BypassBlock);
9349 }
9350 }
9351
9352 // Fix induction resume values from the additional bypass block.
9353 IRBuilder<> BypassBuilder(BypassBlock, BypassBlock->getFirstInsertionPt());
9354 for (const auto &[IVPhi, II] : LVL.getInductionVars()) {
9355 auto *Inc = cast<PHINode>(IVPhi->getIncomingValueForBlock(PH));
9356 Value *V = createInductionAdditionalBypassValues(
9357 IVPhi, II, BypassBuilder, ExpandedSCEVs, MainVectorTripCount,
9358 LVL.getPrimaryInduction());
9359 // TODO: Directly add as extra operand to the VPResumePHI recipe.
9360 Inc->setIncomingValueForBlock(BypassBlock, V);
9361 }
9362}
9363
9364/// Connect the epilogue vector loop generated for \p EpiPlan to the main vector
9365/// loop, after both plans have executed, updating branches from the iteration
9366/// and runtime checks of the main loop, as well as updating various phis. \p
9367/// InstsToMove contains instructions that need to be moved to the preheader of
9368/// the epilogue vector loop.
9369static void connectEpilogueVectorLoop(
9370 VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI,
9371 DominatorTree *DT, LoopVectorizationLegality &LVL,
9372 DenseMap<const SCEV *, Value *> &ExpandedSCEVs, GeneratedRTChecks &Checks,
9373 ArrayRef<Instruction *> InstsToMove) {
9374 BasicBlock *VecEpilogueIterationCountCheck =
9375 cast<VPIRBasicBlock>(EpiPlan.getEntry())->getIRBasicBlock();
9376
9377 BasicBlock *VecEpiloguePreHeader =
9378 cast<BranchInst>(VecEpilogueIterationCountCheck->getTerminator())
9379 ->getSuccessor(1);
9380 // Adjust the control flow taking the state info from the main loop
9381 // vectorization into account.
9383 "expected this to be saved from the previous pass.");
9384 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
9386 VecEpilogueIterationCountCheck, VecEpiloguePreHeader);
9387
9389 VecEpilogueIterationCountCheck},
9391 VecEpiloguePreHeader}});
9392
9393 BasicBlock *ScalarPH =
9394 cast<VPIRBasicBlock>(EpiPlan.getScalarPreheader())->getIRBasicBlock();
9396 VecEpilogueIterationCountCheck, ScalarPH);
9397 DTU.applyUpdates(
9399 VecEpilogueIterationCountCheck},
9401
9402 // Adjust the terminators of runtime check blocks and phis using them.
9403 BasicBlock *SCEVCheckBlock = Checks.getSCEVChecks().second;
9404 BasicBlock *MemCheckBlock = Checks.getMemRuntimeChecks().second;
9405 if (SCEVCheckBlock) {
9406 SCEVCheckBlock->getTerminator()->replaceUsesOfWith(
9407 VecEpilogueIterationCountCheck, ScalarPH);
9408 DTU.applyUpdates({{DominatorTree::Delete, SCEVCheckBlock,
9409 VecEpilogueIterationCountCheck},
9410 {DominatorTree::Insert, SCEVCheckBlock, ScalarPH}});
9411 }
9412 if (MemCheckBlock) {
9413 MemCheckBlock->getTerminator()->replaceUsesOfWith(
9414 VecEpilogueIterationCountCheck, ScalarPH);
9415 DTU.applyUpdates(
9416 {{DominatorTree::Delete, MemCheckBlock, VecEpilogueIterationCountCheck},
9417 {DominatorTree::Insert, MemCheckBlock, ScalarPH}});
9418 }
9419
9420 // The vec.epilog.iter.check block may contain Phi nodes from inductions
9421 // or reductions which merge control-flow from the latch block and the
9422 // middle block. Update the incoming values here and move the Phi into the
9423 // preheader.
9424 SmallVector<PHINode *, 4> PhisInBlock(
9425 llvm::make_pointer_range(VecEpilogueIterationCountCheck->phis()));
9426
9427 for (PHINode *Phi : PhisInBlock) {
9428 Phi->moveBefore(VecEpiloguePreHeader->getFirstNonPHIIt());
9429 Phi->replaceIncomingBlockWith(
9430 VecEpilogueIterationCountCheck->getSinglePredecessor(),
9431 VecEpilogueIterationCountCheck);
9432
9433 // If the phi doesn't have an incoming value from the
9434 // EpilogueIterationCountCheck, we are done. Otherwise remove the
9435 // incoming value and also those from other check blocks. This is needed
9436 // for reduction phis only.
9437 if (none_of(Phi->blocks(), [&](BasicBlock *IncB) {
9438 return EPI.EpilogueIterationCountCheck == IncB;
9439 }))
9440 continue;
9441 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
9442 if (SCEVCheckBlock)
9443 Phi->removeIncomingValue(SCEVCheckBlock);
9444 if (MemCheckBlock)
9445 Phi->removeIncomingValue(MemCheckBlock);
9446 }
9447
9448 auto IP = VecEpiloguePreHeader->getFirstNonPHIIt();
9449 for (auto *I : InstsToMove)
9450 I->moveBefore(IP);
9451
9452 // VecEpilogueIterationCountCheck conditionally skips over the epilogue loop
9453 // after executing the main loop. We need to update the resume values of
9454 // inductions and reductions during epilogue vectorization.
9455 fixScalarResumeValuesFromBypass(VecEpilogueIterationCountCheck, L, EpiPlan,
9456 LVL, ExpandedSCEVs, EPI.VectorTripCount);
9457}
9458
9459bool LoopVectorizePass::processLoop(Loop *L) {
9460 assert((EnableVPlanNativePath || L->isInnermost()) &&
9461 "VPlan-native path is not enabled. Only process inner loops.");
9462
9463 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
9464 << L->getHeader()->getParent()->getName() << "' from "
9465 << L->getLocStr() << "\n");
9466
9467 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
9468
9469 LLVM_DEBUG(
9470 dbgs() << "LV: Loop hints:"
9471 << " force="
9472 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9473 ? "disabled"
9474 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9475 ? "enabled"
9476 : "?"))
9477 << " width=" << Hints.getWidth()
9478 << " interleave=" << Hints.getInterleave() << "\n");
9479
9480 // Function containing loop
9481 Function *F = L->getHeader()->getParent();
9482
9483 // Looking at the diagnostic output is the only way to determine if a loop
9484 // was vectorized (other than looking at the IR or machine code), so it
9485 // is important to generate an optimization remark for each loop. Most of
9486 // these messages are generated as OptimizationRemarkAnalysis. Remarks
9487 // generated as OptimizationRemark and OptimizationRemarkMissed are
9488 // less verbose reporting vectorized loops and unvectorized loops that may
9489 // benefit from vectorization, respectively.
9490
9491 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9492 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9493 return false;
9494 }
9495
9496 PredicatedScalarEvolution PSE(*SE, *L);
9497
9498 // Query this against the original loop and save it here because the profile
9499 // of the original loop header may change as the transformation happens.
9500 bool OptForSize = llvm::shouldOptimizeForSize(
9501 L->getHeader(), PSI,
9502 PSI && PSI->hasProfileSummary() ? &GetBFI() : nullptr,
9503 PGSOQueryType::IRPass);
9504
9505 // Check if it is legal to vectorize the loop.
9506 LoopVectorizationRequirements Requirements;
9507 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, F, *LAIs, LI, ORE,
9508 &Requirements, &Hints, DB, AC,
9509 /*AllowRuntimeSCEVChecks=*/!OptForSize, AA);
9510 if (!LVL.canVectorize(EnableVPlanNativePath)) {
9511 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9512 Hints.emitRemarkWithHints();
9513 return false;
9514 }
9515
9516 if (LVL.hasUncountableEarlyExit()) {
9517 if (!EnableEarlyExitVectorization) {
9518 reportVectorizationFailure("Auto-vectorization of loops with uncountable "
9519 "early exit is not enabled",
9520 "UncountableEarlyExitLoopsDisabled", ORE, L);
9521 return false;
9522 }
9523 SmallVector<BasicBlock *, 8> ExitingBlocks;
9524 L->getExitingBlocks(ExitingBlocks);
9525 // TODO: Support multiple uncountable early exits.
9526 if (ExitingBlocks.size() - LVL.getCountableExitingBlocks().size() > 1) {
9527 reportVectorizationFailure("Auto-vectorization of loops with multiple "
9528 "uncountable early exits is not yet supported",
9529 "MultipleUncountableEarlyExits", ORE, L);
9530 return false;
9531 }
9532 }
9533
9534 if (!LVL.getPotentiallyFaultingLoads().empty()) {
9535 reportVectorizationFailure("Auto-vectorization of loops with potentially "
9536 "faulting load is not supported",
9537 "PotentiallyFaultingLoadsNotSupported", ORE, L);
9538 return false;
9539 }
9540
9541 // Entrance to the VPlan-native vectorization path. Outer loops are processed
9542 // here. They may require CFG and instruction level transformations before
9543 // even evaluating whether vectorization is profitable. Since we cannot modify
9544 // the incoming IR, we need to build VPlan upfront in the vectorization
9545 // pipeline.
9546 if (!L->isInnermost())
9547 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9548 ORE, GetBFI, OptForSize, Hints,
9549 Requirements);
9550
9551 assert(L->isInnermost() && "Inner loop expected.");
9552
9553 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
9554 bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
9555
9556 // If an override option has been passed in for interleaved accesses, use it.
9557 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
9558 UseInterleaved = EnableInterleavedMemAccesses;
9559
9560 // Analyze interleaved memory accesses.
9561 if (UseInterleaved)
9562 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
9563
9564 if (LVL.hasUncountableEarlyExit()) {
9565 BasicBlock *LoopLatch = L->getLoopLatch();
9566 if (IAI.requiresScalarEpilogue() ||
9568 [LoopLatch](BasicBlock *BB) { return BB != LoopLatch; })) {
9569 reportVectorizationFailure("Auto-vectorization of early exit loops "
9570 "requiring a scalar epilogue is unsupported",
9571 "UncountableEarlyExitUnsupported", ORE, L);
9572 return false;
9573 }
9574 }
9575
9576 // Check the function attributes and profiles to find out if this function
9577 // should be optimized for size.
9578 ScalarEpilogueLowering SEL =
9579 getScalarEpilogueLowering(F, L, Hints, OptForSize, TTI, TLI, LVL, &IAI);
9580
9581 // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9582 // count by optimizing for size, to minimize overheads.
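// For example, with the default vectorizer-min-trip-count of 16, a loop with
// a known trip count of 8 is treated as a tiny-trip-count loop here.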
9583 auto ExpectedTC = getSmallBestKnownTC(PSE, L);
9584 if (ExpectedTC && ExpectedTC->isFixed() &&
9585 ExpectedTC->getFixedValue() < TinyTripCountVectorThreshold) {
9586 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9587 << "This loop is worth vectorizing only if no scalar "
9588 << "iteration overheads are incurred.");
9589 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9590 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9591 else {
9592 LLVM_DEBUG(dbgs() << "\n");
9593 // Predicate tail-folded loops are efficient even when the loop
9594 // iteration count is low. However, setting the epilogue policy to
9595 // `CM_ScalarEpilogueNotAllowedLowTripLoop` prevents vectorizing loops
9596 // with runtime checks. It's more effective to let
9597 // `isOutsideLoopWorkProfitable` determine if vectorization is
9598 // beneficial for the loop.
9601 }
9602 }
9603
9604 // Check the function attributes to see if implicit floats or vectors are
9605 // allowed.
9606 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9608 "Can't vectorize when the NoImplicitFloat attribute is used",
9609 "loop not vectorized due to NoImplicitFloat attribute",
9610 "NoImplicitFloat", ORE, L);
9611 Hints.emitRemarkWithHints();
9612 return false;
9613 }
9614
9615 // Check if the target supports potentially unsafe FP vectorization.
9616 // FIXME: Add a check for the type of safety issue (denormal, signaling)
9617 // for the target we're vectorizing for, to make sure none of the
9618 // additional fp-math flags can help.
9619 if (Hints.isPotentiallyUnsafe() &&
9620 TTI->isFPVectorizationPotentiallyUnsafe()) {
9622 "Potentially unsafe FP op prevents vectorization",
9623 "loop not vectorized due to unsafe FP support.",
9624 "UnsafeFP", ORE, L);
9625 Hints.emitRemarkWithHints();
9626 return false;
9627 }
9628
9629 bool AllowOrderedReductions;
9630 // If the flag is set, use that instead and override the TTI behaviour.
9631 if (ForceOrderedReductions.getNumOccurrences() > 0)
9632 AllowOrderedReductions = ForceOrderedReductions;
9633 else
9634 AllowOrderedReductions = TTI->enableOrderedReductions();
9635 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
9636 ORE->emit([&]() {
9637 auto *ExactFPMathInst = Requirements.getExactFPInst();
9638 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
9639 ExactFPMathInst->getDebugLoc(),
9640 ExactFPMathInst->getParent())
9641 << "loop not vectorized: cannot prove it is safe to reorder "
9642 "floating-point operations";
9643 });
9644 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
9645 "reorder floating-point operations\n");
9646 Hints.emitRemarkWithHints();
9647 return false;
9648 }
9649
9650 // Use the cost model.
9651 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9652 GetBFI, F, &Hints, IAI, OptForSize);
9653 // Use the planner for vectorization.
9654 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, IAI, PSE, Hints,
9655 ORE);
9656
9657 // Get user vectorization factor and interleave count.
9658 ElementCount UserVF = Hints.getWidth();
9659 unsigned UserIC = Hints.getInterleave();
9660 if (UserIC > 1 && !LVL.isSafeForAnyVectorWidth())
9661 UserIC = 1;
9662
9663 // Plan how to best vectorize.
9664 LVP.plan(UserVF, UserIC);
9665 VectorizationFactor VF = LVP.computeBestVF();
9666 unsigned IC = 1;
9667
9668 if (ORE->allowExtraAnalysis(LV_NAME))
9670
9671 GeneratedRTChecks Checks(PSE, DT, LI, TTI, CM.CostKind);
9672 if (LVP.hasPlanWithVF(VF.Width)) {
9673 // Select the interleave count.
9674 IC = LVP.selectInterleaveCount(LVP.getPlanFor(VF.Width), VF.Width, VF.Cost);
9675
9676 unsigned SelectedIC = std::max(IC, UserIC);
9677 // Optimistically generate runtime checks if they are needed. Drop them if
9678 // they turn out to not be profitable.
9679 if (VF.Width.isVector() || SelectedIC > 1) {
9680 Checks.create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC,
9681 *ORE);
9682
9683 // Bail out early if either the SCEV or memory runtime checks are known to
9684 // fail. In that case, the vector loop would never execute.
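// (A check condition that has folded to the constant true means the bypass to
// the scalar loop is always taken, so the vector loop would be dead code.)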
9685 using namespace llvm::PatternMatch;
9686 if (Checks.getSCEVChecks().first &&
9687 match(Checks.getSCEVChecks().first, m_One()))
9688 return false;
9689 if (Checks.getMemRuntimeChecks().first &&
9690 match(Checks.getMemRuntimeChecks().first, m_One()))
9691 return false;
9692 }
9693
9694 // Check if it is profitable to vectorize with runtime checks.
9695 bool ForceVectorization =
9697 VPCostContext CostCtx(CM.TTI, *CM.TLI, LVP.getPlanFor(VF.Width), CM,
9698 CM.CostKind, CM.PSE, L);
9699 if (!ForceVectorization &&
9700 !isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx,
9701 LVP.getPlanFor(VF.Width), SEL,
9702 CM.getVScaleForTuning())) {
9703 ORE->emit([&]() {
9704 return OptimizationRemarkAnalysisAliasing(
9705 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
9706 L->getHeader())
9707 << "loop not vectorized: cannot prove it is safe to reorder "
9708 "memory operations";
9709 });
9710 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
9711 Hints.emitRemarkWithHints();
9712 return false;
9713 }
9714 }
9715
9716 // Identify the diagnostic messages that should be produced.
9717 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9718 bool VectorizeLoop = true, InterleaveLoop = true;
9719 if (VF.Width.isScalar()) {
9720 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
9721 VecDiagMsg = {
9722 "VectorizationNotBeneficial",
9723 "the cost-model indicates that vectorization is not beneficial"};
9724 VectorizeLoop = false;
9725 }
9726
9727 if (UserIC == 1 && Hints.getInterleave() > 1) {
9729 "UserIC should only be ignored due to unsafe dependencies");
9730 LLVM_DEBUG(dbgs() << "LV: Ignoring user-specified interleave count.\n");
9731 IntDiagMsg = {"InterleavingUnsafe",
9732 "Ignoring user-specified interleave count due to possibly "
9733 "unsafe dependencies in the loop."};
9734 InterleaveLoop = false;
9735 } else if (!LVP.hasPlanWithVF(VF.Width) && UserIC > 1) {
9736 // Tell the user interleaving was avoided up-front, despite being explicitly
9737 // requested.
9738 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
9739 "interleaving should be avoided up front\n");
9740 IntDiagMsg = {"InterleavingAvoided",
9741 "Ignoring UserIC, because interleaving was avoided up front"};
9742 InterleaveLoop = false;
9743 } else if (IC == 1 && UserIC <= 1) {
9744 // Tell the user interleaving is not beneficial.
9745 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
9746 IntDiagMsg = {
9747 "InterleavingNotBeneficial",
9748 "the cost-model indicates that interleaving is not beneficial"};
9749 InterleaveLoop = false;
9750 if (UserIC == 1) {
9751 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
9752 IntDiagMsg.second +=
9753 " and is explicitly disabled or interleave count is set to 1";
9754 }
9755 } else if (IC > 1 && UserIC == 1) {
9756 // Tell the user interleaving is beneficial, but it is explicitly disabled.
9757 LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
9758 "disabled.\n");
9759 IntDiagMsg = {"InterleavingBeneficialButDisabled",
9760 "the cost-model indicates that interleaving is beneficial "
9761 "but is explicitly disabled or interleave count is set to 1"};
9762 InterleaveLoop = false;
9763 }
9764
9765 // If there is a histogram in the loop, do not just interleave without
9766 // vectorizing. The order of operations will be incorrect without the
9767 // histogram intrinsics, which are only used for recipes with VF > 1.
9768 if (!VectorizeLoop && InterleaveLoop && LVL.hasHistograms()) {
9769 LLVM_DEBUG(dbgs() << "LV: Not interleaving without vectorization due "
9770 << "to histogram operations.\n");
9771 IntDiagMsg = {
9772 "HistogramPreventsScalarInterleaving",
9773 "Unable to interleave without vectorization due to constraints on "
9774 "the order of histogram operations"};
9775 InterleaveLoop = false;
9776 }
9777
9778 // Override IC if user provided an interleave count.
9779 IC = UserIC > 0 ? UserIC : IC;
9780
9781 // FIXME: Enable interleaving for FindLast reductions.
9782 if (InterleaveLoop && hasFindLastReductionPhi(LVP.getPlanFor(VF.Width))) {
9783 LLVM_DEBUG(dbgs() << "LV: Not interleaving due to FindLast reduction.\n");
9784 IntDiagMsg = {"FindLastPreventsScalarInterleaving",
9785 "Unable to interleave due to FindLast reduction."};
9786 InterleaveLoop = false;
9787 IC = 1;
9788 }
9789
9790 // Emit diagnostic messages, if any.
9791 const char *VAPassName = Hints.vectorizeAnalysisPassName();
9792 if (!VectorizeLoop && !InterleaveLoop) {
9793 // Do not vectorize or interleave the loop.
9794 ORE->emit([&]() {
9795 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
9796 L->getStartLoc(), L->getHeader())
9797 << VecDiagMsg.second;
9798 });
9799 ORE->emit([&]() {
9800 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
9801 L->getStartLoc(), L->getHeader())
9802 << IntDiagMsg.second;
9803 });
9804 return false;
9805 }
9806
9807 if (!VectorizeLoop && InterleaveLoop) {
9808 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9809 ORE->emit([&]() {
9810 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
9811 L->getStartLoc(), L->getHeader())
9812 << VecDiagMsg.second;
9813 });
9814 } else if (VectorizeLoop && !InterleaveLoop) {
9815 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9816 << ") in " << L->getLocStr() << '\n');
9817 ORE->emit([&]() {
9818 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
9819 L->getStartLoc(), L->getHeader())
9820 << IntDiagMsg.second;
9821 });
9822 } else if (VectorizeLoop && InterleaveLoop) {
9823 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9824 << ") in " << L->getLocStr() << '\n');
9825 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9826 }
9827
9828 // Report the vectorization decision.
9829 if (VF.Width.isScalar()) {
9830 using namespace ore;
9831 assert(IC > 1);
9832 ORE->emit([&]() {
9833 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
9834 L->getHeader())
9835 << "interleaved loop (interleaved count: "
9836 << NV("InterleaveCount", IC) << ")";
9837 });
9838 } else {
9839 // Report the vectorization decision.
9840 reportVectorization(ORE, L, VF, IC);
9841 }
9842 if (ORE->allowExtraAnalysis(LV_NAME))
9843 checkMixedPrecision(L, ORE);
9844
9845 // If we decided that it is *legal* to interleave or vectorize the loop, then
9846 // do it.
9847
9848 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
9849 // Consider vectorizing the epilogue too if it's profitable.
9850 VectorizationFactor EpilogueVF =
9851 LVP.selectEpilogueVectorizationFactor(VF.Width, IC);
9852 if (EpilogueVF.Width.isVector()) {
9853 std::unique_ptr<VPlan> BestMainPlan(BestPlan.duplicate());
9854
9855 // The first pass vectorizes the main loop and creates a scalar epilogue
9856 // to be vectorized by executing the plan (potentially with a different
9857 // factor) again shortly afterwards.
9858 VPlan &BestEpiPlan = LVP.getPlanFor(EpilogueVF.Width);
9859 BestEpiPlan.getMiddleBlock()->setName("vec.epilog.middle.block");
9860 BestEpiPlan.getVectorPreheader()->setName("vec.epilog.ph");
9861 preparePlanForMainVectorLoop(*BestMainPlan, BestEpiPlan);
9862 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1,
9863 BestEpiPlan);
9864 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
9865 Checks, *BestMainPlan);
9866 auto ExpandedSCEVs = LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF,
9867 *BestMainPlan, MainILV, DT, false);
9868 ++LoopsVectorized;
9869
9870 // Second pass vectorizes the epilogue and adjusts the control flow
9871 // edges from the first pass.
9872 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
9873 Checks, BestEpiPlan);
9874 SmallVector<Instruction *> InstsToMove = preparePlanForEpilogueVectorLoop(
9875 BestEpiPlan, L, ExpandedSCEVs, EPI, CM, *PSE.getSE());
9876 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, DT,
9877 true);
9878 connectEpilogueVectorLoop(BestEpiPlan, L, EPI, DT, LVL, ExpandedSCEVs,
9879 Checks, InstsToMove);
9880 ++LoopsEpilogueVectorized;
9881 } else {
9882 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, IC, &CM, Checks,
9883 BestPlan);
9884 // TODO: Move to general VPlan pipeline once epilogue loops are also
9885 // supported.
9887 BestPlan, VF.Width, IC, PSE);
9888 LVP.addMinimumIterationCheck(BestPlan, VF.Width, IC,
9890
9891 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT, false);
9892 ++LoopsVectorized;
9893 }
9894
9895 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
9896 "DT not preserved correctly");
9897 assert(!verifyFunction(*F, &dbgs()));
9898
9899 return true;
9900}
9901
9902LoopVectorizeResult LoopVectorizePass::runImpl(Function &F) {
9903
9904 // Don't attempt if
9905 // 1. the target claims to have no vector registers, and
9906 // 2. interleaving won't help ILP.
9907 //
9908 // The second condition is necessary because, even if the target has no
9909 // vector registers, loop vectorization may still enable scalar
9910 // interleaving.
9911 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
9912 TTI->getMaxInterleaveFactor(ElementCount::getFixed(1)) < 2)
9913 return LoopVectorizeResult(false, false);
9914
9915 bool Changed = false, CFGChanged = false;
9916
9917 // The vectorizer requires loops to be in simplified form.
9918 // Since simplification may add new inner loops, it has to run before the
9919 // legality and profitability checks. This means running the loop vectorizer
9920 // will simplify all loops, regardless of whether anything ends up being
9921 // vectorized.
9922 for (const auto &L : *LI)
9923 Changed |= CFGChanged |=
9924 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
9925
9926 // Build up a worklist of inner-loops to vectorize. This is necessary as
9927 // the act of vectorizing or partially unrolling a loop creates new loops
9928 // and can invalidate iterators across the loops.
9929 SmallVector<Loop *, 8> Worklist;
9930
9931 for (Loop *L : *LI)
9932 collectSupportedLoops(*L, LI, ORE, Worklist);
9933
9934 LoopsAnalyzed += Worklist.size();
9935
9936 // Now walk the identified inner loops.
9937 while (!Worklist.empty()) {
9938 Loop *L = Worklist.pop_back_val();
9939
9940 // For the inner loops we actually process, form LCSSA to simplify the
9941 // transform.
9942 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
9943
9944 Changed |= CFGChanged |= processLoop(L);
9945
9946 if (Changed) {
9947 LAIs->clear();
9948
9949#ifndef NDEBUG
9950 if (VerifySCEV)
9951 SE->verify();
9952#endif
9953 }
9954 }
9955
9956 // Process each loop nest in the function.
9957 return LoopVectorizeResult(Changed, CFGChanged);
9958}
9959
9960PreservedAnalyses LoopVectorizePass::run(Function &F,
9961 FunctionAnalysisManager &AM) {
9962 LI = &AM.getResult<LoopAnalysis>(F);
9963 // There are no loops in the function. Return before computing other
9964 // expensive analyses.
9965 if (LI->empty())
9966 return PreservedAnalyses::all();
9975 AA = &AM.getResult<AAManager>(F);
9976
9977 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
9978 PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
9979 GetBFI = [&AM, &F]() -> BlockFrequencyInfo & {
9980 return AM.getResult<BlockFrequencyAnalysis>(F);
9981 };
9982 LoopVectorizeResult Result = runImpl(F);
9983 if (!Result.MadeAnyChange)
9984 return PreservedAnalyses::all();
9985 PreservedAnalyses PA;
9986
9987 if (isAssignmentTrackingEnabled(*F.getParent())) {
9988 for (auto &BB : F)
9989 RemoveRedundantDbgInstrs(&BB);
9990 }
9991
9992 PA.preserve<LoopAnalysis>();
9996
9997 if (Result.MadeCFGChange) {
9998 // Making CFG changes likely means a loop got vectorized. Indicate that
9999 // extra simplification passes should be run.
10000 // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
10001 // be run if runtime checks have been added.
10004 } else {
10005 PA.preserveSet<CFGAnalyses>();
10006 }
10007 return PA;
10008}
10009
10010void LoopVectorizePass::printPipeline(
10011 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10012 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10013 OS, MapClassName2PassName);
10014
10015 OS << '<';
10016 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10017 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10018 OS << '>';
10019}
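// With neither flag set, the printed pipeline fragment would look roughly like
// (assuming the pass is registered as "loop-vectorize"):
//   loop-vectorize<no-interleave-forced-only;no-vectorize-forced-only>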
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static unsigned getIntrinsicID(const SDNode *N)
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
AMDGPU Lower Kernel Arguments
This file implements a class to represent arbitrary precision integral constant values and operations...
@ PostInc
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static bool IsEmptyBlock(MachineBasicBlock *MBB)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static cl::opt< IntrinsicCostStrategy > IntrinsicCost("intrinsic-cost-strategy", cl::desc("Costing strategy for intrinsic instructions"), cl::init(IntrinsicCostStrategy::InstructionCost), cl::values(clEnumValN(IntrinsicCostStrategy::InstructionCost, "instruction-cost", "Use TargetTransformInfo::getInstructionCost"), clEnumValN(IntrinsicCostStrategy::IntrinsicCost, "intrinsic-cost", "Use TargetTransformInfo::getIntrinsicInstrCost"), clEnumValN(IntrinsicCostStrategy::TypeBasedIntrinsicCost, "type-based-intrinsic-cost", "Calculate the intrinsic cost based only on argument types")))
static InstructionCost getCost(Instruction &Inst, TTI::TargetCostKind CostKind, TargetTransformInfo &TTI, TargetLibraryInfo &TLI)
Definition CostModel.cpp:74
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
static bool hasNoUnsignedWrap(BinaryOperator &I)
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
static Constant * getTrue(Type *Ty)
For a boolean type or a vector of boolean type, return true or a vector with every element true.
static std::pair< Value *, APInt > getMask(Value *WideMask, unsigned Factor, ElementCount LeafValueEC)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Legalize the Machine IR a function s Machine IR
Definition Legalizer.cpp:81
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
This header provides classes for managing per-loop analyses.
static cl::opt< bool > WidenIV("loop-flatten-widen-iv", cl::Hidden, cl::init(true), cl::desc("Widen the loop induction variables, if possible, so " "overflow checks won't reject flattening"))
static const char * VerboseDebug
#define LV_NAME
This file defines the LoopVectorizationLegality class.
This file provides a LoopVectorizationPlanner class.
static void collectSupportedLoops(Loop &L, LoopInfo *LI, OptimizationRemarkEmitter *ORE, SmallVectorImpl< Loop * > &V)
static cl::opt< unsigned > EpilogueVectorizationMinVF("epilogue-vectorization-minimum-VF", cl::Hidden, cl::desc("Only loops with vectorization factor equal to or larger than " "the specified value are considered for epilogue vectorization."))
static cl::opt< unsigned > EpilogueVectorizationForceVF("epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, cl::desc("When epilogue vectorization is enabled, and a value greater than " "1 is specified, forces the given VF for all applicable epilogue " "loops."))
static Type * maybeVectorizeType(Type *Ty, ElementCount VF)
static ElementCount determineVPlanVF(const TargetTransformInfo &TTI, LoopVectorizationCostModel &CM)
static ElementCount getSmallConstantTripCount(ScalarEvolution *SE, const Loop *L)
A version of ScalarEvolution::getSmallConstantTripCount that returns an ElementCount to include loops...
static cl::opt< unsigned > VectorizeMemoryCheckThreshold("vectorize-memory-check-threshold", cl::init(128), cl::Hidden, cl::desc("The maximum allowed number of runtime memory checks"))
static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan)
Prepare MainPlan for vectorizing the main vector loop during epilogue vectorization.
static cl::opt< unsigned > TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), cl::Hidden, cl::desc("Loops with a constant trip count that is smaller than this " "value are vectorized only if no scalar iteration overheads " "are incurred."))
Loops with a known constant trip count below this number are vectorized only if no scalar iteration o...
static void debugVectorizationMessage(const StringRef Prefix, const StringRef DebugMsg, Instruction *I)
Write a DebugMsg about vectorization to the debug output stream.
static cl::opt< bool > EnableCondStoresVectorization("enable-cond-stores-vec", cl::init(true), cl::Hidden, cl::desc("Enable if predication of stores during vectorization."))
static void legacyCSE(BasicBlock *BB)
FIXME: This legacy common-subexpression-elimination routine is scheduled for removal,...
static VPIRBasicBlock * replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB, VPlan *Plan=nullptr)
Replace VPBB with a VPIRBasicBlock wrapping IRBB.
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I)
Look for a meaningful debug location on the instruction or its operands.
static Value * createInductionAdditionalBypassValues(PHINode *OrigPhi, const InductionDescriptor &II, IRBuilder<> &BypassBuilder, const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount, Instruction *OldInduction)
static void fixReductionScalarResumeWhenVectorizingEpilog(VPPhi *EpiResumePhiR, PHINode &EpiResumePhi, BasicBlock *BypassBlock)
static cl::opt< bool > ForceTargetSupportsScalableVectors("force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, cl::desc("Pretend that scalable vectors are supported, even if the target does " "not support them. This flag should only be used for testing."))
static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style)
static cl::opt< bool > EnableEarlyExitVectorization("enable-early-exit-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of early exit loops with uncountable exits."))
static bool processLoopInVPlanNativePath(Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, std::function< BlockFrequencyInfo &()> GetBFI, bool OptForSize, LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements)
static cl::opt< bool > ConsiderRegPressure("vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden, cl::desc("Discard VFs if their register pressure is too high."))
static unsigned estimateElementCount(ElementCount VF, std::optional< unsigned > VScale)
This function attempts to return a value that represents the ElementCount at runtime.
static constexpr uint32_t MinItersBypassWeights[]
static cl::opt< unsigned > ForceTargetNumScalarRegs("force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers."))
static cl::opt< bool > UseWiderVFIfCallVariantsPresent("vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true), cl::Hidden, cl::desc("Try wider VFs if they enable the use of vector variants"))
static std::optional< unsigned > getMaxVScale(const Function &F, const TargetTransformInfo &TTI)
static cl::opt< unsigned > SmallLoopCost("small-loop-cost", cl::init(20), cl::Hidden, cl::desc("The cost of a loop that is considered 'small' by the interleaver."))
static void connectEpilogueVectorLoop(VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI, DominatorTree *DT, LoopVectorizationLegality &LVL, DenseMap< const SCEV *, Value * > &ExpandedSCEVs, GeneratedRTChecks &Checks, ArrayRef< Instruction * > InstsToMove)
Connect the epilogue vector loop generated for EpiPlan to the main vector.
static bool planContainsAdditionalSimplifications(VPlan &Plan, VPCostContext &CostCtx, Loop *TheLoop, ElementCount VF)
Return true if the original loop \ TheLoop contains any instructions that do not have corresponding r...
static cl::opt< unsigned > ForceTargetNumVectorRegs("force-target-num-vector-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of vector registers."))
static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > EnableIndVarRegisterHeur("enable-ind-var-reg-heur", cl::init(true), cl::Hidden, cl::desc("Count the induction variable only once when interleaving"))
static cl::opt< TailFoldingStyle > ForceTailFoldingStyle("force-tail-folding-style", cl::desc("Force the tail folding style"), cl::init(TailFoldingStyle::None), cl::values(clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"), clEnumValN(TailFoldingStyle::Data, "data", "Create lane mask for data only, using active.lane.mask intrinsic"), clEnumValN(TailFoldingStyle::DataWithoutLaneMask, "data-without-lane-mask", "Create lane mask with compare/stepvector"), clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), clEnumValN(TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck, "data-and-control-without-rt-check", "Similar to data-and-control, but remove the runtime check"), clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", "Use predicated EVL instructions for tail folding. If EVL " "is unsupported, fallback to data-without-lane-mask.")))
static ScalarEpilogueLowering getScalarEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, bool OptForSize, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
static cl::opt< bool > EnableEpilogueVectorization("enable-epilogue-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of epilogue loops."))
static cl::opt< bool > PreferPredicatedReductionSelect("prefer-predicated-reduction-select", cl::init(false), cl::Hidden, cl::desc("Prefer predicating a reduction operation over an after loop select."))
static cl::opt< bool > PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), cl::Hidden, cl::desc("Prefer in-loop vector reductions, " "overriding the targets preference."))
static SmallVector< Instruction * > preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel &CM, ScalarEvolution &SE)
Prepare Plan for vectorizing the epilogue loop.
static const SCEV * getAddressAccessSCEV(Value *Ptr, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets the address access SCEV for Ptr, if it should be used for cost modeling according to isAddressSC...
static cl::opt< bool > EnableLoadStoreRuntimeInterleave("enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, cl::desc("Enable runtime interleaving until load/store ports are saturated"))
static cl::opt< bool > VPlanBuildStressTest("vplan-build-stress-test", cl::init(false), cl::Hidden, cl::desc("Build VPlan for every supported loop nest in the function and bail " "out right after the build (stress test the VPlan H-CFG construction " "in the VPlan-native vectorization path)."))
static bool hasIrregularType(Type *Ty, const DataLayout &DL)
A helper function that returns true if the given type is irregular.
static cl::opt< bool > LoopVectorizeWithBlockFrequency("loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, cl::desc("Enable the use of the block frequency analysis to access PGO " "heuristics minimizing code growth in cold regions and being more " "aggressive in hot regions."))
static std::optional< ElementCount > getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L, bool CanUseConstantMax=true)
Returns "best known" trip count, which is either a valid positive trip count or std::nullopt when an ...
static Value * getExpandedStep(const InductionDescriptor &ID, const SCEV2ValueTy &ExpandedSCEVs)
Return the expanded step for ID using ExpandedSCEVs to look up SCEV expansion results.
static bool useActiveLaneMask(TailFoldingStyle Style)
static bool hasReplicatorRegion(VPlan &Plan)
static bool isIndvarOverflowCheckKnownFalse(const LoopVectorizationCostModel *Cost, ElementCount VF, std::optional< unsigned > UF=std::nullopt)
For the given VF and UF and maximum trip count computed for the loop, return whether the induction va...
static void addFullyUnrolledInstructionsToIgnore(Loop *L, const LoopVectorizationLegality::InductionList &IL, SmallPtrSetImpl< Instruction * > &InstsToIgnore)
Knowing that loop L executes a single vector iteration, add instructions that will get simplified and...
static cl::opt< PreferPredicateTy::Option > PreferPredicateOverEpilogue("prefer-predicate-over-epilogue", cl::init(PreferPredicateTy::ScalarEpilogue), cl::Hidden, cl::desc("Tail-folding and predication preferences over creating a scalar " "epilogue loop."), cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue, "scalar-epilogue", "Don't tail-predicate loops, create scalar epilogue"), clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue, "predicate-else-scalar-epilogue", "prefer tail-folding, create scalar epilogue if tail " "folding fails."), clEnumValN(PreferPredicateTy::PredicateOrDontVectorize, "predicate-dont-vectorize", "prefers tail-folding, don't attempt vectorization if " "tail-folding fails.")))
static bool hasFindLastReductionPhi(VPlan &Plan)
Returns true if the VPlan contains a VPReductionPHIRecipe with FindLast recurrence kind.
static cl::opt< bool > EnableInterleavedMemAccesses("enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop"))
static cl::opt< bool > EnableMaskedInterleavedMemAccesses("enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"))
An interleave-group may need masking if it resides in a block that needs predication,...
static cl::opt< bool > ForceOrderedReductions("force-ordered-reductions", cl::init(false), cl::Hidden, cl::desc("Enable the vectorisation of loops with in-order (strict) " "FP reductions"))
static cl::opt< cl::boolOrDefault > ForceSafeDivisor("force-widen-divrem-via-safe-divisor", cl::Hidden, cl::desc("Override cost based safe divisor widening for div/rem instructions"))
static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx, VPlan &Plan, ElementCount VF)
For loops with uncountable early exits, find the cost of doing work when exiting the loop early,...
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
static cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI)
Check if any recipe of Plan will generate a vector value, which will be assigned a vector register.
static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks, VectorizationFactor &VF, Loop *L, PredicatedScalarEvolution &PSE, VPCostContext &CostCtx, VPlan &Plan, ScalarEpilogueLowering SEL, std::optional< unsigned > VScale)
This function determines whether or not it's still profitable to vectorize the loop given the extra w...
static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L, VPlan &BestEpiPlan, LoopVectorizationLegality &LVL, const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount)
static cl::opt< bool > MaximizeBandwidth("vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, cl::desc("Maximize bandwidth when selecting vectorization factor which " "will be determined by the smallest type in loop."))
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop, Instruction *I, DebugLoc DL={})
Create an analysis remark that explains why vectorization failed.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file implements a map that provides insertion order iteration.
This file contains the declarations for metadata subclasses.
#define T
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static BinaryOperator * CreateMul(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static InstructionCost getScalarizationOverhead(const TargetTransformInfo &TTI, Type *ScalarTy, VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={})
This is similar to TargetTransformInfo::getScalarizationOverhead, but if ScalarTy is a FixedVectorTyp...
This file contains some templates that are useful if you are working with the STL at all.
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition Debug.h:72
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This pass exposes codegen information to IR-level passes.
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type ...
This file contains the declarations of different VPlan-related auxiliary helpers.
This file provides utility VPlan to VPlan transformations.
#define RUN_VPLAN_PASS(PASS,...)
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
static const char PassName[]
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
A manager for alias analyses.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1555
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1527
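A minimal usage sketch of the APInt helpers listed above; the widths and values are arbitrary and chosen only for illustration.

  #include "llvm/ADT/APInt.h"
  using namespace llvm;

  void sketchAPInt() {
    APInt Mask = APInt::getAllOnes(8);      // 8-bit value with all bits set (0xFF).
    unsigned Active = Mask.getActiveBits(); // 8: all eight bits are significant.
    uint64_t ZExt = Mask.getZExtValue();    // 255 when zero-extended to 64 bits.
    APInt Five(32, 5);                      // 32-bit APInt holding the value 5.
    (void)Active; (void)ZExt; (void)Five;
  }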
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM_ABI unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:539
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
BinaryOps getOpcode() const
Definition InstrTypes.h:374
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
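A small sketch of the BranchInst API listed above. The condition and blocks are assumed to already exist; the insertion point is deliberately omitted, so the sketch deletes the unattached instruction at the end, whereas real code would pass an insertion position.

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  void sketchBranch(Value *Cond, BasicBlock *TrueBB, BasicBlock *FalseBB) {
    // Conditional branch: 'Cond' selects between TrueBB and FalseBB.
    BranchInst *Br = BranchInst::Create(TrueBB, FalseBB, Cond);
    bool IsCond = Br->isConditional();          // true
    BasicBlock *Taken = Br->getSuccessor(0);    // TrueBB
    BasicBlock *NotTaken = Br->getSuccessor(1); // FalseBB
    (void)IsCond; (void)Taken; (void)NotTaken;
    Br->deleteValue();  // The sketch never inserts the branch, so free it.
  }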
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:982
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
A parsed version of the target data layout string and methods for querying it.

Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
static DebugLoc getTemporary()
Definition DebugLoc.h:160
static DebugLoc getUnknown()
Definition DebugLoc.h:161
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:256
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition DenseMap.h:294
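A short sketch of the DenseMap operations listed above; the key type and the widths stored are arbitrary.

  #include "llvm/ADT/DenseMap.h"
  #include "llvm/IR/Value.h"
  using namespace llvm;

  unsigned sketchDenseMap(const Value *V) {
    DenseMap<const Value *, unsigned> Widths;
    Widths.try_emplace(V, 32);        // Inserts only if V is not already a key.
    if (!Widths.contains(V))          // Membership test.
      return 0;
    auto It = Widths.find(V);         // Iterator access; end() when absent.
    if (It != Widths.end())
      It->second = 64;
    return Widths.lookup(V);          // 64 here; 0 (default value) for absent keys.
  }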
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:324
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:312
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
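A sketch of how ElementCount describes fixed and scalable vectorization factors; the concrete factors 4 and 2 are arbitrary.

  #include "llvm/Support/TypeSize.h"
  using namespace llvm;

  void sketchElementCount() {
    ElementCount Fixed4 = ElementCount::getFixed(4);    // e.g. <4 x i32>
    ElementCount Scal2 = ElementCount::getScalable(2);  // e.g. <vscale x 2 x i32>
    bool IsVec = Fixed4.isVector();                     // true: more than one element
    bool IsOne = ElementCount::getFixed(1).isScalar();  // true: exactly one element
    (void)Scal2; (void)IsVec; (void)IsOne;
  }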
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan)
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (i....
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
void introduceCheckBlockInVPlan(BasicBlock *CheckIRBB)
Introduces a new VPIRBasicBlock for CheckIRBB to Plan between the vector preheader and its predecesso...
BasicBlock * emitIterationCountCheck(BasicBlock *VectorPH, BasicBlock *Bypass, bool ForEpilogue)
Emits an iteration count bypass check once for the main loop (when ForEpilogue is false) and once for...
Value * createIterationCountCheck(BasicBlock *VectorPH, ElementCount VF, unsigned UF) const
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Check, VPlan &Plan)
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the main loop strategy (i....
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition Function.cpp:764
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:729
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags none()
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition IRBuilder.h:345
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2775
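A minimal sketch of using IRBuilder with fast-math flags, as listed above; the block and operands are assumed to exist and the names are illustrative.

  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  Value *sketchFastFAdd(BasicBlock *BB, Value *L, Value *R) {
    IRBuilder<> Builder(BB);         // Insert at the end of BB.
    FastMathFlags FMF;
    FMF.setFast();                   // Enable all fast-math flags.
    Builder.setFastMathFlags(FMF);   // Subsequent FP ops pick these flags up.
    return Builder.CreateFAdd(L, R, "sum");
  }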
A struct for saving information about induction variables.
const SCEV * getStep() const
ArrayRef< Instruction * > getCastInsts() const
Returns an ArrayRef to the type cast instructions in the induction update chain, that are redundant w...
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_NoInduction
Not an induction variable.
@ IK_FpInduction
Floating point induction variable.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor)
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separ...
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization ...
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
Value * TripCount
Trip count of the original loop.
const TargetTransformInfo * TTI
Target Transform Info.
LoopVectorizationCostModel * Cost
The profitability analysis.
Value * getTripCount() const
Returns the original loop trip count.
friend class LoopVectorizationPlanner
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, ElementCount VecWidth, unsigned UnrollFactor, LoopVectorizationCostModel *CM, GeneratedRTChecks &RTChecks, VPlan &Plan)
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
LoopInfo * LI
Loop Info.
DominatorTree * DT
Dominator Tree.
void setTripCount(Value *TC)
Used to set the trip count after ILV's construction and after the preheader block has been executed.
void fixVectorizedLoop(VPTransformState &State)
Fix the vectorized code, taking care of header phi's, and more.
virtual BasicBlock * createVectorizedLoopSkeleton()
Creates a basic block for the scalar preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
IRBuilder Builder
The builder that we use.
void fixNonInductionPHIs(VPTransformState &State)
Fix the non-induction PHIs in Plan.
VPBasicBlock * VectorPHVPBB
The vector preheader block of Plan, used as target for check blocks introduced during skeleton creati...
unsigned UF
The vectorization unroll factor to use.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks,...
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
BasicBlock * createScalarPreheader(StringRef Prefix)
Create and return a new IR basic block for the scalar preheader whose name is prefixed with Prefix.
InstSimplifyFolder - Use InstructionSimplify to fold operations to existing values.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
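A sketch of how InstructionCost values are combined and checked for validity; the numeric costs are arbitrary.

  #include "llvm/Support/InstructionCost.h"
  using namespace llvm;

  void sketchInstructionCost() {
    InstructionCost Cost = 4;
    Cost += 2;                                        // Valid costs add arithmetically.
    InstructionCost Bad = InstructionCost::getInvalid();
    if (Cost.isValid() && !Bad.isValid()) {
      auto C = Cost.getValue();                       // 6; only query valid costs.
      (void)C;
    }
  }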
bool isCast() const
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr if the function does not...
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
LLVM_ABI APInt getMask() const
For example, this is 0xFF for an 8-bit integer, 0xFFFF for i16, etc.
Definition Type.cpp:342
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
InstTy * getInsertPos() const
uint32_t getNumMembers() const
Drive the analysis of interleaved memory accesses in the loop.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue ...
LLVM_ABI void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
An instruction for reading from memory.
Type * getPointerOperandType() const
This analysis provides dependence information for the memory accesses of a loop.
Drive the analysis of memory accesses in the loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
iterator_range< block_iterator > blocks() const
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is n...
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
SmallPtrSet< Type *, 16 > ElementTypesInLoop
All element types found in the loop.
bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked load operation for the given DataType and kind of ...
void collectElementTypesForWidening()
Collect all element types in the loop for which widening is needed.
bool canVectorizeReductions(ElementCount VF) const
Returns true if the target machine supports all of the reduction variables found for the given VF.
bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked store operation for the given DataType and kind of...
bool isEpilogueVectorizationProfitable(const ElementCount VF, const unsigned IC) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
bool useWideActiveLaneMask() const
Returns true if the use of wide lane masks is requested and the loop is using tail-folding with a lan...
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
BlockFrequencyInfo * BFI
The BlockFrequencyInfo returned from GetBFI.
void collectInLoopReductions()
Split reductions into those that happen in the loop, and those that happen outside.
BlockFrequencyInfo & getBFI()
Returns the BlockFrequencyInfo for the function if cached, otherwise fetches it via GetBFI.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes()
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
void collectNonVectorizedAndSetWideningDecisions(ElementCount VF)
Collect values that will not be widened, including Uniforms, Scalars, and Instructions to Scalarize f...
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
std::optional< unsigned > getMaxSafeElements() const
Return maximum safe number of elements to be processed per vector iteration, which do not prevent sto...
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationLegality * Legal
Vectorization legality.
uint64_t getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind, const BasicBlock *BB)
A helper function that returns how much we should divide the cost of a predicated block by.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
DemandedBits * DB
Demanded bits analysis.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vect...
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool OptForSize
Whether this loop should be optimized for size based on function attribute or profile information.
bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind)
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
bool shouldConsiderRegPressureForVF(ElementCount VF)
Loop * TheLoop
The loop that we evaluate.
TTI::TargetCostKind CostKind
The kind of cost that we are calculating.
TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow=true) const
Returns the TailFoldingStyle that is best for the current loop.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and cl...
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available...
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
bool selectUserVectorizationFactor(ElementCount UserVF)
Setup cost-based decisions for user vectorization factor.
std::optional< unsigned > getVScaleForTuning() const
Return the value of vscale used for tuning the cost model.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
bool preferPredicatedLoop() const
Returns true if tail-folding is preferred over a scalar epilogue.
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
bool isInLoopReduction(PHINode *Phi) const
Returns true if the Phi is part of an inloop reduction.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector ...
const MapVector< Instruction *, uint64_t > & getMinimalBitwidths() const
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool isLegalGatherOrScatter(Value *V, ElementCount VF)
Returns true if the target machine can represent V as a masked gather or scatter operation.
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool shouldConsiderInvariant(Value *Op)
Returns true if Op should be considered invariant and if it is trivially hoistable.
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail fol...
bool usePredicatedReductionSelect() const
Returns true if the predicated reduction select should be used to set the incoming value for the redu...
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block require predication for any reason,...
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC)
Selects and saves the TailFoldingStyle for two options: whether the IV update may overflow or not.
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF)
Return the costs for our two available strategies for lowering a div/rem operation which requires spe...
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
bool isScalarWithPredication(Instruction *I, ElementCount VF)
Returns true if I is an instruction which requires predication and for which our chosen predication s...
bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const
Returns true if we should use strict in-order reductions for the given RdxDesc.
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
std::function< BlockFrequencyInfo &()> GetBFI
A function to lazily fetch BlockFrequencyInfo.
LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, std::function< BlockFrequencyInfo &()> GetBFI, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI, bool OptForSize)
InstructionCost expectedCost(ElementCount VF)
Returns the expected execution cost.
void setCostBasedWideningDecision(ElementCount VF)
Memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
FixedScalableVFPair MaxPermissibleVFWithoutMaxBW
The highest VF possible for this loop, without using MaxBandwidth.
const SmallPtrSetImpl< PHINode * > & getInLoopReductions() const
Returns the set of in-loop reduction PHIs.
bool isScalarEpilogueAllowed() const
Returns true if a scalar epilogue is not allowed due to optsize or a loop hint annotation.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
MapVector< PHINode *, InductionDescriptor > InductionList
InductionList saves induction variables and maps them to the induction descriptor.
const SmallPtrSetImpl< const Instruction * > & getPotentiallyFaultingLoads() const
Returns potentially faulting loads.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
PHINode * getPrimaryInduction()
Returns the primary induction variable.
const SmallVector< BasicBlock *, 4 > & getCountableExitingBlocks() const
Returns all exiting blocks with a countable exit, i.e.
const InductionList & getInductionVars() const
Returns the induction variables found in the loop.
bool hasUncountableEarlyExit() const
Returns true if the loop has uncountable early exits, i.e.
bool hasHistograms() const
Returns a list of all known histogram operations in the loop.
const LoopAccessInfo * getLAI() const
Planner drives the vectorization process after having passed Legality checks.
VectorizationFactor selectEpilogueVectorizationFactor(const ElementCount MainLoopVF, unsigned IC)
VPlan & getPlanFor(ElementCount VF) const
Return the VPlan for VF.
Definition VPlan.cpp:1593
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
void updateLoopMetadataAndProfileInfo(Loop *VectorLoop, VPBasicBlock *HeaderVPBB, const VPlan &Plan, bool VectorizingEpilogue, MDNode *OrigLoopID, std::optional< unsigned > OrigAverageTripCount, unsigned OrigLoopInvocationWeight, unsigned EstimatedVFxUF, bool DisableRuntimeUnroll)
Update loop metadata and profile info for both the scalar remainder loop and VectorLoop,...
Definition VPlan.cpp:1644
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gath...
Definition VPlan.cpp:1577
VectorizationFactor computeBestVF()
Compute and return the most profitable vectorization factor.
DenseMap< const SCEV *, Value * > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, bool VectorizingEpilogue)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selecte...
unsigned selectInterleaveCount(VPlan &Plan, ElementCount VF, InstructionCost LoopCost)
void emitInvalidCostRemarks(OptimizationRemarkEmitter *ORE)
Emit remarks for recipes with invalid costs in the available VPlans.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
Definition VPlan.cpp:1558
void printPlans(raw_ostream &O)
Definition VPlan.cpp:1738
void plan(ElementCount UserVF, unsigned UserIC)
Build VPlans for the specified UserVF and UserIC if they are non-zero or all applicable candidate VFs...
void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount) const
Create a check to Plan to see if the vector loop should be executed based on its trip count.
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
This holds vectorization requirements that must be verified late in the process.
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
bool allowReordering() const
When loop hints that enable vectorization are provided, we allow the vectorizer to change the order of operations that ...
void emitRemarkWithHints() const
Dumps all the hint information.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend t...
This class emits a version of the loop where run-time checks ensure that may-alias pointers can't ove...
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition LoopInfo.cpp:67
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition LoopInfo.cpp:632
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition LoopInfo.cpp:61
Metadata node.
Definition Metadata.h:1080
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:124
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Definition Module.cpp:235
Diagnostic information for optimization analysis remarks related to pointer aliasing.
Diagnostic information for optimization analysis remarks related to floating-point non-commutativity.
Diagnostic information for optimization analysis remarks.
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
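A sketch of the usual remark-emission pattern with OptimizationRemarkEmitter; the pass name "my-pass", the remark name, and the message text are placeholders rather than names used by this file.

  #include "llvm/Analysis/OptimizationRemarkEmitter.h"
  #include "llvm/IR/DiagnosticInfo.h"
  using namespace llvm;

  void sketchRemark(OptimizationRemarkEmitter &ORE, Instruction *I) {
    // The lambda is only invoked if remarks are actually enabled.
    ORE.emit([&]() {
      return OptimizationRemarkMissed("my-pass", "ExampleRemark", I)
             << "could not vectorize: " << ore::NV("Inst", I);
    });
  }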
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
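A sketch of building a PHI node with the members listed above; the incoming blocks and values are assumed to exist, and the node is inserted before the header's first non-PHI instruction so it stays in the block's PHI section.

  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  PHINode *sketchPhi(Type *Ty, BasicBlock *Header, BasicBlock *PredA,
                     Value *ValA, BasicBlock *PredB, Value *ValB) {
    PHINode *Phi = PHINode::Create(Ty, /*NumReservedValues=*/2, "resume",
                                   Header->getFirstNonPHIIt());
    Phi->addIncoming(ValA, PredA);   // One incoming value per predecessor.
    Phi->addIncoming(ValB, PredB);
    // Phi->getNumIncomingValues() == 2 at this point.
    return Phi;
  }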
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
The RecurrenceDescriptor is used to identify recurrence variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
static bool isSignedRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is a signed reduction kind.
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
bool hasUsesOutsideReductionChain() const
Returns true if the reduction PHI has any uses outside the reduction chain.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
static bool isFindLastRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
unsigned getMinWidthCastToRecurrenceTypeInBits() const
Returns the minimum width used by the recurrence in bits.
LLVM_ABI SmallVector< Instruction *, 4 > getReductionOpChain(PHINode *Phi, Loop *L) const
Attempts to find a chain of operations from Phi to LoopExitInst that can be treated as a set of reduc...
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isFindLastIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
bool isOrdered() const
Expose an ordered FP reduction to the instance users.
static LLVM_ABI bool isFloatingPointRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is a floating point kind.
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
Value * getSentinelValue() const
Returns the sentinel value for FindFirstIV & FindLastIV recurrences to replace the start value.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class uses information about analyzed scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may affect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may affect its v...
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition SetVector.h:57
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
void insert_range(Range &&R)
Definition SetVector.h:176
size_type count(const_arg_type key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:262
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
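A short sketch of the SmallPtrSet membership operations listed above; the element type and inline size are arbitrary.

  #include "llvm/ADT/SmallPtrSet.h"
  #include "llvm/IR/Instruction.h"
  using namespace llvm;

  void sketchSmallPtrSet(Instruction *I, Instruction *J) {
    SmallPtrSet<Instruction *, 16> Seen;   // Inline storage for up to 16 pointers.
    Seen.insert(I);                        // Returns {iterator, WasInserted}.
    bool HasI = Seen.contains(I);          // true
    unsigned CountJ = Seen.count(J);       // 1 only if J happens to equal I here.
    (void)HasI; (void)CountJ;
  }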
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
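A minimal SmallVector sketch matching the members listed above; the element type and inline size are arbitrary.

  #include "llvm/ADT/SmallVector.h"
  using namespace llvm;

  int sketchSmallVector() {
    SmallVector<int, 8> Vals;   // Up to 8 elements stored inline, no heap allocation.
    Vals.push_back(1);
    Vals.emplace_back(2);       // Constructs the element in place.
    int Sum = 0;
    for (int V : Vals)
      Sum += V;                 // 3
    return Sum;
  }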
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI std::optional< unsigned > getVScaleForTuning() const
LLVM_ABI bool supportsEfficientVectorElementLoadStore() const
If target has efficient vector element load/store instructions, it can return true here so that inser...
VectorInstrContext
Represents a hint about the context in which an insert/extract is used.
@ None
The insert/extract is not used with a load/store.
@ Load
The value being inserted comes from a load (InsertElement only).
@ Store
The extracted value is stored (ExtractElement only).
LLVM_ABI bool prefersVectorizedAddressing() const
Return true if target doesn't mind addresses in vectors.
LLVM_ABI TypeSize getRegisterBitWidth(RegisterKind K) const
LLVM_ABI InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
Estimate the overhead of scalarizing operands with the given types.
LLVM_ABI bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const
LLVM_ABI InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo OpdInfo={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, bool UseMaskForCond=false, bool UseMaskForGaps=false) const
LLVM_ABI InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask={}, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, int Index=0, VectorType *SubTp=nullptr, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
LLVM_ABI bool isElementTypeLegalForScalableVector(Type *Ty) const
LLVM_ABI ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCK_Latency
The latency of instruction.
LLVM_ABI InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr, TTI::TargetCostKind CostKind) const
LLVM_ABI bool supportsScalableVectors() const
@ TCC_Free
Expected to fold away in lowering.
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
LLVM_ABI InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const
LLVM_ABI InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
Estimate the overhead of scalarizing an instruction.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_Reverse
Reverse the order of the vector.
LLVM_ABI InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionalit...
Definition TypeSwitch.h:89
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
Definition TypeSwitch.h:98
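A sketch of a TypeSwitch dispatch over IR instruction types; the classification strings are illustrative only.

  #include "llvm/ADT/StringRef.h"
  #include "llvm/ADT/TypeSwitch.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  StringRef sketchClassify(Instruction *I) {
    // Each Case fires when the dyn_cast to its parameter type succeeds.
    return TypeSwitch<Instruction *, StringRef>(I)
        .Case([](LoadInst *) { return "load"; })
        .Case([](StoreInst *) { return "store"; })
        .Case([](PHINode *) { return "phi"; })
        .Default([](Instruction *) { return "other"; });
  }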
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:280
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
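A sketch of the Type queries listed above; i32 is just an example element type.

  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/Type.h"
  using namespace llvm;

  void sketchTypeQueries(LLVMContext &Ctx) {
    Type *I1 = Type::getInt1Ty(Ctx);               // i1, e.g. for compare results.
    IntegerType *I32 = IntegerType::get(Ctx, 32);  // i32
    bool IsInt = I32->isIntegerTy();               // true
    unsigned Bits = I32->getScalarSizeInBits();    // 32
    Type *Scalar = I32->getScalarType();           // i32 itself (not a vector type).
    (void)I1; (void)IsInt; (void)Bits; (void)Scalar;
  }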
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:267
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:207
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition VectorUtils.h:74
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition VPlan.h:4090
RecipeListTy::iterator iterator
Instruction iterators...
Definition VPlan.h:4117
iterator end()
Definition VPlan.h:4127
iterator begin()
Recipe iterator methods.
Definition VPlan.h:4125
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
Definition VPlan.h:4178
InstructionCost cost(ElementCount VF, VPCostContext &Ctx) override
Return the cost of this VPBasicBlock.
Definition VPlan.cpp:777
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
Definition VPlan.cpp:230
VPRecipeBase * getTerminator()
If the block has multiple successors, return the branch recipe terminating the block.
Definition VPlan.cpp:637
bool empty() const
Definition VPlan.h:4136
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
Definition VPlan.h:81
const VPBasicBlock * getExitingBasicBlock() const
Definition VPlan.cpp:200
void setName(const Twine &newName)
Definition VPlan.h:166
size_t getNumSuccessors() const
Definition VPlan.h:219
void swapSuccessors()
Swap successors of the block. The block must have exactly 2 successors.
Definition VPlan.h:322
size_t getNumPredecessors() const
Definition VPlan.h:220
VPlan * getPlan()
Definition VPlan.cpp:175
const VPBasicBlock * getEntryBasicBlock() const
Definition VPlan.cpp:180
VPBlockBase * getSingleSuccessor() const
Definition VPlan.h:209
const VPBlocksTy & getSuccessors() const
Definition VPlan.h:198
static auto blocksOnly(const T &Range)
Return an iterator range over Range which only includes BlockTy blocks.
Definition VPlanUtils.h:263
static void insertOnEdge(VPBlockBase *From, VPBlockBase *To, VPBlockBase *BlockPtr)
Inserts BlockPtr on the edge between From and To.
Definition VPlanUtils.h:284
static void connectBlocks(VPBlockBase *From, VPBlockBase *To, unsigned PredIdx=-1u, unsigned SuccIdx=-1u)
Connect VPBlockBases From and To bi-directionally.
Definition VPlanUtils.h:215
static void reassociateBlocks(VPBlockBase *Old, VPBlockBase *New)
Reassociate all the blocks connected to Old so that they now point to New.
Definition VPlanUtils.h:241
VPlan-based builder utility analogous to IRBuilder.
VPPhi * createScalarPhi(ArrayRef< VPValue * > IncomingValues, DebugLoc DL, const Twine &Name="")
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
Canonical scalar induction phi of the vector loop.
Definition VPlan.h:3664
VPIRValue * getStartValue() const
Returns the start value of the canonical induction.
Definition VPlan.h:3686
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition VPlanValue.h:427
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition VPlanValue.h:400
A pure virtual base class for all recipes modeling header phis, including phis for first order recurr...
Definition VPlan.h:2141
virtual VPValue * getBackedgeValue()
Returns the incoming value from the loop backedge.
Definition VPlan.h:2184
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition VPlan.h:2173
A recipe representing a sequence of load -> update -> store as part of a histogram operation.
Definition VPlan.h:1894
A special type of VPBasicBlock that wraps an existing IR basic block.
Definition VPlan.h:4243
This is a concrete Recipe that models a single VPlan-level instruction.
Definition VPlan.h:1141
@ ComputeAnyOfResult
Compute the final result of an AnyOf reduction with select(cmp(),x,y), where one of (x,...
Definition VPlan.h:1188
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
Definition VPlan.h:1245
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
Definition VPlan.h:1236
unsigned getOpcode() const
Definition VPlan.h:1300
VPInterleaveRecipe is a recipe for transforming an interleave group of loads or stores into one wide l...
Definition VPlan.h:2801
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition VPlan.h:1481
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:387
VPBasicBlock * getParent()
Definition VPlan.h:462
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition VPlan.h:536
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipes from IR instructions.
VPValue * getBlockInMask(VPBasicBlock *VPBB) const
Returns the entry mask for block VPBB or null if the mask is all-true.
VPRecipeBase * tryToCreateWidenNonPhiRecipe(VPSingleDefRecipe *R, VFRange &Range)
Create and return a widened recipe for a non-phi recipe R if one can be created within the given VF R...
VPValue * getVPValueOrAddLiveIn(Value *V)
VPReplicateRecipe * handleReplication(VPInstruction *VPI, VFRange &Range)
Build a VPReplicationRecipe for VPI.
bool isOrdered() const
Returns true, if the phi is part of an ordered reduction.
Definition VPlan.h:2593
bool isInLoop() const
Returns true if the phi is part of an in-loop reduction.
Definition VPlan.h:2596
RecurKind getRecurrenceKind() const
Returns the recurrence kind of the reduction.
Definition VPlan.h:2590
A recipe to represent inloop, ordered or partial reduction operations.
Definition VPlan.h:2894
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
Definition VPlan.h:4278
const VPBlockBase * getEntry() const
Definition VPlan.h:4314
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the region.
Definition VPlan.h:4376
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
Definition VPlan.h:3050
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Definition VPlan.h:588
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition VPlan.h:651
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
Definition VPlanValue.h:258
operand_range operands()
Definition VPlanValue.h:326
void setOperand(unsigned I, VPValue *New)
Definition VPlanValue.h:302
unsigned getNumOperands() const
Definition VPlanValue.h:296
operand_iterator op_begin()
Definition VPlanValue.h:322
VPValue * getOperand(unsigned N) const
Definition VPlanValue.h:297
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into,...
Definition VPlanValue.h:46
Value * getLiveInIRValue() const
Return the underlying IR value for a VPIRValue.
Definition VPlan.cpp:135
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Definition VPlan.cpp:125
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition VPlanValue.h:71
void replaceAllUsesWith(VPValue *New)
Definition VPlan.cpp:1394
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplac...
Definition VPlan.cpp:1398
user_range users()
Definition VPlanValue.h:125
A recipe to compute a pointer to the last element of each part of a widened memory access for widened...
Definition VPlan.h:1999
VPWidenCastRecipe is a recipe to create vector cast instructions.
Definition VPlan.h:1688
A recipe for handling GEP instructions.
Definition VPlan.h:1936
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
Definition VPlan.h:2287
A recipe for widened phis.
Definition VPlan.h:2423
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
Definition VPlan.h:1632
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient outp...
Definition VPlan.h:4408
bool hasVF(ElementCount VF) const
Definition VPlan.h:4605
VPBasicBlock * getEntry()
Definition VPlan.h:4497
VPValue & getVF()
Returns the VF of the vector loop region.
Definition VPlan.h:4587
VPValue * getTripCount() const
The trip count of the original loop.
Definition VPlan.h:4555
iterator_range< SmallSetVector< ElementCount, 2 >::iterator > vectorFactors() const
Returns an iterator range over all VFs of the plan.
Definition VPlan.h:4612
bool hasUF(unsigned UF) const
Definition VPlan.h:4623
ArrayRef< VPIRBasicBlock * > getExitBlocks() const
Return an ArrayRef containing VPIRBasicBlocks wrapping the exit blocks of the original scalar loop.
Definition VPlan.h:4545
VPSymbolicValue & getVectorTripCount()
The vector trip count.
Definition VPlan.h:4584
VPIRValue * getOrAddLiveIn(Value *V)
Gets the live-in VPIRValue for V or adds a new live-in (if none exists yet) for V.
Definition VPlan.h:4647
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition VPlan.cpp:1031
bool hasEarlyExit() const
Returns true if the VPlan is based on a loop with an early exit.
Definition VPlan.h:4761
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
Definition VPlan.cpp:1013
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
Definition VPlan.h:4569
VPBasicBlock * getMiddleBlock()
Returns the 'middle' block of the plan, that is the block that selects whether to execute the scalar ...
Definition VPlan.h:4522
VPBasicBlock * getScalarPreheader() const
Return the VPBasicBlock for the preheader of the scalar loop.
Definition VPlan.h:4536
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
Definition VPlan.cpp:921
VPIRBasicBlock * getScalarHeader() const
Return the VPIRBasicBlock wrapping the header of the scalar loop.
Definition VPlan.h:4541
VPBasicBlock * getVectorPreheader()
Returns the preheader of the vector loop region, if one exists, or null otherwise.
Definition VPlan.h:4502
LLVM_ABI_FOR_TEST VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
Definition VPlan.cpp:1173
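A hedged sketch of how a couple of the VPlan accessors above fit together; VPlan is an internal, frequently changing class, so this is illustrative only, and planSupports is a hypothetical helper, not part of this file.

#include "VPlan.h"

using namespace llvm;

// Returns true if the plan was built to cover both the given vectorization
// factor and unroll factor (illustrative helper).
static bool planSupports(VPlan &Plan, ElementCount VF, unsigned UF) {
  return Plan.hasVF(VF) && Plan.hasUF(UF);
}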
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:397
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:708
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
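Illustrative use of the llvm::Value queries listed above; dumpValueInfo is a hypothetical helper used only to show the calls.

#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/raw_ostream.h"

// Print a value's name, type and whether it has exactly one user.
static void dumpValueInfo(const llvm::Value *V) {
  llvm::errs() << V->getName() << " : ";
  V->getType()->print(llvm::errs());
  if (V->hasOneUser())
    llvm::errs() << " (exactly one user)";
  llvm::errs() << "\n";
}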
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:230
constexpr bool isNonZero() const
Definition TypeSize.h:155
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition TypeSize.h:256
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
constexpr bool isZero() const
Definition TypeSize.h:153
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
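A small example of the ElementCount/FixedOrScalableQuantity comparisons referenced above: fixed and scalable counts are only ordered when the relation holds for every possible vscale. elementCountExamples is a hypothetical helper.

#include "llvm/Support/TypeSize.h"

using llvm::ElementCount;

static void elementCountExamples() {
  ElementCount Fixed8 = ElementCount::getFixed(8);       // exactly 8 lanes
  ElementCount Scalable4 = ElementCount::getScalable(4); // 4 x vscale lanes
  (void)ElementCount::isKnownLE(Fixed8, Fixed8);         // true
  // Not known in general: 4 x vscale may be smaller or larger than 8,
  // so the query conservatively returns false.
  (void)ElementCount::isKnownGE(Scalable4, Fixed8);
  (void)Scalable4.isScalable();                          // true
  (void)Fixed8.getFixedValue();                          // 8
}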
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
IteratorT end() const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
auto match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
class_match< const SCEVVScale > m_SCEVVScale()
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
specificloop_ty m_SpecificLoop(const Loop *L)
cst_pred_ty< is_specific_signed_cst > m_scev_SpecificSInt(int64_t V)
Match an SCEV constant with a plain signed integer (sign-extended value will be matched)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
bool match(const SCEV *S, const Pattern &P)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
class_match< const SCEV > m_SCEV()
AllRecipe_match< Instruction::Select, Op0_t, Op1_t, Op2_t > m_Select(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2)
bool matchFindIVResult(VPInstruction *VPI, Op0_t ReducedIV, Op1_t Start)
Match FindIV result pattern: select(icmp ne ComputeReductionResult(ReducedIV), Sentinel),...
match_combine_or< AllRecipe_match< Instruction::ZExt, Op0_t >, AllRecipe_match< Instruction::SExt, Op0_t > > m_ZExtOrSExt(const Op0_t &Op0)
VPInstruction_match< VPInstruction::ExtractLastLane, Op0_t > m_ExtractLastLane(const Op0_t &Op0)
VPInstruction_match< VPInstruction::ExtractLastPart, Op0_t > m_ExtractLastPart(const Op0_t &Op0)
class_match< VPValue > m_VPValue()
Match an arbitrary VPValue and ignore it.
VPInstruction_match< VPInstruction::ExtractLane, Op0_t, Op1_t > m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1)
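A minimal sketch of how the PatternMatch-style matchers indexed above are used, shown with the IR-level variants from llvm/IR/PatternMatch.h; isMulByOne is a hypothetical helper.

#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

// Returns true if V is an integer multiply of some value by the constant 1.
static bool isMulByOne(llvm::Value *V) {
  using namespace llvm::PatternMatch;
  llvm::Value *X;
  // m_Mul/m_Value/m_One build a pattern object; match() walks V against it.
  return match(V, m_Mul(m_Value(X), m_One()));
}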
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
Definition RDFGraph.h:389
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or on...
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr)
Get or create a VPValue that corresponds to the expansion of Expr.
VPBasicBlock * getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT)
Returns the header block of the first, top-level loop, or null if none exist.
bool isAddressSCEVForCost(const SCEV *Addr, ScalarEvolution &SE, const Loop *L)
Returns true if Addr is an address SCEV that can be passed to TTI::getAddressComputationCost,...
bool onlyFirstLaneUsed(const VPValue *Def)
Returns true if only the first lane of Def is used.
VPIRFlags getFlagsFromIndDesc(const InductionDescriptor &ID)
Extracts and returns NoWrap and FastMath flags from the induction binop in ID.
Definition VPlanUtils.h:94
VPRecipeBase * findRecipe(VPValue *Start, PredT Pred)
Search Start's users for a recipe satisfying Pred, looking through recipes with definitions.
Definition VPlanUtils.h:111
const SCEV * getSCEVExprForVPValue(const VPValue *V, PredicatedScalarEvolution &PSE, const Loop *L=nullptr)
Return the SCEV expression for V.
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
LLVM_ABI bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:829
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
auto cast_if_present(const Y &Val)
cast_if_present<X> - Functionally identical to cast, except that a null value is accepted.
Definition Casting.h:683
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
LLVM_ABI_FOR_TEST cl::opt< bool > VerifyEachVPlan
LLVM_ABI std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Return either:
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
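Illustrative use of the range-based STLExtras wrappers (all_of, any_of, none_of, find_if, ...) listed in this index; allStrictlyPositive is a hypothetical helper.

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

// Equivalent to std::all_of(Vals.begin(), Vals.end(), ...).
static bool allStrictlyPositive(const llvm::SmallVectorImpl<int> &Vals) {
  return llvm::all_of(Vals, [](int V) { return V > 0; });
}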
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of a load or store instruction.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
LLVM_ABI_FOR_TEST bool verifyVPlanIsValid(const VPlan &Plan, bool VerifyLate=false)
Verify invariants for general VPlans.
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
InstructionCost Cost
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2198
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
Definition MathExtras.h:284
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of a load or store instruction.
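A short sketch of the getLoadStore* helpers listed in this index (declared in llvm/IR/Instructions.h); describeAccess is a hypothetical helper.

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"

// Query the pointer operand, accessed type, alignment and address space of a
// load or store instruction.
static void describeAccess(const llvm::Instruction *I) {
  if (!llvm::isa<llvm::LoadInst>(I) && !llvm::isa<llvm::StoreInst>(I))
    return;
  const llvm::Value *Ptr = llvm::getLoadStorePointerOperand(I);
  llvm::Type *AccessTy = llvm::getLoadStoreType(I);
  llvm::Align Alignment = llvm::getLoadStoreAlignment(I);
  unsigned AS = llvm::getLoadStoreAddressSpace(I);
  (void)Ptr; (void)AccessTy; (void)Alignment; (void)AS;
}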
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
Definition VPlanCFG.h:216
LLVM_ABI bool VerifySCEV
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing t...
Definition VPlanCFG.h:243
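A hedged sketch of the VPlan CFG traversal helpers above; the VPlan headers are internal to the vectorizer and change frequently, so treat this as illustrative only. countVPBasicBlocks is a hypothetical helper.

#include "VPlan.h"
#include "VPlanCFG.h"

// Count the VPBasicBlocks reachable from the plan's entry; the deep traversal
// also descends into nested VPRegionBlocks.
static unsigned countVPBasicBlocks(llvm::VPlan &Plan) {
  unsigned Count = 0;
  for (llvm::VPBlockBase *VPB : llvm::vp_depth_first_deep(Plan.getEntry()))
    if (llvm::isa<llvm::VPBasicBlock>(VPB))
      ++Count;
  return Count;
}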
SmallVector< VPRegisterUsage, 8 > calculateRegisterUsageForPlan(VPlan &Plan, ArrayRef< ElementCount > VFs, const TargetTransformInfo &TTI, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Estimate the register usage for Plan and vectorization factors in VFs by calculating the highest numb...
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:337
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected, bool ElideAllZero=false)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:149
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1634
LLVM_ABI_FOR_TEST cl::opt< bool > EnableWideActiveLaneMask
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1751
LLVM_ABI cl::opt< bool > EnableLoopVectorization
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:421
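An illustrative combination of the safety queries listed in this index: an instruction whose result is side-effect free and safe to speculate can be hoisted or deleted (simplified; real passes check more). canFreelyMove is a hypothetical helper.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Utils/Local.h"

static bool canFreelyMove(const llvm::Instruction *I) {
  return llvm::isSafeToSpeculativelyExecute(I) &&
         llvm::wouldInstructionBeTriviallyDead(I);
}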
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
LLVM_ABI void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
T * find_singleton(R &&Range, Predicate P, bool AllowRepeats=false)
Return the single value in Range that satisfies P(<member of Range> *, AllowRepeats)->T * returning n...
Definition STLExtras.h:1835
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Definition Format.h:129
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
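A small example of the MathExtras helpers referenced in this index; chunksNeeded is a hypothetical helper.

#include "llvm/Support/MathExtras.h"
#include <cassert>

// How many power-of-two-sized chunks are needed to cover Elements items.
static unsigned chunksNeeded(unsigned Elements, unsigned ChunkSize) {
  assert(llvm::isPowerOf2_32(ChunkSize) && "expected a power-of-two chunk");
  // divideCeil(E, C) == (E + C - 1) / C for positive C.
  return llvm::divideCeil(Elements, ChunkSize);
}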
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elem...
TargetTransformInfo TTI
static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr, DebugLoc DL={})
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
RecurKind
These are the kinds of recurrences that we support.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Add
Sum of integers.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity for the @llvm.vector....
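A hedged sketch of getRecurrenceIdentity, assuming it is available via llvm/Transforms/Utils/LoopUtils.h; addReductionIdentity is a hypothetical helper.

#include "llvm/IR/FMF.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

// For RecurKind::Add the identity is the integer constant 0.
static llvm::Value *addReductionIdentity(llvm::LLVMContext &Ctx) {
  return llvm::getRecurrenceIdentity(llvm::RecurKind::Add,
                                     llvm::Type::getInt32Ty(Ctx),
                                     llvm::FastMathFlags());
}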
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
LLVM_ABI void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding o...
DWARFExpression::Operation Op
ScalarEpilogueLowering
@ CM_ScalarEpilogueNotAllowedLowTripLoop
@ CM_ScalarEpilogueNotNeededUsePredicate
@ CM_ScalarEpilogueNotAllowedOptSize
@ CM_ScalarEpilogueAllowed
@ CM_ScalarEpilogueNotAllowedUsePredicate
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1770
Value * emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue, Value *Step, InductionDescriptor::InductionKind InductionKind, const BinaryOperator *InductionBinOp)
Compute the transformed value of Index at offset StartValue using step StepValue.
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:368
cl::opt< bool > EnableVPlanNativePath
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
LLVM_ABI Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
@ DataAndControlFlowWithoutRuntimeCheck
Use predicate to control both data and control flow, but modify the trip count so that a runtime over...
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataAndControlFlow
Use predicate to control both data and control flow.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
@ Data
Use predicate only to mask operations on data in the loop.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool hasBranchWeightMD(const Instruction &I)
Checks if an instruction has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:592
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition bit.h:330
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
LLVM_ABI_FOR_TEST cl::opt< bool > PrintAfterEachVPlanPass
std::unique_ptr< VPlan > VPlanPtr
Definition VPlan.h:77
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
LLVM_ABI MapVector< Instruction *, uint64_t > computeMinimumValueSizes(ArrayRef< BasicBlock * > Blocks, DemandedBits &DB, const TargetTransformInfo *TTI=nullptr)
Compute a map of integer instructions to their minimum legal type size.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
LLVM_ABI cl::opt< bool > EnableLoopInterleaving
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given valu...
Encapsulate information regarding vectorization of a loop and its epilogue.
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF, VPlan &EpiloguePlan)
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
This holds details about a histogram operation – a load -> update -> store sequence where each lane i...
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
TargetLibraryInfo * TLI
LLVM_ABI LoopVectorizeResult runImpl(Function &F)
LLVM_ABI bool processLoop(Loop *L)
ProfileSummaryInfo * PSI
LoopAccessInfoManager * LAIs
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI LoopVectorizePass(LoopVectorizeOptions Opts={})
ScalarEvolution * SE
AssumptionCache * AC
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
std::function< BlockFrequencyInfo &()> GetBFI
TargetTransformInfo * TTI
Storage for information about made changes.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
A marker analysis to determine if extra passes should be run after loop vectorization.
static LLVM_ABI AnalysisKey Key
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
ElementCount End
Struct to hold various analysis needed for cost computations.
unsigned getPredBlockCostDivisor(BasicBlock *BB) const
LoopVectorizationCostModel & CM
bool isLegacyUniformAfterVectorization(Instruction *I, ElementCount VF) const
Return true if I is considered uniform-after-vectorization in the legacy cost model for VF.
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of al...
TargetTransformInfo::TargetCostKind CostKind
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A struct that represents some properties of the register usage of a loop.
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the ou...
A recipe for widening load operations, using the address to load from and an optional mask.
Definition VPlan.h:3452
A recipe for widening store operations, using the stored value, the address to store to and an option...
Definition VPlan.h:3535
static LLVM_ABI_FOR_TEST bool tryToConvertVPInstructionsToVPRecipes(VPlan &Plan, const TargetLibraryInfo &TLI)
Replaces the VPInstructions in Plan with corresponding widen recipes.
static void materializeBroadcasts(VPlan &Plan)
Add explicit broadcasts for live-ins and VPValues defined in Plan's entry block if they are used as v...
static void materializePacksAndUnpacks(VPlan &Plan)
Add explicit Build[Struct]Vector recipes to Pack multiple scalar values into vectors and Unpack recip...
static bool handleMultiUseReductions(VPlan &Plan)
Try to legalize reductions with multiple in-loop uses.
static LLVM_ABI_FOR_TEST std::unique_ptr< VPlan > buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy, DebugLoc IVDL, PredicatedScalarEvolution &PSE, LoopVersioning *LVer=nullptr)
Create a base VPlan0, serving as the common starting point for all later candidates.
static void optimizeInductionExitUsers(VPlan &Plan, DenseMap< VPValue *, VPValue * > &EndValues, PredicatedScalarEvolution &PSE)
If there's a single exit block, optimize its phi recipes that use exiting IV values by feeding them p...
static void materializeBackedgeTakenCount(VPlan &Plan, VPBasicBlock *VectorPH)
Materialize the backedge-taken count to be computed explicitly using VPInstructions.
static LLVM_ABI_FOR_TEST void handleEarlyExits(VPlan &Plan, bool HasUncountableExit)
Update Plan to account for all early exits.
static void canonicalizeEVLLoops(VPlan &Plan)
Transform EVL loops to use variable-length stepping after region dissolution.
static void createInLoopReductionRecipes(VPlan &Plan, const DenseMap< VPBasicBlock *, VPValue * > &BlockMaskCache, const DenseSet< BasicBlock * > &BlocksNeedingPredication, ElementCount MinVF)
Create VPReductionRecipes for in-loop reductions.
static void dropPoisonGeneratingRecipes(VPlan &Plan, const std::function< bool(BasicBlock *)> &BlockNeedsPredication)
Drop poison flags from recipes that may generate a poison value that is used after vectorization,...
static void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount, bool RequiresScalarEpilogue, bool TailFolded, bool CheckNeededWithTailFolding, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, PredicatedScalarEvolution &PSE)
static void createInterleaveGroups(VPlan &Plan, const SmallPtrSetImpl< const InterleaveGroup< Instruction > * > &InterleaveGroups, VPRecipeBuilder &RecipeBuilder, const bool &ScalarEpilogueAllowed)
static void addBranchWeightToMiddleTerminator(VPlan &Plan, ElementCount VF, std::optional< unsigned > VScaleForTuning)
Add branch weight metadata, if the Plan's middle block is terminated by a BranchOnCond recipe.
static bool handleFindLastReductions(VPlan &Plan)
Check if Plan contains any FindLast reductions.
static void narrowInterleaveGroups(VPlan &Plan, ElementCount VF, TypeSize VectorRegWidth)
Try to convert a plan with interleave groups with VF elements to a plan with the interleave groups re...
static void unrollByUF(VPlan &Plan, unsigned UF)
Explicitly unroll Plan by UF.
static DenseMap< const SCEV *, Value * > expandSCEVs(VPlan &Plan, ScalarEvolution &SE)
Expand VPExpandSCEVRecipes in Plan's entry block.
static void convertToConcreteRecipes(VPlan &Plan)
Lower abstract recipes to concrete ones, that can be codegen'd.
static void expandBranchOnTwoConds(VPlan &Plan)
Expand BranchOnTwoConds instructions into explicit CFG with BranchOnCond instructions.
static void hoistPredicatedLoads(VPlan &Plan, PredicatedScalarEvolution &PSE, const Loop *L)
Hoist predicated loads from the same address to the loop entry block, if they are guaranteed to execu...
static void convertToAbstractRecipes(VPlan &Plan, VPCostContext &Ctx, VFRange &Range)
This function converts initial recipes to the abstract recipes and clamps Range based on cost model f...
static void materializeConstantVectorTripCount(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
static void addExitUsersForFirstOrderRecurrences(VPlan &Plan, VFRange &Range)
Handle users in the exit block for first order reductions in the original exit block.
static void createHeaderPhiRecipes(VPlan &Plan, PredicatedScalarEvolution &PSE, Loop &OrigLoop, const MapVector< PHINode *, InductionDescriptor > &Inductions, const MapVector< PHINode *, RecurrenceDescriptor > &Reductions, const SmallPtrSetImpl< const PHINode * > &FixedOrderRecurrences, const SmallPtrSetImpl< PHINode * > &InLoopReductions, bool AllowReordering)
Replace VPPhi recipes in Plan's header with corresponding VPHeaderPHIRecipe subclasses for inductions...
static DenseMap< VPBasicBlock *, VPValue * > introduceMasksAndLinearize(VPlan &Plan, bool FoldTail)
Predicate and linearize the control-flow in the only loop region of Plan.
static void addExplicitVectorLength(VPlan &Plan, const std::optional< unsigned > &MaxEVLSafeElements)
Add a VPEVLBasedIVPHIRecipe and related recipes to Plan and replaces all uses except the canonical IV...
static void optimizeEVLMasks(VPlan &Plan)
Optimize recipes which use an EVL-based header mask to VP intrinsics, for example:
static void replaceSymbolicStrides(VPlan &Plan, PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap)
Replace symbolic strides from StridesMap in Plan with constants when possible.
static bool handleMaxMinNumReductions(VPlan &Plan)
Check if Plan contains any FMaxNum or FMinNum reductions.
static void removeBranchOnConst(VPlan &Plan)
Remove BranchOnCond recipes with true or false conditions together with removing dead edges to their ...
static LLVM_ABI_FOR_TEST void createLoopRegions(VPlan &Plan)
Replace loops in Plan's flat CFG with VPRegionBlocks, turning Plan's flat CFG into a hierarchical CFG...
static void removeDeadRecipes(VPlan &Plan)
Remove dead recipes from Plan.
static void attachCheckBlock(VPlan &Plan, Value *Cond, BasicBlock *CheckBlock, bool AddBranchWeights)
Wrap runtime check block CheckBlock in a VPIRBB and Cond in a VPValue and connect the block to Plan,...
static void materializeVectorTripCount(VPlan &Plan, VPBasicBlock *VectorPHVPBB, bool TailByMasking, bool RequiresScalarEpilogue)
Materialize vector trip count computations to a set of VPInstructions.
static void simplifyRecipes(VPlan &Plan)
Perform instcombine-like simplifications on recipes in Plan.
static void sinkPredicatedStores(VPlan &Plan, PredicatedScalarEvolution &PSE, const Loop *L)
Sink predicated stores to the same address with complementary predicates (P and NOT P) to an uncondit...
static void replicateByVF(VPlan &Plan, ElementCount VF)
Replace each replicating VPReplicateRecipe and VPInstruction outside of any replicate region in Plan ...
static void clearReductionWrapFlags(VPlan &Plan)
Clear NSW/NUW flags from reduction instructions if necessary.
static void createPartialReductions(VPlan &Plan, VPCostContext &CostCtx, VFRange &Range)
Detect and create partial reduction recipes for scaled reductions in Plan.
static void cse(VPlan &Plan)
Perform common-subexpression-elimination on Plan.
static void addActiveLaneMask(VPlan &Plan, bool UseActiveLaneMaskForControlFlow, bool DataAndControlFlowWithoutRuntimeCheck)
Replace (ICMP_ULE, wide canonical IV, backedge-taken-count) checks with an (active-lane-mask recipe,...
static LLVM_ABI_FOR_TEST void optimize(VPlan &Plan)
Apply VPlan-to-VPlan optimizations to Plan, including induction recipe optimizations,...
static void dissolveLoopRegions(VPlan &Plan)
Replace loop regions with explicit CFG.
static void truncateToMinimalBitwidths(VPlan &Plan, const MapVector< Instruction *, uint64_t > &MinBWs)
Insert truncates and extends for any truncated recipe.
static bool adjustFixedOrderRecurrences(VPlan &Plan, VPBuilder &Builder)
Try to have all users of fixed-order recurrences appear after the recipe defining their previous valu...
static void optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
Optimize Plan based on BestVF and BestUF.
static void materializeVFAndVFxUF(VPlan &Plan, VPBasicBlock *VectorPH, ElementCount VF)
Materialize VF and VFxUF to be computed explicitly using VPInstructions.
static void addMinimumVectorEpilogueIterationCheck(VPlan &Plan, Value *TripCount, Value *VectorTripCount, bool RequiresScalarEpilogue, ElementCount EpilogueVF, unsigned EpilogueUF, unsigned MainLoopStep, unsigned EpilogueLoopStep, ScalarEvolution &SE)
Add a check to Plan to see if the epilogue vector loop should be executed.
static void updateScalarResumePhis(VPlan &Plan, DenseMap< VPValue *, VPValue * > &IVEndValues)
Update the resume phis in the scalar preheader after creating wide recipes for first-order recurrence...
static LLVM_ABI_FOR_TEST void addMiddleCheck(VPlan &Plan, bool RequiresScalarEpilogueCheck, bool TailFolded)
If a check is needed to guard executing the scalar epilogue loop, it will be added to the middle bloc...
TODO: The following VectorizationFactor was pulled out of the LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
static LLVM_ABI bool HoistRuntimeChecks