LLVM 22.0.0git
VPlanUtils.cpp
Go to the documentation of this file.
1//===- VPlanUtils.cpp - VPlan-related utilities ---------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "VPlanUtils.h"
10#include "VPlanCFG.h"
11#include "VPlanDominatorTree.h"
12#include "VPlanPatternMatch.h"
13#include "llvm/ADT/TypeSwitch.h"
16
17using namespace llvm;
18using namespace llvm::VPlanPatternMatch;
19
21 return all_of(Def->users(),
22 [Def](const VPUser *U) { return U->usesFirstLaneOnly(Def); });
23}
24
26 return all_of(Def->users(),
27 [Def](const VPUser *U) { return U->usesFirstPartOnly(Def); });
28}
29
31 return all_of(Def->users(),
32 [Def](const VPUser *U) { return U->usesScalars(Def); });
33}
34
36 if (auto *E = dyn_cast<SCEVConstant>(Expr))
37 return Plan.getOrAddLiveIn(E->getValue());
38 // Skip SCEV expansion if Expr is a SCEVUnknown wrapping a non-instruction
39 // value. Otherwise the value may be defined in a loop and using it directly
40 // will break LCSSA form. The SCEV expansion takes care of preserving LCSSA
41 // form.
42 auto *U = dyn_cast<SCEVUnknown>(Expr);
43 if (U && !isa<Instruction>(U->getValue()))
44 return Plan.getOrAddLiveIn(U->getValue());
45 auto *Expanded = new VPExpandSCEVRecipe(Expr);
46 Plan.getEntry()->appendRecipe(Expanded);
47 return Expanded;
48}
49
// Return true if V is a header mask in Plan, i.e. a mask that enables exactly
// the lanes of the current iteration (active-lane-mask, wide canonical IV
// compare, or scalar IV steps compare for scalar plans).
bool vputils::isHeaderMask(const VPValue *V, const VPlan &Plan) {
  // NOTE(review): the extraction dropped the guard (original line 51) whose
  // then-branch is the 'return true' below — restore from upstream.
    return true;

  // Predicate: does A compute a wide canonical induction?
  auto IsWideCanonicalIV = [](VPValue *A) {
  // NOTE(review): lambda body (original lines 55-57) dropped by extraction —
  // restore from upstream.
  };

  VPValue *A, *B;

  // Matcher for scalar steps of the canonical IV: unit step and the plan's VF.
  auto m_CanonicalScalarIVSteps =
  // NOTE(review): first matcher-argument line (original line 63) dropped by
  // extraction — restore from upstream.
          m_One(), m_Specific(&Plan.getVF()));

  // NOTE(review): the enclosing 'if (match(...))' (original line 66) was
  // dropped by extraction; the return below compares B against the trip count.
    return B == Plan.getTripCount() &&
           (match(A, m_CanonicalScalarIVSteps) || IsWideCanonicalIV(A));

  // For scalar plans, the header mask uses the scalar steps.
  if (match(V, m_ICmp(m_CanonicalScalarIVSteps,
  // NOTE(review): second m_ICmp operand and closing parens (original line 72)
  // dropped by extraction — restore from upstream.
    assert(Plan.hasScalarVFOnly() &&
           "Non-scalar VF using scalar IV steps for header mask?");
    return true;
  }

  // Wide form: icmp of a wide canonical IV against the backedge-taken count.
  return match(V, m_ICmp(m_VPValue(A), m_VPValue(B))) && IsWideCanonicalIV(A) &&
         B == Plan.getBackedgeTakenCount();
}
81
83 ScalarEvolution &SE, const Loop *L) {
84 if (V->isLiveIn()) {
85 if (Value *LiveIn = V->getLiveInIRValue())
86 return SE.getSCEV(LiveIn);
87 return SE.getCouldNotCompute();
88 }
89
90 // TODO: Support constructing SCEVs for more recipes as needed.
91 return TypeSwitch<const VPRecipeBase *, const SCEV *>(V->getDefiningRecipe())
93 [](const VPExpandSCEVRecipe *R) { return R->getSCEV(); })
94 .Case<VPCanonicalIVPHIRecipe>([&SE, L](const VPCanonicalIVPHIRecipe *R) {
95 if (!L)
96 return SE.getCouldNotCompute();
97 const SCEV *Start = getSCEVExprForVPValue(R->getOperand(0), SE, L);
98 return SE.getAddRecExpr(Start, SE.getOne(Start->getType()), L,
100 })
101 .Case<VPWidenIntOrFpInductionRecipe>(
102 [&SE, L](const VPWidenIntOrFpInductionRecipe *R) {
103 const SCEV *Step = getSCEVExprForVPValue(R->getStepValue(), SE, L);
104 if (!L || isa<SCEVCouldNotCompute>(Step))
105 return SE.getCouldNotCompute();
106 const SCEV *Start =
107 getSCEVExprForVPValue(R->getStartValue(), SE, L);
108 return SE.getAddRecExpr(Start, Step, L, SCEV::FlagAnyWrap);
109 })
110 .Case<VPDerivedIVRecipe>([&SE, L](const VPDerivedIVRecipe *R) {
111 const SCEV *Start = getSCEVExprForVPValue(R->getOperand(0), SE, L);
112 const SCEV *IV = getSCEVExprForVPValue(R->getOperand(1), SE, L);
113 const SCEV *Scale = getSCEVExprForVPValue(R->getOperand(2), SE, L);
114 if (any_of(ArrayRef({Start, IV, Scale}), IsaPred<SCEVCouldNotCompute>))
115 return SE.getCouldNotCompute();
116
117 return SE.getAddExpr(SE.getTruncateOrSignExtend(Start, IV->getType()),
119 Scale, IV->getType())));
120 })
121 .Case<VPScalarIVStepsRecipe>([&SE, L](const VPScalarIVStepsRecipe *R) {
122 const SCEV *IV = getSCEVExprForVPValue(R->getOperand(0), SE, L);
123 const SCEV *Step = getSCEVExprForVPValue(R->getOperand(1), SE, L);
125 !Step->isOne())
126 return SE.getCouldNotCompute();
127 return SE.getMulExpr(SE.getTruncateOrSignExtend(IV, Step->getType()),
128 Step);
129 })
130 .Case<VPReplicateRecipe>([&SE, L](const VPReplicateRecipe *R) {
131 if (R->getOpcode() != Instruction::GetElementPtr)
132 return SE.getCouldNotCompute();
133
134 const SCEV *Base = getSCEVExprForVPValue(R->getOperand(0), SE, L);
136 return SE.getCouldNotCompute();
137
138 SmallVector<const SCEV *> IndexExprs;
139 for (VPValue *Index : drop_begin(R->operands())) {
140 const SCEV *IndexExpr = getSCEVExprForVPValue(Index, SE, L);
141 if (isa<SCEVCouldNotCompute>(IndexExpr))
142 return SE.getCouldNotCompute();
143 IndexExprs.push_back(IndexExpr);
144 }
145
146 Type *SrcElementTy = cast<GetElementPtrInst>(R->getUnderlyingInstr())
147 ->getSourceElementType();
148 return SE.getGEPExpr(Base, IndexExprs, SrcElementTy);
149 })
150 .Default([&SE](const VPRecipeBase *) { return SE.getCouldNotCompute(); });
151}
152
/// Returns true if \p Opcode preserves uniformity, i.e., if all operands are
/// uniform, the result will also be uniform.
static bool preservesUniformity(unsigned Opcode) {
  // Binary arithmetic/logic ops and casts operate lane-wise, so uniform
  // inputs give a uniform result.
  if (Instruction::isBinaryOp(Opcode) || Instruction::isCast(Opcode))
    return true;
  switch (Opcode) {
  case Instruction::GetElementPtr:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Select:
  // NOTE(review): the extraction dropped three additional 'case' labels here
  // (original lines 163-165) — restore them from upstream before relying on
  // this switch.
    return true;
  default:
    // Anything else is conservatively assumed to break uniformity.
    return false;
  }
}
171
173 // A live-in must be uniform across the scope of VPlan.
174 if (VPV->isLiveIn())
175 return true;
176
177 if (auto *Rep = dyn_cast<VPReplicateRecipe>(VPV)) {
178 const VPRegionBlock *RegionOfR = Rep->getRegion();
179 // Don't consider recipes in replicate regions as uniform yet; their first
180 // lane cannot be accessed when executing the replicate region for other
181 // lanes.
182 if (RegionOfR && RegionOfR->isReplicator())
183 return false;
184 return Rep->isSingleScalar() || (preservesUniformity(Rep->getOpcode()) &&
185 all_of(Rep->operands(), isSingleScalar));
186 }
190 if (auto *WidenR = dyn_cast<VPWidenRecipe>(VPV)) {
191 return preservesUniformity(WidenR->getOpcode()) &&
192 all_of(WidenR->operands(), isSingleScalar);
193 }
194 if (auto *VPI = dyn_cast<VPInstruction>(VPV))
195 return VPI->isSingleScalar() || VPI->isVectorToScalar() ||
196 (preservesUniformity(VPI->getOpcode()) &&
197 all_of(VPI->operands(), isSingleScalar));
199 return false;
200 if (isa<VPReductionRecipe>(VPV))
201 return true;
202 if (auto *Expr = dyn_cast<VPExpressionRecipe>(VPV))
203 return Expr->isSingleScalar();
204
205 // VPExpandSCEVRecipes must be placed in the entry and are always uniform.
206 return isa<VPExpandSCEVRecipe>(VPV);
207}
208
210 // Live-ins are uniform.
211 if (V->isLiveIn())
212 return true;
213
214 VPRecipeBase *R = V->getDefiningRecipe();
215 if (R && V->isDefinedOutsideLoopRegions()) {
216 if (match(V->getDefiningRecipe(),
218 return false;
219 return all_of(R->operands(), isUniformAcrossVFsAndUFs);
220 }
221
222 auto *CanonicalIV =
223 R->getParent()->getEnclosingLoopRegion()->getCanonicalIV();
224 // Canonical IV chain is uniform.
225 if (V == CanonicalIV || V == CanonicalIV->getBackedgeValue())
226 return true;
227
229 .Case<VPDerivedIVRecipe>([](const auto *R) { return true; })
230 .Case<VPReplicateRecipe>([](const auto *R) {
231 // Be conservative about side-effects, except for the
232 // known-side-effecting assumes and stores, which we know will be
233 // uniform.
234 return R->isSingleScalar() &&
235 (!R->mayHaveSideEffects() ||
236 isa<AssumeInst, StoreInst>(R->getUnderlyingInstr())) &&
237 all_of(R->operands(), isUniformAcrossVFsAndUFs);
238 })
239 .Case<VPWidenRecipe>([](const auto *R) {
240 return preservesUniformity(R->getOpcode()) &&
241 all_of(R->operands(), isUniformAcrossVFsAndUFs);
242 })
243 .Case<VPInstruction>([](const auto *VPI) {
244 return (VPI->isScalarCast() &&
245 isUniformAcrossVFsAndUFs(VPI->getOperand(0))) ||
246 (preservesUniformity(VPI->getOpcode()) &&
247 all_of(VPI->operands(), isUniformAcrossVFsAndUFs));
248 })
249 .Case<VPWidenCastRecipe>([](const auto *R) {
250 // A cast is uniform according to its operand.
251 return isUniformAcrossVFsAndUFs(R->getOperand(0));
252 })
253 .Default([](const VPRecipeBase *) { // A value is considered non-uniform
254 // unless proven otherwise.
255 return false;
256 });
257}
258
260 auto DepthFirst = vp_depth_first_shallow(Plan.getEntry());
261 auto I = find_if(DepthFirst, [&VPDT](VPBlockBase *VPB) {
262 return VPBlockUtils::isHeader(VPB, VPDT);
263 });
264 return I == DepthFirst.end() ? nullptr : cast<VPBasicBlock>(*I);
265}
266
268 if (!R)
269 return 1;
270 if (auto *RR = dyn_cast<VPReductionPHIRecipe>(R))
271 return RR->getVFScaleFactor();
272 if (auto *RR = dyn_cast<VPPartialReductionRecipe>(R))
273 return RR->getVFScaleFactor();
274 if (auto *ER = dyn_cast<VPExpressionRecipe>(R))
275 return ER->getVFScaleFactor();
276 assert(
279 "getting scaling factor of reduction-start-vector not implemented yet");
280 return 1;
281}
282
283std::optional<VPValue *>
287 // Given a VPlan like the following (just including the recipes contributing
288 // to loop control exiting here, not the actual work), we're looking to match
289 // the recipes contributing to the uncountable exit condition comparison
290 // (here, vp<%4>) back to either live-ins or the address nodes for the load
291 // used as part of the uncountable exit comparison so that we can copy them
292 // to a preheader and rotate the address in the loop to the next vector
293 // iteration.
294 //
295 // Currently, the address of the load is restricted to a GEP with 2 operands
296 // and a live-in base address. This constraint may be relaxed later.
297 //
298 // VPlan ' for UF>=1' {
299 // Live-in vp<%0> = VF
300 // Live-in ir<64> = original trip-count
301 //
302 // entry:
303 // Successor(s): preheader, vector.ph
304 //
305 // vector.ph:
306 // Successor(s): vector loop
307 //
308 // <x1> vector loop: {
309 // vector.body:
310 // EMIT vp<%2> = CANONICAL-INDUCTION ir<0>
311 // vp<%3> = SCALAR-STEPS vp<%2>, ir<1>, vp<%0>
312 // CLONE ir<%ee.addr> = getelementptr ir<0>, vp<%3>
313 // WIDEN ir<%ee.load> = load ir<%ee.addr>
314 // WIDEN vp<%4> = icmp eq ir<%ee.load>, ir<0>
315 // EMIT vp<%5> = any-of vp<%4>
316 // EMIT vp<%6> = add vp<%2>, vp<%0>
317 // EMIT vp<%7> = icmp eq vp<%6>, ir<64>
318 // EMIT vp<%8> = or vp<%5>, vp<%7>
319 // EMIT branch-on-cond vp<%8>
320 // No successors
321 // }
322 // Successor(s): middle.block
323 //
324 // middle.block:
325 // Successor(s): preheader
326 //
327 // preheader:
328 // No successors
329 // }
330
331 // Find the uncountable loop exit condition.
332 auto *Region = Plan.getVectorLoopRegion();
333 VPValue *UncountableCondition = nullptr;
334 if (!match(Region->getExitingBasicBlock()->getTerminator(),
336 m_AnyOf(m_VPValue(UncountableCondition)), m_VPValue())))))
337 return std::nullopt;
338
340 Worklist.push_back(UncountableCondition);
341 while (!Worklist.empty()) {
342 VPValue *V = Worklist.pop_back_val();
343
344 // Any value defined outside the loop does not need to be copied.
345 if (V->isDefinedOutsideLoopRegions())
346 continue;
347
348 // FIXME: Remove the single user restriction; it's here because we're
349 // starting with the simplest set of loops we can, and multiple
350 // users means needing to add PHI nodes in the transform.
351 if (V->getNumUsers() > 1)
352 return std::nullopt;
353
354 VPValue *Op1, *Op2;
355 // Walk back through recipes until we find at least one load from memory.
356 if (match(V, m_ICmp(m_VPValue(Op1), m_VPValue(Op2)))) {
357 Worklist.push_back(Op1);
358 Worklist.push_back(Op2);
359 Recipes.push_back(V->getDefiningRecipe());
360 } else if (auto *Load = dyn_cast<VPWidenLoadRecipe>(V)) {
361 // Reject masked loads for the time being; they make the exit condition
362 // more complex.
363 if (Load->isMasked())
364 return std::nullopt;
365
366 VPValue *GEP = Load->getAddr();
368 return std::nullopt;
369
370 Recipes.push_back(Load);
371 Recipes.push_back(GEP->getDefiningRecipe());
372 GEPs.push_back(GEP->getDefiningRecipe());
373 } else
374 return std::nullopt;
375 }
376
377 return UncountableCondition;
378}
379
381 const VPDominatorTree &VPDT) {
382 auto *VPBB = dyn_cast<VPBasicBlock>(VPB);
383 if (!VPBB)
384 return false;
385
386 // If VPBB is in a region R, VPBB is a loop header if R is a loop region with
387 // VPBB as its entry, i.e., free of predecessors.
388 if (auto *R = VPBB->getParent())
389 return !R->isReplicator() && !VPBB->hasPredecessors();
390
391 // A header dominates its second predecessor (the latch), with the other
392 // predecessor being the preheader
393 return VPB->getPredecessors().size() == 2 &&
394 VPDT.dominates(VPB, VPB->getPredecessors()[1]);
395}
396
398 const VPDominatorTree &VPDT) {
399 // A latch has a header as its second successor, with its other successor
400 // leaving the loop. A preheader OTOH has a header as its first (and only)
401 // successor.
402 return VPB->getNumSuccessors() == 2 &&
403 VPBlockUtils::isHeader(VPB->getSuccessors()[1], VPDT);
404}
405
406std::optional<MemoryLocation>
408 auto *M = dyn_cast<VPIRMetadata>(&R);
409 if (!M)
410 return std::nullopt;
412 // Populate noalias metadata from VPIRMetadata.
413 if (MDNode *NoAliasMD = M->getMetadata(LLVMContext::MD_noalias))
414 Loc.AATags.NoAlias = NoAliasMD;
415 if (MDNode *AliasScopeMD = M->getMetadata(LLVMContext::MD_alias_scope))
416 Loc.AATags.Scope = AliasScopeMD;
417 return Loc;
418}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Hexagon Common GEP
#define I(x, y, z)
Definition MD5.cpp:57
This file provides utility analysis objects describing memory locations.
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type ...
This file implements dominator tree analysis for a single level of a VPlan's H-CFG.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
static bool preservesUniformity(unsigned Opcode)
Returns true if Opcode preserves uniformity, i.e., if all operands are uniform, the result will also ...
static const uint32_t IV[8]
Definition blake3_impl.h:83
bool dominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
dominates - Returns true iff A dominates B.
bool isCast() const
bool isBinaryOp() const
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Metadata node.
Definition Metadata.h:1078
Representation for a specific memory location.
This class represents an analyzed expression in the program.
LLVM_ABI bool isOne() const
Return true if the expression is a constant one.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
The main scalar evolution driver.
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
LLVM_ABI const SCEV * getCouldNotCompute()
LLVM_ABI const SCEV * getGEPExpr(GEPOperator *GEP, ArrayRef< const SCEV * > IndexExprs)
Returns an expression for a GEP.
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionalit...
Definition TypeSwitch.h:88
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
Definition TypeSwitch.h:97
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition VPlan.h:3986
void appendRecipe(VPRecipeBase *Recipe)
Augment the existing recipes of a VPBasicBlock with an additional Recipe as the last recipe.
Definition VPlan.h:4061
A recipe for vectorizing a phi-node as a sequence of mask-based select instructions.
Definition VPlan.h:2483
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
Definition VPlan.h:80
size_t getNumSuccessors() const
Definition VPlan.h:218
const VPBlocksTy & getPredecessors() const
Definition VPlan.h:203
const VPBlocksTy & getSuccessors() const
Definition VPlan.h:197
static bool isLatch(const VPBlockBase *VPB, const VPDominatorTree &VPDT)
Returns true if VPB is a loop latch, using isHeader().
static bool isHeader(const VPBlockBase *VPB, const VPDominatorTree &VPDT)
Returns true if VPB is a loop header, based on regions or VPDT in their absence.
Canonical scalar induction phi of the vector loop.
Definition VPlan.h:3567
A recipe for converting the input value IV value to the corresponding value of an IV with different s...
Definition VPlan.h:3736
Template specialization of the standard LLVM dominator tree utility for VPBlockBases.
Recipe to expand a SCEV expression.
Definition VPlan.h:3529
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
Definition VPlan.h:1109
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:386
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
Definition VPlan.h:4174
bool isReplicator() const
An indicator whether this region is to generate multiple replicated instances of output IR correspond...
Definition VPlan.h:4242
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the region.
Definition VPlan.h:4272
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
Definition VPlan.h:2955
A recipe for handling phi nodes of integer and floating-point inductions, producing their scalar valu...
Definition VPlan.h:3806
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
Definition VPlanValue.h:207
operand_range operands()
Definition VPlanValue.h:275
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into,...
Definition VPlanValue.h:48
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Definition VPlan.cpp:131
bool isLiveIn() const
Returns true if this VPValue is a live-in, i.e. defined outside the VPlan.
Definition VPlanValue.h:178
A recipe for handling GEP instructions.
Definition VPlan.h:1844
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
Definition VPlan.h:2198
VPlan models a candidate for vectorization, encoding various decisions take to produce efficient outp...
Definition VPlan.h:4304
VPBasicBlock * getEntry()
Definition VPlan.h:4397
VPValue & getVF()
Returns the VF of the vector loop region.
Definition VPlan.h:4491
VPValue * getTripCount() const
The trip count of the original loop.
Definition VPlan.h:4459
VPValue * getBackedgeTakenCount() const
Definition VPlan.h:4485
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition VPlan.cpp:1011
VPValue * getOrAddLiveIn(Value *V)
Gets the live-in VPValue for V or adds a new live-in (if none exists yet) for V.
Definition VPlan.h:4551
bool hasScalarVFOnly() const
Definition VPlan.h:4520
LLVM Value Representation.
Definition Value.h:75
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
VPInstruction_match< VPInstruction::AnyOf > m_AnyOf()
AllRecipe_commutative_match< Instruction::Or, Op0_t, Op1_t > m_c_BinaryOr(const Op0_t &Op0, const Op1_t &Op1)
VPScalarIVSteps_match< Op0_t, Op1_t, Op2_t > m_ScalarIVSteps(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2)
GEPLikeRecipe_match< Op0_t, Op1_t > m_GetElementPtr(const Op0_t &Op0, const Op1_t &Op1)
VPInstruction_match< VPInstruction::ActiveLaneMask, Op0_t, Op1_t, Op2_t > m_ActiveLaneMask(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2)
class_match< VPValue > m_VPValue()
Match an arbitrary VPValue and ignore it.
bind_ty< VPInstruction > m_VPInstruction(VPInstruction *&V)
Match a VPInstruction, capturing if we match.
VPInstruction_match< VPInstruction::BranchOnCond > m_BranchOnCond()
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or on...
bool isUniformAcrossVFsAndUFs(VPValue *V)
Checks if V is uniform across all VF lanes and UF parts.
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr)
Get or create a VPValue that corresponds to the expansion of Expr.
VPBasicBlock * getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT)
Returns the header block of the first, top-level loop, or null if none exist.
bool onlyFirstPartUsed(const VPValue *Def)
Returns true if only the first part of Def is used.
std::optional< MemoryLocation > getMemoryLocation(const VPRecipeBase &R)
Return a MemoryLocation for R with noalias metadata populated from R, if the recipe is supported and ...
bool onlyFirstLaneUsed(const VPValue *Def)
Returns true if only the first lane of Def is used.
bool onlyScalarValuesUsed(const VPValue *Def)
Returns true if only scalar values of Def are used by all users.
unsigned getVFScaleFactor(VPRecipeBase *R)
Get the VF scaling factor applied to the recipe's output, if the recipe has one.
bool isHeaderMask(const VPValue *V, const VPlan &Plan)
Return true if V is a header mask in Plan.
LLVM_ABI_FOR_TEST std::optional< VPValue * > getRecipesForUncountableExit(VPlan &Plan, SmallVectorImpl< VPRecipeBase * > &Recipes, SmallVectorImpl< VPRecipeBase * > &GEPs)
Returns the VPValue representing the uncountable exit comparison used by AnyOf if the recipes it depe...
const SCEV * getSCEVExprForVPValue(const VPValue *V, ScalarEvolution &SE, const Loop *L=nullptr)
Return the SCEV expression for V.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
Definition VPlanCFG.h:216
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1758
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
A recipe for widening select instructions.
Definition VPlan.h:1797