LLVM 23.0.0git
VPlanUtils.cpp
//===- VPlanUtils.cpp - VPlan-related utilities ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "VPlanUtils.h"
#include "VPlanAnalysis.h"
#include "VPlanCFG.h"
#include "VPlanDominatorTree.h"
#include "VPlanPatternMatch.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionPatternMatch.h"

using namespace llvm;
using namespace llvm::VPlanPatternMatch;
using namespace llvm::SCEVPatternMatch;
bool vputils::onlyFirstLaneUsed(const VPValue *Def) {
  return all_of(Def->users(),
                [Def](const VPUser *U) { return U->usesFirstLaneOnly(Def); });
}

bool vputils::onlyFirstPartUsed(const VPValue *Def) {
  return all_of(Def->users(),
                [Def](const VPUser *U) { return U->usesFirstPartOnly(Def); });
}

bool vputils::onlyScalarValuesUsed(const VPValue *Def) {
  return all_of(Def->users(),
                [Def](const VPUser *U) { return U->usesScalars(Def); });
}

VPValue *vputils::getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr) {
  if (auto *E = dyn_cast<SCEVConstant>(Expr))
    return Plan.getOrAddLiveIn(E->getValue());
  // Skip SCEV expansion if Expr is a SCEVUnknown wrapping a non-instruction
  // value. Otherwise the value may be defined in a loop and using it directly
  // will break LCSSA form. The SCEV expansion takes care of preserving LCSSA
  // form.
  auto *U = dyn_cast<SCEVUnknown>(Expr);
  if (U && !isa<Instruction>(U->getValue()))
    return Plan.getOrAddLiveIn(U->getValue());
  auto *Expanded = new VPExpandSCEVRecipe(Expr);
  Plan.getEntry()->appendRecipe(Expanded);
  return Expanded;
}
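
// For example, if Expr wraps a value %x defined inside a loop and used after
// it, returning %x as a live-in directly would bypass the exit phi and break
// LCSSA:
//
//   loop:
//     %x = add i64 %iv, 4
//     br i1 %c, label %loop, label %exit
//   exit:
//     %x.lcssa = phi i64 [ %x, %loop ]  ; expansion keeps uses going through
//                                       ; this phi, preserving LCSSA form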

bool vputils::isHeaderMask(const VPValue *V, const VPlan &Plan) {
  if (isa<VPActiveLaneMaskPHIRecipe>(V))
    return true;

  auto IsWideCanonicalIV = [](VPValue *A) {
    return isa<VPWidenCanonicalIVRecipe>(A) ||
           (isa<VPWidenIntOrFpInductionRecipe>(A) &&
            cast<VPWidenIntOrFpInductionRecipe>(A)->isCanonical());
  };

  VPValue *A, *B;

  auto m_CanonicalScalarIVSteps =
      m_ScalarIVSteps(m_Specific(Plan.getVectorLoopRegion()->getCanonicalIV()),
                      m_One(), m_Specific(&Plan.getVF()));

  if (match(V, m_ActiveLaneMask(m_VPValue(A), m_VPValue(B), m_One())))
    return B == Plan.getTripCount() &&
           (match(A, m_CanonicalScalarIVSteps) || IsWideCanonicalIV(A));

  // For scalar plans, the header mask uses the scalar steps.
  if (match(V, m_ICmp(m_CanonicalScalarIVSteps,
                      m_Specific(Plan.getBackedgeTakenCount())))) {
    assert(Plan.hasScalarVFOnly() &&
           "Non-scalar VF using scalar IV steps for header mask?");
    return true;
  }

  return match(V, m_ICmp(m_VPValue(A), m_VPValue(B))) && IsWideCanonicalIV(A) &&
         B == Plan.getBackedgeTakenCount();
}
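
// As an illustration, the wide-IV form recognized by the final match above
// looks roughly like:
//
//   EMIT vp<%wide.iv> = WIDEN-CANONICAL-INDUCTION
//   EMIT vp<%mask>    = icmp ule vp<%wide.iv>, backedge-taken-count
//
// i.e. each lane compares its canonical IV value against the backedge-taken
// count, producing the mask for the loop header.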

/// Returns true if \p R propagates poison from any operand to its result.
static bool propagatesPoisonFromRecipeOp(const VPRecipeBase *R) {
  return TypeSwitch<const VPRecipeBase *, bool>(R)
      .Case<VPWidenGEPRecipe, VPVectorPointerRecipe, VPVectorEndPointerRecipe>(
          [](const VPRecipeBase *) { return true; })
      .Case([](const VPReplicateRecipe *Rep) {
        // GEP and casts propagate poison from all operands.
        unsigned Opcode = Rep->getOpcode();
        return Opcode == Instruction::GetElementPtr ||
               Instruction::isCast(Opcode);
      })
      .Default([](const VPRecipeBase *) { return false; });
}

/// Returns true if \p V being poison is guaranteed to trigger UB because it
/// propagates to the address of a memory recipe.
static bool poisonGuaranteesUB(const VPValue *V) {
  SmallPtrSet<const VPValue *, 8> Visited;
  SmallVector<const VPValue *, 8> Worklist;

  Worklist.push_back(V);

  while (!Worklist.empty()) {
    const VPValue *Current = Worklist.pop_back_val();
    if (!Visited.insert(Current).second)
      continue;

    for (VPUser *U : Current->users()) {
      // Check if Current is used as an address operand for load/store.
      if (auto *MemR = dyn_cast<VPWidenMemoryRecipe>(U)) {
        if (MemR->getAddr() == Current)
          return true;
        continue;
      }
      if (auto *Rep = dyn_cast<VPReplicateRecipe>(U)) {
        unsigned Opcode = Rep->getOpcode();
        if ((Opcode == Instruction::Load && Rep->getOperand(0) == Current) ||
            (Opcode == Instruction::Store && Rep->getOperand(1) == Current))
          return true;
      }

      // Check if poison propagates through this recipe to any of its users.
      auto *R = cast<VPRecipeBase>(U);
      for (const VPValue *Op : R->operands()) {
        if (Op == Current && propagatesPoisonFromRecipeOp(R)) {
          Worklist.push_back(R->getVPSingleValue());
          break;
        }
      }
    }
  }

  return false;
}
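
// For example, given a chain
//
//   CLONE ir<%gep> = getelementptr ir<%base>, vp<%i>  ; propagates poison
//   WIDEN ir<%l>   = load ir<%gep>                    ; %gep is the address
//
// a poison vp<%i> propagates through the GEP to the load's address, and
// loading from a poison pointer is immediate UB, so the walk returns true.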

const SCEV *vputils::getSCEVExprForVPValue(const VPValue *V,
                                           PredicatedScalarEvolution &PSE,
                                           const Loop *L) {
  ScalarEvolution &SE = *PSE.getSE();
  if (V->isLiveIn()) {
    Value *LiveIn = V->getUnderlyingValue();
    if (LiveIn && SE.isSCEVable(LiveIn->getType()))
      return SE.getSCEV(LiveIn);
    return SE.getCouldNotCompute();
  }

  // Helper to create SCEVs for binary and unary operations.
  auto CreateSCEV =
      [&SE, &PSE, L](ArrayRef<VPValue *> Ops,
                     function_ref<const SCEV *(ArrayRef<const SCEV *>)> CreateFn)
      -> const SCEV * {
    SmallVector<const SCEV *> SCEVOps;
    for (VPValue *Op : Ops) {
      const SCEV *S = getSCEVExprForVPValue(Op, PSE, L);
      if (isa<SCEVCouldNotCompute>(S))
        return SE.getCouldNotCompute();
      SCEVOps.push_back(S);
    }
    return CreateFn(SCEVOps);
  };

  VPValue *LHSVal, *RHSVal;
  if (match(V, m_Add(m_VPValue(LHSVal), m_VPValue(RHSVal))))
    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
      return SE.getAddExpr(Ops[0], Ops[1], SCEV::FlagAnyWrap, 0);
    });
  if (match(V, m_Sub(m_VPValue(LHSVal), m_VPValue(RHSVal))))
    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
      return SE.getMinusSCEV(Ops[0], Ops[1], SCEV::FlagAnyWrap, 0);
    });
  if (match(V, m_Not(m_VPValue(LHSVal)))) {
    // not X = xor X, -1 = -1 - X
    return CreateSCEV({LHSVal}, [&](ArrayRef<const SCEV *> Ops) {
      return SE.getMinusSCEV(SE.getMinusOne(Ops[0]->getType()), Ops[0]);
    });
  }
  if (match(V, m_Trunc(m_VPValue(LHSVal)))) {
    const VPlan *Plan = V->getDefiningRecipe()->getParent()->getPlan();
    Type *DestTy = VPTypeAnalysis(*Plan).inferScalarType(V);
    return CreateSCEV({LHSVal}, [&](ArrayRef<const SCEV *> Ops) {
      return SE.getTruncateExpr(Ops[0], DestTy);
    });
  }
  if (match(V, m_ZExt(m_VPValue(LHSVal)))) {
    const VPlan *Plan = V->getDefiningRecipe()->getParent()->getPlan();
    Type *DestTy = VPTypeAnalysis(*Plan).inferScalarType(V);
    return CreateSCEV({LHSVal}, [&](ArrayRef<const SCEV *> Ops) {
      return SE.getZeroExtendExpr(Ops[0], DestTy);
    });
  }
  if (match(V, m_SExt(m_VPValue(LHSVal)))) {
    const VPlan *Plan = V->getDefiningRecipe()->getParent()->getPlan();
    Type *DestTy = VPTypeAnalysis(*Plan).inferScalarType(V);

    // Mirror SCEV's createSCEV handling for sext(sub nsw): push sign extension
    // onto the operands before computing the subtraction.
    VPValue *SubLHS, *SubRHS;
    auto *SubR = dyn_cast<VPRecipeWithIRFlags>(LHSVal);
    if (match(LHSVal, m_Sub(m_VPValue(SubLHS), m_VPValue(SubRHS))) && SubR &&
        SubR->hasNoSignedWrap() && poisonGuaranteesUB(LHSVal)) {
      const SCEV *V1 = getSCEVExprForVPValue(SubLHS, PSE, L);
      const SCEV *V2 = getSCEVExprForVPValue(SubRHS, PSE, L);
      if (!isa<SCEVCouldNotCompute>(V1) && !isa<SCEVCouldNotCompute>(V2))
        return SE.getMinusSCEV(SE.getSignExtendExpr(V1, DestTy),
                               SE.getSignExtendExpr(V2, DestTy), SCEV::FlagNSW);
    }

    return CreateSCEV({LHSVal}, [&](ArrayRef<const SCEV *> Ops) {
      return SE.getSignExtendExpr(Ops[0], DestTy);
    });
  }
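
  // For example, with i8 operands %a and %b extended to i16: when the
  // subtraction cannot wrap, sext(sub nsw %a, %b) computes the same value as
  // sub nsw (sext %a), (sext %b); if it would wrap, sub nsw yields poison,
  // which the poisonGuaranteesUB check above turns into a case that cannot
  // occur in a well-defined program.
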
  if (match(V,
            m_Intrinsic<Intrinsic::umax>(m_VPValue(LHSVal), m_VPValue(RHSVal))))
    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
      return SE.getUMaxExpr(Ops[0], Ops[1]);
    });
  if (match(V,
            m_Intrinsic<Intrinsic::smax>(m_VPValue(LHSVal), m_VPValue(RHSVal))))
    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
      return SE.getSMaxExpr(Ops[0], Ops[1]);
    });
  if (match(V,
            m_Intrinsic<Intrinsic::umin>(m_VPValue(LHSVal), m_VPValue(RHSVal))))
    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
      return SE.getUMinExpr(Ops[0], Ops[1]);
    });
  if (match(V,
            m_Intrinsic<Intrinsic::smin>(m_VPValue(LHSVal), m_VPValue(RHSVal))))
    return CreateSCEV({LHSVal, RHSVal}, [&](ArrayRef<const SCEV *> Ops) {
      return SE.getSMinExpr(Ops[0], Ops[1]);
    });

  SmallVector<VPValue *> Ops;
  Type *SourceElementType;
  if (match(V, m_GetElementPtr(SourceElementType, Ops))) {
    const SCEV *GEPExpr = CreateSCEV(Ops, [&](ArrayRef<const SCEV *> Ops) {
      return SE.getGEPExpr(Ops.front(), Ops.drop_front(), SourceElementType);
    });
    return PSE.getPredicatedSCEV(GEPExpr);
  }

  // TODO: Support constructing SCEVs for more recipes as needed.
  const VPRecipeBase *DefR = V->getDefiningRecipe();
  const SCEV *Expr =
      TypeSwitch<const VPRecipeBase *, const SCEV *>(DefR)
          .Case([](const VPExpandSCEVRecipe *R) { return R->getSCEV(); })
          .Case([&SE, &PSE, L](const VPCanonicalIVPHIRecipe *R) {
            if (!L)
              return SE.getCouldNotCompute();
            const SCEV *Start = getSCEVExprForVPValue(R->getOperand(0), PSE, L);
            return SE.getAddRecExpr(Start, SE.getOne(Start->getType()), L,
                                    SCEV::FlagAnyWrap);
          })
          .Case([&SE, &PSE, L](const VPWidenIntOrFpInductionRecipe *R) {
            const SCEV *Step = getSCEVExprForVPValue(R->getStepValue(), PSE, L);
            if (!L || isa<SCEVCouldNotCompute>(Step))
              return SE.getCouldNotCompute();
            const SCEV *Start =
                getSCEVExprForVPValue(R->getStartValue(), PSE, L);
            const SCEV *AddRec =
                SE.getAddRecExpr(Start, Step, L, SCEV::FlagAnyWrap);
            if (R->getTruncInst())
              return SE.getTruncateExpr(AddRec, R->getScalarType());
            return AddRec;
          })
          .Case([&SE, &PSE, L](const VPWidenPointerInductionRecipe *R) {
            const SCEV *Start =
                getSCEVExprForVPValue(R->getStartValue(), PSE, L);
            if (!L || isa<SCEVCouldNotCompute>(Start))
              return SE.getCouldNotCompute();
            const SCEV *Step = getSCEVExprForVPValue(R->getStepValue(), PSE, L);
            if (isa<SCEVCouldNotCompute>(Step))
              return SE.getCouldNotCompute();
            return SE.getAddRecExpr(Start, Step, L, SCEV::FlagAnyWrap);
          })
          .Case([&SE, &PSE, L](const VPDerivedIVRecipe *R) {
            const SCEV *Start = getSCEVExprForVPValue(R->getOperand(0), PSE, L);
            const SCEV *IV = getSCEVExprForVPValue(R->getOperand(1), PSE, L);
            const SCEV *Scale = getSCEVExprForVPValue(R->getOperand(2), PSE, L);
            if (any_of(ArrayRef({Start, IV, Scale}),
                       IsaPred<SCEVCouldNotCompute>))
              return SE.getCouldNotCompute();

            return SE.getAddExpr(
                SE.getTruncateOrSignExtend(Start, IV->getType()),
                SE.getMulExpr(
                    IV, SE.getTruncateOrSignExtend(Scale, IV->getType())));
          })
          .Case([&SE, &PSE, L](const VPScalarIVStepsRecipe *R) {
            const SCEV *IV = getSCEVExprForVPValue(R->getOperand(0), PSE, L);
            const SCEV *Step = getSCEVExprForVPValue(R->getOperand(1), PSE, L);
            if (isa<SCEVCouldNotCompute>(IV) || isa<SCEVCouldNotCompute>(Step))
              return SE.getCouldNotCompute();
            return SE.getTruncateOrSignExtend(IV, Step->getType());
          })
          .Default(
              [&SE](const VPRecipeBase *) { return SE.getCouldNotCompute(); });

  return PSE.getPredicatedSCEV(Expr);
}
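
// For example, a VPWidenIntOrFpInductionRecipe with start 0 and step 4 in
// loop %l maps to the SCEV {0,+,4}<%l>, and a truncating induction wraps
// that AddRec in a truncate to the narrower destination type.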

bool vputils::isAddressSCEVForCost(const SCEV *Addr, ScalarEvolution &SE,
                                   const Loop *L) {
  // If the address is a SCEVAddExpr, all of its operands must either be loop
  // invariant or a (possibly sign-extended) affine AddRec.
  if (auto *PtrAdd = dyn_cast<SCEVAddExpr>(Addr)) {
    return all_of(PtrAdd->operands(), [&SE, L](const SCEV *Op) {
      return SE.isLoopInvariant(Op, L) ||
             match(Op, m_scev_SExt(m_scev_AffineAddRec(m_SCEV(), m_SCEV()))) ||
             match(Op, m_scev_AffineAddRec(m_SCEV(), m_SCEV()));
    });
  }

  // Otherwise, check if the address is loop invariant or an affine add
  // recurrence.
  return SE.isLoopInvariant(Addr, L) ||
         match(Addr, m_scev_AffineAddRec(m_SCEV(), m_SCEV()));
}
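
// For example, with a loop-invariant %base and loop %l, an address SCEV of
// the form (%base + {0,+,4}<%l>) is accepted: %base is invariant and
// {0,+,4}<%l> is an affine AddRec. A non-affine recurrence such as
// {0,+,1,+,1}<%l> would be rejected.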

/// Returns true if \p Opcode preserves uniformity, i.e., if all operands are
/// uniform, the result will also be uniform.
static bool preservesUniformity(unsigned Opcode) {
  if (Instruction::isBinaryOp(Opcode) || Instruction::isCast(Opcode))
    return true;
  switch (Opcode) {
  case Instruction::Freeze:
  case Instruction::GetElementPtr:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Select:
  case VPInstruction::Broadcast:
  case VPInstruction::LogicalAnd:
  case VPInstruction::PtrAdd:
    return true;
  default:
    return false;
  }
}
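
// For instance, a GEP or select whose operands are all uniform produces the
// same result for every lane; opcodes not listed above (e.g. calls) are
// conservatively treated as not preserving uniformity.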

bool vputils::isSingleScalar(const VPValue *VPV) {
  // A live-in must be uniform across the scope of VPlan.
  if (VPV->isLiveIn())
    return true;

  if (auto *Rep = dyn_cast<VPReplicateRecipe>(VPV)) {
    const VPRegionBlock *RegionOfR = Rep->getRegion();
    // Don't consider recipes in replicate regions as uniform yet; their first
    // lane cannot be accessed when executing the replicate region for other
    // lanes.
    if (RegionOfR && RegionOfR->isReplicator())
      return false;
    return Rep->isSingleScalar() || (preservesUniformity(Rep->getOpcode()) &&
                                     all_of(Rep->operands(), isSingleScalar));
  }
  if (auto *WidenR = dyn_cast<VPWidenRecipe>(VPV)) {
    return preservesUniformity(WidenR->getOpcode()) &&
           all_of(WidenR->operands(), isSingleScalar);
  }
  if (auto *VPI = dyn_cast<VPInstruction>(VPV))
    return VPI->isSingleScalar() || VPI->isVectorToScalar() ||
           (preservesUniformity(VPI->getOpcode()) &&
            all_of(VPI->operands(), isSingleScalar));
  if (auto *RR = dyn_cast<VPReductionRecipe>(VPV))
    return !RR->isPartialReduction();
  if (isa<VPCanonicalIVPHIRecipe, VPDerivedIVRecipe>(VPV))
    return true;
  if (auto *Expr = dyn_cast<VPExpressionRecipe>(VPV))
    return Expr->isSingleScalar();

  // VPExpandSCEVRecipes must be placed in the entry and are always uniform.
  return isa<VPExpandSCEVRecipe>(VPV);
}

bool vputils::isUniformAcrossVFsAndUFs(VPValue *V) {
  // Live-ins are uniform.
  if (V->isLiveIn())
    return true;

  VPRecipeBase *R = V->getDefiningRecipe();
  if (R && V->isDefinedOutsideLoopRegions()) {
    if (match(V->getDefiningRecipe(),
              m_VPInstruction<VPInstruction::CanonicalIVIncrementForPart>(
                  m_VPValue())))
      return false;
    return all_of(R->operands(), isUniformAcrossVFsAndUFs);
  }

  auto *CanonicalIV =
      R->getParent()->getEnclosingLoopRegion()->getCanonicalIV();
  // Canonical IV chain is uniform.
  if (V == CanonicalIV || V == CanonicalIV->getBackedgeValue())
    return true;

  return TypeSwitch<const VPRecipeBase *, bool>(R)
      .Case([](const VPDerivedIVRecipe *R) { return true; })
      .Case([](const VPReplicateRecipe *R) {
        // Be conservative about side-effects, except for the
        // known-side-effecting assumes and stores, which we know will be
        // uniform.
        return R->isSingleScalar() &&
               (!R->mayHaveSideEffects() ||
                isa<AssumeInst, StoreInst>(R->getUnderlyingInstr())) &&
               all_of(R->operands(), isUniformAcrossVFsAndUFs);
      })
      .Case([](const VPWidenRecipe *R) {
        return preservesUniformity(R->getOpcode()) &&
               all_of(R->operands(), isUniformAcrossVFsAndUFs);
      })
      .Case([](const VPInstruction *VPI) {
        return (VPI->isScalarCast() &&
                isUniformAcrossVFsAndUFs(VPI->getOperand(0))) ||
               (preservesUniformity(VPI->getOpcode()) &&
                all_of(VPI->operands(), isUniformAcrossVFsAndUFs));
      })
      .Case([](const VPWidenCastRecipe *R) {
        // A cast is uniform according to its operand.
        return isUniformAcrossVFsAndUFs(R->getOperand(0));
      })
      .Default([](const VPRecipeBase *) {
        // A value is considered non-uniform unless proven otherwise.
        return false;
      });
}

VPBasicBlock *vputils::getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT) {
  auto DepthFirst = vp_depth_first_shallow(Plan.getEntry());
  auto I = find_if(DepthFirst, [&VPDT](VPBlockBase *VPB) {
    return VPBlockUtils::isHeader(VPB, VPDT);
  });
  return I == DepthFirst.end() ? nullptr : cast<VPBasicBlock>(*I);
}

unsigned vputils::getVFScaleFactor(VPRecipeBase *R) {
  if (!R)
    return 1;
  if (auto *RR = dyn_cast<VPReductionPHIRecipe>(R))
    return RR->getVFScaleFactor();
  if (auto *RR = dyn_cast<VPReductionRecipe>(R))
    return RR->getVFScaleFactor();
  if (auto *ER = dyn_cast<VPExpressionRecipe>(R))
    return ER->getVFScaleFactor();
  assert(
      !match(R, m_VPInstruction<VPInstruction::ReductionStartVector>(
                    m_VPValue(), m_VPValue(), m_VPValue())) &&
      "getting scaling factor of reduction-start-vector not implemented yet");
  return 1;
}

std::optional<VPValue *>
vputils::getRecipesForUncountableExit(VPlan &Plan,
                                      SmallVectorImpl<VPRecipeBase *> &Recipes,
                                      SmallVectorImpl<VPRecipeBase *> &GEPs) {
  // Given a VPlan like the following (just including the recipes contributing
  // to loop control exiting here, not the actual work), we're looking to match
  // the recipes contributing to the uncountable exit condition comparison
  // (here, vp<%4>) back to either live-ins or the address nodes for the load
  // used as part of the uncountable exit comparison so that we can copy them
  // to a preheader and rotate the address in the loop to the next vector
  // iteration.
  //
  // Currently, the address of the load is restricted to a GEP with 2 operands
  // and a live-in base address. This constraint may be relaxed later.
  //
  // VPlan ' for UF>=1' {
  // Live-in vp<%0> = VF
  // Live-in ir<64> = original trip-count
  //
  // entry:
  // Successor(s): preheader, vector.ph
  //
  // vector.ph:
  // Successor(s): vector loop
  //
  // <x1> vector loop: {
  //   vector.body:
  //     EMIT vp<%2> = CANONICAL-INDUCTION ir<0>
  //     vp<%3> = SCALAR-STEPS vp<%2>, ir<1>, vp<%0>
  //     CLONE ir<%ee.addr> = getelementptr ir<0>, vp<%3>
  //     WIDEN ir<%ee.load> = load ir<%ee.addr>
  //     WIDEN vp<%4> = icmp eq ir<%ee.load>, ir<0>
  //     EMIT vp<%5> = any-of vp<%4>
  //     EMIT vp<%6> = add vp<%2>, vp<%0>
  //     EMIT vp<%7> = icmp eq vp<%6>, ir<64>
  //     EMIT branch-on-two-conds vp<%5>, vp<%7>
  //   No successors
  // }
  // Successor(s): early.exit, middle.block
  //
  // middle.block:
  // Successor(s): preheader
  //
  // preheader:
  // No successors
  // }

  // Find the uncountable loop exit condition.
  auto *Region = Plan.getVectorLoopRegion();
  VPValue *UncountableCondition = nullptr;
  if (!match(Region->getExitingBasicBlock()->getTerminator(),
             m_BranchOnTwoConds(m_AnyOf(m_VPValue(UncountableCondition)),
                                m_VPValue())))
    return std::nullopt;

  SmallVector<VPValue *, 4> Worklist;
  Worklist.push_back(UncountableCondition);
  while (!Worklist.empty()) {
    VPValue *V = Worklist.pop_back_val();

    // Any value defined outside the loop does not need to be copied.
    if (V->isDefinedOutsideLoopRegions())
      continue;

    // FIXME: Remove the single user restriction; it's here because we're
    //        starting with the simplest set of loops we can, and multiple
    //        users means needing to add PHI nodes in the transform.
    if (V->getNumUsers() > 1)
      return std::nullopt;

    VPValue *Op1, *Op2;
    // Walk back through recipes until we find at least one load from memory.
    if (match(V, m_ICmp(m_VPValue(Op1), m_VPValue(Op2)))) {
      Worklist.push_back(Op1);
      Worklist.push_back(Op2);
      Recipes.push_back(V->getDefiningRecipe());
    } else if (auto *Load = dyn_cast<VPWidenLoadRecipe>(V)) {
      // Reject masked loads for the time being; they make the exit condition
      // more complex.
      if (Load->isMasked())
        return std::nullopt;

      // Restrict to a GEP with two operands and a live-in base address (see
      // the comment above).
      VPValue *GEP = Load->getAddr();
      VPValue *Base, *Idx;
      if (!match(GEP, m_GetElementPtr(m_VPValue(Base), m_VPValue(Idx))) ||
          !Base->isLiveIn())
        return std::nullopt;

      Recipes.push_back(Load);
      Recipes.push_back(GEP->getDefiningRecipe());
      GEPs.push_back(GEP->getDefiningRecipe());
    } else
      return std::nullopt;
  }

  return UncountableCondition;
}

bool VPBlockUtils::isHeader(const VPBlockBase *VPB,
                            const VPDominatorTree &VPDT) {
  auto *VPBB = dyn_cast<VPBasicBlock>(VPB);
  if (!VPBB)
    return false;

  // If VPBB is in a region R, VPBB is a loop header if R is a loop region with
  // VPBB as its entry, i.e., free of predecessors.
  if (auto *R = VPBB->getParent())
    return !R->isReplicator() && !VPBB->hasPredecessors();

  // A header dominates its second predecessor (the latch), with the other
  // predecessor being the preheader.
  return VPB->getPredecessors().size() == 2 &&
         VPDT.dominates(VPB, VPB->getPredecessors()[1]);
}
566
568 const VPDominatorTree &VPDT) {
569 // A latch has a header as its second successor, with its other successor
570 // leaving the loop. A preheader OTOH has a header as its first (and only)
571 // successor.
572 return VPB->getNumSuccessors() == 2 &&
573 VPBlockUtils::isHeader(VPB->getSuccessors()[1], VPDT);
574}
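
// For example, in the plain-CFG form
//
//   preheader -> header <-> latch -> exit
//
// the header's predecessors are [preheader, latch] and it dominates the
// latch, while the latch's successors are [exit, header].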

std::optional<MemoryLocation>
vputils::getMemoryLocation(const VPRecipeBase &R) {
  auto *M = dyn_cast<VPIRMetadata>(&R);
  if (!M)
    return std::nullopt;
  MemoryLocation Loc;
  // Populate noalias metadata from VPIRMetadata.
  if (MDNode *NoAliasMD = M->getMetadata(LLVMContext::MD_noalias))
    Loc.AATags.NoAlias = NoAliasMD;
  if (MDNode *AliasScopeMD = M->getMetadata(LLVMContext::MD_alias_scope))
    Loc.AATags.Scope = AliasScopeMD;
  return Loc;
}