LLVM 22.0.0git
VPlanAnalysis.cpp
1//===- VPlanAnalysis.cpp - Various Analyses working on VPlan ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "VPlanAnalysis.h"
10#include "VPlan.h"
11#include "VPlanCFG.h"
12#include "VPlanDominatorTree.h"
13#include "VPlanHelpers.h"
14#include "VPlanPatternMatch.h"
16#include "llvm/ADT/TypeSwitch.h"
19#include "llvm/IR/Instruction.h"
21
22using namespace llvm;
23using namespace VPlanPatternMatch;
24
25#define DEBUG_TYPE "vplan"
26
28 if (auto LoopRegion = Plan.getVectorLoopRegion()) {
29 if (const auto *CanIV = dyn_cast<VPCanonicalIVPHIRecipe>(
30 &LoopRegion->getEntryBasicBlock()->front())) {
31 CanonicalIVTy = CanIV->getScalarType();
32 return;
33 }
34 }
35
36 // If there's no canonical IV, retrieve the type from the trip count
37 // expression.
38 auto *TC = Plan.getTripCount();
39 if (TC->isLiveIn()) {
40 CanonicalIVTy = TC->getLiveInIRValue()->getType();
41 return;
42 }
43 CanonicalIVTy = cast<VPExpandSCEVRecipe>(TC)->getSCEV()->getType();
44}
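// Illustrative sketch (not part of VPlanAnalysis.cpp): once a VPlan exists, a
// transform can construct a VPTypeAnalysis from it and query scalar types of
// arbitrary VPValues. The helper name below is hypothetical and only shows the
// intended use of the constructor above.
static void debugPrintTripCountType(VPlan &Plan) {
  VPTypeAnalysis TypeInfo(Plan);
  // Whether the trip count is a live-in IR value or a VPExpandSCEVRecipe,
  // inferScalarType resolves it consistently with the canonical IV type
  // cached by the constructor.
  Type *TCTy = TypeInfo.inferScalarType(Plan.getTripCount());
  LLVM_DEBUG(dbgs() << "LV: trip count type: " << *TCTy << '\n');
  (void)TCTy;
}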
45
46Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPBlendRecipe *R) {
47 Type *ResTy = inferScalarType(R->getIncomingValue(0));
48 for (unsigned I = 1, E = R->getNumIncomingValues(); I != E; ++I) {
49 VPValue *Inc = R->getIncomingValue(I);
50 assert(inferScalarType(Inc) == ResTy &&
51 "different types inferred for different incoming values");
52 CachedTypes[Inc] = ResTy;
53 }
54 return ResTy;
55}
56
57Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) {
58 // Set the result type from the first operand, check if the types for all
59 // other operands match and cache them.
60 auto SetResultTyFromOp = [this, R]() {
61 Type *ResTy = inferScalarType(R->getOperand(0));
62 for (unsigned Op = 1; Op != R->getNumOperands(); ++Op) {
63 VPValue *OtherV = R->getOperand(Op);
64 assert(inferScalarType(OtherV) == ResTy &&
65 "different types inferred for different operands");
66 CachedTypes[OtherV] = ResTy;
67 }
68 return ResTy;
69 };
70
71 unsigned Opcode = R->getOpcode();
72 if (Instruction::isBinaryOp(Opcode) || Instruction::isUnaryOp(Opcode))
73 return SetResultTyFromOp();
74
75 switch (Opcode) {
76 case Instruction::ExtractElement:
77 case Instruction::Freeze:
80 return inferScalarType(R->getOperand(0));
81 case Instruction::Select: {
82 Type *ResTy = inferScalarType(R->getOperand(1));
83 VPValue *OtherV = R->getOperand(2);
84 assert(inferScalarType(OtherV) == ResTy &&
85 "different types inferred for different operands");
86 CachedTypes[OtherV] = ResTy;
87 return ResTy;
88 }
89 case Instruction::ICmp:
90 case Instruction::FCmp:
92 assert(inferScalarType(R->getOperand(0)) ==
93 inferScalarType(R->getOperand(1)) &&
94 "different types inferred for different operands");
95 return IntegerType::get(Ctx, 1);
97 return inferScalarType(R->getOperand(1));
100 return inferScalarType(R->getOperand(0));
101 }
103 return Type::getIntNTy(Ctx, 32);
104 case Instruction::PHI:
105 // Infer the type of the first operand only, as other operands of header
106 // phis may lead to infinite recursion.
107 return inferScalarType(R->getOperand(0));
116 return SetResultTyFromOp();
118 return inferScalarType(R->getOperand(1));
121 return Type::getIntNTy(Ctx, 64);
124 return inferScalarType(R->getOperand(0));
126 return inferScalarType(R->getOperand(0));
128 assert(inferScalarType(R->getOperand(0))->isIntegerTy(1) &&
129 inferScalarType(R->getOperand(1))->isIntegerTy(1) &&
130 "LogicalAnd operands should be bool");
131 return IntegerType::get(Ctx, 1);
135 // Return the type based on first operand.
136 return inferScalarType(R->getOperand(0));
139 return Type::getVoidTy(Ctx);
140 default:
141 break;
142 }
143 // Type inference not implemented for opcode.
144 LLVM_DEBUG({
145 dbgs() << "LV: Found unhandled opcode for: ";
146 R->getVPSingleValue()->dump();
147 });
148 llvm_unreachable("Unhandled opcode!");
149}
150
151Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenRecipe *R) {
152 unsigned Opcode = R->getOpcode();
153 if (Instruction::isBinaryOp(Opcode) || Instruction::isShift(Opcode) ||
154 Instruction::isBitwiseLogicOp(Opcode)) {
155 Type *ResTy = inferScalarType(R->getOperand(0));
156 assert(ResTy == inferScalarType(R->getOperand(1)) &&
157 "types for both operands must match for binary op");
158 CachedTypes[R->getOperand(1)] = ResTy;
159 return ResTy;
160 }
161
162 switch (Opcode) {
163 case Instruction::ICmp:
164 case Instruction::FCmp:
165 return IntegerType::get(Ctx, 1);
166 case Instruction::FNeg:
167 case Instruction::Freeze:
168 return inferScalarType(R->getOperand(0));
169 case Instruction::ExtractValue: {
170 assert(R->getNumOperands() == 2 && "expected single level extractvalue");
171 auto *StructTy = cast<StructType>(inferScalarType(R->getOperand(0)));
172 auto *CI = cast<ConstantInt>(R->getOperand(1)->getLiveInIRValue());
173 return StructTy->getTypeAtIndex(CI->getZExtValue());
174 }
175 default:
176 break;
177 }
178
179 // Type inference not implemented for opcode.
180 LLVM_DEBUG({
181 dbgs() << "LV: Found unhandled opcode for: ";
182 R->getVPSingleValue()->dump();
183 });
184 llvm_unreachable("Unhandled opcode!");
185}
186
187Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenCallRecipe *R) {
188 auto &CI = *cast<CallInst>(R->getUnderlyingInstr());
189 return CI.getType();
190}
191
192Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenMemoryRecipe *R) {
193 assert((isa<VPWidenLoadRecipe>(R) || isa<VPWidenLoadEVLRecipe>(R)) &&
194 "Store recipes should not define any values");
195 return cast<LoadInst>(&R->getIngredient())->getType();
196}
197
198Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenSelectRecipe *R) {
199 Type *ResTy = inferScalarType(R->getOperand(1));
200 VPValue *OtherV = R->getOperand(2);
201 assert(inferScalarType(OtherV) == ResTy &&
202 "different types inferred for different operands");
203 CachedTypes[OtherV] = ResTy;
204 return ResTy;
205}
206
207Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPReplicateRecipe *R) {
208 unsigned Opcode = R->getUnderlyingInstr()->getOpcode();
209
210 if (Instruction::isBinaryOp(Opcode) || Instruction::isShift(Opcode) ||
211 Instruction::isBitwiseLogicOp(Opcode)) {
212 Type *ResTy = inferScalarType(R->getOperand(0));
213 assert(ResTy == inferScalarType(R->getOperand(1)) &&
214 "inferred types for operands of binary op don't match");
215 CachedTypes[R->getOperand(1)] = ResTy;
216 return ResTy;
217 }
218
219 if (Instruction::isCast(Opcode))
220 return R->getUnderlyingInstr()->getType();
221
222 switch (Opcode) {
223 case Instruction::Call: {
224 unsigned CallIdx = R->getNumOperands() - (R->isPredicated() ? 2 : 1);
225 return cast<Function>(R->getOperand(CallIdx)->getLiveInIRValue())
226 ->getReturnType();
227 }
228 case Instruction::Select: {
229 Type *ResTy = inferScalarType(R->getOperand(1));
230 assert(ResTy == inferScalarType(R->getOperand(2)) &&
231 "inferred types for operands of select op don't match");
232 CachedTypes[R->getOperand(2)] = ResTy;
233 return ResTy;
234 }
235 case Instruction::ICmp:
236 case Instruction::FCmp:
237 return IntegerType::get(Ctx, 1);
238 case Instruction::Alloca:
239 case Instruction::ExtractValue:
240 return R->getUnderlyingInstr()->getType();
241 case Instruction::Freeze:
242 case Instruction::FNeg:
243 case Instruction::GetElementPtr:
244 return inferScalarType(R->getOperand(0));
245 case Instruction::Load:
246 return cast<LoadInst>(R->getUnderlyingInstr())->getType();
247 case Instruction::Store:
248 // FIXME: VPReplicateRecipes with store opcodes still define a result
249 // VPValue, so we need to handle them here. Remove the code here once this
250 // is modeled accurately in VPlan.
251 return Type::getVoidTy(Ctx);
252 default:
253 break;
254 }
255 // Type inference not implemented for opcode.
256 LLVM_DEBUG({
257 dbgs() << "LV: Found unhandled opcode for: ";
258 R->getVPSingleValue()->dump();
259 });
260 llvm_unreachable("Unhandled opcode");
261}
262
263Type *VPTypeAnalysis::inferScalarType(const VPValue *V) {
264 if (Type *CachedTy = CachedTypes.lookup(V))
265 return CachedTy;
266
267 if (V->isLiveIn()) {
268 if (auto *IRValue = V->getLiveInIRValue())
269 return IRValue->getType();
270 // All VPValues without any underlying IR value (like the vector trip count
271 // or the backedge-taken count) have the same type as the canonical IV.
272 return CanonicalIVTy;
273 }
274
275 Type *ResultTy =
276 TypeSwitch<const VPRecipeBase *, Type *>(V->getDefiningRecipe())
280 [this](const auto *R) {
281 // Handle header phi recipes, except VPWidenIntOrFpInduction,
282 // which needs special handling due to it possibly being truncated.
283 // TODO: consider inferring/caching type of siblings, e.g.,
284 // backedge value, here and in cases below.
285 return inferScalarType(R->getStartValue());
286 })
287 .Case<VPWidenIntOrFpInductionRecipe, VPDerivedIVRecipe>(
288 [](const auto *R) { return R->getScalarType(); })
292 [this](const VPRecipeBase *R) {
293 return inferScalarType(R->getOperand(0));
294 })
295 // VPInstructionWithType must be handled before VPInstruction.
298 [](const auto *R) { return R->getResultType(); })
301 [this](const auto *R) { return inferScalarTypeForRecipe(R); })
302 .Case<VPInterleaveBase>([V](const auto *R) {
303 // TODO: Use info from interleave group.
304 return V->getUnderlyingValue()->getType();
305 })
306 .Case<VPExpandSCEVRecipe>([](const VPExpandSCEVRecipe *R) {
307 return R->getSCEV()->getType();
308 })
309 .Case<VPReductionRecipe>([this](const auto *R) {
310 return inferScalarType(R->getChainOp());
311 })
312 .Case<VPExpressionRecipe>([this](const auto *R) {
313 return inferScalarType(R->getOperandOfResultType());
314 });
315
316 assert(ResultTy && "could not infer type for the given VPValue");
317 CachedTypes[V] = ResultTy;
318 return ResultTy;
319}
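// Illustrative sketch (not part of VPlanAnalysis.cpp): the dispatch above is
// built on llvm::TypeSwitch, which tries dyn_cast against each listed Case<>
// in order and invokes the first matching callable. A minimal, self-contained
// example of the same pattern; classifyRecipeForDebug is a hypothetical helper
// used only for illustration.
static const char *classifyRecipeForDebug(const VPRecipeBase *R) {
  return TypeSwitch<const VPRecipeBase *, const char *>(R)
      .Case<VPWidenCallRecipe>([](const auto *) { return "widened call"; })
      .Case<VPWidenMemoryRecipe>([](const auto *) { return "widened memory"; })
      .Case<VPReplicateRecipe>([](const auto *) { return "replicated"; })
      .Default([](const VPRecipeBase *) { return "other recipe"; });
}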
320
321void llvm::collectEphemeralRecipesForVPlan(
322 VPlan &Plan, DenseSet<VPRecipeBase *> &EphRecipes) {
323 // First, collect seed recipes which are operands of assumes.
324 SmallVector<VPRecipeBase *> Worklist;
325 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
326 vp_depth_first_deep(Plan.getEntry()))) {
327 for (VPRecipeBase &R : *VPBB) {
328 auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
329 if (!RepR || !match(RepR, m_Intrinsic<Intrinsic::assume>()))
330 continue;
331 Worklist.push_back(RepR);
332 EphRecipes.insert(RepR);
333 }
334 }
335
336 // Process operands of candidates in worklist and add them to the set of
337 // ephemeral recipes, if they don't have side-effects and are only used by
338 // other ephemeral recipes.
339 while (!Worklist.empty()) {
340 VPRecipeBase *Cur = Worklist.pop_back_val();
341 for (VPValue *Op : Cur->operands()) {
342 auto *OpR = Op->getDefiningRecipe();
343 if (!OpR || OpR->mayHaveSideEffects() || EphRecipes.contains(OpR))
344 continue;
345 if (any_of(Op->users(), [EphRecipes](VPUser *U) {
346 auto *UR = dyn_cast<VPRecipeBase>(U);
347 return !UR || !EphRecipes.contains(UR);
348 }))
349 continue;
350 EphRecipes.insert(OpR);
351 Worklist.push_back(OpR);
352 }
353 }
354}
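// Illustrative sketch (not part of VPlanAnalysis.cpp): a caller populates the
// set once per plan and then queries it, e.g. to skip assume-only computation
// when estimating costs. countEphemeralRecipes is a hypothetical helper used
// only for illustration.
static unsigned countEphemeralRecipes(VPlan &Plan) {
  DenseSet<VPRecipeBase *> EphRecipes;
  collectEphemeralRecipesForVPlan(Plan, EphRecipes);
  // Every recipe in EphRecipes feeds, directly or transitively, only
  // llvm.assume calls and has no side effects, so it produces no real code.
  return EphRecipes.size();
}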
355
358
359bool VPDominatorTree::properlyDominates(const VPRecipeBase *A,
360 const VPRecipeBase *B) {
361 if (A == B)
362 return false;
363
364 auto LocalComesBefore = [](const VPRecipeBase *A, const VPRecipeBase *B) {
365 for (auto &R : *A->getParent()) {
366 if (&R == A)
367 return true;
368 if (&R == B)
369 return false;
370 }
371 llvm_unreachable("recipe not found");
372 };
373 const VPBlockBase *ParentA = A->getParent();
374 const VPBlockBase *ParentB = B->getParent();
375 if (ParentA == ParentB)
376 return LocalComesBefore(A, B);
377
378#ifndef NDEBUG
379 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
380 VPRegionBlock *Region = R->getRegion();
381 if (Region && Region->isReplicator()) {
382 assert(Region->getNumSuccessors() == 1 &&
383 Region->getNumPredecessors() == 1 && "Expected SESE region!");
384 assert(R->getParent()->size() == 1 &&
385 "A recipe in an original replicator region must be the only "
386 "recipe in its block");
387 return Region;
388 }
389 return nullptr;
390 };
391 assert(!GetReplicateRegion(const_cast<VPRecipeBase *>(A)) &&
392 "No replicate regions expected at this point");
393 assert(!GetReplicateRegion(const_cast<VPRecipeBase *>(B)) &&
394 "No replicate regions expected at this point");
395#endif
396 return Base::properlyDominates(ParentA, ParentB);
397}
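// Illustrative sketch (not part of VPlanAnalysis.cpp): a transform that wants
// to hoist or reuse a definition at a new point typically checks that the
// defining recipe dominates every user of the value. dominatesAllUsers is a
// hypothetical helper built only on the properlyDominates overload above and
// on VPValue's user list.
static bool dominatesAllUsers(VPDominatorTree &VPDT, const VPRecipeBase *Def,
                              VPValue *DefVal) {
  return all_of(DefVal->users(), [&](VPUser *U) {
    auto *UR = dyn_cast<VPRecipeBase>(U);
    return UR && VPDT.properlyDominates(Def, UR);
  });
}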
398
399bool VPRegisterUsage::exceedsMaxNumRegs(const TargetTransformInfo &TTI,
400 unsigned OverrideMaxNumRegs) const {
401 return any_of(MaxLocalUsers, [&TTI, &OverrideMaxNumRegs](auto &LU) {
402 return LU.second > (OverrideMaxNumRegs > 0
403 ? OverrideMaxNumRegs
404 : TTI.getNumberOfRegisters(LU.first));
405 });
406}
407
408SmallVector<VPRegisterUsage, 8> llvm::calculateRegisterUsageForPlan(
409 VPlan &Plan, ArrayRef<ElementCount> VFs, const TargetTransformInfo &TTI,
410 const SmallPtrSetImpl<const Value *> &ValuesToIgnore) {
411 // Each 'key' in the map opens a new interval. The values
412 // of the map are the index of the 'last seen' usage of the
413 // VPValue that is the key.
415
416 // Maps indices to recipes.
418 // Marks the end of each interval.
419 IntervalMap EndPoint;
420 // Saves the list of VPValues that are used in the loop.
422 // Saves the list of values that are used in the loop but are defined outside
423 // the loop (not including non-recipe values such as arguments and
424 // constants).
425 SmallSetVector<VPValue *, 8> LoopInvariants;
426 LoopInvariants.insert(&Plan.getVectorTripCount());
427
428 // We scan the loop in topological order and assign a number to each
429 // recipe. We use RPO to ensure that defs are met before their users. We
430 // assume that each recipe that has in-loop users starts an interval. We
431 // record every time an in-loop value is used, so we have a list of the
432 // first occurrence of each recipe and the last occurrence of each VPValue.
433 VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
435 LoopRegion);
437 if (!VPBB->getParent())
438 break;
439 for (VPRecipeBase &R : *VPBB) {
440 Idx2Recipe.push_back(&R);
441
442 // Save the end location of each USE.
443 for (VPValue *U : R.operands()) {
444 auto *DefR = U->getDefiningRecipe();
445
446 // Ignore non-recipe values such as arguments, constants, etc.
447 // FIXME: Might need some motivation why these values are ignored. If
448 // for example an argument is used inside the loop it will increase the
449 // register pressure (so shouldn't we add it to LoopInvariants).
450 if (!DefR && (!U->getLiveInIRValue() ||
451 !isa<Instruction>(U->getLiveInIRValue())))
452 continue;
453
454 // If this recipe is outside the loop then record it and continue.
455 if (!DefR) {
456 LoopInvariants.insert(U);
457 continue;
458 }
459
460 // Overwrite previous end points.
461 EndPoint[U] = Idx2Recipe.size();
462 Ends.insert(U);
463 }
464 }
465 if (VPBB == LoopRegion->getExiting()) {
466 // VPWidenIntOrFpInductionRecipes are used implicitly at the end of the
467 // exiting block, where their increment will get materialized eventually.
468 for (auto &R : LoopRegion->getEntryBasicBlock()->phis()) {
469 if (auto *WideIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&R)) {
470 EndPoint[WideIV] = Idx2Recipe.size();
471 Ends.insert(WideIV);
472 }
473 }
474 }
475 }
476
477 // Saves the list of intervals that end with the index in 'key'.
478 using VPValueList = SmallVector<VPValue *, 2>;
480
481 // Next, we transpose the EndPoints into a multi map that holds the list of
482 // intervals that *end* at a specific location.
483 for (auto &Interval : EndPoint)
484 TransposeEnds[Interval.second].push_back(Interval.first);
485
486 SmallPtrSet<VPValue *, 8> OpenIntervals;
487 SmallVector<VPRegisterUsage, 8> RUs(VFs.size());
488 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
489
490 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
491
492 VPTypeAnalysis TypeInfo(Plan);
493
494 const auto &TTICapture = TTI;
495 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
496 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty) ||
497 (VF.isScalable() &&
498 !TTICapture.isElementTypeLegalForScalableVector(Ty)))
499 return 0;
500 return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
501 };
502
503 // We scan the recipes linearly and record each time a new interval starts
504 // by placing it in a set. If we find this value in TransposeEnds then we
505 // remove it from the set. The max register usage is the maximum register
506 // usage of the recipes in the set.
507 for (unsigned int Idx = 0, Sz = Idx2Recipe.size(); Idx < Sz; ++Idx) {
508 VPRecipeBase *R = Idx2Recipe[Idx];
509
510 // Remove all of the VPValues that end at this location.
511 VPValueList &List = TransposeEnds[Idx];
512 for (VPValue *ToRemove : List)
513 OpenIntervals.erase(ToRemove);
514
515 // Ignore recipes that are never used within the loop and do not have side
516 // effects.
517 if (none_of(R->definedValues(),
518 [&Ends](VPValue *Def) { return Ends.count(Def); }) &&
519 !R->mayHaveSideEffects())
520 continue;
521
522 // Skip recipes for ignored values.
523 // TODO: Should mark recipes for ephemeral values that cannot be removed
524 // explicitly in VPlan.
525 if (isa<VPSingleDefRecipe>(R) &&
526 ValuesToIgnore.contains(
527 cast<VPSingleDefRecipe>(R)->getUnderlyingValue()))
528 continue;
529
530 // For each VF find the maximum usage of registers.
531 for (unsigned J = 0, E = VFs.size(); J < E; ++J) {
532 // Count the number of registers used, per register class, given all open
533 // intervals.
534 // Note that elements in this SmallMapVector will be default constructed
535 // as 0. So we can use "RegUsage[ClassID] += n" in the code below even if
536 // there is no previous entry for ClassID.
537 SmallMapVector<unsigned, unsigned, 4> RegUsage;
538
539 for (auto *VPV : OpenIntervals) {
540 // Skip artificial values or values that weren't present in the original
541 // loop.
542 // TODO: Remove skipping values that weren't present in the original
543 // loop after removing the legacy
544 // LoopVectorizationCostModel::calculateRegisterUsage
546 VPBranchOnMaskRecipe>(VPV) ||
548 continue;
549
550 if (VFs[J].isScalar() ||
555 (cast<VPReductionPHIRecipe>(VPV))->isInLoop())) {
556 unsigned ClassID =
557 TTI.getRegisterClassForType(false, TypeInfo.inferScalarType(VPV));
558 // FIXME: The target might use more than one register for the type
559 // even in the scalar case.
560 RegUsage[ClassID] += 1;
561 } else {
562 // The output from scaled phis and scaled reductions actually has
563 // fewer lanes than the VF.
564 unsigned ScaleFactor =
565 vputils::getVFScaleFactor(VPV->getDefiningRecipe());
566 ElementCount VF = VFs[J];
567 if (ScaleFactor > 1) {
568 VF = VFs[J].divideCoefficientBy(ScaleFactor);
569 LLVM_DEBUG(dbgs() << "LV(REG): Scaled down VF from " << VFs[J]
570 << " to " << VF << " for " << *R << "\n";);
571 }
572
573 Type *ScalarTy = TypeInfo.inferScalarType(VPV);
574 unsigned ClassID = TTI.getRegisterClassForType(true, ScalarTy);
575 RegUsage[ClassID] += GetRegUsage(ScalarTy, VF);
576 }
577 }
578
579 for (const auto &Pair : RegUsage) {
580 auto &Entry = MaxUsages[J][Pair.first];
581 Entry = std::max(Entry, Pair.second);
582 }
583 }
584
585 LLVM_DEBUG(dbgs() << "LV(REG): At #" << Idx << " Interval # "
586 << OpenIntervals.size() << '\n');
587
588 // Add used VPValues defined by the current recipe to the list of open
589 // intervals.
590 for (VPValue *DefV : R->definedValues())
591 if (Ends.contains(DefV))
592 OpenIntervals.insert(DefV);
593 }
594
595 // We also search for values that are defined outside the loop but are used
596 // inside the loop. We need this number separately from the max-interval
597 // usage number because, when we unroll, loop-invariant values do not take
598 // more registers.
600 for (unsigned Idx = 0, End = VFs.size(); Idx < End; ++Idx) {
601 // Note that elements in this SmallMapVector will be default constructed
602 // as 0. So we can use "Invariant[ClassID] += n" in the code below even if
603 // there is no previous entry for ClassID.
604 SmallMapVector<unsigned, unsigned, 4> Invariant;
605
606 for (auto *In : LoopInvariants) {
607 // FIXME: The target might use more than one register for the type
608 // even in the scalar case.
609 bool IsScalar = vputils::onlyScalarValuesUsed(In);
610
611 ElementCount VF = IsScalar ? ElementCount::getFixed(1) : VFs[Idx];
612 unsigned ClassID = TTI.getRegisterClassForType(
613 VF.isVector(), TypeInfo.inferScalarType(In));
614 Invariant[ClassID] += GetRegUsage(TypeInfo.inferScalarType(In), VF);
615 }
616
617 LLVM_DEBUG({
618 dbgs() << "LV(REG): VF = " << VFs[Idx] << '\n';
619 dbgs() << "LV(REG): Found max usage: " << MaxUsages[Idx].size()
620 << " item\n";
621 for (const auto &pair : MaxUsages[Idx]) {
622 dbgs() << "LV(REG): RegisterClass: "
623 << TTI.getRegisterClassName(pair.first) << ", " << pair.second
624 << " registers\n";
625 }
626 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
627 << " item\n";
628 for (const auto &pair : Invariant) {
629 dbgs() << "LV(REG): RegisterClass: "
630 << TTI.getRegisterClassName(pair.first) << ", " << pair.second
631 << " registers\n";
632 }
633 });
634
634 VPRegisterUsage RU;
635 RU.LoopInvariantRegs = Invariant;
636 RU.MaxLocalUsers = MaxUsages[Idx];
637 RUs[Idx] = RU;
638 }
639
640 return RUs;
641}
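// Illustrative sketch (not part of VPlanAnalysis.cpp): stripped of recipes,
// register classes and vectorization factors, the scan above is a linear-scan
// live-interval count. Each tracked value occupies an index range from its
// definition to its last in-loop use, and the quantity of interest is the
// maximum number of ranges live at any single point. The names below are
// hypothetical and the quadratic loop is kept deliberately simple.
namespace {
struct ToyInterval {
  unsigned Def;     // index of the defining recipe
  unsigned LastUse; // index of its last in-loop use
};
} // namespace

static unsigned maxSimultaneouslyLive(ArrayRef<ToyInterval> Intervals,
                                      unsigned NumIndices) {
  unsigned Max = 0;
  for (unsigned Idx = 0; Idx != NumIndices; ++Idx) {
    unsigned Open = 0;
    for (const ToyInterval &I : Intervals)
      if (I.Def <= Idx && Idx <= I.LastUse)
        ++Open;
    Max = std::max(Max, Open);
  }
  return Max;
}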