LLVM 23.0.0git
VPlanUnroll.cpp
1//===-- VPlanUnroll.cpp - VPlan unroller ----------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements explicit unrolling for VPlans.
11///
12//===----------------------------------------------------------------------===//
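// Illustrative note (not part of the upstream sources; names are made up):
// unrolling by UF keeps the part-0 recipes in place and adds UF-1 remapped
// clones. With UF = 2, a use-def chain such as
//   %a = widen-load %p
//   %b = widen-add %a, %x
// is conceptually extended with
//   %a.1 = widen-load %p.1
//   %b.1 = widen-add %a.1, %x
// while values uniform across parts (here %x) are reused for all parts.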
13
14#include "VPRecipeBuilder.h"
15#include "VPlan.h"
16#include "VPlanAnalysis.h"
17#include "VPlanCFG.h"
18#include "VPlanHelpers.h"
19#include "VPlanPatternMatch.h"
20#include "VPlanTransforms.h"
21#include "VPlanUtils.h"
22#include "llvm/ADT/PostOrderIterator.h"
23#include "llvm/ADT/STLExtras.h"
24#include "llvm/ADT/ScopeExit.h"
25#include "llvm/Analysis/IVDescriptors.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/Intrinsics.h"
28
29using namespace llvm;
30using namespace llvm::VPlanPatternMatch;
31
32namespace {
33
34/// Helper to hold state needed for unrolling. It holds the Plan to unroll by
35/// UF. It also holds copies of VPValues across UF-1 unroll parts to facilitate
36/// the unrolling transformation, where the original VPValues are retained for
37/// part zero.
38class UnrollState {
39 /// Plan to unroll.
40 VPlan &Plan;
41 /// Unroll factor to unroll by.
42 const unsigned UF;
43 /// Analysis for types.
44 VPTypeAnalysis TypeInfo;
45
46 /// Unrolling may create recipes that should not be unrolled themselves.
47 /// Those are tracked in ToSkip.
48 SmallPtrSet<VPRecipeBase *, 8> ToSkip;
49
50 // Associate with each VPValue of part 0 its unrolled instances of parts 1,
51 // ..., UF-1.
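  // For example (illustrative), with UF = 3 the two clones %v.1 and %v.2 made
  // for a part-0 value %v are recorded as VPV2Parts[%v] = {%v.1, %v.2}, so a
  // later lookup for part 2 yields %v.2.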
52 DenseMap<VPValue *, SmallVector<VPValue *>> VPV2Parts;
53
54 /// Unroll replicate region \p VPR by cloning the region UF - 1 times.
55 void unrollReplicateRegionByUF(VPRegionBlock *VPR);
56
57 /// Unroll recipe \p R by cloning it UF - 1 times, unless it is uniform across
58 /// all parts.
59 void unrollRecipeByUF(VPRecipeBase &R);
60
61 /// Unroll header phi recipe \p R. How exactly the recipe gets unrolled
62 /// depends on the concrete header phi. Inserts newly created recipes at \p
63 /// InsertPtForPhi.
64 void unrollHeaderPHIByUF(VPHeaderPHIRecipe *R,
65 VPBasicBlock::iterator InsertPtForPhi);
66
67 /// Unroll a widen induction recipe \p IV. This introduces recipes to compute
68 /// the induction steps for each part.
69 void unrollWidenInductionByUF(VPWidenInductionRecipe *IV,
70 VPBasicBlock::iterator InsertPtForPhi);
71
72 VPValue *getConstantInt(unsigned Part) {
73 Type *CanIVIntTy = Plan.getVectorLoopRegion()->getCanonicalIVType();
74 return Plan.getConstantInt(CanIVIntTy, Part);
75 }
76
77public:
78 UnrollState(VPlan &Plan, unsigned UF) : Plan(Plan), UF(UF), TypeInfo(Plan) {}
79
80 void unrollBlock(VPBlockBase *VPB);
81
82 VPValue *getValueForPart(VPValue *V, unsigned Part) {
83 if (Part == 0 || V->isLiveIn())
84 return V;
85 assert((VPV2Parts.contains(V) && VPV2Parts[V].size() >= Part) &&
86 "accessed value does not exist");
87 return VPV2Parts[V][Part - 1];
88 }
89
90 /// Given a single original recipe \p OrigR (of part zero), and its copy \p
91 /// CopyR for part \p Part, map every VPValue defined by \p OrigR to its
92 /// corresponding VPValue defined by \p CopyR.
93 void addRecipeForPart(VPRecipeBase *OrigR, VPRecipeBase *CopyR,
94 unsigned Part) {
95 for (const auto &[Idx, VPV] : enumerate(OrigR->definedValues())) {
96 const auto &[V, _] = VPV2Parts.try_emplace(VPV);
97 assert(V->second.size() == Part - 1 && "earlier parts not set");
98 V->second.push_back(CopyR->getVPValue(Idx));
99 }
100 }
101
102 /// Given a uniform recipe \p R, add it for all parts.
103 void addUniformForAllParts(VPSingleDefRecipe *R) {
104 const auto &[V, Inserted] = VPV2Parts.try_emplace(R);
105 assert(Inserted && "uniform value already added");
106 for (unsigned Part = 0; Part != UF; ++Part)
107 V->second.push_back(R);
108 }
109
110 bool contains(VPValue *VPV) const { return VPV2Parts.contains(VPV); }
111
112 /// Update \p R's operand at \p OpIdx with its corresponding VPValue for part
113 /// \p Part.
114 void remapOperand(VPRecipeBase *R, unsigned OpIdx, unsigned Part) {
115 auto *Op = R->getOperand(OpIdx);
116 R->setOperand(OpIdx, getValueForPart(Op, Part));
117 }
118
119 /// Update \p R's operands with their corresponding VPValues for part \p Part.
120 void remapOperands(VPRecipeBase *R, unsigned Part) {
121 for (const auto &[OpIdx, Op] : enumerate(R->operands()))
122 R->setOperand(OpIdx, getValueForPart(Op, Part));
123 }
124};
125} // namespace
126
127static void addStartIndexForScalarSteps(VPScalarIVStepsRecipe *Steps,
128 unsigned Part, VPlan &Plan,
129 VPTypeAnalysis &TypeInfo) {
130 if (Part == 0)
131 return;
132
133 VPBuilder Builder(Steps);
134 Type *BaseIVTy = TypeInfo.inferScalarType(Steps->getOperand(0));
135 Type *IntStepTy =
136 IntegerType::get(BaseIVTy->getContext(), BaseIVTy->getScalarSizeInBits());
137 VPValue *StartIndex = Steps->getVFValue();
138 if (Part > 1) {
139 StartIndex = Builder.createOverflowingOp(
140 Instruction::Mul,
141 {StartIndex,
142 Plan.getConstantInt(TypeInfo.inferScalarType(StartIndex), Part)});
143 }
144 StartIndex = Builder.createScalarSExtOrTrunc(
145 StartIndex, IntStepTy, TypeInfo.inferScalarType(StartIndex),
146 Steps->getDebugLoc());
147
148 if (BaseIVTy->isFloatingPointTy())
149 StartIndex = Builder.createScalarCast(Instruction::SIToFP, StartIndex,
150 BaseIVTy, Steps->getDebugLoc());
151
152 Steps->setStartIndex(StartIndex);
153}
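// Illustrative example (not from the sources): for part 2 of a scalar IV with
// VF = 4, the code above builds StartIndex = VF * 2 = 8, so the scalar steps
// of that part cover indices 8..11; part 1 uses VF directly, and part 0 keeps
// an absent (implicitly zero) start index.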
154
155void UnrollState::unrollReplicateRegionByUF(VPRegionBlock *VPR) {
156 VPBlockBase *InsertPt = VPR->getSingleSuccessor();
157 for (unsigned Part = 1; Part != UF; ++Part) {
158 auto *Copy = VPR->clone();
159 VPBlockUtils::insertBlockBefore(Copy, InsertPt);
160
161 auto PartI = vp_depth_first_shallow(Copy->getEntry());
162 auto Part0 = vp_depth_first_shallow(VPR->getEntry());
163 for (const auto &[PartIVPBB, Part0VPBB] :
164 zip(VPBlockUtils::blocksOnly<VPBasicBlock>(PartI),
165 VPBlockUtils::blocksOnly<VPBasicBlock>(Part0))) {
166 for (const auto &[PartIR, Part0R] : zip(*PartIVPBB, *Part0VPBB)) {
167 remapOperands(&PartIR, Part);
168 if (auto *Steps = dyn_cast<VPScalarIVStepsRecipe>(&PartIR))
169 addStartIndexForScalarSteps(Steps, Part, Plan, TypeInfo);
170
171 addRecipeForPart(&Part0R, &PartIR, Part);
172 }
173 }
174 }
175}
176
177void UnrollState::unrollWidenInductionByUF(
178 VPWidenInductionRecipe *IV, VPBasicBlock::iterator InsertPtForPhi) {
179 VPBasicBlock *PH = cast<VPBasicBlock>(
180 IV->getParent()->getEnclosingLoopRegion()->getSinglePredecessor());
181 Type *IVTy = TypeInfo.inferScalarType(IV);
182 auto &ID = IV->getInductionDescriptor();
183 FastMathFlags FMF;
184 VPIRFlags::WrapFlagsTy WrapFlags(false, false);
185 if (auto *IntOrFPInd = dyn_cast<VPWidenIntOrFpInductionRecipe>(IV)) {
186 if (IntOrFPInd->hasFastMathFlags())
187 FMF = IntOrFPInd->getFastMathFlags();
188 if (IntOrFPInd->hasNoWrapFlags())
189 WrapFlags = IntOrFPInd->getNoWrapFlags();
190 }
191
192 VPValue *ScalarStep = IV->getStepValue();
193 VPBuilder Builder(PH);
194 Type *VectorStepTy =
195 IVTy->isPointerTy() ? TypeInfo.inferScalarType(ScalarStep) : IVTy;
196 VPInstruction *VectorStep = Builder.createNaryOp(
197 VPInstruction::WideIVStep, {&Plan.getVF(), ScalarStep}, VectorStepTy, FMF,
198 IV->getDebugLoc());
199
200 ToSkip.insert(VectorStep);
201
202 // Now create recipes to compute the induction steps for parts 1 .. UF - 1;
203 // part 0 remains the header phi. Parts > 0 are computed by adding Step to the
204 // previous part. The header phi recipe will get 2 new operands: the step
205 // value for a single part and the last part, used to compute the backedge
206 // value during VPWidenInductionRecipe::execute.
207 // %Part.0 = VPWidenInductionRecipe %Start, %ScalarStep, %VectorStep, %Part.3
208 // %Part.1 = %Part.0 + %VectorStep
209 // %Part.2 = %Part.1 + %VectorStep
210 // %Part.3 = %Part.2 + %VectorStep
211 //
212 // The newly added recipes are added to ToSkip to avoid interleaving them
213 // again.
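  // Illustrative example (not from the sources): for an integer IV with start
  // 0, step 1, VF = 4 and UF = 4, VectorStep is a splat of 4 and the parts are
  // <0,1,2,3>, <4,5,6,7>, <8,9,10,11> and <12,13,14,15>.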
214 VPValue *Prev = IV;
215 Builder.setInsertPoint(IV->getParent(), InsertPtForPhi);
216 unsigned AddOpc;
217 VPIRFlags AddFlags;
218 if (IVTy->isPointerTy()) {
219 AddOpc = VPInstruction::WidePtrAdd;
220 AddFlags = GEPNoWrapFlags::none();
221 } else if (IVTy->isFloatingPointTy()) {
222 AddOpc = ID.getInductionOpcode();
223 AddFlags = FMF;
224 } else {
225 AddOpc = Instruction::Add;
226 AddFlags = WrapFlags;
228 AddFlags = VPIRFlags::WrapFlagsTy(/*NUW=*/true, /*NSW=*/false);
229 }
230 for (unsigned Part = 1; Part != UF; ++Part) {
231 std::string Name =
232 Part > 1 ? "step.add." + std::to_string(Part) : "step.add";
233
234 VPInstruction *Add =
235 Builder.createNaryOp(AddOpc,
236 {
237 Prev,
238 VectorStep,
239 },
240 AddFlags, IV->getDebugLoc(), Name);
241 ToSkip.insert(Add);
242 addRecipeForPart(IV, Add, Part);
243 Prev = Add;
244 }
245 IV->addOperand(VectorStep);
246 IV->addOperand(Prev);
247}
248
249void UnrollState::unrollHeaderPHIByUF(VPHeaderPHIRecipe *R,
250 VPBasicBlock::iterator InsertPtForPhi) {
251 // First-order recurrences pass a single vector or scalar through their header
252 // phis, irrespective of interleaving.
253 if (isa<VPFirstOrderRecurrencePHIRecipe>(R))
254 return;
255
256 // Generate step vectors for each unrolled part.
257 if (auto *IV = dyn_cast<VPWidenInductionRecipe>(R)) {
258 unrollWidenInductionByUF(IV, InsertPtForPhi);
259 return;
260 }
261
262 auto *RdxPhi = dyn_cast<VPReductionPHIRecipe>(R);
263 if (RdxPhi && RdxPhi->isOrdered())
264 return;
265
266 auto InsertPt = std::next(R->getIterator());
267 for (unsigned Part = 1; Part != UF; ++Part) {
268 VPRecipeBase *Copy = R->clone();
269 Copy->insertBefore(*R->getParent(), InsertPt);
270 addRecipeForPart(R, Copy, Part);
271 if (RdxPhi) {
272 // If the start value is a ReductionStartVector, use the identity value
273 // (second operand) for unrolled parts. If the scaling factor is > 1,
274 // create a new ReductionStartVector with the scale factor and both
275 // operands set to the identity value.
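      // Illustrative example (not from the sources): for an integer add
      // reduction started at 10, part 0 keeps the original start vector while
      // parts 1..UF-1 start at the all-zeroes identity, so combining the
      // per-part results still yields 10 plus the reduced values.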
276 if (auto *VPI = dyn_cast<VPInstruction>(RdxPhi->getStartValue())) {
277 assert(VPI->getOpcode() == VPInstruction::ReductionStartVector &&
278 "unexpected start VPInstruction");
279 if (Part != 1)
280 continue;
281 VPValue *StartV;
282 if (match(VPI->getOperand(2), m_One())) {
283 StartV = VPI->getOperand(1);
284 } else {
285 auto *C = VPI->clone();
286 C->setOperand(0, C->getOperand(1));
287 C->insertAfter(VPI);
288 StartV = C;
289 }
290 for (unsigned Part = 1; Part != UF; ++Part)
291 VPV2Parts[VPI][Part - 1] = StartV;
292 }
293 } else {
295 "unexpected header phi recipe not needing unrolled part");
296 }
297 }
298}
299
300/// Handle non-header-phi recipes.
301void UnrollState::unrollRecipeByUF(VPRecipeBase &R) {
302 if (match(&R, m_CombineOr(m_BranchOnCond(), m_BranchOnCount())))
303 return;
304
305 if (auto *VPI = dyn_cast<VPInstruction>(&R)) {
306 if (vputils::onlyFirstPartUsed(VPI)) {
307 addUniformForAllParts(VPI);
308 return;
309 }
310 }
311 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
312 if (isa<StoreInst>(RepR->getUnderlyingValue()) &&
313 RepR->getOperand(1)->isDefinedOutsideLoopRegions()) {
314 // Stores to an invariant address only need to store the last part.
315 remapOperands(&R, UF - 1);
316 return;
317 }
318 if (match(RepR,
319 m_Intrinsic<Intrinsic::experimental_noalias_scope_decl>())) {
320 addUniformForAllParts(RepR);
321 return;
322 }
323 }
324
325 // Unroll non-uniform recipes.
326 auto InsertPt = std::next(R.getIterator());
327 VPBasicBlock &VPBB = *R.getParent();
328 for (unsigned Part = 1; Part != UF; ++Part) {
329 VPRecipeBase *Copy = R.clone();
330 Copy->insertBefore(VPBB, InsertPt);
331 addRecipeForPart(&R, Copy, Part);
332
333 // Phi operands are updated once all other recipes have been unrolled.
334 if (isa<VPWidenPHIRecipe>(Copy))
335 continue;
336
337 VPValue *Op;
338 if (match(&R, m_VPInstruction<VPInstruction::FirstOrderRecurrenceSplice>(
339 m_VPValue(), m_VPValue(Op)))) {
340 Copy->setOperand(0, getValueForPart(Op, Part - 1));
341 Copy->setOperand(1, getValueForPart(Op, Part));
342 continue;
343 }
344 if (auto *VPR = dyn_cast<VPVectorPointerRecipe>(&R)) {
345 VPBuilder Builder(VPR);
346 const DataLayout &DL = Plan.getDataLayout();
347 Type *IndexTy = DL.getIndexType(TypeInfo.inferScalarType(VPR));
348 Type *VFTy = TypeInfo.inferScalarType(&Plan.getVF());
349 VPValue *VF = Builder.createScalarZExtOrTrunc(
350 &Plan.getVF(), IndexTy, VFTy, DebugLoc::getUnknown());
351 // VFxUF does not wrap, so VF * Part also cannot wrap.
352 VPValue *VFxPart = Builder.createOverflowingOp(
353 Instruction::Mul, {VF, Plan.getConstantInt(IndexTy, Part)},
354 {true, true});
355 Copy->setOperand(0, VPR->getOperand(0));
356 Copy->addOperand(VFxPart);
357 continue;
358 }
359 if (auto *Red = dyn_cast<VPReductionRecipe>(&R)) {
360 auto *Phi = dyn_cast<VPReductionPHIRecipe>(R.getOperand(0));
361 if (Phi && Phi->isOrdered()) {
362 auto &Parts = VPV2Parts[Phi];
363 if (Part == 1) {
364 Parts.clear();
365 Parts.push_back(Red);
366 }
367 Parts.push_back(Copy->getVPSingleValue());
368 Phi->setOperand(1, Copy->getVPSingleValue());
369 }
370 }
371 if (auto *VEPR = dyn_cast<VPVectorEndPointerRecipe>(Copy)) {
372 // Materialize PartN offset for VectorEndPointer.
373 VEPR->setOperand(0, R.getOperand(0));
374 VEPR->setOperand(1, R.getOperand(1));
375 VEPR->materializeOffset(Part);
376 continue;
377 }
378
379 remapOperands(Copy, Part);
380
381 if (auto *ScalarIVSteps = dyn_cast<VPScalarIVStepsRecipe>(Copy))
382 addStartIndexForScalarSteps(ScalarIVSteps, Part, Plan, TypeInfo);
383
384 // Add operand indicating the part to generate code for, to recipes still
385 // requiring it.
387 Copy->addOperand(getConstantInt(Part));
388
389 if (match(Copy,
391 VPBuilder Builder(Copy);
392 VPValue *ScaledByPart = Builder.createOverflowingOp(
393 Instruction::Mul, {Copy->getOperand(1), getConstantInt(Part)});
394 Copy->setOperand(1, ScaledByPart);
395 }
396 }
397 if (auto *VEPR = dyn_cast<VPVectorEndPointerRecipe>(&R)) {
398 // Materialize Part0 offset for VectorEndPointer.
399 VEPR->materializeOffset();
400 }
401}
402
403void UnrollState::unrollBlock(VPBlockBase *VPB) {
404 auto *VPR = dyn_cast<VPRegionBlock>(VPB);
405 if (VPR) {
406 if (VPR->isReplicator())
407 return unrollReplicateRegionByUF(VPR);
408
409 // Traverse blocks in region in RPO to ensure defs are visited before uses
410 // across blocks.
411 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
412 RPOT(VPR->getEntry());
413 for (VPBlockBase *VPB : RPOT)
414 unrollBlock(VPB);
415 return;
416 }
417
418 // VPB is a VPBasicBlock; unroll it, i.e., unroll its recipes.
419 auto *VPBB = cast<VPBasicBlock>(VPB);
420 auto InsertPtForPhi = VPBB->getFirstNonPhi();
421 for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
422 if (ToSkip.contains(&R) || isa<VPIRInstruction>(&R))
423 continue;
424
425 // Add all VPValues for all parts to AnyOf, FirstActiveLane, LastActiveLane
426 // and ComputeReductionResult, which combine all parts to compute the final
427 // value.
428 VPValue *Op1;
429 if (match(&R, m_VPInstruction<VPInstruction::AnyOf>(m_VPValue(Op1))) ||
430 match(&R, m_FirstActiveLane(m_VPValue(Op1))) ||
431 match(&R, m_LastActiveLane(m_VPValue(Op1))) ||
432 match(&R, m_ComputeReductionResult(m_VPValue(Op1)))) {
433 addUniformForAllParts(cast<VPInstruction>(&R));
434 for (unsigned Part = 1; Part != UF; ++Part)
435 R.addOperand(getValueForPart(Op1, Part));
436 continue;
437 }
438 VPValue *Op0;
439 if (match(&R, m_ExtractLane(m_VPValue(Op0), m_VPValue(Op1)))) {
440 addUniformForAllParts(cast<VPInstruction>(&R));
441 for (unsigned Part = 1; Part != UF; ++Part)
442 R.addOperand(getValueForPart(Op1, Part));
443 continue;
444 }
445
446 VPValue *Op2;
448 m_VPValue(Op2)))) {
449 addUniformForAllParts(cast<VPInstruction>(&R));
450 for (unsigned Part = 1; Part != UF; ++Part) {
451 R.addOperand(getValueForPart(Op1, Part));
452 R.addOperand(getValueForPart(Op2, Part));
453 }
454 continue;
455 }
456
457 if (Plan.hasScalarVFOnly()) {
458 if (match(&R, m_ExtractLastPart(m_VPValue(Op0))) ||
459 match(&R, m_ExtractPenultimateElement(m_VPValue(Op0)))) {
460 auto *I = cast<VPInstruction>(&R);
461 bool IsPenultimatePart =
462 match(&R, m_ExtractPenultimateElement(m_VPValue()));
463 unsigned PartIdx = IsPenultimatePart ? UF - 2 : UF - 1;
464 // For scalar VF, directly use the scalar part value.
465 I->replaceAllUsesWith(getValueForPart(Op0, PartIdx));
466 continue;
467 }
468 }
469 // For vector VF, the penultimate element is always extracted from the last part.
472 addUniformForAllParts(cast<VPSingleDefRecipe>(&R));
473 R.setOperand(0, getValueForPart(Op0, UF - 1));
474 continue;
475 }
476
477 auto *SingleDef = dyn_cast<VPSingleDefRecipe>(&R);
478 if (SingleDef && vputils::isUniformAcrossVFsAndUFs(SingleDef)) {
479 addUniformForAllParts(SingleDef);
480 continue;
481 }
482
483 if (auto *H = dyn_cast<VPHeaderPHIRecipe>(&R)) {
484 unrollHeaderPHIByUF(H, InsertPtForPhi);
485 continue;
486 }
487
488 unrollRecipeByUF(R);
489 }
490}
491
492void VPlanTransforms::unrollByUF(VPlan &Plan, unsigned UF) {
493 assert(UF > 0 && "Unroll factor must be positive");
494 Plan.setUF(UF);
495 llvm::scope_exit Cleanup([&Plan, UF]() {
496 auto Iter = vp_depth_first_deep(Plan.getEntry());
497 // Remove recipes that are redundant after unrolling.
498 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
499 for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
500 auto *VPI = dyn_cast<VPInstruction>(&R);
501 if (VPI &&
502 VPI->getOpcode() == VPInstruction::CanonicalIVIncrementForPart &&
503 VPI->getOperand(1) == &Plan.getVF()) {
504 VPI->replaceAllUsesWith(VPI->getOperand(0));
505 VPI->eraseFromParent();
506 }
507 }
508 }
509
510 Type *TCTy = VPTypeAnalysis(Plan).inferScalarType(Plan.getTripCount());
511 Plan.getUF().replaceAllUsesWith(Plan.getConstantInt(TCTy, UF));
512 });
513 if (UF == 1) {
514 return;
515 }
516
517 UnrollState Unroller(Plan, UF);
518
519 // Iterate over all blocks in the plan starting from Entry, and unroll
520 // recipes inside them. This includes the vector preheader and middle blocks,
521 // which may set up or post-process per-part values.
522 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
523 Plan.getEntry());
524 for (VPBlockBase *VPB : RPOT)
525 Unroller.unrollBlock(VPB);
526
527 unsigned Part = 1;
528 // Remap operands of cloned header phis to update backedge values. The header
529 // phis cloned during unrolling are just after the header phi for part 0.
530 // Reset Part to 1 when reaching the first (part 0) recipe of a block.
531 for (VPRecipeBase &H :
532 Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
533 // The second operand of Fixed Order Recurrence phi's, feeding the spliced
534 // value across the backedge, needs to remap to the last part of the spliced
535 // value.
536 if (isa<VPFirstOrderRecurrencePHIRecipe>(&H)) {
537 Unroller.remapOperand(&H, 1, UF - 1);
538 continue;
539 }
540 if (Unroller.contains(H.getVPSingleValue())) {
541 Part = 1;
542 continue;
543 }
544 Unroller.remapOperands(&H, Part);
545 Part++;
546 }
547
548 VPlanTransforms::removeDeadRecipes(Plan);
549}
550
551/// Add a lane offset to the start index of \p Steps.
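/// For example (illustrative), for an integer induction and Lane 2 the new
/// start index is the old start index plus 2, or simply 2 if there was none.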
552static void addLaneToStartIndex(VPScalarIVStepsRecipe *Steps, unsigned Lane,
553 VPlan &Plan, VPRecipeBase *InsertPt) {
554 assert(Lane > 0 && "Zero lane adds no offset to start index");
555 VPTypeAnalysis TypeInfo(Plan);
556 Type *BaseIVTy = TypeInfo.inferScalarType(Steps->getOperand(0));
557
558 VPValue *OldStartIndex = Steps->getStartIndex();
559 VPValue *LaneOffset;
560 unsigned AddOpcode;
561 // TODO: Retrieve the flags from Steps unconditionally.
562 VPIRFlags Flags;
563 if (BaseIVTy->isFloatingPointTy()) {
564 int SignedLane = static_cast<int>(Lane);
565 if (!OldStartIndex && Steps->getInductionOpcode() == Instruction::FSub)
566 SignedLane = -SignedLane;
567 LaneOffset = Plan.getOrAddLiveIn(ConstantFP::get(BaseIVTy, SignedLane));
568 AddOpcode = Steps->getInductionOpcode();
569 Flags = VPIRFlags(FastMathFlags());
570 } else {
571 unsigned BaseIVBits = BaseIVTy->getScalarSizeInBits();
572 LaneOffset = Plan.getConstantInt(
573 APInt(BaseIVBits, Lane, /*isSigned*/ false, /*implicitTrunc*/ true));
574 AddOpcode = Instruction::Add;
575 Flags = VPIRFlags(VPIRFlags::WrapFlagsTy(false, false));
576 }
577
578 VPValue *NewStartIndex = LaneOffset;
579 if (OldStartIndex) {
580 VPBuilder Builder(InsertPt);
581 NewStartIndex =
582 Builder.createNaryOp(AddOpcode, {OldStartIndex, LaneOffset}, Flags);
583 }
584 Steps->setStartIndex(NewStartIndex);
585}
586
587/// Create a single-scalar clone of \p DefR (must be a VPReplicateRecipe,
588/// VPInstruction or VPScalarIVStepsRecipe) for lane \p Lane. Use \p
589/// Def2LaneDefs to look up scalar definitions for operands of \p DefR.
590static VPValue *
591cloneForLane(VPlan &Plan, VPBuilder &Builder, Type *IdxTy,
592 VPSingleDefRecipe *DefR, VPLane Lane,
593 const DenseMap<VPValue *, SmallVector<VPValue *>> &Def2LaneDefs) {
595 "DefR must be a VPReplicateRecipe, VPInstruction or "
596 "VPScalarIVStepsRecipe");
597 VPValue *Op;
599 auto LaneDefs = Def2LaneDefs.find(Op);
600 if (LaneDefs != Def2LaneDefs.end())
601 return LaneDefs->second[Lane.getKnownLane()];
602
603 VPValue *Idx = Plan.getConstantInt(IdxTy, Lane.getKnownLane());
604 return Builder.createNaryOp(Instruction::ExtractElement, {Op, Idx});
605 }
606
607 // Collect the operands at Lane, creating extracts as needed.
608 SmallVector<VPValue *> NewOps;
609 for (VPValue *Op : DefR->operands()) {
610 // If Op is a definition that has been unrolled, directly use the clone for
611 // the corresponding lane.
612 auto LaneDefs = Def2LaneDefs.find(Op);
613 if (LaneDefs != Def2LaneDefs.end()) {
614 NewOps.push_back(LaneDefs->second[Lane.getKnownLane()]);
615 continue;
616 }
617 if (Lane.getKind() == VPLane::Kind::ScalableLast) {
618 // Look through mandatory Unpack.
619 [[maybe_unused]] bool Matched =
621 assert(Matched && "original op must have been Unpack");
622 auto *ExtractPart =
623 Builder.createNaryOp(VPInstruction::ExtractLastPart, {Op});
624 NewOps.push_back(
625 Builder.createNaryOp(VPInstruction::ExtractLastLane, {ExtractPart}));
626 continue;
627 }
629 NewOps.push_back(Op);
630 continue;
631 }
632
633 // Look through buildvector to avoid unnecessary extracts.
634 if (match(Op, m_BuildVector())) {
635 NewOps.push_back(
636 cast<VPInstruction>(Op)->getOperand(Lane.getKnownLane()));
637 continue;
638 }
639 VPValue *Idx = Plan.getConstantInt(IdxTy, Lane.getKnownLane());
640 VPValue *Ext = Builder.createNaryOp(Instruction::ExtractElement, {Op, Idx});
641 NewOps.push_back(Ext);
642 }
643
644 VPSingleDefRecipe *New;
645 if (auto *RepR = dyn_cast<VPReplicateRecipe>(DefR)) {
646 // TODO: have cloning of replicate recipes also provide the desired result
647 // coupled with setting its operands to NewOps (deriving IsSingleScalar and
648 // Mask from the operands?)
649 New = new VPReplicateRecipe(RepR->getUnderlyingInstr(), NewOps,
650 /*IsSingleScalar=*/true, /*Mask=*/nullptr,
651 *RepR, *RepR, RepR->getDebugLoc());
652 } else {
653 New = DefR->clone();
654 for (const auto &[Idx, Op] : enumerate(NewOps)) {
655 New->setOperand(Idx, Op);
656 }
657 if (auto *Steps = dyn_cast<VPScalarIVStepsRecipe>(New)) {
658 // Skip lane 0: an absent start index is implicitly zero.
659 unsigned KnownLane = Lane.getKnownLane();
660 if (KnownLane != 0)
661 addLaneToStartIndex(Steps, KnownLane, Plan, DefR);
662 }
663 }
664 New->insertBefore(DefR);
665 return New;
666}
667
668/// Convert recipes in region blocks to operate on a single lane 0.
669/// VPReplicateRecipes are converted to single-scalar ones, branch-on-mask is
670/// converted into BranchOnCond and extracts are created as needed.
671static void convertRecipesInRegionBlocksToSingleScalar(VPlan &Plan, Type *IdxTy,
672 VPBlockBase *Entry,
673 ElementCount VF) {
674 VPValue *Idx0 = Plan.getZero(IdxTy);
675 VPTypeAnalysis TypeInfo(Plan);
676 for (VPBlockBase *VPB : vp_depth_first_shallow(Entry)) {
677 for (VPRecipeBase &OldR : make_early_inc_range(*cast<VPBasicBlock>(VPB))) {
678 VPBuilder Builder(&OldR);
680 "must not contain extracts before conversion");
681
682 // For scalar VF, operands are already scalar; no extraction needed.
683 if (!VF.isScalar()) {
684 for (const auto &[I, Op] : enumerate(OldR.operands())) {
685 // Skip operands that don't need extraction: values defined in the
686 // same block (already scalar), or values that are already single
687 // scalars.
688 auto *DefR = Op->getDefiningRecipe();
690 DefR->getParent() == VPB) ||
692 continue;
693
694 // Extract lane zero from values defined outside the region.
695 VPValue *Extract = Builder.createNaryOp(
696 Instruction::ExtractElement, {Op, Idx0}, OldR.getDebugLoc());
697 OldR.setOperand(I, Extract);
698 }
699 }
700
701 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&OldR)) {
702 auto *NewR =
703 new VPReplicateRecipe(RepR->getUnderlyingInstr(), RepR->operands(),
704 /* IsSingleScalar=*/true, /*Mask=*/nullptr,
705 *RepR, *RepR, RepR->getDebugLoc());
706 NewR->insertBefore(RepR);
707 RepR->replaceAllUsesWith(NewR);
708 RepR->eraseFromParent();
709 } else if (auto *BranchOnMask = dyn_cast<VPBranchOnMaskRecipe>(&OldR)) {
710 Builder.createNaryOp(VPInstruction::BranchOnCond,
711 {BranchOnMask->getOperand(0)},
712 BranchOnMask->getDebugLoc());
713 BranchOnMask->eraseFromParent();
714 } else if (auto *PredPhi = dyn_cast<VPPredInstPHIRecipe>(&OldR)) {
715 VPValue *PredOp = PredPhi->getOperand(0);
716 Type *PredTy = TypeInfo.inferScalarType(PredOp);
717 VPValue *PoisonVal = Plan.getOrAddLiveIn(PoisonValue::get(PredTy));
718
719 VPPhi *NewPhi = Builder.createScalarPhi({PoisonVal, PredOp},
720 PredPhi->getDebugLoc());
721 PredPhi->replaceAllUsesWith(NewPhi);
722 PredPhi->eraseFromParent();
723 } else {
725 (isa<VPInstruction>(OldR) &&
726 vputils::isSingleScalar(OldR.getVPSingleValue()))) &&
727 "unexpected unhandled recipe");
728 }
729 }
730 }
731}
732
733/// Update recipes in the cloned blocks rooted at \p NewEntry to match \p Lane,
734/// using the original blocks rooted at \p OldEntry as reference.
735static void processLaneForReplicateRegion(VPlan &Plan, Type *IdxTy,
736 unsigned Lane, VPBasicBlock *OldEntry,
737 VPBasicBlock *NewEntry) {
738 DenseMap<VPValue *, VPValue *> Old2NewVPValues;
739 VPValue *IdxLane = Plan.getConstantInt(IdxTy, Lane);
740 for (const auto &[OldBB, NewBB] :
741 zip(vp_depth_first_shallow(OldEntry),
742 vp_depth_first_shallow(NewEntry))) {
743 for (auto &&[OldR, NewR] :
744 zip(*cast<VPBasicBlock>(OldBB), *cast<VPBasicBlock>(NewBB))) {
745 for (const auto &[OldV, NewV] :
746 zip_equal(OldR.definedValues(), NewR.definedValues()))
747 Old2NewVPValues[OldV] = NewV;
748
749 // Remap operands to use lane-specific values.
750 for (const auto &[I, OldOp] : enumerate(NewR.operands())) {
751 // Use cloned value if operand was defined in the region.
752 if (auto *NewOp = Old2NewVPValues.lookup(OldOp))
753 NewR.setOperand(I, NewOp);
754 }
755
756 if (auto *Steps = dyn_cast<VPScalarIVStepsRecipe>(&NewR))
757 addLaneToStartIndex(Steps, Lane, Plan, Steps);
758 else if (match(&NewR, m_ExtractElement(m_VPValue(), m_ZeroInt())))
759 NewR.setOperand(1, IdxLane);
760 }
761 }
762}
763
764/// Dissolve a single replicate region by replicating its blocks for each lane
765/// of \p VF. The region is disconnected, its blocks are reparented, cloned for
766/// each lane, and reconnected in sequence.
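/// For example (illustrative), with VF = 2 a region guarding a predicated
/// store is replaced by two consecutive copies of its blocks, the first
/// operating on lane 0 and the second on lane 1.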
767static void dissolveReplicateRegion(VPRegionBlock *Region, ElementCount VF,
768 VPlan &Plan, Type *IdxTy) {
769 VPBlockBase *FirstLaneEntry = Region->getEntry();
770 VPBlockBase *FirstLaneExiting = Region->getExiting();
771
772 // Disconnect and dissolve the region.
773 VPBlockBase *Predecessor = Region->getSinglePredecessor();
774 assert(Predecessor && "Replicate region must have a single predecessor");
775 VPBlockBase *Successor = Region->getSingleSuccessor();
776 assert(Successor && "Replicate region must have a single successor");
777 VPBlockUtils::disconnectBlocks(Predecessor, Region);
778 VPBlockUtils::disconnectBlocks(Region, Successor);
779
780 VPRegionBlock *ParentRegion = Region->getParent();
781 for (VPBlockBase *VPB : vp_depth_first_shallow(FirstLaneEntry))
782 VPB->setParent(ParentRegion);
783
784 // Process the original blocks for lane 0: converting their recipes to
785 // single-scalar.
786 convertRecipesInRegionBlocksToSingleScalar(Plan, IdxTy, FirstLaneEntry, VF);
787
788 // Clone converted blocks for remaining lanes and process each in reverse
789 // order, connecting each lane's Exiting block to the subsequent lane's entry.
790 VPBlockBase *NextLaneEntry = Successor;
791 unsigned NumLanes = VF.getFixedValue();
792 for (int Lane = NumLanes - 1; Lane > 0; --Lane) {
793 const auto &[CurrentLaneEntry, CurrentLaneExiting] =
794 VPBlockUtils::cloneFrom(FirstLaneEntry);
795 for (VPBlockBase *VPB : vp_depth_first_shallow(CurrentLaneEntry))
796 VPB->setParent(ParentRegion);
797 processLaneForReplicateRegion(Plan, IdxTy, Lane,
798 cast<VPBasicBlock>(FirstLaneEntry),
799 cast<VPBasicBlock>(CurrentLaneEntry));
800 VPBlockUtils::connectBlocks(CurrentLaneExiting, NextLaneEntry);
801 NextLaneEntry = CurrentLaneEntry;
802 }
803
804 // Connect Predecessor to FirstLaneEntry, and FirstLaneExiting to
805 // NextLaneEntry, which is the second lane's entry. The latter is
806 // done last so that earlier clonings from FirstLaneEntry stop at
807 // FirstLaneExiting.
808 VPBlockUtils::connectBlocks(Predecessor, FirstLaneEntry);
809 VPBlockUtils::connectBlocks(FirstLaneExiting, NextLaneEntry);
810}
811
812/// Collect and dissolve all replicate regions in the vector loop, replicating
813/// their blocks and recipes for each lane of \p VF.
814static void replicateReplicateRegionsByVF(VPlan &Plan, ElementCount VF,
815 Type *IdxTy) {
816 // Collect all replicate regions before modifying the CFG.
817 SmallVector<VPRegionBlock *> ReplicateRegions;
818 for (VPRegionBlock *Region : VPBlockUtils::blocksOnly<VPRegionBlock>(
819 vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
820 // Skip regions with live-outs when vectorizing as packing scalar results
821 // back into vectors is not yet implemented.
822 if (Region->isReplicator() &&
823 (VF.isScalar() || Region->getExitingBasicBlock()->empty()))
824 ReplicateRegions.push_back(Region);
825 }
826
827 assert((ReplicateRegions.empty() || !VF.isScalable()) &&
828 "cannot replicate across scalable VFs");
829
830 // Dissolve replicate regions by replicating their blocks for each lane.
831 for (VPRegionBlock *Region : ReplicateRegions)
832 dissolveReplicateRegion(Region, VF, Plan, IdxTy);
833
834 VPlanTransforms::mergeBlocksIntoPredecessors(Plan);
835}
836
837void VPlanTransforms::replicateByVF(VPlan &Plan, ElementCount VF) {
838 Type *IdxTy = IntegerType::get(
839 Plan.getScalarHeader()->getIRBasicBlock()->getContext(), 32);
840
841 if (Plan.hasScalarVFOnly()) {
842 // When Plan is only unrolled by UF, replicating by VF amounts to dissolving
843 // replicate regions.
844 replicateReplicateRegionsByVF(Plan, VF, IdxTy);
845 return;
846 }
847
848 // Visit all VPBBs outside the loop region and directly inside the top-level
849 // loop region.
850 auto VPBBsOutsideLoopRegion = VPBlockUtils::blocksOnly<VPBasicBlock>(
851 vp_depth_first_shallow(Plan.getEntry()));
852 auto VPBBsInsideLoopRegion = VPBlockUtils::blocksOnly<VPBasicBlock>(
853 vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()));
854 auto VPBBsToUnroll =
855 concat<VPBasicBlock *>(VPBBsOutsideLoopRegion, VPBBsInsideLoopRegion);
856 // A mapping of current VPValue definitions to collections of new VPValues
857 // defined per lane. Serves to hook-up potential users of current VPValue
858 // definition that are replicated-per-VF later.
859 DenseMap<VPValue *, SmallVector<VPValue *>> Def2LaneDefs;
860 // The removal of current recipes being replaced by new ones needs to be
861 // delayed after Def2LaneDefs is no longer in use.
862 SmallVector<VPRecipeBase *> ToRemove;
863 for (VPBasicBlock *VPBB : VPBBsToUnroll) {
864 for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
867 cast<VPReplicateRecipe>(&R)->isSingleScalar()) ||
868 (isa<VPInstruction>(&R) &&
869 !cast<VPInstruction>(&R)->doesGeneratePerAllLanes() &&
871 continue;
872
873 auto *DefR = cast<VPSingleDefRecipe>(&R);
874 VPBuilder Builder(DefR);
875 if (DefR->getNumUsers() == 0) {
876 // Create single-scalar version of DefR for all lanes.
877 for (unsigned I = 0; I != VF.getKnownMinValue(); ++I)
878 cloneForLane(Plan, Builder, IdxTy, DefR, VPLane(I), Def2LaneDefs);
879 DefR->eraseFromParent();
880 continue;
881 }
882 /// Create single-scalar version of DefR for all lanes.
883 SmallVector<VPValue *> LaneDefs;
884 for (unsigned I = 0; I != VF.getKnownMinValue(); ++I)
885 LaneDefs.push_back(
886 cloneForLane(Plan, Builder, IdxTy, DefR, VPLane(I), Def2LaneDefs));
887
888 Def2LaneDefs[DefR] = LaneDefs;
889 /// Users that only demand the first lane can use the definition for lane
890 /// 0.
891 DefR->replaceUsesWithIf(LaneDefs[0], [DefR](VPUser &U, unsigned) {
892 return U.usesFirstLaneOnly(DefR);
893 });
894
895 // Update each build vector user that currently has DefR as its only
896 // operand, to have all LaneDefs as its operands.
897 for (VPUser *U : to_vector(DefR->users())) {
898 auto *VPI = dyn_cast<VPInstruction>(U);
899 if (!VPI || (VPI->getOpcode() != VPInstruction::BuildVector &&
900 VPI->getOpcode() != VPInstruction::BuildStructVector))
901 continue;
902 assert(VPI->getNumOperands() == 1 &&
903 "Build(Struct)Vector must have a single operand before "
904 "replicating by VF");
905 VPI->setOperand(0, LaneDefs[0]);
906 for (VPValue *LaneDef : drop_begin(LaneDefs))
907 VPI->addOperand(LaneDef);
908 }
909 ToRemove.push_back(DefR);
910 }
911 }
912 for (auto *R : reverse(ToRemove))
913 R->eraseFromParent();
914
915 replicateReplicateRegionsByVF(Plan, VF, IdxTy);
916}