//===-- VPlanTransforms.cpp - Utility VPlan to VPlan transforms -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a set of utility VPlan to VPlan transformations.
///
//===----------------------------------------------------------------------===//
14#include "VPlanTransforms.h"
15#include "VPRecipeBuilder.h"
16#include "VPlanAnalysis.h"
17#include "VPlanCFG.h"
18#include "VPlanDominatorTree.h"
19#include "VPlanPatternMatch.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SetVector.h"
25#include "llvm/IR/Intrinsics.h"
27
28using namespace llvm;
29
31 VPlanPtr &Plan,
33 GetIntOrFpInductionDescriptor,
34 ScalarEvolution &SE, const TargetLibraryInfo &TLI) {
35
37 Plan->getEntry());
38 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
39 VPRecipeBase *Term = VPBB->getTerminator();
40 auto EndIter = Term ? Term->getIterator() : VPBB->end();
41 // Introduce each ingredient into VPlan.
42 for (VPRecipeBase &Ingredient :
43 make_early_inc_range(make_range(VPBB->begin(), EndIter))) {
44
45 VPValue *VPV = Ingredient.getVPSingleValue();
46 Instruction *Inst = cast<Instruction>(VPV->getUnderlyingValue());
47
48 VPRecipeBase *NewRecipe = nullptr;
49 if (auto *VPPhi = dyn_cast<VPWidenPHIRecipe>(&Ingredient)) {
50 auto *Phi = cast<PHINode>(VPPhi->getUnderlyingValue());
51 const auto *II = GetIntOrFpInductionDescriptor(Phi);
52 if (!II)
53 continue;
54
55 VPValue *Start = Plan->getOrAddLiveIn(II->getStartValue());
56 VPValue *Step =
57 vputils::getOrCreateVPValueForSCEVExpr(*Plan, II->getStep(), SE);
58 NewRecipe = new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, *II);
59 } else {
60 assert(isa<VPInstruction>(&Ingredient) &&
61 "only VPInstructions expected here");
62 assert(!isa<PHINode>(Inst) && "phis should be handled above");
63 // Create VPWidenMemoryRecipe for loads and stores.
64 if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
65 NewRecipe = new VPWidenLoadRecipe(
66 *Load, Ingredient.getOperand(0), nullptr /*Mask*/,
67 false /*Consecutive*/, false /*Reverse*/,
68 Ingredient.getDebugLoc());
69 } else if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
70 NewRecipe = new VPWidenStoreRecipe(
71 *Store, Ingredient.getOperand(1), Ingredient.getOperand(0),
72 nullptr /*Mask*/, false /*Consecutive*/, false /*Reverse*/,
73 Ingredient.getDebugLoc());
74 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
75 NewRecipe = new VPWidenGEPRecipe(GEP, Ingredient.operands());
76 } else if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
77 NewRecipe = new VPWidenCallRecipe(
78 *CI, drop_end(Ingredient.operands()),
79 getVectorIntrinsicIDForCall(CI, &TLI), CI->getDebugLoc());
80 } else if (SelectInst *SI = dyn_cast<SelectInst>(Inst)) {
81 NewRecipe = new VPWidenSelectRecipe(*SI, Ingredient.operands());
82 } else if (auto *CI = dyn_cast<CastInst>(Inst)) {
83 NewRecipe = new VPWidenCastRecipe(
84 CI->getOpcode(), Ingredient.getOperand(0), CI->getType(), *CI);
85 } else {
86 NewRecipe = new VPWidenRecipe(*Inst, Ingredient.operands());
87 }
88 }
89
90 NewRecipe->insertBefore(&Ingredient);
91 if (NewRecipe->getNumDefinedValues() == 1)
92 VPV->replaceAllUsesWith(NewRecipe->getVPSingleValue());
93 else
94 assert(NewRecipe->getNumDefinedValues() == 0 &&
95 "Only recpies with zero or one defined values expected");
96 Ingredient.eraseFromParent();
97 }
98 }
99}
100
static bool sinkScalarOperands(VPlan &Plan) {
  auto Iter = vp_depth_first_deep(Plan.getEntry());
  bool Changed = false;
  // First, collect the operands of all recipes in replicate blocks as seeds
  // for sinking.
  SetVector<std::pair<VPBasicBlock *, VPSingleDefRecipe *>> WorkList;
  for (VPRegionBlock *VPR : VPBlockUtils::blocksOnly<VPRegionBlock>(Iter)) {
    VPBasicBlock *EntryVPBB = VPR->getEntryBasicBlock();
    if (!VPR->isReplicator() || EntryVPBB->getSuccessors().size() != 2)
      continue;
    VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(EntryVPBB->getSuccessors()[0]);
    if (!VPBB || VPBB->getSingleSuccessor() != VPR->getExitingBasicBlock())
      continue;
    for (auto &Recipe : *VPBB) {
      for (VPValue *Op : Recipe.operands())
        if (auto *Def =
                dyn_cast_or_null<VPSingleDefRecipe>(Op->getDefiningRecipe()))
          WorkList.insert(std::make_pair(VPBB, Def));
    }
  }

  bool ScalarVFOnly = Plan.hasScalarVFOnly();
  // Try to sink each replicate or scalar IV steps recipe in the worklist.
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    VPBasicBlock *SinkTo;
    VPSingleDefRecipe *SinkCandidate;
    std::tie(SinkTo, SinkCandidate) = WorkList[I];
    if (SinkCandidate->getParent() == SinkTo ||
        SinkCandidate->mayHaveSideEffects() ||
        SinkCandidate->mayReadOrWriteMemory())
      continue;
    if (auto *RepR = dyn_cast<VPReplicateRecipe>(SinkCandidate)) {
      if (!ScalarVFOnly && RepR->isUniform())
        continue;
    } else if (!isa<VPScalarIVStepsRecipe>(SinkCandidate))
      continue;

    bool NeedsDuplicating = false;
    // All recipe users of the sink candidate must be in the same block SinkTo,
    // or all users outside of SinkTo must be uniform-after-vectorization
    // (i.e., only the first lane is used). In the latter case, we need to
    // duplicate SinkCandidate.
    auto CanSinkWithUser = [SinkTo, &NeedsDuplicating,
                            SinkCandidate](VPUser *U) {
      auto *UI = dyn_cast<VPRecipeBase>(U);
      if (!UI)
        return false;
      if (UI->getParent() == SinkTo)
        return true;
      NeedsDuplicating = UI->onlyFirstLaneUsed(SinkCandidate);
      // We only know how to duplicate VPReplicateRecipes for now.
      return NeedsDuplicating && isa<VPReplicateRecipe>(SinkCandidate);
    };
    if (!all_of(SinkCandidate->users(), CanSinkWithUser))
      continue;

    if (NeedsDuplicating) {
      if (ScalarVFOnly)
        continue;
      Instruction *I = SinkCandidate->getUnderlyingInstr();
      auto *Clone = new VPReplicateRecipe(I, SinkCandidate->operands(), true);
      // TODO: add ".cloned" suffix to name of Clone's VPValue.

      Clone->insertBefore(SinkCandidate);
      SinkCandidate->replaceUsesWithIf(Clone, [SinkTo](VPUser &U, unsigned) {
        return cast<VPRecipeBase>(&U)->getParent() != SinkTo;
      });
    }
    SinkCandidate->moveBefore(*SinkTo, SinkTo->getFirstNonPhi());
    for (VPValue *Op : SinkCandidate->operands())
      if (auto *Def =
              dyn_cast_or_null<VPSingleDefRecipe>(Op->getDefiningRecipe()))
        WorkList.insert(std::make_pair(SinkTo, Def));
    Changed = true;
  }
  return Changed;
}

/// If \p R is a region with a VPBranchOnMaskRecipe in the entry block, return
/// the mask.
VPValue *getPredicatedMask(VPRegionBlock *R) {
  auto *EntryBB = dyn_cast<VPBasicBlock>(R->getEntry());
  if (!EntryBB || EntryBB->size() != 1 ||
      !isa<VPBranchOnMaskRecipe>(EntryBB->begin()))
    return nullptr;

  return cast<VPBranchOnMaskRecipe>(&*EntryBB->begin())->getOperand(0);
}

/// If \p R is a triangle region, return the 'then' block of the triangle.
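///
/// Schematically, based on the checks below (either successor of the entry
/// block may play the role of the 'then' block):
///
///      entry
///      /   \
///   then    |
///      \    |
///      merge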
static VPBasicBlock *getPredicatedThenBlock(VPRegionBlock *R) {
  auto *EntryBB = cast<VPBasicBlock>(R->getEntry());
  if (EntryBB->getNumSuccessors() != 2)
    return nullptr;

  auto *Succ0 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[0]);
  auto *Succ1 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[1]);
  if (!Succ0 || !Succ1)
    return nullptr;

  if (Succ0->getNumSuccessors() + Succ1->getNumSuccessors() != 1)
    return nullptr;
  if (Succ0->getSingleSuccessor() == Succ1)
    return Succ0;
  if (Succ1->getSingleSuccessor() == Succ0)
    return Succ1;
  return nullptr;
}

// Merge replicate regions in their successor region, if a replicate region
// is connected to a successor replicate region with the same predicate by a
// single, empty VPBasicBlock.
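//
// Schematically, with both regions guarded by the same mask %m and block
// contents elided:
//
//   region1 (%m) -> empty VPBB -> region2 (%m) -> ...
//
// becomes
//
//   region2 (%m) -> ...
//
// after region1's recipes have been moved into region2's 'then' and merge
// blocks.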
214 SetVector<VPRegionBlock *> DeletedRegions;
215
216 // Collect replicate regions followed by an empty block, followed by another
217 // replicate region with matching masks to process front. This is to avoid
218 // iterator invalidation issues while merging regions.
220 for (VPRegionBlock *Region1 : VPBlockUtils::blocksOnly<VPRegionBlock>(
221 vp_depth_first_deep(Plan.getEntry()))) {
222 if (!Region1->isReplicator())
223 continue;
224 auto *MiddleBasicBlock =
225 dyn_cast_or_null<VPBasicBlock>(Region1->getSingleSuccessor());
226 if (!MiddleBasicBlock || !MiddleBasicBlock->empty())
227 continue;
228
229 auto *Region2 =
230 dyn_cast_or_null<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());
231 if (!Region2 || !Region2->isReplicator())
232 continue;
233
234 VPValue *Mask1 = getPredicatedMask(Region1);
235 VPValue *Mask2 = getPredicatedMask(Region2);
236 if (!Mask1 || Mask1 != Mask2)
237 continue;
238
239 assert(Mask1 && Mask2 && "both region must have conditions");
240 WorkList.push_back(Region1);
241 }
242
243 // Move recipes from Region1 to its successor region, if both are triangles.
244 for (VPRegionBlock *Region1 : WorkList) {
245 if (DeletedRegions.contains(Region1))
246 continue;
247 auto *MiddleBasicBlock = cast<VPBasicBlock>(Region1->getSingleSuccessor());
248 auto *Region2 = cast<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());
249
250 VPBasicBlock *Then1 = getPredicatedThenBlock(Region1);
251 VPBasicBlock *Then2 = getPredicatedThenBlock(Region2);
252 if (!Then1 || !Then2)
253 continue;
254
255 // Note: No fusion-preventing memory dependencies are expected in either
256 // region. Such dependencies should be rejected during earlier dependence
257 // checks, which guarantee accesses can be re-ordered for vectorization.
258 //
259 // Move recipes to the successor region.
260 for (VPRecipeBase &ToMove : make_early_inc_range(reverse(*Then1)))
261 ToMove.moveBefore(*Then2, Then2->getFirstNonPhi());
262
263 auto *Merge1 = cast<VPBasicBlock>(Then1->getSingleSuccessor());
264 auto *Merge2 = cast<VPBasicBlock>(Then2->getSingleSuccessor());
265
266 // Move VPPredInstPHIRecipes from the merge block to the successor region's
267 // merge block. Update all users inside the successor region to use the
268 // original values.
269 for (VPRecipeBase &Phi1ToMove : make_early_inc_range(reverse(*Merge1))) {
270 VPValue *PredInst1 =
271 cast<VPPredInstPHIRecipe>(&Phi1ToMove)->getOperand(0);
272 VPValue *Phi1ToMoveV = Phi1ToMove.getVPSingleValue();
273 Phi1ToMoveV->replaceUsesWithIf(PredInst1, [Then2](VPUser &U, unsigned) {
274 auto *UI = dyn_cast<VPRecipeBase>(&U);
275 return UI && UI->getParent() == Then2;
276 });
277
278 Phi1ToMove.moveBefore(*Merge2, Merge2->begin());
279 }
280
281 // Finally, remove the first region.
282 for (VPBlockBase *Pred : make_early_inc_range(Region1->getPredecessors())) {
283 VPBlockUtils::disconnectBlocks(Pred, Region1);
284 VPBlockUtils::connectBlocks(Pred, MiddleBasicBlock);
285 }
286 VPBlockUtils::disconnectBlocks(Region1, MiddleBasicBlock);
287 DeletedRegions.insert(Region1);
288 }
289
290 for (VPRegionBlock *ToDelete : DeletedRegions)
291 delete ToDelete;
292 return !DeletedRegions.empty();
293}
294
296 VPlan &Plan) {
297 Instruction *Instr = PredRecipe->getUnderlyingInstr();
298 // Build the triangular if-then region.
299 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
300 assert(Instr->getParent() && "Predicated instruction not in any basic block");
301 auto *BlockInMask = PredRecipe->getMask();
302 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
303 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
304
305 // Replace predicated replicate recipe with a replicate recipe without a
306 // mask but in the replicate region.
307 auto *RecipeWithoutMask = new VPReplicateRecipe(
308 PredRecipe->getUnderlyingInstr(),
309 make_range(PredRecipe->op_begin(), std::prev(PredRecipe->op_end())),
310 PredRecipe->isUniform());
311 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", RecipeWithoutMask);
312
313 VPPredInstPHIRecipe *PHIRecipe = nullptr;
314 if (PredRecipe->getNumUsers() != 0) {
315 PHIRecipe = new VPPredInstPHIRecipe(RecipeWithoutMask);
316 PredRecipe->replaceAllUsesWith(PHIRecipe);
317 PHIRecipe->setOperand(0, RecipeWithoutMask);
318 }
319 PredRecipe->eraseFromParent();
320 auto *Exiting = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
321 VPRegionBlock *Region = new VPRegionBlock(Entry, Exiting, RegionName, true);
322
323 // Note: first set Entry as region entry and then connect successors starting
324 // from it in order, to propagate the "parent" of each VPBasicBlock.
325 VPBlockUtils::insertTwoBlocksAfter(Pred, Exiting, Entry);
326 VPBlockUtils::connectBlocks(Pred, Exiting);
327
328 return Region;
329}
330
static void addReplicateRegions(VPlan &Plan) {
  SmallVector<VPReplicateRecipe *> WorkList;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    for (VPRecipeBase &R : *VPBB)
      if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
        if (RepR->isPredicated())
          WorkList.push_back(RepR);
      }
  }

  unsigned BBNum = 0;
  for (VPReplicateRecipe *RepR : WorkList) {
    VPBasicBlock *CurrentBlock = RepR->getParent();
    VPBasicBlock *SplitBlock = CurrentBlock->splitAt(RepR->getIterator());

    BasicBlock *OrigBB = RepR->getUnderlyingInstr()->getParent();
    SplitBlock->setName(
        OrigBB->hasName() ? OrigBB->getName() + "." + Twine(BBNum++) : "");
    // Record predicated instructions for above packing optimizations.
    VPBlockBase *Region = createReplicateRegion(RepR, Plan);
    Region->setParent(CurrentBlock->getParent());
    VPBlockUtils::disconnectBlocks(CurrentBlock, SplitBlock);
    VPBlockUtils::connectBlocks(CurrentBlock, Region);
    VPBlockUtils::connectBlocks(Region, SplitBlock);
  }
}

/// Remove redundant VPBasicBlocks by merging them into their predecessor if
/// the predecessor has a single successor.
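///
/// E.g., for VPBB1 -> VPBB2 where VPBB2 is VPBB1's single successor and VPBB1
/// is VPBB2's single predecessor, VPBB2's recipes are appended to VPBB1,
/// VPBB2's successors are reconnected to VPBB1 and VPBB2 is deleted.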
static bool mergeBlocksIntoPredecessors(VPlan &Plan) {
  SmallVector<VPBasicBlock *> WorkList;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    auto *PredVPBB =
        dyn_cast_or_null<VPBasicBlock>(VPBB->getSinglePredecessor());
    if (PredVPBB && PredVPBB->getNumSuccessors() == 1)
      WorkList.push_back(VPBB);
  }

  for (VPBasicBlock *VPBB : WorkList) {
    VPBasicBlock *PredVPBB = cast<VPBasicBlock>(VPBB->getSinglePredecessor());
    for (VPRecipeBase &R : make_early_inc_range(*VPBB))
      R.moveBefore(*PredVPBB, PredVPBB->end());
    VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
    auto *ParentRegion = cast_or_null<VPRegionBlock>(VPBB->getParent());
    if (ParentRegion && ParentRegion->getExiting() == VPBB)
      ParentRegion->setExiting(PredVPBB);
    for (auto *Succ : to_vector(VPBB->successors())) {
      VPBlockUtils::disconnectBlocks(VPBB, Succ);
      VPBlockUtils::connectBlocks(PredVPBB, Succ);
    }
    delete VPBB;
  }
  return !WorkList.empty();
}

void VPlanTransforms::createAndOptimizeReplicateRegions(VPlan &Plan) {
  // Convert masked VPReplicateRecipes to if-then region blocks.
  addReplicateRegions(Plan);

  bool ShouldSimplify = true;
  while (ShouldSimplify) {
    ShouldSimplify = sinkScalarOperands(Plan);
    ShouldSimplify |= mergeReplicateRegionsIntoSuccessors(Plan);
    ShouldSimplify |= mergeBlocksIntoPredecessors(Plan);
  }
}

/// Remove redundant casts of inductions.
///
/// Such redundant casts are casts of induction variables that can be ignored,
/// because we already proved that the casted phi is equal to the uncasted phi
/// in the vectorized loop. There is no need to vectorize the cast - the same
/// value can be used for both the phi and casts in the vector loop.
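///
/// For illustration (schematic IR, not actual debug output): given an
/// induction %iv with a recorded cast
///   %cast = trunc i64 %iv to i32
/// where %cast was proven equal to the induction, users of %cast are
/// redirected to the recipe of %iv; the now-dead cast recipe is removed by
/// later dead-recipe cleanup.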
407 for (auto &Phi : Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
408 auto *IV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
409 if (!IV || IV->getTruncInst())
410 continue;
411
412 // A sequence of IR Casts has potentially been recorded for IV, which
413 // *must be bypassed* when the IV is vectorized, because the vectorized IV
414 // will produce the desired casted value. This sequence forms a def-use
415 // chain and is provided in reverse order, ending with the cast that uses
416 // the IV phi. Search for the recipe of the last cast in the chain and
417 // replace it with the original IV. Note that only the final cast is
418 // expected to have users outside the cast-chain and the dead casts left
419 // over will be cleaned up later.
420 auto &Casts = IV->getInductionDescriptor().getCastInsts();
421 VPValue *FindMyCast = IV;
422 for (Instruction *IRCast : reverse(Casts)) {
423 VPSingleDefRecipe *FoundUserCast = nullptr;
424 for (auto *U : FindMyCast->users()) {
425 auto *UserCast = dyn_cast<VPSingleDefRecipe>(U);
426 if (UserCast && UserCast->getUnderlyingValue() == IRCast) {
427 FoundUserCast = UserCast;
428 break;
429 }
430 }
431 FindMyCast = FoundUserCast;
432 }
433 FindMyCast->replaceAllUsesWith(IV);
434 }
435}
436
/// Try to replace VPWidenCanonicalIVRecipes with a widened canonical IV
/// recipe, if it exists.
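///
/// E.g. (schematic), a WIDEN-CANONICAL-INDUCTION recipe computing
///   <%cIV, %cIV + 1, ..., %cIV + VF - 1>
/// is redundant if the loop header already contains a canonical
/// VPWidenIntOrFpInductionRecipe of the same scalar type; its users can be
/// redirected to the existing widened IV.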
static void removeRedundantCanonicalIVs(VPlan &Plan) {
  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
  VPWidenCanonicalIVRecipe *WidenNewIV = nullptr;
  for (VPUser *U : CanonicalIV->users()) {
    WidenNewIV = dyn_cast<VPWidenCanonicalIVRecipe>(U);
    if (WidenNewIV)
      break;
  }

  if (!WidenNewIV)
    return;

  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);

    if (!WidenOriginalIV || !WidenOriginalIV->isCanonical() ||
        WidenOriginalIV->getScalarType() != WidenNewIV->getScalarType())
      continue;

    // Replace WidenNewIV with WidenOriginalIV if WidenOriginalIV provides
    // everything WidenNewIV's users need. That is, WidenOriginalIV will
    // generate a vector phi or all users of WidenNewIV demand the first lane
    // only.
    if (any_of(WidenOriginalIV->users(),
               [WidenOriginalIV](VPUser *U) {
                 return !U->usesScalars(WidenOriginalIV);
               }) ||
        vputils::onlyFirstLaneUsed(WidenNewIV)) {
      WidenNewIV->replaceAllUsesWith(WidenOriginalIV);
      WidenNewIV->eraseFromParent();
      return;
    }
  }
}

/// Returns true if \p R is dead and can be removed.
static bool isDeadRecipe(VPRecipeBase &R) {
  using namespace llvm::PatternMatch;
  // Do remove conditional assume instructions as their conditions may be
  // flattened.
  auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
  bool IsConditionalAssume =
      RepR && RepR->isPredicated() &&
      match(RepR->getUnderlyingInstr(), m_Intrinsic<Intrinsic::assume>());
  if (IsConditionalAssume)
    return true;

  if (R.mayHaveSideEffects())
    return false;

  // Recipe is dead if no user keeps the recipe alive.
  return all_of(R.definedValues(),
                [](VPValue *V) { return V->getNumUsers() == 0; });
}

static void removeDeadRecipes(VPlan &Plan) {
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());

  for (VPBasicBlock *VPBB :
       reverse(VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT))) {
    // The recipes in the block are processed in reverse order, to catch chains
    // of dead recipes.
    for (VPRecipeBase &R : make_early_inc_range(reverse(*VPBB))) {
      if (isDeadRecipe(R))
        R.eraseFromParent();
    }
  }
}

static VPValue *createScalarIVSteps(VPlan &Plan,
                                    InductionDescriptor::InductionKind Kind,
                                    Instruction::BinaryOps InductionOpcode,
                                    FPMathOperator *FPBinOp,
                                    ScalarEvolution &SE, Instruction *TruncI,
                                    VPValue *StartV, VPValue *Step,
                                    VPBasicBlock::iterator IP) {
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
  VPSingleDefRecipe *BaseIV = CanonicalIV;
  if (!CanonicalIV->isCanonical(Kind, StartV, Step)) {
    BaseIV = new VPDerivedIVRecipe(Kind, FPBinOp, StartV, CanonicalIV, Step);
    HeaderVPBB->insert(BaseIV, IP);
  }

  // Truncate base induction if needed.
  VPTypeAnalysis TypeInfo(Plan.getCanonicalIV()->getScalarType(),
                          SE.getContext());
  Type *ResultTy = TypeInfo.inferScalarType(BaseIV);
  if (TruncI) {
    Type *TruncTy = TruncI->getType();
    assert(ResultTy->getScalarSizeInBits() > TruncTy->getScalarSizeInBits() &&
           "Not truncating.");
    assert(ResultTy->isIntegerTy() && "Truncation requires an integer type");
    BaseIV = new VPScalarCastRecipe(Instruction::Trunc, BaseIV, TruncTy);
    HeaderVPBB->insert(BaseIV, IP);
    ResultTy = TruncTy;
  }

  // Truncate step if needed.
  Type *StepTy = TypeInfo.inferScalarType(Step);
  if (ResultTy != StepTy) {
    assert(StepTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits() &&
           "Not truncating.");
    assert(StepTy->isIntegerTy() && "Truncation requires an integer type");
    Step = new VPScalarCastRecipe(Instruction::Trunc, Step, ResultTy);
    auto *VecPreheader =
        cast<VPBasicBlock>(HeaderVPBB->getSingleHierarchicalPredecessor());
    VecPreheader->appendRecipe(Step->getDefiningRecipe());
  }

  VPScalarIVStepsRecipe *Steps = new VPScalarIVStepsRecipe(
      BaseIV, Step, InductionOpcode,
      FPBinOp ? FPBinOp->getFastMathFlags() : FastMathFlags());
  HeaderVPBB->insert(Steps, IP);
  return Steps;
}

557/// Legalize VPWidenPointerInductionRecipe, by replacing it with a PtrAdd
558/// (IndStart, ScalarIVSteps (0, Step)) if only its scalar values are used, as
559/// VPWidenPointerInductionRecipe will generate vectors only. If some users
560/// require vectors while other require scalars, the scalar uses need to extract
561/// the scalars from the generated vectors (Note that this is different to how
562/// int/fp inductions are handled). Also optimize VPWidenIntOrFpInductionRecipe,
563/// if any of its users needs scalar values, by providing them scalar steps
564/// built on the canonical scalar IV and update the original IV's users. This is
565/// an optional optimization to reduce the needs of vector extracts.
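///
/// Schematically (recipe names abbreviated, not actual debug output), a
/// pointer induction with only scalar users
///   %ptr.iv = WIDEN-POINTER-INDUCTION %start, %step
/// becomes
///   %steps    = SCALAR-STEPS 0, %step
///   %next.gep = ptradd %start, %steps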
static void legalizeAndOptimizeInductions(VPlan &Plan, ScalarEvolution &SE) {
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  bool HasOnlyVectorVFs = !Plan.hasVF(ElementCount::getFixed(1));
  VPBasicBlock::iterator InsertPt = HeaderVPBB->getFirstNonPhi();
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    // Replace wide pointer inductions which have only their scalars used by
    // PtrAdd(IndStart, ScalarIVSteps (0, Step)).
    if (auto *PtrIV = dyn_cast<VPWidenPointerInductionRecipe>(&Phi)) {
      if (!PtrIV->onlyScalarsGenerated(Plan.hasScalableVF()))
        continue;

      const InductionDescriptor &ID = PtrIV->getInductionDescriptor();
      VPValue *StartV =
          Plan.getOrAddLiveIn(ConstantInt::get(ID.getStep()->getType(), 0));
      VPValue *StepV = PtrIV->getOperand(1);
      VPRecipeBase *Steps =
          createScalarIVSteps(Plan, InductionDescriptor::IK_IntInduction,
                              Instruction::Add, nullptr, SE, nullptr, StartV,
                              StepV, InsertPt)
              ->getDefiningRecipe();

      auto *Recipe =
          new VPInstruction(VPInstruction::PtrAdd,
                            {PtrIV->getStartValue(), Steps->getVPSingleValue()},
                            PtrIV->getDebugLoc(), "next.gep");

      Recipe->insertAfter(Steps);
      PtrIV->replaceAllUsesWith(Recipe);
      continue;
    }

    // Replace widened induction with scalar steps for users that only use
    // scalars.
    auto *WideIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (!WideIV)
      continue;
    if (HasOnlyVectorVFs && none_of(WideIV->users(), [WideIV](VPUser *U) {
          return U->usesScalars(WideIV);
        }))
      continue;

    const InductionDescriptor &ID = WideIV->getInductionDescriptor();
    VPValue *Steps = createScalarIVSteps(
        Plan, ID.getKind(), ID.getInductionOpcode(),
        dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()), SE,
        WideIV->getTruncInst(), WideIV->getStartValue(), WideIV->getStepValue(),
        InsertPt);

    // Update scalar users of IV to use Steps instead.
    if (!HasOnlyVectorVFs)
      WideIV->replaceAllUsesWith(Steps);
    else
      WideIV->replaceUsesWithIf(Steps, [WideIV](VPUser &U, unsigned) {
        return U.usesScalars(WideIV);
      });
  }
}

/// Remove redundant ExpandSCEVRecipes in \p Plan's entry block by replacing
/// them with already existing recipes expanding the same SCEV expression.
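///
/// E.g. (schematic), two EXPAND-SCEV recipes for the same expression
///   %a = EXPAND-SCEV {0,+,%n}
///   %b = EXPAND-SCEV {0,+,%n}
/// are collapsed by replacing all uses of %b with %a and erasing %b.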
static void removeRedundantExpandSCEVRecipes(VPlan &Plan) {
  DenseMap<const SCEV *, VPValue *> SCEV2VPV;

  for (VPRecipeBase &R :
       make_early_inc_range(*Plan.getEntry())) {
    auto *ExpR = dyn_cast<VPExpandSCEVRecipe>(&R);
    if (!ExpR)
      continue;

    auto I = SCEV2VPV.insert({ExpR->getSCEV(), ExpR});
    if (I.second)
      continue;
    ExpR->replaceAllUsesWith(I.first->second);
    ExpR->eraseFromParent();
  }
}

static void recursivelyDeleteDeadRecipes(VPValue *V) {
  SmallVector<VPValue *> WorkList;
  SmallPtrSet<VPValue *, 8> Seen;
  WorkList.push_back(V);

  while (!WorkList.empty()) {
    VPValue *Cur = WorkList.pop_back_val();
    if (!Seen.insert(Cur).second)
      continue;
    VPRecipeBase *R = Cur->getDefiningRecipe();
    if (!R)
      continue;
    if (!isDeadRecipe(*R))
      continue;
    WorkList.append(R->op_begin(), R->op_end());
    R->eraseFromParent();
  }
}

void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
                                         unsigned BestUF,
                                         PredicatedScalarEvolution &PSE) {
  assert(Plan.hasVF(BestVF) && "BestVF is not available in Plan");
  assert(Plan.hasUF(BestUF) && "BestUF is not available in Plan");
  VPBasicBlock *ExitingVPBB =
      Plan.getVectorLoopRegion()->getExitingBasicBlock();
  auto *Term = &ExitingVPBB->back();
  // Try to simplify the branch condition if TC <= VF * UF when preparing to
  // execute the plan for the main vector loop. We only do this if the
  // terminator is:
  //  1. BranchOnCount, or
  //  2. BranchOnCond where the input is Not(ActiveLaneMask).
  using namespace llvm::VPlanPatternMatch;
  if (!match(Term, m_BranchOnCount(m_VPValue(), m_VPValue())) &&
      !match(Term,
             m_BranchOnCond(m_Not(m_ActiveLaneMask(m_VPValue(), m_VPValue())))))
    return;

  Type *IdxTy =
      Plan.getCanonicalIV()->getStartValue()->getLiveInIRValue()->getType();
  const SCEV *TripCount = createTripCountSCEV(IdxTy, PSE);
  ScalarEvolution &SE = *PSE.getSE();
  ElementCount NumElements = BestVF.multiplyCoefficientBy(BestUF);
  const SCEV *C = SE.getElementCount(TripCount->getType(), NumElements);
  if (TripCount->isZero() ||
      !SE.isKnownPredicate(CmpInst::ICMP_ULE, TripCount, C))
    return;

  LLVMContext &Ctx = SE.getContext();
  auto *BOC =
      new VPInstruction(VPInstruction::BranchOnCond,
                        {Plan.getOrAddLiveIn(ConstantInt::getTrue(Ctx))});

  SmallVector<VPValue *> PossiblyDead(Term->operands());
  Term->eraseFromParent();
  for (VPValue *Op : PossiblyDead)
    recursivelyDeleteDeadRecipes(Op);
  ExitingVPBB->appendRecipe(BOC);
  Plan.setVF(BestVF);
  Plan.setUF(BestUF);
  // TODO: Further simplifications are possible
  //      1. Replace inductions with constants.
  //      2. Replace vector loop region with VPBasicBlock.
}

#ifndef NDEBUG
static VPRegionBlock *GetReplicateRegion(VPRecipeBase *R) {
  auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
  if (Region && Region->isReplicator()) {
    assert(Region->getNumSuccessors() == 1 &&
           Region->getNumPredecessors() == 1 && "Expected SESE region!");
    assert(R->getParent()->size() == 1 &&
           "A recipe in an original replicator region must be the only "
           "recipe in its block");
    return Region;
  }
  return nullptr;
}
#endif

static bool properlyDominates(const VPRecipeBase *A, const VPRecipeBase *B,
                              VPDominatorTree &VPDT) {
  if (A == B)
    return false;

  auto LocalComesBefore = [](const VPRecipeBase *A, const VPRecipeBase *B) {
    for (auto &R : *A->getParent()) {
      if (&R == A)
        return true;
      if (&R == B)
        return false;
    }
    llvm_unreachable("recipe not found");
  };
  const VPBlockBase *ParentA = A->getParent();
  const VPBlockBase *ParentB = B->getParent();
  if (ParentA == ParentB)
    return LocalComesBefore(A, B);

  assert(!GetReplicateRegion(const_cast<VPRecipeBase *>(A)) &&
         "No replicate regions expected at this point");
  assert(!GetReplicateRegion(const_cast<VPRecipeBase *>(B)) &&
         "No replicate regions expected at this point");
  return VPDT.properlyDominates(ParentA, ParentB);
}

/// Sink users of \p FOR after the recipe defining the previous value \p
/// Previous of the recurrence. \returns true if all users of \p FOR could be
/// re-arranged as needed or false if it is not possible.
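///
/// Schematically (recipes abbreviated), for
///   %for  = FIRST-ORDER-RECURRENCE-PHI [ %init ], [ %prev ]
///   %use  = add %for, 1   ; user of the recurrence
///   %prev = ...           ; defines the previous value
/// %use is moved after %prev, so that a splice of (%for, %prev) can be
/// inserted before any user of the recurrence.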
static bool
sinkRecurrenceUsersAfterPrevious(VPFirstOrderRecurrencePHIRecipe *FOR,
                                 VPRecipeBase *Previous,
                                 VPDominatorTree &VPDT) {
  // Collect recipes that need sinking.
  SmallVector<VPRecipeBase *> WorkList;
  SmallPtrSet<VPRecipeBase *, 8> Seen;
  Seen.insert(Previous);
  auto TryToPushSinkCandidate = [&](VPRecipeBase *SinkCandidate) {
    // The previous value must not depend on the users of the recurrence phi.
    // In that case, FOR is not a fixed order recurrence.
    if (SinkCandidate == Previous)
      return false;

    if (isa<VPHeaderPHIRecipe>(SinkCandidate) ||
        !Seen.insert(SinkCandidate).second ||
        properlyDominates(Previous, SinkCandidate, VPDT))
      return true;

    if (SinkCandidate->mayHaveSideEffects())
      return false;

    WorkList.push_back(SinkCandidate);
    return true;
  };

  // Recursively sink users of FOR after Previous.
  WorkList.push_back(FOR);
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    VPRecipeBase *Current = WorkList[I];
    assert(Current->getNumDefinedValues() == 1 &&
           "only recipes with a single defined value expected");

    for (VPUser *User : Current->getVPSingleValue()->users()) {
      if (auto *R = dyn_cast<VPRecipeBase>(User))
        if (!TryToPushSinkCandidate(R))
          return false;
    }
  }

  // Keep recipes to sink ordered by dominance so earlier instructions are
  // processed first.
  sort(WorkList, [&VPDT](const VPRecipeBase *A, const VPRecipeBase *B) {
    return properlyDominates(A, B, VPDT);
  });

  for (VPRecipeBase *SinkCandidate : WorkList) {
    if (SinkCandidate == FOR)
      continue;

    SinkCandidate->moveAfter(Previous);
    Previous = SinkCandidate;
  }
  return true;
}

810 VPBuilder &Builder) {
811 VPDominatorTree VPDT;
812 VPDT.recalculate(Plan);
813
815 for (VPRecipeBase &R :
817 if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
818 RecurrencePhis.push_back(FOR);
819
820 for (VPFirstOrderRecurrencePHIRecipe *FOR : RecurrencePhis) {
822 VPRecipeBase *Previous = FOR->getBackedgeValue()->getDefiningRecipe();
823 // Fixed-order recurrences do not contain cycles, so this loop is guaranteed
824 // to terminate.
825 while (auto *PrevPhi =
826 dyn_cast_or_null<VPFirstOrderRecurrencePHIRecipe>(Previous)) {
827 assert(PrevPhi->getParent() == FOR->getParent());
828 assert(SeenPhis.insert(PrevPhi).second);
829 Previous = PrevPhi->getBackedgeValue()->getDefiningRecipe();
830 }
831
832 if (!sinkRecurrenceUsersAfterPrevious(FOR, Previous, VPDT))
833 return false;
834
835 // Introduce a recipe to combine the incoming and previous values of a
836 // fixed-order recurrence.
837 VPBasicBlock *InsertBlock = Previous->getParent();
838 if (isa<VPHeaderPHIRecipe>(Previous))
839 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
840 else
841 Builder.setInsertPoint(InsertBlock, std::next(Previous->getIterator()));
842
843 auto *RecurSplice = cast<VPInstruction>(
845 {FOR, FOR->getBackedgeValue()}));
846
847 FOR->replaceAllUsesWith(RecurSplice);
848 // Set the first operand of RecurSplice to FOR again, after replacing
849 // all users.
850 RecurSplice->setOperand(0, FOR);
851 }
852 return true;
853}
854
856 SetVector<VPUser *> Users(V->user_begin(), V->user_end());
857 for (unsigned I = 0; I != Users.size(); ++I) {
858 VPRecipeBase *Cur = dyn_cast<VPRecipeBase>(Users[I]);
859 if (!Cur || isa<VPHeaderPHIRecipe>(Cur))
860 continue;
861 for (VPValue *V : Cur->definedValues())
862 Users.insert(V->user_begin(), V->user_end());
863 }
864 return Users.takeVector();
865}
866
868 for (VPRecipeBase &R :
870 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
871 if (!PhiR)
872 continue;
873 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
874 RecurKind RK = RdxDesc.getRecurrenceKind();
875 if (RK != RecurKind::Add && RK != RecurKind::Mul)
876 continue;
877
878 for (VPUser *U : collectUsersRecursively(PhiR))
879 if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(U)) {
880 RecWithFlags->dropPoisonGeneratingFlags();
881 }
882 }
883}
884
/// Try to simplify recipe \p R.
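///
/// Patterns handled below include, schematically:
///   BLEND %a, %a, %m              -> %a
///   trunc (zext %a to i64) to i32 -> %a (or a single zext/sext/trunc of %a,
///                                    depending on the types involved)
///   mul %a, 1                     -> %a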
static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
  // Try to remove redundant blend recipes.
  if (auto *Blend = dyn_cast<VPBlendRecipe>(&R)) {
    VPValue *Inc0 = Blend->getIncomingValue(0);
    for (unsigned I = 1; I != Blend->getNumIncomingValues(); ++I)
      if (Inc0 != Blend->getIncomingValue(I))
        return;
    Blend->replaceAllUsesWith(Inc0);
    Blend->eraseFromParent();
    return;
  }

  using namespace llvm::VPlanPatternMatch;
  VPValue *A;
  if (match(&R, m_Trunc(m_ZExtOrSExt(m_VPValue(A))))) {
    VPValue *Trunc = R.getVPSingleValue();
    Type *TruncTy = TypeInfo.inferScalarType(Trunc);
    Type *ATy = TypeInfo.inferScalarType(A);
    if (TruncTy == ATy) {
      Trunc->replaceAllUsesWith(A);
    } else {
      // Don't replace a scalarizing recipe with a widened cast.
      if (isa<VPReplicateRecipe>(&R))
        return;
      if (ATy->getScalarSizeInBits() < TruncTy->getScalarSizeInBits()) {
        unsigned ExtOpcode = match(R.getOperand(0), m_SExt(m_VPValue()))
                                 ? Instruction::SExt
                                 : Instruction::ZExt;
        auto *VPC =
            new VPWidenCastRecipe(Instruction::CastOps(ExtOpcode), A, TruncTy);
        VPC->insertBefore(&R);
        Trunc->replaceAllUsesWith(VPC);
      } else if (ATy->getScalarSizeInBits() > TruncTy->getScalarSizeInBits()) {
        auto *VPC = new VPWidenCastRecipe(Instruction::Trunc, A, TruncTy);
        VPC->insertBefore(&R);
        Trunc->replaceAllUsesWith(VPC);
      }
    }
#ifndef NDEBUG
    // Verify that the cached type info for both A and its users is still
    // accurate by comparing it to freshly computed types.
    VPTypeAnalysis TypeInfo2(
        R.getParent()->getPlan()->getCanonicalIV()->getScalarType(),
        TypeInfo.getContext());
    assert(TypeInfo.inferScalarType(A) == TypeInfo2.inferScalarType(A));
    for (VPUser *U : A->users()) {
      auto *R = dyn_cast<VPRecipeBase>(U);
      if (!R)
        continue;
      for (VPValue *VPV : R->definedValues())
        assert(TypeInfo.inferScalarType(VPV) == TypeInfo2.inferScalarType(VPV));
    }
#endif
  }

  if (match(&R, m_CombineOr(m_Mul(m_VPValue(A), m_SpecificInt(1)),
                            m_Mul(m_SpecificInt(1), m_VPValue(A)))))
    return R.getVPSingleValue()->replaceAllUsesWith(A);
}

/// Try to simplify the recipes in \p Plan.
static void simplifyRecipes(VPlan &Plan, LLVMContext &Ctx) {
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());
  VPTypeAnalysis TypeInfo(Plan.getCanonicalIV()->getScalarType(), Ctx);
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      simplifyRecipe(R, TypeInfo);
    }
  }
}

960 VPlan &Plan, const MapVector<Instruction *, uint64_t> &MinBWs,
961 LLVMContext &Ctx) {
962#ifndef NDEBUG
963 // Count the processed recipes and cross check the count later with MinBWs
964 // size, to make sure all entries in MinBWs have been handled.
965 unsigned NumProcessedRecipes = 0;
966#endif
967 // Keep track of created truncates, so they can be re-used. Note that we
968 // cannot use RAUW after creating a new truncate, as this would could make
969 // other uses have different types for their operands, making them invalidly
970 // typed.
972 VPTypeAnalysis TypeInfo(Plan.getCanonicalIV()->getScalarType(), Ctx);
973 VPBasicBlock *PH = Plan.getEntry();
974 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
976 for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
979 continue;
980
981 VPValue *ResultVPV = R.getVPSingleValue();
982 auto *UI = cast_or_null<Instruction>(ResultVPV->getUnderlyingValue());
983 unsigned NewResSizeInBits = MinBWs.lookup(UI);
984 if (!NewResSizeInBits)
985 continue;
986
987#ifndef NDEBUG
988 NumProcessedRecipes++;
989#endif
990 // If the value wasn't vectorized, we must maintain the original scalar
991 // type. Skip those here, after incrementing NumProcessedRecipes. Also
992 // skip casts which do not need to be handled explicitly here, as
993 // redundant casts will be removed during recipe simplification.
994 if (isa<VPReplicateRecipe, VPWidenCastRecipe>(&R)) {
995#ifndef NDEBUG
996 // If any of the operands is a live-in and not used by VPWidenRecipe or
997 // VPWidenSelectRecipe, but in MinBWs, make sure it is counted as
998 // processed as well. When MinBWs is currently constructed, there is no
999 // information about whether recipes are widened or replicated and in
1000 // case they are reciplicated the operands are not truncated. Counting
1001 // them them here ensures we do not miss any recipes in MinBWs.
1002 // TODO: Remove once the analysis is done on VPlan.
1003 for (VPValue *Op : R.operands()) {
1004 if (!Op->isLiveIn())
1005 continue;
1006 auto *UV = dyn_cast_or_null<Instruction>(Op->getUnderlyingValue());
1007 if (UV && MinBWs.contains(UV) && !ProcessedTruncs.contains(Op) &&
1008 all_of(Op->users(), [](VPUser *U) {
1009 return !isa<VPWidenRecipe, VPWidenSelectRecipe>(U);
1010 })) {
1011 // Add an entry to ProcessedTruncs to avoid counting the same
1012 // operand multiple times.
1013 ProcessedTruncs[Op] = nullptr;
1014 NumProcessedRecipes += 1;
1015 }
1016 }
1017#endif
1018 continue;
1019 }
1020
1021 Type *OldResTy = TypeInfo.inferScalarType(ResultVPV);
1022 unsigned OldResSizeInBits = OldResTy->getScalarSizeInBits();
1023 assert(OldResTy->isIntegerTy() && "only integer types supported");
1024 (void)OldResSizeInBits;
1025
1026 auto *NewResTy = IntegerType::get(Ctx, NewResSizeInBits);
1027
1028 // Any wrapping introduced by shrinking this operation shouldn't be
1029 // considered undefined behavior. So, we can't unconditionally copy
1030 // arithmetic wrapping flags to VPW.
1031 if (auto *VPW = dyn_cast<VPRecipeWithIRFlags>(&R))
1032 VPW->dropPoisonGeneratingFlags();
1033
1034 if (OldResSizeInBits != NewResSizeInBits) {
1035 // Extend result to original width.
1036 auto *Ext =
1037 new VPWidenCastRecipe(Instruction::ZExt, ResultVPV, OldResTy);
1038 Ext->insertAfter(&R);
1039 ResultVPV->replaceAllUsesWith(Ext);
1040 Ext->setOperand(0, ResultVPV);
1041 assert(OldResSizeInBits > NewResSizeInBits && "Nothing to shrink?");
1042 } else
1043 assert(cast<VPWidenRecipe>(&R)->getOpcode() == Instruction::ICmp &&
1044 "Only ICmps should not need extending the result.");
1045
1046 assert(!isa<VPWidenStoreRecipe>(&R) && "stores cannot be narrowed");
1047 if (isa<VPWidenLoadRecipe>(&R))
1048 continue;
1049
1050 // Shrink operands by introducing truncates as needed.
1051 unsigned StartIdx = isa<VPWidenSelectRecipe>(&R) ? 1 : 0;
1052 for (unsigned Idx = StartIdx; Idx != R.getNumOperands(); ++Idx) {
1053 auto *Op = R.getOperand(Idx);
1054 unsigned OpSizeInBits =
1056 if (OpSizeInBits == NewResSizeInBits)
1057 continue;
1058 assert(OpSizeInBits > NewResSizeInBits && "nothing to truncate");
1059 auto [ProcessedIter, IterIsEmpty] =
1060 ProcessedTruncs.insert({Op, nullptr});
1061 VPWidenCastRecipe *NewOp =
1062 IterIsEmpty
1063 ? new VPWidenCastRecipe(Instruction::Trunc, Op, NewResTy)
1064 : ProcessedIter->second;
1065 R.setOperand(Idx, NewOp);
1066 if (!IterIsEmpty)
1067 continue;
1068 ProcessedIter->second = NewOp;
1069 if (!Op->isLiveIn()) {
1070 NewOp->insertBefore(&R);
1071 } else {
1072 PH->appendRecipe(NewOp);
1073#ifndef NDEBUG
1074 auto *OpInst = dyn_cast<Instruction>(Op->getLiveInIRValue());
1075 bool IsContained = MinBWs.contains(OpInst);
1076 NumProcessedRecipes += IsContained;
1077#endif
1078 }
1079 }
1080
1081 }
1082 }
1083
1084 assert(MinBWs.size() == NumProcessedRecipes &&
1085 "some entries in MinBWs haven't been processed");
1086}
1087
void VPlanTransforms::optimize(VPlan &Plan, ScalarEvolution &SE) {
  removeRedundantCanonicalIVs(Plan);
  removeRedundantInductionCasts(Plan);

  simplifyRecipes(Plan, SE.getContext());
  legalizeAndOptimizeInductions(Plan, SE);
  removeDeadRecipes(Plan);

  createAndOptimizeReplicateRegions(Plan);

  removeRedundantExpandSCEVRecipes(Plan);
  mergeBlocksIntoPredecessors(Plan);
}

// Add a VPActiveLaneMaskPHIRecipe and related recipes to \p Plan and replace
// the loop terminator with a branch-on-cond recipe with the negated
// active-lane-mask as operand. Note that this turns the loop into an
// uncountable one. Only the existing terminator is replaced, all other
// existing recipes/users remain unchanged, except for poison-generating flags
// being dropped from the canonical IV increment. Return the created
// VPActiveLaneMaskPHIRecipe.
//
// The function uses the following definitions:
//
//  %TripCount = DataWithControlFlowWithoutRuntimeCheck ?
//    calculate-trip-count-minus-VF (original TC) : original TC
//  %IncrementValue = DataWithControlFlowWithoutRuntimeCheck ?
//     CanonicalIVPhi : CanonicalIVIncrement
//  %StartV is the canonical induction start value.
//
// The function adds the following recipes:
//
// vector.ph:
//   %TripCount = calculate-trip-count-minus-VF (original TC)
//       [if DataWithControlFlowWithoutRuntimeCheck]
//   %EntryInc = canonical-iv-increment-for-part %StartV
//   %EntryALM = active-lane-mask %EntryInc, %TripCount
//
// vector.body:
//   ...
//   %P = active-lane-mask-phi [ %EntryALM, %vector.ph ], [ %ALM, %vector.body ]
//   ...
//   %InLoopInc = canonical-iv-increment-for-part %IncrementValue
//   %ALM = active-lane-mask %InLoopInc, TripCount
//   %Negated = Not %ALM
//   branch-on-cond %Negated
//
static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch(
    VPlan &Plan, bool DataAndControlFlowWithoutRuntimeCheck) {
  VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
  VPBasicBlock *EB = TopRegion->getExitingBasicBlock();
  auto *CanonicalIVPHI = Plan.getCanonicalIV();
  VPValue *StartV = CanonicalIVPHI->getStartValue();

  auto *CanonicalIVIncrement =
      cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
  // TODO: Check if dropping the flags is needed if
  // !DataAndControlFlowWithoutRuntimeCheck.
  CanonicalIVIncrement->dropPoisonGeneratingFlags();
  DebugLoc DL = CanonicalIVIncrement->getDebugLoc();
  // We can't use StartV directly in the ActiveLaneMask VPInstruction, since
  // we have to take unrolling into account. Each part needs to start at
  //   Part * VF
  auto *VecPreheader = cast<VPBasicBlock>(TopRegion->getSinglePredecessor());
  VPBuilder Builder(VecPreheader);

  // Create the ActiveLaneMask instruction using the correct start values.
  VPValue *TC = Plan.getTripCount();

  VPValue *TripCount, *IncrementValue;
  if (!DataAndControlFlowWithoutRuntimeCheck) {
    // When the loop is guarded by a runtime overflow check for the loop
    // induction variable increment by VF, we can increment the value before
    // the get.active.lane mask and use the unmodified tripcount.
    IncrementValue = CanonicalIVIncrement;
    TripCount = TC;
  } else {
    // When avoiding a runtime check, the active.lane.mask inside the loop
    // uses a modified trip count and the induction variable increment is
    // done after the active.lane.mask intrinsic is called.
    IncrementValue = CanonicalIVPHI;
    TripCount = Builder.createNaryOp(VPInstruction::CalculateTripCountMinusVF,
                                     {TC}, DL);
  }
  auto *EntryIncrement = Builder.createOverflowingOp(
      VPInstruction::CanonicalIVIncrementForPart, {StartV}, {false, false}, DL,
      "index.part.next");

  // Create the active lane mask instruction in the VPlan preheader.
  auto *EntryALM =
      Builder.createNaryOp(VPInstruction::ActiveLaneMask, {EntryIncrement, TC},
                           DL, "active.lane.mask.entry");

  // Now create the ActiveLaneMaskPhi recipe in the main loop using the
  // preheader ActiveLaneMask instruction.
  auto *LaneMaskPhi = new VPActiveLaneMaskPHIRecipe(EntryALM, DebugLoc());
  LaneMaskPhi->insertAfter(CanonicalIVPHI);

  // Create the active lane mask for the next iteration of the loop before the
  // original terminator.
  VPRecipeBase *OriginalTerminator = EB->getTerminator();
  Builder.setInsertPoint(OriginalTerminator);
  auto *InLoopIncrement =
      Builder.createOverflowingOp(VPInstruction::CanonicalIVIncrementForPart,
                                  {IncrementValue}, {false, false}, DL);
  auto *ALM = Builder.createNaryOp(VPInstruction::ActiveLaneMask,
                                   {InLoopIncrement, TripCount}, DL,
                                   "active.lane.mask.next");
  LaneMaskPhi->addOperand(ALM);

  // Replace the original terminator with BranchOnCond. We have to invert the
  // mask here because a true condition means jumping to the exit block.
  auto *NotMask = Builder.createNot(ALM, DL);
  Builder.createNaryOp(VPInstruction::BranchOnCond, {NotMask}, DL);
  OriginalTerminator->eraseFromParent();
  return LaneMaskPhi;
}

/// Replaces (ICMP_ULE, WideCanonicalIV, backedge-taken-count) pattern using
/// the given \p Idiom.
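///
/// That is (schematic), a header-mask compare
///   %mask = icmp ule %wide.canonical.iv, %backedge.taken.count
/// has its users redirected to \p Idiom, e.g. an active-lane-mask computation
/// or an all-true mask.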
static void
replaceHeaderPredicateWith(VPlan &Plan, VPValue &Idiom,
                           function_ref<bool(VPUser &, unsigned)> Cond = {}) {
  auto *FoundWidenCanonicalIVUser =
      find_if(Plan.getCanonicalIV()->users(),
              [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
  if (FoundWidenCanonicalIVUser == Plan.getCanonicalIV()->users().end())
    return;
  auto *WideCanonicalIV =
      cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
  // Walk users of WideCanonicalIV and replace all compares of the form
  // (ICMP_ULE, WideCanonicalIV, backedge-taken-count) with
  // the given idiom VPValue.
  VPValue *BTC = Plan.getOrCreateBackedgeTakenCount();
  for (VPUser *U : SmallVector<VPUser *>(WideCanonicalIV->users())) {
    auto *CompareToReplace = dyn_cast<VPInstruction>(U);
    if (!CompareToReplace ||
        CompareToReplace->getOpcode() != Instruction::ICmp ||
        CompareToReplace->getPredicate() != CmpInst::ICMP_ULE ||
        CompareToReplace->getOperand(1) != BTC)
      continue;

    assert(CompareToReplace->getOperand(0) == WideCanonicalIV &&
           "WidenCanonicalIV must be the first operand of the compare");
    if (Cond) {
      CompareToReplace->replaceUsesWithIf(&Idiom, Cond);
      if (!CompareToReplace->getNumUsers())
        CompareToReplace->eraseFromParent();
    } else {
      CompareToReplace->replaceAllUsesWith(&Idiom);
      CompareToReplace->eraseFromParent();
    }
  }
  if (!WideCanonicalIV->getNumUsers())
    WideCanonicalIV->eraseFromParent();
}
1244
1246 VPlan &Plan, bool UseActiveLaneMaskForControlFlow,
1249 UseActiveLaneMaskForControlFlow) &&
1250 "DataAndControlFlowWithoutRuntimeCheck implies "
1251 "UseActiveLaneMaskForControlFlow");
1252
1253 auto FoundWidenCanonicalIVUser =
1254 find_if(Plan.getCanonicalIV()->users(),
1255 [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
1256 assert(FoundWidenCanonicalIVUser &&
1257 "Must have widened canonical IV when tail folding!");
1258 auto *WideCanonicalIV =
1259 cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
1260 VPSingleDefRecipe *LaneMask;
1261 if (UseActiveLaneMaskForControlFlow) {
1264 } else {
1265 VPBuilder B = VPBuilder::getToInsertAfter(WideCanonicalIV);
1266 LaneMask = B.createNaryOp(VPInstruction::ActiveLaneMask,
1267 {WideCanonicalIV, Plan.getTripCount()}, nullptr,
1268 "active.lane.mask");
1269 }
1270
1271 // Walk users of WideCanonicalIV and replace all compares of the form
1272 // (ICMP_ULE, WideCanonicalIV, backedge-taken-count) with an
1273 // active-lane-mask.
1274 replaceHeaderPredicateWith(Plan, *LaneMask);
1275}
1276
/// Add a VPEVLBasedIVPHIRecipe and related recipes to \p Plan and replace all
/// uses except the canonical IV increment of VPCanonicalIVPHIRecipe with a
/// VPEVLBasedIVPHIRecipe. VPCanonicalIVPHIRecipe is used only for loop
/// iterations counting after this transformation.
///
/// The function uses the following definitions:
/// %StartV is the canonical induction start value.
///
/// The function adds the following recipes:
///
/// vector.ph:
/// ...
///
/// vector.body:
/// ...
/// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
///                                               [ %NextEVLIV, %vector.body ]
/// %VPEVL = EXPLICIT-VECTOR-LENGTH %EVLPhi, original TC
/// ...
/// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
/// ...
///
void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) {
  VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  auto *CanonicalIVPHI = Plan.getCanonicalIV();
  VPValue *StartV = CanonicalIVPHI->getStartValue();

  // TODO: revisit this and try to remove the mask operand.
  // Walk VPWidenMemoryRecipe users of WideCanonicalIV and replace all
  // compares of the form (ICMP_ULE, WideCanonicalIV, backedge-taken-count),
  // used as mask in VPWidenMemoryRecipe, with an all-true mask.
  Value *TrueMask =
      ConstantInt::getTrue(CanonicalIVPHI->getScalarType()->getContext());
  VPValue *VPTrueMask = Plan.getOrAddLiveIn(TrueMask);
  replaceHeaderPredicateWith(Plan, *VPTrueMask, [](VPUser &U, unsigned) {
    return isa<VPWidenMemoryRecipe>(U);
  });
  // Now create the ExplicitVectorLengthPhi recipe in the main loop.
  auto *EVLPhi = new VPEVLBasedIVPHIRecipe(StartV, DebugLoc());
  EVLPhi->insertAfter(CanonicalIVPHI);
  auto *VPEVL = new VPInstruction(VPInstruction::ExplicitVectorLength,
                                  {EVLPhi, Plan.getTripCount()});
  VPEVL->insertBefore(*Header, Header->getFirstNonPhi());

  auto *CanonicalIVIncrement =
      cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
  VPSingleDefRecipe *OpVPEVL = VPEVL;
  if (unsigned IVSize = CanonicalIVPHI->getScalarType()->getScalarSizeInBits();
      IVSize != 32) {
    OpVPEVL = new VPScalarCastRecipe(IVSize < 32 ? Instruction::Trunc
                                                 : Instruction::ZExt,
                                     OpVPEVL, CanonicalIVPHI->getScalarType());
    OpVPEVL->insertBefore(CanonicalIVIncrement);
  }
  auto *NextEVLIV =
      new VPInstruction(Instruction::Add, {OpVPEVL, EVLPhi},
                        {CanonicalIVIncrement->hasNoUnsignedWrap(),
                         CanonicalIVIncrement->hasNoSignedWrap()},
                        CanonicalIVIncrement->getDebugLoc(), "index.evl.next");
  NextEVLIV->insertBefore(CanonicalIVIncrement);
  EVLPhi->addOperand(NextEVLIV);

  // Replace all uses of VPCanonicalIVPHIRecipe by
  // VPEVLBasedIVPHIRecipe except for the canonical IV increment.
  CanonicalIVPHI->replaceAllUsesWith(EVLPhi);
  CanonicalIVIncrement->setOperand(0, CanonicalIVPHI);
  // TODO: support unroll factor > 1.
  Plan.setUF(1);
}

1348 VPlan &Plan, function_ref<bool(BasicBlock *)> BlockNeedsPredication) {
1349 // Collect recipes in the backward slice of `Root` that may generate a poison
1350 // value that is used after vectorization.
1352 auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1354 Worklist.push_back(Root);
1355
1356 // Traverse the backward slice of Root through its use-def chain.
1357 while (!Worklist.empty()) {
1358 VPRecipeBase *CurRec = Worklist.back();
1359 Worklist.pop_back();
1360
1361 if (!Visited.insert(CurRec).second)
1362 continue;
1363
1364 // Prune search if we find another recipe generating a widen memory
1365 // instruction. Widen memory instructions involved in address computation
1366 // will lead to gather/scatter instructions, which don't need to be
1367 // handled.
1368 if (isa<VPWidenMemoryRecipe>(CurRec) || isa<VPInterleaveRecipe>(CurRec) ||
1369 isa<VPScalarIVStepsRecipe>(CurRec) || isa<VPHeaderPHIRecipe>(CurRec))
1370 continue;
1371
1372 // This recipe contributes to the address computation of a widen
1373 // load/store. If the underlying instruction has poison-generating flags,
1374 // drop them directly.
1375 if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(CurRec)) {
1376 VPValue *A, *B;
1377 using namespace llvm::VPlanPatternMatch;
1378 // Dropping disjoint from an OR may yield incorrect results, as some
1379 // analysis may have converted it to an Add implicitly (e.g. SCEV used
1380 // for dependence analysis). Instead, replace it with an equivalent Add.
1381 // This is possible as all users of the disjoint OR only access lanes
1382 // where the operands are disjoint or poison otherwise.
1383 if (match(RecWithFlags, m_Or(m_VPValue(A), m_VPValue(B))) &&
1384 RecWithFlags->isDisjoint()) {
1385 VPBuilder Builder(RecWithFlags);
1386 VPInstruction *New = Builder.createOverflowingOp(
1387 Instruction::Add, {A, B}, {false, false},
1388 RecWithFlags->getDebugLoc());
1389 RecWithFlags->replaceAllUsesWith(New);
1390 RecWithFlags->eraseFromParent();
1391 CurRec = New;
1392 } else
1393 RecWithFlags->dropPoisonGeneratingFlags();
1394 } else {
1395 Instruction *Instr = dyn_cast_or_null<Instruction>(
1396 CurRec->getVPSingleValue()->getUnderlyingValue());
1397 (void)Instr;
1398 assert((!Instr || !Instr->hasPoisonGeneratingFlags()) &&
1399 "found instruction with poison generating flags not covered by "
1400 "VPRecipeWithIRFlags");
1401 }
1402
1403 // Add new definitions to the worklist.
1404 for (VPValue *operand : CurRec->operands())
1405 if (VPRecipeBase *OpDef = operand->getDefiningRecipe())
1406 Worklist.push_back(OpDef);
1407 }
1408 });
1409
1410 // Traverse all the recipes in the VPlan and collect the poison-generating
1411 // recipes in the backward slice starting at the address of a VPWidenRecipe or
1412 // VPInterleaveRecipe.
1413 auto Iter = vp_depth_first_deep(Plan.getEntry());
1414 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1415 for (VPRecipeBase &Recipe : *VPBB) {
1416 if (auto *WidenRec = dyn_cast<VPWidenMemoryRecipe>(&Recipe)) {
1417 Instruction &UnderlyingInstr = WidenRec->getIngredient();
1418 VPRecipeBase *AddrDef = WidenRec->getAddr()->getDefiningRecipe();
1419 if (AddrDef && WidenRec->isConsecutive() &&
1420 BlockNeedsPredication(UnderlyingInstr.getParent()))
1421 collectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
1422 } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1423 VPRecipeBase *AddrDef = InterleaveRec->getAddr()->getDefiningRecipe();
1424 if (AddrDef) {
1425 // Check if any member of the interleave group needs predication.
1426 const InterleaveGroup<Instruction> *InterGroup =
1427 InterleaveRec->getInterleaveGroup();
1428 bool NeedPredication = false;
1429 for (int I = 0, NumMembers = InterGroup->getNumMembers();
1430 I < NumMembers; ++I) {
1431 Instruction *Member = InterGroup->getMember(I);
1432 if (Member)
1433 NeedPredication |= BlockNeedsPredication(Member->getParent());
1434 }
1435
1436 if (NeedPredication)
1437 collectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
1438 }
1439 }
1440 }
1441 }
1442}
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
ReachingDefAnalysis InstSet & ToRemove
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Hexagon Common GEP
iv Induction Variable Users
Definition: IVUsers.cpp:48
static bool mergeBlocksIntoPredecessors(Loop &L, DominatorTree &DT, LoopInfo &LI, MemorySSAUpdater *MSSAU, ScalarEvolution &SE)
#define I(x, y, z)
Definition: MD5.cpp:58
if(VerifyEach)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file implements dominator tree analysis for a single level of a VPlan's H-CFG.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:191
static bool sinkScalarOperands(VPlan &Plan)
static void removeRedundantInductionCasts(VPlan &Plan)
Remove redundant casts of inductions.
static void simplifyRecipes(VPlan &Plan, LLVMContext &Ctx)
Try to simplify the recipes in Plan.
static bool sinkRecurrenceUsersAfterPrevious(VPFirstOrderRecurrencePHIRecipe *FOR, VPRecipeBase *Previous, VPDominatorTree &VPDT)
Sink users of FOR after the recipe defining the previous value Previous of the recurrence.
static bool mergeReplicateRegionsIntoSuccessors(VPlan &Plan)
static VPActiveLaneMaskPHIRecipe * addVPLaneMaskPhiAndUpdateExitBranch(VPlan &Plan, bool DataAndControlFlowWithoutRuntimeCheck)
static bool isDeadRecipe(VPRecipeBase &R)
Returns true if R is dead and can be removed.
static void addReplicateRegions(VPlan &Plan)
static void legalizeAndOptimizeInductions(VPlan &Plan, ScalarEvolution &SE)
Legalize VPWidenPointerInductionRecipe, by replacing it with a PtrAdd (IndStart, ScalarIVSteps (0,...
static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo)
Try to simplify recipe R.
static VPRegionBlock * GetReplicateRegion(VPRecipeBase *R)
static void removeRedundantExpandSCEVRecipes(VPlan &Plan)
Remove redundant EpxandSCEVRecipes in Plan's entry block by replacing them with already existing reci...
static bool properlyDominates(const VPRecipeBase *A, const VPRecipeBase *B, VPDominatorTree &VPDT)
static SmallVector< VPUser * > collectUsersRecursively(VPValue *V)
static VPValue * createScalarIVSteps(VPlan &Plan, InductionDescriptor::InductionKind Kind, Instruction::BinaryOps InductionOpcode, FPMathOperator *FPBinOp, ScalarEvolution &SE, Instruction *TruncI, VPValue *StartV, VPValue *Step, VPBasicBlock::iterator IP)
static void recursivelyDeleteDeadRecipes(VPValue *V)
static void removeDeadRecipes(VPlan &Plan)
static VPRegionBlock * createReplicateRegion(VPReplicateRecipe *PredRecipe, VPlan &Plan)
static VPBasicBlock * getPredicatedThenBlock(VPRegionBlock *R)
If R is a triangle region, return the 'then' block of the triangle.
VPValue * getPredicatedMask(VPRegionBlock *R)
If R is a region with a VPBranchOnMaskRecipe in the entry block, return the mask.
static void removeRedundantCanonicalIVs(VPlan &Plan)
Try to replace VPWidenCanonicalIVRecipes with a widened canonical IV recipe, if it exists.
static void replaceHeaderPredicateWith(VPlan &Plan, VPValue &Idiom, function_ref< bool(VPUser &, unsigned)> Cond={})
Replaces (ICMP_ULE, WideCanonicalIV, backedge-taken-count) pattern using the given Idiom.
This file provides utility VPlan to VPlan transformations.
static const uint32_t IV[8]
Definition: blake3_impl.h:78
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:206
This class represents a function call, abstracting a target machine's calling convention.
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:1019
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:849
A debug info location.
Definition: DebugLoc.h:33
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition: DenseMap.h:145
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Core dominator tree base class.
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:296
Utility class for floating point operations which can have information about relaxed accuracy requirements attached to them.
Definition: Operator.h:201
FastMathFlags getFastMathFlags() const
Convenience function for getting all the fast-math flags.
Definition: Operator.h:319
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
An instruction for type-safe pointer arithmetic to access elements of arrays and structs.
Definition: Instructions.h:973
A struct for saving information about induction variables.
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_IntInduction
Integer induction variable. Step = C.
const BasicBlock * getParent() const
Definition: Instruction.h:152
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
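As a hedged illustration of this factory (typeDemo and the Ctx parameter are hypothetical names), note that integer types are uniqued per LLVMContext:

  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/LLVMContext.h"
  #include <cassert>

  // Minimal sketch: repeated get() calls with the same bit width return
  // the same uniqued IntegerType object.
  void typeDemo(llvm::LLVMContext &Ctx) {
    llvm::IntegerType *I32 = llvm::IntegerType::get(Ctx, 32);
    llvm::IntegerType *Again = llvm::IntegerType::get(Ctx, 32);
    assert(I32 == Again && "integer types are uniqued per context");
  }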
The group of interleaved loads/stores sharing the same stride and close to each other.
Definition: VectorUtils.h:444
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
Definition: VectorUtils.h:514
uint32_t getNumMembers() const
Definition: VectorUtils.h:462
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:184
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
bool contains(const KeyT &Key) const
Definition: MapVector.h:163
ValueT lookup(const KeyT &Key) const
Definition: MapVector.h:110
size_type size() const
Definition: MapVector.h:60
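A hedged sketch of the deterministic-order behavior described above (the sample keys are illustrative only); unlike DenseMap, iteration follows insertion order:

  #include "llvm/ADT/MapVector.h"
  #include <cstdio>

  // Minimal sketch: MapVector combines map-style lookup with vector-style,
  // insertion-ordered iteration.
  int main() {
    llvm::MapVector<int, const char *> MV;
    MV.insert({3, "three"});
    MV.insert({1, "one"});
    if (MV.contains(3))
      std::printf("size = %zu\n", MV.size()); // size = 2
    for (const auto &KV : MV)                 // visits 3 then 1
      std::printf("%d -> %s\n", KV.first, KV.second);
    return 0;
  }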
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of existing predicates.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
The RecurrenceDescriptor is used to identify recurrence variables in a loop.
Definition: IVDescriptors.h:71
RecurKind getRecurrenceKind() const
This class represents an analyzed expression in the program.
bool isZero() const
Return true if the expression is a constant zero.
Type * getType() const
Return the LLVM type of this SCEV expression.
The main scalar evolution driver.
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
const SCEV * getElementCount(Type *Ty, ElementCount EC)
LLVMContext & getContext() const
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition: SetVector.h:57
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:98
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:93
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
bool contains(const key_type &key) const
Check if the SetVector contains the given key.
Definition: SetVector.h:254
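A hedged sketch of the set-with-insertion-order semantics listed above (setVectorDemo is a hypothetical name); insert() rejects duplicates while iteration stays in first-insertion order:

  #include "llvm/ADT/SetVector.h"
  #include <cassert>

  // Minimal sketch: insert() returns true only for new elements.
  void setVectorDemo() {
    llvm::SetVector<int> SV;
    assert(SV.empty());
    bool Inserted = SV.insert(7); // true: new element
    assert(Inserted);
    SV.insert(7);                 // duplicate: ignored
    SV.insert(4);
    assert(SV.size() == 2 && SV.contains(7));
    // Iteration visits 7 then 4, independent of value ordering.
  }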
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
Definition: SmallPtrSet.h:427
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
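For illustration, a minimal sketch of push_back() and append() (smallVectorDemo is hypothetical); the inline capacity avoids heap allocation for small element counts:

  #include "llvm/ADT/SmallVector.h"
  #include <iterator>

  // Minimal sketch: up to 4 elements live inline; append() splices a range.
  void smallVectorDemo() {
    llvm::SmallVector<int, 4> A;
    A.push_back(1);
    A.push_back(2);
    int More[] = {3, 4, 5};
    A.append(std::begin(More), std::end(More)); // A = {1, 2, 3, 4, 5}
  }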
An instruction for storing to memory.
Definition: Instructions.h:317
Provides information about what library functions are available for the current target.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
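A hedged sketch of Twine's deferred concatenation (twineDemo and the name prefix are illustrative); the Twine is rendered once into a stack buffer instead of building intermediate strings:

  #include "llvm/ADT/SmallString.h"
  #include "llvm/ADT/Twine.h"

  // Minimal sketch: concatenation builds a lightweight expression tree;
  // toStringRef() materializes it into Buf only when needed.
  llvm::StringRef twineDemo(unsigned Part, llvm::SmallString<32> &Buf) {
    return (llvm::Twine("vec.iv.") + llvm::Twine(Part)).toStringRef(Buf);
  }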
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
op_range operands()
Definition: User.h:242
A recipe for generating the active lane mask for the vector loop that is used to predicate the vector loop body.
Definition: VPlan.h:2529
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition: VPlan.h:2739
void appendRecipe(VPRecipeBase *Recipe)
Augment the existing recipes of a VPBasicBlock with an additional Recipe as the last recipe.
Definition: VPlan.h:2807
RecipeListTy::iterator iterator
Instruction iterators...
Definition: VPlan.h:2760
iterator end()
Definition: VPlan.h:2770
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
Definition: VPlan.h:2817
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
Definition: VPlan.cpp:210
VPBasicBlock * splitAt(iterator SplitAt)
Split current block at SplitAt by inserting a new block between the current block and its successors, and moving all recipes starting at SplitAt to the new block.
Definition: VPlan.cpp:521
VPRecipeBase * getTerminator()
If the block has multiple successors, return the branch recipe terminating the block.
Definition: VPlan.cpp:584
const VPRecipeBase & back() const
Definition: VPlan.h:2782
void insert(VPRecipeBase *Recipe, iterator InsertPt)
Definition: VPlan.h:2798
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
Definition: VPlan.h:426
VPRegionBlock * getParent()
Definition: VPlan.h:498
const VPBasicBlock * getExitingBasicBlock() const
Definition: VPlan.cpp:175
VPBlockBase * getSinglePredecessor() const
Definition: VPlan.h:539
const VPBasicBlock * getEntryBasicBlock() const
Definition: VPlan.cpp:153
VPBlockBase * getSingleHierarchicalPredecessor()
Definition: VPlan.h:585
VPBlockBase * getSingleSuccessor() const
Definition: VPlan.h:533
const VPBlocksTy & getSuccessors() const
Definition: VPlan.h:523
static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse, VPBlockBase *BlockPtr)
Insert disconnected VPBlockBases IfTrue and IfFalse after BlockPtr.
Definition: VPlan.h:3312
static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To)
Disconnect VPBlockBases From and To bi-directionally.
Definition: VPlan.h:3340
static void connectBlocks(VPBlockBase *From, VPBlockBase *To)
Connect VPBlockBases From and To bi-directionally.
Definition: VPlan.h:3329
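A hedged sketch of rewiring the H-CFG with these helpers (it only compiles inside LLVM's lib/Transforms/Vectorize, where VPlan.h lives, and all block names are hypothetical):

  // Minimal sketch: make IfTrue/IfFalse the successors of Pred, then give
  // both a common successor Merge; each helper maintains both directions
  // of every edge.
  static void rewireDiamond(VPBlockBase *Pred, VPBlockBase *IfTrue,
                            VPBlockBase *IfFalse, VPBlockBase *Merge) {
    VPBlockUtils::insertTwoBlocksAfter(IfTrue, IfFalse, Pred);
    VPBlockUtils::connectBlocks(IfTrue, Merge);
    VPBlockUtils::connectBlocks(IfFalse, Merge);
  }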
A recipe for generating conditional branches on the bits of a mask.
Definition: VPlan.h:2207
VPlan-based builder utility analogous to IRBuilder.
static VPBuilder getToInsertAfter(VPRecipeBase *R)
Create a VPBuilder to insert after R.
VPInstruction * createOverflowingOp(unsigned Opcode, std::initializer_list< VPValue * > Operands, VPRecipeWithIRFlags::WrapFlagsTy WrapFlags, DebugLoc DL={}, const Twine &Name="")
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
VPValue * createNot(VPValue *Operand, DebugLoc DL={}, const Twine &Name="")
void setInsertPoint(VPBasicBlock *TheBB)
This specifies that created VPInstructions should be appended to the end of the specified block.
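A hedged sketch of the builder API listed above (negateMaskSketch and its parameters are hypothetical); like IRBuilder, it appends newly created VPInstructions at the insert point:

  // Minimal sketch: emit "or (not Mask), Other" at the end of VPBB, using
  // the createNot/createNaryOp helpers documented above.
  static VPValue *negateMaskSketch(VPBasicBlock *VPBB, VPValue *Mask,
                                   VPValue *Other) {
    VPBuilder Builder;
    Builder.setInsertPoint(VPBB);
    VPValue *NotMask = Builder.createNot(Mask);
    return Builder.createNaryOp(Instruction::Or, {NotMask, Other});
  }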
Canonical scalar induction phi of the vector loop.
Definition: VPlan.h:2472
Type * getScalarType() const
Returns the scalar type of the induction.
Definition: VPlan.h:2501
bool isCanonical(InductionDescriptor::InductionKind Kind, VPValue *Start, VPValue *Step) const
Check if the induction described by Kind, Start and Step is canonical, i.e. has the same start, step (of 1), and type as the canonical IV.
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition: VPlanValue.h:426
ArrayRef< VPValue * > definedValues()
Returns an ArrayRef of the values defined by the VPDef.
Definition: VPlanValue.h:421
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition: VPlanValue.h:399
A recipe for converting the input value IV to the corresponding value of an IV with different start and step values, using Start + IV * Step.
Definition: VPlan.h:2632
A recipe for generating the phi node for the current index of elements, adjusted in accordance with the EVL value.
Definition: VPlan.h:2561
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition: VPlan.h:1660
This is a concrete Recipe that models a single VPlan-level instruction.
Definition: VPlan.h:1166
@ FirstOrderRecurrenceSplice
Definition: VPlan.h:1172
@ CanonicalIVIncrementForPart
Definition: VPlan.h:1182
@ CalculateTripCountMinusVF
Definition: VPlan.h:1180
VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when control converges back from a Branch-on-Mask.
Definition: VPlan.h:2258
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition: VPlan.h:718
bool mayReadOrWriteMemory() const
Returns true if the recipe may read from or write to memory.
Definition: VPlan.h:804
bool mayHaveSideEffects() const
Returns true if the recipe may have side-effects.
VPBasicBlock * getParent()
Definition: VPlan.h:743
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition: VPlan.h:809
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-Single-Exiting subgraph of the output IR CFG.
Definition: VPlan.h:2872
const VPBlockBase * getEntry() const
Definition: VPlan.h:2911
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original scalar type, one per lane, instead of producing a single copy of widened type for all lanes.
Definition: VPlan.h:2134
bool isUniform() const
Definition: VPlan.h:2174
VPValue * getMask()
Return the mask of a predicated VPReplicateRecipe.
Definition: VPlan.h:2198
VPScalarCastRecipe is a recipe to create scalar cast instructions.
Definition: VPlan.h:1417
A recipe for handling phi nodes of integer and floating-point inductions, producing their scalar values.
Definition: VPlan.h:2689
VPSingleDef is a base class for recipes modeling a sequence of one or more output IR instructions that define a single result VPValue.
Definition: VPlan.h:835
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition: VPlan.h:895
An analysis for type-inference for VPValues.
Definition: VPlanAnalysis.h:36
LLVMContext & getContext()
Return the LLVMContext used by the analysis.
Definition: VPlanAnalysis.h:61
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's users to its operands.
Definition: VPlanValue.h:203
operand_range operands()
Definition: VPlanValue.h:278
void setOperand(unsigned I, VPValue *New)
Definition: VPlanValue.h:258
operand_iterator op_end()
Definition: VPlanValue.h:276
operand_iterator op_begin()
Definition: VPlanValue.h:274
void addOperand(VPValue *Operand)
Definition: VPlanValue.h:247
Value * getUnderlyingValue()
Return the underlying Value attached to this VPValue.
Definition: VPlanValue.h:77
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe, i.e. it is a live-in.
Definition: VPlan.cpp:118
void replaceAllUsesWith(VPValue *New)
Definition: VPlan.cpp:1270
unsigned getNumUsers() const
Definition: VPlanValue.h:112
Value * getLiveInIRValue()
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
Definition: VPlanValue.h:173
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplace returns true for the given use specified by its user and operand index.
Definition: VPlan.cpp:1274
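For illustration, a hedged sketch of this conditional RAUW (replaceUsesExcept and the Keep filter are hypothetical); the same shape lets a transform exclude particular users, e.g. the canonical IV increment, from replacement:

  // Minimal sketch: point every use of OldV at NewV except uses by Keep.
  static void replaceUsesExcept(VPValue *OldV, VPValue *NewV, VPUser *Keep) {
    OldV->replaceUsesWithIf(NewV, [Keep](VPUser &U, unsigned Idx) {
      (void)Idx; // operand index unused by this filter
      return &U != Keep;
    });
  }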
user_range users()
Definition: VPlanValue.h:133
A recipe for widening Call instructions.
Definition: VPlan.h:1456
A Recipe for widening the canonical induction variable of the vector loop.
Definition: VPlan.h:2597
const Type * getScalarType() const
Returns the scalar type of the induction.
Definition: VPlan.h:2623
VPWidenCastRecipe is a recipe to create vector cast instructions.
Definition: VPlan.h:1367
A recipe for handling GEP instructions.
Definition: VPlan.h:1529
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector values.
Definition: VPlan.h:1684
VPWidenRecipe is a recipe for producing a copy of vector type of its ingredient.
Definition: VPlan.h:1335
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient output IR.
Definition: VPlan.h:2973
bool hasScalableVF()
Definition: VPlan.h:3105
VPBasicBlock * getEntry()
Definition: VPlan.h:3066
VPValue * getTripCount() const
The trip count of the original loop.
Definition: VPlan.h:3070
VPValue * getOrCreateBackedgeTakenCount()
The backedge taken count of the original loop.
Definition: VPlan.h:3084
VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition: VPlan.h:3157
bool hasVF(ElementCount VF)
Definition: VPlan.h:3104
bool hasUF(unsigned UF) const
Definition: VPlan.h:3111
void setVF(ElementCount VF)
Definition: VPlan.h:3098
VPValue * getOrAddLiveIn(Value *V)
Gets the live-in VPValue for V or adds a new live-in (if none exists yet) for V.
Definition: VPlan.h:3126
bool hasScalarVFOnly() const
Definition: VPlan.h:3109
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the vector loop.
Definition: VPlan.h:3165
void setUF(unsigned UF)
Definition: VPlan.h:3113
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition: TypeSize.h:243
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
Definition: ilist_node.h:109
IteratorT end() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
Definition: PatternMatch.h:918
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
CastOperator_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:234
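A hedged illustration of composing these matchers on LLVM IR (isMulOfExtBy4 is a hypothetical helper); X binds to the extended operand only when the whole pattern matches:

  #include "llvm/IR/PatternMatch.h"

  // Minimal sketch: structurally match "mul (zext/sext X), 4".
  bool isMulOfExtBy4(llvm::Value *V) {
    using namespace llvm::PatternMatch;
    llvm::Value *X = nullptr;
    return match(V, m_Mul(m_ZExtOrSExt(m_Value(X)), m_SpecificInt(4)));
  }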
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr, ScalarEvolution &SE)
Get or create a VPValue that corresponds to the expansion of Expr.
Definition: VPlan.cpp:1459
bool onlyFirstLaneUsed(const VPValue *Def)
Returns true if only the first lane of Def is used.
Definition: VPlan.cpp:1449
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
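For illustration, a minimal sketch of the range form (allPositive is a hypothetical helper); the same shape applies to any_of, none_of, and find_if listed further below:

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/SmallVector.h"

  // Minimal sketch: the predicate runs over the whole range, with no
  // begin()/end() boilerplate.
  bool allPositive(const llvm::SmallVectorImpl<int> &Vals) {
    return llvm::all_of(Vals, [](int V) { return V > 0; });
  }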
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
const SCEV * createTripCountSCEV(Type *IdxTy, PredicatedScalarEvolution &PSE, Loop *OrigLoop)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iterators.
Definition: STLExtras.h:656
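A hedged sketch of the early-increment idiom (eraseEvens is hypothetical, and the list values are assumed distinct so remove() only erases the current node); this is the standard way to delete elements while walking a container:

  #include "llvm/ADT/STLExtras.h"
  #include <list>

  // Minimal sketch: the wrapped iterator advances before the body runs,
  // so erasing the current element does not invalidate the loop.
  void eraseEvens(std::list<int> &L) {
    for (int &V : llvm::make_early_inc_range(L)) {
      if (V % 2 == 0) {
        int Val = V;   // copy before the node holding V is freed
        L.remove(Val);
      }
    }
  }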
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing through region blocks.
Definition: VPlanCFG.h:226
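A hedged sketch of a deep traversal (forEachBlockSketch is hypothetical and only compiles inside the VPlan sources); this is a common way for transforms to visit every VPBasicBlock, including those nested in region blocks:

  // Minimal sketch: walk the whole H-CFG depth-first, descending into
  // VPRegionBlocks, and filter for leaf basic blocks.
  static void forEachBlockSketch(VPlan &Plan) {
    for (VPBlockBase *VPB : vp_depth_first_deep(Plan.getEntry()))
      if (auto *VPBB = dyn_cast<VPBasicBlock>(VPB))
        (void)VPBB; // ...process the recipes in VPBB...
  }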
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1647
std::unique_ptr< VPlan > VPlanPtr
Definition: VPlan.h:134
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1736
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vector.
Definition: SmallVector.h:1312
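For illustration, a minimal sketch of snapshotting a range before mutating it (snapshotDemo is a hypothetical standalone variant of this common pattern):

  #include "llvm/ADT/STLExtras.h"
  #include <list>

  // Minimal sketch: materialize the range into a SmallVector so the
  // original container can be modified while iterating the copy.
  void snapshotDemo(std::list<int> &L) {
    auto Copy = llvm::to_vector(L);
    for (int V : Copy)
      (void)V; // ...safe to mutate L here...
  }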
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition: Casting.h:548
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition: STLExtras.h:336
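A hedged sketch of dropping trailing elements (sumArgsOnly is hypothetical), e.g. to skip a trailing operand of an operand list:

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/SmallVector.h"

  // Minimal sketch: iterate all but the last element of the range.
  int sumArgsOnly(const llvm::SmallVectorImpl<int> &Ops) {
    int Sum = 0;
    for (int V : llvm::drop_end(Ops))
      Sum += V;
    return Sum;
  }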
RecurKind
These are the kinds of recurrences that we support.
Definition: IVDescriptors.h:34
@ Mul
Product of integers.
@ Add
Sum of integers.
BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
@ DataAndControlFlowWithoutRuntimeCheck
Use predicate to control both data and control flow, but modify the trip count so that a runtime overflow check can be avoided.
A recipe for handling first-order recurrence phis.
Definition: VPlan.h:1854
A recipe for widening load operations, using the address to load from and an optional mask.
Definition: VPlan.h:2363
A recipe for widening select instructions.
Definition: VPlan.h:1495
A recipe for widening store operations, using the stored value, the address to store to and an option...
Definition: VPlan.h:2402
static void addExplicitVectorLength(VPlan &Plan)
Add a VPEVLBasedIVPHIRecipe and related recipes to Plan and replace all uses of the VPCanonicalIVPHIRecipe, except the canonical IV increment, with a VPEVLBasedIVPHIRecipe.
static void createAndOptimizeReplicateRegions(VPlan &Plan)
Wrap predicated VPReplicateRecipes with a mask operand in an if-then region block and remove the mask operand.
static void dropPoisonGeneratingRecipes(VPlan &Plan, function_ref< bool(BasicBlock *)> BlockNeedsPredication)
Drop poison flags from recipes that may generate a poison value that is used after vectorization, even when their operands are not poison.
static void optimize(VPlan &Plan, ScalarEvolution &SE)
Apply VPlan-to-VPlan optimizations to Plan, including induction recipe optimizations, dead recipe removal, replicate region optimizations and block merging.
static void clearReductionWrapFlags(VPlan &Plan)
Clear NSW/NUW flags from reduction instructions if necessary.
static void VPInstructionsToVPRecipes(VPlanPtr &Plan, function_ref< const InductionDescriptor *(PHINode *)> GetIntOrFpInductionDescriptor, ScalarEvolution &SE, const TargetLibraryInfo &TLI)
Replaces the VPInstructions in Plan with corresponding widen recipes.
static void truncateToMinimalBitwidths(VPlan &Plan, const MapVector< Instruction *, uint64_t > &MinBWs, LLVMContext &Ctx)
Insert truncates and extends for any truncated recipe.
static void addActiveLaneMask(VPlan &Plan, bool UseActiveLaneMaskForControlFlow, bool DataAndControlFlowWithoutRuntimeCheck)
Replace (ICMP_ULE, wide canonical IV, backedge-taken-count) checks with an (active-lane-mask recipe, wide canonical IV, trip-count) check.
static bool adjustFixedOrderRecurrences(VPlan &Plan, VPBuilder &Builder)
Sink users of fixed-order recurrences after the recipe defining their previous value.
static void optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
Optimize Plan based on BestVF and BestUF.