//===-- VPlanTransforms.cpp - Utility VPlan to VPlan transforms -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a set of utility VPlan to VPlan transformations.
///
//===----------------------------------------------------------------------===//

#include "VPlanTransforms.h"
#include "VPRecipeBuilder.h"
#include "VPlanCFG.h"
#include "VPlanDominatorTree.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;

using namespace llvm::PatternMatch;

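/// Replace the VPInstructions in \p Plan with corresponding widen recipes,
/// using \p GetIntOrFpInductionDescriptor to recognize induction phis.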
void VPlanTransforms::VPInstructionsToVPRecipes(
    VPlanPtr &Plan,
    function_ref<const InductionDescriptor *(PHINode *)>
        GetIntOrFpInductionDescriptor,
    ScalarEvolution &SE, const TargetLibraryInfo &TLI) {

  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan->getEntry());
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
    VPRecipeBase *Term = VPBB->getTerminator();
    auto EndIter = Term ? Term->getIterator() : VPBB->end();
    // Introduce each ingredient into VPlan.
    for (VPRecipeBase &Ingredient :
         make_early_inc_range(make_range(VPBB->begin(), EndIter))) {

      VPValue *VPV = Ingredient.getVPSingleValue();
      Instruction *Inst = cast<Instruction>(VPV->getUnderlyingValue());

      VPRecipeBase *NewRecipe = nullptr;
      if (auto *VPPhi = dyn_cast<VPWidenPHIRecipe>(&Ingredient)) {
        auto *Phi = cast<PHINode>(VPPhi->getUnderlyingValue());
        if (const auto *II = GetIntOrFpInductionDescriptor(Phi)) {
          VPValue *Start = Plan->getVPValueOrAddLiveIn(II->getStartValue());
          VPValue *Step =
              vputils::getOrCreateVPValueForSCEVExpr(*Plan, II->getStep(), SE);
          NewRecipe = new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, *II);
        } else {
          Plan->addVPValue(Phi, VPPhi);
          continue;
        }
      } else {
        assert(isa<VPInstruction>(&Ingredient) &&
               "only VPInstructions expected here");
        assert(!isa<PHINode>(Inst) && "phis should be handled above");
        // Create VPWidenMemoryInstructionRecipe for loads and stores.
        if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
          NewRecipe = new VPWidenMemoryInstructionRecipe(
              *Load, Ingredient.getOperand(0), nullptr /*Mask*/,
              false /*Consecutive*/, false /*Reverse*/);
        } else if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
          NewRecipe = new VPWidenMemoryInstructionRecipe(
              *Store, Ingredient.getOperand(1), Ingredient.getOperand(0),
              nullptr /*Mask*/, false /*Consecutive*/, false /*Reverse*/);
        } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
          NewRecipe = new VPWidenGEPRecipe(GEP, Ingredient.operands());
        } else if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
          NewRecipe =
              new VPWidenCallRecipe(*CI, drop_end(Ingredient.operands()),
                                    getVectorIntrinsicIDForCall(CI, &TLI));
        } else if (SelectInst *SI = dyn_cast<SelectInst>(Inst)) {
          NewRecipe = new VPWidenSelectRecipe(*SI, Ingredient.operands());
        } else if (auto *CI = dyn_cast<CastInst>(Inst)) {
          NewRecipe = new VPWidenCastRecipe(
              CI->getOpcode(), Ingredient.getOperand(0), CI->getType(), CI);
        } else {
          NewRecipe = new VPWidenRecipe(*Inst, Ingredient.operands());
        }
      }

      NewRecipe->insertBefore(&Ingredient);
      if (NewRecipe->getNumDefinedValues() == 1)
        VPV->replaceAllUsesWith(NewRecipe->getVPSingleValue());
      else
        assert(NewRecipe->getNumDefinedValues() == 0 &&
               "Only recipes with zero or one defined values expected");
      Ingredient.eraseFromParent();
    }
  }
}

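/// Try to sink scalar recipes (VPReplicateRecipes and VPScalarIVStepsRecipes)
/// that feed recipes inside replicate regions into those regions' replicating
/// blocks, duplicating the sunk recipe when it still has users outside the
/// target block that only use its first lane. Returns true if any recipe was
/// moved.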
static bool sinkScalarOperands(VPlan &Plan) {
  auto Iter = vp_depth_first_deep(Plan.getEntry());
  bool Changed = false;
  // First, collect the operands of all recipes in replicate blocks as seeds for
  // sinking.
  SetVector<std::pair<VPBasicBlock *, VPRecipeBase *>> WorkList;
  for (VPRegionBlock *VPR : VPBlockUtils::blocksOnly<VPRegionBlock>(Iter)) {
    VPBasicBlock *EntryVPBB = VPR->getEntryBasicBlock();
    if (!VPR->isReplicator() || EntryVPBB->getSuccessors().size() != 2)
      continue;
    VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(EntryVPBB->getSuccessors()[0]);
    if (!VPBB || VPBB->getSingleSuccessor() != VPR->getExitingBasicBlock())
      continue;
    for (auto &Recipe : *VPBB) {
      for (VPValue *Op : Recipe.operands())
        if (auto *Def = Op->getDefiningRecipe())
          WorkList.insert(std::make_pair(VPBB, Def));
    }
  }

  bool ScalarVFOnly = Plan.hasScalarVFOnly();
  // Try to sink each replicate or scalar IV steps recipe in the worklist.
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    VPBasicBlock *SinkTo;
    VPRecipeBase *SinkCandidate;
    std::tie(SinkTo, SinkCandidate) = WorkList[I];
    if (SinkCandidate->getParent() == SinkTo ||
        SinkCandidate->mayHaveSideEffects() ||
        SinkCandidate->mayReadOrWriteMemory())
      continue;
    if (auto *RepR = dyn_cast<VPReplicateRecipe>(SinkCandidate)) {
      if (!ScalarVFOnly && RepR->isUniform())
        continue;
    } else if (!isa<VPScalarIVStepsRecipe>(SinkCandidate))
      continue;

    bool NeedsDuplicating = false;
    // All recipe users of the sink candidate must be in the same block SinkTo
    // or all users outside of SinkTo must be uniform-after-vectorization (i.e.,
    // only the first lane is used). In the latter case, we need to duplicate
    // SinkCandidate.
    auto CanSinkWithUser = [SinkTo, &NeedsDuplicating,
                            SinkCandidate](VPUser *U) {
      auto *UI = dyn_cast<VPRecipeBase>(U);
      if (!UI)
        return false;
      if (UI->getParent() == SinkTo)
        return true;
      NeedsDuplicating =
          UI->onlyFirstLaneUsed(SinkCandidate->getVPSingleValue());
      // We only know how to duplicate VPReplicateRecipes for now.
      return NeedsDuplicating && isa<VPReplicateRecipe>(SinkCandidate);
    };
    if (!all_of(SinkCandidate->getVPSingleValue()->users(), CanSinkWithUser))
      continue;

    if (NeedsDuplicating) {
      if (ScalarVFOnly)
        continue;
      Instruction *I = cast<Instruction>(
          cast<VPReplicateRecipe>(SinkCandidate)->getUnderlyingValue());
      auto *Clone = new VPReplicateRecipe(I, SinkCandidate->operands(), true);
      // TODO: add ".cloned" suffix to name of Clone's VPValue.

      Clone->insertBefore(SinkCandidate);
      for (auto *U : to_vector(SinkCandidate->getVPSingleValue()->users())) {
        auto *UI = cast<VPRecipeBase>(U);
        if (UI->getParent() == SinkTo)
          continue;

        for (unsigned Idx = 0; Idx != UI->getNumOperands(); Idx++) {
          if (UI->getOperand(Idx) != SinkCandidate->getVPSingleValue())
            continue;
          UI->setOperand(Idx, Clone);
        }
      }
    }
    SinkCandidate->moveBefore(*SinkTo, SinkTo->getFirstNonPhi());
    for (VPValue *Op : SinkCandidate->operands())
      if (auto *Def = Op->getDefiningRecipe())
        WorkList.insert(std::make_pair(SinkTo, Def));
    Changed = true;
  }
  return Changed;
}

/// If \p R is a region with a VPBranchOnMaskRecipe in the entry block, return
/// the mask.
VPValue *getPredicatedMask(VPRegionBlock *R) {
  auto *EntryBB = dyn_cast<VPBasicBlock>(R->getEntry());
  if (!EntryBB || EntryBB->size() != 1 ||
      !isa<VPBranchOnMaskRecipe>(EntryBB->begin()))
    return nullptr;

  return cast<VPBranchOnMaskRecipe>(&*EntryBB->begin())->getOperand(0);
}

/// If \p R is a triangle region, return the 'then' block of the triangle.
static VPBasicBlock *getPredicatedThenBlock(VPRegionBlock *R) {
  auto *EntryBB = cast<VPBasicBlock>(R->getEntry());
  if (EntryBB->getNumSuccessors() != 2)
    return nullptr;

  auto *Succ0 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[0]);
  auto *Succ1 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[1]);
  if (!Succ0 || !Succ1)
    return nullptr;

  if (Succ0->getNumSuccessors() + Succ1->getNumSuccessors() != 1)
    return nullptr;
  if (Succ0->getSingleSuccessor() == Succ1)
    return Succ0;
  if (Succ1->getSingleSuccessor() == Succ0)
    return Succ1;
  return nullptr;
}

// Merge replicate regions in their successor region, if a replicate region
// is connected to a successor replicate region with the same predicate by a
// single, empty VPBasicBlock.
static bool mergeReplicateRegionsIntoSuccessors(VPlan &Plan) {
  SetVector<VPRegionBlock *> DeletedRegions;

  // Collect replicate regions followed by an empty block, followed by another
  // replicate region with matching masks, to process up front. This is to
  // avoid iterator invalidation issues while merging regions.
  SmallVector<VPRegionBlock *, 8> WorkList;
  for (VPRegionBlock *Region1 : VPBlockUtils::blocksOnly<VPRegionBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    if (!Region1->isReplicator())
      continue;
    auto *MiddleBasicBlock =
        dyn_cast_or_null<VPBasicBlock>(Region1->getSingleSuccessor());
    if (!MiddleBasicBlock || !MiddleBasicBlock->empty())
      continue;

    auto *Region2 =
        dyn_cast_or_null<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());
    if (!Region2 || !Region2->isReplicator())
      continue;

    VPValue *Mask1 = getPredicatedMask(Region1);
    VPValue *Mask2 = getPredicatedMask(Region2);
    if (!Mask1 || Mask1 != Mask2)
      continue;

    assert(Mask1 && Mask2 && "both regions must have conditions");
    WorkList.push_back(Region1);
  }

  // Move recipes from Region1 to its successor region, if both are triangles.
  for (VPRegionBlock *Region1 : WorkList) {
    if (DeletedRegions.contains(Region1))
      continue;
    auto *MiddleBasicBlock = cast<VPBasicBlock>(Region1->getSingleSuccessor());
    auto *Region2 = cast<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());

    VPBasicBlock *Then1 = getPredicatedThenBlock(Region1);
    VPBasicBlock *Then2 = getPredicatedThenBlock(Region2);
    if (!Then1 || !Then2)
      continue;

    // Note: No fusion-preventing memory dependencies are expected in either
    // region. Such dependencies should be rejected during earlier dependence
    // checks, which guarantee accesses can be re-ordered for vectorization.
    //
    // Move recipes to the successor region.
    for (VPRecipeBase &ToMove : make_early_inc_range(reverse(*Then1)))
      ToMove.moveBefore(*Then2, Then2->getFirstNonPhi());

    auto *Merge1 = cast<VPBasicBlock>(Then1->getSingleSuccessor());
    auto *Merge2 = cast<VPBasicBlock>(Then2->getSingleSuccessor());

    // Move VPPredInstPHIRecipes from the merge block to the successor region's
    // merge block. Update all users inside the successor region to use the
    // original values.
    for (VPRecipeBase &Phi1ToMove : make_early_inc_range(reverse(*Merge1))) {
      VPValue *PredInst1 =
          cast<VPPredInstPHIRecipe>(&Phi1ToMove)->getOperand(0);
      VPValue *Phi1ToMoveV = Phi1ToMove.getVPSingleValue();
      for (VPUser *U : to_vector(Phi1ToMoveV->users())) {
        auto *UI = dyn_cast<VPRecipeBase>(U);
        if (!UI || UI->getParent() != Then2)
          continue;
        for (unsigned I = 0, E = U->getNumOperands(); I != E; ++I) {
          if (Phi1ToMoveV != U->getOperand(I))
            continue;
          U->setOperand(I, PredInst1);
        }
      }

      Phi1ToMove.moveBefore(*Merge2, Merge2->begin());
    }

    // Finally, remove the first region.
    for (VPBlockBase *Pred : make_early_inc_range(Region1->getPredecessors())) {
      VPBlockUtils::disconnectBlocks(Pred, Region1);
      VPBlockUtils::connectBlocks(Pred, MiddleBasicBlock);
    }
    VPBlockUtils::disconnectBlocks(Region1, MiddleBasicBlock);
    DeletedRegions.insert(Region1);
  }

  for (VPRegionBlock *ToDelete : DeletedRegions)
    delete ToDelete;
  return !DeletedRegions.empty();
}

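/// Build a triangular if-then replicate region around \p PredRecipe: a
/// BranchOnMask entry block using the recipe's mask, an ".if" block holding an
/// unmasked clone of the recipe, and a ".continue" block with a
/// VPPredInstPHIRecipe if the result has users. The original predicated
/// recipe is erased.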
static VPRegionBlock *createReplicateRegion(VPReplicateRecipe *PredRecipe,
                                            VPlan &Plan) {
  Instruction *Instr = PredRecipe->getUnderlyingInstr();
  // Build the triangular if-then region.
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BlockInMask = PredRecipe->getMask();
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);

  // Replace predicated replicate recipe with a replicate recipe without a
  // mask but in the replicate region.
  auto *RecipeWithoutMask = new VPReplicateRecipe(
      PredRecipe->getUnderlyingInstr(),
      make_range(PredRecipe->op_begin(), std::prev(PredRecipe->op_end())),
      PredRecipe->isUniform());
  auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", RecipeWithoutMask);

  VPPredInstPHIRecipe *PHIRecipe = nullptr;
  if (PredRecipe->getNumUsers() != 0) {
    PHIRecipe = new VPPredInstPHIRecipe(RecipeWithoutMask);
    PredRecipe->replaceAllUsesWith(PHIRecipe);
    PHIRecipe->setOperand(0, RecipeWithoutMask);
  }
  PredRecipe->eraseFromParent();
  auto *Exiting = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
  VPRegionBlock *Region = new VPRegionBlock(Entry, Exiting, RegionName, true);

  // Note: first set Entry as region entry and then connect successors starting
  // from it in order, to propagate the "parent" of each VPBasicBlock.
  VPBlockUtils::insertTwoBlocksAfter(Pred, Exiting, Entry);
  VPBlockUtils::connectBlocks(Pred, Exiting);

  return Region;
}

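/// Wrap each predicated VPReplicateRecipe in the plan in its own if-then
/// replicate region: split the containing block at the recipe, create the
/// region via createReplicateRegion and connect it between the two halves.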
static void addReplicateRegions(VPlan &Plan) {
  SmallVector<VPReplicateRecipe *> WorkList;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    for (VPRecipeBase &R : *VPBB)
      if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
        if (RepR->isPredicated())
          WorkList.push_back(RepR);
      }
  }

  unsigned BBNum = 0;
  for (VPReplicateRecipe *RepR : WorkList) {
    VPBasicBlock *CurrentBlock = RepR->getParent();
    VPBasicBlock *SplitBlock = CurrentBlock->splitAt(RepR->getIterator());

    BasicBlock *OrigBB = RepR->getUnderlyingInstr()->getParent();
    SplitBlock->setName(
        OrigBB->hasName() ? OrigBB->getName() + "." + Twine(BBNum++) : "");
    // Record predicated instructions for above packing optimizations.
    VPBlockBase *Region = createReplicateRegion(RepR, Plan);
    Region->setParent(CurrentBlock->getParent());
    VPBlockUtils::disconnectBlocks(CurrentBlock, SplitBlock);
    VPBlockUtils::connectBlocks(CurrentBlock, Region);
    VPBlockUtils::connectBlocks(Region, SplitBlock);
  }
}

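/// Wrap predicated VPReplicateRecipes with a mask operand in an if-then
/// region block, then repeatedly sink scalar operands and merge regions and
/// blocks until no further simplification is possible.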
void VPlanTransforms::createAndOptimizeReplicateRegions(VPlan &Plan) {
  // Convert masked VPReplicateRecipes to if-then region blocks.
  addReplicateRegions(Plan);

  bool ShouldSimplify = true;
  while (ShouldSimplify) {
    ShouldSimplify = sinkScalarOperands(Plan);
    ShouldSimplify |= mergeReplicateRegionsIntoSuccessors(Plan);
    ShouldSimplify |= VPlanTransforms::mergeBlocksIntoPredecessors(Plan);
  }
}
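/// Fold each VPBasicBlock that is the single successor of a single-successor
/// predecessor into that predecessor, updating region exiting blocks and
/// successor edges accordingly. Returns true if any block was merged.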
bool VPlanTransforms::mergeBlocksIntoPredecessors(VPlan &Plan) {
  SmallVector<VPBasicBlock *> WorkList;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    auto *PredVPBB =
        dyn_cast_or_null<VPBasicBlock>(VPBB->getSinglePredecessor());
    if (PredVPBB && PredVPBB->getNumSuccessors() == 1)
      WorkList.push_back(VPBB);
  }

  for (VPBasicBlock *VPBB : WorkList) {
    VPBasicBlock *PredVPBB = cast<VPBasicBlock>(VPBB->getSinglePredecessor());
    for (VPRecipeBase &R : make_early_inc_range(*VPBB))
      R.moveBefore(*PredVPBB, PredVPBB->end());
    VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
    auto *ParentRegion = cast_or_null<VPRegionBlock>(VPBB->getParent());
    if (ParentRegion && ParentRegion->getExiting() == VPBB)
      ParentRegion->setExiting(PredVPBB);
    for (auto *Succ : to_vector(VPBB->successors())) {
      VPBlockUtils::disconnectBlocks(VPBB, Succ);
      VPBlockUtils::connectBlocks(PredVPBB, Succ);
    }
    delete VPBB;
  }
  return !WorkList.empty();
}

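/// For vectorized widened inductions, bypass any recorded chain of IR casts of
/// the IV by replacing the recipe of the final cast in the chain with the
/// vectorized induction itself; the dead casts are cleaned up later.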
void VPlanTransforms::removeRedundantInductionCasts(VPlan &Plan) {
  for (auto &Phi : Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    auto *IV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (!IV || IV->getTruncInst())
      continue;

    // A sequence of IR Casts has potentially been recorded for IV, which
    // *must be bypassed* when the IV is vectorized, because the vectorized IV
    // will produce the desired casted value. This sequence forms a def-use
    // chain and is provided in reverse order, ending with the cast that uses
    // the IV phi. Search for the recipe of the last cast in the chain and
    // replace it with the original IV. Note that only the final cast is
    // expected to have users outside the cast-chain and the dead casts left
    // over will be cleaned up later.
    auto &Casts = IV->getInductionDescriptor().getCastInsts();
    VPValue *FindMyCast = IV;
    for (Instruction *IRCast : reverse(Casts)) {
      VPRecipeBase *FoundUserCast = nullptr;
      for (auto *U : FindMyCast->users()) {
        auto *UserCast = cast<VPRecipeBase>(U);
        if (UserCast->getNumDefinedValues() == 1 &&
            UserCast->getVPSingleValue()->getUnderlyingValue() == IRCast) {
          FoundUserCast = UserCast;
          break;
        }
      }
      FindMyCast = FoundUserCast->getVPSingleValue();
    }
    FindMyCast->replaceAllUsesWith(IV);
  }
}

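/// If a VPWidenCanonicalIVRecipe was introduced but the plan already widens a
/// canonical induction of the same type, reuse the existing widened induction
/// and erase the redundant recipe.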
void VPlanTransforms::removeRedundantCanonicalIVs(VPlan &Plan) {
  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
  VPWidenCanonicalIVRecipe *WidenNewIV = nullptr;
  for (VPUser *U : CanonicalIV->users()) {
    WidenNewIV = dyn_cast<VPWidenCanonicalIVRecipe>(U);
    if (WidenNewIV)
      break;
  }

  if (!WidenNewIV)
    return;

  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);

    if (!WidenOriginalIV || !WidenOriginalIV->isCanonical() ||
        WidenOriginalIV->getScalarType() != WidenNewIV->getScalarType())
      continue;

    // Replace WidenNewIV with WidenOriginalIV if WidenOriginalIV provides
    // everything WidenNewIV's users need. That is, WidenOriginalIV will
    // generate a vector phi or all users of WidenNewIV demand the first lane
    // only.
    if (any_of(WidenOriginalIV->users(),
               [WidenOriginalIV](VPUser *U) {
                 return !U->usesScalars(WidenOriginalIV);
               }) ||
        vputils::onlyFirstLaneUsed(WidenNewIV)) {
      WidenNewIV->replaceAllUsesWith(WidenOriginalIV);
      WidenNewIV->eraseFromParent();
      return;
    }
  }
}

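/// Erase recipes whose defined values are unused, provided they have no side
/// effects; conditional assume replicate recipes are removed as well, since
/// their conditions may be flattened.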
void VPlanTransforms::removeDeadRecipes(VPlan &Plan) {
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());

  for (VPBasicBlock *VPBB :
       reverse(VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT))) {
    // The recipes in the block are processed in reverse order, to catch chains
    // of dead recipes.
    for (VPRecipeBase &R : make_early_inc_range(reverse(*VPBB))) {
      // A user keeps R alive:
      if (any_of(R.definedValues(),
                 [](VPValue *V) { return V->getNumUsers(); }))
        continue;

      // Having side effects keeps R alive, but do remove conditional assume
      // instructions as their conditions may be flattened.
      auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
      bool IsConditionalAssume =
          RepR && RepR->isPredicated() &&
          match(RepR->getUnderlyingInstr(), m_Intrinsic<Intrinsic::assume>());
      if (R.mayHaveSideEffects() && !IsConditionalAssume)
        continue;

      R.eraseFromParent();
    }
  }
}

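/// Create, at the start of the header block, recipes computing the scalar
/// steps of the induction described by \p ID, deriving a separate IV from the
/// canonical one first when the induction is not itself canonical. Returns the
/// VPScalarIVStepsRecipe producing the per-lane scalar values.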
static VPValue *createScalarIVSteps(VPlan &Plan, const InductionDescriptor &ID,
                                    ScalarEvolution &SE, Instruction *TruncI,
                                    Type *IVTy, VPValue *StartV,
                                    VPValue *Step) {
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  auto IP = HeaderVPBB->getFirstNonPhi();
  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
  Type *TruncTy = TruncI ? TruncI->getType() : IVTy;
  VPValue *BaseIV = CanonicalIV;
  if (!CanonicalIV->isCanonical(ID.getKind(), StartV, Step, TruncTy)) {
    BaseIV = new VPDerivedIVRecipe(ID, StartV, CanonicalIV, Step,
                                   TruncI ? TruncI->getType() : nullptr);
    HeaderVPBB->insert(BaseIV->getDefiningRecipe(), IP);
  }

  VPScalarIVStepsRecipe *Steps = new VPScalarIVStepsRecipe(ID, BaseIV, Step);
  HeaderVPBB->insert(Steps, IP);
  return Steps;
}

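/// For widened inductions that have scalar users, materialize explicit scalar
/// IV steps in the header and rewrite those users to consume the steps instead
/// of the widened induction.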
void VPlanTransforms::optimizeInductions(VPlan &Plan, ScalarEvolution &SE) {
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  bool HasOnlyVectorVFs = !Plan.hasVF(ElementCount::getFixed(1));
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    auto *WideIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (!WideIV)
      continue;
    if (HasOnlyVectorVFs && none_of(WideIV->users(), [WideIV](VPUser *U) {
          return U->usesScalars(WideIV);
        }))
      continue;

    const InductionDescriptor &ID = WideIV->getInductionDescriptor();
    VPValue *Steps = createScalarIVSteps(
        Plan, ID, SE, WideIV->getTruncInst(), WideIV->getPHINode()->getType(),
        WideIV->getStartValue(), WideIV->getStepValue());

    // Update scalar users of IV to use Step instead. Use SetVector to ensure
    // the list of users doesn't contain duplicates.
    SetVector<VPUser *> Users(WideIV->user_begin(), WideIV->user_end());
    for (VPUser *U : Users) {
      if (HasOnlyVectorVFs && !U->usesScalars(WideIV))
        continue;
      for (unsigned I = 0, E = U->getNumOperands(); I != E; I++) {
        if (U->getOperand(I) != WideIV)
          continue;
        U->setOperand(I, Steps);
      }
    }
  }
}

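/// Deduplicate VPExpandSCEVRecipes in the plan entry: a recipe expanding a
/// SCEV that has already been expanded is replaced with the first expansion
/// and erased.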
void VPlanTransforms::removeRedundantExpandSCEVRecipes(VPlan &Plan) {
  DenseMap<const SCEV *, VPValue *> SCEV2VPV;

  for (VPRecipeBase &R :
       make_early_inc_range(*Plan.getEntry())) {
    auto *ExpR = dyn_cast<VPExpandSCEVRecipe>(&R);
    if (!ExpR)
      continue;

    auto I = SCEV2VPV.insert({ExpR->getSCEV(), ExpR});
    if (I.second)
      continue;
    ExpR->replaceAllUsesWith(I.first->second);
    ExpR->eraseFromParent();
  }
}

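/// Return true if \p Term is a BranchOnCond on the negation of an
/// active-lane-mask, i.e. its condition is Not(ActiveLaneMask).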
static bool canSimplifyBranchOnCond(VPInstruction *Term) {
  VPInstruction *Not = dyn_cast<VPInstruction>(Term->getOperand(0));
  if (!Not || Not->getOpcode() != VPInstruction::Not)
    return false;

  VPInstruction *ALM = dyn_cast<VPInstruction>(Not->getOperand(0));
  return ALM && ALM->getOpcode() == VPInstruction::ActiveLaneMask;
}

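/// Optimize \p Plan based on \p BestVF and \p BestUF: if the trip count is
/// known to be no larger than BestVF * BestUF, replace the exiting branch with
/// an unconditional branch-on-cond true and pin the plan's VF and UF.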
void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
                                         unsigned BestUF,
                                         PredicatedScalarEvolution &PSE) {
  assert(Plan.hasVF(BestVF) && "BestVF is not available in Plan");
  assert(Plan.hasUF(BestUF) && "BestUF is not available in Plan");
  VPBasicBlock *ExitingVPBB =
      Plan.getVectorLoopRegion()->getExitingBasicBlock();
  auto *Term = dyn_cast<VPInstruction>(&ExitingVPBB->back());
  // Try to simplify the branch condition if TC <= VF * UF when preparing to
  // execute the plan for the main vector loop. We only do this if the
  // terminator is:
  //  1. BranchOnCount, or
  //  2. BranchOnCond where the input is Not(ActiveLaneMask).
  if (!Term || (Term->getOpcode() != VPInstruction::BranchOnCount &&
                (Term->getOpcode() != VPInstruction::BranchOnCond ||
                 !canSimplifyBranchOnCond(Term))))
    return;

  Type *IdxTy =
      Plan.getCanonicalIV()->getStartValue()->getLiveInIRValue()->getType();
  const SCEV *TripCount = createTripCountSCEV(IdxTy, PSE);
  ScalarEvolution &SE = *PSE.getSE();
  const SCEV *C =
      SE.getConstant(TripCount->getType(), BestVF.getKnownMinValue() * BestUF);
  if (TripCount->isZero() ||
      !SE.isKnownPredicate(CmpInst::ICMP_ULE, TripCount, C))
    return;

  LLVMContext &Ctx = SE.getContext();
  auto *BOC = new VPInstruction(
      VPInstruction::BranchOnCond,
      {Plan.getVPValueOrAddLiveIn(ConstantInt::getTrue(Ctx))});
  Term->eraseFromParent();
  ExitingVPBB->appendRecipe(BOC);
  Plan.setVF(BestVF);
  Plan.setUF(BestUF);
  // TODO: Further simplifications are possible
  //      1. Replace inductions with constants.
  //      2. Replace vector loop region with VPBasicBlock.
}

#ifndef NDEBUG
static VPRegionBlock *GetReplicateRegion(VPRecipeBase *R) {
  auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
  if (Region && Region->isReplicator()) {
    assert(Region->getNumSuccessors() == 1 &&
           Region->getNumPredecessors() == 1 && "Expected SESE region!");
    assert(R->getParent()->size() == 1 &&
           "A recipe in an original replicator region must be the only "
           "recipe in its block");
    return Region;
  }
  return nullptr;
}
#endif

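/// Return true if \p A properly dominates \p B: either A's block strictly
/// dominates B's block, or both recipes are in the same block and A comes
/// before B.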
static bool properlyDominates(const VPRecipeBase *A, const VPRecipeBase *B,
                              VPDominatorTree &VPDT) {
  if (A == B)
    return false;

  auto LocalComesBefore = [](const VPRecipeBase *A, const VPRecipeBase *B) {
    for (auto &R : *A->getParent()) {
      if (&R == A)
        return true;
      if (&R == B)
        return false;
    }
    llvm_unreachable("recipe not found");
  };
  const VPBlockBase *ParentA = A->getParent();
  const VPBlockBase *ParentB = B->getParent();
  if (ParentA == ParentB)
    return LocalComesBefore(A, B);

  assert(!GetReplicateRegion(const_cast<VPRecipeBase *>(A)) &&
         "No replicate regions expected at this point");
  assert(!GetReplicateRegion(const_cast<VPRecipeBase *>(B)) &&
         "No replicate regions expected at this point");
  return VPDT.properlyDominates(ParentA, ParentB);
}

/// Sink users of \p FOR after the recipe defining the previous value \p
/// Previous of the recurrence. \returns true if all users of \p FOR could be
/// re-arranged as needed or false if it is not possible.
static bool
sinkRecurrenceUsersAfterPrevious(VPFirstOrderRecurrencePHIRecipe *FOR,
                                 VPRecipeBase *Previous,
                                 VPDominatorTree &VPDT) {
  // Collect recipes that need sinking.
  SmallVector<VPRecipeBase *> WorkList;
  SmallPtrSet<VPRecipeBase *, 8> Seen;
  Seen.insert(Previous);
  auto TryToPushSinkCandidate = [&](VPRecipeBase *SinkCandidate) {
    // The previous value must not depend on the users of the recurrence phi;
    // if it did, FOR would not be a fixed-order recurrence.
    if (SinkCandidate == Previous)
      return false;

    if (isa<VPHeaderPHIRecipe>(SinkCandidate) ||
        !Seen.insert(SinkCandidate).second ||
        properlyDominates(Previous, SinkCandidate, VPDT))
      return true;

    if (SinkCandidate->mayHaveSideEffects())
      return false;

    WorkList.push_back(SinkCandidate);
    return true;
  };

  // Recursively sink users of FOR after Previous.
  WorkList.push_back(FOR);
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    VPRecipeBase *Current = WorkList[I];
    assert(Current->getNumDefinedValues() == 1 &&
           "only recipes with a single defined value expected");

    for (VPUser *User : Current->getVPSingleValue()->users()) {
      if (auto *R = dyn_cast<VPRecipeBase>(User))
        if (!TryToPushSinkCandidate(R))
          return false;
    }
  }

  // Keep recipes to sink ordered by dominance so earlier instructions are
  // processed first.
  sort(WorkList, [&VPDT](const VPRecipeBase *A, const VPRecipeBase *B) {
    return properlyDominates(A, B, VPDT);
  });

  for (VPRecipeBase *SinkCandidate : WorkList) {
    if (SinkCandidate == FOR)
      continue;

    SinkCandidate->moveAfter(Previous);
    Previous = SinkCandidate;
  }
  return true;
}

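/// Sink users of fixed-order recurrence phis after the recipes defining their
/// previous values and introduce FirstOrderRecurrenceSplice VPInstructions to
/// combine the incoming and previous values. Returns false if the users could
/// not be re-arranged.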
bool VPlanTransforms::adjustFixedOrderRecurrences(VPlan &Plan,
                                                  VPBuilder &Builder) {
  VPDominatorTree VPDT;
  VPDT.recalculate(Plan);

  SmallVector<VPFirstOrderRecurrencePHIRecipe *> RecurrencePhis;
  for (VPRecipeBase &R :
       Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis())
    if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
      RecurrencePhis.push_back(FOR);

  for (VPFirstOrderRecurrencePHIRecipe *FOR : RecurrencePhis) {
    SmallPtrSet<VPFirstOrderRecurrencePHIRecipe *, 4> SeenPhis;
    VPRecipeBase *Previous = FOR->getBackedgeValue()->getDefiningRecipe();
    // Fixed-order recurrences do not contain cycles, so this loop is guaranteed
    // to terminate.
    while (auto *PrevPhi =
               dyn_cast_or_null<VPFirstOrderRecurrencePHIRecipe>(Previous)) {
      assert(PrevPhi->getParent() == FOR->getParent());
      assert(SeenPhis.insert(PrevPhi).second);
      Previous = PrevPhi->getBackedgeValue()->getDefiningRecipe();
    }

    if (!sinkRecurrenceUsersAfterPrevious(FOR, Previous, VPDT))
      return false;

    // Introduce a recipe to combine the incoming and previous values of a
    // fixed-order recurrence.
    VPBasicBlock *InsertBlock = Previous->getParent();
    if (isa<VPHeaderPHIRecipe>(Previous))
      Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
    else
      Builder.setInsertPoint(InsertBlock, std::next(Previous->getIterator()));

    auto *RecurSplice = cast<VPInstruction>(
        Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
                             {FOR, FOR->getBackedgeValue()}));

    FOR->replaceAllUsesWith(RecurSplice);
    // Set the first operand of RecurSplice to FOR again, after replacing
    // all users.
    RecurSplice->setOperand(0, FOR);
  }
  return true;
}

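/// Clear poison-generating flags (e.g. NSW/NUW) from the recipes forming the
/// chains of integer Add and Mul reductions, if necessary.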
void VPlanTransforms::clearReductionWrapFlags(VPlan &Plan) {
  for (VPRecipeBase &R :
       Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
    if (!PhiR)
      continue;
    const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
    RecurKind RK = RdxDesc.getRecurrenceKind();
    if (RK != RecurKind::Add && RK != RecurKind::Mul)
      continue;

    SetVector<VPValue *> Worklist;
    Worklist.insert(PhiR);

    for (unsigned I = 0; I != Worklist.size(); ++I) {
      VPValue *Cur = Worklist[I];
      if (auto *RecWithFlags =
              dyn_cast<VPRecipeWithIRFlags>(Cur->getDefiningRecipe())) {
        RecWithFlags->dropPoisonGeneratingFlags();
      }

      for (VPUser *U : Cur->users()) {
        auto *UserRecipe = dyn_cast<VPRecipeBase>(U);
        if (!UserRecipe)
          continue;
        for (VPValue *V : UserRecipe->definedValues())
          Worklist.insert(V);
      }
    }
  }
}

/// Returns true if \p V is constant one.
static bool isConstantOne(VPValue *V) {
  if (!V->isLiveIn())
    return false;
  auto *C = dyn_cast<ConstantInt>(V->getLiveInIRValue());
  return C && C->isOne();
}

/// Returns the llvm::Instruction opcode for \p R.
static unsigned getOpcodeForRecipe(VPRecipeBase &R) {
  if (auto *WidenR = dyn_cast<VPWidenRecipe>(&R))
    return WidenR->getUnderlyingInstr()->getOpcode();
  if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R))
    return RepR->getUnderlyingInstr()->getOpcode();
  if (auto *VPI = dyn_cast<VPInstruction>(&R))
    return VPI->getOpcode();
  return 0;
}

/// Try to simplify recipe \p R, e.g. replacing a multiply by constant one with
/// the other operand.
static void simplifyRecipe(VPRecipeBase &R) {
  unsigned Opcode = getOpcodeForRecipe(R);
  if (Opcode == Instruction::Mul) {
    VPValue *A = R.getOperand(0);
    VPValue *B = R.getOperand(1);
    if (isConstantOne(A))
      return R.getVPSingleValue()->replaceAllUsesWith(B);
    if (isConstantOne(B))
      return R.getVPSingleValue()->replaceAllUsesWith(A);
  }
}

/// Try to simplify the recipes in \p Plan.
static void simplifyRecipes(VPlan &Plan) {
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      simplifyRecipe(R);
    }
  }
}

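/// Apply VPlan-to-VPlan optimizations to \p Plan: redundant-recipe removal,
/// induction recipe optimizations, recipe simplification, dead-recipe removal,
/// replicate-region creation and optimization, and block merging.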
void VPlanTransforms::optimize(VPlan &Plan, ScalarEvolution &SE) {
  removeRedundantCanonicalIVs(Plan);
  removeRedundantInductionCasts(Plan);

  optimizeInductions(Plan, SE);
  simplifyRecipes(Plan);
  removeDeadRecipes(Plan);

  createAndOptimizeReplicateRegions(Plan);

  removeRedundantExpandSCEVRecipes(Plan);
  mergeBlocksIntoPredecessors(Plan);
}

// Add a VPActiveLaneMaskPHIRecipe and related recipes to \p Plan and replace
// the loop terminator with a branch-on-cond recipe with the negated
// active-lane-mask as operand. Note that this turns the loop into an
// uncountable one. Only the existing terminator is replaced; all other
// existing recipes/users remain unchanged, except for poison-generating flags
// being dropped from the canonical IV increment. Return the created
// VPActiveLaneMaskPHIRecipe.
//
// The function uses the following definitions:
//
//  %TripCount = DataAndControlFlowWithoutRuntimeCheck ?
//    calculate-trip-count-minus-VF (original TC) : original TC
//  %IncrementValue = DataAndControlFlowWithoutRuntimeCheck ?
//     CanonicalIVPhi : CanonicalIVIncrement
//  %StartV is the canonical induction start value.
//
// The function adds the following recipes:
//
// vector.ph:
//   %TripCount = calculate-trip-count-minus-VF (original TC)
//       [if DataAndControlFlowWithoutRuntimeCheck]
//   %EntryInc = canonical-iv-increment-for-part %StartV
//   %EntryALM = active-lane-mask %EntryInc, %TripCount
//
// vector.body:
//   ...
//   %P = active-lane-mask-phi [ %EntryALM, %vector.ph ], [ %ALM, %vector.body ]
//   ...
//   %InLoopInc = canonical-iv-increment-for-part %IncrementValue
//   %ALM = active-lane-mask %InLoopInc, TripCount
//   %Negated = Not %ALM
//   branch-on-cond %Negated
//
static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch(
    VPlan &Plan, bool DataAndControlFlowWithoutRuntimeCheck) {
  VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
  VPBasicBlock *EB = TopRegion->getExitingBasicBlock();
  auto *CanonicalIVPHI = Plan.getCanonicalIV();
  VPValue *StartV = CanonicalIVPHI->getStartValue();

  auto *CanonicalIVIncrement =
      cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
  // TODO: Check if dropping the flags is needed if
  // !DataAndControlFlowWithoutRuntimeCheck.
  CanonicalIVIncrement->dropPoisonGeneratingFlags();
  DebugLoc DL = CanonicalIVIncrement->getDebugLoc();
  // We can't use StartV directly in the ActiveLaneMask VPInstruction, since
  // we have to take unrolling into account. Each part needs to start at
  //   Part * VF
  auto *VecPreheader = cast<VPBasicBlock>(TopRegion->getSinglePredecessor());
  VPBuilder Builder(VecPreheader);

  // Create the ActiveLaneMask instruction using the correct start values.
  VPValue *TC = Plan.getTripCount();

  VPValue *TripCount, *IncrementValue;
  if (!DataAndControlFlowWithoutRuntimeCheck) {
    // When the loop is guarded by a runtime overflow check for the loop
    // induction variable increment by VF, we can increment the value before
    // the get.active.lane.mask intrinsic and use the unmodified trip count.
    IncrementValue = CanonicalIVIncrement;
    TripCount = TC;
  } else {
    // When avoiding a runtime check, the active.lane.mask inside the loop
    // uses a modified trip count and the induction variable increment is
    // done after the active.lane.mask intrinsic is called.
    IncrementValue = CanonicalIVPHI;
    TripCount = Builder.createNaryOp(VPInstruction::CalculateTripCountMinusVF,
                                     {TC}, DL);
  }
  auto *EntryIncrement = Builder.createOverflowingOp(
      VPInstruction::CanonicalIVIncrementForPart, {StartV}, {false, false}, DL,
      "index.part.next");

  // Create the active lane mask instruction in the VPlan preheader.
  auto *EntryALM =
      Builder.createNaryOp(VPInstruction::ActiveLaneMask, {EntryIncrement, TC},
                           DL, "active.lane.mask.entry");

  // Now create the ActiveLaneMaskPhi recipe in the main loop using the
  // preheader ActiveLaneMask instruction.
  auto LaneMaskPhi = new VPActiveLaneMaskPHIRecipe(EntryALM, DebugLoc());
  LaneMaskPhi->insertAfter(CanonicalIVPHI);

  // Create the active lane mask for the next iteration of the loop before the
  // original terminator.
  VPRecipeBase *OriginalTerminator = EB->getTerminator();
  Builder.setInsertPoint(OriginalTerminator);
  auto *InLoopIncrement =
      Builder.createOverflowingOp(VPInstruction::CanonicalIVIncrementForPart,
                                  {IncrementValue}, {false, false}, DL);
  auto *ALM = Builder.createNaryOp(VPInstruction::ActiveLaneMask,
                                   {InLoopIncrement, TripCount}, DL,
                                   "active.lane.mask.next");
  LaneMaskPhi->addOperand(ALM);

  // Replace the original terminator with BranchOnCond. We have to invert the
  // mask here because a true condition means jumping to the exit block.
  auto *NotMask = Builder.createNot(ALM, DL);
  Builder.createNaryOp(VPInstruction::BranchOnCond, {NotMask}, DL);
  OriginalTerminator->eraseFromParent();
  return LaneMaskPhi;
}

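/// Replace (ICMP_ULE, wide canonical IV, backedge-taken-count) checks with an
/// active-lane-mask, either as a plain ActiveLaneMask VPInstruction or, when
/// \p UseActiveLaneMaskForControlFlow is set, as an active-lane-mask phi that
/// also drives the loop's exit branch.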
void VPlanTransforms::addActiveLaneMask(
    VPlan &Plan, bool UseActiveLaneMaskForControlFlow,
    bool DataAndControlFlowWithoutRuntimeCheck) {
  assert((!DataAndControlFlowWithoutRuntimeCheck ||
          UseActiveLaneMaskForControlFlow) &&
         "DataAndControlFlowWithoutRuntimeCheck implies "
         "UseActiveLaneMaskForControlFlow");

  auto FoundWidenCanonicalIVUser =
      find_if(Plan.getCanonicalIV()->users(),
              [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
  assert(FoundWidenCanonicalIVUser &&
         "Must have widened canonical IV when tail folding!");
  auto *WideCanonicalIV =
      cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
  VPRecipeBase *LaneMask;
  if (UseActiveLaneMaskForControlFlow) {
    LaneMask = addVPLaneMaskPhiAndUpdateExitBranch(
        Plan, DataAndControlFlowWithoutRuntimeCheck);
  } else {
    LaneMask = new VPInstruction(VPInstruction::ActiveLaneMask,
                                 {WideCanonicalIV, Plan.getTripCount()},
                                 nullptr, "active.lane.mask");
    LaneMask->insertAfter(WideCanonicalIV);
  }

  // Walk users of WideCanonicalIV and replace all compares of the form
  // (ICMP_ULE, WideCanonicalIV, backedge-taken-count) with an
  // active-lane-mask.
  VPValue *BTC = Plan.getOrCreateBackedgeTakenCount();
  for (VPUser *U : SmallVector<VPUser *>(WideCanonicalIV->users())) {
    auto *CompareToReplace = dyn_cast<VPInstruction>(U);
    if (!CompareToReplace ||
        CompareToReplace->getOpcode() != Instruction::ICmp ||
        CompareToReplace->getPredicate() != CmpInst::ICMP_ULE ||
        CompareToReplace->getOperand(1) != BTC)
      continue;

    assert(CompareToReplace->getOperand(0) == WideCanonicalIV &&
           "WidenCanonicalIV must be the first operand of the compare");
    CompareToReplace->replaceAllUsesWith(LaneMask->getVPSingleValue());
    CompareToReplace->eraseFromParent();
  }
}