//===- VPlanRecipes.cpp - Implementations for VPlan recipes ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains implementations for different VPlan recipes.
///
//===----------------------------------------------------------------------===//

#include "VPlan.h"
#include "VPlanAnalysis.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/VectorBuilder.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <cassert>

using namespace llvm;

using VectorParts = SmallVector<Value *, 2>;

namespace llvm {
extern cl::opt<bool> EnableVPlanNativePath;
}
extern cl::opt<unsigned> ForceTargetInstructionCost;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

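// Note: the mayWriteToMemory, mayReadFromMemory and mayHaveSideEffects
// queries below are conservative: any recipe whose opcode is not explicitly
// handled falls through to the safe default answer of "true".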
bool VPRecipeBase::mayWriteToMemory() const {
  switch (getVPDefID()) {
  case VPInterleaveSC:
    return cast<VPInterleaveRecipe>(this)->getNumStoreOperands() > 0;
  case VPWidenStoreEVLSC:
  case VPWidenStoreSC:
    return true;
  case VPReplicateSC:
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
        ->mayWriteToMemory();
  case VPWidenCallSC:
    return !cast<VPWidenCallRecipe>(this)
                ->getCalledScalarFunction()
                ->onlyReadsMemory();
  case VPBranchOnMaskSC:
  case VPScalarIVStepsSC:
  case VPPredInstPHISC:
    return false;
  case VPBlendSC:
  case VPReductionEVLSC:
  case VPReductionSC:
  case VPWidenCanonicalIVSC:
  case VPWidenCastSC:
  case VPWidenGEPSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenLoadEVLSC:
  case VPWidenLoadSC:
  case VPWidenPHISC:
  case VPWidenSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayWriteToMemory()) &&
           "underlying instruction may write to memory");
    return false;
  }
  default:
    return true;
  }
}

bool VPRecipeBase::mayReadFromMemory() const {
  switch (getVPDefID()) {
  case VPWidenLoadEVLSC:
  case VPWidenLoadSC:
    return true;
  case VPReplicateSC:
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
        ->mayReadFromMemory();
  case VPWidenCallSC:
    return !cast<VPWidenCallRecipe>(this)
                ->getCalledScalarFunction()
                ->onlyWritesMemory();
  case VPBranchOnMaskSC:
  case VPPredInstPHISC:
  case VPScalarIVStepsSC:
  case VPWidenStoreEVLSC:
  case VPWidenStoreSC:
    return false;
  case VPBlendSC:
  case VPReductionEVLSC:
  case VPReductionSC:
  case VPWidenCanonicalIVSC:
  case VPWidenCastSC:
  case VPWidenGEPSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenPHISC:
  case VPWidenSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayReadFromMemory()) &&
           "underlying instruction may read from memory");
    return false;
  }
  default:
    return true;
  }
}

bool VPRecipeBase::mayHaveSideEffects() const {
  switch (getVPDefID()) {
  case VPDerivedIVSC:
  case VPPredInstPHISC:
  case VPScalarCastSC:
    return false;
  case VPInstructionSC:
    switch (cast<VPInstruction>(this)->getOpcode()) {
    case Instruction::Or:
    case Instruction::ICmp:
    case Instruction::Select:
    case VPInstruction::Not:
    case VPInstruction::CalculateTripCountMinusVF:
    case VPInstruction::CanonicalIVIncrementForPart:
    case VPInstruction::ExtractFromEnd:
    case VPInstruction::FirstOrderRecurrenceSplice:
    case VPInstruction::LogicalAnd:
    case VPInstruction::PtrAdd:
      return false;
    default:
      return true;
    }
  case VPWidenCallSC: {
    Function *Fn = cast<VPWidenCallRecipe>(this)->getCalledScalarFunction();
    return mayWriteToMemory() || !Fn->doesNotThrow() || !Fn->willReturn();
  }
  case VPBlendSC:
  case VPReductionEVLSC:
  case VPReductionSC:
  case VPScalarIVStepsSC:
  case VPWidenCanonicalIVSC:
  case VPWidenCastSC:
  case VPWidenGEPSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenPHISC:
  case VPWidenPointerInductionSC:
  case VPWidenSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayHaveSideEffects()) &&
           "underlying instruction has side-effects");
    return false;
  }
  case VPInterleaveSC:
    return mayWriteToMemory();
  case VPWidenLoadEVLSC:
  case VPWidenLoadSC:
  case VPWidenStoreEVLSC:
  case VPWidenStoreSC:
    assert(
        cast<VPWidenMemoryRecipe>(this)->getIngredient().mayHaveSideEffects() ==
            mayWriteToMemory() &&
        "mayHaveSideEffects result for ingredient differs from this "
        "implementation");
    return mayWriteToMemory();
  case VPReplicateSC: {
    auto *R = cast<VPReplicateRecipe>(this);
    return R->getUnderlyingInstr()->mayHaveSideEffects();
  }
  default:
    return true;
  }
}

void VPLiveOut::fixPhi(VPlan &Plan, VPTransformState &State) {
  VPValue *ExitValue = getOperand(0);
  auto Lane = vputils::isUniformAfterVectorization(ExitValue)
                  ? VPLane::getFirstLane()
                  : VPLane::getLastLaneForVF(State.VF);
  VPBasicBlock *MiddleVPBB =
      cast<VPBasicBlock>(Plan.getVectorLoopRegion()->getSingleSuccessor());
  VPRecipeBase *ExitingRecipe = ExitValue->getDefiningRecipe();
  auto *ExitingVPBB = ExitingRecipe ? ExitingRecipe->getParent() : nullptr;
  // Values leaving the vector loop reach live-out phis in the exiting block
  // via the middle block.
  auto *PredVPBB = !ExitingVPBB || ExitingVPBB->getEnclosingLoopRegion()
                       ? MiddleVPBB
                       : ExitingVPBB;
  BasicBlock *PredBB = State.CFG.VPBB2IRBB[PredVPBB];
  // Set insertion point in PredBB in case an extract needs to be generated.
  // TODO: Model extracts explicitly.
  State.Builder.SetInsertPoint(PredBB, PredBB->getFirstNonPHIIt());
  Value *V = State.get(ExitValue, VPIteration(State.UF - 1, Lane));
  if (Phi->getBasicBlockIndex(PredBB) != -1)
    Phi->setIncomingValueForBlock(PredBB, V);
  else
    Phi->addIncoming(V, PredBB);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPLiveOut::print(raw_ostream &O, VPSlotTracker &SlotTracker) const {
  O << "Live-out ";
  getPhi()->printAsOperand(O);
  O << " = ";
  getOperand(0)->printAsOperand(O, SlotTracker);
  O << "\n";
}
#endif

void VPRecipeBase::insertBefore(VPRecipeBase *InsertPos) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(InsertPos->getParent() &&
         "Insertion position not in any VPBasicBlock");
  InsertPos->getParent()->insert(this, InsertPos->getIterator());
}

void VPRecipeBase::insertBefore(VPBasicBlock &BB,
                                iplist<VPRecipeBase>::iterator I) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(I == BB.end() || I->getParent() == &BB);
  BB.insert(this, I);
}

void VPRecipeBase::insertAfter(VPRecipeBase *InsertPos) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(InsertPos->getParent() &&
         "Insertion position not in any VPBasicBlock");
  InsertPos->getParent()->insert(this, std::next(InsertPos->getIterator()));
}

void VPRecipeBase::removeFromParent() {
  assert(getParent() && "Recipe not in any VPBasicBlock");
  getParent()->getRecipeList().remove(getIterator());
  Parent = nullptr;
}

iplist<VPRecipeBase>::iterator VPRecipeBase::eraseFromParent() {
  assert(getParent() && "Recipe not in any VPBasicBlock");
  return getParent()->getRecipeList().erase(getIterator());
}

void VPRecipeBase::moveAfter(VPRecipeBase *InsertPos) {
  removeFromParent();
  insertAfter(InsertPos);
}

void VPRecipeBase::moveBefore(VPBasicBlock &BB,
                              iplist<VPRecipeBase>::iterator I) {
  removeFromParent();
  insertBefore(BB, I);
}

/// Return the underlying instruction to be used for computing \p R's cost via
/// the legacy cost model. Return nullptr if there's no suitable instruction.
static Instruction *getInstructionForCost(const VPRecipeBase *R) {
  if (auto *S = dyn_cast<VPSingleDefRecipe>(R))
    return dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
  if (auto *IG = dyn_cast<VPInterleaveRecipe>(R))
    return IG->getInsertPos();
  if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(R))
    return &WidenMem->getIngredient();
  return nullptr;
}

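// Passing -force-target-instruction-cost=<N> (the cl::opt behind
// ForceTargetInstructionCost) makes cost() below override the computed cost
// of every recipe that has an underlying instruction, which is handy for
// isolating cost-model effects in tests.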
InstructionCost VPRecipeBase::cost(ElementCount VF, VPCostContext &Ctx) {
  auto *UI = getInstructionForCost(this);
  if (UI && Ctx.skipCostComputation(UI, VF.isVector()))
    return 0;

  InstructionCost RecipeCost = computeCost(VF, Ctx);
  if (UI && ForceTargetInstructionCost.getNumOccurrences() > 0 &&
      RecipeCost.isValid())
    RecipeCost = InstructionCost(ForceTargetInstructionCost);

  LLVM_DEBUG({
    dbgs() << "Cost of " << RecipeCost << " for VF " << VF << ": ";
    dump();
  });
  return RecipeCost;
}

InstructionCost VPRecipeBase::computeCost(ElementCount VF,
                                          VPCostContext &Ctx) const {
  // Compute the cost for the recipe falling back to the legacy cost model
  // using the underlying instruction. If there is no underlying instruction,
  // returns 0.
  Instruction *UI = getInstructionForCost(this);
  if (UI && isa<VPReplicateRecipe>(this)) {
    // VPReplicateRecipe may be cloned as part of an existing VPlan-to-VPlan
    // transform; avoid computing their cost multiple times for now.
    Ctx.SkipCostComputation.insert(UI);
  }
  return UI ? Ctx.getLegacyCost(UI, VF) : 0;
}

FastMathFlags VPRecipeWithIRFlags::getFastMathFlags() const {
  assert(OpType == OperationType::FPMathOp &&
         "recipe doesn't have fast math flags");
  FastMathFlags Res;
  Res.setAllowReassoc(FMFs.AllowReassoc);
  Res.setNoNaNs(FMFs.NoNaNs);
  Res.setNoInfs(FMFs.NoInfs);
  Res.setNoSignedZeros(FMFs.NoSignedZeros);
  Res.setAllowReciprocal(FMFs.AllowReciprocal);
  Res.setAllowContract(FMFs.AllowContract);
  Res.setApproxFunc(FMFs.ApproxFunc);
  return Res;
}

VPInstruction::VPInstruction(unsigned Opcode, CmpInst::Predicate Pred,
                             VPValue *A, VPValue *B, DebugLoc DL,
                             const Twine &Name)
    : VPRecipeWithIRFlags(VPDef::VPInstructionSC, ArrayRef<VPValue *>({A, B}),
                          Pred, DL),
      Opcode(Opcode), Name(Name.str()) {
  assert(Opcode == Instruction::ICmp &&
         "only ICmp predicates supported at the moment");
}

VPInstruction::VPInstruction(unsigned Opcode,
                             std::initializer_list<VPValue *> Operands,
                             FastMathFlags FMFs, DebugLoc DL, const Twine &Name)
    : VPRecipeWithIRFlags(VPDef::VPInstructionSC, Operands, FMFs, DL),
      Opcode(Opcode), Name(Name.str()) {
  // Make sure the VPInstruction is a floating-point operation.
  assert(isFPMathOp() && "this op can't take fast-math flags");
}

bool VPInstruction::doesGeneratePerAllLanes() const {
  return Opcode == VPInstruction::PtrAdd && !vputils::onlyFirstLaneUsed(this);
}

bool VPInstruction::canGenerateScalarForFirstLane() const {
  if (Instruction::isBinaryOp(getOpcode()))
    return true;
  if (isSingleScalar() || isVectorToScalar())
    return true;
  switch (Opcode) {
  case Instruction::ICmp:
  case VPInstruction::BranchOnCond:
  case VPInstruction::BranchOnCount:
  case VPInstruction::CalculateTripCountMinusVF:
  case VPInstruction::CanonicalIVIncrementForPart:
  case VPInstruction::PtrAdd:
  case VPInstruction::ExplicitVectorLength:
    return true;
  default:
    return false;
  }
}

Value *VPInstruction::generatePerLane(VPTransformState &State,
                                      const VPIteration &Lane) {
  IRBuilderBase &Builder = State.Builder;

  assert(getOpcode() == VPInstruction::PtrAdd &&
         "only PtrAdd opcodes are supported for now");
  return Builder.CreatePtrAdd(State.get(getOperand(0), Lane),
                              State.get(getOperand(1), Lane), Name);
}

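// generatePerPart first handles generic binary opcodes, then dispatches on
// the VPlan-specific opcodes below. Cases that produce a single scalar
// compute it for part 0 and re-use that value for all later unroll parts.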
Value *VPInstruction::generatePerPart(VPTransformState &State, unsigned Part) {
  IRBuilderBase &Builder = State.Builder;

  if (Instruction::isBinaryOp(getOpcode())) {
    bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
    Value *A = State.get(getOperand(0), Part, OnlyFirstLaneUsed);
    Value *B = State.get(getOperand(1), Part, OnlyFirstLaneUsed);
    auto *Res =
        Builder.CreateBinOp((Instruction::BinaryOps)getOpcode(), A, B, Name);
    if (auto *I = dyn_cast<Instruction>(Res))
      setFlags(I);
    return Res;
  }

  switch (getOpcode()) {
  case VPInstruction::Not: {
    Value *A = State.get(getOperand(0), Part);
    return Builder.CreateNot(A, Name);
  }
  case Instruction::ICmp: {
    bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
    Value *A = State.get(getOperand(0), Part, OnlyFirstLaneUsed);
    Value *B = State.get(getOperand(1), Part, OnlyFirstLaneUsed);
    return Builder.CreateCmp(getPredicate(), A, B, Name);
  }
  case Instruction::Select: {
    Value *Cond = State.get(getOperand(0), Part);
    Value *Op1 = State.get(getOperand(1), Part);
    Value *Op2 = State.get(getOperand(2), Part);
    return Builder.CreateSelect(Cond, Op1, Op2, Name);
  }
  case VPInstruction::ActiveLaneMask: {
    // Get first lane of vector induction variable.
    Value *VIVElem0 = State.get(getOperand(0), VPIteration(Part, 0));
    // Get the original loop tripcount.
    Value *ScalarTC = State.get(getOperand(1), VPIteration(Part, 0));

    // If this part of the active lane mask is scalar, generate the CMP
    // directly to avoid unnecessary extracts.
    if (State.VF.isScalar())
      return Builder.CreateCmp(CmpInst::Predicate::ICMP_ULT, VIVElem0, ScalarTC,
                               Name);

    auto *Int1Ty = Type::getInt1Ty(Builder.getContext());
    auto *PredTy = VectorType::get(Int1Ty, State.VF);
    return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
                                   {PredTy, ScalarTC->getType()},
                                   {VIVElem0, ScalarTC}, nullptr, Name);
  }
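  // A sketch of what the ActiveLaneMask case above emits for VF=4 with i64
  // counters (value names invented for illustration):
  //   %mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %iv0, i64 %tc)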
  case VPInstruction::FirstOrderRecurrenceSplice: {
    // Generate code to combine the previous and current values in vector v3.
    //
    // vector.ph:
    //   v_init = vector(..., ..., ..., a[-1])
    //   br vector.body
    //
    // vector.body
    //   i = phi [0, vector.ph], [i+4, vector.body]
    //   v1 = phi [v_init, vector.ph], [v2, vector.body]
    //   v2 = a[i, i+1, i+2, i+3];
    //   v3 = vector(v1(3), v2(0, 1, 2))

    // For the first part, use the recurrence phi (v1), otherwise v2.
    auto *V1 = State.get(getOperand(0), 0);
    Value *PartMinus1 = Part == 0 ? V1 : State.get(getOperand(1), Part - 1);
    if (!PartMinus1->getType()->isVectorTy())
      return PartMinus1;
    Value *V2 = State.get(getOperand(1), Part);
    return Builder.CreateVectorSplice(PartMinus1, V2, -1, Name);
  }
  case VPInstruction::CalculateTripCountMinusVF: {
    if (Part != 0)
      return State.get(this, 0, /*IsScalar*/ true);

    Value *ScalarTC = State.get(getOperand(0), {0, 0});
    Value *Step =
        createStepForVF(Builder, ScalarTC->getType(), State.VF, State.UF);
    Value *Sub = Builder.CreateSub(ScalarTC, Step);
    Value *Cmp = Builder.CreateICmp(CmpInst::Predicate::ICMP_UGT, ScalarTC, Step);
    Value *Zero = ConstantInt::get(ScalarTC->getType(), 0);
    return Builder.CreateSelect(Cmp, Sub, Zero);
  }
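  // The case above computes max(TC - VF * UF, 0) without a branch; e.g. for
  // VF=4 and UF=2 it emits (names invented): %sub = sub i64 %tc, 8,
  // %cmp = icmp ugt i64 %tc, 8, %res = select i1 %cmp, i64 %sub, i64 0.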
  case VPInstruction::ExplicitVectorLength: {
    // Compute EVL
    auto GetEVL = [=](VPTransformState &State, Value *AVL) {
      assert(AVL->getType()->isIntegerTy() &&
             "Requested vector length should be an integer.");

      // TODO: Add support for MaxSafeDist for correct loop emission.
      assert(State.VF.isScalable() && "Expected scalable vector factor.");
      Value *VFArg = State.Builder.getInt32(State.VF.getKnownMinValue());

      Value *EVL = State.Builder.CreateIntrinsic(
          State.Builder.getInt32Ty(), Intrinsic::experimental_get_vector_length,
          {AVL, VFArg, State.Builder.getTrue()});
      return EVL;
    };
    // TODO: Restructure this code with an explicit remainder loop; vsetvli can
    // be outside of the main loop.
    assert(Part == 0 && "No unrolling expected for predicated vectorization.");
    // Compute VTC - IV as the AVL (requested vector length).
    Value *Index = State.get(getOperand(0), VPIteration(0, 0));
    Value *TripCount = State.get(getOperand(1), VPIteration(0, 0));
    Value *AVL = State.Builder.CreateSub(TripCount, Index);
    Value *EVL = GetEVL(State, AVL);
    return EVL;
  }
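  // A sketch of the IR the ExplicitVectorLength case above emits for a
  // scalable VF with a known minimum of 4 (value names invented):
  //   %avl = sub i64 %tc, %iv
  //   %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 4, i1 true)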
  case VPInstruction::CanonicalIVIncrementForPart: {
    auto *IV = State.get(getOperand(0), VPIteration(0, 0));
    if (Part == 0)
      return IV;

    // The canonical IV is incremented by the vectorization factor (num of SIMD
    // elements) times the unroll part.
    Value *Step = createStepForVF(Builder, IV->getType(), State.VF, Part);
    return Builder.CreateAdd(IV, Step, Name, hasNoUnsignedWrap(),
                             hasNoSignedWrap());
  }
  case VPInstruction::BranchOnCond: {
    if (Part != 0)
      return nullptr;

    Value *Cond = State.get(getOperand(0), VPIteration(Part, 0));
    // Replace the temporary unreachable terminator with a new conditional
    // branch, hooking it up to backward destination for exiting blocks now and
    // to forward destination(s) later when they are created.
    BranchInst *CondBr =
        Builder.CreateCondBr(Cond, Builder.GetInsertBlock(), nullptr);
    CondBr->setSuccessor(0, nullptr);
    Builder.GetInsertBlock()->getTerminator()->eraseFromParent();

    if (!getParent()->isExiting())
      return CondBr;

    VPRegionBlock *ParentRegion = getParent()->getParent();
    VPBasicBlock *Header = ParentRegion->getEntryBasicBlock();
    CondBr->setSuccessor(1, State.CFG.VPBB2IRBB[Header]);
    return CondBr;
  }
  case VPInstruction::BranchOnCount: {
    if (Part != 0)
      return nullptr;
    // First create the compare.
    Value *IV = State.get(getOperand(0), Part, /*IsScalar*/ true);
    Value *TC = State.get(getOperand(1), Part, /*IsScalar*/ true);
    Value *Cond = Builder.CreateICmpEQ(IV, TC);

    // Now create the branch.
    auto *Plan = getParent()->getPlan();
    VPRegionBlock *TopRegion = Plan->getVectorLoopRegion();
    VPBasicBlock *Header = TopRegion->getEntry()->getEntryBasicBlock();

    // Replace the temporary unreachable terminator with a new conditional
    // branch, hooking it up to backward destination (the header) now and to the
    // forward destination (the exit/middle block) later when it is created.
    // Note that CreateCondBr expects a valid BB as first argument, so we need
    // to set it to nullptr later.
    BranchInst *CondBr = Builder.CreateCondBr(Cond, Builder.GetInsertBlock(),
                                              State.CFG.VPBB2IRBB[Header]);
    CondBr->setSuccessor(0, nullptr);
    Builder.GetInsertBlock()->getTerminator()->eraseFromParent();
    return CondBr;
  }
  case VPInstruction::ComputeReductionResult: {
    if (Part != 0)
      return State.get(this, 0, /*IsScalar*/ true);

    // FIXME: The cross-recipe dependency on VPReductionPHIRecipe is temporary
    // and will be removed by breaking up the recipe further.
    auto *PhiR = cast<VPReductionPHIRecipe>(getOperand(0));
    auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
    // Get its reduction variable descriptor.
    const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();

    RecurKind RK = RdxDesc.getRecurrenceKind();

    VPValue *LoopExitingDef = getOperand(1);
    Type *PhiTy = OrigPhi->getType();
    VectorParts RdxParts(State.UF);
    for (unsigned Part = 0; Part < State.UF; ++Part)
      RdxParts[Part] = State.get(LoopExitingDef, Part, PhiR->isInLoop());

    // If the vector reduction can be performed in a smaller type, we truncate
    // then extend the loop exit value to enable InstCombine to evaluate the
    // entire expression in the smaller type.
    // TODO: Handle this in truncateToMinBW.
    if (State.VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
      Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), State.VF);
      for (unsigned Part = 0; Part < State.UF; ++Part)
        RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
    }
    // Reduce all of the unrolled parts into a single vector.
    Value *ReducedPartRdx = RdxParts[0];
    unsigned Op = RecurrenceDescriptor::getOpcode(RK);
    if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK))
      Op = Instruction::Or;

    if (PhiR->isOrdered()) {
      ReducedPartRdx = RdxParts[State.UF - 1];
    } else {
      // Floating-point operations should have some FMF to enable the reduction.
      IRBuilderBase::FastMathFlagGuard FMFG(Builder);
      Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
      for (unsigned Part = 1; Part < State.UF; ++Part) {
        Value *RdxPart = RdxParts[Part];
        if (Op != Instruction::ICmp && Op != Instruction::FCmp)
          ReducedPartRdx = Builder.CreateBinOp(
              (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
        else
          ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
      }
    }

    // Create the reduction after the loop. Note that inloop reductions create
    // the target reduction in the loop using a Reduction recipe.
    if ((State.VF.isVector() ||
         RecurrenceDescriptor::isAnyOfRecurrenceKind(RK)) &&
        !PhiR->isInLoop()) {
      ReducedPartRdx =
          createTargetReduction(Builder, RdxDesc, ReducedPartRdx, OrigPhi);
      // If the reduction can be performed in a smaller type, we need to extend
      // the reduction to the wider type before we branch to the original loop.
      if (PhiTy != RdxDesc.getRecurrenceType())
        ReducedPartRdx = RdxDesc.isSigned()
                             ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
                             : Builder.CreateZExt(ReducedPartRdx, PhiTy);
    }

    // If there were stores of the reduction value to a uniform memory address
    // inside the loop, create the final store here.
    if (StoreInst *SI = RdxDesc.IntermediateStore) {
      auto *NewSI = Builder.CreateAlignedStore(
          ReducedPartRdx, SI->getPointerOperand(), SI->getAlign());
      propagateMetadata(NewSI, SI);
    }

    return ReducedPartRdx;
  }
  case VPInstruction::ExtractFromEnd: {
    if (Part != 0)
      return State.get(this, 0, /*IsScalar*/ true);

    auto *CI = cast<ConstantInt>(getOperand(1)->getLiveInIRValue());
    unsigned Offset = CI->getZExtValue();
    assert(Offset > 0 && "Offset from end must be positive");
    Value *Res;
    if (State.VF.isVector()) {
      assert(Offset <= State.VF.getKnownMinValue() &&
             "invalid offset to extract from");
      // Extract lane VF - Offset from the operand.
      Res = State.get(
          getOperand(0),
          VPIteration(State.UF - 1, VPLane::getLaneFromEnd(State.VF, Offset)));
    } else {
      assert(Offset <= State.UF && "invalid offset to extract from");
      // When loop is unrolled without vectorizing, retrieve UF - Offset.
      Res = State.get(getOperand(0), State.UF - Offset);
    }
    if (isa<ExtractElementInst>(Res))
      Res->setName(Name);
    return Res;
  }
  case VPInstruction::LogicalAnd: {
    Value *A = State.get(getOperand(0), Part);
    Value *B = State.get(getOperand(1), Part);
    return Builder.CreateLogicalAnd(A, B, Name);
  }
  case VPInstruction::PtrAdd: {
    assert(vputils::onlyFirstLaneUsed(this) &&
           "can only generate first lane for PtrAdd");
    Value *Ptr = State.get(getOperand(0), Part, /* IsScalar */ true);
    Value *Addend = State.get(getOperand(1), Part, /* IsScalar */ true);
    return Builder.CreatePtrAdd(Ptr, Addend, Name);
  }
  case VPInstruction::ResumePhi: {
    if (Part != 0)
      return State.get(this, 0, /*IsScalar*/ true);
    Value *IncomingFromVPlanPred =
        State.get(getOperand(0), Part, /* IsScalar */ true);
    Value *IncomingFromOtherPreds =
        State.get(getOperand(1), Part, /* IsScalar */ true);
    auto *NewPhi =
        Builder.CreatePHI(IncomingFromOtherPreds->getType(), 2, Name);
    BasicBlock *VPlanPred =
        State.CFG
            .VPBB2IRBB[cast<VPBasicBlock>(getParent()->getSinglePredecessor())];
    NewPhi->addIncoming(IncomingFromVPlanPred, VPlanPred);
    for (auto *OtherPred : predecessors(Builder.GetInsertBlock())) {
      assert(OtherPred != VPlanPred &&
             "VPlan predecessors should not be connected yet");
      NewPhi->addIncoming(IncomingFromOtherPreds, OtherPred);
    }
    return NewPhi;
  }

  default:
    llvm_unreachable("Unsupported opcode for instruction");
  }
}

bool VPInstruction::isVectorToScalar() const {
  return getOpcode() == VPInstruction::ExtractFromEnd ||
         getOpcode() == VPInstruction::ComputeReductionResult;
}

bool VPInstruction::isSingleScalar() const {
  return getOpcode() == VPInstruction::ResumePhi;
}

#if !defined(NDEBUG)
bool VPInstruction::isFPMathOp() const {
  // Inspired by FPMathOperator::classof. Notable differences are that we don't
  // support Call, PHI and Select opcodes here yet.
  return Opcode == Instruction::FAdd || Opcode == Instruction::FMul ||
         Opcode == Instruction::FNeg || Opcode == Instruction::FSub ||
         Opcode == Instruction::FDiv || Opcode == Instruction::FRem ||
         Opcode == Instruction::FCmp || Opcode == Instruction::Select;
}
#endif

void VPInstruction::execute(VPTransformState &State) {
  assert(!State.Instance && "VPInstruction executing an Instance");
  IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
  assert((hasFastMathFlags() == isFPMathOp() ||
          getOpcode() == Instruction::Select) &&
         "Recipe not a FPMathOp but has fast-math flags?");
  if (hasFastMathFlags())
    State.Builder.setFastMathFlags(getFastMathFlags());
  State.setDebugLocFrom(getDebugLoc());
  bool GeneratesPerFirstLaneOnly = canGenerateScalarForFirstLane() &&
                                   (vputils::onlyFirstLaneUsed(this) ||
                                    isVectorToScalar() || isSingleScalar());
  bool GeneratesPerAllLanes = doesGeneratePerAllLanes();
  bool OnlyFirstPartUsed = vputils::onlyFirstPartUsed(this);
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    if (GeneratesPerAllLanes) {
      for (unsigned Lane = 0, NumLanes = State.VF.getKnownMinValue();
           Lane != NumLanes; ++Lane) {
        Value *GeneratedValue = generatePerLane(State, VPIteration(Part, Lane));
        assert(GeneratedValue && "generatePerLane must produce a value");
        State.set(this, GeneratedValue, VPIteration(Part, Lane));
      }
      continue;
    }

    if (Part != 0 && OnlyFirstPartUsed && hasResult()) {
      Value *Part0 = State.get(this, 0, /*IsScalar*/ GeneratesPerFirstLaneOnly);
      State.set(this, Part0, Part,
                /*IsScalar*/ GeneratesPerFirstLaneOnly);
      continue;
    }

    Value *GeneratedValue = generatePerPart(State, Part);
    if (!hasResult())
      continue;
    assert(GeneratedValue && "generatePerPart must produce a value");
    assert((GeneratedValue->getType()->isVectorTy() ==
                !GeneratesPerFirstLaneOnly ||
            State.VF.isScalar()) &&
           "scalar value but not only first lane defined");
    State.set(this, GeneratedValue, Part,
              /*IsScalar*/ GeneratesPerFirstLaneOnly);
  }
}

bool VPInstruction::onlyFirstLaneUsed(const VPValue *Op) const {
  assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
  if (Instruction::isBinaryOp(getOpcode()))
    return vputils::onlyFirstLaneUsed(this);

  switch (getOpcode()) {
  default:
    return false;
  case Instruction::ICmp:
  case VPInstruction::PtrAdd:
    // TODO: Cover additional opcodes.
    return vputils::onlyFirstLaneUsed(this);
  case VPInstruction::ActiveLaneMask:
  case VPInstruction::ExplicitVectorLength:
  case VPInstruction::CalculateTripCountMinusVF:
  case VPInstruction::CanonicalIVIncrementForPart:
  case VPInstruction::BranchOnCount:
  case VPInstruction::BranchOnCond:
  case VPInstruction::ResumePhi:
    return true;
  };
  llvm_unreachable("switch should return");
}

bool VPInstruction::onlyFirstPartUsed(const VPValue *Op) const {
  assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
  if (Instruction::isBinaryOp(getOpcode()))
    return vputils::onlyFirstPartUsed(this);

  switch (getOpcode()) {
  default:
    return false;
  case Instruction::ICmp:
  case Instruction::Select:
    return vputils::onlyFirstPartUsed(this);
  case VPInstruction::BranchOnCount:
  case VPInstruction::BranchOnCond:
  case VPInstruction::CanonicalIVIncrementForPart:
    return true;
  };
  llvm_unreachable("switch should return");
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPInstruction::dump() const {
  VPSlotTracker SlotTracker(getParent()->getPlan());
  print(dbgs(), "", SlotTracker);
}

void VPInstruction::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT ";

  if (hasResult()) {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  switch (getOpcode()) {
  case VPInstruction::Not:
    O << "not";
    break;
  case VPInstruction::SLPLoad:
    O << "combined load";
    break;
  case VPInstruction::SLPStore:
    O << "combined store";
    break;
  case VPInstruction::ActiveLaneMask:
    O << "active lane mask";
    break;
  case VPInstruction::ResumePhi:
    O << "resume-phi";
    break;
  case VPInstruction::ExplicitVectorLength:
    O << "EXPLICIT-VECTOR-LENGTH";
    break;
  case VPInstruction::FirstOrderRecurrenceSplice:
    O << "first-order splice";
    break;
  case VPInstruction::BranchOnCond:
    O << "branch-on-cond";
    break;
  case VPInstruction::CalculateTripCountMinusVF:
    O << "TC > VF ? TC - VF : 0";
    break;
  case VPInstruction::CanonicalIVIncrementForPart:
    O << "VF * Part +";
    break;
  case VPInstruction::BranchOnCount:
    O << "branch-on-count";
    break;
  case VPInstruction::ExtractFromEnd:
    O << "extract-from-end";
    break;
  case VPInstruction::ComputeReductionResult:
    O << "compute-reduction-result";
    break;
  case VPInstruction::LogicalAnd:
    O << "logical-and";
    break;
  case VPInstruction::PtrAdd:
    O << "ptradd";
    break;
  default:
    O << Instruction::getOpcodeName(getOpcode());
  }

  printFlags(O);
  printOperands(O, SlotTracker);

  if (auto DL = getDebugLoc()) {
    O << ", !dbg ";
    DL.print(O);
  }
}
#endif

void VPWidenCallRecipe::execute(VPTransformState &State) {
  assert(State.VF.isVector() && "not widening");
  Function *CalledScalarFn = getCalledScalarFunction();
  assert(!isDbgInfoIntrinsic(CalledScalarFn->getIntrinsicID()) &&
         "DbgInfoIntrinsic should have been dropped during VPlan construction");
  State.setDebugLocFrom(getDebugLoc());

  bool UseIntrinsic = VectorIntrinsicID != Intrinsic::not_intrinsic;
  FunctionType *VFTy = nullptr;
  if (Variant)
    VFTy = Variant->getFunctionType();
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    SmallVector<Type *, 2> TysForDecl;
    // Add return type if intrinsic is overloaded on it.
    if (UseIntrinsic &&
        isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, -1))
      TysForDecl.push_back(VectorType::get(
          CalledScalarFn->getReturnType()->getScalarType(), State.VF));
    SmallVector<Value *, 4> Args;
    for (const auto &I : enumerate(arg_operands())) {
      // Some intrinsics have a scalar argument - don't replace it with a
      // vector.
      Value *Arg;
      if (UseIntrinsic &&
          isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID, I.index()))
        Arg = State.get(I.value(), VPIteration(0, 0));
      // Some vectorized function variants may also take a scalar argument,
      // e.g. linear parameters for pointers. This needs to be the scalar value
      // from the start of the respective part when interleaving.
      else if (VFTy && !VFTy->getParamType(I.index())->isVectorTy())
        Arg = State.get(I.value(), VPIteration(Part, 0));
      else
        Arg = State.get(I.value(), Part);
      if (UseIntrinsic &&
          isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, I.index()))
        TysForDecl.push_back(Arg->getType());
      Args.push_back(Arg);
    }

    Function *VectorF;
    if (UseIntrinsic) {
      // Use vector version of the intrinsic.
      Module *M = State.Builder.GetInsertBlock()->getModule();
      VectorF = Intrinsic::getDeclaration(M, VectorIntrinsicID, TysForDecl);
      assert(VectorF && "Can't retrieve vector intrinsic.");
    } else {
#ifndef NDEBUG
      assert(Variant != nullptr && "Can't create vector function.");
#endif
      VectorF = Variant;
    }

    auto *CI = cast_or_null<CallInst>(getUnderlyingInstr());
    SmallVector<OperandBundleDef, 1> OpBundles;
    if (CI)
      CI->getOperandBundlesAsDefs(OpBundles);

    CallInst *V = State.Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    if (!V->getType()->isVoidTy())
      State.set(this, V, Part);
    State.addMetadata(V, CI);
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenCallRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-CALL ";

  Function *CalledFn = getCalledScalarFunction();
  if (CalledFn->getReturnType()->isVoidTy())
    O << "void ";
  else {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  O << "call @" << CalledFn->getName() << "(";
  interleaveComma(arg_operands(), O, [&O, &SlotTracker](VPValue *Op) {
    Op->printAsOperand(O, SlotTracker);
  });
  O << ")";

  if (VectorIntrinsicID)
    O << " (using vector intrinsic)";
  else {
    O << " (using library function";
    if (Variant->hasName())
      O << ": " << Variant->getName();
    O << ")";
  }
}

void VPWidenSelectRecipe::print(raw_ostream &O, const Twine &Indent,
                                VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-SELECT ";
  printAsOperand(O, SlotTracker);
  O << " = select ";
  getOperand(0)->printAsOperand(O, SlotTracker);
  O << ", ";
  getOperand(1)->printAsOperand(O, SlotTracker);
  O << ", ";
  getOperand(2)->printAsOperand(O, SlotTracker);
  O << (isInvariantCond() ? " (condition is loop invariant)" : "");
}
#endif

void VPWidenSelectRecipe::execute(VPTransformState &State) {
  State.setDebugLocFrom(getDebugLoc());

  // The condition can be loop invariant but still defined inside the
  // loop. This means that we can't just use the original 'cond' value.
  // We have to take the 'vectorized' value and pick the first lane.
  // Instcombine will make this a no-op.
  auto *InvarCond =
      isInvariantCond() ? State.get(getCond(), VPIteration(0, 0)) : nullptr;

  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *Cond = InvarCond ? InvarCond : State.get(getCond(), Part);
    Value *Op0 = State.get(getOperand(1), Part);
    Value *Op1 = State.get(getOperand(2), Part);
    Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
    State.set(this, Sel, Part);
    State.addMetadata(Sel, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
  }
}

VPRecipeWithIRFlags::FastMathFlagsTy::FastMathFlagsTy(
    const FastMathFlags &FMF) {
  AllowReassoc = FMF.allowReassoc();
  NoNaNs = FMF.noNaNs();
  NoInfs = FMF.noInfs();
  NoSignedZeros = FMF.noSignedZeros();
  AllowReciprocal = FMF.allowReciprocal();
  AllowContract = FMF.allowContract();
  ApproxFunc = FMF.approxFunc();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPRecipeWithIRFlags::printFlags(raw_ostream &O) const {
  switch (OpType) {
  case OperationType::Cmp:
    O << " " << CmpInst::getPredicateName(getPredicate());
    break;
  case OperationType::DisjointOp:
    if (DisjointFlags.IsDisjoint)
      O << " disjoint";
    break;
  case OperationType::PossiblyExactOp:
    if (ExactFlags.IsExact)
      O << " exact";
    break;
  case OperationType::OverflowingBinOp:
    if (WrapFlags.HasNUW)
      O << " nuw";
    if (WrapFlags.HasNSW)
      O << " nsw";
    break;
  case OperationType::FPMathOp:
    getFastMathFlags().print(O);
    break;
  case OperationType::GEPOp:
    if (GEPFlags.IsInBounds)
      O << " inbounds";
    break;
  case OperationType::NonNegOp:
    if (NonNegFlags.NonNeg)
      O << " nneg";
    break;
  case OperationType::Other:
    break;
  }
  if (getNumOperands() > 0)
    O << " ";
}
#endif

void VPWidenRecipe::execute(VPTransformState &State) {
  State.setDebugLocFrom(getDebugLoc());
  auto &Builder = State.Builder;
  switch (Opcode) {
  case Instruction::Call:
  case Instruction::Br:
  case Instruction::PHI:
  case Instruction::GetElementPtr:
  case Instruction::Select:
    llvm_unreachable("This instruction is handled by a different recipe.");
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::FNeg:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Just widen unops and binops.
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      SmallVector<Value *, 2> Ops;
      for (VPValue *VPOp : operands())
        Ops.push_back(State.get(VPOp, Part));

      Value *V = Builder.CreateNAryOp(Opcode, Ops);

      if (auto *VecOp = dyn_cast<Instruction>(V))
        setFlags(VecOp);

      // Use this vector value for all users of the original instruction.
      State.set(this, V, Part);
      State.addMetadata(V, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
    }

    break;
  }
  case Instruction::Freeze: {
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      Value *Op = State.get(getOperand(0), Part);

      Value *Freeze = Builder.CreateFreeze(Op);
      State.set(this, Freeze, Part);
    }
    break;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Widen compares. Generate vector compares.
    bool FCmp = Opcode == Instruction::FCmp;
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      Value *A = State.get(getOperand(0), Part);
      Value *B = State.get(getOperand(1), Part);
      Value *C = nullptr;
      if (FCmp) {
        // Propagate fast math flags.
        IRBuilder<>::FastMathFlagGuard FMFG(Builder);
        if (auto *I = dyn_cast_or_null<Instruction>(getUnderlyingValue()))
          Builder.setFastMathFlags(I->getFastMathFlags());
        C = Builder.CreateFCmp(getPredicate(), A, B);
      } else {
        C = Builder.CreateICmp(getPredicate(), A, B);
      }
      State.set(this, C, Part);
      State.addMetadata(C, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
    }

    break;
  }
  default:
    // This instruction is not vectorized by simple widening.
    LLVM_DEBUG(dbgs() << "LV: Found an unhandled opcode : "
                      << Instruction::getOpcodeName(Opcode));
    llvm_unreachable("Unhandled instruction!");
  } // end of switch.

#if !defined(NDEBUG)
  // Verify that VPlan type inference results agree with the type of the
  // generated values.
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    assert(VectorType::get(State.TypeAnalysis.inferScalarType(this),
                           State.VF) == State.get(this, Part)->getType() &&
           "inferred type and type from generated instructions do not match");
  }
#endif
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenRecipe::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN ";
  printAsOperand(O, SlotTracker);
  O << " = " << Instruction::getOpcodeName(Opcode);
  printFlags(O);
  printOperands(O, SlotTracker);
}
#endif

void VPWidenCastRecipe::execute(VPTransformState &State) {
  State.setDebugLocFrom(getDebugLoc());
  auto &Builder = State.Builder;
  // Vectorize casts.
  assert(State.VF.isVector() && "Not vectorizing?");
  Type *DestTy = VectorType::get(getResultType(), State.VF);
  VPValue *Op = getOperand(0);
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    if (Part > 0 && Op->isLiveIn()) {
      // FIXME: Remove once explicit unrolling is implemented using VPlan.
      State.set(this, State.get(this, 0), Part);
      continue;
    }
    Value *A = State.get(Op, Part);
    Value *Cast = Builder.CreateCast(Instruction::CastOps(Opcode), A, DestTy);
    State.set(this, Cast, Part);
    State.addMetadata(Cast, cast_or_null<Instruction>(getUnderlyingValue()));
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenCastRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-CAST ";
  printAsOperand(O, SlotTracker);
  O << " = " << Instruction::getOpcodeName(Opcode) << " ";
  printFlags(O);
  printOperands(O, SlotTracker);
  O << " to " << *getResultType();
}
#endif

/// This function adds
/// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
/// to each vector element of Val. The sequence starts at StartIndex.
/// \p Opcode is relevant for FP induction variable.
static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
                            Instruction::BinaryOps BinOp, ElementCount VF,
                            IRBuilderBase &Builder) {
  assert(VF.isVector() && "only vector VFs are supported");

  // Create and check the types.
  auto *ValVTy = cast<VectorType>(Val->getType());
  ElementCount VLen = ValVTy->getElementCount();

  Type *STy = Val->getType()->getScalarType();
  assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
         "Induction Step must be an integer or FP");
  assert(Step->getType() == STy && "Step has wrong type");

  // Create a vector of consecutive numbers from zero to VF.
  VectorType *InitVecValVTy = ValVTy;
  if (STy->isFloatingPointTy()) {
    Type *InitVecValSTy =
        IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
    InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
  }
  Value *InitVec = Builder.CreateStepVector(InitVecValVTy);

  // Splat the StartIdx.
  Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);

  if (STy->isIntegerTy()) {
    InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
    Step = Builder.CreateVectorSplat(VLen, Step);
    assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
    Step = Builder.CreateMul(InitVec, Step);
    return Builder.CreateAdd(Val, Step, "induction");
  }

  // Floating-point induction.
  assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
         "Binary Opcode should be specified for FP induction");
  InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
  InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);

  Step = Builder.CreateVectorSplat(VLen, Step);
  Value *MulOp = Builder.CreateFMul(InitVec, Step);
  return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
}
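
// Worked example for the integer path above: with Val = <a, a, a, a>,
// StartIdx = 0 and Step = s, the result is <a, a+s, a+2*s, a+3*s>.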

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Int or FP induction being replicated.");

  Value *Start = getStartValue()->getLiveInIRValue();
  const InductionDescriptor &ID = getInductionDescriptor();
  TruncInst *Trunc = getTruncInst();
  IRBuilderBase &Builder = State.Builder;
  assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
  assert(State.VF.isVector() && "must have vector VF");

  // The value from the original loop to which we are mapping the new induction
  // variable.
  Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;

  // Fast-math-flags propagate from the original induction instruction.
  IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
    Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());

  // Now do the actual transformations, and start with fetching the step value.
  Value *Step = State.get(getStepValue(), VPIteration(0, 0));

  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // Construct the initial value of the vector IV in the vector loop preheader.
  auto CurrIP = Builder.saveIP();
  BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
  Builder.SetInsertPoint(VectorPH->getTerminator());
  if (isa<TruncInst>(EntryVal)) {
    assert(Start->getType()->isIntegerTy() &&
           "Truncation requires an integer type");
    auto *TruncType = cast<IntegerType>(EntryVal->getType());
    Step = Builder.CreateTrunc(Step, TruncType);
    Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
  }

  Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
  Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
  Value *SteppedStart = getStepVector(
      SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder);

  // We create vector phi nodes for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (Step->getType()->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = ID.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Multiply the vectorization factor by the step using integer or
  // floating-point arithmetic as appropriate.
  Type *StepType = Step->getType();
  Value *RuntimeVF;
  if (Step->getType()->isFloatingPointTy())
    RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
  else
    RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
  Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);

  // Create a vector splat to use in the induction update.
  //
  // FIXME: If the step is non-constant, we create the vector splat with
  // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
  // handle a constant vector splat.
  Value *SplatVF = isa<Constant>(Mul)
                       ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
                       : Builder.CreateVectorSplat(State.VF, Mul);
  Builder.restoreIP(CurrIP);

  // We may need to add the step a number of times, depending on the unroll
  // factor. The last of those goes into the PHI.
  PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind");
  VecInd->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
  VecInd->setDebugLoc(EntryVal->getDebugLoc());
  Instruction *LastInduction = VecInd;
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    State.set(this, LastInduction, Part);

    if (isa<TruncInst>(EntryVal))
      State.addMetadata(LastInduction, EntryVal);

    LastInduction = cast<Instruction>(
        Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
    LastInduction->setDebugLoc(EntryVal->getDebugLoc());
  }

  LastInduction->setName("vec.ind.next");
  VecInd->addIncoming(SteppedStart, VectorPH);
  // Add induction update using an incorrect block temporarily. The phi node
  // will be fixed after VPlan execution. Note that at this point the latch
  // block cannot be used, as it does not exist yet.
  // TODO: Model increment value in VPlan, by turning the recipe into a
  // multi-def and a subclass of VPHeaderPHIRecipe.
  VecInd->addIncoming(LastInduction, VectorPH);
}
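
// For an integer IV with start b, step s and VF=4, the code above seeds the
// vector phi with <b, b+s, b+2*s, b+3*s> and advances it by splat(4*s) per
// unrolled part; the final increment, named vec.ind.next, feeds the phi.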

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenIntOrFpInductionRecipe::print(raw_ostream &O, const Twine &Indent,
                                          VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-INDUCTION";
  if (getTruncInst()) {
    O << "\\l\"";
    O << " +\n" << Indent << "\" " << VPlanIngredient(IV) << "\\l\"";
    O << " +\n" << Indent << "\" ";
    getVPValue(0)->printAsOperand(O, SlotTracker);
  } else
    O << " " << VPlanIngredient(IV);

  O << ", ";
  getStepValue()->printAsOperand(O, SlotTracker);
}
#endif

1373 // The step may be defined by a recipe in the preheader (e.g. if it requires
1374 // SCEV expansion), but for the canonical induction the step is required to be
1375 // 1, which is represented as live-in.
1377 return false;
1378 auto *StepC = dyn_cast<ConstantInt>(getStepValue()->getLiveInIRValue());
1379 auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue());
1380 auto *CanIV = cast<VPCanonicalIVPHIRecipe>(&*getParent()->begin());
1381 return StartC && StartC->isZero() && StepC && StepC->isOne() &&
1382 getScalarType() == CanIV->getScalarType();
1383}
1384
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPDerivedIVRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent;
  printAsOperand(O, SlotTracker);
  O << Indent << "= DERIVED-IV ";
  getStartValue()->printAsOperand(O, SlotTracker);
  O << " + ";
  getOperand(1)->printAsOperand(O, SlotTracker);
  O << " * ";
  getStepValue()->printAsOperand(O, SlotTracker);
}
#endif

void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
  // Fast-math-flags propagate from the original induction instruction.
  IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
  if (hasFastMathFlags())
    State.Builder.setFastMathFlags(getFastMathFlags());

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step.

  Value *BaseIV = State.get(getOperand(0), VPIteration(0, 0));
  Value *Step = State.get(getStepValue(), VPIteration(0, 0));
  IRBuilderBase &Builder = State.Builder;

  // Ensure step has the same type as that of scalar IV.
  Type *BaseIVTy = BaseIV->getType()->getScalarType();
  assert(BaseIVTy == Step->getType() && "Types of BaseIV and Step must match!");

  // We build scalar steps for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (BaseIVTy->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = InductionOpcode;
    MulOp = Instruction::FMul;
  }

  // Determine the number of scalars we need to generate for each unroll
  // iteration.
  bool FirstLaneOnly = vputils::onlyFirstLaneUsed(this);
  // Compute the scalar steps and save the results in State.
  Type *IntStepTy =
      IntegerType::get(BaseIVTy->getContext(), BaseIVTy->getScalarSizeInBits());
  Type *VecIVTy = nullptr;
  Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
  if (!FirstLaneOnly && State.VF.isScalable()) {
    VecIVTy = VectorType::get(BaseIVTy, State.VF);
    UnitStepVec =
        Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
    SplatStep = Builder.CreateVectorSplat(State.VF, Step);
    SplatIV = Builder.CreateVectorSplat(State.VF, BaseIV);
  }

  unsigned StartPart = 0;
  unsigned EndPart = State.UF;
  unsigned StartLane = 0;
  unsigned EndLane = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
  if (State.Instance) {
    StartPart = State.Instance->Part;
    EndPart = StartPart + 1;
    StartLane = State.Instance->Lane.getKnownLane();
    EndLane = StartLane + 1;
  }
  for (unsigned Part = StartPart; Part < EndPart; ++Part) {
    Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);

    if (!FirstLaneOnly && State.VF.isScalable()) {
      auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
      auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
      if (BaseIVTy->isFloatingPointTy())
        InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
      auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
      auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
      State.set(this, Add, Part);
      // It's useful to record the lane values too for the known minimum number
      // of elements so we do those below. This improves the code quality when
      // trying to extract the first element, for example.
    }

    if (BaseIVTy->isFloatingPointTy())
      StartIdx0 = Builder.CreateSIToFP(StartIdx0, BaseIVTy);

    for (unsigned Lane = StartLane; Lane < EndLane; ++Lane) {
      Value *StartIdx = Builder.CreateBinOp(
          AddOp, StartIdx0, getSignedIntOrFpConstant(BaseIVTy, Lane));
      // The step returned by `createStepForVF` is a runtime-evaluated value
      // when VF is scalable. Otherwise, it should be folded into a Constant.
      assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
             "Expected StartIdx to be folded to a constant when VF is not "
             "scalable");
      auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
      auto *Add = Builder.CreateBinOp(AddOp, BaseIV, Mul);
      State.set(this, Add, VPIteration(Part, Lane));
    }
  }
}
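
// E.g. for BaseIV = b, Step = s and a fixed VF of 4, part P produces the
// scalars b + (4*P + L) * s for lanes L = 0..3 (integer path shown).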

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPScalarIVStepsRecipe::print(raw_ostream &O, const Twine &Indent,
                                  VPSlotTracker &SlotTracker) const {
  O << Indent;
  printAsOperand(O, SlotTracker);
  O << " = SCALAR-STEPS ";
  printOperands(O, SlotTracker);
}
#endif

void VPWidenGEPRecipe::execute(VPTransformState &State) {
  assert(State.VF.isVector() && "not widening");
  auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
  // Construct a vector GEP by widening the operands of the scalar GEP as
  // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
  // results in a vector of pointers when at least one operand of the GEP
  // is vector-typed. Thus, to keep the representation compact, we only use
  // vector-typed operands for loop-varying values.

  if (areAllOperandsInvariant()) {
    // If we are vectorizing, but the GEP has only loop-invariant operands,
    // the GEP we build (by only using vector-typed operands for
    // loop-varying values) would be a scalar pointer. Thus, to ensure we
    // produce a vector of pointers, we need to either arbitrarily pick an
    // operand to broadcast, or broadcast a clone of the original GEP.
    // Here, we broadcast a clone of the original.
    //
    // TODO: If at some point we decide to scalarize instructions having
    // loop-invariant operands, this special case will no longer be
    // required. We would add the scalarization decision to
    // collectLoopScalars() and teach getVectorValue() to broadcast
    // the lane-zero scalar value.
    SmallVector<Value *, 4> Ops;
    for (unsigned I = 0, E = getNumOperands(); I != E; I++)
      Ops.push_back(State.get(getOperand(I), VPIteration(0, 0)));

    auto *NewGEP =
        State.Builder.CreateGEP(GEP->getSourceElementType(), Ops[0],
                                ArrayRef(Ops).drop_front(), "", isInBounds());
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, NewGEP);
      State.set(this, EntryPart, Part);
      State.addMetadata(EntryPart, GEP);
    }
  } else {
    // If the GEP has at least one loop-varying operand, we are sure to
    // produce a vector of pointers. But if we are only unrolling, we want
    // to produce a scalar GEP for each unroll part. Thus, the GEP we
    // produce with the code below will be scalar (if VF == 1) or vector
    // (otherwise). Note that for the unroll-only case, we still maintain
    // values in the vector mapping with initVector, as we do for other
    // instructions.
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // The pointer operand of the new GEP. If it's loop-invariant, we
      // won't broadcast it.
      auto *Ptr = isPointerLoopInvariant()
                      ? State.get(getOperand(0), VPIteration(0, 0))
                      : State.get(getOperand(0), Part);

      // Collect all the indices for the new GEP. If any index is
      // loop-invariant, we won't broadcast it.
      SmallVector<Value *, 4> Indices;
      for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
        VPValue *Operand = getOperand(I);
        if (isIndexLoopInvariant(I - 1))
          Indices.push_back(State.get(Operand, VPIteration(0, 0)));
        else
          Indices.push_back(State.get(Operand, Part));
      }

      // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
      // but it should be a vector, otherwise.
      auto *NewGEP = State.Builder.CreateGEP(GEP->getSourceElementType(), Ptr,
                                             Indices, "", isInBounds());
      assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
             "NewGEP is not a pointer vector");
      State.set(this, NewGEP, Part);
      State.addMetadata(NewGEP, GEP);
    }
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenGEPRecipe::print(raw_ostream &O, const Twine &Indent,
                             VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-GEP ";
  O << (isPointerLoopInvariant() ? "Inv" : "Var");
  for (size_t I = 0; I < getNumOperands() - 1; ++I)
    O << "[" << (isIndexLoopInvariant(I) ? "Inv" : "Var") << "]";

  O << " ";
  printAsOperand(O, SlotTracker);
  O << " = getelementptr";
  printFlags(O);
  printOperands(O, SlotTracker);
}
#endif

void VPVectorPointerRecipe::execute(VPTransformState &State) {
  auto &Builder = State.Builder;
  State.setDebugLocFrom(getDebugLoc());
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    // Calculate the pointer for the specific unroll-part.
    Value *PartPtr = nullptr;
    // Use i32 for the gep index type when the value is constant,
    // or query DataLayout for a more suitable index type otherwise.
    const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout();
    Type *IndexTy = State.VF.isScalable() && (IsReverse || Part > 0)
                        ? DL.getIndexType(IndexedTy->getPointerTo())
                        : Builder.getInt32Ty();
    Value *Ptr = State.get(getOperand(0), VPIteration(0, 0));
    bool InBounds = isInBounds();
    if (IsReverse) {
      // If the address is consecutive but reversed, then the
      // wide store needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue()
      Value *RunTimeVF = getRuntimeVF(Builder, IndexTy, State.VF);
      // NumElt = -Part * RunTimeVF
      Value *NumElt = Builder.CreateMul(
          ConstantInt::get(IndexTy, -(int64_t)Part), RunTimeVF);
      // LastLane = 1 - RunTimeVF
      Value *LastLane =
          Builder.CreateSub(ConstantInt::get(IndexTy, 1), RunTimeVF);
      PartPtr = Builder.CreateGEP(IndexedTy, Ptr, NumElt, "", InBounds);
      PartPtr = Builder.CreateGEP(IndexedTy, PartPtr, LastLane, "", InBounds);
    } else {
      Value *Increment = createStepForVF(Builder, IndexTy, State.VF, Part);
      PartPtr = Builder.CreateGEP(IndexedTy, Ptr, Increment, "", InBounds);
    }

    State.set(this, PartPtr, Part, /*IsScalar*/ true);
  }
}
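
// For a reverse access with a fixed VF of 4, the GEPs above compute
// Ptr + (-4 * Part) + (1 - 4), i.e. the lowest-addressed element of the
// part, so the following wide load/store covers the lanes in reverse order.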

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPVectorPointerRecipe::print(raw_ostream &O, const Twine &Indent,
                                  VPSlotTracker &SlotTracker) const {
  O << Indent;
  printAsOperand(O, SlotTracker);
  O << " = vector-pointer ";
  if (IsReverse)
    O << "(reverse) ";

  printOperands(O, SlotTracker);
}
#endif

void VPBlendRecipe::execute(VPTransformState &State) {
  State.setDebugLocFrom(getDebugLoc());
  // We know that all PHIs in non-header blocks are converted into
  // selects, so we don't have to worry about the insertion order and we
  // can just use the builder.
  // At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.

  unsigned NumIncoming = getNumIncomingValues();

  // Generate a sequence of selects of the form:
  // SELECT(Mask3, In3,
  //        SELECT(Mask2, In2,
  //               SELECT(Mask1, In1,
  //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // and are essentially undef are taken from In0.
  VectorParts Entry(State.UF);
  bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
  for (unsigned In = 0; In < NumIncoming; ++In) {
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // We might have single edge PHIs (blocks) - use an identity
      // 'select' for the first PHI operand.
      Value *In0 = State.get(getIncomingValue(In), Part, OnlyFirstLaneUsed);
      if (In == 0)
        Entry[Part] = In0; // Initialize with the first incoming value.
      else {
        // Select between the current value and the previous incoming edge
        // based on the incoming mask.
        Value *Cond = State.get(getMask(In), Part, OnlyFirstLaneUsed);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }

  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.set(this, Entry[Part], Part, OnlyFirstLaneUsed);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPBlendRecipe::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "BLEND ";
  printAsOperand(O, SlotTracker);
  O << " =";
  if (getNumIncomingValues() == 1) {
    // Not a User of any mask: not really blending, this is a
    // single-predecessor phi.
    O << " ";
    getIncomingValue(0)->printAsOperand(O, SlotTracker);
  } else {
    for (unsigned I = 0, E = getNumIncomingValues(); I < E; ++I) {
      O << " ";
      getIncomingValue(I)->printAsOperand(O, SlotTracker);
      if (I == 0)
        continue;
      O << "/";
      getMask(I)->printAsOperand(O, SlotTracker);
    }
  }
}
#endif

void VPReductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Reduction being replicated.");
  Value *PrevInChain = State.get(getChainOp(), 0, /*IsScalar*/ true);
  RecurKind Kind = RdxDesc.getRecurrenceKind();
  // Propagate the fast-math flags carried by the underlying instruction.
  IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
  State.Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *NewVecOp = State.get(getVecOp(), Part);
    if (VPValue *Cond = getCondOp()) {
      Value *NewCond = State.get(Cond, Part, State.VF.isScalar());
      VectorType *VecTy = dyn_cast<VectorType>(NewVecOp->getType());
      Type *ElementTy = VecTy ? VecTy->getElementType() : NewVecOp->getType();
      Value *Iden = RdxDesc.getRecurrenceIdentity(Kind, ElementTy,
                                                  RdxDesc.getFastMathFlags());
      if (State.VF.isVector()) {
        Iden = State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
      }

      Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, Iden);
      NewVecOp = Select;
    }
    Value *NewRed;
    Value *NextInChain;
    if (IsOrdered) {
      if (State.VF.isVector())
        NewRed = createOrderedReduction(State.Builder, RdxDesc, NewVecOp,
                                        PrevInChain);
      else
        NewRed = State.Builder.CreateBinOp(
            (Instruction::BinaryOps)RdxDesc.getOpcode(Kind), PrevInChain,
            NewVecOp);
      PrevInChain = NewRed;
    } else {
      PrevInChain = State.get(getChainOp(), Part, /*IsScalar*/ true);
      NewRed = createTargetReduction(State.Builder, RdxDesc, NewVecOp);
    }
    if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
      NextInChain = createMinMaxOp(State.Builder, RdxDesc.getRecurrenceKind(),
                                   NewRed, PrevInChain);
    else if (IsOrdered)
      NextInChain = NewRed;
    else
      NextInChain = State.Builder.CreateBinOp(
          (Instruction::BinaryOps)RdxDesc.getOpcode(Kind), NewRed, PrevInChain);
    State.set(this, NextInChain, Part, /*IsScalar*/ true);
  }
}

1752 assert(!State.Instance && "Reduction being replicated.");
1753 assert(State.UF == 1 &&
1754 "Expected only UF == 1 when vectorizing with explicit vector length.");
1755
1756 auto &Builder = State.Builder;
1757 // Propagate the fast-math flags carried by the underlying instruction.
1758 IRBuilderBase::FastMathFlagGuard FMFGuard(Builder);
1760 Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
1761
1762 RecurKind Kind = RdxDesc.getRecurrenceKind();
1763 Value *Prev = State.get(getChainOp(), 0, /*IsScalar*/ true);
1764 Value *VecOp = State.get(getVecOp(), 0);
1765 Value *EVL = State.get(getEVL(), VPIteration(0, 0));
1766
1767 VectorBuilder VBuilder(Builder);
1768 VBuilder.setEVL(EVL);
1769 Value *Mask;
1770 // TODO: move the all-true mask generation into VectorBuilder.
1771 if (VPValue *CondOp = getCondOp())
1772 Mask = State.get(CondOp, 0);
1773 else
1774 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
1775 VBuilder.setMask(Mask);
1776
1777 Value *NewRed;
1778 if (isOrdered()) {
1779 NewRed = createOrderedReduction(VBuilder, RdxDesc, VecOp, Prev);
1780 } else {
1781 NewRed = createSimpleTargetReduction(VBuilder, VecOp, RdxDesc);
1782 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
1783 NewRed = createMinMaxOp(Builder, Kind, NewRed, Prev);
1784 else
1785 NewRed = Builder.CreateBinOp(
1786 (Instruction::BinaryOps)RdxDesc.getOpcode(Kind), NewRed, Prev);
1787 }
1788 State.set(this, NewRed, 0, /*IsScalar*/ true);
1789}
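// Sketch of the EVL lowering above for the same add reduction (illustrative
// IR): the mask and explicit vector length bound the active lanes in a single
// VP intrinsic,
//   %red = call i32 @llvm.vp.reduce.add.v4i32(i32 0, <4 x i32> %vec,
//                                             <4 x i1> %mask, i32 %evl)
//   %next = add i32 %red, %prev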
1790
1791#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1792void VPReductionRecipe::print(raw_ostream &O, const Twine &Indent,
1793 VPSlotTracker &SlotTracker) const {
1794 O << Indent << "REDUCE ";
1795 printAsOperand(O, SlotTracker);
1796 O << " = ";
1797 getChainOp()->printAsOperand(O, SlotTracker);
1798 O << " +";
1799 if (isa<FPMathOperator>(getUnderlyingInstr()))
1800 O << getUnderlyingInstr()->getFastMathFlags();
1801 O << " reduce." << Instruction::getOpcodeName(RdxDesc.getOpcode()) << " (";
1802 getVecOp()->printAsOperand(O, SlotTracker);
1803 if (isConditional()) {
1804 O << ", ";
1805 getCondOp()->printAsOperand(O, SlotTracker);
1806 }
1807 O << ")";
1808 if (RdxDesc.IntermediateStore)
1809 O << " (with final reduction value stored in invariant address sank "
1810 "outside of loop)";
1811}
1812
1813void VPReductionEVLRecipe::print(raw_ostream &O, const Twine &Indent,
1814 VPSlotTracker &SlotTracker) const {
1815 const RecurrenceDescriptor &RdxDesc = getRecurrenceDescriptor();
1816 O << Indent << "REDUCE ";
1817 printAsOperand(O, SlotTracker);
1818 O << " = ";
1819 getChainOp()->printAsOperand(O, SlotTracker);
1820 O << " +";
1821 if (isa<FPMathOperator>(getUnderlyingInstr()))
1822 O << getUnderlyingInstr()->getFastMathFlags();
1823 O << " vp.reduce." << Instruction::getOpcodeName(RdxDesc.getOpcode()) << " (";
1824 getVecOp()->printAsOperand(O, SlotTracker);
1825 O << ", ";
1826 getEVL()->printAsOperand(O, SlotTracker);
1827 if (isConditional()) {
1828 O << ", ";
1829 getCondOp()->printAsOperand(O, SlotTracker);
1830 }
1831 O << ")";
1832 if (RdxDesc.IntermediateStore)
1833 O << " (with final reduction value stored in invariant address sank "
1834 "outside of loop)";
1835}
1836#endif
1837
1838bool VPReplicateRecipe::shouldPack() const {
1839 // Find if the recipe is used by a widened recipe via an intervening
1840 // VPPredInstPHIRecipe. In this case, also pack the scalar values in a vector.
1841 return any_of(users(), [](const VPUser *U) {
1842 if (auto *PredR = dyn_cast<VPPredInstPHIRecipe>(U))
1843 return any_of(PredR->users(), [PredR](const VPUser *U) {
1844 return !U->usesScalars(PredR);
1845 });
1846 return false;
1847 });
1848}
1849
1850#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1851void VPReplicateRecipe::print(raw_ostream &O, const Twine &Indent,
1852 VPSlotTracker &SlotTracker) const {
1853 O << Indent << (IsUniform ? "CLONE " : "REPLICATE ");
1854
1855 if (!getUnderlyingInstr()->getType()->isVoidTy()) {
1856 printAsOperand(O, SlotTracker);
1857 O << " = ";
1858 }
1859 if (auto *CB = dyn_cast<CallBase>(getUnderlyingInstr())) {
1860 O << "call";
1861 printFlags(O);
1862 O << "@" << CB->getCalledFunction()->getName() << "(";
1863 interleaveComma(make_range(op_begin(), op_begin() + (getNumOperands() - 1)),
1864 O, [&O, &SlotTracker](VPValue *Op) {
1865 Op->printAsOperand(O, SlotTracker);
1866 });
1867 O << ")";
1868 } else {
1869 O << Instruction::getOpcodeName(getUnderlyingInstr()->getOpcode());
1870 printFlags(O);
1871 printOperands(O, SlotTracker);
1872 }
1873
1874 if (shouldPack())
1875 O << " (S->V)";
1876}
1877#endif
1878
1879/// Checks if \p C is uniform across all VFs and UFs. It is considered as such
1880/// if it is either defined outside the vector region or its operand is known to
1881/// be uniform across all VFs and UFs (e.g. VPDerivedIV or VPCanonicalIVPHI).
1882/// TODO: Uniformity should be associated with a VPValue and there should be a
1883/// generic way to check.
1884static bool isUniformAcrossVFsAndUFs(VPScalarCastRecipe *C) {
1885 return C->isDefinedOutsideVectorRegions() ||
1886 isa<VPDerivedIVRecipe>(C->getOperand(0)) ||
1887 isa<VPCanonicalIVPHIRecipe>(C->getOperand(0));
1888}
1889
1890Value *VPScalarCastRecipe ::generate(VPTransformState &State, unsigned Part) {
1892 "Codegen only implemented for first lane.");
1893 switch (Opcode) {
1894 case Instruction::SExt:
1895 case Instruction::ZExt:
1896 case Instruction::Trunc: {
1897 // Note: SExt/ZExt not used yet.
1898 Value *Op = State.get(getOperand(0), VPIteration(Part, 0));
1899 return State.Builder.CreateCast(Instruction::CastOps(Opcode), Op, ResultTy);
1900 }
1901 default:
1902 llvm_unreachable("opcode not implemented yet");
1903 }
1904}
1905
1906void VPScalarCastRecipe ::execute(VPTransformState &State) {
1907 bool IsUniformAcrossVFsAndUFs = isUniformAcrossVFsAndUFs(this);
1908 for (unsigned Part = 0; Part != State.UF; ++Part) {
1909 Value *Res;
1910 // Only generate a single instance, if the recipe is uniform across UFs and
1911 // VFs.
1912 if (Part > 0 && IsUniformAcrossVFsAndUFs)
1913 Res = State.get(this, VPIteration(0, 0));
1914 else
1915 Res = generate(State, Part);
1916 State.set(this, Res, VPIteration(Part, 0));
1917 }
1918}
1919
1920#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1921void VPScalarCastRecipe ::print(raw_ostream &O, const Twine &Indent,
1922 VPSlotTracker &SlotTracker) const {
1923 O << Indent << "SCALAR-CAST ";
1924 printAsOperand(O, SlotTracker);
1925 O << " = " << Instruction::getOpcodeName(Opcode) << " ";
1926 printOperands(O, SlotTracker);
1927 O << " to " << *ResultTy;
1928}
1929#endif
1930
1931void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
1932 assert(State.Instance && "Branch on Mask works only on single instance.");
1933
1934 unsigned Part = State.Instance->Part;
1935 unsigned Lane = State.Instance->Lane.getKnownLane();
1936
1937 Value *ConditionBit = nullptr;
1938 VPValue *BlockInMask = getMask();
1939 if (BlockInMask) {
1940 ConditionBit = State.get(BlockInMask, Part);
1941 if (ConditionBit->getType()->isVectorTy())
1942 ConditionBit = State.Builder.CreateExtractElement(
1943 ConditionBit, State.Builder.getInt32(Lane));
1944 } else // Block in mask is all-one.
1945 ConditionBit = State.Builder.getTrue();
1946
1947 // Replace the temporary unreachable terminator with a new conditional branch,
1948 // whose two destinations will be set later when they are created.
1949 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
1950 assert(isa<UnreachableInst>(CurrentTerminator) &&
1951 "Expected to replace unreachable terminator with conditional branch.");
1952 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
1953 CondBr->setSuccessor(0, nullptr);
1954 ReplaceInstWithInst(CurrentTerminator, CondBr);
1955}
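// The replaced terminator ends up as a branch on one scalar lane of the mask
// (illustrative IR, lane 2; block names are placeholders):
//   %bit = extractelement <4 x i1> %mask, i32 2
//   br i1 %bit, label %pred.if, label %pred.continue
// with both successors filled in once those blocks are created.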
1956
1957void VPPredInstPHIRecipe::execute(VPTransformState &State) {
1958 assert(State.Instance && "Predicated instruction PHI works per instance.");
1959 Instruction *ScalarPredInst =
1960 cast<Instruction>(State.get(getOperand(0), *State.Instance));
1961 BasicBlock *PredicatedBB = ScalarPredInst->getParent();
1962 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
1963 assert(PredicatingBB && "Predicated block has no single predecessor.");
1964 assert(isa<VPReplicateRecipe>(getOperand(0)) &&
1965 "operand must be VPReplicateRecipe");
1966
1967 // By current pack/unpack logic we need to generate only a single phi node: if
1968 // a vector value for the predicated instruction exists at this point it means
1969 // the instruction has vector users only, and a phi for the vector value is
1970 // needed. In this case the recipe of the predicated instruction is marked to
1971 // also do that packing, thereby "hoisting" the insert-element sequence.
1972 // Otherwise, a phi node for the scalar value is needed.
1973 unsigned Part = State.Instance->Part;
1974 if (State.hasVectorValue(getOperand(0), Part)) {
1975 Value *VectorValue = State.get(getOperand(0), Part);
1976 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
1977 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
1978 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
1979 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
1980 if (State.hasVectorValue(this, Part))
1981 State.reset(this, VPhi, Part);
1982 else
1983 State.set(this, VPhi, Part);
1984 // NOTE: Currently we need to update the value of the operand, so the next
1985 // predicated iteration inserts its generated value in the correct vector.
1986 State.reset(getOperand(0), VPhi, Part);
1987 } else {
1988 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
1989 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
1990 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
1991 PredicatingBB);
1992 Phi->addIncoming(ScalarPredInst, PredicatedBB);
1993 if (State.hasScalarValue(this, *State.Instance))
1994 State.reset(this, Phi, *State.Instance);
1995 else
1996 State.set(this, Phi, *State.Instance);
1997 // NOTE: Currently we need to update the value of the operand, so the next
1998 // predicated iteration inserts its generated value in the correct vector.
1999 State.reset(getOperand(0), Phi, *State.Instance);
2000 }
2001}
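// The packed case in IR terms (illustrative): the predicated block inserts
// one lane and the merge block phis the two vectors,
//   pred.bb:  %v.new = insertelement <4 x i32> %v.old, i32 %s, i32 %lane
//   merge.bb: %v = phi <4 x i32> [ %v.old, %predicating.bb ],
//                                [ %v.new, %pred.bb ]
// so the vector accumulates one predicated lane per iteration.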
2002
2003#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2004void VPPredInstPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2005 VPSlotTracker &SlotTracker) const {
2006 O << Indent << "PHI-PREDICATED-INSTRUCTION ";
2007 printAsOperand(O, SlotTracker);
2008 O << " = ";
2009 printOperands(O, SlotTracker);
2010}
2011#endif
2012
2013void VPWidenLoadRecipe::execute(VPTransformState &State) {
2014 auto *LI = cast<LoadInst>(&Ingredient);
2015
2016 Type *ScalarDataTy = getLoadStoreType(&Ingredient);
2017 auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
2018 const Align Alignment = getLoadStoreAlignment(&Ingredient);
2019 bool CreateGather = !isConsecutive();
2020
2021 auto &Builder = State.Builder;
2022 State.setDebugLocFrom(getDebugLoc());
2023 for (unsigned Part = 0; Part < State.UF; ++Part) {
2024 Value *NewLI;
2025 Value *Mask = nullptr;
2026 if (auto *VPMask = getMask()) {
2027 // Mask reversal is only needed for non-all-one (null) masks, as reverse
2028 // of a null all-one mask is a null mask.
2029 Mask = State.get(VPMask, Part);
2030 if (isReverse())
2031 Mask = Builder.CreateVectorReverse(Mask, "reverse");
2032 }
2033
2034 Value *Addr = State.get(getAddr(), Part, /*IsScalar*/ !CreateGather);
2035 if (CreateGather) {
2036 NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
2037 "wide.masked.gather");
2038 } else if (Mask) {
2039 NewLI = Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
2040 PoisonValue::get(DataTy),
2041 "wide.masked.load");
2042 } else {
2043 NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
2044 }
2045 // Add metadata to the load, but setVectorValue to the reverse shuffle.
2046 State.addMetadata(NewLI, LI);
2047 if (Reverse)
2048 NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
2049 State.set(this, NewLI, Part);
2050 }
2051}
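// Sketch of the reverse+masked path above (illustrative IR, VF=4): both the
// mask and the loaded data are reversed while the load itself stays forward,
//   %mask.rev = shufflevector <4 x i1> %m, <4 x i1> poison,
//                             <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//   %wide = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %addr, i32 4,
//                             <4 x i1> %mask.rev, <4 x i32> poison)
//   %rev = shufflevector <4 x i32> %wide, <4 x i32> poison,
//                        <4 x i32> <i32 3, i32 2, i32 1, i32 0>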
2052
2053#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2054void VPWidenLoadRecipe::print(raw_ostream &O, const Twine &Indent,
2055 VPSlotTracker &SlotTracker) const {
2056 O << Indent << "WIDEN ";
2057 printAsOperand(O, SlotTracker);
2058 O << " = load ";
2059 printOperands(O, SlotTracker);
2060}
2061
2062void VPWidenLoadEVLRecipe::print(raw_ostream &O, const Twine &Indent,
2063 VPSlotTracker &SlotTracker) const {
2064 O << Indent << "WIDEN ";
2065 printAsOperand(O, SlotTracker);
2066 O << " = vp.load ";
2067 printOperands(O, SlotTracker);
2068}
2069#endif
2070
2071void VPWidenStoreRecipe::execute(VPTransformState &State) {
2072 auto *SI = cast<StoreInst>(&Ingredient);
2073
2074 VPValue *StoredVPValue = getStoredValue();
2075 bool CreateScatter = !isConsecutive();
2076 const Align Alignment = getLoadStoreAlignment(&Ingredient);
2077
2078 auto &Builder = State.Builder;
2079 State.setDebugLocFrom(getDebugLoc());
2080
2081 for (unsigned Part = 0; Part < State.UF; ++Part) {
2082 Instruction *NewSI = nullptr;
2083 Value *Mask = nullptr;
2084 if (auto *VPMask = getMask()) {
2085 // Mask reversal is only needed for non-all-one (null) masks, as reverse
2086 // of a null all-one mask is a null mask.
2087 Mask = State.get(VPMask, Part);
2088 if (isReverse())
2089 Mask = Builder.CreateVectorReverse(Mask, "reverse");
2090 }
2091
2092 Value *StoredVal = State.get(StoredVPValue, Part);
2093 if (isReverse()) {
2094 // If we store to reverse consecutive memory locations, then we need
2095 // to reverse the order of elements in the stored value.
2096 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
2097 // We don't want to update the value in the map as it might be used in
2098 // another expression. So don't call resetVectorValue(StoredVal).
2099 }
2100 Value *Addr = State.get(getAddr(), Part, /*IsScalar*/ !CreateScatter);
2101 if (CreateScatter)
2102 NewSI = Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
2103 else if (Mask)
2104 NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
2105 else
2106 NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
2107 State.addMetadata(NewSI, SI);
2108 }
2109}
2110
2111#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2112void VPWidenStoreRecipe::print(raw_ostream &O, const Twine &Indent,
2113 VPSlotTracker &SlotTracker) const {
2114 O << Indent << "WIDEN store ";
2115 printOperands(O, SlotTracker);
2116}
2117
2118void VPWidenStoreEVLRecipe::print(raw_ostream &O, const Twine &Indent,
2119 VPSlotTracker &SlotTracker) const {
2120 O << Indent << "WIDEN vp.store ";
2121 printOperands(O, SlotTracker);
2122}
2123#endif
2124
2125static Value *createBitOrPointerCast(IRBuilderBase &Builder, Value *V,
2126 VectorType *DstVTy, const DataLayout &DL) {
2127 // Verify that V is a vector type with same number of elements as DstVTy.
2128 auto VF = DstVTy->getElementCount();
2129 auto *SrcVecTy = cast<VectorType>(V->getType());
2130 assert(VF == SrcVecTy->getElementCount() && "Vector dimensions do not match");
2131 Type *SrcElemTy = SrcVecTy->getElementType();
2132 Type *DstElemTy = DstVTy->getElementType();
2133 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2134 "Vector elements must have same size");
2135
2136 // Do a direct cast if element types are castable.
2137 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2138 return Builder.CreateBitOrPointerCast(V, DstVTy);
2139 }
2140 // V cannot be directly casted to desired vector type.
2141 // May happen when V is a floating point vector but DstVTy is a vector of
2142 // pointers or vice-versa. Handle this using a two-step bitcast using an
2143 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
2144 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2145 "Only one type should be a pointer type");
2146 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2147 "Only one type should be a floating point type");
2148 Type *IntTy =
2149 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2150 auto *VecIntTy = VectorType::get(IntTy, VF);
2151 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2152 return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2153}
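// E.g. <2 x double> -> <2 x ptr> (with 64-bit pointers) is not directly
// castable, so the helper above produces the two-step form
//   <2 x double> --bitcast--> <2 x i64> --inttoptr--> <2 x ptr>
// where <2 x i64> is the intermediate integer vector of matching width.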
2154
2155/// Return a vector containing interleaved elements from multiple
2156/// smaller input vectors.
2157static Value *interleaveVectors(IRBuilderBase &Builder, ArrayRef<Value *> Vals,
2158 const Twine &Name) {
2159 unsigned Factor = Vals.size();
2160 assert(Factor > 1 && "Tried to interleave invalid number of vectors");
2161
2162 VectorType *VecTy = cast<VectorType>(Vals[0]->getType());
2163#ifndef NDEBUG
2164 for (Value *Val : Vals)
2165 assert(Val->getType() == VecTy && "Tried to interleave mismatched types");
2166#endif
2167
2168 // Scalable vectors cannot use arbitrary shufflevectors (only splats), so
2169 // must use intrinsics to interleave.
2170 if (VecTy->isScalableTy()) {
2171 VectorType *WideVecTy = VectorType::getDoubleElementsVectorType(VecTy);
2172 return Builder.CreateIntrinsic(WideVecTy, Intrinsic::vector_interleave2,
2173 Vals,
2174 /*FMFSource=*/nullptr, Name);
2175 }
2176
2177 // Fixed length. Start by concatenating all vectors into a wide vector.
2178 Value *WideVec = concatenateVectors(Builder, Vals);
2179
2180 // Interleave the elements into the wide vector.
2181 const unsigned NumElts = VecTy->getElementCount().getFixedValue();
2182 return Builder.CreateShuffleVector(
2183 WideVec, createInterleaveMask(NumElts, Factor), Name);
2184}
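// E.g. interleaving two fixed-width vectors A = <a0,a1,a2,a3> and
// B = <b0,b1,b2,b3>: the concatenation is <a0,a1,a2,a3,b0,b1,b2,b3> and
// createInterleaveMask(4, 2) = <0,4,1,5,2,6,3,7>, so the final shuffle
// yields <a0,b0,a1,b1,a2,b2,a3,b3>.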
2185
2186// Try to vectorize the interleave group that \p Instr belongs to.
2187//
2188// E.g. Translate following interleaved load group (factor = 3):
2189// for (i = 0; i < N; i+=3) {
2190// R = Pic[i]; // Member of index 0
2191// G = Pic[i+1]; // Member of index 1
2192// B = Pic[i+2]; // Member of index 2
2193// ... // do something to R, G, B
2194// }
2195// To:
2196// %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
2197// %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements
2198// %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
2199// %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
2200//
2201// Or translate following interleaved store group (factor = 3):
2202// for (i = 0; i < N; i+=3) {
2203// ... do something to R, G, B
2204// Pic[i] = R; // Member of index 0
2205// Pic[i+1] = G; // Member of index 1
2206// Pic[i+2] = B; // Member of index 2
2207// }
2208// To:
2209// %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2210// %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2211// %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2212// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
2213// store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
2214void VPInterleaveRecipe::execute(VPTransformState &State) {
2215 assert(!State.Instance && "Interleave group being replicated.");
2216 const InterleaveGroup<Instruction> *Group = IG;
2217 Instruction *Instr = Group->getInsertPos();
2218
2219 // Prepare for the vector type of the interleaved load/store.
2220 Type *ScalarTy = getLoadStoreType(Instr);
2221 unsigned InterleaveFactor = Group->getFactor();
2222 auto *VecTy = VectorType::get(ScalarTy, State.VF * InterleaveFactor);
2223
2224 // Prepare for the new pointers.
2225 SmallVector<Value *, 2> AddrParts;
2226 unsigned Index = Group->getIndex(Instr);
2227
2228 // TODO: extend the masked interleaved-group support to reversed access.
2229 VPValue *BlockInMask = getMask();
2230 assert((!BlockInMask || !Group->isReverse()) &&
2231 "Reversed masked interleave-group not supported.");
2232
2233 Value *Idx;
2234 // If the group is reverse, adjust the index to refer to the last vector lane
2235 // instead of the first. We adjust the index from the first vector lane,
2236 // rather than directly getting the pointer for lane VF - 1, because the
2237 // pointer operand of the interleaved access is supposed to be uniform. For
2238 // uniform instructions, we're only required to generate a value for the
2239 // first vector lane in each unroll iteration.
2240 if (Group->isReverse()) {
2241 Value *RuntimeVF =
2242 getRuntimeVF(State.Builder, State.Builder.getInt32Ty(), State.VF);
2243 Idx = State.Builder.CreateSub(RuntimeVF, State.Builder.getInt32(1));
2244 Idx = State.Builder.CreateMul(Idx,
2245 State.Builder.getInt32(Group->getFactor()));
2246 Idx = State.Builder.CreateAdd(Idx, State.Builder.getInt32(Index));
2247 Idx = State.Builder.CreateNeg(Idx);
2248 } else
2249 Idx = State.Builder.getInt32(-Index);
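// Worked example for the reverse case: with VF = 4, factor = 3 and member
// index 0 this gives Idx = -((4 - 1) * 3 + 0) = -9, i.e. the wide access
// starts nine elements before the uniform first-lane pointer.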
2250
2251 VPValue *Addr = getAddr();
2252 for (unsigned Part = 0; Part < State.UF; Part++) {
2253 Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2254 if (auto *I = dyn_cast<Instruction>(AddrPart))
2255 State.setDebugLocFrom(I->getDebugLoc());
2256
2257 // Notice current instruction could be any index. Need to adjust the address
2258 // to the member of index 0.
2259 //
2260 // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
2261 // b = A[i]; // Member of index 0
2262 // Current pointer is pointed to A[i+1], adjust it to A[i].
2263 //
2264 // E.g. A[i+1] = a; // Member of index 1
2265 // A[i] = b; // Member of index 0
2266 // A[i+2] = c; // Member of index 2 (Current instruction)
2267 // Current pointer is pointed to A[i+2], adjust it to A[i].
2268
2269 bool InBounds = false;
2270 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2271 InBounds = gep->isInBounds();
2272 AddrPart = State.Builder.CreateGEP(ScalarTy, AddrPart, Idx, "", InBounds);
2273 AddrParts.push_back(AddrPart);
2274 }
2275
2276 State.setDebugLocFrom(Instr->getDebugLoc());
2277 Value *PoisonVec = PoisonValue::get(VecTy);
2278
2279 auto CreateGroupMask = [&BlockInMask, &State, &InterleaveFactor](
2280 unsigned Part, Value *MaskForGaps) -> Value * {
2281 if (State.VF.isScalable()) {
2282 assert(!MaskForGaps && "Interleaved groups with gaps are not supported.");
2283 assert(InterleaveFactor == 2 &&
2284 "Unsupported deinterleave factor for scalable vectors");
2285 auto *BlockInMaskPart = State.get(BlockInMask, Part);
2286 SmallVector<Value *, 2> Ops = {BlockInMaskPart, BlockInMaskPart};
2287 auto *MaskTy = VectorType::get(State.Builder.getInt1Ty(),
2288 State.VF.getKnownMinValue() * 2, true);
2289 return State.Builder.CreateIntrinsic(
2290 MaskTy, Intrinsic::vector_interleave2, Ops,
2291 /*FMFSource=*/nullptr, "interleaved.mask");
2292 }
2293
2294 if (!BlockInMask)
2295 return MaskForGaps;
2296
2297 Value *BlockInMaskPart = State.get(BlockInMask, Part);
2298 Value *ShuffledMask = State.Builder.CreateShuffleVector(
2299 BlockInMaskPart,
2300 createReplicatedMask(InterleaveFactor, State.VF.getKnownMinValue()),
2301 "interleaved.mask");
2302 return MaskForGaps ? State.Builder.CreateBinOp(Instruction::And,
2303 ShuffledMask, MaskForGaps)
2304 : ShuffledMask;
2305 };
2306
2307 const DataLayout &DL = Instr->getDataLayout();
2308 // Vectorize the interleaved load group.
2309 if (isa<LoadInst>(Instr)) {
2310 Value *MaskForGaps = nullptr;
2311 if (NeedsMaskForGaps) {
2312 MaskForGaps = createBitMaskForGaps(State.Builder,
2313 State.VF.getKnownMinValue(), *Group);
2314 assert(MaskForGaps && "Mask for Gaps is required but it is null");
2315 }
2316
2317 // For each unroll part, create a wide load for the group.
2318 SmallVector<Value *, 2> NewLoads;
2319 for (unsigned Part = 0; Part < State.UF; Part++) {
2320 Instruction *NewLoad;
2321 if (BlockInMask || MaskForGaps) {
2322 Value *GroupMask = CreateGroupMask(Part, MaskForGaps);
2323 NewLoad = State.Builder.CreateMaskedLoad(VecTy, AddrParts[Part],
2324 Group->getAlign(), GroupMask,
2325 PoisonVec, "wide.masked.vec");
2326 } else
2327 NewLoad = State.Builder.CreateAlignedLoad(
2328 VecTy, AddrParts[Part], Group->getAlign(), "wide.vec");
2329 Group->addMetadata(NewLoad);
2330 NewLoads.push_back(NewLoad);
2331 }
2332
2333 ArrayRef<VPValue *> VPDefs = definedValues();
2334 const DataLayout &DL = State.CFG.PrevBB->getDataLayout();
2335 if (VecTy->isScalableTy()) {
2336 assert(InterleaveFactor == 2 &&
2337 "Unsupported deinterleave factor for scalable vectors");
2338
2339 for (unsigned Part = 0; Part < State.UF; ++Part) {
2340 // Scalable vectors cannot use arbitrary shufflevectors (only splats),
2341 // so must use intrinsics to deinterleave.
2342 Value *DI = State.Builder.CreateIntrinsic(
2343 Intrinsic::vector_deinterleave2, VecTy, NewLoads[Part],
2344 /*FMFSource=*/nullptr, "strided.vec");
2345 unsigned J = 0;
2346 for (unsigned I = 0; I < InterleaveFactor; ++I) {
2347 Instruction *Member = Group->getMember(I);
2348
2349 if (!Member)
2350 continue;
2351
2352 Value *StridedVec = State.Builder.CreateExtractValue(DI, I);
2353 // If this member has different type, cast the result type.
2354 if (Member->getType() != ScalarTy) {
2355 VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
2356 StridedVec =
2357 createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
2358 }
2359
2360 if (Group->isReverse())
2361 StridedVec =
2362 State.Builder.CreateVectorReverse(StridedVec, "reverse");
2363
2364 State.set(VPDefs[J], StridedVec, Part);
2365 ++J;
2366 }
2367 }
2368
2369 return;
2370 }
2371
2372 // For each member in the group, shuffle out the appropriate data from the
2373 // wide loads.
2374 unsigned J = 0;
2375 for (unsigned I = 0; I < InterleaveFactor; ++I) {
2376 Instruction *Member = Group->getMember(I);
2377
2378 // Skip the gaps in the group.
2379 if (!Member)
2380 continue;
2381
2382 auto StrideMask =
2383 createStrideMask(I, InterleaveFactor, State.VF.getKnownMinValue());
2384 for (unsigned Part = 0; Part < State.UF; Part++) {
2385 Value *StridedVec = State.Builder.CreateShuffleVector(
2386 NewLoads[Part], StrideMask, "strided.vec");
2387
2388 // If this member has different type, cast the result type.
2389 if (Member->getType() != ScalarTy) {
2390 assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
2391 VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
2392 StridedVec =
2393 createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
2394 }
2395
2396 if (Group->isReverse())
2397 StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse");
2398
2399 State.set(VPDefs[J], StridedVec, Part);
2400 }
2401 ++J;
2402 }
2403 return;
2404 }
2405
2406 // The sub vector type for current instruction.
2407 auto *SubVT = VectorType::get(ScalarTy, State.VF);
2408
2409 // Vectorize the interleaved store group.
2410 Value *MaskForGaps =
2411 createBitMaskForGaps(State.Builder, State.VF.getKnownMinValue(), *Group);
2412 assert((!MaskForGaps || !State.VF.isScalable()) &&
2413 "masking gaps for scalable vectors is not yet supported.");
2414 ArrayRef<VPValue *> StoredValues = getStoredValues();
2415 for (unsigned Part = 0; Part < State.UF; Part++) {
2416 // Collect the stored vector from each member.
2417 SmallVector<Value *, 4> StoredVecs;
2418 unsigned StoredIdx = 0;
2419 for (unsigned i = 0; i < InterleaveFactor; i++) {
2420 assert((Group->getMember(i) || MaskForGaps) &&
2421 "Fail to get a member from an interleaved store group");
2422 Instruction *Member = Group->getMember(i);
2423
2424 // Skip the gaps in the group.
2425 if (!Member) {
2426 Value *Undef = PoisonValue::get(SubVT);
2427 StoredVecs.push_back(Undef);
2428 continue;
2429 }
2430
2431 Value *StoredVec = State.get(StoredValues[StoredIdx], Part);
2432 ++StoredIdx;
2433
2434 if (Group->isReverse())
2435 StoredVec = State.Builder.CreateVectorReverse(StoredVec, "reverse");
2436
2437 // If this member has different type, cast it to a unified type.
2438
2439 if (StoredVec->getType() != SubVT)
2440 StoredVec = createBitOrPointerCast(State.Builder, StoredVec, SubVT, DL);
2441
2442 StoredVecs.push_back(StoredVec);
2443 }
2444
2445 // Interleave all the smaller vectors into one wider vector.
2446 Value *IVec =
2447 interleaveVectors(State.Builder, StoredVecs, "interleaved.vec");
2448 Instruction *NewStoreInstr;
2449 if (BlockInMask || MaskForGaps) {
2450 Value *GroupMask = CreateGroupMask(Part, MaskForGaps);
2451 NewStoreInstr = State.Builder.CreateMaskedStore(
2452 IVec, AddrParts[Part], Group->getAlign(), GroupMask);
2453 } else
2454 NewStoreInstr = State.Builder.CreateAlignedStore(IVec, AddrParts[Part],
2455 Group->getAlign());
2456
2457 Group->addMetadata(NewStoreInstr);
2458 }
2459}
2460
2461#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2462void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
2463 VPSlotTracker &SlotTracker) const {
2464 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
2465 IG->getInsertPos()->printAsOperand(O, false);
2466 O << ", ";
2467 getAddr()->printAsOperand(O, SlotTracker);
2468 VPValue *Mask = getMask();
2469 if (Mask) {
2470 O << ", ";
2471 Mask->printAsOperand(O, SlotTracker);
2472 }
2473
2474 unsigned OpIdx = 0;
2475 for (unsigned i = 0; i < IG->getFactor(); ++i) {
2476 if (!IG->getMember(i))
2477 continue;
2478 if (getNumStoreOperands() > 0) {
2479 O << "\n" << Indent << " store ";
2480 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
2481 O << " to index " << i;
2482 } else {
2483 O << "\n" << Indent << " ";
2484 getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
2485 O << " = load from index " << i;
2486 }
2487 ++OpIdx;
2488 }
2489}
2490#endif
2491
2492void VPCanonicalIVPHIRecipe::execute(VPTransformState &State) {
2493 Value *Start = getStartValue()->getLiveInIRValue();
2494 PHINode *EntryPart = PHINode::Create(Start->getType(), 2, "index");
2495 EntryPart->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
2496
2497 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2498 EntryPart->addIncoming(Start, VectorPH);
2499 EntryPart->setDebugLoc(getDebugLoc());
2500 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
2501 State.set(this, EntryPart, Part, /*IsScalar*/ true);
2502}
2503
2504#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2505void VPCanonicalIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2506 VPSlotTracker &SlotTracker) const {
2507 O << Indent << "EMIT ";
2508 printAsOperand(O, SlotTracker);
2509 O << " = CANONICAL-INDUCTION ";
2510 printOperands(O, SlotTracker);
2511}
2512#endif
2513
2514bool VPCanonicalIVPHIRecipe::isCanonical(
2515 InductionDescriptor::InductionKind Kind, VPValue *Start,
2516 VPValue *Step) const {
2517 // Must be an integer induction.
2518 if (Kind != InductionDescriptor::IK_IntInduction)
2519 return false;
2520 // Start must match the start value of this canonical induction.
2521 if (Start != getStartValue())
2522 return false;
2523
2524 // If the step is defined by a recipe, it is not a ConstantInt.
2525 if (Step->getDefiningRecipe())
2526 return false;
2527
2528 ConstantInt *StepC = dyn_cast<ConstantInt>(Step->getLiveInIRValue());
2529 return StepC && StepC->isOne();
2530}
2531
2532bool VPWidenPointerInductionRecipe::onlyScalarsGenerated(bool IsScalable) {
2533 return IsScalarAfterVectorization &&
2534 (!IsScalable || vputils::onlyFirstLaneUsed(this));
2535}
2536
2537void VPWidenPointerInductionRecipe::execute(VPTransformState &State) {
2538 assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction &&
2539 "Not a pointer induction according to InductionDescriptor!");
2540 assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() &&
2541 "Unexpected type.");
2542 assert(!onlyScalarsGenerated(State.VF.isScalable()) &&
2543 "Recipe should have been replaced");
2544
2545 auto *IVR = getParent()->getPlan()->getCanonicalIV();
2546 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0, /*IsScalar*/ true));
2547 Type *PhiType = IndDesc.getStep()->getType();
2548
2549 // Build a pointer phi
2550 Value *ScalarStartValue = getStartValue()->getLiveInIRValue();
2551 Type *ScStValueType = ScalarStartValue->getType();
2552 PHINode *NewPointerPhi = PHINode::Create(ScStValueType, 2, "pointer.phi",
2553 CanonicalIV->getIterator());
2554
2555 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2556 NewPointerPhi->addIncoming(ScalarStartValue, VectorPH);
2557
2558 // A pointer induction, performed by using a gep
2559 BasicBlock::iterator InductionLoc = State.Builder.GetInsertPoint();
2560
2561 Value *ScalarStepValue = State.get(getOperand(1), VPIteration(0, 0));
2562 Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF);
2563 Value *NumUnrolledElems =
2564 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
2565 Value *InductionGEP = GetElementPtrInst::Create(
2566 State.Builder.getInt8Ty(), NewPointerPhi,
2567 State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
2568 InductionLoc);
2569 // Add induction update using an incorrect block temporarily. The phi node
2570 // will be fixed after VPlan execution. Note that at this point the latch
2571 // block cannot be used, as it does not exist yet.
2572 // TODO: Model increment value in VPlan, by turning the recipe into a
2573 // multi-def and a subclass of VPHeaderPHIRecipe.
2574 NewPointerPhi->addIncoming(InductionGEP, VectorPH);
2575
2576 // Create UF many actual address geps that use the pointer
2577 // phi as base and a vectorized version of the step value
2578 // (<step*0, ..., step*N>) as offset.
2579 for (unsigned Part = 0; Part < State.UF; ++Part) {
2580 Type *VecPhiType = VectorType::get(PhiType, State.VF);
2581 Value *StartOffsetScalar =
2582 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
2583 Value *StartOffset =
2584 State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
2585 // Create a vector of consecutive numbers from zero to VF.
2586 StartOffset = State.Builder.CreateAdd(
2587 StartOffset, State.Builder.CreateStepVector(VecPhiType));
2588
2589 assert(ScalarStepValue == State.get(getOperand(1), VPIteration(Part, 0)) &&
2590 "scalar step must be the same across all parts");
2591 Value *GEP = State.Builder.CreateGEP(
2592 State.Builder.getInt8Ty(), NewPointerPhi,
2593 State.Builder.CreateMul(
2594 StartOffset,
2595 State.Builder.CreateVectorSplat(State.VF, ScalarStepValue),
2596 "vector.gep"));
2597 State.set(this, GEP, Part);
2598 }
2599}
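// Worked example: for VF = 4, UF = 2 and a scalar step S, part 0 uses the
// offsets <0,1,2,3> * S and part 1 uses <4,5,6,7> * S as geps off the single
// pointer phi, while the phi itself advances by 4 * 2 * S per vector loop
// iteration (all through i8 geps, so steps are in address units).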
2600
2601#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2602void VPWidenPointerInductionRecipe::print(raw_ostream &O, const Twine &Indent,
2603 VPSlotTracker &SlotTracker) const {
2604 O << Indent << "EMIT ";
2605 printAsOperand(O, SlotTracker);
2606 O << " = WIDEN-POINTER-INDUCTION ";
2607 getStartValue()->printAsOperand(O, SlotTracker);
2608 O << ", " << *IndDesc.getStep();
2609}
2610#endif
2611
2612void VPExpandSCEVRecipe::execute(VPTransformState &State) {
2613 assert(!State.Instance && "cannot be used in per-lane");
2614 const DataLayout &DL = State.CFG.PrevBB->getDataLayout();
2615 SCEVExpander Exp(SE, DL, "induction");
2616
2617 Value *Res = Exp.expandCodeFor(Expr, Expr->getType(),
2618 &*State.Builder.GetInsertPoint());
2619 assert(!State.ExpandedSCEVs.contains(Expr) &&
2620 "Same SCEV expanded multiple times");
2621 State.ExpandedSCEVs[Expr] = Res;
2622 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
2623 State.set(this, Res, {Part, 0});
2624}
2625
2626#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2627void VPExpandSCEVRecipe::print(raw_ostream &O, const Twine &Indent,
2628 VPSlotTracker &SlotTracker) const {
2629 O << Indent << "EMIT ";
2630 getVPSingleValue()->printAsOperand(O, SlotTracker);
2631 O << " = EXPAND SCEV " << *Expr;
2632}
2633#endif
2634
2635void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) {
2636 Value *CanonicalIV = State.get(getOperand(0), 0, /*IsScalar*/ true);
2637 Type *STy = CanonicalIV->getType();
2638 IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
2639 ElementCount VF = State.VF;
2640 Value *VStart = VF.isScalar()
2641 ? CanonicalIV
2642 : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
2643 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) {
2644 Value *VStep = createStepForVF(Builder, STy, VF, Part);
2645 if (VF.isVector()) {
2646 VStep = Builder.CreateVectorSplat(VF, VStep);
2647 VStep =
2648 Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType()));
2649 }
2650 Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
2651 State.set(this, CanonicalVectorIV, Part);
2652 }
2653}
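// Worked example: with VF = 4, UF = 2 and canonical IV %iv this emits
//   part 0: splat(%iv) + <0,1,2,3>
//   part 1: splat(%iv) + <4,5,6,7>
// one lane per scalar iteration covered by the unrolled vector body.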
2654
2655#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2656void VPWidenCanonicalIVRecipe::print(raw_ostream &O, const Twine &Indent,
2657 VPSlotTracker &SlotTracker) const {
2658 O << Indent << "EMIT ";
2659 printAsOperand(O, SlotTracker);
2660 O << " = WIDEN-CANONICAL-INDUCTION ";
2661 printOperands(O, SlotTracker);
2662}
2663#endif
2664
2665void VPFirstOrderRecurrencePHIRecipe::execute(VPTransformState &State) {
2666 auto &Builder = State.Builder;
2667 // Create a vector from the initial value.
2668 auto *VectorInit = getStartValue()->getLiveInIRValue();
2669
2670 Type *VecTy = State.VF.isScalar()
2671 ? VectorInit->getType()
2672 : VectorType::get(VectorInit->getType(), State.VF);
2673
2674 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2675 if (State.VF.isVector()) {
2676 auto *IdxTy = Builder.getInt32Ty();
2677 auto *One = ConstantInt::get(IdxTy, 1);
2678 IRBuilder<>::InsertPointGuard Guard(Builder);
2679 Builder.SetInsertPoint(VectorPH->getTerminator());
2680 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF);
2681 auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
2682 VectorInit = Builder.CreateInsertElement(
2683 PoisonValue::get(VecTy), VectorInit, LastIdx, "vector.recur.init");
2684 }
2685
2686 // Create a phi node for the new recurrence.
2687 PHINode *EntryPart = PHINode::Create(VecTy, 2, "vector.recur");
2688 EntryPart->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
2689 EntryPart->addIncoming(VectorInit, VectorPH);
2690 State.set(this, EntryPart, 0);
2691}
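// E.g. for VF = 4 the recurrence phi enters the loop as
//   <poison, poison, poison, %init>
// with the scalar start value in the last lane, where the splice against the
// first iteration's vector value will pick it up.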
2692
2693#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2694void VPFirstOrderRecurrencePHIRecipe::print(raw_ostream &O, const Twine &Indent,
2695 VPSlotTracker &SlotTracker) const {
2696 O << Indent << "FIRST-ORDER-RECURRENCE-PHI ";
2697 printAsOperand(O, SlotTracker);
2698 O << " = phi ";
2699 printOperands(O, SlotTracker);
2700}
2701#endif
2702
2703void VPReductionPHIRecipe::execute(VPTransformState &State) {
2704 auto &Builder = State.Builder;
2705
2706 // Reductions do not have to start at zero. They can start with
2707 // any loop invariant values.
2708 VPValue *StartVPV = getStartValue();
2709 Value *StartV = StartVPV->getLiveInIRValue();
2710
2711 // In order to support recurrences we need to be able to vectorize Phi nodes.
2712 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
2713 // stage #1: We create a new vector PHI node with no incoming edges. We'll use
2714 // this value when we vectorize all of the instructions that use the PHI.
2715 bool ScalarPHI = State.VF.isScalar() || IsInLoop;
2716 Type *VecTy = ScalarPHI ? StartV->getType()
2717 : VectorType::get(StartV->getType(), State.VF);
2718
2719 BasicBlock *HeaderBB = State.CFG.PrevBB;
2720 assert(State.CurrentVectorLoop->getHeader() == HeaderBB &&
2721 "recipe must be in the vector loop header");
2722 unsigned LastPartForNewPhi = isOrdered() ? 1 : State.UF;
2723 for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
2724 Instruction *EntryPart = PHINode::Create(VecTy, 2, "vec.phi");
2725 EntryPart->insertBefore(HeaderBB->getFirstInsertionPt());
2726 State.set(this, EntryPart, Part, IsInLoop);
2727 }
2728
2729 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2730
2731 Value *Iden = nullptr;
2732 RecurKind RK = RdxDesc.getRecurrenceKind();
2733 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK) ||
2734 RecurrenceDescriptor::isAnyOfRecurrenceKind(RK)) {
2735 // MinMax and AnyOf reductions have the start value as their identity.
2736 if (ScalarPHI) {
2737 Iden = StartV;
2738 } else {
2739 IRBuilderBase::InsertPointGuard IPBuilder(Builder);
2740 Builder.SetInsertPoint(VectorPH->getTerminator());
2741 StartV = Iden =
2742 Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
2743 }
2744 } else {
2745 Iden = RdxDesc.getRecurrenceIdentity(RK, VecTy->getScalarType(),
2746 RdxDesc.getFastMathFlags());
2747
2748 if (!ScalarPHI) {
2749 Iden = Builder.CreateVectorSplat(State.VF, Iden);
2750 IRBuilderBase::InsertPointGuard IPBuilder(Builder);
2751 Builder.SetInsertPoint(VectorPH->getTerminator());
2752 Constant *Zero = Builder.getInt32(0);
2753 StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
2754 }
2755 }
2756
2757 for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
2758 Value *EntryPart = State.get(this, Part, IsInLoop);
2759 // Make sure to add the reduction start value only to the
2760 // first unroll part.
2761 Value *StartVal = (Part == 0) ? StartV : Iden;
2762 cast<PHINode>(EntryPart)->addIncoming(StartVal, VectorPH);
2763 }
2764}
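// E.g. an unordered add reduction with VF = 4, UF = 2 starts as
//   part 0: <%start, 0, 0, 0> (identity splat with the start value in lane 0)
//   part 1: <0, 0, 0, 0> (pure identity)
// so the start value is folded into the reduction exactly once.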
2765
2766#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2767void VPReductionPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2768 VPSlotTracker &SlotTracker) const {
2769 O << Indent << "WIDEN-REDUCTION-PHI ";
2770
2771 printAsOperand(O, SlotTracker);
2772 O << " = phi ";
2773 printOperands(O, SlotTracker);
2774}
2775#endif
2776
2777void VPWidenPHIRecipe::execute(VPTransformState &State) {
2778 assert(EnableVPlanNativePath &&
2779 "Non-native vplans are not expected to have VPWidenPHIRecipes.");
2780
2781 Value *Op0 = State.get(getOperand(0), 0);
2782 Type *VecTy = Op0->getType();
2783 Value *VecPhi = State.Builder.CreatePHI(VecTy, 2, "vec.phi");
2784 State.set(this, VecPhi, 0);
2785}
2786
2787#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2788void VPWidenPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2789 VPSlotTracker &SlotTracker) const {
2790 O << Indent << "WIDEN-PHI ";
2791
2792 auto *OriginalPhi = cast<PHINode>(getUnderlyingValue());
2793 // Unless all incoming values are modeled in VPlan print the original PHI
2794 // directly.
2795 // TODO: Remove once all VPWidenPHIRecipe instances keep all relevant incoming
2796 // values as VPValues.
2797 if (getNumOperands() != OriginalPhi->getNumOperands()) {
2798 O << VPlanIngredient(OriginalPhi);
2799 return;
2800 }
2801
2802 printAsOperand(O, SlotTracker);
2803 O << " = phi ";
2804 printOperands(O, SlotTracker);
2805}
2806#endif
2807
2808// TODO: It would be good to use the existing VPWidenPHIRecipe instead and
2809// remove VPActiveLaneMaskPHIRecipe.
2810void VPActiveLaneMaskPHIRecipe::execute(VPTransformState &State) {
2811 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2812 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) {
2813 Value *StartMask = State.get(getOperand(0), Part);
2814 PHINode *EntryPart =
2815 State.Builder.CreatePHI(StartMask->getType(), 2, "active.lane.mask");
2816 EntryPart->addIncoming(StartMask, VectorPH);
2817 EntryPart->setDebugLoc(getDebugLoc());
2818 State.set(this, EntryPart, Part);
2819 }
2820}
2821
2822#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2823void VPActiveLaneMaskPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2824 VPSlotTracker &SlotTracker) const {
2825 O << Indent << "ACTIVE-LANE-MASK-PHI ";
2826
2827 printAsOperand(O, SlotTracker);
2828 O << " = phi ";
2829 printOperands(O, SlotTracker);
2830}
2831#endif
2832
2833void VPEVLBasedIVPHIRecipe::execute(VPTransformState &State) {
2834 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2835 assert(State.UF == 1 && "Expected unroll factor 1 for VP vectorization.");
2836 Value *Start = State.get(getOperand(0), VPIteration(0, 0));
2837 PHINode *EntryPart =
2838 State.Builder.CreatePHI(Start->getType(), 2, "evl.based.iv");
2839 EntryPart->addIncoming(Start, VectorPH);
2840 EntryPart->setDebugLoc(getDebugLoc());
2841 State.set(this, EntryPart, 0, /*IsScalar=*/true);
2842}
2843
2844#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2845void VPEVLBasedIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2846 VPSlotTracker &SlotTracker) const {
2847 O << Indent << "EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI ";
2848
2849 printAsOperand(O, SlotTracker);
2850 O << " = phi ";
2851 printOperands(O, SlotTracker);
2852}
2853#endif
amdgpu AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Addr
std::string Name
Hexagon Common GEP
cl::opt< unsigned > ForceTargetInstructionCost("force-target-instruction-cost", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's expected cost for " "an instruction to a single constant value. Mostly " "useful for getting consistent testing."))
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
separate const offset from gep
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
static Value * interleaveVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vals, const Twine &Name)
Return a vector containing interleaved elements from multiple smaller input vectors.
static Value * getStepVector(Value *Val, Value *StartIdx, Value *Step, Instruction::BinaryOps BinOp, ElementCount VF, IRBuilderBase &Builder)
This function adds (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step,...
static Value * createBitOrPointerCast(IRBuilderBase &Builder, Value *V, VectorType *DstVTy, const DataLayout &DL)
cl::opt< unsigned > ForceTargetInstructionCost
static bool isUniformAcrossVFsAndUFs(VPScalarCastRecipe *C)
Checks if C is uniform across all VFs and UFs.
static Instruction * getInstructionForCost(const VPRecipeBase *R)
Return the underlying instruction to be used for computing R's cost via the legacy cost model.
static Constant * getSignedIntOrFpConstant(Type *Ty, int64_t C)
A helper function that returns an integer or floating-point constant with value C.
static Value * getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy, ElementCount VF)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:191
This file contains the declarations of the Vectorization Plan base classes:
static const uint32_t IV[8]
Definition: blake3_impl.h:78
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:416
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
Definition: BasicBlock.cpp:374
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:459
const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
Definition: BasicBlock.cpp:296
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:292
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
This class represents a function call, abstracting a target machine's calling convention.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:757
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:780
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:782
static StringRef getPredicateName(Predicate P)
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
bool isOne() const
This is just a convenience method to make client code smaller for a common case.
Definition: Constants.h:212
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition: Constants.h:124
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
Definition: Constants.cpp:1450
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:33
constexpr bool isVector() const
One or more elements.
Definition: TypeSize.h:326
constexpr bool isScalar() const
Exactly one element.
Definition: TypeSize.h:322
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
void setAllowContract(bool B=true)
Definition: FMF.h:91
bool noSignedZeros() const
Definition: FMF.h:68
bool noInfs() const
Definition: FMF.h:67
void setAllowReciprocal(bool B=true)
Definition: FMF.h:88
bool allowReciprocal() const
Definition: FMF.h:69
void print(raw_ostream &O) const
Print fast-math flags to O.
Definition: Operator.cpp:260
void setNoSignedZeros(bool B=true)
Definition: FMF.h:85
bool allowReassoc() const
Flag queries.
Definition: FMF.h:65
bool approxFunc() const
Definition: FMF.h:71
void setNoNaNs(bool B=true)
Definition: FMF.h:79
void setAllowReassoc(bool B=true)
Flag setters.
Definition: FMF.h:76
bool noNaNs() const
Definition: FMF.h:66
void setApproxFunc(bool B=true)
Definition: FMF.h:94
void setNoInfs(bool B=true)
Definition: FMF.h:82
bool allowContract() const
Definition: FMF.h:70
Class to represent function types.
Definition: DerivedTypes.h:103
Type * getParamType(unsigned i) const
Parameter type accessors.
Definition: DerivedTypes.h:135
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:214
bool willReturn() const
Determine if the function will return.
Definition: Function.h:660
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:249
bool doesNotThrow() const
Determine if the function cannot unwind.
Definition: Function.h:593
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:219
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:938
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:91
Value * CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2381
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2492
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition: IRBuilder.h:508
Value * CreateSIToFP(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2114
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2480
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1824
Value * CreateFAdd(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition: IRBuilder.h:1550
Value * CreateVectorSplice(Value *V1, Value *V2, int64_t Imm, const Twine &Name="")
Return a vector splice intrinsic if using scalable vectors, otherwise return a shufflevector.
Definition: IRBuilder.cpp:1166
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Definition: IRBuilder.cpp:1193
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2536
ConstantInt * getTrue()
Get the constant value for i1 true.
Definition: IRBuilder.h:463
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:933
CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
Definition: IRBuilder.cpp:579
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1091
BasicBlock::iterator GetInsertPoint() const
Definition: IRBuilder.h:172
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2053
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition: IRBuilder.h:2555
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:523
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1996
Value * CreateUIToFP(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2101
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:171
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition: IRBuilder.h:308
Value * CreateVectorReverse(Value *V, const Twine &Name="")
Return a vector value that contains the vector V reversed.
Definition: IRBuilder.cpp:1151
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1883
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
Definition: IRBuilder.h:1738
InsertPoint saveIP() const
Returns the current insert point.
Definition: IRBuilder.h:274
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:483
Value * CreateBitOrPointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2225
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2386
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2417
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1766
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2261
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1361
BranchInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Definition: IRBuilder.h:1137
Value * CreateNAryOp(unsigned Opc, ArrayRef< Value * > Ops, const Twine &Name="", MDNode *FPMathTag=nullptr)
Create either a UnaryOperator or BinaryOperator depending on Opc.
Definition: IRBuilder.cpp:1006
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2041
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Definition: IRBuilder.h:2514
LLVMContext & getContext() const
Definition: IRBuilder.h:173
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
Definition: IRBuilder.cpp:599
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1344
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2027
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1683
Value * CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name="")
Definition: IRBuilder.h:1693
Value * CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2181
void restoreIP(InsertPoint IP)
Sets the current insert point to a previously-saved location.
Definition: IRBuilder.h:286
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:177
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition: IRBuilder.h:1843
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2432
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2371
Value * CreateFMul(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition: IRBuilder.h:1604
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:513
Value * CreateStepVector(Type *DstType, const Twine &Name="")
Creates a vector of type DstType with the linear sequence <0, 1, ...>
Definition: IRBuilder.cpp:110
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1378
CallInst * CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, Value *Mask=nullptr)
Create a call to Masked Scatter intrinsic.
Definition: IRBuilder.cpp:662
CallInst * CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Gather intrinsic.
Definition: IRBuilder.cpp:631
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2686
A struct for saving information about induction variables.
InductionKind getKind() const
const SCEV * getStep() const
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
This instruction inserts a single (scalar) element into a VectorType value.
VectorType * getType() const
Overload to return most specific vector type.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:97
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:466
bool isBinaryOp() const
Definition: Instruction.h:279
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
const char * getOpcodeName() const
Definition: Instruction.h:276
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:463
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:266
The group of interleaved loads/stores sharing the same stride and close to each other.
Definition: VectorUtils.h:470
uint32_t getFactor() const
Definition: VectorUtils.h:486
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
Definition: VectorUtils.h:540
uint32_t getIndex(const InstTy *Instr) const
Get the index for the given member.
Definition: VectorUtils.h:547
bool isReverse() const
Definition: VectorUtils.h:485
InstTy * getInsertPos() const
Definition: VectorUtils.h:556
void addMetadata(InstTy *NewInst) const
Add metadata (e.g.
Align getAlign() const
Definition: VectorUtils.h:487
BlockT * getHeader() const
void print(raw_ostream &OS, const SlotIndexes *=nullptr, bool IsStandalone=true) const
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1852
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
Definition: IVDescriptors.h:71
FastMathFlags getFastMathFlags() const
static unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
unsigned getOpcode() const
Type * getRecurrenceType() const
Returns the type of the recurrence.
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF) const
Returns identity corresponding to the RecurrenceKind.
StoreInst * IntermediateStore
Reductions may store a temporary or final result to an invariant address.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
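A hedged sketch of querying a RecurrenceDescriptor via the accessors above (inspectReduction is a hypothetical helper; RD is assumed to describe a valid reduction):
#include "llvm/Analysis/IVDescriptors.h"
static void inspectReduction(const llvm::RecurrenceDescriptor &RD) {
  llvm::RecurKind Kind = RD.getRecurrenceKind();
  if (llvm::RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
    return; // min/max reductions lower to compare+select, not a plain binop
  unsigned Opcode = llvm::RecurrenceDescriptor::getOpcode(Kind); // e.g. Instruction::Add
  (void)Opcode;
}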
This class uses scalar evolution (SCEV) analysis information to rewrite expressions in canonical form.
Type * getType() const
Return the LLVM type of this SCEV expression.
This class provides computation of slot numbers for LLVM Assembly writing.
Definition: AsmWriter.cpp:697
void push_back(const T &Elt)
Definition: SmallVector.h:427
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1210
An instruction for storing to memory.
Definition: Instructions.h:290
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:261
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:251
static IntegerType * getInt1Ty(LLVMContext &C)
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:224
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:343
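An illustrative predicate (not an LLVM API) showing how the Type queries above compose:
#include "llvm/IR/Type.h"
static bool isBroadcastableScalar(llvm::Type *Ty) {
  if (Ty->isVectorTy() || Ty->isVoidTy())
    return false;
  // Integers, floats and pointers can all be splatted across vector lanes.
  return Ty->isIntegerTy() || Ty->isFloatingPointTy() || Ty->isPointerTy();
}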
Value * getOperand(unsigned i) const
Definition: User.h:169
void execute(VPTransformState &State) override
Generate the active lane mask phi of the vector loop.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition: VPlan.h:2978
RecipeListTy & getRecipeList()
Returns a reference to the list of recipes.
Definition: VPlan.h:3028
iterator end()
Definition: VPlan.h:3012
VPRegionBlock * getEnclosingLoopRegion()
Definition: VPlan.cpp:575
void insert(VPRecipeBase *Recipe, iterator InsertPt)
Definition: VPlan.h:3041
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getIncomingValue(unsigned Idx) const
Return incoming value number Idx.
Definition: VPlan.h:2056
VPValue * getMask(unsigned Idx) const
Return mask number Idx.
Definition: VPlan.h:2061
unsigned getNumIncomingValues() const
Return the number of incoming values, taking into account that the first incoming value has no mask.
Definition: VPlan.h:2053
void execute(VPTransformState &State) override
Generate the phi/select nodes.
VPRegionBlock * getParent()
Definition: VPlan.h:509
VPlan * getPlan()
Definition: VPlan.cpp:150
const VPBasicBlock * getEntryBasicBlock() const
Definition: VPlan.cpp:155
VPBlockBase * getSingleSuccessor() const
Definition: VPlan.h:544
VPValue * getMask() const
Return the mask used by this recipe.
Definition: VPlan.h:2401
void execute(VPTransformState &State) override
Generate the extraction of the appropriate bit from the block mask and the conditional branch.
void execute(VPTransformState &State) override
Generate the canonical scalar induction phi of the vector loop.
bool isCanonical(InductionDescriptor::InductionKind Kind, VPValue *Start, VPValue *Step) const
Check if the induction described by Kind, Start and Step is canonical, i.e.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
This class augments a recipe with a set of VPValues defined by the recipe.
Definition: VPlanValue.h:307
void dump() const
Dump the VPDef to stderr (for debugging).
Definition: VPlan.cpp:111
ArrayRef< VPValue * > definedValues()
Returns an ArrayRef of the values defined by the VPDef.
Definition: VPlanValue.h:418
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition: VPlanValue.h:396
VPValue * getVPValue(unsigned I)
Returns the VPValue with index I defined by the VPDef.
Definition: VPlanValue.h:408
unsigned getVPDefID() const
Definition: VPlanValue.h:428
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStepValue() const
Definition: VPlan.h:2916
VPValue * getStartValue() const
Definition: VPlan.h:2915
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi that correctly handles the EVL-based IV across iterations.
void execute(VPTransformState &State) override
Generate a canonical vector induction variable of the vector loop, with start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and step = <VF*UF, VF*UF, ..., VF*UF>.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition: VPlan.h:1752
bool hasResult() const
Definition: VPlan.h:1365
LLVM_DUMP_METHOD void dump() const
Print the VPInstruction to dbgs() (for debugging).
@ ResumePhi
Creates a scalar phi in a leaf VPBB with a single predecessor in VPlan.
Definition: VPlan.h:1247
@ FirstOrderRecurrenceSplice
Definition: VPlan.h:1235
@ CanonicalIVIncrementForPart
Definition: VPlan.h:1250
@ CalculateTripCountMinusVF
Definition: VPlan.h:1248
unsigned getOpcode() const
Definition: VPlan.h:1341
bool onlyFirstPartUsed(const VPValue *Op) const override
Returns true if the recipe only uses the first part of operand Op.
bool isVectorToScalar() const
Returns true if this VPInstruction produces a scalar value from a vector, e.g. by performing a reduction or extracting a lane.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the VPInstruction to O.
bool onlyFirstLaneUsed(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
bool isSingleScalar() const
Returns true if this VPInstruction's operands are single scalars and the result is also a single scalar.
void execute(VPTransformState &State) override
Generate the instruction.
VPValue * getAddr() const
Return the address accessed by this recipe.
Definition: VPlan.h:2131
VPValue * getMask() const
Return the mask used by this recipe.
Definition: VPlan.h:2137
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
ArrayRef< VPValue * > getStoredValues() const
Return the VPValues stored by this interleave group.
Definition: VPlan.h:2144
unsigned getNumStoreOperands() const
Returns the number of stored operands of this interleave group.
Definition: VPlan.h:2164
static VPLane getLastLaneForVF(const ElementCount &VF)
Definition: VPlan.h:196
static VPLane getLaneFromEnd(const ElementCount &VF, unsigned Offset)
Definition: VPlan.h:182
static VPLane getFirstLane()
Definition: VPlan.h:180
void print(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the VPLiveOut to O.
PHINode * getPhi() const
Definition: VPlan.h:728
void fixPhi(VPlan &Plan, VPTransformState &State)
Fix the wrapped phi node.
void execute(VPTransformState &State) override
Generates phi nodes for live-outs as needed to retain SSA form.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition: VPlan.h:764
bool mayReadFromMemory() const
Returns true if the recipe may read from memory.
bool mayHaveSideEffects() const
Returns true if the recipe may have side-effects.
bool mayWriteToMemory() const
Returns true if the recipe may write to memory.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const
Compute the cost of this recipe using the legacy cost model and the underlying instructions.
VPBasicBlock * getParent()
Definition: VPlan.h:789
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition: VPlan.h:860
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
void insertAfter(VPRecipeBase *InsertPos)
Insert an unlinked Recipe into a basic block immediately after the specified Recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
virtual InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this recipe, taking into account if the cost computation should be skipped and the ForceTargetInstructionCost flag.
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
void moveAfter(VPRecipeBase *MovePos)
Unlink this recipe from its current VPBasicBlock and insert it into the VPBasicBlock that MovePos lives in, right after MovePos.
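A hedged sketch of the recipe list-manipulation APIs above; eraseIfDead is hypothetical and assumes the recipe defines exactly one VPValue:
#include "VPlan.h"
static void eraseIfDead(llvm::VPRecipeBase &R) {
  llvm::VPValue *Def = R.getVPSingleValue(); // assumes a single defined value
  if (Def->getNumUsers() == 0 && !R.mayHaveSideEffects())
    R.eraseFromParent(); // unlink from the parent VPBasicBlock and delete
}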
Class to record LLVM IR flag for a recipe along with it.
Definition: VPlan.h:964
ExactFlagsTy ExactFlags
Definition: VPlan.h:1020
FastMathFlagsTy FMFs
Definition: VPlan.h:1023
NonNegFlagsTy NonNegFlags
Definition: VPlan.h:1022
void setFlags(Instruction *I) const
Set the IR flags for I.
Definition: VPlan.h:1149
bool isInBounds() const
Definition: VPlan.h:1191
bool hasFastMathFlags() const
Returns true if the recipe has fast-math flags.
Definition: VPlan.h:1198
DisjointFlagsTy DisjointFlags
Definition: VPlan.h:1019
WrapFlagsTy WrapFlags
Definition: VPlan.h:1018
bool hasNoUnsignedWrap() const
Definition: VPlan.h:1202
void printFlags(raw_ostream &O) const
CmpInst::Predicate getPredicate() const
Definition: VPlan.h:1185
bool hasNoSignedWrap() const
Definition: VPlan.h:1208
FastMathFlags getFastMathFlags() const
void execute(VPTransformState &State) override
Generate the reduction in the loop.
VPValue * getEVL() const
The VPValue of the explicit vector length.
Definition: VPlan.h:2282
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool isOrdered() const
Returns true if the phi is part of an ordered reduction.
Definition: VPlan.h:2025
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool isConditional() const
Return true if the in-loop reduction is conditional.
Definition: VPlan.h:2240
VPValue * getVecOp() const
The VPValue of the vector value to be reduced.
Definition: VPlan.h:2244
const RecurrenceDescriptor & getRecurrenceDescriptor() const
Return the recurrence descriptor for the in-loop reduction.
Definition: VPlan.h:2234
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getCondOp() const
The VPValue of the condition for the block.
Definition: VPlan.h:2246
bool isOrdered() const
Return true if the in-loop reduction is ordered.
Definition: VPlan.h:2238
VPValue * getChainOp() const
The VPValue of the scalar Chain being accumulated.
Definition: VPlan.h:2242
void execute(VPTransformState &State) override
Generate the reduction in the loop.
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-Single-Exiting subgraph of the output IR CFG.
Definition: VPlan.h:3156
const VPBlockBase * getEntry() const
Definition: VPlan.h:3195
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getOpcode() const
Definition: VPlan.h:2365
bool shouldPack() const
Returns true if the recipe is used by a widened recipe via an intervening VPPredInstPHIRecipe.
VPScalarCastRecipe is a recipe to create scalar cast instructions.
Definition: VPlan.h:1487
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStepValue() const
Definition: VPlan.h:2965
void execute(VPTransformState &State) override
Generate the scalarized versions of the phi node as needed by its users.
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition: VPlan.h:955
This class can be used to assign names to VPValues.
Definition: VPlanValue.h:449
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
Definition: VPlanValue.h:202
void printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the operands to O.
Definition: VPlan.cpp:1457
operand_range operands()
Definition: VPlanValue.h:272
unsigned getNumOperands() const
Definition: VPlanValue.h:251
operand_iterator op_begin()
Definition: VPlanValue.h:268
VPValue * getOperand(unsigned N) const
Definition: VPlanValue.h:252
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Definition: VPlan.cpp:120
void printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const
Definition: VPlan.cpp:1453
friend class VPInstruction
Definition: VPlanValue.h:47
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition: VPlanValue.h:77
Value * getLiveInIRValue()
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
Definition: VPlanValue.h:172
user_range users()
Definition: VPlanValue.h:132
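A hedged sketch of traversing VPlan def-use edges with the accessors above; walkDefUse is hypothetical and again assumes a single defined value:
#include "VPlan.h"
static void walkDefUse(llvm::VPRecipeBase &R) {
  for (llvm::VPValue *Op : R.operands())
    if (llvm::VPRecipeBase *DefR = Op->getDefiningRecipe())
      (void)DefR; // Op is defined by another recipe; otherwise it is a live-in
  for (llvm::VPUser *U : R.getVPSingleValue()->users())
    (void)U; // each user that consumes the defined value
}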
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Function * getCalledScalarFunction() const
Definition: VPlan.h:1560
void execute(VPTransformState &State) override
Produce a widened version of the call instruction.
operand_range arg_operands()
Definition: VPlan.h:1564
void execute(VPTransformState &State) override
Generate a canonical vector induction variable of the vector loop, with start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and step = <VF*UF, VF*UF, ..., VF*UF>.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getResultType() const
Returns the result type of the cast.
Definition: VPlan.h:1483
void execute(VPTransformState &State) override
Produce widened copies of the cast.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the gep nodes.
TruncInst * getTruncInst()
Returns the first defined value as a TruncInst if it is one, or nullptr otherwise.
Definition: VPlan.h:1836
void execute(VPTransformState &State) override
Generate the vectorized and scalarized versions of the phi node as needed by its users.
VPValue * getStepValue()
Returns the step value of the induction.
Definition: VPlan.h:1831
Type * getScalarType() const
Returns the scalar type of the induction.
Definition: VPlan.h:1850
bool isCanonical() const
Returns true if the induction is canonical, i.e.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
const InductionDescriptor & getInductionDescriptor() const
Returns the induction descriptor for the recipe.
Definition: VPlan.h:1842
bool Reverse
Whether the consecutive accessed addresses are in reverse order.
Definition: VPlan.h:2461
bool isConsecutive() const
Return whether the loaded-from / stored-to addresses are consecutive.
Definition: VPlan.h:2500
Instruction & Ingredient
Definition: VPlan.h:2455
VPValue * getMask() const
Return the mask used by this recipe.
Definition: VPlan.h:2514
VPValue * getAddr() const
Return the address accessed by this recipe.
Definition: VPlan.h:2507
bool isReverse() const
Return whether the consecutive loaded/stored addresses are in reverse order.
Definition: VPlan.h:2504
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool onlyScalarsGenerated(bool IsScalable)
Returns true if only scalar values will be generated.
void execute(VPTransformState &State) override
Generate vector values for the pointer induction.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Produce a widened instruction using the opcode and operands of the recipe, processing State.VF elements.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient output IR, including which branches, basic-blocks and output IR instructions to generate.
Definition: VPlan.h:3260
VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition: VPlan.h:3462
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the vector loop.
Definition: VPlan.h:3470
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
Definition: AsmWriter.cpp:5106
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:694
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
VectorBuilder & setEVL(Value *NewExplicitVectorLength)
Definition: VectorBuilder.h:82
VectorBuilder & setMask(Value *NewMask)
Definition: VectorBuilder.h:78
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
Definition: DerivedTypes.h:641
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Definition: Type.cpp:664
static VectorType * getDoubleElementsVectorType(VectorType *VTy)
This static method returns a VectorType with twice as many elements as the input type and the same element type.
Definition: DerivedTypes.h:517
Type * getElementType() const
Definition: DerivedTypes.h:436
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
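A minimal sketch of scalable element counts together with VectorType::get (scalableVectorExample is an illustrative name):
#include "llvm/IR/DerivedTypes.h"
static void scalableVectorExample(llvm::LLVMContext &Ctx) {
  llvm::ElementCount EC = llvm::ElementCount::getScalable(4); // <vscale x 4 x ...>
  llvm::VectorType *VTy = llvm::VectorType::get(llvm::Type::getInt32Ty(Ctx), EC);
  if (VTy->getElementCount().isScalable()) {
    // 4 is only the known minimum; the actual count is vscale * 4 at run time.
    uint64_t MinLanes = VTy->getElementCount().getKnownMinValue();
    (void)MinLanes;
  }
}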
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
iterator erase(iterator where)
Definition: ilist.h:204
pointer remove(iterator &IT)
Definition: ilist.h:188
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1539
bool isUniformAfterVectorization(const VPValue *VPV)
Returns true if VPV is uniform after vectorization.
Definition: VPlan.h:3810
bool onlyFirstPartUsed(const VPValue *Def)
Returns true if only the first part of Def is used.
Definition: VPlan.cpp:1605
bool onlyFirstLaneUsed(const VPValue *Def)
Returns true if only the first lane of Def is used.
Definition: VPlan.cpp:1600
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition: DWP.cpp:480
Value * createSimpleTargetReduction(IRBuilderBase &B, Value *Src, RecurKind RdxKind)
Create a target reduction of the given vector.
Definition: LoopUtils.cpp:1210
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition: STLExtras.h:2406
bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx)
Identifies if the vector form of the intrinsic is overloaded on the type of the operand at index OpdIdx.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void interleaveComma(const Container &c, StreamT &os, UnaryFunctor each_fn)
Definition: STLExtras.h:2165
Value * concatenateVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vecs)
Concatenate a list of vectors.
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
Instruction * propagateMetadata(Instruction *I, ArrayRef< Value * > VL)
Specifically, let Kinds = [MD_tbaa, MD_alias_scope, MD_noalias, MD_fpmath, MD_nontemporal,...
Value * createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left, Value *Right)
Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
Definition: LoopUtils.cpp:1075
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
Constant * createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF, const InterleaveGroup< Instruction > &Group)
Create a mask that filters the members of an interleave group where there are gaps.
llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
cl::opt< bool > EnableVPlanNativePath("enable-vplan-native-path", cl::Hidden, cl::desc("Enable VPlan-native vectorization path with " "support for outer loop vectorization."))
Definition: VPlan.cpp:55
static bool isDbgInfoIntrinsic(Intrinsic::ID ID)
Check if ID corresponds to a debug info intrinsic.
llvm::SmallVector< int, 16 > createReplicatedMask(unsigned ReplicationFactor, unsigned VF)
Create a mask with replicated elements.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:120
Value * createOrderedReduction(IRBuilderBase &B, const RecurrenceDescriptor &Desc, Value *Src, Value *Start)
Create an ordered reduction intrinsic using the given recurrence descriptor Desc.
Definition: LoopUtils.cpp:1281
llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
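A minimal sketch of the shuffle-mask helpers (maskExamples is an illustrative name); the expected masks are shown in comments:
#include "llvm/Analysis/VectorUtils.h"
static void maskExamples() {
  // Interleave two 4-element vectors: <0, 4, 1, 5, 2, 6, 3, 7>.
  llvm::SmallVector<int, 16> Interleaved =
      llvm::createInterleaveMask(/*VF=*/4, /*NumVecs=*/2);
  // Select every second element starting at 0: <0, 2, 4, 6>.
  llvm::SmallVector<int, 16> Strided =
      llvm::createStrideMask(/*Start=*/0, /*Stride=*/2, /*VF=*/4);
  (void)Interleaved;
  (void)Strided;
}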
RecurKind
These are the kinds of recurrences that we support.
Definition: IVDescriptors.h:34
@ Mul
Product of integers.
@ Add
Sum of integers.
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
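A hedged sketch combining getRuntimeVF and createStepForVF (twoTimesVF is hypothetical, and the two helpers are assumed visible from the vectorizer's headers); for a fixed VF both fold to constants:
#include "llvm/IR/IRBuilder.h"
static llvm::Value *twoTimesVF(llvm::IRBuilderBase &B, llvm::Type *Ty,
                               llvm::ElementCount VF) {
  llvm::Value *RuntimeVF = llvm::getRuntimeVF(B, Ty, VF); // VF, or vscale * MinVF
  (void)RuntimeVF;
  return llvm::createStepForVF(B, Ty, VF, /*Step=*/2);    // 2 * VF
}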
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
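A minimal sketch of the load/store helpers (memAccessInfo is an illustrative name; the instruction must be a load or store):
#include "llvm/IR/Instructions.h"
#include <cassert>
static void memAccessInfo(llvm::Instruction *I) {
  assert((llvm::isa<llvm::LoadInst>(I) || llvm::isa<llvm::StoreInst>(I)) &&
         "expected a load or store");
  llvm::Type *ValTy = llvm::getLoadStoreType(I); // type of the accessed value
  llvm::Align Alignment = llvm::getLoadStoreAlignment(I);
  (void)ValTy;
  (void)Alignment;
}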
bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx)
Identifies if the vector form of the intrinsic has a scalar operand.
Value * createTargetReduction(IRBuilderBase &B, const RecurrenceDescriptor &Desc, Value *Src, PHINode *OrigPhi=nullptr)
Create a generic target reduction using a recurrence descriptor Desc. The target is queried to determine if intrinsics or shuffle sequences are required to implement the reduction.
Definition: LoopUtils.cpp:1265
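A minimal sketch of createSimpleTargetReduction for an integer add (reduceVectorAdd is hypothetical; VecSrc is assumed to be a vector of integers):
#include "llvm/IR/IRBuilder.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
static llvm::Value *reduceVectorAdd(llvm::IRBuilderBase &B, llvm::Value *VecSrc) {
  // Typically emits llvm.vector.reduce.add, or an equivalent shuffle sequence.
  return llvm::createSimpleTargetReduction(B, VecSrc, llvm::RecurKind::Add);
}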
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Struct to hold various analysis needed for cost computations.
Definition: VPlan.h:737
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of all recipes migrates to VPlan.
SmallPtrSet< Instruction *, 8 > SkipCostComputation
Definition: VPlan.h:742
void execute(VPTransformState &State) override
Generate the phi nodes.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPIteration represents a single point in the iteration space of the output (vectorized and/or unrolled) IR loop.
Definition: VPlan.h:238
BasicBlock * PrevBB
The previous IR BasicBlock created or used.
Definition: VPlan.h:384
SmallDenseMap< VPBasicBlock *, BasicBlock * > VPBB2IRBB
A mapping of each VPBasicBlock to the corresponding BasicBlock.
Definition: VPlan.h:392
BasicBlock * getPreheaderBBFor(VPRecipeBase *R)
Returns the BasicBlock* mapped to the pre-header of the loop region containing R.
Definition: VPlan.cpp:356
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the output IR.
Definition: VPlan.h:255
Value * get(VPValue *Def, unsigned Part, bool IsScalar=false)
Get the generated vector Value for a given VPValue Def and a given Part if IsScalar is false,...
Definition: VPlan.cpp:254
DenseMap< const SCEV *, Value * > ExpandedSCEVs
Map SCEVs to their expanded values.
Definition: VPlan.h:429
VPTypeAnalysis TypeAnalysis
VPlan-based type analysis.
Definition: VPlan.h:432
void addMetadata(Value *To, Instruction *From)
Add metadata from one instruction to another.
Definition: VPlan.cpp:369
void reset(VPValue *Def, Value *V, unsigned Part)
Reset an existing vector value for Def and a given Part.
Definition: VPlan.h:322
struct llvm::VPTransformState::CFGState CFG
void set(VPValue *Def, Value *V, unsigned Part, bool IsScalar=false)
Set the generated vector Value for a given VPValue and a given Part, if IsScalar is false.
Definition: VPlan.h:307
std::optional< VPIteration > Instance
Hold the indices to generate specific scalar instructions.
Definition: VPlan.h:267
IRBuilderBase & Builder
Hold a reference to the IRBuilder used to generate output IR code.
Definition: VPlan.h:409
bool hasScalarValue(VPValue *Def, VPIteration Instance)
Definition: VPlan.h:295
bool hasVectorValue(VPValue *Def, unsigned Part)
Definition: VPlan.h:289
ElementCount VF
The chosen Vectorization and Unroll Factors of the loop being vectorized.
Definition: VPlan.h:261
Loop * CurrentVectorLoop
The loop object for the current parent region, or nullptr.
Definition: VPlan.h:418
void setDebugLocFrom(DebugLoc DL)
Set the debug location in the builder using the debug location DL.
Definition: VPlan.cpp:380
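A schematic sketch (not a real recipe) of how execute() implementations use VPTransformState; executeSketch is hypothetical, assumes one operand and one defined value, and uses CreateFreeze purely as a stand-in for real code generation:
#include "VPlan.h"
static void executeSketch(llvm::VPRecipeBase &R, llvm::VPTransformState &State) {
  State.setDebugLocFrom(R.getDebugLoc());
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    llvm::Value *In = State.get(R.getOperand(0), Part); // generated operand per part
    llvm::Value *Out = State.Builder.CreateFreeze(In);  // placeholder computation
    State.set(R.getVPSingleValue(), Out, Part);
  }
}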
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate a wide load or gather.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool isInvariantCond() const
Definition: VPlan.h:1607
VPValue * getCond() const
Definition: VPlan.h:1603
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Produce a widened version of the select instruction.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate a wide store or scatter.
VPValue * getStoredValue() const
Return the value stored by this recipe.
Definition: VPlan.h:2620
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.