LLVM 22.0.0git
VPlanRecipes.cpp
Go to the documentation of this file.
1//===- VPlanRecipes.cpp - Implementations for VPlan recipes ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file contains implementations for different VPlan recipes.
11///
12//===----------------------------------------------------------------------===//
13
15#include "VPlan.h"
16#include "VPlanAnalysis.h"
17#include "VPlanHelpers.h"
18#include "VPlanPatternMatch.h"
19#include "VPlanUtils.h"
20#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/Twine.h"
27#include "llvm/IR/BasicBlock.h"
28#include "llvm/IR/IRBuilder.h"
29#include "llvm/IR/Instruction.h"
31#include "llvm/IR/Intrinsics.h"
32#include "llvm/IR/Type.h"
33#include "llvm/IR/Value.h"
36#include "llvm/Support/Debug.h"
40#include <cassert>
41
42using namespace llvm;
43using namespace llvm::VPlanPatternMatch;
44
46
47#define LV_NAME "loop-vectorize"
48#define DEBUG_TYPE LV_NAME
49
51 switch (getVPDefID()) {
52 case VPExpressionSC:
53 return cast<VPExpressionRecipe>(this)->mayReadOrWriteMemory();
54 case VPInstructionSC: {
55 auto *VPI = cast<VPInstruction>(this);
56 // Loads read from memory but don't write to memory.
57 if (VPI->getOpcode() == Instruction::Load)
58 return false;
59 return VPI->opcodeMayReadOrWriteFromMemory();
60 }
61 case VPInterleaveEVLSC:
62 case VPInterleaveSC:
63 return cast<VPInterleaveBase>(this)->getNumStoreOperands() > 0;
64 case VPWidenStoreEVLSC:
65 case VPWidenStoreSC:
66 return true;
67 case VPReplicateSC:
68 return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
69 ->mayWriteToMemory();
70 case VPWidenCallSC:
71 return !cast<VPWidenCallRecipe>(this)
72 ->getCalledScalarFunction()
73 ->onlyReadsMemory();
74 case VPWidenIntrinsicSC:
75 return cast<VPWidenIntrinsicRecipe>(this)->mayWriteToMemory();
76 case VPCanonicalIVPHISC:
77 case VPBranchOnMaskSC:
78 case VPDerivedIVSC:
79 case VPFirstOrderRecurrencePHISC:
80 case VPReductionPHISC:
81 case VPScalarIVStepsSC:
82 case VPPredInstPHISC:
83 return false;
84 case VPBlendSC:
85 case VPReductionEVLSC:
86 case VPReductionSC:
87 case VPVectorPointerSC:
88 case VPWidenCanonicalIVSC:
89 case VPWidenCastSC:
90 case VPWidenGEPSC:
91 case VPWidenIntOrFpInductionSC:
92 case VPWidenLoadEVLSC:
93 case VPWidenLoadSC:
94 case VPWidenPHISC:
95 case VPWidenPointerInductionSC:
96 case VPWidenSC: {
97 const Instruction *I =
98 dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
99 (void)I;
100 assert((!I || !I->mayWriteToMemory()) &&
101 "underlying instruction may write to memory");
102 return false;
103 }
104 default:
105 return true;
106 }
107}
108
110 switch (getVPDefID()) {
111 case VPExpressionSC:
112 return cast<VPExpressionRecipe>(this)->mayReadOrWriteMemory();
113 case VPInstructionSC:
114 return cast<VPInstruction>(this)->opcodeMayReadOrWriteFromMemory();
115 case VPWidenLoadEVLSC:
116 case VPWidenLoadSC:
117 return true;
118 case VPReplicateSC:
119 return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
120 ->mayReadFromMemory();
121 case VPWidenCallSC:
122 return !cast<VPWidenCallRecipe>(this)
123 ->getCalledScalarFunction()
124 ->onlyWritesMemory();
125 case VPWidenIntrinsicSC:
126 return cast<VPWidenIntrinsicRecipe>(this)->mayReadFromMemory();
127 case VPBranchOnMaskSC:
128 case VPDerivedIVSC:
129 case VPFirstOrderRecurrencePHISC:
130 case VPPredInstPHISC:
131 case VPScalarIVStepsSC:
132 case VPWidenStoreEVLSC:
133 case VPWidenStoreSC:
134 return false;
135 case VPBlendSC:
136 case VPReductionEVLSC:
137 case VPReductionSC:
138 case VPVectorPointerSC:
139 case VPWidenCanonicalIVSC:
140 case VPWidenCastSC:
141 case VPWidenGEPSC:
142 case VPWidenIntOrFpInductionSC:
143 case VPWidenPHISC:
144 case VPWidenPointerInductionSC:
145 case VPWidenSC: {
146 const Instruction *I =
147 dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
148 (void)I;
149 assert((!I || !I->mayReadFromMemory()) &&
150 "underlying instruction may read from memory");
151 return false;
152 }
153 default:
154 // FIXME: Return false if the recipe represents an interleaved store.
155 return true;
156 }
157}
158
160 switch (getVPDefID()) {
161 case VPExpressionSC:
162 return cast<VPExpressionRecipe>(this)->mayHaveSideEffects();
163 case VPDerivedIVSC:
164 case VPFirstOrderRecurrencePHISC:
165 case VPPredInstPHISC:
166 case VPVectorEndPointerSC:
167 return false;
168 case VPInstructionSC: {
169 auto *VPI = cast<VPInstruction>(this);
170 return mayWriteToMemory() ||
171 VPI->getOpcode() == VPInstruction::BranchOnCount ||
172 VPI->getOpcode() == VPInstruction::BranchOnCond ||
173 VPI->getOpcode() == VPInstruction::BranchOnTwoConds;
174 }
175 case VPWidenCallSC: {
176 Function *Fn = cast<VPWidenCallRecipe>(this)->getCalledScalarFunction();
177 return mayWriteToMemory() || !Fn->doesNotThrow() || !Fn->willReturn();
178 }
179 case VPWidenIntrinsicSC:
180 return cast<VPWidenIntrinsicRecipe>(this)->mayHaveSideEffects();
181 case VPBlendSC:
182 case VPReductionEVLSC:
183 case VPReductionSC:
184 case VPScalarIVStepsSC:
185 case VPVectorPointerSC:
186 case VPWidenCanonicalIVSC:
187 case VPWidenCastSC:
188 case VPWidenGEPSC:
189 case VPWidenIntOrFpInductionSC:
190 case VPWidenPHISC:
191 case VPWidenPointerInductionSC:
192 case VPWidenSC: {
193 const Instruction *I =
194 dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
195 (void)I;
196 assert((!I || !I->mayHaveSideEffects()) &&
197 "underlying instruction has side-effects");
198 return false;
199 }
200 case VPInterleaveEVLSC:
201 case VPInterleaveSC:
202 return mayWriteToMemory();
203 case VPWidenLoadEVLSC:
204 case VPWidenLoadSC:
205 case VPWidenStoreEVLSC:
206 case VPWidenStoreSC:
207 assert(
208 cast<VPWidenMemoryRecipe>(this)->getIngredient().mayHaveSideEffects() ==
210 "mayHaveSideffects result for ingredient differs from this "
211 "implementation");
212 return mayWriteToMemory();
213 case VPReplicateSC: {
214 auto *R = cast<VPReplicateRecipe>(this);
215 return R->getUnderlyingInstr()->mayHaveSideEffects();
216 }
217 default:
218 return true;
219 }
220}
221
223 assert(!Parent && "Recipe already in some VPBasicBlock");
224 assert(InsertPos->getParent() &&
225 "Insertion position not in any VPBasicBlock");
226 InsertPos->getParent()->insert(this, InsertPos->getIterator());
227}
228
229void VPRecipeBase::insertBefore(VPBasicBlock &BB,
231 assert(!Parent && "Recipe already in some VPBasicBlock");
232 assert(I == BB.end() || I->getParent() == &BB);
233 BB.insert(this, I);
234}
235
237 assert(!Parent && "Recipe already in some VPBasicBlock");
238 assert(InsertPos->getParent() &&
239 "Insertion position not in any VPBasicBlock");
240 InsertPos->getParent()->insert(this, std::next(InsertPos->getIterator()));
241}
242
244 assert(getParent() && "Recipe not in any VPBasicBlock");
246 Parent = nullptr;
247}
248
250 assert(getParent() && "Recipe not in any VPBasicBlock");
252}
253
256 insertAfter(InsertPos);
257}
258
264
266 // Get the underlying instruction for the recipe, if there is one. It is used
267 // to
268 // * decide if cost computation should be skipped for this recipe,
269 // * apply forced target instruction cost.
270 Instruction *UI = nullptr;
271 if (auto *S = dyn_cast<VPSingleDefRecipe>(this))
272 UI = dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
273 else if (auto *IG = dyn_cast<VPInterleaveBase>(this))
274 UI = IG->getInsertPos();
275 else if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(this))
276 UI = &WidenMem->getIngredient();
277
278 InstructionCost RecipeCost;
279 if (UI && Ctx.skipCostComputation(UI, VF.isVector())) {
280 RecipeCost = 0;
281 } else {
282 RecipeCost = computeCost(VF, Ctx);
283 if (ForceTargetInstructionCost.getNumOccurrences() > 0 &&
284 RecipeCost.isValid()) {
285 if (UI)
287 else
288 RecipeCost = InstructionCost(0);
289 }
290 }
291
292 LLVM_DEBUG({
293 dbgs() << "Cost of " << RecipeCost << " for VF " << VF << ": ";
294 dump();
295 });
296 return RecipeCost;
297}
298
300 VPCostContext &Ctx) const {
301 llvm_unreachable("subclasses should implement computeCost");
302}
303
305 return (getVPDefID() >= VPFirstPHISC && getVPDefID() <= VPLastPHISC) ||
307}
308
310 auto *VPI = dyn_cast<VPInstruction>(this);
311 return VPI && Instruction::isCast(VPI->getOpcode());
312}
313
315 assert(OpType == Other.OpType && "OpType must match");
316 switch (OpType) {
317 case OperationType::OverflowingBinOp:
318 WrapFlags.HasNUW &= Other.WrapFlags.HasNUW;
319 WrapFlags.HasNSW &= Other.WrapFlags.HasNSW;
320 break;
321 case OperationType::Trunc:
322 TruncFlags.HasNUW &= Other.TruncFlags.HasNUW;
323 TruncFlags.HasNSW &= Other.TruncFlags.HasNSW;
324 break;
325 case OperationType::DisjointOp:
326 DisjointFlags.IsDisjoint &= Other.DisjointFlags.IsDisjoint;
327 break;
328 case OperationType::PossiblyExactOp:
329 ExactFlags.IsExact &= Other.ExactFlags.IsExact;
330 break;
331 case OperationType::GEPOp:
332 GEPFlags &= Other.GEPFlags;
333 break;
334 case OperationType::FPMathOp:
335 case OperationType::FCmp:
336 assert((OpType != OperationType::FCmp ||
337 FCmpFlags.Pred == Other.FCmpFlags.Pred) &&
338 "Cannot drop CmpPredicate");
339 getFMFsRef().NoNaNs &= Other.getFMFsRef().NoNaNs;
340 getFMFsRef().NoInfs &= Other.getFMFsRef().NoInfs;
341 break;
342 case OperationType::NonNegOp:
343 NonNegFlags.NonNeg &= Other.NonNegFlags.NonNeg;
344 break;
345 case OperationType::Cmp:
346 assert(CmpPredicate == Other.CmpPredicate && "Cannot drop CmpPredicate");
347 break;
348 case OperationType::Other:
349 assert(AllFlags == Other.AllFlags && "Cannot drop other flags");
350 break;
351 }
352}
353
355 assert((OpType == OperationType::FPMathOp || OpType == OperationType::FCmp) &&
356 "recipe doesn't have fast math flags");
357 const FastMathFlagsTy &F = getFMFsRef();
358 FastMathFlags Res;
359 Res.setAllowReassoc(F.AllowReassoc);
360 Res.setNoNaNs(F.NoNaNs);
361 Res.setNoInfs(F.NoInfs);
362 Res.setNoSignedZeros(F.NoSignedZeros);
363 Res.setAllowReciprocal(F.AllowReciprocal);
364 Res.setAllowContract(F.AllowContract);
365 Res.setApproxFunc(F.ApproxFunc);
366 return Res;
367}
368
369#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
371
372void VPRecipeBase::print(raw_ostream &O, const Twine &Indent,
373 VPSlotTracker &SlotTracker) const {
374 printRecipe(O, Indent, SlotTracker);
375 if (auto DL = getDebugLoc()) {
376 O << ", !dbg ";
377 DL.print(O);
378 }
379
380 if (auto *Metadata = dyn_cast<VPIRMetadata>(this))
382}
383#endif
384
385template <unsigned PartOpIdx>
386VPValue *
388 if (U.getNumOperands() == PartOpIdx + 1)
389 return U.getOperand(PartOpIdx);
390 return nullptr;
391}
392
393template <unsigned PartOpIdx>
395 if (auto *UnrollPartOp = getUnrollPartOperand(U))
396 return cast<ConstantInt>(UnrollPartOp->getLiveInIRValue())->getZExtValue();
397 return 0;
398}
399
400namespace llvm {
401template class VPUnrollPartAccessor<1>;
402template class VPUnrollPartAccessor<2>;
403template class VPUnrollPartAccessor<3>;
404}
405
407 const VPIRFlags &Flags, const VPIRMetadata &MD,
408 DebugLoc DL, const Twine &Name)
409 : VPRecipeWithIRFlags(VPDef::VPInstructionSC, Operands, Flags, DL),
410 VPIRMetadata(MD), Opcode(Opcode), Name(Name.str()) {
412 "Set flags not supported for the provided opcode");
413 assert((getNumOperandsForOpcode(Opcode) == -1u ||
414 getNumOperandsForOpcode(Opcode) == getNumOperands()) &&
415 "number of operands does not match opcode");
416}
417
418#ifndef NDEBUG
419unsigned VPInstruction::getNumOperandsForOpcode(unsigned Opcode) {
420 if (Instruction::isUnaryOp(Opcode) || Instruction::isCast(Opcode))
421 return 1;
422
423 if (Instruction::isBinaryOp(Opcode))
424 return 2;
425
426 switch (Opcode) {
429 return 0;
430 case Instruction::Alloca:
431 case Instruction::ExtractValue:
432 case Instruction::Freeze:
433 case Instruction::Load:
448 return 1;
449 case Instruction::ICmp:
450 case Instruction::FCmp:
451 case Instruction::ExtractElement:
452 case Instruction::Store:
462 return 2;
463 case Instruction::Select:
467 return 3;
469 return 4;
470 case Instruction::Call:
471 case Instruction::GetElementPtr:
472 case Instruction::PHI:
473 case Instruction::Switch:
479 // Cannot determine the number of operands from the opcode.
480 return -1u;
481 }
482 llvm_unreachable("all cases should be handled above");
483}
484#endif
485
489
490bool VPInstruction::canGenerateScalarForFirstLane() const {
492 return true;
494 return true;
495 switch (Opcode) {
496 case Instruction::Freeze:
497 case Instruction::ICmp:
498 case Instruction::PHI:
499 case Instruction::Select:
509 return true;
510 default:
511 return false;
512 }
513}
514
515Value *VPInstruction::generate(VPTransformState &State) {
516 IRBuilderBase &Builder = State.Builder;
517
519 bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
520 Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
521 Value *B = State.get(getOperand(1), OnlyFirstLaneUsed);
522 auto *Res =
523 Builder.CreateBinOp((Instruction::BinaryOps)getOpcode(), A, B, Name);
524 if (auto *I = dyn_cast<Instruction>(Res))
525 applyFlags(*I);
526 return Res;
527 }
528
529 switch (getOpcode()) {
530 case VPInstruction::Not: {
531 bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
532 Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
533 return Builder.CreateNot(A, Name);
534 }
535 case Instruction::ExtractElement: {
536 assert(State.VF.isVector() && "Only extract elements from vectors");
537 if (auto *IdxIRV = dyn_cast<VPIRValue>(getOperand(1))) {
538 unsigned IdxToExtract =
539 cast<ConstantInt>(IdxIRV->getValue())->getZExtValue();
540 return State.get(getOperand(0), VPLane(IdxToExtract));
541 }
542 Value *Vec = State.get(getOperand(0));
543 Value *Idx = State.get(getOperand(1), /*IsScalar=*/true);
544 return Builder.CreateExtractElement(Vec, Idx, Name);
545 }
546 case Instruction::Freeze: {
548 return Builder.CreateFreeze(Op, Name);
549 }
550 case Instruction::FCmp:
551 case Instruction::ICmp: {
552 bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
553 Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
554 Value *B = State.get(getOperand(1), OnlyFirstLaneUsed);
555 return Builder.CreateCmp(getPredicate(), A, B, Name);
556 }
557 case Instruction::PHI: {
558 llvm_unreachable("should be handled by VPPhi::execute");
559 }
560 case Instruction::Select: {
561 bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
562 Value *Cond =
563 State.get(getOperand(0),
564 OnlyFirstLaneUsed || vputils::isSingleScalar(getOperand(0)));
565 Value *Op1 = State.get(getOperand(1), OnlyFirstLaneUsed);
566 Value *Op2 = State.get(getOperand(2), OnlyFirstLaneUsed);
567 return Builder.CreateSelect(Cond, Op1, Op2, Name);
568 }
570 // Get first lane of vector induction variable.
571 Value *VIVElem0 = State.get(getOperand(0), VPLane(0));
572 // Get the original loop tripcount.
573 Value *ScalarTC = State.get(getOperand(1), VPLane(0));
574
575 // If this part of the active lane mask is scalar, generate the CMP directly
576 // to avoid unnecessary extracts.
577 if (State.VF.isScalar())
578 return Builder.CreateCmp(CmpInst::Predicate::ICMP_ULT, VIVElem0, ScalarTC,
579 Name);
580
581 ElementCount EC = State.VF.multiplyCoefficientBy(
582 cast<ConstantInt>(getOperand(2)->getLiveInIRValue())->getZExtValue());
583 auto *PredTy = VectorType::get(Builder.getInt1Ty(), EC);
584 return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
585 {PredTy, ScalarTC->getType()},
586 {VIVElem0, ScalarTC}, nullptr, Name);
587 }
589 // Generate code to combine the previous and current values in vector v3.
590 //
591 // vector.ph:
592 // v_init = vector(..., ..., ..., a[-1])
593 // br vector.body
594 //
595 // vector.body
596 // i = phi [0, vector.ph], [i+4, vector.body]
597 // v1 = phi [v_init, vector.ph], [v2, vector.body]
598 // v2 = a[i, i+1, i+2, i+3];
599 // v3 = vector(v1(3), v2(0, 1, 2))
600
601 auto *V1 = State.get(getOperand(0));
602 if (!V1->getType()->isVectorTy())
603 return V1;
604 Value *V2 = State.get(getOperand(1));
605 return Builder.CreateVectorSplice(V1, V2, -1, Name);
606 }
608 unsigned UF = getParent()->getPlan()->getUF();
609 Value *ScalarTC = State.get(getOperand(0), VPLane(0));
610 Value *Step = createStepForVF(Builder, ScalarTC->getType(), State.VF, UF);
611 Value *Sub = Builder.CreateSub(ScalarTC, Step);
612 Value *Cmp = Builder.CreateICmp(CmpInst::Predicate::ICMP_UGT, ScalarTC, Step);
614 return Builder.CreateSelect(Cmp, Sub, Zero);
615 }
617 // TODO: Restructure this code with an explicit remainder loop, vsetvli can
618 // be outside of the main loop.
619 Value *AVL = State.get(getOperand(0), /*IsScalar*/ true);
620 // Compute EVL
621 assert(AVL->getType()->isIntegerTy() &&
622 "Requested vector length should be an integer.");
623
624 assert(State.VF.isScalable() && "Expected scalable vector factor.");
625 Value *VFArg = Builder.getInt32(State.VF.getKnownMinValue());
626
627 Value *EVL = Builder.CreateIntrinsic(
628 Builder.getInt32Ty(), Intrinsic::experimental_get_vector_length,
629 {AVL, VFArg, Builder.getTrue()});
630 return EVL;
631 }
633 unsigned Part = getUnrollPart(*this);
634 auto *IV = State.get(getOperand(0), VPLane(0));
635 assert(Part != 0 && "Must have a positive part");
636 // The canonical IV is incremented by the vectorization factor (num of
637 // SIMD elements) times the unroll part.
638 Value *Step = createStepForVF(Builder, IV->getType(), State.VF, Part);
639 return Builder.CreateAdd(IV, Step, Name, hasNoUnsignedWrap(),
641 }
643 Value *Cond = State.get(getOperand(0), VPLane(0));
644 // Replace the temporary unreachable terminator with a new conditional
645 // branch, hooking it up to backward destination for latch blocks now, and
646 // to forward destination(s) later when they are created.
647 // Second successor may be backwards - iff it is already in VPBB2IRBB.
648 VPBasicBlock *SecondVPSucc =
649 cast<VPBasicBlock>(getParent()->getSuccessors()[1]);
650 BasicBlock *SecondIRSucc = State.CFG.VPBB2IRBB.lookup(SecondVPSucc);
651 BasicBlock *IRBB = State.CFG.VPBB2IRBB[getParent()];
652 auto *Br = Builder.CreateCondBr(Cond, IRBB, SecondIRSucc);
653 // First successor is always forward, reset it to nullptr.
654 Br->setSuccessor(0, nullptr);
656 applyMetadata(*Br);
657 return Br;
658 }
660 return Builder.CreateVectorSplat(
661 State.VF, State.get(getOperand(0), /*IsScalar*/ true), "broadcast");
662 }
664 // For struct types, we need to build a new 'wide' struct type, where each
665 // element is widened, i.e., we create a struct of vectors.
666 auto *StructTy =
668 Value *Res = PoisonValue::get(toVectorizedTy(StructTy, State.VF));
669 for (const auto &[LaneIndex, Op] : enumerate(operands())) {
670 for (unsigned FieldIndex = 0; FieldIndex != StructTy->getNumElements();
671 FieldIndex++) {
672 Value *ScalarValue =
673 Builder.CreateExtractValue(State.get(Op, true), FieldIndex);
674 Value *VectorValue = Builder.CreateExtractValue(Res, FieldIndex);
675 VectorValue =
676 Builder.CreateInsertElement(VectorValue, ScalarValue, LaneIndex);
677 Res = Builder.CreateInsertValue(Res, VectorValue, FieldIndex);
678 }
679 }
680 return Res;
681 }
683 auto *ScalarTy = State.TypeAnalysis.inferScalarType(getOperand(0));
684 auto NumOfElements = ElementCount::getFixed(getNumOperands());
685 Value *Res = PoisonValue::get(toVectorizedTy(ScalarTy, NumOfElements));
686 for (const auto &[Idx, Op] : enumerate(operands()))
687 Res = Builder.CreateInsertElement(Res, State.get(Op, true),
688 Builder.getInt32(Idx));
689 return Res;
690 }
692 if (State.VF.isScalar())
693 return State.get(getOperand(0), true);
694 IRBuilderBase::FastMathFlagGuard FMFG(Builder);
696 // If this start vector is scaled then it should produce a vector with fewer
697 // elements than the VF.
698 ElementCount VF = State.VF.divideCoefficientBy(
699 cast<ConstantInt>(getOperand(2)->getLiveInIRValue())->getZExtValue());
700 auto *Iden = Builder.CreateVectorSplat(VF, State.get(getOperand(1), true));
701 return Builder.CreateInsertElement(Iden, State.get(getOperand(0), true),
702 Builder.getInt32(0));
703 }
705 // FIXME: The cross-recipe dependency on VPReductionPHIRecipe is temporary
706 // and will be removed by breaking up the recipe further.
707 auto *PhiR = cast<VPReductionPHIRecipe>(getOperand(0));
708 auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
709 Value *ReducedPartRdx = State.get(getOperand(2));
710 for (unsigned Idx = 3; Idx < getNumOperands(); ++Idx)
711 ReducedPartRdx =
712 Builder.CreateBinOp(Instruction::Or, State.get(getOperand(Idx)),
713 ReducedPartRdx, "bin.rdx");
714 return createAnyOfReduction(Builder, ReducedPartRdx,
715 State.get(getOperand(1), VPLane(0)), OrigPhi);
716 }
718 // FIXME: The cross-recipe dependency on VPReductionPHIRecipe is temporary
719 // and will be removed by breaking up the recipe further.
720 auto *PhiR = cast<VPReductionPHIRecipe>(getOperand(0));
721 // Get its reduction variable descriptor.
722 RecurKind RK = PhiR->getRecurrenceKind();
724 "Unexpected reduction kind");
725 assert(!PhiR->isInLoop() &&
726 "In-loop FindLastIV reduction is not supported yet");
727
728 // The recipe's operands are the reduction phi, the start value, the
729 // sentinel value, followed by one operand for each part of the reduction.
730 unsigned UF = getNumOperands() - 3;
731 Value *ReducedPartRdx = State.get(getOperand(3));
732 RecurKind MinMaxKind;
735 MinMaxKind = IsSigned ? RecurKind::SMax : RecurKind::UMax;
736 else
737 MinMaxKind = IsSigned ? RecurKind::SMin : RecurKind::UMin;
738 for (unsigned Part = 1; Part < UF; ++Part)
739 ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
740 State.get(getOperand(3 + Part)));
741
742 Value *Start = State.get(getOperand(1), true);
744
745 // Reduce the vector to a scalar.
747 Value *ReducedIV =
748 ReducedPartRdx->getType()->isVectorTy()
749 ? (IsFindLast
750 ? Builder.CreateIntMaxReduce(ReducedPartRdx, IsSigned)
751 : Builder.CreateIntMinReduce(ReducedPartRdx, IsSigned))
752 : ReducedPartRdx;
753 // Correct the final reduction result back to the start value if the
754 // reduction result is the sentinel value.
755 Value *Cmp = Builder.CreateICmpNE(ReducedIV, Sentinel, "rdx.select.cmp");
756 return Builder.CreateSelect(Cmp, ReducedIV, Start, "rdx.select");
757 }
759 // FIXME: The cross-recipe dependency on VPReductionPHIRecipe is temporary
760 // and will be removed by breaking up the recipe further.
761 auto *PhiR = cast<VPReductionPHIRecipe>(getOperand(0));
762 // Get its reduction variable descriptor.
763
764 RecurKind RK = PhiR->getRecurrenceKind();
766 "should be handled by ComputeFindIVResult");
767
768 // The recipe's operands are the reduction phi, followed by one operand for
769 // each part of the reduction.
770 unsigned UF = getNumOperands() - 1;
771 VectorParts RdxParts(UF);
772 for (unsigned Part = 0; Part < UF; ++Part)
773 RdxParts[Part] = State.get(getOperand(1 + Part), PhiR->isInLoop());
774
775 IRBuilderBase::FastMathFlagGuard FMFG(Builder);
776 if (hasFastMathFlags())
778
779 // Reduce all of the unrolled parts into a single vector.
780 Value *ReducedPartRdx = RdxParts[0];
781 if (PhiR->isOrdered()) {
782 ReducedPartRdx = RdxParts[UF - 1];
783 } else {
784 // Floating-point operations should have some FMF to enable the reduction.
785 for (unsigned Part = 1; Part < UF; ++Part) {
786 Value *RdxPart = RdxParts[Part];
788 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
789 else {
790 // For sub-recurrences, each UF's reduction variable is already
791 // negative, we need to do: reduce.add(-acc_uf0 + -acc_uf1)
793 RK == RecurKind::Sub
794 ? Instruction::Add
796 ReducedPartRdx =
797 Builder.CreateBinOp(Opcode, RdxPart, ReducedPartRdx, "bin.rdx");
798 }
799 }
800 }
801
802 // Create the reduction after the loop. Note that inloop reductions create
803 // the target reduction in the loop using a Reduction recipe.
804 if (State.VF.isVector() && !PhiR->isInLoop()) {
805 // TODO: Support in-order reductions based on the recurrence descriptor.
806 // All ops in the reduction inherit fast-math-flags from the recurrence
807 // descriptor.
808 ReducedPartRdx = createSimpleReduction(Builder, ReducedPartRdx, RK);
809 }
810
811 return ReducedPartRdx;
812 }
815 unsigned Offset =
817 Value *Res;
818 if (State.VF.isVector()) {
819 assert(Offset <= State.VF.getKnownMinValue() &&
820 "invalid offset to extract from");
821 // Extract lane VF - Offset from the operand.
822 Res = State.get(getOperand(0), VPLane::getLaneFromEnd(State.VF, Offset));
823 } else {
824 // TODO: Remove ExtractLastLane for scalar VFs.
825 assert(Offset <= 1 && "invalid offset to extract from");
826 Res = State.get(getOperand(0));
827 }
829 Res->setName(Name);
830 return Res;
831 }
833 Value *A = State.get(getOperand(0));
834 Value *B = State.get(getOperand(1));
835 return Builder.CreateLogicalAnd(A, B, Name);
836 }
839 "can only generate first lane for PtrAdd");
840 Value *Ptr = State.get(getOperand(0), VPLane(0));
841 Value *Addend = State.get(getOperand(1), VPLane(0));
842 return Builder.CreatePtrAdd(Ptr, Addend, Name, getGEPNoWrapFlags());
843 }
845 Value *Ptr =
847 Value *Addend = State.get(getOperand(1));
848 return Builder.CreatePtrAdd(Ptr, Addend, Name, getGEPNoWrapFlags());
849 }
851 Value *Res = Builder.CreateFreeze(State.get(getOperand(0)));
852 for (VPValue *Op : drop_begin(operands()))
853 Res = Builder.CreateOr(Res, Builder.CreateFreeze(State.get(Op)));
854 return State.VF.isScalar() ? Res : Builder.CreateOrReduce(Res);
855 }
857 Value *LaneToExtract = State.get(getOperand(0), true);
858 Type *IdxTy = State.TypeAnalysis.inferScalarType(getOperand(0));
859 Value *Res = nullptr;
860 Value *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF);
861
862 for (unsigned Idx = 1; Idx != getNumOperands(); ++Idx) {
863 Value *VectorStart =
864 Builder.CreateMul(RuntimeVF, ConstantInt::get(IdxTy, Idx - 1));
865 Value *VectorIdx = Idx == 1
866 ? LaneToExtract
867 : Builder.CreateSub(LaneToExtract, VectorStart);
868 Value *Ext = State.VF.isScalar()
869 ? State.get(getOperand(Idx))
870 : Builder.CreateExtractElement(
871 State.get(getOperand(Idx)), VectorIdx);
872 if (Res) {
873 Value *Cmp = Builder.CreateICmpUGE(LaneToExtract, VectorStart);
874 Res = Builder.CreateSelect(Cmp, Ext, Res);
875 } else {
876 Res = Ext;
877 }
878 }
879 return Res;
880 }
882 if (getNumOperands() == 1) {
883 Value *Mask = State.get(getOperand(0));
884 return Builder.CreateCountTrailingZeroElems(Builder.getInt64Ty(), Mask,
885 /*ZeroIsPoison=*/false, Name);
886 }
887 // If there are multiple operands, create a chain of selects to pick the
888 // first operand with an active lane and add the number of lanes of the
889 // preceding operands.
890 Value *RuntimeVF = getRuntimeVF(Builder, Builder.getInt64Ty(), State.VF);
891 unsigned LastOpIdx = getNumOperands() - 1;
892 Value *Res = nullptr;
893 for (int Idx = LastOpIdx; Idx >= 0; --Idx) {
894 Value *TrailingZeros =
895 State.VF.isScalar()
896 ? Builder.CreateZExt(
897 Builder.CreateICmpEQ(State.get(getOperand(Idx)),
898 Builder.getFalse()),
899 Builder.getInt64Ty())
901 Builder.getInt64Ty(), State.get(getOperand(Idx)),
902 /*ZeroIsPoison=*/false, Name);
903 Value *Current = Builder.CreateAdd(
904 Builder.CreateMul(RuntimeVF, Builder.getInt64(Idx)), TrailingZeros);
905 if (Res) {
906 Value *Cmp = Builder.CreateICmpNE(TrailingZeros, RuntimeVF);
907 Res = Builder.CreateSelect(Cmp, Current, Res);
908 } else {
909 Res = Current;
910 }
911 }
912
913 return Res;
914 }
916 return State.get(getOperand(0), true);
918 return Builder.CreateVectorReverse(State.get(getOperand(0)), "reverse");
919 default:
920 llvm_unreachable("Unsupported opcode for instruction");
921 }
922}
923
925 unsigned Opcode, ElementCount VF, VPCostContext &Ctx) const {
926 Type *ScalarTy = Ctx.Types.inferScalarType(this);
927 Type *ResultTy = VF.isVector() ? toVectorTy(ScalarTy, VF) : ScalarTy;
928 switch (Opcode) {
929 case Instruction::FNeg:
930 return Ctx.TTI.getArithmeticInstrCost(Opcode, ResultTy, Ctx.CostKind);
931 case Instruction::UDiv:
932 case Instruction::SDiv:
933 case Instruction::SRem:
934 case Instruction::URem:
935 case Instruction::Add:
936 case Instruction::FAdd:
937 case Instruction::Sub:
938 case Instruction::FSub:
939 case Instruction::Mul:
940 case Instruction::FMul:
941 case Instruction::FDiv:
942 case Instruction::FRem:
943 case Instruction::Shl:
944 case Instruction::LShr:
945 case Instruction::AShr:
946 case Instruction::And:
947 case Instruction::Or:
948 case Instruction::Xor: {
951
952 if (VF.isVector()) {
953 // Certain instructions can be cheaper to vectorize if they have a
954 // constant second vector operand. One example of this are shifts on x86.
955 VPValue *RHS = getOperand(1);
956 RHSInfo = Ctx.getOperandInfo(RHS);
957
958 if (RHSInfo.Kind == TargetTransformInfo::OK_AnyValue &&
961 }
962
965 if (CtxI)
966 Operands.append(CtxI->value_op_begin(), CtxI->value_op_end());
967 return Ctx.TTI.getArithmeticInstrCost(
968 Opcode, ResultTy, Ctx.CostKind,
969 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
970 RHSInfo, Operands, CtxI, &Ctx.TLI);
971 }
972 case Instruction::Freeze:
973 // This opcode is unknown. Assume that it is the same as 'mul'.
974 return Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, ResultTy,
975 Ctx.CostKind);
976 case Instruction::ExtractValue:
977 return Ctx.TTI.getInsertExtractValueCost(Instruction::ExtractValue,
978 Ctx.CostKind);
979 case Instruction::ICmp:
980 case Instruction::FCmp: {
981 Type *ScalarOpTy = Ctx.Types.inferScalarType(getOperand(0));
982 Type *OpTy = VF.isVector() ? toVectorTy(ScalarOpTy, VF) : ScalarOpTy;
984 return Ctx.TTI.getCmpSelInstrCost(
985 Opcode, OpTy, CmpInst::makeCmpResultType(OpTy), getPredicate(),
986 Ctx.CostKind, {TTI::OK_AnyValue, TTI::OP_None},
987 {TTI::OK_AnyValue, TTI::OP_None}, CtxI);
988 }
989 case Instruction::BitCast: {
990 Type *ScalarTy = Ctx.Types.inferScalarType(this);
991 if (ScalarTy->isPointerTy())
992 return 0;
993 [[fallthrough]];
994 }
995 case Instruction::SExt:
996 case Instruction::ZExt:
997 case Instruction::FPToUI:
998 case Instruction::FPToSI:
999 case Instruction::FPExt:
1000 case Instruction::PtrToInt:
1001 case Instruction::PtrToAddr:
1002 case Instruction::IntToPtr:
1003 case Instruction::SIToFP:
1004 case Instruction::UIToFP:
1005 case Instruction::Trunc:
1006 case Instruction::FPTrunc:
1007 case Instruction::AddrSpaceCast: {
1008 // Computes the CastContextHint from a recipe that may access memory.
1009 auto ComputeCCH = [&](const VPRecipeBase *R) -> TTI::CastContextHint {
1010 if (isa<VPInterleaveBase>(R))
1012 if (const auto *ReplicateRecipe = dyn_cast<VPReplicateRecipe>(R)) {
1013 // Only compute CCH for memory operations, matching the legacy model
1014 // which only considers loads/stores for cast context hints.
1015 auto *UI = cast<Instruction>(ReplicateRecipe->getUnderlyingValue());
1016 if (!isa<LoadInst, StoreInst>(UI))
1018 return ReplicateRecipe->isPredicated() ? TTI::CastContextHint::Masked
1020 }
1021 const auto *WidenMemoryRecipe = dyn_cast<VPWidenMemoryRecipe>(R);
1022 if (WidenMemoryRecipe == nullptr)
1024 if (VF.isScalar())
1026 if (!WidenMemoryRecipe->isConsecutive())
1028 if (WidenMemoryRecipe->isReverse())
1030 if (WidenMemoryRecipe->isMasked())
1033 };
1034
1035 VPValue *Operand = getOperand(0);
1037 // For Trunc/FPTrunc, get the context from the only user.
1038 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
1039 auto GetOnlyUser = [](const VPSingleDefRecipe *R) -> VPRecipeBase * {
1040 if (R->getNumUsers() == 0 || R->hasMoreThanOneUniqueUser())
1041 return nullptr;
1042 return dyn_cast<VPRecipeBase>(*R->user_begin());
1043 };
1044 if (VPRecipeBase *Recipe = GetOnlyUser(this)) {
1045 if (match(Recipe, m_Reverse(m_VPValue())))
1046 Recipe = GetOnlyUser(cast<VPInstruction>(Recipe));
1047 if (Recipe)
1048 CCH = ComputeCCH(Recipe);
1049 }
1050 }
1051 // For Z/Sext, get the context from the operand.
1052 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
1053 Opcode == Instruction::FPExt) {
1054 if (auto *Recipe = Operand->getDefiningRecipe()) {
1055 VPValue *ReverseOp;
1056 if (match(Recipe, m_Reverse(m_VPValue(ReverseOp))))
1057 Recipe = ReverseOp->getDefiningRecipe();
1058 if (Recipe)
1059 CCH = ComputeCCH(Recipe);
1060 }
1061 }
1062
1063 auto *ScalarSrcTy = Ctx.Types.inferScalarType(Operand);
1064 Type *SrcTy = VF.isVector() ? toVectorTy(ScalarSrcTy, VF) : ScalarSrcTy;
1065 // Arm TTI will use the underlying instruction to determine the cost.
1066 return Ctx.TTI.getCastInstrCost(
1067 Opcode, ResultTy, SrcTy, CCH, Ctx.CostKind,
1069 }
1070 case Instruction::Select: {
1072 bool IsScalarCond = getOperand(0)->isDefinedOutsideLoopRegions();
1073 Type *ScalarTy = Ctx.Types.inferScalarType(this);
1074
1075 VPValue *Op0, *Op1;
1076 bool IsLogicalAnd =
1077 match(this, m_LogicalAnd(m_VPValue(Op0), m_VPValue(Op1)));
1078 bool IsLogicalOr = match(this, m_LogicalOr(m_VPValue(Op0), m_VPValue(Op1)));
1079
1080 if (!IsScalarCond && ScalarTy->getScalarSizeInBits() == 1 &&
1081 (IsLogicalAnd || IsLogicalOr)) {
1082 // select x, y, false --> x & y
1083 // select x, true, y --> x | y
1084 const auto [Op1VK, Op1VP] = Ctx.getOperandInfo(Op0);
1085 const auto [Op2VK, Op2VP] = Ctx.getOperandInfo(Op1);
1086
1087 SmallVector<const Value *, 2> Operands;
1088 if (SI && all_of(operands(),
1089 [](VPValue *Op) { return Op->getUnderlyingValue(); }))
1090 append_range(Operands, SI->operands());
1091 return Ctx.TTI.getArithmeticInstrCost(
1092 IsLogicalOr ? Instruction::Or : Instruction::And, ResultTy,
1093 Ctx.CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, Operands, SI);
1094 }
1095
1096 Type *CondTy = Ctx.Types.inferScalarType(getOperand(0));
1097 if (!IsScalarCond)
1098 CondTy = VectorType::get(CondTy, VF);
1099
1100 llvm::CmpPredicate Pred;
1101 if (!match(getOperand(0), m_Cmp(Pred, m_VPValue(), m_VPValue())))
1102 if (auto *CondIRV = dyn_cast<VPIRValue>(getOperand(0)))
1103 if (auto *Cmp = dyn_cast<CmpInst>(CondIRV->getValue()))
1104 Pred = Cmp->getPredicate();
1105 Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
1106 return Ctx.TTI.getCmpSelInstrCost(
1107 Instruction::Select, VectorTy, CondTy, Pred, Ctx.CostKind,
1108 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, SI);
1109 }
1110 }
1111 llvm_unreachable("called for unsupported opcode");
1112}
1113
1115 VPCostContext &Ctx) const {
1117 if (!getUnderlyingValue() && getOpcode() != Instruction::FMul) {
1118 // TODO: Compute cost for VPInstructions without underlying values once
1119 // the legacy cost model has been retired.
1120 return 0;
1121 }
1122
1124 "Should only generate a vector value or single scalar, not scalars "
1125 "for all lanes.");
1127 getOpcode(),
1129 }
1130
1131 switch (getOpcode()) {
1132 case Instruction::Select: {
1134 match(getOperand(0), m_Cmp(Pred, m_VPValue(), m_VPValue()));
1135 auto *CondTy = Ctx.Types.inferScalarType(getOperand(0));
1136 auto *VecTy = Ctx.Types.inferScalarType(getOperand(1));
1137 if (!vputils::onlyFirstLaneUsed(this)) {
1138 CondTy = toVectorTy(CondTy, VF);
1139 VecTy = toVectorTy(VecTy, VF);
1140 }
1141 return Ctx.TTI.getCmpSelInstrCost(Instruction::Select, VecTy, CondTy, Pred,
1142 Ctx.CostKind);
1143 }
1144 case Instruction::ExtractElement:
1146 if (VF.isScalar()) {
1147 // ExtractLane with VF=1 takes care of handling extracting across multiple
1148 // parts.
1149 return 0;
1150 }
1151
1152 // Add on the cost of extracting the element.
1153 auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
1154 return Ctx.TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy,
1155 Ctx.CostKind);
1156 }
1157 case VPInstruction::AnyOf: {
1158 auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
1159 return Ctx.TTI.getArithmeticReductionCost(
1160 Instruction::Or, cast<VectorType>(VecTy), std::nullopt, Ctx.CostKind);
1161 }
1163 Type *ScalarTy = Ctx.Types.inferScalarType(getOperand(0));
1164 if (VF.isScalar())
1165 return Ctx.TTI.getCmpSelInstrCost(Instruction::ICmp, ScalarTy,
1167 CmpInst::ICMP_EQ, Ctx.CostKind);
1168 // Calculate the cost of determining the lane index.
1169 auto *PredTy = toVectorTy(ScalarTy, VF);
1170 IntrinsicCostAttributes Attrs(Intrinsic::experimental_cttz_elts,
1171 Type::getInt64Ty(Ctx.LLVMCtx),
1172 {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)});
1173 return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
1174 }
1176 Type *ScalarTy = Ctx.Types.inferScalarType(getOperand(0));
1177 if (VF.isScalar())
1178 return Ctx.TTI.getCmpSelInstrCost(Instruction::ICmp, ScalarTy,
1180 CmpInst::ICMP_EQ, Ctx.CostKind);
1181 // Calculate the cost of determining the lane index: NOT + cttz_elts + SUB.
1182 auto *PredTy = toVectorTy(ScalarTy, VF);
1183 IntrinsicCostAttributes Attrs(Intrinsic::experimental_cttz_elts,
1184 Type::getInt64Ty(Ctx.LLVMCtx),
1185 {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)});
1186 InstructionCost Cost = Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
1187 // Add cost of NOT operation on the predicate.
1188 Cost += Ctx.TTI.getArithmeticInstrCost(
1189 Instruction::Xor, PredTy, Ctx.CostKind,
1190 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
1191 {TargetTransformInfo::OK_UniformConstantValue,
1192 TargetTransformInfo::OP_None});
1193 // Add cost of SUB operation on the index.
1194 Cost += Ctx.TTI.getArithmeticInstrCost(
1195 Instruction::Sub, Type::getInt64Ty(Ctx.LLVMCtx), Ctx.CostKind);
1196 return Cost;
1197 }
1199 assert(VF.isVector() && "Scalar FirstOrderRecurrenceSplice?");
1201 std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
1202 Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
1203
1204 return Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
1205 cast<VectorType>(VectorTy),
1206 cast<VectorType>(VectorTy), Mask,
1207 Ctx.CostKind, VF.getKnownMinValue() - 1);
1208 }
1210 Type *ArgTy = Ctx.Types.inferScalarType(getOperand(0));
1211 unsigned Multiplier =
1212 cast<ConstantInt>(getOperand(2)->getLiveInIRValue())->getZExtValue();
1213 Type *RetTy = toVectorTy(Type::getInt1Ty(Ctx.LLVMCtx), VF * Multiplier);
1214 IntrinsicCostAttributes Attrs(Intrinsic::get_active_lane_mask, RetTy,
1215 {ArgTy, ArgTy});
1216 return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
1217 }
1219 Type *Arg0Ty = Ctx.Types.inferScalarType(getOperand(0));
1220 Type *I32Ty = Type::getInt32Ty(Ctx.LLVMCtx);
1221 Type *I1Ty = Type::getInt1Ty(Ctx.LLVMCtx);
1222 IntrinsicCostAttributes Attrs(Intrinsic::experimental_get_vector_length,
1223 I32Ty, {Arg0Ty, I32Ty, I1Ty});
1224 return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
1225 }
1227 assert(VF.isVector() && "Reverse operation must be vector type");
1228 auto *VectorTy = cast<VectorType>(
1229 toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF));
1230 return Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
1231 VectorTy, /*Mask=*/{}, Ctx.CostKind,
1232 /*Index=*/0);
1233 }
1235 // Add on the cost of extracting the element.
1236 auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
1237 return Ctx.TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
1238 VecTy, Ctx.CostKind, 0);
1239 }
1241 if (VF == ElementCount::getScalable(1))
1243 [[fallthrough]];
1244 default:
1245 // TODO: Compute cost other VPInstructions once the legacy cost model has
1246 // been retired.
1248 "unexpected VPInstruction witht underlying value");
1249 return 0;
1250 }
1251}
1252
1265
1267 switch (getOpcode()) {
1268 case Instruction::PHI:
1272 return true;
1273 default:
1274 return isScalarCast();
1275 }
1276}
1277
1279 assert(!State.Lane && "VPInstruction executing an Lane");
1280 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
1282 "Set flags not supported for the provided opcode");
1283 if (hasFastMathFlags())
1284 State.Builder.setFastMathFlags(getFastMathFlags());
1285 Value *GeneratedValue = generate(State);
1286 if (!hasResult())
1287 return;
1288 assert(GeneratedValue && "generate must produce a value");
1289 bool GeneratesPerFirstLaneOnly = canGenerateScalarForFirstLane() &&
1292 assert((((GeneratedValue->getType()->isVectorTy() ||
1293 GeneratedValue->getType()->isStructTy()) ==
1294 !GeneratesPerFirstLaneOnly) ||
1295 State.VF.isScalar()) &&
1296 "scalar value but not only first lane defined");
1297 State.set(this, GeneratedValue,
1298 /*IsScalar*/ GeneratesPerFirstLaneOnly);
1299}
1300
1303 return false;
1304 switch (getOpcode()) {
1305 case Instruction::GetElementPtr:
1306 case Instruction::ExtractElement:
1307 case Instruction::Freeze:
1308 case Instruction::FCmp:
1309 case Instruction::ICmp:
1310 case Instruction::Select:
1311 case Instruction::PHI:
1331 case VPInstruction::Not:
1340 return false;
1341 default:
1342 return true;
1343 }
1344}
1345
1347 assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
1349 return vputils::onlyFirstLaneUsed(this);
1350
1351 switch (getOpcode()) {
1352 default:
1353 return false;
1354 case Instruction::ExtractElement:
1355 return Op == getOperand(1);
1356 case Instruction::PHI:
1357 return true;
1358 case Instruction::FCmp:
1359 case Instruction::ICmp:
1360 case Instruction::Select:
1361 case Instruction::Or:
1362 case Instruction::Freeze:
1363 case VPInstruction::Not:
1364 // TODO: Cover additional opcodes.
1365 return vputils::onlyFirstLaneUsed(this);
1374 return true;
1377 // Before replicating by VF, Build(Struct)Vector uses all lanes of the
1378 // operand, after replicating its operands only the first lane is used.
1379 // Before replicating, it will have only a single operand.
1380 return getNumOperands() > 1;
1382 return Op == getOperand(0) || vputils::onlyFirstLaneUsed(this);
1384 // WidePtrAdd supports scalar and vector base addresses.
1385 return false;
1388 return Op == getOperand(1);
1390 return Op == getOperand(0);
1391 };
1392 llvm_unreachable("switch should return");
1393}
1394
1396 assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
1398 return vputils::onlyFirstPartUsed(this);
1399
1400 switch (getOpcode()) {
1401 default:
1402 return false;
1403 case Instruction::FCmp:
1404 case Instruction::ICmp:
1405 case Instruction::Select:
1406 return vputils::onlyFirstPartUsed(this);
1411 return true;
1412 };
1413 llvm_unreachable("switch should return");
1414}
1415
1416#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1418 VPSlotTracker SlotTracker(getParent()->getPlan());
1420}
1421
1423 VPSlotTracker &SlotTracker) const {
1424 O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
1425
1426 if (hasResult()) {
1428 O << " = ";
1429 }
1430
1431 switch (getOpcode()) {
1432 case VPInstruction::Not:
1433 O << "not";
1434 break;
1436 O << "combined load";
1437 break;
1439 O << "combined store";
1440 break;
1442 O << "active lane mask";
1443 break;
1445 O << "EXPLICIT-VECTOR-LENGTH";
1446 break;
1448 O << "first-order splice";
1449 break;
1451 O << "branch-on-cond";
1452 break;
1454 O << "branch-on-two-conds";
1455 break;
1457 O << "TC > VF ? TC - VF : 0";
1458 break;
1460 O << "VF * Part +";
1461 break;
1463 O << "branch-on-count";
1464 break;
1466 O << "broadcast";
1467 break;
1469 O << "buildstructvector";
1470 break;
1472 O << "buildvector";
1473 break;
1475 O << "extract-lane";
1476 break;
1478 O << "extract-last-lane";
1479 break;
1481 O << "extract-last-part";
1482 break;
1484 O << "extract-penultimate-element";
1485 break;
1487 O << "compute-anyof-result";
1488 break;
1490 O << "compute-find-iv-result";
1491 break;
1493 O << "compute-reduction-result";
1494 break;
1496 O << "logical-and";
1497 break;
1499 O << "ptradd";
1500 break;
1502 O << "wide-ptradd";
1503 break;
1505 O << "any-of";
1506 break;
1508 O << "first-active-lane";
1509 break;
1511 O << "last-active-lane";
1512 break;
1514 O << "reduction-start-vector";
1515 break;
1517 O << "resume-for-epilogue";
1518 break;
1520 O << "reverse";
1521 break;
1523 O << "unpack";
1524 break;
1525 default:
1527 }
1528
1529 printFlags(O);
1531}
1532#endif
1533
1535 State.setDebugLocFrom(getDebugLoc());
1536 if (isScalarCast()) {
1537 Value *Op = State.get(getOperand(0), VPLane(0));
1538 Value *Cast = State.Builder.CreateCast(Instruction::CastOps(getOpcode()),
1539 Op, ResultTy);
1540 State.set(this, Cast, VPLane(0));
1541 return;
1542 }
1543 switch (getOpcode()) {
1545 Value *StepVector =
1546 State.Builder.CreateStepVector(VectorType::get(ResultTy, State.VF));
1547 State.set(this, StepVector);
1548 break;
1549 }
1550 case VPInstruction::VScale: {
1551 Value *VScale = State.Builder.CreateVScale(ResultTy);
1552 State.set(this, VScale, true);
1553 break;
1554 }
1555
1556 default:
1557 llvm_unreachable("opcode not implemented yet");
1558 }
1559}
1560
1561#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1563 VPSlotTracker &SlotTracker) const {
1564 O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
1566 O << " = ";
1567
1568 switch (getOpcode()) {
1570 O << "wide-iv-step ";
1572 break;
1574 O << "step-vector " << *ResultTy;
1575 break;
1577 O << "vscale " << *ResultTy;
1578 break;
1579 default:
1580 assert(Instruction::isCast(getOpcode()) && "unhandled opcode");
1583 O << " to " << *ResultTy;
1584 }
1585}
1586#endif
1587
1589 State.setDebugLocFrom(getDebugLoc());
1590 PHINode *NewPhi = State.Builder.CreatePHI(
1591 State.TypeAnalysis.inferScalarType(this), 2, getName());
1592 unsigned NumIncoming = getNumIncoming();
1593 if (getParent() != getParent()->getPlan()->getScalarPreheader()) {
1594 // TODO: Fixup all incoming values of header phis once recipes defining them
1595 // are introduced.
1596 NumIncoming = 1;
1597 }
1598 for (unsigned Idx = 0; Idx != NumIncoming; ++Idx) {
1599 Value *IncV = State.get(getIncomingValue(Idx), VPLane(0));
1600 BasicBlock *PredBB = State.CFG.VPBB2IRBB.at(getIncomingBlock(Idx));
1601 NewPhi->addIncoming(IncV, PredBB);
1602 }
1603 State.set(this, NewPhi, VPLane(0));
1604}
1605
1606#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1607void VPPhi::printRecipe(raw_ostream &O, const Twine &Indent,
1608 VPSlotTracker &SlotTracker) const {
1609 O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
1611 O << " = phi ";
1613}
1614#endif
1615
1616VPIRInstruction *VPIRInstruction ::create(Instruction &I) {
1617 if (auto *Phi = dyn_cast<PHINode>(&I))
1618 return new VPIRPhi(*Phi);
1619 return new VPIRInstruction(I);
1620}
1621
1623 assert(!isa<VPIRPhi>(this) && getNumOperands() == 0 &&
1624 "PHINodes must be handled by VPIRPhi");
1625 // Advance the insert point after the wrapped IR instruction. This allows
1626 // interleaving VPIRInstructions and other recipes.
1627 State.Builder.SetInsertPoint(I.getParent(), std::next(I.getIterator()));
1628}
1629
1631 VPCostContext &Ctx) const {
1632 // The recipe wraps an existing IR instruction on the border of VPlan's scope,
1633 // hence it does not contribute to the cost-modeling for the VPlan.
1634 return 0;
1635}
1636
1638 VPBuilder &Builder) {
1640 "can only update exiting operands to phi nodes");
1641 assert(getNumOperands() > 0 && "must have at least one operand");
1642 VPValue *Exiting = getOperand(0);
1643 if (isa<VPIRValue>(Exiting))
1644 return;
1645
1646 Exiting = Builder.createNaryOp(VPInstruction::ExtractLastPart, Exiting);
1647 Exiting = Builder.createNaryOp(VPInstruction::ExtractLastLane, Exiting);
1648 setOperand(0, Exiting);
1649}
1650
1651#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1653 VPSlotTracker &SlotTracker) const {
1654 O << Indent << "IR " << I;
1655}
1656#endif
1657
1659 PHINode *Phi = &getIRPhi();
1660 for (const auto &[Idx, Op] : enumerate(operands())) {
1661 VPValue *ExitValue = Op;
1662 auto Lane = vputils::isSingleScalar(ExitValue)
1664 : VPLane::getLastLaneForVF(State.VF);
1665 VPBlockBase *Pred = getParent()->getPredecessors()[Idx];
1666 auto *PredVPBB = Pred->getExitingBasicBlock();
1667 BasicBlock *PredBB = State.CFG.VPBB2IRBB[PredVPBB];
1668 // Set insertion point in PredBB in case an extract needs to be generated.
1669 // TODO: Model extracts explicitly.
1670 State.Builder.SetInsertPoint(PredBB, PredBB->getFirstNonPHIIt());
1671 Value *V = State.get(ExitValue, VPLane(Lane));
1672 // If there is no existing block for PredBB in the phi, add a new incoming
1673 // value. Otherwise update the existing incoming value for PredBB.
1674 if (Phi->getBasicBlockIndex(PredBB) == -1)
1675 Phi->addIncoming(V, PredBB);
1676 else
1677 Phi->setIncomingValueForBlock(PredBB, V);
1678 }
1679
1680 // Advance the insert point after the wrapped IR instruction. This allows
1681 // interleaving VPIRInstructions and other recipes.
1682 State.Builder.SetInsertPoint(Phi->getParent(), std::next(Phi->getIterator()));
1683}
1684
1686 VPRecipeBase *R = const_cast<VPRecipeBase *>(getAsRecipe());
1687 assert(R->getNumOperands() == R->getParent()->getNumPredecessors() &&
1688 "Number of phi operands must match number of predecessors");
1689 unsigned Position = R->getParent()->getIndexForPredecessor(IncomingBlock);
1690 R->removeOperand(Position);
1691}
1692
1693#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1695 VPSlotTracker &SlotTracker) const {
1696 interleaveComma(enumerate(getAsRecipe()->operands()), O,
1697 [this, &O, &SlotTracker](auto Op) {
1698 O << "[ ";
1699 Op.value()->printAsOperand(O, SlotTracker);
1700 O << ", ";
1701 getIncomingBlock(Op.index())->printAsOperand(O);
1702 O << " ]";
1703 });
1704}
1705#endif
1706
1707#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1709 VPSlotTracker &SlotTracker) const {
1711
1712 if (getNumOperands() != 0) {
1713 O << " (extra operand" << (getNumOperands() > 1 ? "s" : "") << ": ";
1715 [&O, &SlotTracker](auto Op) {
1716 std::get<0>(Op)->printAsOperand(O, SlotTracker);
1717 O << " from ";
1718 std::get<1>(Op)->printAsOperand(O);
1719 });
1720 O << ")";
1721 }
1722}
1723#endif
1724
1726 for (const auto &[Kind, Node] : Metadata)
1727 I.setMetadata(Kind, Node);
1728}
1729
1731 SmallVector<std::pair<unsigned, MDNode *>> MetadataIntersection;
1732 for (const auto &[KindA, MDA] : Metadata) {
1733 for (const auto &[KindB, MDB] : Other.Metadata) {
1734 if (KindA == KindB && MDA == MDB) {
1735 MetadataIntersection.emplace_back(KindA, MDA);
1736 break;
1737 }
1738 }
1739 }
1740 Metadata = std::move(MetadataIntersection);
1741}
1742
1743#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1745 const Module *M = SlotTracker.getModule();
1746 if (Metadata.empty() || !M)
1747 return;
1748
1749 ArrayRef<StringRef> MDNames = SlotTracker.getMDNames();
1750 O << " (";
1751 interleaveComma(Metadata, O, [&](const auto &KindNodePair) {
1752 auto [Kind, Node] = KindNodePair;
1753 assert(Kind < MDNames.size() && !MDNames[Kind].empty() &&
1754 "Unexpected unnamed metadata kind");
1755 O << "!" << MDNames[Kind] << " ";
1756 Node->printAsOperand(O, M);
1757 });
1758 O << ")";
1759}
1760#endif
1761
1763 assert(State.VF.isVector() && "not widening");
1764 assert(Variant != nullptr && "Can't create vector function.");
1765
1766 FunctionType *VFTy = Variant->getFunctionType();
1767 // Add return type if intrinsic is overloaded on it.
1769 for (const auto &I : enumerate(args())) {
1770 Value *Arg;
1771 // Some vectorized function variants may also take a scalar argument,
1772 // e.g. linear parameters for pointers. This needs to be the scalar value
1773 // from the start of the respective part when interleaving.
1774 if (!VFTy->getParamType(I.index())->isVectorTy())
1775 Arg = State.get(I.value(), VPLane(0));
1776 else
1777 Arg = State.get(I.value(), usesFirstLaneOnly(I.value()));
1778 Args.push_back(Arg);
1779 }
1780
1783 if (CI)
1784 CI->getOperandBundlesAsDefs(OpBundles);
1785
1786 CallInst *V = State.Builder.CreateCall(Variant, Args, OpBundles);
1787 applyFlags(*V);
1788 applyMetadata(*V);
1789 V->setCallingConv(Variant->getCallingConv());
1790
1791 if (!V->getType()->isVoidTy())
1792 State.set(this, V);
1793}
1794
1796 VPCostContext &Ctx) const {
1797 return Ctx.TTI.getCallInstrCost(nullptr, Variant->getReturnType(),
1798 Variant->getFunctionType()->params(),
1799 Ctx.CostKind);
1800}
1801
1802#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1804 VPSlotTracker &SlotTracker) const {
1805 O << Indent << "WIDEN-CALL ";
1806
1807 Function *CalledFn = getCalledScalarFunction();
1808 if (CalledFn->getReturnType()->isVoidTy())
1809 O << "void ";
1810 else {
1812 O << " = ";
1813 }
1814
1815 O << "call";
1816 printFlags(O);
1817 O << " @" << CalledFn->getName() << "(";
1818 interleaveComma(args(), O, [&O, &SlotTracker](VPValue *Op) {
1819 Op->printAsOperand(O, SlotTracker);
1820 });
1821 O << ")";
1822
1823 O << " (using library function";
1824 if (Variant->hasName())
1825 O << ": " << Variant->getName();
1826 O << ")";
1827}
1828#endif
1829
1831 assert(State.VF.isVector() && "not widening");
1832
1833 SmallVector<Type *, 2> TysForDecl;
1834 // Add return type if intrinsic is overloaded on it.
1835 if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, -1,
1836 State.TTI)) {
1837 Type *RetTy = toVectorizedTy(getResultType(), State.VF);
1838 append_range(TysForDecl, getContainedTypes(RetTy));
1839 }
1841 for (const auto &I : enumerate(operands())) {
1842 // Some intrinsics have a scalar argument - don't replace it with a
1843 // vector.
1844 Value *Arg;
1845 if (isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID, I.index(),
1846 State.TTI))
1847 Arg = State.get(I.value(), VPLane(0));
1848 else
1849 Arg = State.get(I.value(), usesFirstLaneOnly(I.value()));
1850 if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, I.index(),
1851 State.TTI))
1852 TysForDecl.push_back(Arg->getType());
1853 Args.push_back(Arg);
1854 }
1855
1856 // Use vector version of the intrinsic.
1857 Module *M = State.Builder.GetInsertBlock()->getModule();
1858 Function *VectorF =
1859 Intrinsic::getOrInsertDeclaration(M, VectorIntrinsicID, TysForDecl);
1860 assert(VectorF &&
1861 "Can't retrieve vector intrinsic or vector-predication intrinsics.");
1862
1865 if (CI)
1866 CI->getOperandBundlesAsDefs(OpBundles);
1867
1868 CallInst *V = State.Builder.CreateCall(VectorF, Args, OpBundles);
1869
1870 applyFlags(*V);
1871 applyMetadata(*V);
1872
1873 if (!V->getType()->isVoidTy())
1874 State.set(this, V);
1875}
1876
1877/// Compute the cost for the intrinsic \p ID with \p Operands, produced by \p R.
1880 const VPRecipeWithIRFlags &R,
1881 ElementCount VF,
1882 VPCostContext &Ctx) {
1883 // Some backends analyze intrinsic arguments to determine cost. Use the
1884 // underlying value for the operand if it has one. Otherwise try to use the
1885 // operand of the underlying call instruction, if there is one. Otherwise
1886 // clear Arguments.
1887 // TODO: Rework TTI interface to be independent of concrete IR values.
1889 for (const auto &[Idx, Op] : enumerate(Operands)) {
1890 auto *V = Op->getUnderlyingValue();
1891 if (!V) {
1892 if (auto *UI = dyn_cast_or_null<CallBase>(R.getUnderlyingValue())) {
1893 Arguments.push_back(UI->getArgOperand(Idx));
1894 continue;
1895 }
1896 Arguments.clear();
1897 break;
1898 }
1899 Arguments.push_back(V);
1900 }
1901
1902 Type *ScalarRetTy = Ctx.Types.inferScalarType(&R);
1903 Type *RetTy = VF.isVector() ? toVectorizedTy(ScalarRetTy, VF) : ScalarRetTy;
1904 SmallVector<Type *> ParamTys;
1905 for (const VPValue *Op : Operands) {
1906 ParamTys.push_back(VF.isVector()
1907 ? toVectorTy(Ctx.Types.inferScalarType(Op), VF)
1908 : Ctx.Types.inferScalarType(Op));
1909 }
1910
1911 // TODO: Rework TTI interface to avoid reliance on underlying IntrinsicInst.
1912 FastMathFlags FMF =
1913 R.hasFastMathFlags() ? R.getFastMathFlags() : FastMathFlags();
1914 IntrinsicCostAttributes CostAttrs(
1915 ID, RetTy, Arguments, ParamTys, FMF,
1916 dyn_cast_or_null<IntrinsicInst>(R.getUnderlyingValue()),
1917 InstructionCost::getInvalid(), &Ctx.TLI);
1918 return Ctx.TTI.getIntrinsicInstrCost(CostAttrs, Ctx.CostKind);
1919}
1920
1922 VPCostContext &Ctx) const {
1924 return getCostForIntrinsics(VectorIntrinsicID, ArgOps, *this, VF, Ctx);
1925}
1926
1928 return Intrinsic::getBaseName(VectorIntrinsicID);
1929}
1930
1932 assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
1933 return all_of(enumerate(operands()), [this, &Op](const auto &X) {
1934 auto [Idx, V] = X;
1936 Idx, nullptr);
1937 });
1938}
1939
1940#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1942 VPSlotTracker &SlotTracker) const {
1943 O << Indent << "WIDEN-INTRINSIC ";
1944 if (ResultTy->isVoidTy()) {
1945 O << "void ";
1946 } else {
1948 O << " = ";
1949 }
1950
1951 O << "call";
1952 printFlags(O);
1953 O << getIntrinsicName() << "(";
1954
1956 Op->printAsOperand(O, SlotTracker);
1957 });
1958 O << ")";
1959}
1960#endif
1961
1963 IRBuilderBase &Builder = State.Builder;
1964
1965 Value *Address = State.get(getOperand(0));
1966 Value *IncAmt = State.get(getOperand(1), /*IsScalar=*/true);
1967 VectorType *VTy = cast<VectorType>(Address->getType());
1968
1969 // The histogram intrinsic requires a mask even if the recipe doesn't;
1970 // if the mask operand was omitted then all lanes should be executed and
1971 // we just need to synthesize an all-true mask.
1972 Value *Mask = nullptr;
1973 if (VPValue *VPMask = getMask())
1974 Mask = State.get(VPMask);
1975 else
1976 Mask =
1977 Builder.CreateVectorSplat(VTy->getElementCount(), Builder.getInt1(1));
1978
1979 // If this is a subtract, we want to invert the increment amount. We may
1980 // add a separate intrinsic in future, but for now we'll try this.
1981 if (Opcode == Instruction::Sub)
1982 IncAmt = Builder.CreateNeg(IncAmt);
1983 else
1984 assert(Opcode == Instruction::Add && "only add or sub supported for now");
1985
1986 State.Builder.CreateIntrinsic(Intrinsic::experimental_vector_histogram_add,
1987 {VTy, IncAmt->getType()},
1988 {Address, IncAmt, Mask});
1989}
1990
1992 VPCostContext &Ctx) const {
1993 // FIXME: Take the gather and scatter into account as well. For now we're
1994 // generating the same cost as the fallback path, but we'll likely
1995 // need to create a new TTI method for determining the cost, including
1996 // whether we can use base + vec-of-smaller-indices or just
1997 // vec-of-pointers.
1998 assert(VF.isVector() && "Invalid VF for histogram cost");
1999 Type *AddressTy = Ctx.Types.inferScalarType(getOperand(0));
2000 VPValue *IncAmt = getOperand(1);
2001 Type *IncTy = Ctx.Types.inferScalarType(IncAmt);
2002 VectorType *VTy = VectorType::get(IncTy, VF);
2003
2004 // Assume that a non-constant update value (or a constant != 1) requires
2005 // a multiply, and add that into the cost.
2006 InstructionCost MulCost =
2007 Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VTy, Ctx.CostKind);
2008 if (auto *IncAmountIRV = dyn_cast<VPIRValue>(IncAmt)) {
2009 ConstantInt *CI = dyn_cast<ConstantInt>(IncAmountIRV->getValue());
2010
2011 if (CI && CI->getZExtValue() == 1)
2012 MulCost = TTI::TCC_Free;
2013 }
2014
2015 // Find the cost of the histogram operation itself.
2016 Type *PtrTy = VectorType::get(AddressTy, VF);
2017 Type *MaskTy = VectorType::get(Type::getInt1Ty(Ctx.LLVMCtx), VF);
2018 IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
2019 Type::getVoidTy(Ctx.LLVMCtx),
2020 {PtrTy, IncTy, MaskTy});
2021
2022 // Add the costs together with the add/sub operation.
2023 return Ctx.TTI.getIntrinsicInstrCost(ICA, Ctx.CostKind) + MulCost +
2024 Ctx.TTI.getArithmeticInstrCost(Opcode, VTy, Ctx.CostKind);
2025}
2026
2027#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2029 VPSlotTracker &SlotTracker) const {
2030 O << Indent << "WIDEN-HISTOGRAM buckets: ";
2032
2033 if (Opcode == Instruction::Sub)
2034 O << ", dec: ";
2035 else {
2036 assert(Opcode == Instruction::Add);
2037 O << ", inc: ";
2038 }
2040
2041 if (VPValue *Mask = getMask()) {
2042 O << ", mask: ";
2043 Mask->printAsOperand(O, SlotTracker);
2044 }
2045}
2046#endif
2047
2048VPIRFlags::FastMathFlagsTy::FastMathFlagsTy(const FastMathFlags &FMF) {
2049 AllowReassoc = FMF.allowReassoc();
2050 NoNaNs = FMF.noNaNs();
2051 NoInfs = FMF.noInfs();
2052 NoSignedZeros = FMF.noSignedZeros();
2053 AllowReciprocal = FMF.allowReciprocal();
2054 AllowContract = FMF.allowContract();
2055 ApproxFunc = FMF.approxFunc();
2056}
2057
2058#if !defined(NDEBUG)
2059bool VPIRFlags::flagsValidForOpcode(unsigned Opcode) const {
2060 switch (OpType) {
2061 case OperationType::OverflowingBinOp:
2062 return Opcode == Instruction::Add || Opcode == Instruction::Sub ||
2063 Opcode == Instruction::Mul || Opcode == Instruction::Shl ||
2064 Opcode == VPInstruction::VPInstruction::CanonicalIVIncrementForPart;
2065 case OperationType::Trunc:
2066 return Opcode == Instruction::Trunc;
2067 case OperationType::DisjointOp:
2068 return Opcode == Instruction::Or;
2069 case OperationType::PossiblyExactOp:
2070 return Opcode == Instruction::AShr || Opcode == Instruction::LShr ||
2071 Opcode == Instruction::UDiv || Opcode == Instruction::SDiv;
2072 case OperationType::GEPOp:
2073 return Opcode == Instruction::GetElementPtr ||
2074 Opcode == VPInstruction::PtrAdd ||
2075 Opcode == VPInstruction::WidePtrAdd;
2076 case OperationType::FPMathOp:
2077 return Opcode == Instruction::Call || Opcode == Instruction::FAdd ||
2078 Opcode == Instruction::FMul || Opcode == Instruction::FSub ||
2079 Opcode == Instruction::FNeg || Opcode == Instruction::FDiv ||
2080 Opcode == Instruction::FRem || Opcode == Instruction::FPExt ||
2081 Opcode == Instruction::FPTrunc || Opcode == Instruction::Select ||
2082 Opcode == VPInstruction::WideIVStep ||
2085 case OperationType::FCmp:
2086 return Opcode == Instruction::FCmp;
2087 case OperationType::NonNegOp:
2088 return Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP;
2089 case OperationType::Cmp:
2090 return Opcode == Instruction::FCmp || Opcode == Instruction::ICmp;
2091 case OperationType::Other:
2092 return true;
2093 }
2094 llvm_unreachable("Unknown OperationType enum");
2095}
2096#endif
2097
2098#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2100 switch (OpType) {
2101 case OperationType::Cmp:
2103 break;
2104 case OperationType::FCmp:
2107 break;
2108 case OperationType::DisjointOp:
2109 if (DisjointFlags.IsDisjoint)
2110 O << " disjoint";
2111 break;
2112 case OperationType::PossiblyExactOp:
2113 if (ExactFlags.IsExact)
2114 O << " exact";
2115 break;
2116 case OperationType::OverflowingBinOp:
2117 if (WrapFlags.HasNUW)
2118 O << " nuw";
2119 if (WrapFlags.HasNSW)
2120 O << " nsw";
2121 break;
2122 case OperationType::Trunc:
2123 if (TruncFlags.HasNUW)
2124 O << " nuw";
2125 if (TruncFlags.HasNSW)
2126 O << " nsw";
2127 break;
2128 case OperationType::FPMathOp:
2130 break;
2131 case OperationType::GEPOp:
2132 if (GEPFlags.isInBounds())
2133 O << " inbounds";
2134 else if (GEPFlags.hasNoUnsignedSignedWrap())
2135 O << " nusw";
2136 if (GEPFlags.hasNoUnsignedWrap())
2137 O << " nuw";
2138 break;
2139 case OperationType::NonNegOp:
2140 if (NonNegFlags.NonNeg)
2141 O << " nneg";
2142 break;
2143 case OperationType::Other:
2144 break;
2145 }
2146 O << " ";
2147}
2148#endif
2149
2151 auto &Builder = State.Builder;
2152 switch (Opcode) {
2153 case Instruction::Call:
2154 case Instruction::Br:
2155 case Instruction::PHI:
2156 case Instruction::GetElementPtr:
2157 llvm_unreachable("This instruction is handled by a different recipe.");
2158 case Instruction::UDiv:
2159 case Instruction::SDiv:
2160 case Instruction::SRem:
2161 case Instruction::URem:
2162 case Instruction::Add:
2163 case Instruction::FAdd:
2164 case Instruction::Sub:
2165 case Instruction::FSub:
2166 case Instruction::FNeg:
2167 case Instruction::Mul:
2168 case Instruction::FMul:
2169 case Instruction::FDiv:
2170 case Instruction::FRem:
2171 case Instruction::Shl:
2172 case Instruction::LShr:
2173 case Instruction::AShr:
2174 case Instruction::And:
2175 case Instruction::Or:
2176 case Instruction::Xor: {
2177 // Just widen unops and binops.
2179 for (VPValue *VPOp : operands())
2180 Ops.push_back(State.get(VPOp));
2181
2182 Value *V = Builder.CreateNAryOp(Opcode, Ops);
2183
2184 if (auto *VecOp = dyn_cast<Instruction>(V)) {
2185 applyFlags(*VecOp);
2186 applyMetadata(*VecOp);
2187 }
2188
2189 // Use this vector value for all users of the original instruction.
2190 State.set(this, V);
2191 break;
2192 }
2193 case Instruction::ExtractValue: {
2194 assert(getNumOperands() == 2 && "expected single level extractvalue");
2195 Value *Op = State.get(getOperand(0));
2197 Value *Extract = Builder.CreateExtractValue(Op, CI->getZExtValue());
2198 State.set(this, Extract);
2199 break;
2200 }
2201 case Instruction::Freeze: {
2202 Value *Op = State.get(getOperand(0));
2203 Value *Freeze = Builder.CreateFreeze(Op);
2204 State.set(this, Freeze);
2205 break;
2206 }
2207 case Instruction::ICmp:
2208 case Instruction::FCmp: {
2209 // Widen compares. Generate vector compares.
2210 bool FCmp = Opcode == Instruction::FCmp;
2211 Value *A = State.get(getOperand(0));
2212 Value *B = State.get(getOperand(1));
2213 Value *C = nullptr;
2214 if (FCmp) {
2215 C = Builder.CreateFCmp(getPredicate(), A, B);
2216 } else {
2217 C = Builder.CreateICmp(getPredicate(), A, B);
2218 }
2219 if (auto *I = dyn_cast<Instruction>(C)) {
2220 applyFlags(*I);
2221 applyMetadata(*I);
2222 }
2223 State.set(this, C);
2224 break;
2225 }
2226 case Instruction::Select: {
2227 VPValue *CondOp = getOperand(0);
2228 Value *Cond = State.get(CondOp, vputils::isSingleScalar(CondOp));
2229 Value *Op0 = State.get(getOperand(1));
2230 Value *Op1 = State.get(getOperand(2));
2231 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
2232 State.set(this, Sel);
2233 if (auto *I = dyn_cast<Instruction>(Sel)) {
2235 applyFlags(*I);
2236 applyMetadata(*I);
2237 }
2238 break;
2239 }
2240 default:
2241 // This instruction is not vectorized by simple widening.
2242 LLVM_DEBUG(dbgs() << "LV: Found an unhandled opcode : "
2243 << Instruction::getOpcodeName(Opcode));
2244 llvm_unreachable("Unhandled instruction!");
2245 } // end of switch.
2246
2247#if !defined(NDEBUG)
2248 // Verify that VPlan type inference results agree with the type of the
2249 // generated values.
2250 assert(VectorType::get(State.TypeAnalysis.inferScalarType(this), State.VF) ==
2251 State.get(this)->getType() &&
2252 "inferred type and type from generated instructions do not match");
2253#endif
2254}
2255
2257 VPCostContext &Ctx) const {
2258 switch (Opcode) {
2259 case Instruction::UDiv:
2260 case Instruction::SDiv:
2261 case Instruction::SRem:
2262 case Instruction::URem:
2263 // If the div/rem operation isn't safe to speculate and requires
2264 // predication, then the only way we can even create a vplan is to insert
2265 // a select on the second input operand to ensure we use the value of 1
2266 // for the inactive lanes. The select will be costed separately.
2267 case Instruction::FNeg:
2268 case Instruction::Add:
2269 case Instruction::FAdd:
2270 case Instruction::Sub:
2271 case Instruction::FSub:
2272 case Instruction::Mul:
2273 case Instruction::FMul:
2274 case Instruction::FDiv:
2275 case Instruction::FRem:
2276 case Instruction::Shl:
2277 case Instruction::LShr:
2278 case Instruction::AShr:
2279 case Instruction::And:
2280 case Instruction::Or:
2281 case Instruction::Xor:
2282 case Instruction::Freeze:
2283 case Instruction::ExtractValue:
2284 case Instruction::ICmp:
2285 case Instruction::FCmp:
2286 return getCostForRecipeWithOpcode(getOpcode(), VF, Ctx);
2287 case Instruction::Select:
2288 return getCostForRecipeWithOpcode(getOpcode(), VF, Ctx);
2289 default:
2290 llvm_unreachable("Unsupported opcode for instruction");
2291 }
2292}
2293
2294#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2296 VPSlotTracker &SlotTracker) const {
2297 O << Indent << "WIDEN ";
2299 O << " = " << Instruction::getOpcodeName(Opcode);
2300 printFlags(O);
2302}
2303#endif
2304
2306 auto &Builder = State.Builder;
2307 /// Vectorize casts.
2308 assert(State.VF.isVector() && "Not vectorizing?");
2309 Type *DestTy = VectorType::get(getResultType(), State.VF);
2310 VPValue *Op = getOperand(0);
2311 Value *A = State.get(Op);
2312 Value *Cast = Builder.CreateCast(Instruction::CastOps(Opcode), A, DestTy);
2313 State.set(this, Cast);
2314 if (auto *CastOp = dyn_cast<Instruction>(Cast)) {
2315 applyFlags(*CastOp);
2316 applyMetadata(*CastOp);
2317 }
2318}
2319
2321 VPCostContext &Ctx) const {
2322 // TODO: In some cases, VPWidenCastRecipes are created but not considered in
2323 // the legacy cost model, including truncates/extends when evaluating a
2324 // reduction in a smaller type.
2325 if (!getUnderlyingValue())
2326 return 0;
2327 return getCostForRecipeWithOpcode(getOpcode(), VF, Ctx);
2328}
2329
2330#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2332 VPSlotTracker &SlotTracker) const {
2333 O << Indent << "WIDEN-CAST ";
2335 O << " = " << Instruction::getOpcodeName(Opcode);
2336 printFlags(O);
2338 O << " to " << *getResultType();
2339}
2340#endif
2341
2343 VPCostContext &Ctx) const {
2344 return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
2345}
2346
2347#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2349 raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const {
2350 O << Indent;
2352 O << " = WIDEN-INDUCTION";
2353 printFlags(O);
2355
2356 if (auto *TI = getTruncInst())
2357 O << " (truncated to " << *TI->getType() << ")";
2358}
2359#endif
2360
2362 // The step may be defined by a recipe in the preheader (e.g. if it requires
2363 // SCEV expansion), but for the canonical induction the step is required to be
2364 // 1, which is represented as live-in.
2366 if (!Step)
2367 return false;
2368 ;
2369 auto *StepC = dyn_cast<ConstantInt>(Step->getValue());
2370 auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getValue());
2371 return StartC && StartC->isZero() && StepC && StepC->isOne() &&
2372 getScalarType() == getRegion()->getCanonicalIVType();
2373}
2374
2375#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2377 VPSlotTracker &SlotTracker) const {
2378 O << Indent;
2380 O << " = DERIVED-IV ";
2381 getStartValue()->printAsOperand(O, SlotTracker);
2382 O << " + ";
2383 getOperand(1)->printAsOperand(O, SlotTracker);
2384 O << " * ";
2385 getStepValue()->printAsOperand(O, SlotTracker);
2386}
2387#endif
2388
2390 // Fast-math-flags propagate from the original induction instruction.
2391 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
2392 if (hasFastMathFlags())
2393 State.Builder.setFastMathFlags(getFastMathFlags());
2394
2395 /// Compute scalar induction steps. \p ScalarIV is the scalar induction
2396 /// variable on which to base the steps, \p Step is the size of the step.
2397
2398 Value *BaseIV = State.get(getOperand(0), VPLane(0));
2399 Value *Step = State.get(getStepValue(), VPLane(0));
2400 IRBuilderBase &Builder = State.Builder;
2401
2402 // Ensure step has the same type as that of scalar IV.
2403 Type *BaseIVTy = BaseIV->getType()->getScalarType();
2404 assert(BaseIVTy == Step->getType() && "Types of BaseIV and Step must match!");
2405
2406 // We build scalar steps for both integer and floating-point induction
2407 // variables. Here, we determine the kind of arithmetic we will perform.
2410 if (BaseIVTy->isIntegerTy()) {
2411 AddOp = Instruction::Add;
2412 MulOp = Instruction::Mul;
2413 } else {
2414 AddOp = InductionOpcode;
2415 MulOp = Instruction::FMul;
2416 }
2417
2418 // Determine the number of scalars we need to generate for each unroll
2419 // iteration.
2420 bool FirstLaneOnly = vputils::onlyFirstLaneUsed(this);
2421 // Compute the scalar steps and save the results in State.
2422 Type *IntStepTy =
2423 IntegerType::get(BaseIVTy->getContext(), BaseIVTy->getScalarSizeInBits());
2424
2425 unsigned StartLane = 0;
2426 unsigned EndLane = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
2427 if (State.Lane) {
2428 StartLane = State.Lane->getKnownLane();
2429 EndLane = StartLane + 1;
2430 }
2431 Value *StartIdx0;
2432 if (getUnrollPart(*this) == 0)
2433 StartIdx0 = ConstantInt::get(IntStepTy, 0);
2434 else {
2435 StartIdx0 = State.get(getOperand(2), true);
2436 if (getUnrollPart(*this) != 1) {
2437 StartIdx0 =
2438 Builder.CreateMul(StartIdx0, ConstantInt::get(StartIdx0->getType(),
2439 getUnrollPart(*this)));
2440 }
2441 StartIdx0 = Builder.CreateSExtOrTrunc(StartIdx0, IntStepTy);
2442 }
2443
2444 if (BaseIVTy->isFloatingPointTy())
2445 StartIdx0 = Builder.CreateSIToFP(StartIdx0, BaseIVTy);
2446
2447 for (unsigned Lane = StartLane; Lane < EndLane; ++Lane) {
2448 // It is okay if the induction variable type cannot hold the lane number,
2449 // we expect truncation in this case.
2450 Constant *LaneValue =
2451 BaseIVTy->isIntegerTy()
2452 ? ConstantInt::get(BaseIVTy, Lane, /*IsSigned=*/false,
2453 /*ImplicitTrunc=*/true)
2454 : ConstantFP::get(BaseIVTy, Lane);
2455 Value *StartIdx = Builder.CreateBinOp(AddOp, StartIdx0, LaneValue);
2456 // The step returned by `createStepForVF` is a runtime-evaluated value
2457 // when VF is scalable. Otherwise, it should be folded into a Constant.
2458 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2459 "Expected StartIdx to be folded to a constant when VF is not "
2460 "scalable");
2461 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2462 auto *Add = Builder.CreateBinOp(AddOp, BaseIV, Mul);
2463 State.set(this, Add, VPLane(Lane));
2464 }
2465}
2466
2467#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2469 VPSlotTracker &SlotTracker) const {
2470 O << Indent;
2472 O << " = SCALAR-STEPS ";
2474}
2475#endif
2476
2478 assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
2480}
2481
2483 assert(State.VF.isVector() && "not widening");
2484 // Construct a vector GEP by widening the operands of the scalar GEP as
2485 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
2486 // results in a vector of pointers when at least one operand of the GEP
2487 // is vector-typed. Thus, to keep the representation compact, we only use
2488 // vector-typed operands for loop-varying values.
2489
2490 bool AllOperandsAreInvariant = all_of(operands(), [](VPValue *Op) {
2491 return Op->isDefinedOutsideLoopRegions();
2492 });
2493 if (AllOperandsAreInvariant) {
2494 // If we are vectorizing, but the GEP has only loop-invariant operands,
2495 // the GEP we build (by only using vector-typed operands for
2496 // loop-varying values) would be a scalar pointer. Thus, to ensure we
2497 // produce a vector of pointers, we need to either arbitrarily pick an
2498 // operand to broadcast, or broadcast a clone of the original GEP.
2499 // Here, we broadcast a clone of the original.
2500
2502 for (unsigned I = 0, E = getNumOperands(); I != E; I++)
2503 Ops.push_back(State.get(getOperand(I), VPLane(0)));
2504
2505 auto *NewGEP =
2506 State.Builder.CreateGEP(getSourceElementType(), Ops[0], drop_begin(Ops),
2507 "", getGEPNoWrapFlags());
2508 Value *Splat = State.Builder.CreateVectorSplat(State.VF, NewGEP);
2509 State.set(this, Splat);
2510 return;
2511 }
2512
2513 // If the GEP has at least one loop-varying operand, we are sure to
2514 // produce a vector of pointers unless VF is scalar.
2515 // The pointer operand of the new GEP. If it's loop-invariant, we
2516 // won't broadcast it.
2517 auto *Ptr = State.get(getOperand(0), isPointerLoopInvariant());
2518
2519 // Collect all the indices for the new GEP. If any index is
2520 // loop-invariant, we won't broadcast it.
2522 for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
2523 VPValue *Operand = getOperand(I);
2524 Indices.push_back(State.get(Operand, isIndexLoopInvariant(I - 1)));
2525 }
2526
2527 // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
2528 // but it should be a vector, otherwise.
2529 auto *NewGEP = State.Builder.CreateGEP(getSourceElementType(), Ptr, Indices,
2530 "", getGEPNoWrapFlags());
2531 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
2532 "NewGEP is not a pointer vector");
2533 State.set(this, NewGEP);
2534}
2535
2536#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2538 VPSlotTracker &SlotTracker) const {
2539 O << Indent << "WIDEN-GEP ";
2540 O << (isPointerLoopInvariant() ? "Inv" : "Var");
2541 for (size_t I = 0; I < getNumOperands() - 1; ++I)
2542 O << "[" << (isIndexLoopInvariant(I) ? "Inv" : "Var") << "]";
2543
2544 O << " ";
2546 O << " = getelementptr";
2547 printFlags(O);
2549}
2550#endif
2551
2553 auto &Builder = State.Builder;
2554 unsigned CurrentPart = getUnrollPart(*this);
2555 const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout();
2556 Type *IndexTy = DL.getIndexType(State.TypeAnalysis.inferScalarType(this));
2557
2558 // The wide store needs to start at the last vector element.
2559 Value *RunTimeVF = State.get(getVFValue(), VPLane(0));
2560 if (IndexTy != RunTimeVF->getType())
2561 RunTimeVF = Builder.CreateZExtOrTrunc(RunTimeVF, IndexTy);
2562 // NumElt = Stride * CurrentPart * RunTimeVF
2563 Value *NumElt = Builder.CreateMul(
2564 ConstantInt::getSigned(IndexTy, Stride * (int64_t)CurrentPart),
2565 RunTimeVF);
2566 // LastLane = Stride * (RunTimeVF - 1)
2567 Value *LastLane = Builder.CreateSub(RunTimeVF, ConstantInt::get(IndexTy, 1));
2568 if (Stride != 1)
2569 LastLane =
2570 Builder.CreateMul(ConstantInt::getSigned(IndexTy, Stride), LastLane);
2571 Value *Ptr = State.get(getOperand(0), VPLane(0));
2572 Value *ResultPtr =
2573 Builder.CreateGEP(IndexedTy, Ptr, NumElt, "", getGEPNoWrapFlags());
2574 ResultPtr = Builder.CreateGEP(IndexedTy, ResultPtr, LastLane, "",
2576
2577 State.set(this, ResultPtr, /*IsScalar*/ true);
2578}
2579
2580#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2582 VPSlotTracker &SlotTracker) const {
2583 O << Indent;
2585 O << " = vector-end-pointer";
2586 printFlags(O);
2588}
2589#endif
2590
2592 auto &Builder = State.Builder;
2593 assert(getOffset() &&
2594 "Expected prior simplification of recipe without offset");
2595 Value *Ptr = State.get(getOperand(0), VPLane(0));
2596 Value *Offset = State.get(getOffset(), true);
2597 Value *ResultPtr = Builder.CreateGEP(getSourceElementType(), Ptr, Offset, "",
2599 State.set(this, ResultPtr, /*IsScalar*/ true);
2600}
2601
2602#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2604 VPSlotTracker &SlotTracker) const {
2605 O << Indent;
2607 O << " = vector-pointer";
2608 printFlags(O);
2610}
2611#endif
2612
2614 VPCostContext &Ctx) const {
2615 // A blend will be expanded to a select VPInstruction, which will generate a
2616 // scalar select if only the first lane is used.
2618 VF = ElementCount::getFixed(1);
2619
2620 Type *ResultTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
2621 Type *CmpTy = toVectorTy(Type::getInt1Ty(Ctx.Types.getContext()), VF);
2622 return (getNumIncomingValues() - 1) *
2623 Ctx.TTI.getCmpSelInstrCost(Instruction::Select, ResultTy, CmpTy,
2624 CmpInst::BAD_ICMP_PREDICATE, Ctx.CostKind);
2625}
2626
2627#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2629 VPSlotTracker &SlotTracker) const {
2630 O << Indent << "BLEND ";
2632 O << " =";
2633 if (getNumIncomingValues() == 1) {
2634 // Not a User of any mask: not really blending, this is a
2635 // single-predecessor phi.
2636 O << " ";
2637 getIncomingValue(0)->printAsOperand(O, SlotTracker);
2638 } else {
2639 for (unsigned I = 0, E = getNumIncomingValues(); I < E; ++I) {
2640 O << " ";
2641 getIncomingValue(I)->printAsOperand(O, SlotTracker);
2642 if (I == 0)
2643 continue;
2644 O << "/";
2645 getMask(I)->printAsOperand(O, SlotTracker);
2646 }
2647 }
2648}
2649#endif
2650
2652 assert(!State.Lane && "Reduction being replicated.");
2655 "In-loop AnyOf reductions aren't currently supported");
2656 // Propagate the fast-math flags carried by the underlying instruction.
2657 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
2658 State.Builder.setFastMathFlags(getFastMathFlags());
2659 Value *NewVecOp = State.get(getVecOp());
2660 if (VPValue *Cond = getCondOp()) {
2661 Value *NewCond = State.get(Cond, State.VF.isScalar());
2662 VectorType *VecTy = dyn_cast<VectorType>(NewVecOp->getType());
2663 Type *ElementTy = VecTy ? VecTy->getElementType() : NewVecOp->getType();
2664
2665 Value *Start = getRecurrenceIdentity(Kind, ElementTy, getFastMathFlags());
2666 if (State.VF.isVector())
2667 Start = State.Builder.CreateVectorSplat(VecTy->getElementCount(), Start);
2668
2669 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, Start);
2670 NewVecOp = Select;
2671 }
2672 Value *NewRed;
2673 Value *NextInChain;
2674 if (isOrdered()) {
2675 Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true);
2676 if (State.VF.isVector())
2677 NewRed =
2678 createOrderedReduction(State.Builder, Kind, NewVecOp, PrevInChain);
2679 else
2680 NewRed = State.Builder.CreateBinOp(
2682 PrevInChain, NewVecOp);
2683 PrevInChain = NewRed;
2684 NextInChain = NewRed;
2685 } else if (isPartialReduction()) {
2686 assert(Kind == RecurKind::Add && "Unexpected partial reduction kind");
2687 Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ false);
2688 NewRed = State.Builder.CreateIntrinsic(
2689 PrevInChain->getType(), Intrinsic::vector_partial_reduce_add,
2690 {PrevInChain, NewVecOp}, nullptr, "partial.reduce");
2691 PrevInChain = NewRed;
2692 NextInChain = NewRed;
2693 } else {
2694 assert(isInLoop() &&
2695 "The reduction must either be ordered, partial or in-loop");
2696 Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true);
2697 NewRed = createSimpleReduction(State.Builder, NewVecOp, Kind);
2699 NextInChain = createMinMaxOp(State.Builder, Kind, NewRed, PrevInChain);
2700 else
2701 NextInChain = State.Builder.CreateBinOp(
2703 PrevInChain, NewRed);
2704 }
2705 State.set(this, NextInChain, /*IsScalar*/ !isPartialReduction());
2706}
2707
2709 assert(!State.Lane && "Reduction being replicated.");
2710
2711 auto &Builder = State.Builder;
2712 // Propagate the fast-math flags carried by the underlying instruction.
2713 IRBuilderBase::FastMathFlagGuard FMFGuard(Builder);
2714 Builder.setFastMathFlags(getFastMathFlags());
2715
2717 Value *Prev = State.get(getChainOp(), /*IsScalar*/ true);
2718 Value *VecOp = State.get(getVecOp());
2719 Value *EVL = State.get(getEVL(), VPLane(0));
2720
2721 Value *Mask;
2722 if (VPValue *CondOp = getCondOp())
2723 Mask = State.get(CondOp);
2724 else
2725 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
2726
2727 Value *NewRed;
2728 if (isOrdered()) {
2729 NewRed = createOrderedReduction(Builder, Kind, VecOp, Prev, Mask, EVL);
2730 } else {
2731 NewRed = createSimpleReduction(Builder, VecOp, Kind, Mask, EVL);
2733 NewRed = createMinMaxOp(Builder, Kind, NewRed, Prev);
2734 else
2735 NewRed = Builder.CreateBinOp(
2737 Prev);
2738 }
2739 State.set(this, NewRed, /*IsScalar*/ true);
2740}
2741
2743 VPCostContext &Ctx) const {
2744 RecurKind RdxKind = getRecurrenceKind();
2745 Type *ElementTy = Ctx.Types.inferScalarType(this);
2746 auto *VectorTy = cast<VectorType>(toVectorTy(ElementTy, VF));
2747 unsigned Opcode = RecurrenceDescriptor::getOpcode(RdxKind);
2749 std::optional<FastMathFlags> OptionalFMF =
2750 ElementTy->isFloatingPointTy() ? std::make_optional(FMFs) : std::nullopt;
2751
2752 if (isPartialReduction()) {
2753 InstructionCost CondCost = 0;
2754 if (isConditional()) {
2756 auto *CondTy = cast<VectorType>(
2757 toVectorTy(Ctx.Types.inferScalarType(getCondOp()), VF));
2758 CondCost = Ctx.TTI.getCmpSelInstrCost(Instruction::Select, VectorTy,
2759 CondTy, Pred, Ctx.CostKind);
2760 }
2761 return CondCost + Ctx.TTI.getPartialReductionCost(
2762 Opcode, ElementTy, ElementTy, ElementTy, VF,
2764 TargetTransformInfo::PR_None, std::nullopt,
2765 Ctx.CostKind);
2766 }
2767
2768 // TODO: Support any-of reductions.
2769 assert(
2771 ForceTargetInstructionCost.getNumOccurrences() > 0) &&
2772 "Any-of reduction not implemented in VPlan-based cost model currently.");
2773
2774 // Note that TTI should model the cost of moving result to the scalar register
2775 // and the BinOp cost in the getMinMaxReductionCost().
2778 return Ctx.TTI.getMinMaxReductionCost(Id, VectorTy, FMFs, Ctx.CostKind);
2779 }
2780
2781 // Note that TTI should model the cost of moving result to the scalar register
2782 // and the BinOp cost in the getArithmeticReductionCost().
2783 return Ctx.TTI.getArithmeticReductionCost(Opcode, VectorTy, OptionalFMF,
2784 Ctx.CostKind);
2785}
2786
2787VPExpressionRecipe::VPExpressionRecipe(
2788 ExpressionTypes ExpressionType,
2789 ArrayRef<VPSingleDefRecipe *> ExpressionRecipes)
2790 : VPSingleDefRecipe(VPDef::VPExpressionSC, {}, {}),
2791 ExpressionRecipes(ExpressionRecipes), ExpressionType(ExpressionType) {
2792 assert(!ExpressionRecipes.empty() && "Nothing to combine?");
2793 assert(
2794 none_of(ExpressionRecipes,
2795 [](VPSingleDefRecipe *R) { return R->mayHaveSideEffects(); }) &&
2796 "expression cannot contain recipes with side-effects");
2797
2798 // Maintain a copy of the expression recipes as a set of users.
2799 SmallPtrSet<VPUser *, 4> ExpressionRecipesAsSetOfUsers;
2800 for (auto *R : ExpressionRecipes)
2801 ExpressionRecipesAsSetOfUsers.insert(R);
2802
2803 // Recipes in the expression, except the last one, must only be used by
2804 // (other) recipes inside the expression. If there are other users, external
2805 // to the expression, use a clone of the recipe for external users.
2806 for (VPSingleDefRecipe *R : reverse(ExpressionRecipes)) {
2807 if (R != ExpressionRecipes.back() &&
2808 any_of(R->users(), [&ExpressionRecipesAsSetOfUsers](VPUser *U) {
2809 return !ExpressionRecipesAsSetOfUsers.contains(U);
2810 })) {
2811 // There are users outside of the expression. Clone the recipe and use the
2812 // clone those external users.
2813 VPSingleDefRecipe *CopyForExtUsers = R->clone();
2814 R->replaceUsesWithIf(CopyForExtUsers, [&ExpressionRecipesAsSetOfUsers](
2815 VPUser &U, unsigned) {
2816 return !ExpressionRecipesAsSetOfUsers.contains(&U);
2817 });
2818 CopyForExtUsers->insertBefore(R);
2819 }
2820 if (R->getParent())
2821 R->removeFromParent();
2822 }
2823
2824 // Internalize all external operands to the expression recipes. To do so,
2825 // create new temporary VPValues for all operands defined by a recipe outside
2826 // the expression. The original operands are added as operands of the
2827 // VPExpressionRecipe itself.
2828 for (auto *R : ExpressionRecipes) {
2829 for (const auto &[Idx, Op] : enumerate(R->operands())) {
2830 auto *Def = Op->getDefiningRecipe();
2831 if (Def && ExpressionRecipesAsSetOfUsers.contains(Def))
2832 continue;
2833 addOperand(Op);
2834 LiveInPlaceholders.push_back(new VPSymbolicValue());
2835 }
2836 }
2837
2838 // Replace each external operand with the first one created for it in
2839 // LiveInPlaceholders.
2840 for (auto *R : ExpressionRecipes)
2841 for (auto const &[LiveIn, Tmp] : zip(operands(), LiveInPlaceholders))
2842 R->replaceUsesOfWith(LiveIn, Tmp);
2843}
2844
2846 for (auto *R : ExpressionRecipes)
2847 // Since the list could contain duplicates, make sure the recipe hasn't
2848 // already been inserted.
2849 if (!R->getParent())
2850 R->insertBefore(this);
2851
2852 for (const auto &[Idx, Op] : enumerate(operands()))
2853 LiveInPlaceholders[Idx]->replaceAllUsesWith(Op);
2854
2855 replaceAllUsesWith(ExpressionRecipes.back());
2856 ExpressionRecipes.clear();
2857}
2858
2860 VPCostContext &Ctx) const {
2861 Type *RedTy = Ctx.Types.inferScalarType(this);
2862 auto *SrcVecTy = cast<VectorType>(
2863 toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF));
2864 assert(RedTy->isIntegerTy() &&
2865 "VPExpressionRecipe only supports integer types currently.");
2866 unsigned Opcode = RecurrenceDescriptor::getOpcode(
2867 cast<VPReductionRecipe>(ExpressionRecipes.back())->getRecurrenceKind());
2868 switch (ExpressionType) {
2869 case ExpressionTypes::ExtendedReduction: {
2870 unsigned Opcode = RecurrenceDescriptor::getOpcode(
2871 cast<VPReductionRecipe>(ExpressionRecipes[1])->getRecurrenceKind());
2872 auto *ExtR = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
2873
2874 return cast<VPReductionRecipe>(ExpressionRecipes.back())
2875 ->isPartialReduction()
2876 ? Ctx.TTI.getPartialReductionCost(
2877 Opcode, Ctx.Types.inferScalarType(getOperand(0)), nullptr,
2878 RedTy, VF,
2880 ExtR->getOpcode()),
2881 TargetTransformInfo::PR_None, std::nullopt, Ctx.CostKind)
2882 : Ctx.TTI.getExtendedReductionCost(
2883 Opcode, ExtR->getOpcode() == Instruction::ZExt, RedTy,
2884 SrcVecTy, std::nullopt, Ctx.CostKind);
2885 }
2886 case ExpressionTypes::MulAccReduction:
2887 return Ctx.TTI.getMulAccReductionCost(false, Opcode, RedTy, SrcVecTy,
2888 Ctx.CostKind);
2889
2890 case ExpressionTypes::ExtNegatedMulAccReduction:
2891 assert(Opcode == Instruction::Add && "Unexpected opcode");
2892 Opcode = Instruction::Sub;
2893 [[fallthrough]];
2894 case ExpressionTypes::ExtMulAccReduction: {
2895 auto *RedR = cast<VPReductionRecipe>(ExpressionRecipes.back());
2896 if (RedR->isPartialReduction()) {
2897 auto *Ext0R = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
2898 auto *Ext1R = cast<VPWidenCastRecipe>(ExpressionRecipes[1]);
2899 auto *Mul = cast<VPWidenRecipe>(ExpressionRecipes[2]);
2900 return Ctx.TTI.getPartialReductionCost(
2901 Opcode, Ctx.Types.inferScalarType(getOperand(0)),
2902 Ctx.Types.inferScalarType(getOperand(1)), RedTy, VF,
2904 Ext0R->getOpcode()),
2906 Ext1R->getOpcode()),
2907 Mul->getOpcode(), Ctx.CostKind);
2908 }
2909 return Ctx.TTI.getMulAccReductionCost(
2910 cast<VPWidenCastRecipe>(ExpressionRecipes.front())->getOpcode() ==
2911 Instruction::ZExt,
2912 Opcode, RedTy, SrcVecTy, Ctx.CostKind);
2913 }
2914 }
2915 llvm_unreachable("Unknown VPExpressionRecipe::ExpressionTypes enum");
2916}
2917
2919 return any_of(ExpressionRecipes, [](VPSingleDefRecipe *R) {
2920 return R->mayReadFromMemory() || R->mayWriteToMemory();
2921 });
2922}
2923
2925 assert(
2926 none_of(ExpressionRecipes,
2927 [](VPSingleDefRecipe *R) { return R->mayHaveSideEffects(); }) &&
2928 "expression cannot contain recipes with side-effects");
2929 return false;
2930}
2931
2933 // Cannot use vputils::isSingleScalar(), because all external operands
2934 // of the expression will be live-ins while bundled.
2935 auto *RR = dyn_cast<VPReductionRecipe>(ExpressionRecipes.back());
2936 return RR && !RR->isPartialReduction();
2937}
2938
2939#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2940
2942 VPSlotTracker &SlotTracker) const {
2943 O << Indent << "EXPRESSION ";
2945 O << " = ";
2946 auto *Red = cast<VPReductionRecipe>(ExpressionRecipes.back());
2947 unsigned Opcode = RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind());
2948
2949 switch (ExpressionType) {
2950 case ExpressionTypes::ExtendedReduction: {
2952 O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce.";
2953 O << Instruction::getOpcodeName(Opcode) << " (";
2955 Red->printFlags(O);
2956
2957 auto *Ext0 = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
2958 O << Instruction::getOpcodeName(Ext0->getOpcode()) << " to "
2959 << *Ext0->getResultType();
2960 if (Red->isConditional()) {
2961 O << ", ";
2962 Red->getCondOp()->printAsOperand(O, SlotTracker);
2963 }
2964 O << ")";
2965 break;
2966 }
2967 case ExpressionTypes::ExtNegatedMulAccReduction: {
2969 O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce.";
2971 RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind()))
2972 << " (sub (0, mul";
2973 auto *Mul = cast<VPWidenRecipe>(ExpressionRecipes[2]);
2974 Mul->printFlags(O);
2975 O << "(";
2977 auto *Ext0 = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
2978 O << " " << Instruction::getOpcodeName(Ext0->getOpcode()) << " to "
2979 << *Ext0->getResultType() << "), (";
2981 auto *Ext1 = cast<VPWidenCastRecipe>(ExpressionRecipes[1]);
2982 O << " " << Instruction::getOpcodeName(Ext1->getOpcode()) << " to "
2983 << *Ext1->getResultType() << ")";
2984 if (Red->isConditional()) {
2985 O << ", ";
2986 Red->getCondOp()->printAsOperand(O, SlotTracker);
2987 }
2988 O << "))";
2989 break;
2990 }
2991 case ExpressionTypes::MulAccReduction:
2992 case ExpressionTypes::ExtMulAccReduction: {
2994 O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce.";
2996 RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind()))
2997 << " (";
2998 O << "mul";
2999 bool IsExtended = ExpressionType == ExpressionTypes::ExtMulAccReduction;
3000 auto *Mul = cast<VPWidenRecipe>(IsExtended ? ExpressionRecipes[2]
3001 : ExpressionRecipes[0]);
3002 Mul->printFlags(O);
3003 if (IsExtended)
3004 O << "(";
3006 if (IsExtended) {
3007 auto *Ext0 = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
3008 O << " " << Instruction::getOpcodeName(Ext0->getOpcode()) << " to "
3009 << *Ext0->getResultType() << "), (";
3010 } else {
3011 O << ", ";
3012 }
3014 if (IsExtended) {
3015 auto *Ext1 = cast<VPWidenCastRecipe>(ExpressionRecipes[1]);
3016 O << " " << Instruction::getOpcodeName(Ext1->getOpcode()) << " to "
3017 << *Ext1->getResultType() << ")";
3018 }
3019 if (Red->isConditional()) {
3020 O << ", ";
3021 Red->getCondOp()->printAsOperand(O, SlotTracker);
3022 }
3023 O << ")";
3024 break;
3025 }
3026 }
3027}
3028
3030 VPSlotTracker &SlotTracker) const {
3031 if (isPartialReduction())
3032 O << Indent << "PARTIAL-REDUCE ";
3033 else
3034 O << Indent << "REDUCE ";
3036 O << " = ";
3038 O << " +";
3039 printFlags(O);
3040 O << " reduce."
3043 << " (";
3045 if (isConditional()) {
3046 O << ", ";
3048 }
3049 O << ")";
3050}
3051
3053 VPSlotTracker &SlotTracker) const {
3054 O << Indent << "REDUCE ";
3056 O << " = ";
3058 O << " +";
3059 printFlags(O);
3060 O << " vp.reduce."
3063 << " (";
3065 O << ", ";
3067 if (isConditional()) {
3068 O << ", ";
3070 }
3071 O << ")";
3072}
3073
3074#endif
3075
3076/// A helper function to scalarize a single Instruction in the innermost loop.
3077/// Generates a sequence of scalar instances for lane \p Lane. Uses the VPValue
3078/// operands from \p RepRecipe instead of \p Instr's operands.
3079static void scalarizeInstruction(const Instruction *Instr,
3080 VPReplicateRecipe *RepRecipe,
3081 const VPLane &Lane, VPTransformState &State) {
3082 assert((!Instr->getType()->isAggregateType() ||
3083 canVectorizeTy(Instr->getType())) &&
3084 "Expected vectorizable or non-aggregate type.");
3085
3086 // Does this instruction return a value ?
3087 bool IsVoidRetTy = Instr->getType()->isVoidTy();
3088
3089 Instruction *Cloned = Instr->clone();
3090 if (!IsVoidRetTy) {
3091 Cloned->setName(Instr->getName() + ".cloned");
3092 Type *ResultTy = State.TypeAnalysis.inferScalarType(RepRecipe);
3093 // The operands of the replicate recipe may have been narrowed, resulting in
3094 // a narrower result type. Update the type of the cloned instruction to the
3095 // correct type.
3096 if (ResultTy != Cloned->getType())
3097 Cloned->mutateType(ResultTy);
3098 }
3099
3100 RepRecipe->applyFlags(*Cloned);
3101 RepRecipe->applyMetadata(*Cloned);
3102
3103 if (RepRecipe->hasPredicate())
3104 cast<CmpInst>(Cloned)->setPredicate(RepRecipe->getPredicate());
3105
3106 if (auto DL = RepRecipe->getDebugLoc())
3107 State.setDebugLocFrom(DL);
3108
3109 // Replace the operands of the cloned instructions with their scalar
3110 // equivalents in the new loop.
3111 for (const auto &I : enumerate(RepRecipe->operands())) {
3112 auto InputLane = Lane;
3113 VPValue *Operand = I.value();
3114 if (vputils::isSingleScalar(Operand))
3115 InputLane = VPLane::getFirstLane();
3116 Cloned->setOperand(I.index(), State.get(Operand, InputLane));
3117 }
3118
3119 // Place the cloned scalar in the new loop.
3120 State.Builder.Insert(Cloned);
3121
3122 State.set(RepRecipe, Cloned, Lane);
3123
3124 // If we just cloned a new assumption, add it the assumption cache.
3125 if (auto *II = dyn_cast<AssumeInst>(Cloned))
3126 State.AC->registerAssumption(II);
3127
3128 assert(
3129 (RepRecipe->getRegion() ||
3130 !RepRecipe->getParent()->getPlan()->getVectorLoopRegion() ||
3131 all_of(RepRecipe->operands(),
3132 [](VPValue *Op) { return Op->isDefinedOutsideLoopRegions(); })) &&
3133 "Expected a recipe is either within a region or all of its operands "
3134 "are defined outside the vectorized region.");
3135}
3136
3139
3140 if (!State.Lane) {
3141 assert(IsSingleScalar && "VPReplicateRecipes outside replicate regions "
3142 "must have already been unrolled");
3143 scalarizeInstruction(UI, this, VPLane(0), State);
3144 return;
3145 }
3146
3147 assert((State.VF.isScalar() || !isSingleScalar()) &&
3148 "uniform recipe shouldn't be predicated");
3149 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
3150 scalarizeInstruction(UI, this, *State.Lane, State);
3151 // Insert scalar instance packing it into a vector.
3152 if (State.VF.isVector() && shouldPack()) {
3153 Value *WideValue =
3154 State.Lane->isFirstLane()
3155 ? PoisonValue::get(toVectorizedTy(UI->getType(), State.VF))
3156 : State.get(this);
3157 State.set(this, State.packScalarIntoVectorizedValue(this, WideValue,
3158 *State.Lane));
3159 }
3160}
3161
3163 // Find if the recipe is used by a widened recipe via an intervening
3164 // VPPredInstPHIRecipe. In this case, also pack the scalar values in a vector.
3165 return any_of(users(), [](const VPUser *U) {
3166 if (auto *PredR = dyn_cast<VPPredInstPHIRecipe>(U))
3167 return !vputils::onlyScalarValuesUsed(PredR);
3168 return false;
3169 });
3170}
3171
3172/// Returns a SCEV expression for \p Ptr if it is a pointer computation for
3173/// which the legacy cost model computes a SCEV expression when computing the
3174/// address cost. Computing SCEVs for VPValues is incomplete and returns
3175/// SCEVCouldNotCompute in cases the legacy cost model can compute SCEVs. In
3176/// those cases we fall back to the legacy cost model. Otherwise return nullptr.
3177static const SCEV *getAddressAccessSCEV(const VPValue *Ptr,
3179 const Loop *L) {
3180 const SCEV *Addr = vputils::getSCEVExprForVPValue(Ptr, PSE, L);
3181 if (isa<SCEVCouldNotCompute>(Addr))
3182 return Addr;
3183
3184 return vputils::isAddressSCEVForCost(Addr, *PSE.getSE(), L) ? Addr : nullptr;
3185}
3186
3187/// Returns true if \p V is used as part of the address of another load or
3188/// store.
3189static bool isUsedByLoadStoreAddress(const VPUser *V) {
3191 SmallVector<const VPUser *> WorkList = {V};
3192
3193 while (!WorkList.empty()) {
3194 auto *Cur = dyn_cast<VPSingleDefRecipe>(WorkList.pop_back_val());
3195 if (!Cur || !Seen.insert(Cur).second)
3196 continue;
3197
3198 auto *Blend = dyn_cast<VPBlendRecipe>(Cur);
3199 // Skip blends that use V only through a compare by checking if any incoming
3200 // value was already visited.
3201 if (Blend && none_of(seq<unsigned>(0, Blend->getNumIncomingValues()),
3202 [&](unsigned I) {
3203 return Seen.contains(
3204 Blend->getIncomingValue(I)->getDefiningRecipe());
3205 }))
3206 continue;
3207
3208 for (VPUser *U : Cur->users()) {
3209 if (auto *InterleaveR = dyn_cast<VPInterleaveBase>(U))
3210 if (InterleaveR->getAddr() == Cur)
3211 return true;
3212 if (auto *RepR = dyn_cast<VPReplicateRecipe>(U)) {
3213 if (RepR->getOpcode() == Instruction::Load &&
3214 RepR->getOperand(0) == Cur)
3215 return true;
3216 if (RepR->getOpcode() == Instruction::Store &&
3217 RepR->getOperand(1) == Cur)
3218 return true;
3219 }
3220 if (auto *MemR = dyn_cast<VPWidenMemoryRecipe>(U)) {
3221 if (MemR->getAddr() == Cur && MemR->isConsecutive())
3222 return true;
3223 }
3224 }
3225
3226 // The legacy cost model only supports scalarization loads/stores with phi
3227 // addresses, if the phi is directly used as load/store address. Don't
3228 // traverse further for Blends.
3229 if (Blend)
3230 continue;
3231
3232 append_range(WorkList, Cur->users());
3233 }
3234 return false;
3235}
3236
3238 VPCostContext &Ctx) const {
3240 // VPReplicateRecipe may be cloned as part of an existing VPlan-to-VPlan
3241 // transform, avoid computing their cost multiple times for now.
3242 Ctx.SkipCostComputation.insert(UI);
3243
3244 if (VF.isScalable() && !isSingleScalar())
3246
3247 switch (UI->getOpcode()) {
3248 case Instruction::Alloca:
3249 if (VF.isScalable())
3251 return Ctx.TTI.getArithmeticInstrCost(
3252 Instruction::Mul, Ctx.Types.inferScalarType(this), Ctx.CostKind);
3253 case Instruction::GetElementPtr:
3254 // We mark this instruction as zero-cost because the cost of GEPs in
3255 // vectorized code depends on whether the corresponding memory instruction
3256 // is scalarized or not. Therefore, we handle GEPs with the memory
3257 // instruction cost.
3258 return 0;
3259 case Instruction::Call: {
3260 auto *CalledFn =
3262
3265 for (const VPValue *ArgOp : ArgOps)
3266 Tys.push_back(Ctx.Types.inferScalarType(ArgOp));
3267
3268 if (CalledFn->isIntrinsic())
3269 // Various pseudo-intrinsics with costs of 0 are scalarized instead of
3270 // vectorized via VPWidenIntrinsicRecipe. Return 0 for them early.
3271 switch (CalledFn->getIntrinsicID()) {
3272 case Intrinsic::assume:
3273 case Intrinsic::lifetime_end:
3274 case Intrinsic::lifetime_start:
3275 case Intrinsic::sideeffect:
3276 case Intrinsic::pseudoprobe:
3277 case Intrinsic::experimental_noalias_scope_decl: {
3278 assert(getCostForIntrinsics(CalledFn->getIntrinsicID(), ArgOps, *this,
3279 ElementCount::getFixed(1), Ctx) == 0 &&
3280 "scalarizing intrinsic should be free");
3281 return InstructionCost(0);
3282 }
3283 default:
3284 break;
3285 }
3286
3287 Type *ResultTy = Ctx.Types.inferScalarType(this);
3288 InstructionCost ScalarCallCost =
3289 Ctx.TTI.getCallInstrCost(CalledFn, ResultTy, Tys, Ctx.CostKind);
3290 if (isSingleScalar()) {
3291 if (CalledFn->isIntrinsic())
3292 ScalarCallCost = std::min(
3293 ScalarCallCost,
3294 getCostForIntrinsics(CalledFn->getIntrinsicID(), ArgOps, *this,
3295 ElementCount::getFixed(1), Ctx));
3296 return ScalarCallCost;
3297 }
3298
3299 return ScalarCallCost * VF.getFixedValue() +
3300 Ctx.getScalarizationOverhead(ResultTy, ArgOps, VF);
3301 }
3302 case Instruction::Add:
3303 case Instruction::Sub:
3304 case Instruction::FAdd:
3305 case Instruction::FSub:
3306 case Instruction::Mul:
3307 case Instruction::FMul:
3308 case Instruction::FDiv:
3309 case Instruction::FRem:
3310 case Instruction::Shl:
3311 case Instruction::LShr:
3312 case Instruction::AShr:
3313 case Instruction::And:
3314 case Instruction::Or:
3315 case Instruction::Xor:
3316 case Instruction::ICmp:
3317 case Instruction::FCmp:
3319 Ctx) *
3320 (isSingleScalar() ? 1 : VF.getFixedValue());
3321 case Instruction::SDiv:
3322 case Instruction::UDiv:
3323 case Instruction::SRem:
3324 case Instruction::URem: {
3325 InstructionCost ScalarCost =
3327 if (isSingleScalar())
3328 return ScalarCost;
3329
3330 ScalarCost = ScalarCost * VF.getFixedValue() +
3331 Ctx.getScalarizationOverhead(Ctx.Types.inferScalarType(this),
3332 to_vector(operands()), VF);
3333 // If the recipe is not predicated (i.e. not in a replicate region), return
3334 // the scalar cost. Otherwise handle predicated cost.
3335 if (!getRegion()->isReplicator())
3336 return ScalarCost;
3337
3338 // Account for the phi nodes that we will create.
3339 ScalarCost += VF.getFixedValue() *
3340 Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
3341 // Scale the cost by the probability of executing the predicated blocks.
3342 // This assumes the predicated block for each vector lane is equally
3343 // likely.
3344 ScalarCost /= Ctx.getPredBlockCostDivisor(UI->getParent());
3345 return ScalarCost;
3346 }
3347 case Instruction::Load:
3348 case Instruction::Store: {
3349 // TODO: See getMemInstScalarizationCost for how to handle replicating and
3350 // predicated cases.
3351 const VPRegionBlock *ParentRegion = getRegion();
3352 if (ParentRegion && ParentRegion->isReplicator())
3353 break;
3354
3355 bool IsLoad = UI->getOpcode() == Instruction::Load;
3356 const VPValue *PtrOp = getOperand(!IsLoad);
3357 const SCEV *PtrSCEV = getAddressAccessSCEV(PtrOp, Ctx.PSE, Ctx.L);
3359 break;
3360
3361 Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? this : getOperand(0));
3362 Type *ScalarPtrTy = Ctx.Types.inferScalarType(PtrOp);
3363 const Align Alignment = getLoadStoreAlignment(UI);
3364 unsigned AS = cast<PointerType>(ScalarPtrTy)->getAddressSpace();
3366 InstructionCost ScalarMemOpCost = Ctx.TTI.getMemoryOpCost(
3367 UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo);
3368
3369 Type *PtrTy = isSingleScalar() ? ScalarPtrTy : toVectorTy(ScalarPtrTy, VF);
3370 bool PreferVectorizedAddressing = Ctx.TTI.prefersVectorizedAddressing();
3371 bool UsedByLoadStoreAddress =
3372 !PreferVectorizedAddressing && isUsedByLoadStoreAddress(this);
3373 InstructionCost ScalarCost =
3374 ScalarMemOpCost +
3375 Ctx.TTI.getAddressComputationCost(
3376 PtrTy, UsedByLoadStoreAddress ? nullptr : Ctx.PSE.getSE(), PtrSCEV,
3377 Ctx.CostKind);
3378 if (isSingleScalar())
3379 return ScalarCost;
3380
3381 SmallVector<const VPValue *> OpsToScalarize;
3382 Type *ResultTy = Type::getVoidTy(PtrTy->getContext());
3383 // Set ResultTy and OpsToScalarize, if scalarization is needed. Currently we
3384 // don't assign scalarization overhead in general, if the target prefers
3385 // vectorized addressing or the loaded value is used as part of an address
3386 // of another load or store.
3387 if (!UsedByLoadStoreAddress) {
3388 bool EfficientVectorLoadStore =
3389 Ctx.TTI.supportsEfficientVectorElementLoadStore();
3390 if (!(IsLoad && !PreferVectorizedAddressing) &&
3391 !(!IsLoad && EfficientVectorLoadStore))
3392 append_range(OpsToScalarize, operands());
3393
3394 if (!EfficientVectorLoadStore)
3395 ResultTy = Ctx.Types.inferScalarType(this);
3396 }
3397
3398 return (ScalarCost * VF.getFixedValue()) +
3399 Ctx.getScalarizationOverhead(ResultTy, OpsToScalarize, VF, true);
3400 }
3401 case Instruction::SExt:
3402 case Instruction::ZExt:
3403 case Instruction::FPToUI:
3404 case Instruction::FPToSI:
3405 case Instruction::FPExt:
3406 case Instruction::PtrToInt:
3407 case Instruction::PtrToAddr:
3408 case Instruction::IntToPtr:
3409 case Instruction::SIToFP:
3410 case Instruction::UIToFP:
3411 case Instruction::Trunc:
3412 case Instruction::FPTrunc:
3413 case Instruction::AddrSpaceCast: {
3415 Ctx) *
3416 (isSingleScalar() ? 1 : VF.getFixedValue());
3417 }
3418 case Instruction::ExtractValue:
3419 case Instruction::InsertValue:
3420 return Ctx.TTI.getInsertExtractValueCost(getOpcode(), Ctx.CostKind);
3421 }
3422
3423 return Ctx.getLegacyCost(UI, VF);
3424}
3425
3426#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3428 VPSlotTracker &SlotTracker) const {
3429 O << Indent << (IsSingleScalar ? "CLONE " : "REPLICATE ");
3430
3431 if (!getUnderlyingInstr()->getType()->isVoidTy()) {
3433 O << " = ";
3434 }
3435 if (auto *CB = dyn_cast<CallBase>(getUnderlyingInstr())) {
3436 O << "call";
3437 printFlags(O);
3438 O << "@" << CB->getCalledFunction()->getName() << "(";
3440 O, [&O, &SlotTracker](VPValue *Op) {
3441 Op->printAsOperand(O, SlotTracker);
3442 });
3443 O << ")";
3444 } else {
3446 printFlags(O);
3448 }
3449
3450 if (shouldPack())
3451 O << " (S->V)";
3452}
3453#endif
3454
3456 assert(State.Lane && "Branch on Mask works only on single instance.");
3457
3458 VPValue *BlockInMask = getOperand(0);
3459 Value *ConditionBit = State.get(BlockInMask, *State.Lane);
3460
3461 // Replace the temporary unreachable terminator with a new conditional branch,
3462 // whose two destinations will be set later when they are created.
3463 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
3464 assert(isa<UnreachableInst>(CurrentTerminator) &&
3465 "Expected to replace unreachable terminator with conditional branch.");
3466 auto CondBr =
3467 State.Builder.CreateCondBr(ConditionBit, State.CFG.PrevBB, nullptr);
3468 CondBr->setSuccessor(0, nullptr);
3469 CurrentTerminator->eraseFromParent();
3470}
3471
3473 VPCostContext &Ctx) const {
3474 // The legacy cost model doesn't assign costs to branches for individual
3475 // replicate regions. Match the current behavior in the VPlan cost model for
3476 // now.
3477 return 0;
3478}
3479
3481 assert(State.Lane && "Predicated instruction PHI works per instance.");
3482 Instruction *ScalarPredInst =
3483 cast<Instruction>(State.get(getOperand(0), *State.Lane));
3484 BasicBlock *PredicatedBB = ScalarPredInst->getParent();
3485 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
3486 assert(PredicatingBB && "Predicated block has no single predecessor.");
3488 "operand must be VPReplicateRecipe");
3489
3490 // By current pack/unpack logic we need to generate only a single phi node: if
3491 // a vector value for the predicated instruction exists at this point it means
3492 // the instruction has vector users only, and a phi for the vector value is
3493 // needed. In this case the recipe of the predicated instruction is marked to
3494 // also do that packing, thereby "hoisting" the insert-element sequence.
3495 // Otherwise, a phi node for the scalar value is needed.
3496 if (State.hasVectorValue(getOperand(0))) {
3497 auto *VecI = cast<Instruction>(State.get(getOperand(0)));
3499 "Packed operands must generate an insertelement or insertvalue");
3500
3501 // If VectorI is a struct, it will be a sequence like:
3502 // %1 = insertvalue %unmodified, %x, 0
3503 // %2 = insertvalue %1, %y, 1
3504 // %VectorI = insertvalue %2, %z, 2
3505 // To get the unmodified vector we need to look through the chain.
3506 if (auto *StructTy = dyn_cast<StructType>(VecI->getType()))
3507 for (unsigned I = 0; I < StructTy->getNumContainedTypes() - 1; I++)
3508 VecI = cast<InsertValueInst>(VecI->getOperand(0));
3509
3510 PHINode *VPhi = State.Builder.CreatePHI(VecI->getType(), 2);
3511 VPhi->addIncoming(VecI->getOperand(0), PredicatingBB); // Unmodified vector.
3512 VPhi->addIncoming(VecI, PredicatedBB); // New vector with inserted element.
3513 if (State.hasVectorValue(this))
3514 State.reset(this, VPhi);
3515 else
3516 State.set(this, VPhi);
3517 // NOTE: Currently we need to update the value of the operand, so the next
3518 // predicated iteration inserts its generated value in the correct vector.
3519 State.reset(getOperand(0), VPhi);
3520 } else {
3521 if (vputils::onlyFirstLaneUsed(this) && !State.Lane->isFirstLane())
3522 return;
3523
3524 Type *PredInstType = State.TypeAnalysis.inferScalarType(getOperand(0));
3525 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
3526 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
3527 PredicatingBB);
3528 Phi->addIncoming(ScalarPredInst, PredicatedBB);
3529 if (State.hasScalarValue(this, *State.Lane))
3530 State.reset(this, Phi, *State.Lane);
3531 else
3532 State.set(this, Phi, *State.Lane);
3533 // NOTE: Currently we need to update the value of the operand, so the next
3534 // predicated iteration inserts its generated value in the correct vector.
3535 State.reset(getOperand(0), Phi, *State.Lane);
3536 }
3537}
3538
3539#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3541 VPSlotTracker &SlotTracker) const {
3542 O << Indent << "PHI-PREDICATED-INSTRUCTION ";
3544 O << " = ";
3546}
3547#endif
3548
3550 VPCostContext &Ctx) const {
3552 unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
3553 ->getAddressSpace();
3554 unsigned Opcode = isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(this)
3555 ? Instruction::Load
3556 : Instruction::Store;
3557
3558 if (!Consecutive) {
3559 // TODO: Using the original IR may not be accurate.
3560 // Currently, ARM will use the underlying IR to calculate gather/scatter
3561 // instruction cost.
3562 assert(!Reverse &&
3563 "Inconsecutive memory access should not have the order.");
3564
3566 Type *PtrTy = Ptr->getType();
3567
3568 // If the address value is uniform across all lanes, then the address can be
3569 // calculated with scalar type and broadcast.
3571 PtrTy = toVectorTy(PtrTy, VF);
3572
3573 unsigned IID = isa<VPWidenLoadRecipe>(this) ? Intrinsic::masked_gather
3574 : isa<VPWidenStoreRecipe>(this) ? Intrinsic::masked_scatter
3575 : isa<VPWidenLoadEVLRecipe>(this) ? Intrinsic::vp_gather
3576 : Intrinsic::vp_scatter;
3577 return Ctx.TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
3578 Ctx.CostKind) +
3579 Ctx.TTI.getMemIntrinsicInstrCost(
3581 &Ingredient),
3582 Ctx.CostKind);
3583 }
3584
3586 if (IsMasked) {
3587 unsigned IID = isa<VPWidenLoadRecipe>(this) ? Intrinsic::masked_load
3588 : Intrinsic::masked_store;
3589 Cost += Ctx.TTI.getMemIntrinsicInstrCost(
3590 MemIntrinsicCostAttributes(IID, Ty, Alignment, AS), Ctx.CostKind);
3591 } else {
3592 TTI::OperandValueInfo OpInfo = Ctx.getOperandInfo(
3594 : getOperand(1));
3595 Cost += Ctx.TTI.getMemoryOpCost(Opcode, Ty, Alignment, AS, Ctx.CostKind,
3596 OpInfo, &Ingredient);
3597 }
3598 return Cost;
3599}
3600
3602 Type *ScalarDataTy = getLoadStoreType(&Ingredient);
3603 auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
3604 bool CreateGather = !isConsecutive();
3605
3606 auto &Builder = State.Builder;
3607 Value *Mask = nullptr;
3608 if (auto *VPMask = getMask()) {
3609 // Mask reversal is only needed for non-all-one (null) masks, as reverse
3610 // of a null all-one mask is a null mask.
3611 Mask = State.get(VPMask);
3612 if (isReverse())
3613 Mask = Builder.CreateVectorReverse(Mask, "reverse");
3614 }
3615
3616 Value *Addr = State.get(getAddr(), /*IsScalar*/ !CreateGather);
3617 Value *NewLI;
3618 if (CreateGather) {
3619 NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
3620 "wide.masked.gather");
3621 } else if (Mask) {
3622 NewLI =
3623 Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
3624 PoisonValue::get(DataTy), "wide.masked.load");
3625 } else {
3626 NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
3627 }
3629 State.set(this, NewLI);
3630}
3631
3632#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3634 VPSlotTracker &SlotTracker) const {
3635 O << Indent << "WIDEN ";
3637 O << " = load ";
3639}
3640#endif
3641
3642/// Use all-true mask for reverse rather than actual mask, as it avoids a
3643/// dependence w/o affecting the result.
3645 Value *EVL, const Twine &Name) {
3646 VectorType *ValTy = cast<VectorType>(Operand->getType());
3647 Value *AllTrueMask =
3648 Builder.CreateVectorSplat(ValTy->getElementCount(), Builder.getTrue());
3649 return Builder.CreateIntrinsic(ValTy, Intrinsic::experimental_vp_reverse,
3650 {Operand, AllTrueMask, EVL}, nullptr, Name);
3651}
3652
3654 Type *ScalarDataTy = getLoadStoreType(&Ingredient);
3655 auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
3656 bool CreateGather = !isConsecutive();
3657
3658 auto &Builder = State.Builder;
3659 CallInst *NewLI;
3660 Value *EVL = State.get(getEVL(), VPLane(0));
3661 Value *Addr = State.get(getAddr(), !CreateGather);
3662 Value *Mask = nullptr;
3663 if (VPValue *VPMask = getMask()) {
3664 Mask = State.get(VPMask);
3665 if (isReverse())
3666 Mask = createReverseEVL(Builder, Mask, EVL, "vp.reverse.mask");
3667 } else {
3668 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
3669 }
3670
3671 if (CreateGather) {
3672 NewLI =
3673 Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
3674 nullptr, "wide.masked.gather");
3675 } else {
3676 NewLI = Builder.CreateIntrinsic(DataTy, Intrinsic::vp_load,
3677 {Addr, Mask, EVL}, nullptr, "vp.op.load");
3678 }
3679 NewLI->addParamAttr(
3681 applyMetadata(*NewLI);
3682 Instruction *Res = NewLI;
3683 State.set(this, Res);
3684}
3685
3687 VPCostContext &Ctx) const {
3688 if (!Consecutive || IsMasked)
3689 return VPWidenMemoryRecipe::computeCost(VF, Ctx);
3690
3691 // We need to use the getMemIntrinsicInstrCost() instead of getMemoryOpCost()
3692 // here because the EVL recipes using EVL to replace the tail mask. But in the
3693 // legacy model, it will always calculate the cost of mask.
3694 // TODO: Using getMemoryOpCost() instead of getMemIntrinsicInstrCost when we
3695 // don't need to compare to the legacy cost model.
3697 unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
3698 ->getAddressSpace();
3699 return Ctx.TTI.getMemIntrinsicInstrCost(
3700 MemIntrinsicCostAttributes(Intrinsic::vp_load, Ty, Alignment, AS),
3701 Ctx.CostKind);
3702}
3703
3704#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3706 VPSlotTracker &SlotTracker) const {
3707 O << Indent << "WIDEN ";
3709 O << " = vp.load ";
3711}
3712#endif
3713
3715 VPValue *StoredVPValue = getStoredValue();
3716 bool CreateScatter = !isConsecutive();
3717
3718 auto &Builder = State.Builder;
3719
3720 Value *Mask = nullptr;
3721 if (auto *VPMask = getMask()) {
3722 // Mask reversal is only needed for non-all-one (null) masks, as reverse
3723 // of a null all-one mask is a null mask.
3724 Mask = State.get(VPMask);
3725 if (isReverse())
3726 Mask = Builder.CreateVectorReverse(Mask, "reverse");
3727 }
3728
3729 Value *StoredVal = State.get(StoredVPValue);
3730 Value *Addr = State.get(getAddr(), /*IsScalar*/ !CreateScatter);
3731 Instruction *NewSI = nullptr;
3732 if (CreateScatter)
3733 NewSI = Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
3734 else if (Mask)
3735 NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
3736 else
3737 NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
3738 applyMetadata(*NewSI);
3739}
3740
3741#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3743 VPSlotTracker &SlotTracker) const {
3744 O << Indent << "WIDEN store ";
3746}
3747#endif
3748
3750 VPValue *StoredValue = getStoredValue();
3751 bool CreateScatter = !isConsecutive();
3752
3753 auto &Builder = State.Builder;
3754
3755 CallInst *NewSI = nullptr;
3756 Value *StoredVal = State.get(StoredValue);
3757 Value *EVL = State.get(getEVL(), VPLane(0));
3758 Value *Mask = nullptr;
3759 if (VPValue *VPMask = getMask()) {
3760 Mask = State.get(VPMask);
3761 if (isReverse())
3762 Mask = createReverseEVL(Builder, Mask, EVL, "vp.reverse.mask");
3763 } else {
3764 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
3765 }
3766 Value *Addr = State.get(getAddr(), !CreateScatter);
3767 if (CreateScatter) {
3768 NewSI = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
3769 Intrinsic::vp_scatter,
3770 {StoredVal, Addr, Mask, EVL});
3771 } else {
3772 NewSI = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
3773 Intrinsic::vp_store,
3774 {StoredVal, Addr, Mask, EVL});
3775 }
3776 NewSI->addParamAttr(
3778 applyMetadata(*NewSI);
3779}
3780
3782 VPCostContext &Ctx) const {
3783 if (!Consecutive || IsMasked)
3784 return VPWidenMemoryRecipe::computeCost(VF, Ctx);
3785
3786 // We need to use the getMemIntrinsicInstrCost() instead of getMemoryOpCost()
3787 // here because the EVL recipes using EVL to replace the tail mask. But in the
3788 // legacy model, it will always calculate the cost of mask.
3789 // TODO: Using getMemoryOpCost() instead of getMemIntrinsicInstrCost when we
3790 // don't need to compare to the legacy cost model.
3792 unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
3793 ->getAddressSpace();
3794 return Ctx.TTI.getMemIntrinsicInstrCost(
3795 MemIntrinsicCostAttributes(Intrinsic::vp_store, Ty, Alignment, AS),
3796 Ctx.CostKind);
3797}
3798
3799#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3801 VPSlotTracker &SlotTracker) const {
3802 O << Indent << "WIDEN vp.store ";
3804}
3805#endif
3806
3808 VectorType *DstVTy, const DataLayout &DL) {
3809 // Verify that V is a vector type with same number of elements as DstVTy.
3810 auto VF = DstVTy->getElementCount();
3811 auto *SrcVecTy = cast<VectorType>(V->getType());
3812 assert(VF == SrcVecTy->getElementCount() && "Vector dimensions do not match");
3813 Type *SrcElemTy = SrcVecTy->getElementType();
3814 Type *DstElemTy = DstVTy->getElementType();
3815 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3816 "Vector elements must have same size");
3817
3818 // Do a direct cast if element types are castable.
3819 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3820 return Builder.CreateBitOrPointerCast(V, DstVTy);
3821 }
3822 // V cannot be directly casted to desired vector type.
3823 // May happen when V is a floating point vector but DstVTy is a vector of
3824 // pointers or vice-versa. Handle this using a two-step bitcast using an
3825 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
3826 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3827 "Only one type should be a pointer type");
3828 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3829 "Only one type should be a floating point type");
3830 Type *IntTy =
3831 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3832 auto *VecIntTy = VectorType::get(IntTy, VF);
3833 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3834 return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
3835}
3836
3837/// Return a vector containing interleaved elements from multiple
3838/// smaller input vectors.
3840 const Twine &Name) {
3841 unsigned Factor = Vals.size();
3842 assert(Factor > 1 && "Tried to interleave invalid number of vectors");
3843
3844 VectorType *VecTy = cast<VectorType>(Vals[0]->getType());
3845#ifndef NDEBUG
3846 for (Value *Val : Vals)
3847 assert(Val->getType() == VecTy && "Tried to interleave mismatched types");
3848#endif
3849
3850 // Scalable vectors cannot use arbitrary shufflevectors (only splats), so
3851 // must use intrinsics to interleave.
3852 if (VecTy->isScalableTy()) {
3853 assert(Factor <= 8 && "Unsupported interleave factor for scalable vectors");
3854 return Builder.CreateVectorInterleave(Vals, Name);
3855 }
3856
3857 // Fixed length. Start by concatenating all vectors into a wide vector.
3858 Value *WideVec = concatenateVectors(Builder, Vals);
3859
3860 // Interleave the elements into the wide vector.
3861 const unsigned NumElts = VecTy->getElementCount().getFixedValue();
3862 return Builder.CreateShuffleVector(
3863 WideVec, createInterleaveMask(NumElts, Factor), Name);
3864}
3865
3866// Try to vectorize the interleave group that \p Instr belongs to.
3867//
3868// E.g. Translate following interleaved load group (factor = 3):
3869// for (i = 0; i < N; i+=3) {
3870// R = Pic[i]; // Member of index 0
3871// G = Pic[i+1]; // Member of index 1
3872// B = Pic[i+2]; // Member of index 2
3873// ... // do something to R, G, B
3874// }
3875// To:
3876// %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
3877// %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements
3878// %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
3879// %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
3880//
3881// Or translate following interleaved store group (factor = 3):
3882// for (i = 0; i < N; i+=3) {
3883// ... do something to R, G, B
3884// Pic[i] = R; // Member of index 0
3885// Pic[i+1] = G; // Member of index 1
3886// Pic[i+2] = B; // Member of index 2
3887// }
3888// To:
3889// %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
3890// %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
3891// %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
3892// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
3893// store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
3895 assert(!State.Lane && "Interleave group being replicated.");
3896 assert((!needsMaskForGaps() || !State.VF.isScalable()) &&
3897 "Masking gaps for scalable vectors is not yet supported.");
3899 Instruction *Instr = Group->getInsertPos();
3900
3901 // Prepare for the vector type of the interleaved load/store.
3902 Type *ScalarTy = getLoadStoreType(Instr);
3903 unsigned InterleaveFactor = Group->getFactor();
3904 auto *VecTy = VectorType::get(ScalarTy, State.VF * InterleaveFactor);
3905
3906 VPValue *BlockInMask = getMask();
3907 VPValue *Addr = getAddr();
3908 Value *ResAddr = State.get(Addr, VPLane(0));
3909
3910 auto CreateGroupMask = [&BlockInMask, &State,
3911 &InterleaveFactor](Value *MaskForGaps) -> Value * {
3912 if (State.VF.isScalable()) {
3913 assert(!MaskForGaps && "Interleaved groups with gaps are not supported.");
3914 assert(InterleaveFactor <= 8 &&
3915 "Unsupported deinterleave factor for scalable vectors");
3916 auto *ResBlockInMask = State.get(BlockInMask);
3917 SmallVector<Value *> Ops(InterleaveFactor, ResBlockInMask);
3918 return interleaveVectors(State.Builder, Ops, "interleaved.mask");
3919 }
3920
3921 if (!BlockInMask)
3922 return MaskForGaps;
3923
3924 Value *ResBlockInMask = State.get(BlockInMask);
3925 Value *ShuffledMask = State.Builder.CreateShuffleVector(
3926 ResBlockInMask,
3927 createReplicatedMask(InterleaveFactor, State.VF.getFixedValue()),
3928 "interleaved.mask");
3929 return MaskForGaps ? State.Builder.CreateBinOp(Instruction::And,
3930 ShuffledMask, MaskForGaps)
3931 : ShuffledMask;
3932 };
3933
3934 const DataLayout &DL = Instr->getDataLayout();
3935 // Vectorize the interleaved load group.
3936 if (isa<LoadInst>(Instr)) {
3937 Value *MaskForGaps = nullptr;
3938 if (needsMaskForGaps()) {
3939 MaskForGaps =
3940 createBitMaskForGaps(State.Builder, State.VF.getFixedValue(), *Group);
3941 assert(MaskForGaps && "Mask for Gaps is required but it is null");
3942 }
3943
3944 Instruction *NewLoad;
3945 if (BlockInMask || MaskForGaps) {
3946 Value *GroupMask = CreateGroupMask(MaskForGaps);
3947 Value *PoisonVec = PoisonValue::get(VecTy);
3948 NewLoad = State.Builder.CreateMaskedLoad(VecTy, ResAddr,
3949 Group->getAlign(), GroupMask,
3950 PoisonVec, "wide.masked.vec");
3951 } else
3952 NewLoad = State.Builder.CreateAlignedLoad(VecTy, ResAddr,
3953 Group->getAlign(), "wide.vec");
3954 applyMetadata(*NewLoad);
3955 // TODO: Also manage existing metadata using VPIRMetadata.
3956 Group->addMetadata(NewLoad);
3957
3959 if (VecTy->isScalableTy()) {
3960 // Scalable vectors cannot use arbitrary shufflevectors (only splats),
3961 // so must use intrinsics to deinterleave.
3962 assert(InterleaveFactor <= 8 &&
3963 "Unsupported deinterleave factor for scalable vectors");
3964 NewLoad = State.Builder.CreateIntrinsic(
3965 Intrinsic::getDeinterleaveIntrinsicID(InterleaveFactor),
3966 NewLoad->getType(), NewLoad,
3967 /*FMFSource=*/nullptr, "strided.vec");
3968 }
3969
3970 auto CreateStridedVector = [&InterleaveFactor, &State,
3971 &NewLoad](unsigned Index) -> Value * {
3972 assert(Index < InterleaveFactor && "Illegal group index");
3973 if (State.VF.isScalable())
3974 return State.Builder.CreateExtractValue(NewLoad, Index);
3975
3976 // For fixed length VF, use shuffle to extract the sub-vectors from the
3977 // wide load.
3978 auto StrideMask =
3979 createStrideMask(Index, InterleaveFactor, State.VF.getFixedValue());
3980 return State.Builder.CreateShuffleVector(NewLoad, StrideMask,
3981 "strided.vec");
3982 };
3983
3984 for (unsigned I = 0, J = 0; I < InterleaveFactor; ++I) {
3985 Instruction *Member = Group->getMember(I);
3986
3987 // Skip the gaps in the group.
3988 if (!Member)
3989 continue;
3990
3991 Value *StridedVec = CreateStridedVector(I);
3992
3993 // If this member has different type, cast the result type.
3994 if (Member->getType() != ScalarTy) {
3995 VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
3996 StridedVec =
3997 createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
3998 }
3999
4000 if (Group->isReverse())
4001 StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse");
4002
4003 State.set(VPDefs[J], StridedVec);
4004 ++J;
4005 }
4006 return;
4007 }
4008
4009 // The sub vector type for current instruction.
4010 auto *SubVT = VectorType::get(ScalarTy, State.VF);
4011
4012 // Vectorize the interleaved store group.
4013 Value *MaskForGaps =
4014 createBitMaskForGaps(State.Builder, State.VF.getKnownMinValue(), *Group);
4015 assert(((MaskForGaps != nullptr) == needsMaskForGaps()) &&
4016 "Mismatch between NeedsMaskForGaps and MaskForGaps");
4017 ArrayRef<VPValue *> StoredValues = getStoredValues();
4018 // Collect the stored vector from each member.
4019 SmallVector<Value *, 4> StoredVecs;
4020 unsigned StoredIdx = 0;
4021 for (unsigned i = 0; i < InterleaveFactor; i++) {
4022 assert((Group->getMember(i) || MaskForGaps) &&
4023 "Fail to get a member from an interleaved store group");
4024 Instruction *Member = Group->getMember(i);
4025
4026 // Skip the gaps in the group.
4027 if (!Member) {
4028 Value *Undef = PoisonValue::get(SubVT);
4029 StoredVecs.push_back(Undef);
4030 continue;
4031 }
4032
4033 Value *StoredVec = State.get(StoredValues[StoredIdx]);
4034 ++StoredIdx;
4035
4036 if (Group->isReverse())
4037 StoredVec = State.Builder.CreateVectorReverse(StoredVec, "reverse");
4038
4039 // If this member has different type, cast it to a unified type.
4040
4041 if (StoredVec->getType() != SubVT)
4042 StoredVec = createBitOrPointerCast(State.Builder, StoredVec, SubVT, DL);
4043
4044 StoredVecs.push_back(StoredVec);
4045 }
4046
4047 // Interleave all the smaller vectors into one wider vector.
4048 Value *IVec = interleaveVectors(State.Builder, StoredVecs, "interleaved.vec");
4049 Instruction *NewStoreInstr;
4050 if (BlockInMask || MaskForGaps) {
4051 Value *GroupMask = CreateGroupMask(MaskForGaps);
4052 NewStoreInstr = State.Builder.CreateMaskedStore(
4053 IVec, ResAddr, Group->getAlign(), GroupMask);
4054 } else
4055 NewStoreInstr =
4056 State.Builder.CreateAlignedStore(IVec, ResAddr, Group->getAlign());
4057
4058 applyMetadata(*NewStoreInstr);
4059 // TODO: Also manage existing metadata using VPIRMetadata.
4060 Group->addMetadata(NewStoreInstr);
4061}
4062
4063#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4065 VPSlotTracker &SlotTracker) const {
4067 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
4068 IG->getInsertPos()->printAsOperand(O, false);
4069 O << ", ";
4071 VPValue *Mask = getMask();
4072 if (Mask) {
4073 O << ", ";
4074 Mask->printAsOperand(O, SlotTracker);
4075 }
4076
4077 unsigned OpIdx = 0;
4078 for (unsigned i = 0; i < IG->getFactor(); ++i) {
4079 if (!IG->getMember(i))
4080 continue;
4081 if (getNumStoreOperands() > 0) {
4082 O << "\n" << Indent << " store ";
4084 O << " to index " << i;
4085 } else {
4086 O << "\n" << Indent << " ";
4088 O << " = load from index " << i;
4089 }
4090 ++OpIdx;
4091 }
4092}
4093#endif
4094
4096 assert(!State.Lane && "Interleave group being replicated.");
4097 assert(State.VF.isScalable() &&
4098 "Only support scalable VF for EVL tail-folding.");
4100 "Masking gaps for scalable vectors is not yet supported.");
4102 Instruction *Instr = Group->getInsertPos();
4103
4104 // Prepare for the vector type of the interleaved load/store.
4105 Type *ScalarTy = getLoadStoreType(Instr);
4106 unsigned InterleaveFactor = Group->getFactor();
4107 assert(InterleaveFactor <= 8 &&
4108 "Unsupported deinterleave/interleave factor for scalable vectors");
4109 ElementCount WideVF = State.VF * InterleaveFactor;
4110 auto *VecTy = VectorType::get(ScalarTy, WideVF);
4111
4112 VPValue *Addr = getAddr();
4113 Value *ResAddr = State.get(Addr, VPLane(0));
4114 Value *EVL = State.get(getEVL(), VPLane(0));
4115 Value *InterleaveEVL = State.Builder.CreateMul(
4116 EVL, ConstantInt::get(EVL->getType(), InterleaveFactor), "interleave.evl",
4117 /* NUW= */ true, /* NSW= */ true);
4118 LLVMContext &Ctx = State.Builder.getContext();
4119
4120 Value *GroupMask = nullptr;
4121 if (VPValue *BlockInMask = getMask()) {
4122 SmallVector<Value *> Ops(InterleaveFactor, State.get(BlockInMask));
4123 GroupMask = interleaveVectors(State.Builder, Ops, "interleaved.mask");
4124 } else {
4125 GroupMask =
4126 State.Builder.CreateVectorSplat(WideVF, State.Builder.getTrue());
4127 }
4128
4129 // Vectorize the interleaved load group.
4130 if (isa<LoadInst>(Instr)) {
4131 CallInst *NewLoad = State.Builder.CreateIntrinsic(
4132 VecTy, Intrinsic::vp_load, {ResAddr, GroupMask, InterleaveEVL}, nullptr,
4133 "wide.vp.load");
4134 NewLoad->addParamAttr(0,
4135 Attribute::getWithAlignment(Ctx, Group->getAlign()));
4136
4137 applyMetadata(*NewLoad);
4138 // TODO: Also manage existing metadata using VPIRMetadata.
4139 Group->addMetadata(NewLoad);
4140
4141 // Scalable vectors cannot use arbitrary shufflevectors (only splats),
4142 // so must use intrinsics to deinterleave.
4143 NewLoad = State.Builder.CreateIntrinsic(
4144 Intrinsic::getDeinterleaveIntrinsicID(InterleaveFactor),
4145 NewLoad->getType(), NewLoad,
4146 /*FMFSource=*/nullptr, "strided.vec");
4147
4148 const DataLayout &DL = Instr->getDataLayout();
4149 for (unsigned I = 0, J = 0; I < InterleaveFactor; ++I) {
4150 Instruction *Member = Group->getMember(I);
4151 // Skip the gaps in the group.
4152 if (!Member)
4153 continue;
4154
4155 Value *StridedVec = State.Builder.CreateExtractValue(NewLoad, I);
4156 // If this member has different type, cast the result type.
4157 if (Member->getType() != ScalarTy) {
4158 VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
4159 StridedVec =
4160 createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
4161 }
4162
4163 State.set(getVPValue(J), StridedVec);
4164 ++J;
4165 }
4166 return;
4167 } // End for interleaved load.
4168
4169 // The sub vector type for current instruction.
4170 auto *SubVT = VectorType::get(ScalarTy, State.VF);
4171 // Vectorize the interleaved store group.
4172 ArrayRef<VPValue *> StoredValues = getStoredValues();
4173 // Collect the stored vector from each member.
4174 SmallVector<Value *, 4> StoredVecs;
4175 const DataLayout &DL = Instr->getDataLayout();
4176 for (unsigned I = 0, StoredIdx = 0; I < InterleaveFactor; I++) {
4177 Instruction *Member = Group->getMember(I);
4178 // Skip the gaps in the group.
4179 if (!Member) {
4180 StoredVecs.push_back(PoisonValue::get(SubVT));
4181 continue;
4182 }
4183
4184 Value *StoredVec = State.get(StoredValues[StoredIdx]);
4185 // If this member has different type, cast it to a unified type.
4186 if (StoredVec->getType() != SubVT)
4187 StoredVec = createBitOrPointerCast(State.Builder, StoredVec, SubVT, DL);
4188
4189 StoredVecs.push_back(StoredVec);
4190 ++StoredIdx;
4191 }
4192
4193 // Interleave all the smaller vectors into one wider vector.
4194 Value *IVec = interleaveVectors(State.Builder, StoredVecs, "interleaved.vec");
4195 CallInst *NewStore =
4196 State.Builder.CreateIntrinsic(Type::getVoidTy(Ctx), Intrinsic::vp_store,
4197 {IVec, ResAddr, GroupMask, InterleaveEVL});
4198 NewStore->addParamAttr(1,
4199 Attribute::getWithAlignment(Ctx, Group->getAlign()));
4200
4201 applyMetadata(*NewStore);
4202 // TODO: Also manage existing metadata using VPIRMetadata.
4203 Group->addMetadata(NewStore);
4204}
4205
4206#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4208 VPSlotTracker &SlotTracker) const {
4210 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
4211 IG->getInsertPos()->printAsOperand(O, false);
4212 O << ", ";
4214 O << ", ";
4216 if (VPValue *Mask = getMask()) {
4217 O << ", ";
4218 Mask->printAsOperand(O, SlotTracker);
4219 }
4220
4221 unsigned OpIdx = 0;
4222 for (unsigned i = 0; i < IG->getFactor(); ++i) {
4223 if (!IG->getMember(i))
4224 continue;
4225 if (getNumStoreOperands() > 0) {
4226 O << "\n" << Indent << " vp.store ";
4228 O << " to index " << i;
4229 } else {
4230 O << "\n" << Indent << " ";
4232 O << " = vp.load from index " << i;
4233 }
4234 ++OpIdx;
4235 }
4236}
4237#endif
4238
4240 VPCostContext &Ctx) const {
4241 Instruction *InsertPos = getInsertPos();
4242 // Find the VPValue index of the interleave group. We need to skip gaps.
4243 unsigned InsertPosIdx = 0;
4244 for (unsigned Idx = 0; IG->getFactor(); ++Idx)
4245 if (auto *Member = IG->getMember(Idx)) {
4246 if (Member == InsertPos)
4247 break;
4248 InsertPosIdx++;
4249 }
4250 Type *ValTy = Ctx.Types.inferScalarType(
4251 getNumDefinedValues() > 0 ? getVPValue(InsertPosIdx)
4252 : getStoredValues()[InsertPosIdx]);
4253 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4254 unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
4255 ->getAddressSpace();
4256
4257 unsigned InterleaveFactor = IG->getFactor();
4258 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
4259
4260 // Holds the indices of existing members in the interleaved group.
4262 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
4263 if (IG->getMember(IF))
4264 Indices.push_back(IF);
4265
4266 // Calculate the cost of the whole interleaved group.
4267 InstructionCost Cost = Ctx.TTI.getInterleavedMemoryOpCost(
4268 InsertPos->getOpcode(), WideVecTy, IG->getFactor(), Indices,
4269 IG->getAlign(), AS, Ctx.CostKind, getMask(), NeedsMaskForGaps);
4270
4271 if (!IG->isReverse())
4272 return Cost;
4273
4274 return Cost + IG->getNumMembers() *
4275 Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
4276 VectorTy, VectorTy, {}, Ctx.CostKind,
4277 0);
4278}
4279
4280#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4282 VPSlotTracker &SlotTracker) const {
4283 O << Indent << "EMIT ";
4285 O << " = CANONICAL-INDUCTION ";
4287}
4288#endif
4289
4291 return vputils::onlyScalarValuesUsed(this) &&
4292 (!IsScalable || vputils::onlyFirstLaneUsed(this));
4293}
4294
4295#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4297 raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const {
4298 assert((getNumOperands() == 3 || getNumOperands() == 5) &&
4299 "unexpected number of operands");
4300 O << Indent << "EMIT ";
4302 O << " = WIDEN-POINTER-INDUCTION ";
4304 O << ", ";
4306 O << ", ";
4308 if (getNumOperands() == 5) {
4309 O << ", ";
4311 O << ", ";
4313 }
4314}
4315
4317 VPSlotTracker &SlotTracker) const {
4318 O << Indent << "EMIT ";
4320 O << " = EXPAND SCEV " << *Expr;
4321}
4322#endif
4323
4325 Value *CanonicalIV = State.get(getOperand(0), /*IsScalar*/ true);
4326 Type *STy = CanonicalIV->getType();
4327 IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
4328 ElementCount VF = State.VF;
4329 Value *VStart = VF.isScalar()
4330 ? CanonicalIV
4331 : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
4332 Value *VStep = createStepForVF(Builder, STy, VF, getUnrollPart(*this));
4333 if (VF.isVector()) {
4334 VStep = Builder.CreateVectorSplat(VF, VStep);
4335 VStep =
4336 Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType()));
4337 }
4338 Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
4339 State.set(this, CanonicalVectorIV);
4340}
4341
4342#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4344 VPSlotTracker &SlotTracker) const {
4345 O << Indent << "EMIT ";
4347 O << " = WIDEN-CANONICAL-INDUCTION ";
4349}
4350#endif
4351
4353 auto &Builder = State.Builder;
4354 // Create a vector from the initial value.
4355 auto *VectorInit = getStartValue()->getLiveInIRValue();
4356
4357 Type *VecTy = State.VF.isScalar()
4358 ? VectorInit->getType()
4359 : VectorType::get(VectorInit->getType(), State.VF);
4360
4361 BasicBlock *VectorPH =
4362 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4363 if (State.VF.isVector()) {
4364 auto *IdxTy = Builder.getInt32Ty();
4365 auto *One = ConstantInt::get(IdxTy, 1);
4366 IRBuilder<>::InsertPointGuard Guard(Builder);
4367 Builder.SetInsertPoint(VectorPH->getTerminator());
4368 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF);
4369 auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4370 VectorInit = Builder.CreateInsertElement(
4371 PoisonValue::get(VecTy), VectorInit, LastIdx, "vector.recur.init");
4372 }
4373
4374 // Create a phi node for the new recurrence.
4375 PHINode *Phi = PHINode::Create(VecTy, 2, "vector.recur");
4376 Phi->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
4377 Phi->addIncoming(VectorInit, VectorPH);
4378 State.set(this, Phi);
4379}
4380
4383 VPCostContext &Ctx) const {
4384 if (VF.isScalar())
4385 return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
4386
4387 return 0;
4388}
4389
4390#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4392 raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const {
4393 O << Indent << "FIRST-ORDER-RECURRENCE-PHI ";
4395 O << " = phi ";
4397}
4398#endif
4399
4401 // Reductions do not have to start at zero. They can start with
4402 // any loop invariant values.
4403 VPValue *StartVPV = getStartValue();
4404
4405 // In order to support recurrences we need to be able to vectorize Phi nodes.
4406 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4407 // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4408 // this value when we vectorize all of the instructions that use the PHI.
4409 BasicBlock *VectorPH =
4410 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4411 bool ScalarPHI = State.VF.isScalar() || isInLoop();
4412 Value *StartV = State.get(StartVPV, ScalarPHI);
4413 Type *VecTy = StartV->getType();
4414
4415 BasicBlock *HeaderBB = State.CFG.PrevBB;
4416 assert(State.CurrentParentLoop->getHeader() == HeaderBB &&
4417 "recipe must be in the vector loop header");
4418 auto *Phi = PHINode::Create(VecTy, 2, "vec.phi");
4419 Phi->insertBefore(HeaderBB->getFirstInsertionPt());
4420 State.set(this, Phi, isInLoop());
4421
4422 Phi->addIncoming(StartV, VectorPH);
4423}
4424
4425#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4427 VPSlotTracker &SlotTracker) const {
4428 O << Indent << "WIDEN-REDUCTION-PHI ";
4429
4431 O << " = phi ";
4433 if (getVFScaleFactor() > 1)
4434 O << " (VF scaled by 1/" << getVFScaleFactor() << ")";
4435}
4436#endif
4437
4439 Value *Op0 = State.get(getOperand(0));
4440 Type *VecTy = Op0->getType();
4441 Instruction *VecPhi = State.Builder.CreatePHI(VecTy, 2, Name);
4442 State.set(this, VecPhi);
4443}
4444
4445#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4447 VPSlotTracker &SlotTracker) const {
4448 O << Indent << "WIDEN-PHI ";
4449
4451 O << " = phi ";
4453}
4454#endif
4455
4457 BasicBlock *VectorPH =
4458 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4459 Value *StartMask = State.get(getOperand(0));
4460 PHINode *Phi =
4461 State.Builder.CreatePHI(StartMask->getType(), 2, "active.lane.mask");
4462 Phi->addIncoming(StartMask, VectorPH);
4463 State.set(this, Phi);
4464}
4465
4466#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4468 VPSlotTracker &SlotTracker) const {
4469 O << Indent << "ACTIVE-LANE-MASK-PHI ";
4470
4472 O << " = phi ";
4474}
4475#endif
4476
4477#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4479 VPSlotTracker &SlotTracker) const {
4480 O << Indent << "EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI ";
4481
4483 O << " = phi ";
4485}
4486#endif
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
AMDGPU Lower Kernel Arguments
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
iv users
Definition IVUsers.cpp:48
static std::pair< Value *, APInt > getMask(Value *WideMask, unsigned Factor, ElementCount LeafValueEC)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
This file provides a LoopVectorizationPlanner class.
static const SCEV * getAddressAccessSCEV(Value *Ptr, LoopVectorizationLegality *Legal, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets Address Access SCEV after verifying that the access pattern is loop invariant except the inducti...
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
static bool isOrdered(const Instruction *I)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file contains the declarations of different VPlan-related auxiliary helpers.
static Instruction * createReverseEVL(IRBuilderBase &Builder, Value *Operand, Value *EVL, const Twine &Name)
Use all-true mask for reverse rather than actual mask, as it avoids a dependence w/o affecting the re...
static Value * interleaveVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vals, const Twine &Name)
Return a vector containing interleaved elements from multiple smaller input vectors.
static InstructionCost getCostForIntrinsics(Intrinsic::ID ID, ArrayRef< const VPValue * > Operands, const VPRecipeWithIRFlags &R, ElementCount VF, VPCostContext &Ctx)
Compute the cost for the intrinsic ID with Operands, produced by R.
static Value * createBitOrPointerCast(IRBuilderBase &Builder, Value *V, VectorType *DstVTy, const DataLayout &DL)
SmallVector< Value *, 2 > VectorParts
static bool isUsedByLoadStoreAddress(const VPUser *V)
Returns true if V is used as part of the address of another load or store.
static void scalarizeInstruction(const Instruction *Instr, VPReplicateRecipe *RepRecipe, const VPLane &Lane, VPTransformState &State)
A helper function to scalarize a single Instruction in the innermost loop.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
This file contains the declarations of the Vectorization Plan base classes:
static const uint32_t IV[8]
Definition blake3_impl.h:83
void printAsOperand(OutputBuffer &OB, Prec P=Prec::Default, bool StrictlyWorse=false) const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static LLVM_ABI Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:982
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
static LLVM_ABI StringRef getPredicateName(Predicate P)
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
Definition Constants.h:135
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:324
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:312
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
LLVM_ABI void print(raw_ostream &O) const
Print fast-math flags to O.
Definition Operator.cpp:272
void setAllowContract(bool B=true)
Definition FMF.h:90
bool noSignedZeros() const
Definition FMF.h:67
bool noInfs() const
Definition FMF.h:66
void setAllowReciprocal(bool B=true)
Definition FMF.h:87
bool allowReciprocal() const
Definition FMF.h:68
void setNoSignedZeros(bool B=true)
Definition FMF.h:84
bool allowReassoc() const
Flag queries.
Definition FMF.h:64
bool approxFunc() const
Definition FMF.h:70
void setNoNaNs(bool B=true)
Definition FMF.h:78
void setAllowReassoc(bool B=true)
Flag setters.
Definition FMF.h:75
bool noNaNs() const
Definition FMF.h:65
void setApproxFunc(bool B=true)
Definition FMF.h:93
void setNoInfs(bool B=true)
Definition FMF.h:81
bool allowContract() const
Definition FMF.h:69
Class to represent function types.
Type * getParamType(unsigned i) const
Parameter type accessors.
bool willReturn() const
Determine if the function will return.
Definition Function.h:661
bool doesNotThrow() const
Determine if the function cannot unwind.
Definition Function.h:594
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Definition IRBuilder.h:2585
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition IRBuilder.h:547
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition IRBuilder.h:2639
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition IRBuilder.h:2573
LLVM_ABI Value * CreateVectorSplice(Value *V1, Value *V2, int64_t Imm, const Twine &Name="")
Return a vector splice intrinsic if using scalable vectors, otherwise return a shufflevector.
LLVM_ABI Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition IRBuilder.h:2632
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition IRBuilder.h:2651
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:562
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2039
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition IRBuilder.h:345
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition IRBuilder.h:567
LLVM_ABI Value * CreateVectorReverse(Value *V, const Twine &Name="")
Return a vector value that contains the vector V reversed.
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2336
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
Definition IRBuilder.h:527
LLVM_ABI CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
Value * CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition IRBuilder.h:1725
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition IRBuilder.h:522
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2466
Value * CreateNot(Value *V, const Twine &Name="")
Definition IRBuilder.h:1808
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2332
Value * CreateCountTrailingZeroElems(Type *ResTy, Value *Mask, bool ZeroIsPoison=true, const Twine &Name="")
Create a call to llvm.experimental_cttz_elts.
Definition IRBuilder.h:1134
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1420
BranchInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Definition IRBuilder.h:1197
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition IRBuilder.h:2085
LLVM_ABI CallInst * CreateIntMaxReduce(Value *Src, bool IsSigned=false)
Create a vector integer max reduction intrinsic of the source vector.
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1403
ConstantInt * getFalse()
Get the constant value for i1 false.
Definition IRBuilder.h:507
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:1708
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2344
LLVM_ABI CallInst * CreateIntMinReduce(Value *Src, bool IsSigned=false)
Create a vector integer min reduction intrinsic of the source vector.
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2442
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1573
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1437
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2794
static InstructionCost getInvalid(CostType Val=0)
bool isCast() const
bool isBinaryOp() const
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isUnaryOp() const
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
bool isReverse() const
InstTy * getInsertPos() const
void addMetadata(InstTy *NewInst) const
Add metadata (e.g.
Align getAlign() const
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Information for memory intrinsic cost model.
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
static bool isSignedRecurrenceKind(RecurKind Kind)
Returns true if recurrece kind is a signed redux kind.
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isFindLastIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
This class represents an analyzed expression in the program.
This class provides computation of slot numbers for LLVM Assembly writing.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
LLVM_ABI InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo Op1Info={OK_AnyValue, OP_None}, OperandValueInfo Op2Info={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
static LLVM_ABI PartialReductionExtendKind getPartialReductionExtendKind(Instruction *I)
Get the kind of extension that an instruction represents.
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
LLVM_ABI InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr, const TargetLibraryInfo *TLibInfo=nullptr) const
This is an approximation of reciprocal throughput of a math/logic op.
@ TCC_Free
Expected to fold away in lowering.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Reverse
Reverse the order of the vector.
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:297
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:280
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:300
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
value_op_iterator value_op_end()
Definition User.h:314
void setOperand(unsigned i, Value *Val)
Definition User.h:238
Value * getOperand(unsigned i) const
Definition User.h:233
value_op_iterator value_op_begin()
Definition User.h:311
void execute(VPTransformState &State) override
Generate the active lane mask phi of the vector loop.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
RecipeListTy & getRecipeList()
Returns a reference to the list of recipes.
Definition VPlan.h:4002
iterator end()
Definition VPlan.h:3986
void insert(VPRecipeBase *Recipe, iterator InsertPt)
Definition VPlan.h:4015
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenMemoryRecipe.
VPValue * getIncomingValue(unsigned Idx) const
Return incoming value number Idx.
Definition VPlan.h:2528
unsigned getNumIncomingValues() const
Return the number of incoming values, taking into account when normalized the first incoming value wi...
Definition VPlan.h:2523
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
Definition VPlan.h:81
const VPBlocksTy & getPredecessors() const
Definition VPlan.h:204
VPlan * getPlan()
Definition VPlan.cpp:173
void printAsOperand(raw_ostream &OS, bool PrintType=false) const
Definition VPlan.h:349
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPBranchOnMaskRecipe.
void execute(VPTransformState &State) override
Generate the extraction of the appropriate bit from the block mask and the conditional branch.
VPlan-based builder utility analogous to IRBuilder.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
This class augments a recipe with a set of VPValues defined by the recipe.
Definition VPlanValue.h:332
LLVM_ABI_FOR_TEST void dump() const
Dump the VPDef to stderr (for debugging).
Definition VPlan.cpp:110
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition VPlanValue.h:453
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition VPlanValue.h:426
VPValue * getVPValue(unsigned I)
Returns the VPValue with index I defined by the VPDef.
Definition VPlanValue.h:438
ArrayRef< VPRecipeValue * > definedValues()
Returns an ArrayRef of the values defined by the VPDef.
Definition VPlanValue.h:448
unsigned getVPDefID() const
Definition VPlanValue.h:458
VPIRValue * getStartValue() const
Definition VPlan.h:3750
VPValue * getStepValue() const
Definition VPlan.h:3751
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void decompose()
Insert the recipes of the expression back into the VPlan, directly before the current recipe.
bool isSingleScalar() const
Returns true if the result of this VPExpressionRecipe is a single-scalar.
bool mayHaveSideEffects() const
Returns true if this expression contains recipes that may have side effects.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Compute the cost of this recipe either using a recipe's specialized implementation or using the legac...
bool mayReadOrWriteMemory() const
Returns true if this expression contains recipes that may read from or write to memory.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this header phi recipe.
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition VPlan.h:2052
void execute(VPTransformState &State) override
Produce a vectorized histogram operation.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPHistogramRecipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getMask() const
Return the mask operand if one was provided, or a null pointer if all lanes should be executed uncond...
Definition VPlan.h:1803
Class to record and manage LLVM IR flags.
Definition VPlan.h:608
FastMathFlagsTy FMFs
Definition VPlan.h:679
LLVM_ABI_FOR_TEST bool flagsValidForOpcode(unsigned Opcode) const
Returns true if the set flags are valid for Opcode.
WrapFlagsTy WrapFlags
Definition VPlan.h:673
CmpInst::Predicate CmpPredicate
Definition VPlan.h:672
void printFlags(raw_ostream &O) const
GEPNoWrapFlags GEPFlags
Definition VPlan.h:677
bool hasFastMathFlags() const
Returns true if the recipe has fast-math flags.
Definition VPlan.h:857
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const
TruncFlagsTy TruncFlags
Definition VPlan.h:674
CmpInst::Predicate getPredicate() const
Definition VPlan.h:834
ExactFlagsTy ExactFlags
Definition VPlan.h:676
bool hasNoSignedWrap() const
Definition VPlan.h:883
void intersectFlags(const VPIRFlags &Other)
Only keep flags also present in Other.
GEPNoWrapFlags getGEPNoWrapFlags() const
Definition VPlan.h:849
bool hasPredicate() const
Returns true if the recipe has a comparison predicate.
Definition VPlan.h:852
DisjointFlagsTy DisjointFlags
Definition VPlan.h:675
unsigned AllFlags
Definition VPlan.h:681
bool hasNoUnsignedWrap() const
Definition VPlan.h:872
FCmpFlagsTy FCmpFlags
Definition VPlan.h:680
NonNegFlagsTy NonNegFlags
Definition VPlan.h:678
void applyFlags(Instruction &I) const
Apply the IR flags to I.
Definition VPlan.h:794
Instruction & getInstruction() const
Definition VPlan.h:1457
void extractLastLaneOfLastPartOfFirstOperand(VPBuilder &Builder)
Update the recipe's first operand to the last lane of the last part of the operand using Builder.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPIRInstruction.
VPIRInstruction(Instruction &I)
VPIRInstruction::create() should be used to create VPIRInstructions, as subclasses may need to be cre...
Definition VPlan.h:1432
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void intersect(const VPIRMetadata &MD)
Intersect this VPIRMetadata object with MD, keeping only metadata nodes that are common to both.
VPIRMetadata()=default
void print(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print metadata with node IDs.
void applyMetadata(Instruction &I) const
Add all metadata to I.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the instruction.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPInstruction.
bool doesGeneratePerAllLanes() const
Returns true if this VPInstruction generates scalar values for all lanes.
@ ExtractLane
Extracts a single lane (first operand) from a set of vector operands.
Definition VPlan.h:1136
@ ComputeAnyOfResult
Compute the final result of a AnyOf reduction with select(cmp(),x,y), where one of (x,...
Definition VPlan.h:1081
@ WideIVStep
Scale the first operand (vector step) by the second operand (scalar-step).
Definition VPlan.h:1126
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
Definition VPlan.h:1139
@ Unpack
Extracts all lanes from its (non-scalable) vector operand.
Definition VPlan.h:1078
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
Definition VPlan.h:1130
@ BuildVector
Creates a fixed-width vector containing all operands.
Definition VPlan.h:1073
@ BuildStructVector
Given operands of (the same) struct type, creates a struct of fixed- width vectors each containing a ...
Definition VPlan.h:1070
@ VScale
Returns the value for vscale.
Definition VPlan.h:1141
@ CanonicalIVIncrementForPart
Definition VPlan.h:1054
bool hasResult() const
Definition VPlan.h:1207
bool opcodeMayReadOrWriteFromMemory() const
Returns true if the underlying opcode may read from or write to memory.
LLVM_DUMP_METHOD void dump() const
Print the VPInstruction to dbgs() (for debugging).
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the VPInstruction to O.
StringRef getName() const
Returns the symbolic name assigned to the VPInstruction.
Definition VPlan.h:1248
unsigned getOpcode() const
Definition VPlan.h:1191
VPInstruction(unsigned Opcode, ArrayRef< VPValue * > Operands, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
bool isVectorToScalar() const
Returns true if this VPInstruction produces a scalar value from a vector, e.g.
bool isSingleScalar() const
Returns true if this VPInstruction's operands are single scalars and the result is also a single scal...
void execute(VPTransformState &State) override
Generate the instruction.
bool usesFirstPartOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first part of operand Op.
bool needsMaskForGaps() const
Return true if the access needs a mask because of the gaps.
Definition VPlan.h:2639
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this recipe.
Instruction * getInsertPos() const
Definition VPlan.h:2643
const InterleaveGroup< Instruction > * getInterleaveGroup() const
Definition VPlan.h:2641
VPValue * getMask() const
Return the mask used by this recipe.
Definition VPlan.h:2633
ArrayRef< VPValue * > getStoredValues() const
Return the VPValues stored by this interleave group.
Definition VPlan.h:2662
VPValue * getAddr() const
Return the address accessed by this recipe.
Definition VPlan.h:2627
VPValue * getEVL() const
The VPValue of the explicit vector length.
Definition VPlan.h:2737
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getNumStoreOperands() const override
Returns the number of stored operands of this interleave group.
Definition VPlan.h:2750
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getNumStoreOperands() const override
Returns the number of stored operands of this interleave group.
Definition VPlan.h:2700
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
In what follows, the term "input IR" refers to code that is fed into the vectorizer whereas the term ...
static VPLane getLastLaneForVF(const ElementCount &VF)
static VPLane getLaneFromEnd(const ElementCount &VF, unsigned Offset)
static VPLane getFirstLane()
virtual const VPRecipeBase * getAsRecipe() const =0
Return a VPRecipeBase* to the current object.
virtual unsigned getNumIncoming() const
Returns the number of incoming values, also number of incoming blocks.
Definition VPlan.h:1347
void removeIncomingValueFor(VPBlockBase *IncomingBlock) const
Removes the incoming value for IncomingBlock, which must be a predecessor.
const VPBasicBlock * getIncomingBlock(unsigned Idx) const
Returns the incoming block with index Idx.
Definition VPlan.h:4093
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition VPlan.h:1372
VPValue * getIncomingValue(unsigned Idx) const
Returns the incoming VPValue with index Idx.
Definition VPlan.h:1339
void printPhiOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the recipe.
void execute(VPTransformState &State) override
Generates phi nodes for live-outs (from a replicate region) as needed to retain SSA form.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:387
bool mayReadFromMemory() const
Returns true if the recipe may read from memory.
bool mayHaveSideEffects() const
Returns true if the recipe may have side-effects.
virtual void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const =0
Each concrete VPRecipe prints itself, without printing common information, like debug info or metadat...
VPRegionBlock * getRegion()
Definition VPlan.h:4254
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override final
Print the recipe, delegating to printRecipe().
bool isPhi() const
Returns true for PHI-like recipes.
bool mayWriteToMemory() const
Returns true if the recipe may write to memory.
virtual InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const
Compute the cost of this recipe either using a recipe's specialized implementation or using the legac...
VPBasicBlock * getParent()
Definition VPlan.h:408
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition VPlan.h:479
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
void insertAfter(VPRecipeBase *InsertPos)
Insert an unlinked Recipe into a basic block immediately after the specified Recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this recipe, taking into account if the cost computation should be skipped and the...
bool isScalarCast() const
Return true if the recipe is a scalar cast.
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
void moveAfter(VPRecipeBase *MovePos)
Unlink this recipe from its current VPBasicBlock and insert it into the VPBasicBlock that MovePos liv...
VPRecipeBase(const unsigned char SC, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown())
Definition VPlan.h:398
friend class VPValue
Definition VPlanValue.h:212
void execute(VPTransformState &State) override
Generate the reduction in the loop.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getEVL() const
The VPValue of the explicit vector length.
Definition VPlan.h:2900
unsigned getVFScaleFactor() const
Get the factor that the VF of this recipe's output should be scaled by, or 1 if it isn't scaled.
Definition VPlan.h:2445
bool isInLoop() const
Returns true if the phi is part of an in-loop reduction.
Definition VPlan.h:2469
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool isConditional() const
Return true if the in-loop reduction is conditional.
Definition VPlan.h:2842
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of VPReductionRecipe.
VPValue * getVecOp() const
The VPValue of the vector value to be reduced.
Definition VPlan.h:2853
VPValue * getCondOp() const
The VPValue of the condition for the block.
Definition VPlan.h:2855
RecurKind getRecurrenceKind() const
Return the recurrence kind for the in-loop reduction.
Definition VPlan.h:2838
bool isPartialReduction() const
Returns true if the reduction outputs a vector with a scaled down VF.
Definition VPlan.h:2844
VPValue * getChainOp() const
The VPValue of the scalar Chain being accumulated.
Definition VPlan.h:2851
bool isInLoop() const
Returns true if the reduction is in-loop.
Definition VPlan.h:2846
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the reduction in the loop.
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
Definition VPlan.h:4137
bool isReplicator() const
An indicator whether this region is to generate multiple replicated instances of output IR correspond...
Definition VPlan.h:4205
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
Definition VPlan.h:2922
void execute(VPTransformState &State) override
Generate replicas of the desired Ingredient.
bool isSingleScalar() const
Definition VPlan.h:2963
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPReplicateRecipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getOpcode() const
Definition VPlan.h:2992
bool shouldPack() const
Returns true if the recipe is used by a widened recipe via an intervening VPPredInstPHIRecipe.
VPValue * getStepValue() const
Definition VPlan.h:3817
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the scalarized versions of the phi node as needed by their users.
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Definition VPlan.h:531
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition VPlan.h:594
LLVM_ABI_FOR_TEST LLVM_DUMP_METHOD void dump() const
Print this VPSingleDefRecipe to dbgs() (for debugging).
VPSingleDefRecipe(const unsigned char SC, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown())
Definition VPlan.h:533
This class can be used to assign names to VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
Helper to access the operand that contains the unroll part for this recipe after unrolling.
Definition VPlan.h:968
VPValue * getUnrollPartOperand(const VPUser &U) const
Return the VPValue operand containing the unroll part or null if there is no such operand.
unsigned getUnrollPart(const VPUser &U) const
Return the unroll part.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
Definition VPlanValue.h:229
void printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the operands to O.
Definition VPlan.cpp:1428
operand_range operands()
Definition VPlanValue.h:297
void setOperand(unsigned I, VPValue *New)
Definition VPlanValue.h:273
unsigned getNumOperands() const
Definition VPlanValue.h:267
operand_iterator op_begin()
Definition VPlanValue.h:293
VPValue * getOperand(unsigned N) const
Definition VPlanValue.h:268
virtual bool usesFirstLaneOnly(const VPValue *Op) const
Returns true if the VPUser only uses the first lane of operand Op.
Definition VPlanValue.h:312
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into,...
Definition VPlanValue.h:45
Value * getLiveInIRValue() const
Return the underlying IR value for a VPIRValue.
Definition VPlan.cpp:133
bool isDefinedOutsideLoopRegions() const
Returns true if the VPValue is defined outside any loop.
Definition VPlan.cpp:1382
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Definition VPlan.cpp:119
void printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const
Definition VPlan.cpp:1424
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition VPlanValue.h:72
void replaceAllUsesWith(VPValue *New)
Definition VPlan.cpp:1385
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getSourceElementType() const
Definition VPlan.h:1957
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
operand_range args()
Definition VPlan.h:1759
Function * getCalledScalarFunction() const
Definition VPlan.h:1755
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenCallRecipe.
void execute(VPTransformState &State) override
Produce a widened version of the call instruction.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate a canonical vector induction variable of the vector loop, with start = {<Part*VF,...
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getResultType() const
Returns the result type of the cast.
Definition VPlan.h:1609
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Produce widened copies of the cast.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenCastRecipe.
void execute(VPTransformState &State) override
Generate the gep nodes.
Type * getSourceElementType() const
Definition VPlan.h:1855
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
VPIRValue * getStartValue() const
Returns the start value of the induction.
Definition VPlan.h:2115
VPValue * getStepValue()
Returns the step value of the induction.
Definition VPlan.h:2118
VPIRValue * getStartValue() const
Returns the start value of the induction.
Definition VPlan.h:2213
TruncInst * getTruncInst()
Returns the first defined value as TruncInst, if it is one or nullptr otherwise.
Definition VPlan.h:2228
Type * getScalarType() const
Returns the scalar type of the induction.
Definition VPlan.h:2237
bool isCanonical() const
Returns true if the induction is canonical, i.e.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Intrinsic::ID getVectorIntrinsicID() const
Return the ID of the intrinsic.
Definition VPlan.h:1691
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
StringRef getIntrinsicName() const
Return to name of the intrinsic as string.
LLVM_ABI_FOR_TEST bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the VPUser only uses the first lane of operand Op.
Type * getResultType() const
Return the scalar return type of the intrinsic.
Definition VPlan.h:1694
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Produce a widened version of the vector intrinsic.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this vector intrinsic.
bool IsMasked
Whether the memory access is masked.
Definition VPlan.h:3247
bool Reverse
Whether the consecutive accessed addresses are in reverse order.
Definition VPlan.h:3244
bool isConsecutive() const
Return whether the loaded-from / stored-to addresses are consecutive.
Definition VPlan.h:3287
Instruction & Ingredient
Definition VPlan.h:3235
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenMemoryRecipe.
bool Consecutive
Whether the accessed addresses are consecutive.
Definition VPlan.h:3241
VPValue * getMask() const
Return the mask used by this recipe.
Definition VPlan.h:3301
Align Alignment
Alignment information for this memory access.
Definition VPlan.h:3238
VPValue * getAddr() const
Return the address accessed by this recipe.
Definition VPlan.h:3294
bool isReverse() const
Return whether the consecutive loaded/stored addresses are in reverse order.
Definition VPlan.h:3291
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool onlyScalarsGenerated(bool IsScalable)
Returns true if only scalar values will be generated.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenRecipe.
void execute(VPTransformState &State) override
Produce a widened instruction using the opcode and operands of the recipe, processing State....
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getUF() const
Definition VPlan.h:4484
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition VPlan.cpp:1022
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:397
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1106
void mutateType(Type *Ty)
Mutate the type of this Value to be of the specified type.
Definition Value.h:838
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition TypeSize.h:256
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
iterator erase(iterator where)
Definition ilist.h:204
pointer remove(iterator &IT)
Definition ilist.h:188
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
LLVM_ABI Intrinsic::ID getDeinterleaveIntrinsicID(unsigned Factor)
Returns the corresponding llvm.vector.deinterleaveN intrinsic for factor N.
LLVM_ABI StringRef getBaseName(ID id)
Return the LLVM name for an intrinsic, without encoded types for overloading, such as "llvm....
bool match(Val *V, const Pattern &P)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< VPValue > m_VPValue()
Match an arbitrary VPValue and ignore it.
VPInstruction_match< VPInstruction::Reverse, Op0_t > m_Reverse(const Op0_t &Op0)
NodeAddr< DefNode * > Def
Definition RDFGraph.h:384
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or on...
bool isAddressSCEVForCost(const SCEV *Addr, ScalarEvolution &SE, const Loop *L)
Returns true if Addr is an address SCEV that can be passed to TTI::getAddressComputationCost,...
bool onlyFirstPartUsed(const VPValue *Def)
Returns true if only the first part of Def is used.
bool onlyFirstLaneUsed(const VPValue *Def)
Returns true if only the first lane of Def is used.
bool onlyScalarValuesUsed(const VPValue *Def)
Returns true if only scalar values of Def are used by all users.
const SCEV * getSCEVExprForVPValue(const VPValue *V, PredicatedScalarEvolution &PSE, const Loop *L=nullptr)
Return the SCEV expression for V.
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
LLVM_ABI Value * createSimpleReduction(IRBuilderBase &B, Value *Src, RecurKind RdxKind)
Create a reduction of the given vector.
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:829
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
InstructionCost Cost
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2530
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2184
void interleaveComma(const Container &c, StreamT &os, UnaryFunctor each_fn)
Definition STLExtras.h:2289
auto cast_or_null(const Y &Val)
Definition Casting.h:714
LLVM_ABI Value * concatenateVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vecs)
Concatenate a list of vectors.
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
LLVM_ABI Value * createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left, Value *Right)
Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
LLVM_ABI Constant * createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF, const InterleaveGroup< Instruction > &Group)
Create a mask that filters the members of an interleave group where there are gaps.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
LLVM_ABI llvm::SmallVector< int, 16 > createReplicatedMask(unsigned ReplicationFactor, unsigned VF)
Create a mask with replicated elements.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1751
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:323
@ Other
Any other memory.
Definition ModRef.h:68
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elem...
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
RecurKind
These are the kinds of recurrences that we support.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic has a scalar operand.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about an recurrence kind, return the identity for the @llvm.vector....
DWARFExpression::Operation Op
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
LLVM_ABI Value * createOrderedReduction(IRBuilderBase &B, RecurKind RdxKind, Value *Src, Value *Start)
Create an ordered reduction intrinsic using the given recurrence kind RdxKind.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
LLVM_ABI Value * createAnyOfReduction(IRBuilderBase &B, Value *Src, Value *InitVal, PHINode *OrigPhi)
Create a reduction of the given vector Src for a reduction of kind RecurKind::AnyOf.
LLVM_ABI bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic is overloaded on the type of the operand at index OpdI...
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Struct to hold various analysis needed for cost computations.
TargetTransformInfo::OperandValueInfo getOperandInfo(VPValue *V) const
Returns the OperandInfo for V, if it is a live-in.
Definition VPlan.cpp:1737
TargetTransformInfo::TargetCostKind CostKind
VPTypeAnalysis Types
const TargetTransformInfo & TTI
void execute(VPTransformState &State) override
Generate the phi nodes.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this first-order recurrence phi recipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
An overlay for VPIRInstructions wrapping PHI nodes enabling convenient use cast/dyn_cast/isa and exec...
Definition VPlan.h:1495
PHINode & getIRPhi()
Definition VPlan.h:1503
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
A VPValue representing a live-in from the input IR or a constant.
Definition VPlanValue.h:184
Value * getValue() const
Returns the underlying IR value.
Definition VPlanValue.h:190
void execute(VPTransformState &State) override
Generate the instruction.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
A pure-virtual common base class for recipes defining a single VPValue and using IR flags.
Definition VPlan.h:922
InstructionCost getCostForRecipeWithOpcode(unsigned Opcode, ElementCount VF, VPCostContext &Ctx) const
Compute the cost for this recipe for VF, using Opcode and Ctx.
VPRecipeWithIRFlags(const unsigned char SC, ArrayRef< VPValue * > Operands, const VPIRFlags &Flags, DebugLoc DL=DebugLoc::getUnknown())
Definition VPlan.h:923
A symbolic live-in VPValue, used for values like vector trip count, VF, and VFxUF.
Definition VPlanValue.h:202
SmallDenseMap< const VPBasicBlock *, BasicBlock * > VPBB2IRBB
A mapping of each VPBasicBlock to the corresponding BasicBlock.
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the ou...
VPTypeAnalysis TypeAnalysis
VPlan-based type analysis.
struct llvm::VPTransformState::CFGState CFG
Value * get(const VPValue *Def, bool IsScalar=false)
Get the generated vector Value for a given VPValue Def if IsScalar is false, otherwise return the gen...
Definition VPlan.cpp:275
IRBuilderBase & Builder
Hold a reference to the IRBuilder used to generate output IR code.
ElementCount VF
The chosen Vectorization Factor of the loop being vectorized.
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Generate the wide load or gather.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenLoadEVLRecipe.
VPValue * getEVL() const
Return the EVL operand.
Definition VPlan.h:3379
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate a wide load or gather.
VPValue * getStoredValue() const
Return the address accessed by this recipe.
Definition VPlan.h:3462
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Generate the wide store or scatter.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenStoreEVLRecipe.
VPValue * getEVL() const
Return the EVL operand.
Definition VPlan.h:3465
void execute(VPTransformState &State) override
Generate a wide store or scatter.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStoredValue() const
Return the value stored by this recipe.
Definition VPlan.h:3425