46#define LV_NAME "loop-vectorize"
47#define DEBUG_TYPE LV_NAME
53 case VPInstructionSC: {
56 if (VPI->getOpcode() == Instruction::Load)
58 return VPI->opcodeMayReadOrWriteFromMemory();
60 case VPInterleaveEVLSC:
63 case VPWidenStoreEVLSC:
71 ->getCalledScalarFunction()
73 case VPWidenIntrinsicSC:
75 case VPCanonicalIVPHISC:
76 case VPBranchOnMaskSC:
78 case VPFirstOrderRecurrencePHISC:
79 case VPReductionPHISC:
80 case VPScalarIVStepsSC:
84 case VPReductionEVLSC:
86 case VPVectorPointerSC:
87 case VPWidenCanonicalIVSC:
90 case VPWidenIntOrFpInductionSC:
91 case VPWidenLoadEVLSC:
94 case VPWidenPointerInductionSC:
96 case VPWidenSelectSC: {
100 assert((!I || !I->mayWriteToMemory()) &&
101        "underlying instruction may write to memory");
113 case VPInstructionSC:
115 case VPWidenLoadEVLSC:
120 ->mayReadFromMemory();
123 ->getCalledScalarFunction()
124 ->onlyWritesMemory();
125 case VPWidenIntrinsicSC:
127 case VPBranchOnMaskSC:
129 case VPFirstOrderRecurrencePHISC:
130 case VPPredInstPHISC:
131 case VPScalarIVStepsSC:
132 case VPWidenStoreEVLSC:
136 case VPReductionEVLSC:
138 case VPVectorPointerSC:
139 case VPWidenCanonicalIVSC:
142 case VPWidenIntOrFpInductionSC:
144 case VPWidenPointerInductionSC:
146 case VPWidenSelectSC: {
150 assert((!I || !I->mayReadFromMemory()) &&
151        "underlying instruction may read from memory");
165 case VPFirstOrderRecurrencePHISC:
166 case VPPredInstPHISC:
167 case VPVectorEndPointerSC:
169 case VPInstructionSC: {
175 case VPWidenCallSC: {
179 case VPWidenIntrinsicSC:
182 case VPReductionEVLSC:
184 case VPScalarIVStepsSC:
185 case VPVectorPointerSC:
186 case VPWidenCanonicalIVSC:
189 case VPWidenIntOrFpInductionSC:
191 case VPWidenPointerInductionSC:
193 case VPWidenSelectSC: {
197 assert((!I || !I->mayHaveSideEffects()) &&
198        "underlying instruction has side-effects");
201 case VPInterleaveEVLSC:
204 case VPWidenLoadEVLSC:
206 case VPWidenStoreEVLSC:
211 "mayHaveSideffects result for ingredient differs from this "
214 case VPReplicateSC: {
216 return R->getUnderlyingInstr()->mayHaveSideEffects();
224 assert(!Parent && "Recipe already in some VPBasicBlock");
226        "Insertion position not in any VPBasicBlock");
232 assert(!Parent && "Recipe already in some VPBasicBlock");
238 assert(!Parent && "Recipe already in some VPBasicBlock");
240        "Insertion position not in any VPBasicBlock");
275 UI = IG->getInsertPos();
277 UI = &WidenMem->getIngredient();
280 if (UI && Ctx.skipCostComputation(UI, VF.isVector())) {
294     dbgs() << "Cost of " << RecipeCost << " for VF " << VF << ": ";
316 assert(OpType == Other.OpType && "OpType must match");
318 case OperationType::OverflowingBinOp:
319 WrapFlags.HasNUW &= Other.WrapFlags.HasNUW;
320 WrapFlags.HasNSW &= Other.WrapFlags.HasNSW;
322 case OperationType::Trunc:
326 case OperationType::DisjointOp:
329 case OperationType::PossiblyExactOp:
330 ExactFlags.IsExact &= Other.ExactFlags.IsExact;
332 case OperationType::GEPOp:
335 case OperationType::FPMathOp:
336 case OperationType::FCmp:
337 assert((OpType != OperationType::FCmp ||
338 FCmpFlags.Pred == Other.FCmpFlags.Pred) &&
339 "Cannot drop CmpPredicate");
340 getFMFsRef().NoNaNs &= Other.getFMFsRef().NoNaNs;
341 getFMFsRef().NoInfs &= Other.getFMFsRef().NoInfs;
343 case OperationType::NonNegOp:
346 case OperationType::Cmp:
349 case OperationType::Other:
356 assert((OpType == OperationType::FPMathOp || OpType == OperationType::FCmp) &&
357 "recipe doesn't have fast math flags");
358 const FastMathFlagsTy &F = getFMFsRef();
370#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
386template <unsigned PartOpIdx>
389 if (U.getNumOperands() == PartOpIdx + 1)
390 return U.getOperand(PartOpIdx);
394template <unsigned PartOpIdx>
413 "Set flags not supported for the provided opcode");
414 assert((getNumOperandsForOpcode(Opcode) == -1u ||
416 "number of operands does not match opcode");
420unsigned VPInstruction::getNumOperandsForOpcode(unsigned Opcode) {
431 case Instruction::Alloca:
432 case Instruction::ExtractValue:
433 case Instruction::Freeze:
434 case Instruction::Load:
449 case Instruction::ICmp:
450 case Instruction::FCmp:
451 case Instruction::ExtractElement:
452 case Instruction::Store:
462 case Instruction::Select:
469 case Instruction::Call:
470 case Instruction::GetElementPtr:
471 case Instruction::PHI:
472 case Instruction::Switch:
489bool VPInstruction::canGenerateScalarForFirstLane() const {
495 case Instruction::Freeze:
496 case Instruction::ICmp:
497 case Instruction::PHI:
498 case Instruction::Select:
514 IRBuilderBase &Builder = State.Builder;
533 case Instruction::ExtractElement: {
536 unsigned IdxToExtract =
544 case Instruction::Freeze: {
548 case Instruction::FCmp:
549 case Instruction::ICmp: {
555 case Instruction::PHI: {
558 case Instruction::Select: {
584 {VIVElem0, ScalarTC}, nullptr, Name);
600 if (!V1->getType()->isVectorTy())
620 "Requested vector length should be an integer.");
626 Builder.getInt32Ty(), Intrinsic::experimental_get_vector_length,
627 {AVL, VFArg, Builder.getTrue()});
633 assert(Part != 0 && "Must have a positive part");
646 VPBasicBlock *SecondVPSucc =
668 for (unsigned FieldIndex = 0; FieldIndex != StructTy->getNumElements();
692 IRBuilderBase::FastMathFlagGuard FMFG(Builder);
711 ReducedPartRdx, "bin.rdx");
720 RecurKind RK = PhiR->getRecurrenceKind();
722 "Unexpected reduction kind");
723 assert(!PhiR->isInLoop() &&
724 "In-loop FindLastIV reduction is not supported yet");
736 for (unsigned Part = 1; Part < UF; ++Part)
737 ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
751 RecurKind RK = PhiR->getRecurrenceKind();
753 "should be handled by ComputeFindIVResult");
759 for (unsigned Part = 0; Part < UF; ++Part)
760 RdxParts[Part] = State.get(getOperand(1 + Part), PhiR->isInLoop());
762 IRBuilderBase::FastMathFlagGuard FMFG(Builder);
767 Value *ReducedPartRdx = RdxParts[0];
768 if (PhiR->isOrdered()) {
769 ReducedPartRdx = RdxParts[UF - 1];
772 for (unsigned Part = 1; Part < UF; ++Part) {
773 Value *RdxPart = RdxParts[Part];
775 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
784 Builder.CreateBinOp(Opcode, RdxPart, ReducedPartRdx, "bin.rdx");
791 if (State.VF.isVector() && !PhiR->isInLoop()) {
798 return ReducedPartRdx;
807 "invalid offset to extract from");
812 assert(Offset <= 1 && "invalid offset to extract from");
826 "can only generate first lane for PtrAdd");
846 Value *Res = nullptr;
851 Builder.CreateMul(RuntimeVF, ConstantInt::get(IdxTy, Idx - 1));
852 Value *VectorIdx = Idx == 1
854 : Builder.CreateSub(LaneToExtract, VectorStart);
879 Value *Res = nullptr;
880 for (int Idx = LastOpIdx; Idx >= 0; --Idx) {
881 Value *TrailingZeros =
911 Type *ScalarTy = Ctx.Types.inferScalarType(this);
914 case Instruction::FNeg:
915 return Ctx.TTI.getArithmeticInstrCost(Opcode, ResultTy, Ctx.CostKind);
916 case Instruction::UDiv:
917 case Instruction::SDiv:
918 case Instruction::SRem:
919 case Instruction::URem:
920 case Instruction::Add:
921 case Instruction::FAdd:
922 case Instruction::Sub:
923 case Instruction::FSub:
924 case Instruction::Mul:
925 case Instruction::FMul:
926 case Instruction::FDiv:
927 case Instruction::FRem:
928 case Instruction::Shl:
929 case Instruction::LShr:
930 case Instruction::AShr:
931 case Instruction::And:
932 case Instruction::Or:
933 case Instruction::Xor: {
941 RHSInfo = Ctx.getOperandInfo(RHS);
952 return Ctx.TTI.getArithmeticInstrCost(
953 Opcode, ResultTy, Ctx.CostKind,
954 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
955 RHSInfo, Operands, CtxI, &Ctx.TLI);
957 case Instruction::Freeze:
959 return Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, ResultTy,
961 case Instruction::ExtractValue:
962 return Ctx.TTI.getInsertExtractValueCost(Instruction::ExtractValue,
964 case Instruction::ICmp:
965 case Instruction::FCmp: {
969 return Ctx.TTI.getCmpSelInstrCost(
971 Ctx.CostKind, {TTI::OK_AnyValue, TTI::OP_None},
972 {TTI::OK_AnyValue, TTI::OP_None}, CtxI);
988 "Should only generate a vector value or single scalar, not scalars "
996 case Instruction::Select: {
999 auto *CondTy = Ctx.Types.inferScalarType(getOperand(0));
1000 auto *VecTy = Ctx.Types.inferScalarType(getOperand(1));
1005 return Ctx.TTI.getCmpSelInstrCost(Instruction::Select, VecTy, CondTy, Pred,
1008 case Instruction::ExtractElement:
1018 return Ctx.TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy,
1022 auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
1023 return Ctx.TTI.getArithmeticReductionCost(
1029 return Ctx.TTI.getCmpSelInstrCost(Instruction::ICmp, ScalarTy,
1036 {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)});
1037 return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
1042 return Ctx.TTI.getCmpSelInstrCost(Instruction::ICmp, ScalarTy,
1049 {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)});
1052 Cost += Ctx.TTI.getArithmeticInstrCost(
1053 Instruction::Xor, PredTy, Ctx.CostKind,
1054 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
1055 {TargetTransformInfo::OK_UniformConstantValue,
1056 TargetTransformInfo::OP_None});
1058 Cost += Ctx.TTI.getArithmeticInstrCost(
1066 Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
1075 unsigned Multiplier =
1080 return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
1087 I32Ty, {Arg0Ty, I32Ty, I1Ty});
1088 return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
1093 return Ctx.TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
1094 VecTy, Ctx.CostKind, 0);
1104 "unexpected VPInstruction witht underlying value");
1112 getOpcode() == Instruction::ExtractElement ||
1124 case Instruction::PHI:
1135 assert(!State.Lane && "VPInstruction executing an Lane");
1138 "Set flags not supported for the provided opcode");
1141 Value *GeneratedValue = generate(State);
1144 assert(GeneratedValue && "generate must produce a value");
1145 bool GeneratesPerFirstLaneOnly = canGenerateScalarForFirstLane() &&
1150 !GeneratesPerFirstLaneOnly) ||
1151 State.VF.isScalar()) &&
1152 "scalar value but not only first lane defined");
1153 State.set(this, GeneratedValue,
1154 GeneratesPerFirstLaneOnly);
1161 case Instruction::GetElementPtr:
1162 case Instruction::ExtractElement:
1163 case Instruction::Freeze:
1164 case Instruction::FCmp:
1165 case Instruction::ICmp:
1166 case Instruction::Select:
1167 case Instruction::PHI:
1208 case Instruction::ExtractElement:
1210 case Instruction::PHI:
1212 case Instruction::FCmp:
1213 case Instruction::ICmp:
1214 case Instruction::Select:
1215 case Instruction::Or:
1216 case Instruction::Freeze:
1257 case Instruction::FCmp:
1258 case Instruction::ICmp:
1259 case Instruction::Select:
1269#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1277 O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
1289 O << "combined load";
1292 O << "combined store";
1295 O << "active lane mask";
1298 O << "EXPLICIT-VECTOR-LENGTH";
1301 O << "first-order splice";
1304 O << "branch-on-cond";
1307 O << "TC > VF ? TC - VF : 0";
1313 O << "branch-on-count";
1319 O << "buildstructvector";
1325 O << "extract-lane";
1328 O << "extract-last-lane";
1331 O << "extract-last-part";
1334 O << "extract-penultimate-element";
1337 O << "compute-anyof-result";
1340 O << "compute-find-iv-result";
1343 O << "compute-reduction-result";
1358 O << "first-active-lane";
1361 O << "last-active-lane";
1364 O << "reduction-start-vector";
1367 O << "resume-for-epilogue";
1387 State.set(this, Cast, VPLane(0));
1398 Value *VScale = State.Builder.CreateVScale(ResultTy);
1399 State.set(this, VScale, true);
1408#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1411 O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
1417 O << "wide-iv-step ";
1421 O << "step-vector " << *ResultTy;
1424 O << "vscale " << *ResultTy;
1430 O << " to " << *ResultTy;
1437 PHINode *NewPhi = State.Builder.CreatePHI(
1438 State.TypeAnalysis.inferScalarType(this), 2, getName());
1445 for (unsigned Idx = 0; Idx != NumIncoming; ++Idx) {
1450 State.set(this, NewPhi, VPLane(0));
1453#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1456 O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
1471 "PHINodes must be handled by VPIRPhi");
1474 State.Builder.SetInsertPoint(I.getParent(), std::next(I.getIterator()));
1487 "can only update exiting operands to phi nodes");
1498#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1501 O << Indent << "IR " << I;
1513 auto *PredVPBB = Pred->getExitingBasicBlock();
1514 BasicBlock *PredBB = State.CFG.VPBB2IRBB[PredVPBB];
1521 if (Phi->getBasicBlockIndex(PredBB) == -1)
1522 Phi->addIncoming(V, PredBB);
1524 Phi->setIncomingValueForBlock(PredBB, V);
1529 State.Builder.SetInsertPoint(Phi->getParent(), std::next(Phi->getIterator()));
1534 assert(R->getNumOperands() == R->getParent()->getNumPredecessors() &&
1535 "Number of phi operands must match number of predecessors");
1536 unsigned Position = R->getParent()->getIndexForPredecessor(IncomingBlock);
1537 R->removeOperand(Position);
1540#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1554#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1560 O << " (extra operand" << (getNumOperands() > 1 ? "s" : "") << ": ";
1565 std::get<1>(Op)->printAsOperand(O);
1573 for (const auto &[Kind, Node] : Metadata)
1574 I.setMetadata(Kind, Node);
1579 for (const auto &[KindA, MDA] : Metadata) {
1580 for (const auto &[KindB, MDB] : Other.Metadata) {
1581 if (KindA == KindB && MDA == MDB) {
1587 Metadata = std::move(MetadataIntersection);
1590#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1593 if (Metadata.empty() || !M)
1599 auto [Kind, Node] = KindNodePair;
1601 "Unexpected unnamed metadata kind");
1602 O << "!" << MDNames[Kind] << " ";
1610 assert(State.VF.isVector() && "not widening");
1611 assert(Variant != nullptr && "Can't create vector function.");
1622 Arg = State.get(I.value(), VPLane(0));
1625 Args.push_back(Arg);
1631 CI->getOperandBundlesAsDefs(OpBundles);
1633 CallInst *V = State.Builder.CreateCall(Variant, Args, OpBundles);
1636 V->setCallingConv(Variant->getCallingConv());
1638 if (!V->getType()->isVoidTy())
1644 return Ctx.TTI.getCallInstrCost(nullptr, Variant->getReturnType(),
1645 Variant->getFunctionType()->params(),
1649#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1652 O << Indent << "WIDEN-CALL ";
1664 O << " @" << CalledFn->getName() << "(";
1670 O << " (using library function";
1671 if (Variant->hasName())
1672 O << ": " << Variant->getName();
1678 assert(State.VF.isVector() && "not widening");
1691 Arg = State.get(I.value(), VPLane(0));
1697 Args.push_back(Arg);
1701 Module *M = State.Builder.GetInsertBlock()->getModule();
1705 "Can't retrieve vector intrinsic or vector-predication intrinsics.");
1710 CI->getOperandBundlesAsDefs(OpBundles);
1712 CallInst *V = State.Builder.CreateCall(VectorF, Args, OpBundles);
1717 if (!V->getType()->isVoidTy())
1733 for (const auto &[Idx, Op] : enumerate(Operands)) {
1734 auto *V = Op->getUnderlyingValue();
1737 Arguments.push_back(UI->getArgOperand(Idx));
1746 Type *ScalarRetTy = Ctx.Types.inferScalarType(&R);
1752 : Ctx.Types.inferScalarType(Op));
1757 R.hasFastMathFlags() ? R.getFastMathFlags() : FastMathFlags();
1762 return Ctx.TTI.getIntrinsicInstrCost(CostAttrs, Ctx.CostKind);
1784#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1787 O << Indent << "WIDEN-INTRINSIC ";
1788 if (ResultTy->isVoidTy()) {
1816 Value *Mask = nullptr;
1818 Mask = State.get(VPMask);
1821 Builder.CreateVectorSplat(VTy->getElementCount(), Builder.getInt1(1));
1825 if (Opcode == Instruction::Sub)
1826 IncAmt = Builder.CreateNeg(IncAmt);
1828 assert(Opcode == Instruction::Add && "only add or sub supported for now");
1830 State.Builder.CreateIntrinsic(Intrinsic::experimental_vector_histogram_add,
1845 Type *IncTy = Ctx.Types.inferScalarType(IncAmt);
1851 Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VTy, Ctx.CostKind);
1864 {PtrTy, IncTy, MaskTy});
1867 return Ctx.TTI.getIntrinsicInstrCost(ICA, Ctx.CostKind) + MulCost +
1868 Ctx.TTI.getArithmeticInstrCost(Opcode, VTy, Ctx.CostKind);
1871#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1874 O << Indent << "WIDEN-HISTOGRAM buckets: ";
1877 if (Opcode == Instruction::Sub)
1880 assert(Opcode == Instruction::Add);
1893 O << Indent << "WIDEN-SELECT ";
1912 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
1913 State.set(this, Sel);
1925 Type *ScalarTy = Ctx.Types.inferScalarType(this);
1926 Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
1934 const auto [Op1VK, Op1VP] = Ctx.getOperandInfo(Op0);
1935 const auto [Op2VK, Op2VP] = Ctx.getOperandInfo(Op1);
1939 [](VPValue *Op) { return Op->getUnderlyingValue(); }))
1940 Operands.append(SI->op_begin(), SI->op_end());
1942 return Ctx.TTI.getArithmeticInstrCost(
1943 IsLogicalOr ? Instruction::Or : Instruction::And, VectorTy,
1944 Ctx.CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, Operands, SI);
1953 Pred = Cmp->getPredicate();
1954 return Ctx.TTI.getCmpSelInstrCost(
1955 Instruction::Select, VectorTy, CondTy, Pred, Ctx.CostKind,
1956 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, SI);
1959VPIRFlags::FastMathFlagsTy::FastMathFlagsTy(const FastMathFlags &FMF) {
1972 case OperationType::OverflowingBinOp:
1973 return Opcode == Instruction::Add || Opcode == Instruction::Sub ||
1974 Opcode == Instruction::Mul || Opcode == Instruction::Shl ||
1975 Opcode == VPInstruction::VPInstruction::CanonicalIVIncrementForPart;
1976 case OperationType::Trunc:
1977 return Opcode == Instruction::Trunc;
1978 case OperationType::DisjointOp:
1979 return Opcode == Instruction::Or;
1980 case OperationType::PossiblyExactOp:
1981 return Opcode == Instruction::AShr || Opcode == Instruction::LShr ||
1982 Opcode == Instruction::UDiv || Opcode == Instruction::SDiv;
1983 case OperationType::GEPOp:
1984 return Opcode == Instruction::GetElementPtr ||
1987 case OperationType::FPMathOp:
1988 return Opcode == Instruction::Call || Opcode == Instruction::FAdd ||
1989 Opcode == Instruction::FMul || Opcode == Instruction::FSub ||
1990 Opcode == Instruction::FNeg || Opcode == Instruction::FDiv ||
1991 Opcode == Instruction::FRem || Opcode == Instruction::FPExt ||
1992 Opcode == Instruction::FPTrunc || Opcode == Instruction::Select ||
1996 case OperationType::FCmp:
1997 return Opcode == Instruction::FCmp;
1998 case OperationType::NonNegOp:
1999 return Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP;
2000 case OperationType::Cmp:
2001 return Opcode == Instruction::FCmp || Opcode == Instruction::ICmp;
2002 case OperationType::Other:
2009#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2012 case OperationType::Cmp:
2015 case OperationType::FCmp:
2019 case OperationType::DisjointOp:
2023 case OperationType::PossiblyExactOp:
2027 case OperationType::OverflowingBinOp:
2033 case OperationType::Trunc:
2039 case OperationType::FPMathOp:
2042 case OperationType::GEPOp:
2045 else if (GEPFlags.hasNoUnsignedSignedWrap())
2050 case OperationType::NonNegOp:
2054 case OperationType::Other:
2062 auto &Builder = State.Builder;
2064 case Instruction::Call:
2065 case Instruction::Br:
2066 case Instruction::PHI:
2067 case Instruction::GetElementPtr:
2068 case Instruction::Select:
2070 case Instruction::UDiv:
2071 case Instruction::SDiv:
2072 case Instruction::SRem:
2073 case Instruction::URem:
2074 case Instruction::Add:
2075 case Instruction::FAdd:
2076 case Instruction::Sub:
2077 case Instruction::FSub:
2078 case Instruction::FNeg:
2079 case Instruction::Mul:
2080 case Instruction::FMul:
2081 case Instruction::FDiv:
2082 case Instruction::FRem:
2083 case Instruction::Shl:
2084 case Instruction::LShr:
2085 case Instruction::AShr:
2086 case Instruction::And:
2087 case Instruction::Or:
2088 case Instruction::Xor: {
2092 Ops.push_back(State.get(VPOp));
2094 Value *V = Builder.CreateNAryOp(Opcode, Ops);
2105 case Instruction::ExtractValue: {
2109 Value *Extract = Builder.CreateExtractValue(Op, CI->getZExtValue());
2110 State.set(this, Extract);
2113 case Instruction::Freeze: {
2115 Value *Freeze = Builder.CreateFreeze(Op);
2116 State.set(this, Freeze);
2119 case Instruction::ICmp:
2120 case Instruction::FCmp: {
2122 bool FCmp = Opcode == Instruction::FCmp;
2149 State.get(this)->getType() &&
2150 "inferred type and type from generated instructions do not match");
2157 case Instruction::UDiv:
2158 case Instruction::SDiv:
2159 case Instruction::SRem:
2160 case Instruction::URem:
2165 case Instruction::FNeg:
2166 case Instruction::Add:
2167 case Instruction::FAdd:
2168 case Instruction::Sub:
2169 case Instruction::FSub:
2170 case Instruction::Mul:
2171 case Instruction::FMul:
2172 case Instruction::FDiv:
2173 case Instruction::FRem:
2174 case Instruction::Shl:
2175 case Instruction::LShr:
2176 case Instruction::AShr:
2177 case Instruction::And:
2178 case Instruction::Or:
2179 case Instruction::Xor:
2180 case Instruction::Freeze:
2181 case Instruction::ExtractValue:
2182 case Instruction::ICmp:
2183 case Instruction::FCmp:
2190#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2193 O << Indent << "WIDEN ";
2202 auto &Builder = State.Builder;
2204 assert(State.VF.isVector() && "Not vectorizing?");
2209 State.set(this, Cast);
2233 if (WidenMemoryRecipe == nullptr)
2235 if (!WidenMemoryRecipe->isConsecutive())
2237 if (WidenMemoryRecipe->isReverse())
2239 if (WidenMemoryRecipe->isMasked())
2247 if ((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
2250 CCH = ComputeCCH(StoreRecipe);
2253 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
2254 Opcode == Instruction::FPExt) {
2265 return Ctx.TTI.getCastInstrCost(
2266 Opcode, DestTy, SrcTy, CCH, Ctx.CostKind,
2270#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2273 O << Indent << "WIDEN-CAST ";
2284 return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
2291 : ConstantFP::get(Ty, C);
2294#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2299 O << " = WIDEN-INDUCTION";
2304 O << " (truncated to " << *TI->getType() << ")";
2316 return StartC && StartC->isZero() && StepC && StepC->isOne() &&
2320#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2325 O << " = DERIVED-IV ";
2349 assert(BaseIVTy == Step->getType() && "Types of BaseIV and Step must match!");
2356 AddOp = Instruction::Add;
2357 MulOp = Instruction::Mul;
2359 AddOp = InductionOpcode;
2360 MulOp = Instruction::FMul;
2370 unsigned StartLane = 0;
2371 unsigned EndLane = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
2373 StartLane = State.Lane->getKnownLane();
2374 EndLane = StartLane + 1;
2378 StartIdx0 = ConstantInt::get(IntStepTy, 0);
2383 Builder.CreateMul(StartIdx0, ConstantInt::get(StartIdx0->getType(),
2386 StartIdx0 = Builder.CreateSExtOrTrunc(StartIdx0, IntStepTy);
2390 StartIdx0 = Builder.CreateSIToFP(StartIdx0, BaseIVTy);
2392 for (unsigned Lane = StartLane; Lane < EndLane; ++Lane) {
2393 Value *StartIdx = Builder.CreateBinOp(
2398 "Expected StartIdx to be folded to a constant when VF is not "
2400 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2401 auto *Add = Builder.CreateBinOp(AddOp, BaseIV, Mul);
2406#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2411 O << " = SCALAR-STEPS ";
2422 assert(State.VF.isVector() && "not widening");
2431 [](VPValue *Op) { return !Op->isDefinedOutsideLoopRegions(); }) &&
2432 "Expected at least one loop-variant operand");
2438 auto *Ptr = State.get(getOperand(0), isPointerLoopInvariant());
2445 Indices.push_back(State.get(Operand, isIndexLoopInvariant(I - 1)));
2452 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
2453 "NewGEP is not a pointer vector");
2454 State.set(this, NewGEP);
2457#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2460 O << Indent << "WIDEN-GEP ";
2461 O << (isPointerLoopInvariant() ? "Inv" : "Var");
2463 O << "[" << (isIndexLoopInvariant(I) ? "Inv" : "Var") << "]";
2467 O << " = getelementptr";
2474 auto &Builder = State.Builder;
2476 const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout();
2477 Type *IndexTy = DL.getIndexType(State.TypeAnalysis.inferScalarType(this));
2481 if (IndexTy != RunTimeVF->getType())
2482 RunTimeVF = Builder.CreateZExtOrTrunc(RunTimeVF, IndexTy);
2484 Value *NumElt = Builder.CreateMul(
2485 ConstantInt::get(IndexTy, Stride * (int64_t)CurrentPart), RunTimeVF);
2487 Value *LastLane = Builder.CreateSub(RunTimeVF, ConstantInt::get(IndexTy, 1));
2494 ResultPtr = Builder.CreateGEP(IndexedTy, ResultPtr, LastLane, "",
2497 State.set(this, ResultPtr, true);
2500#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2505 O << " = vector-end-pointer";
2512 auto &Builder = State.Builder;
2514 "Expected prior simplification of recipe without offset");
2519 State.set(this, ResultPtr, true);
2522#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2527 O << " = vector-pointer";
2540 Type *ResultTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
2543 Ctx.TTI.getCmpSelInstrCost(Instruction::Select, ResultTy, CmpTy,
2547#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2550 O << Indent << "BLEND ";
2572 assert(!State.Lane && "Reduction being replicated.");
2575 "In-loop AnyOf reductions aren't currently supported");
2581 Value *NewCond = State.get(Cond, State.VF.isScalar());
2586 if (State.VF.isVector())
2587 Start = State.Builder.CreateVectorSplat(VecTy->getElementCount(), Start);
2589 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, Start);
2596 if (State.VF.isVector())
2600 NewRed = State.Builder.CreateBinOp(
2602 PrevInChain, NewVecOp);
2603 PrevInChain = NewRed;
2604 NextInChain = NewRed;
2608 NewRed = State.Builder.CreateIntrinsic(
2609 PrevInChain->getType(), Intrinsic::vector_partial_reduce_add,
2610 {PrevInChain, NewVecOp}, nullptr, "partial.reduce");
2611 PrevInChain = NewRed;
2612 NextInChain = NewRed;
2615 "The reduction must either be ordered, partial or in-loop");
2619 NextInChain = createMinMaxOp(State.Builder, Kind, NewRed, PrevInChain);
2621 NextInChain = State.Builder.CreateBinOp(
2623 PrevInChain, NewRed);
2629 assert(!State.Lane && "Reduction being replicated.");
2631 auto &Builder = State.Builder;
2643 Mask = State.get(CondOp);
2645 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
2655 NewRed = Builder.CreateBinOp(
2659 State.set(this, NewRed, true);
2665 Type *ElementTy = Ctx.Types.inferScalarType(this);
2669 std::optional<FastMathFlags> OptionalFMF =
2678 CondCost = Ctx.TTI.getCmpSelInstrCost(Instruction::Select, VectorTy,
2679 CondTy, Pred, Ctx.CostKind);
2681 return CondCost + Ctx.TTI.getPartialReductionCost(
2682 Opcode, ElementTy, ElementTy, ElementTy, VF,
2692 "Any-of reduction not implemented in VPlan-based cost model currently.");
2698 return Ctx.TTI.getMinMaxReductionCost(Id, VectorTy, FMFs, Ctx.CostKind);
2703 return Ctx.TTI.getArithmeticReductionCost(Opcode, VectorTy, OptionalFMF,
2708 ExpressionTypes ExpressionType,
2711 ExpressionRecipes(ExpressionRecipes), ExpressionType(ExpressionType) {
2712 assert(!ExpressionRecipes.empty() && "Nothing to combine?");
2716 "expression cannot contain recipes with side-effects");
2720 for (auto *R : ExpressionRecipes)
2721 ExpressionRecipesAsSetOfUsers.insert(R);
2727 if (R != ExpressionRecipes.back() &&
2728 any_of(R->users(), [&ExpressionRecipesAsSetOfUsers](VPUser *U) {
2729 return !ExpressionRecipesAsSetOfUsers.contains(U);
2734 R->replaceUsesWithIf(CopyForExtUsers, [&ExpressionRecipesAsSetOfUsers](
2736 return !ExpressionRecipesAsSetOfUsers.contains(&U);
2741 R->removeFromParent();
2748 for (auto *R : ExpressionRecipes) {
2749 for (const auto &[Idx, Op] : enumerate(R->operands())) {
2750 auto *Def = Op->getDefiningRecipe();
2751 if (Def && ExpressionRecipesAsSetOfUsers.contains(Def))
2754 LiveInPlaceholders.push_back(new VPValue());
2760 for (auto *R : ExpressionRecipes)
2761 for (auto const &[LiveIn, Tmp] : zip(operands(), LiveInPlaceholders))
2762 R->replaceUsesOfWith(LiveIn, Tmp);
2766 for (auto *R : ExpressionRecipes)
2769 if (!R->getParent())
2770 R->insertBefore(this);
2773 LiveInPlaceholders[Idx]->replaceAllUsesWith(Op);
2776 ExpressionRecipes.clear();
2781 Type *RedTy = Ctx.Types.inferScalarType(this);
2785 "VPExpressionRecipe only supports integer types currently.");
2788 switch (ExpressionType) {
2789 case ExpressionTypes::ExtendedReduction: {
2795 ->isPartialReduction()
2796 ? Ctx.TTI.getPartialReductionCost(
2797 Opcode, Ctx.Types.inferScalarType(getOperand(0)), nullptr,
2802 : Ctx.TTI.getExtendedReductionCost(
2803 Opcode, ExtR->getOpcode() == Instruction::ZExt, RedTy,
2804 SrcVecTy, std::nullopt, Ctx.CostKind);
2806 case ExpressionTypes::MulAccReduction:
2807 return Ctx.TTI.getMulAccReductionCost(false, Opcode, RedTy, SrcVecTy,
2810 case ExpressionTypes::ExtNegatedMulAccReduction:
2811 assert(Opcode == Instruction::Add && "Unexpected opcode");
2812 Opcode = Instruction::Sub;
2814 case ExpressionTypes::ExtMulAccReduction: {
2816 if (RedR->isPartialReduction()) {
2820 return Ctx.TTI.getPartialReductionCost(
2821 Opcode, Ctx.Types.inferScalarType(getOperand(0)),
2822 Ctx.Types.inferScalarType(getOperand(1)), RedTy, VF,
2824 Ext0R->getOpcode()),
2826 Ext1R->getOpcode()),
2827 Mul->getOpcode(), Ctx.CostKind);
2829 return Ctx.TTI.getMulAccReductionCost(
2832 Opcode, RedTy, SrcVecTy, Ctx.CostKind);
2840 return R->mayReadFromMemory() || R->mayWriteToMemory();
2848 "expression cannot contain recipes with side-effects");
2856 return RR && !RR->isPartialReduction();
2859#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2863 O << Indent << "EXPRESSION ";
2869 switch (ExpressionType) {
2870 case ExpressionTypes::ExtendedReduction: {
2872 O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce.";
2879 << *Ext0->getResultType();
2880 if (Red->isConditional()) {
2887 case ExpressionTypes::ExtNegatedMulAccReduction: {
2889 O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce.";
2899 << *Ext0->getResultType() << "), (";
2903 << *Ext1->getResultType() << ")";
2904 if (Red->isConditional()) {
2911 case ExpressionTypes::MulAccReduction:
2912 case ExpressionTypes::ExtMulAccReduction: {
2914 O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce.";
2919 bool IsExtended = ExpressionType == ExpressionTypes::ExtMulAccReduction;
2921 : ExpressionRecipes[0]);
2929 << *Ext0->getResultType() << "), (";
2937 << *Ext1->getResultType() << ")";
2939 if (Red->isConditional()) {
2952 O << Indent << "PARTIAL-REDUCE ";
2954 O << Indent << "REDUCE ";
2974 O << Indent << "REDUCE ";
3002 assert((!Instr->getType()->isAggregateType() ||
3004 "Expected vectorizable or non-aggregate type.");
3007 bool IsVoidRetTy = Instr->getType()->isVoidTy();
3011 Cloned->setName(Instr->getName() + ".cloned");
3012 Type *ResultTy = State.TypeAnalysis.inferScalarType(RepRecipe);
3016 if (ResultTy != Cloned->getType())
3027 State.setDebugLocFrom(DL);
3032 auto InputLane = Lane;
3036 Cloned->setOperand(I.index(), State.get(Operand, InputLane));
3040 State.Builder.Insert(Cloned);
3042 State.set(RepRecipe, Cloned, Lane);
3046 State.AC->registerAssumption(II);
3052 [](VPValue *Op) { return Op->isDefinedOutsideLoopRegions(); })) &&
3053 "Expected a recipe is either within a region or all of its operands "
3054 "are defined outside the vectorized region.");
3061 assert(IsSingleScalar && "VPReplicateRecipes outside replicate regions "
3062 "must have already been unrolled");
3068 "uniform recipe shouldn't be predicated");
3069 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
3074 State.Lane->isFirstLane()
3077 State.set(this, State.packScalarIntoVectorizedValue(this, WideValue,
3102 Instruction::GetElementPtr) ||
3110 if (!Opd->isDefinedOutsideLoopRegions() &&
3124 while (!WorkList.empty()) {
3126 if (!Cur || !Seen.insert(Cur).second)
3134 return Seen.contains(
3135 Blend->getIncomingValue(I)->getDefiningRecipe());
3139 for (VPUser *U : Cur->users()) {
3141 if (InterleaveR->getAddr() == Cur)
3144 if (RepR->getOpcode() == Instruction::Load &&
3145 RepR->getOperand(0) == Cur)
3147 if (RepR->getOpcode() == Instruction::Store &&
3148 RepR->getOperand(1) == Cur)
3152 if (MemR->getAddr() == Cur && MemR->isConsecutive())
3173 Ctx.SkipCostComputation.insert(UI);
3179 case Instruction::GetElementPtr:
3185 case Instruction::Call: {
3191 for (const VPValue *ArgOp : ArgOps)
3192 Tys.push_back(Ctx.Types.inferScalarType(ArgOp));
3194 if (CalledFn->isIntrinsic())
3197 switch (CalledFn->getIntrinsicID()) {
3198 case Intrinsic::assume:
3199 case Intrinsic::lifetime_end:
3200 case Intrinsic::lifetime_start:
3201 case Intrinsic::sideeffect:
3202 case Intrinsic::pseudoprobe:
3203 case Intrinsic::experimental_noalias_scope_decl: {
3206 "scalarizing intrinsic should be free");
3213 Type *ResultTy = Ctx.Types.inferScalarType(this);
3215 Ctx.TTI.getCallInstrCost(CalledFn, ResultTy, Tys, Ctx.CostKind);
3217 if (CalledFn->isIntrinsic())
3218 ScalarCallCost = std::min(
3222 return ScalarCallCost;
3226 Ctx.getScalarizationOverhead(ResultTy, ArgOps, VF);
3228 case Instruction::Add:
3229 case Instruction::Sub:
3230 case Instruction::FAdd:
3231 case Instruction::FSub:
3232 case Instruction::Mul:
3233 case Instruction::FMul:
3234 case Instruction::FDiv:
3235 case Instruction::FRem:
3236 case Instruction::Shl:
3237 case Instruction::LShr:
3238 case Instruction::AShr:
3239 case Instruction::And:
3240 case Instruction::Or:
3241 case Instruction::Xor:
3242 case Instruction::ICmp:
3243 case Instruction::FCmp:
3247 case Instruction::SDiv:
3248 case Instruction::UDiv:
3249 case Instruction::SRem:
3250 case Instruction::URem: {
3257 Ctx.getScalarizationOverhead(Ctx.Types.inferScalarType(this),
3266 Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
3270 ScalarCost /= Ctx.getPredBlockCostDivisor(UI->getParent());
3273 case Instruction::Load:
3274 case Instruction::Store: {
3281 bool IsLoad = UI->getOpcode() == Instruction::Load;
3287 Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? this : getOperand(0));
3288 Type *ScalarPtrTy = Ctx.Types.inferScalarType(PtrOp);
3293 UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo);
3296 bool PreferVectorizedAddressing = Ctx.TTI.prefersVectorizedAddressing();
3297 bool UsedByLoadStoreAddress =
3300 ScalarMemOpCost + Ctx.TTI.getAddressComputationCost(
3301 PtrTy, UsedByLoadStoreAddress ? nullptr : &Ctx.SE,
3302 PtrSCEV, Ctx.CostKind);
3312 if (!UsedByLoadStoreAddress) {
3313 bool EfficientVectorLoadStore =
3314 Ctx.TTI.supportsEfficientVectorElementLoadStore();
3315 if (!(IsLoad && !PreferVectorizedAddressing) &&
3316 !(!IsLoad && EfficientVectorLoadStore))
3319 if (!EfficientVectorLoadStore)
3320 ResultTy = Ctx.Types.inferScalarType(this);
3324 Ctx.getScalarizationOverhead(ResultTy, OpsToScalarize, VF, true);
3328 return Ctx.getLegacyCost(UI, VF);
3331#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3334 O << Indent << (IsSingleScalar ? "CLONE " : "REPLICATE ");
3343 O << "@" << CB->getCalledFunction()->getName() << "(";
3361 assert(State.Lane && "Branch on Mask works only on single instance.");
3364 Value *ConditionBit = State.get(BlockInMask, *State.Lane);
3368 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
3370 "Expected to replace unreachable terminator with conditional branch.");
3372 State.Builder.CreateCondBr(ConditionBit, State.CFG.PrevBB, nullptr);
3373 CondBr->setSuccessor(0, nullptr);
3374 CurrentTerminator->eraseFromParent();
3386 assert(State.Lane && "Predicated instruction PHI works per instance.");
3391 assert(PredicatingBB && "Predicated block has no single predecessor.");
3393 "operand must be VPReplicateRecipe");
3404 "Packed operands must generate an insertelement or insertvalue");
3412 for (unsigned I = 0; I < StructTy->getNumContainedTypes() - 1; I++)
3415 PHINode *VPhi = State.Builder.CreatePHI(VecI->getType(), 2);
3416 VPhi->addIncoming(VecI->getOperand(0), PredicatingBB);
3418 if (State.hasVectorValue(this))
3419 State.reset(this, VPhi);
3421 State.set(this, VPhi);
3429 Type *PredInstType = State.TypeAnalysis.inferScalarType(getOperand(0));
3430 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
3433 Phi->addIncoming(ScalarPredInst, PredicatedBB);
3434 if (State.hasScalarValue(this, *State.Lane))
3435 State.reset(this, Phi, *State.Lane);
3437 State.set(this, Phi, *State.Lane);
3440 State.reset(getOperand(0), Phi, *State.Lane);
3444#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3447 O << Indent << "PHI-PREDICATED-INSTRUCTION ";
3458 ->getAddressSpace();
3461 : Instruction::Store;
3468 "Inconsecutive memory access should not have the order.");
3481 : Intrinsic::vp_scatter;
3482 return Ctx.TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
3484 Ctx.TTI.getMemIntrinsicInstrCost(
3493 : Intrinsic::masked_store;
3494 Cost += Ctx.TTI.getMemIntrinsicInstrCost(
3500 Cost += Ctx.TTI.getMemoryOpCost(Opcode, Ty, Alignment, AS, Ctx.CostKind,
3506 return Cost += Ctx.TTI.getShuffleCost(
3516 auto &Builder = State.Builder;
3517 Value *Mask = nullptr;
3518 if (auto *VPMask = getMask()) {
3521 Mask = State.get(VPMask);
3523 Mask = Builder.CreateVectorReverse(Mask, "reverse");
3529 NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
3530 "wide.masked.gather");
3533 Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
3536 NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
3540 NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
3541 State.set(this, NewLI);
3544#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3547 O << Indent << "WIDEN ";
3559 Value *AllTrueMask =
3560 Builder.CreateVectorSplat(ValTy->getElementCount(), Builder.getTrue());
3561 return Builder.CreateIntrinsic(ValTy, Intrinsic::experimental_vp_reverse,
3562 {Operand, AllTrueMask, EVL}, nullptr, Name);
3570 auto &Builder = State.Builder;
3574 Value *Mask = nullptr;
3576 Mask = State.get(VPMask);
3580 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
3585 Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
3586 nullptr, "wide.masked.gather");
3588 NewLI = Builder.CreateIntrinsic(DataTy, Intrinsic::vp_load,
3589 {Addr, Mask, EVL}, nullptr, "vp.op.load");
3597 State.set(this, Res);
3612 ->getAddressSpace();
3619 return Cost + Ctx.TTI.getShuffleCost(
3624#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3627 O << Indent << "WIDEN ";
3638 auto &Builder = State.Builder;
3640 Value *Mask = nullptr;
3641 if (auto *VPMask = getMask()) {
3644 Mask = State.get(VPMask);
3646 Mask = Builder.CreateVectorReverse(Mask, "reverse");
3649 Value *StoredVal = State.get(StoredVPValue);
3653 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
3660 NewSI = Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
3662 NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
3664 NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
3668#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3671 O << Indent << "WIDEN store ";
3680 auto &Builder = State.Builder;
3683 Value *StoredVal = State.get(StoredValue);
3687 Value *Mask = nullptr;
3689 Mask = State.get(VPMask);
3693 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
3696 if (CreateScatter) {
3698 Intrinsic::vp_scatter,
3699 {StoredVal, Addr, Mask, EVL});
3702 Intrinsic::vp_store,
3703 {StoredVal, Addr, Mask, EVL});
3722 ->getAddressSpace();
3729 return Cost + Ctx.TTI.getShuffleCost(
3734#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3737 O << Indent << "WIDEN vp.store ";
3745 auto VF = DstVTy->getElementCount();
3747 assert(VF == SrcVecTy->getElementCount() && "Vector dimensions do not match");
3748 Type *SrcElemTy = SrcVecTy->getElementType();
3749 Type *DstElemTy = DstVTy->getElementType();
3750 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3751 "Vector elements must have same size");
3755 return Builder.CreateBitOrPointerCast(V, DstVTy);
3762 "Only one type should be a pointer type");
3764 "Only one type should be a floating point type");
3768 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3769 return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
3775 const Twine &Name) {
3776 unsigned Factor = Vals.size();
3777 assert(Factor > 1 && "Tried to interleave invalid number of vectors");
3781 for (Value *Val : Vals)
3782 assert(Val->getType() == VecTy && "Tried to interleave mismatched types");
3787 if (VecTy->isScalableTy()) {
3788 assert(Factor <= 8 && "Unsupported interleave factor for scalable vectors");
3789 return Builder.CreateVectorInterleave(Vals, Name);
3796 const unsigned NumElts = VecTy->getElementCount().getFixedValue();
3797 return Builder.CreateShuffleVector(
3830 assert(!State.Lane && "Interleave group being replicated.");
3832 "Masking gaps for scalable vectors is not yet supported.");
3838 unsigned InterleaveFactor = Group->getFactor();
3845 auto CreateGroupMask = [&BlockInMask, &State,
3846 &InterleaveFactor](Value *MaskForGaps) -> Value * {
3847 if (State.VF.isScalable()) {
3848 assert(!MaskForGaps && "Interleaved groups with gaps are not supported.");
3849 assert(InterleaveFactor <= 8 &&
3850 "Unsupported deinterleave factor for scalable vectors");
3851 auto *ResBlockInMask = State.get(BlockInMask);
3859 Value *ResBlockInMask = State.get(BlockInMask);
3860 Value *ShuffledMask = State.Builder.CreateShuffleVector(
3863 "interleaved.mask");
3864 return MaskForGaps ? State.Builder.CreateBinOp(Instruction::And,
3865 ShuffledMask, MaskForGaps)
3869 const DataLayout &DL = Instr->getDataLayout();
3872 Value *MaskForGaps = nullptr;
3876 assert(MaskForGaps && "Mask for Gaps is required but it is null");
3880 if (BlockInMask || MaskForGaps) {
3881 Value *GroupMask = CreateGroupMask(MaskForGaps);
3883 NewLoad = State.Builder.CreateMaskedLoad(VecTy, ResAddr,
3885 PoisonVec, "wide.masked.vec");
3887 NewLoad = State.Builder.CreateAlignedLoad(VecTy, ResAddr,
3894 if (VecTy->isScalableTy()) {
3897 assert(InterleaveFactor <= 8 &&
3898 "Unsupported deinterleave factor for scalable vectors");
3899 NewLoad = State.Builder.CreateIntrinsic(
3902 nullptr, "strided.vec");
3905 auto CreateStridedVector = [&InterleaveFactor, &State,
3906 &NewLoad](unsigned Index) -> Value * {
3907 assert(Index < InterleaveFactor && "Illegal group index");
3908 if (State.VF.isScalable())
3909 return State.Builder.CreateExtractValue(NewLoad, Index);
3915 return State.Builder.CreateShuffleVector(NewLoad, StrideMask,
3919 for (unsigned I = 0, J = 0; I < InterleaveFactor; ++I) {
3926 Value *StridedVec = CreateStridedVector(I);
3929 if (Member->getType() != ScalarTy) {
3936 StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse");
3938 State.set(VPDefs[J], StridedVec);
3948 Value *MaskForGaps =
3951 "Mismatch between NeedsMaskForGaps and MaskForGaps");
3955 unsigned StoredIdx = 0;
3956 for (unsigned i = 0; i < InterleaveFactor; i++) {
3958 "Fail to get a member from an interleaved store group");
3968 Value *StoredVec = State.get(StoredValues[StoredIdx]);
3972 StoredVec = State.Builder.CreateVectorReverse(StoredVec, "reverse");
3976 if (StoredVec->getType() != SubVT)
3985 if (BlockInMask || MaskForGaps) {
3986 Value *GroupMask = CreateGroupMask(MaskForGaps);
3987 NewStoreInstr = State.Builder.CreateMaskedStore(
3988 IVec, ResAddr, Group->getAlign(), GroupMask);
3991 State.Builder.CreateAlignedStore(IVec, ResAddr, Group->getAlign());
3998#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4002 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
4003 IG->getInsertPos()->printAsOperand(O, false);
4013 for (unsigned i = 0; i < IG->getFactor(); ++i) {
4014 if (!IG->getMember(i))
4017 O << "\n" << Indent << " store ";
4019 O << " to index " << i;
4021 O << "\n" << Indent << " ";
4023 O << " = load from index " << i;
4031 assert(!State.Lane && "Interleave group being replicated.");
4032 assert(State.VF.isScalable() &&
4033 "Only support scalable VF for EVL tail-folding.");
4035 "Masking gaps for scalable vectors is not yet supported.");
4041 unsigned InterleaveFactor = Group->getFactor();
4042 assert(InterleaveFactor <= 8 &&
4043 "Unsupported deinterleave/interleave factor for scalable vectors");
4050 Value *InterleaveEVL = State.Builder.CreateMul(
4051 EVL, ConstantInt::get(EVL->getType(), InterleaveFactor), "interleave.evl",
4055 Value *GroupMask = nullptr;
4061 State.Builder.CreateVectorSplat(WideVF, State.Builder.getTrue());
4066 CallInst *NewLoad = State.Builder.CreateIntrinsic(
4067 VecTy, Intrinsic::vp_load, {ResAddr, GroupMask, InterleaveEVL}, nullptr,
4078 NewLoad = State.Builder.CreateIntrinsic(
4081 nullptr, "strided.vec");
4083 const DataLayout &DL = Instr->getDataLayout();
4084 for (unsigned I = 0, J = 0; I < InterleaveFactor; ++I) {
4090 Value *StridedVec = State.Builder.CreateExtractValue(NewLoad, I);
4092 if (Member->getType() != ScalarTy) {
4110 const DataLayout &DL = Instr->getDataLayout();
4111 for (unsigned I = 0, StoredIdx = 0; I < InterleaveFactor; I++) {
4119 Value *StoredVec = State.get(StoredValues[StoredIdx]);
4121 if (StoredVec->getType() != SubVT)
4131 State.Builder.CreateIntrinsic(Type::getVoidTy(Ctx), Intrinsic::vp_store,
4132 {IVec, ResAddr, GroupMask, InterleaveEVL});
4141#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4145 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
4146 IG->getInsertPos()->printAsOperand(O, false);
4157 for (unsigned i = 0; i < IG->getFactor(); ++i) {
4158 if (!IG->getMember(i))
4161 O << "\n" << Indent << " vp.store ";
4163 O << " to index " << i;
4165 O << "\n" << Indent << " ";
4167 O << " = vp.load from index " << i;
4178 unsigned InsertPosIdx = 0;
4179 for (unsigned Idx = 0; IG->getFactor(); ++Idx)
4180 if (auto *Member = IG->getMember(Idx)) {
4181 if (Member == InsertPos)
4185 Type *ValTy = Ctx.Types.inferScalarType(
4190 ->getAddressSpace();
4192 unsigned InterleaveFactor = IG->getFactor();
4197 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
4198 if (IG->getMember(IF))
4203 InsertPos->getOpcode(), WideVecTy, IG->getFactor(), Indices,
4204 IG->getAlign(), AS, Ctx.CostKind, getMask(), NeedsMaskForGaps);
4206 if (!IG->isReverse())
4209 return Cost + IG->getNumMembers() *
4211 VectorTy, VectorTy, {}, Ctx.CostKind,
4215#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4218 O << Indent << "EMIT ";
4220 O << " = CANONICAL-INDUCTION ";
4230#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4234 "unexpected number of operands");
4235 O << Indent << "EMIT ";
4237 O << " = WIDEN-POINTER-INDUCTION ";
4253 O << Indent << "EMIT ";
4255 O << " = EXPAND SCEV " << *Expr;
4262 IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
4266 : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
4269 VStep = Builder.CreateVectorSplat(VF, VStep);
4271 Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType()));
4273 Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
4274 State.set(this, CanonicalVectorIV);
4277#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4280 O << Indent << "EMIT ";
4282 O << " = WIDEN-CANONICAL-INDUCTION ";
4288 auto &Builder = State.Builder;
4292 Type *VecTy = State.VF.isScalar()
4293 ? VectorInit->getType()
4297 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4298 if (State.VF.isVector()) {
4300 auto *One = ConstantInt::get(IdxTy, 1);
4303 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF);
4304 auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4305 VectorInit = Builder.CreateInsertElement(
4311 Phi->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
4312 Phi->addIncoming(VectorInit, VectorPH);
4313 State.set(this, Phi);
4320 return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
4325#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4328 O << Indent << "FIRST-ORDER-RECURRENCE-PHI ";
4345 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4346 bool ScalarPHI = State.VF.isScalar() || isInLoop();
4347 Value *StartV = State.get(StartVPV, ScalarPHI);
4351 assert(State.CurrentParentLoop->getHeader() == HeaderBB &&
4352 "recipe must be in the vector loop header");
4357 Phi->addIncoming(StartV, VectorPH);
4360#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4363 O << Indent << "WIDEN-REDUCTION-PHI ";
4376 Instruction *VecPhi = State.Builder.CreatePHI(VecTy, 2, Name);
4377 State.set(this, VecPhi);
4380#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4383 O << Indent << "WIDEN-PHI ";
4393 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4396 State.Builder.CreatePHI(StartMask->getType(), 2, "active.lane.mask");
4397 Phi->addIncoming(StartMask, VectorPH);
4398 State.set(this, Phi);
4401#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4404 O << Indent << "ACTIVE-LANE-MASK-PHI ";
4412#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4415 O << Indent << "EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI ";
This file contains the declarations of the Vectorization Plan base classes:
static const uint32_t IV[8]
void printAsOperand(OutputBuffer &OB, Prec P=Prec::Default, bool StrictlyWorse=false) const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
static LLVM_ABI Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
LLVM Basic Block Representation.
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_ULT
unsigned less than
static LLVM_ABI StringRef getPredicateName(Predicate P)
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
This is the shared class of boolean and integer constants.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
This is an important base class in LLVM.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getScalable(ScalarTy MinVal)
static constexpr ElementCount getFixed(ScalarTy MinVal)
constexpr bool isScalar() const
Exactly one element.
Convenience struct for specifying and reasoning about fast-math flags.
LLVM_ABI void print(raw_ostream &O) const
Print fast-math flags to O.
void setAllowContract(bool B=true)
bool noSignedZeros() const
void setAllowReciprocal(bool B=true)
bool allowReciprocal() const
void setNoSignedZeros(bool B=true)
bool allowReassoc() const
Flag queries.
void setNoNaNs(bool B=true)
void setAllowReassoc(bool B=true)
Flag setters.
void setApproxFunc(bool B=true)
void setNoInfs(bool B=true)
bool allowContract() const
Class to represent function types.
Type * getParamType(unsigned i) const
Parameter type accessors.
bool willReturn() const
Determine if the function will return.
bool doesNotThrow() const
Determine if the function cannot unwind.
Type * getReturnType() const
Returns the type of the ret val.
Common base class shared among various IRBuilders.
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
LLVM_ABI Value * CreateVectorSplice(Value *V1, Value *V2, int64_t Imm, const Twine &Name="")
Return a vector splice intrinsic if using scalable vectors, otherwise return a shufflevector.
LLVM_ABI Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateFreeze(Value *V, const Twine &Name="")
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
LLVM_ABI CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
Value * CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name="", Instruction *MDFrom=nullptr)
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateCountTrailingZeroElems(Type *ResTy, Value *Mask, bool ZeroIsPoison=true, const Twine &Name="")
Create a call to llvm.experimental.cttz.elts.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
BranchInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getFalse()
Get the constant value for i1 false.
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
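A minimal sketch (hypothetical helper, not from this file) combining a few of the IRBuilder calls listed above to emit a clamped add:

  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  static Value *emitClampedAdd(IRBuilder<> &B, Value *X, Value *Y, Value *Limit) {
    Value *Sum = B.CreateAdd(X, Y, "sum", /*HasNUW=*/true, /*HasNSW=*/false);
    Value *Overflowed = B.CreateICmpUGE(Sum, Limit, "clamp.cond");
    return B.CreateSelect(Overflowed, Limit, Sum, "clamped");
  }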
static InstructionCost getInvalid(CostType Val=0)
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
InstTy * getInsertPos() const
void addMetadata(InstTy *NewInst) const
Add metadata (e.g.
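A sketch (with a hypothetical helper name) of walking an interleave group via the members above; getMember() returns nullptr for indices where the group has a gap:

  #include "llvm/ADT/STLFunctionalExtras.h"
  #include "llvm/Analysis/VectorUtils.h"
  #include "llvm/IR/Instruction.h"
  using namespace llvm;

  static void forEachGroupMember(const InterleaveGroup<Instruction> &IG,
                                 function_ref<void(Instruction *, unsigned)> Fn) {
    for (unsigned Idx = 0; Idx < IG.getFactor(); ++Idx)
      if (Instruction *Member = IG.getMember(Idx)) // skip gap positions
        Fn(Member, Idx);
  }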
This is an important class for using LLVM in a threaded context.
Represents a single loop in the control flow graph.
Information for memory intrinsic cost model.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
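A sketch, with hypothetical names, of creating a two-way merge PHI using the two PHINode members listed above:

  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  static PHINode *createMergePhi(Type *Ty, BasicBlock *MergeBB,
                                 Value *ThenV, BasicBlock *ThenBB,
                                 Value *ElseV, BasicBlock *ElseBB) {
    // Reserve space for the two incoming edges and insert at the top of MergeBB.
    PHINode *Phi = PHINode::Create(Ty, /*NumReservedValues=*/2, "merge",
                                   MergeBB->begin());
    Phi->addIncoming(ThenV, ThenBB);
    Phi->addIncoming(ElseV, ElseBB);
    return Phi;
  }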
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
static bool isSignedRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is a signed redux kind.
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
unsigned getOpcode() const
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isFindLastIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
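A small, hypothetical predicate built from the recurrence-kind queries above, assuming they are the static members of RecurrenceDescriptor from IVDescriptors.h:

  #include "llvm/Analysis/IVDescriptors.h"
  using namespace llvm;

  static bool isSignedMinMax(RecurKind Kind) {
    // e.g. true for RecurKind::SMin / RecurKind::SMax, false for UMin / UMax.
    return RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind) &&
           RecurrenceDescriptor::isSignedRecurrenceKind(Kind);
  }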
This class represents an analyzed expression in the program.
The main scalar evolution driver.
This class represents the LLVM 'select' instruction.
This class provides computation of slot numbers for LLVM Assembly writing.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
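A trivial usage sketch of the SmallVector operations listed above (the helper is illustrative only):

  #include "llvm/ADT/SmallVector.h"
  using namespace llvm;

  static SmallVector<int, 8> concatWithSentinel(const SmallVector<int, 8> &A,
                                                const SmallVector<int, 8> &B) {
    SmallVector<int, 8> Out;
    Out.append(A.begin(), A.end()); // append a whole range
    Out.append(B.begin(), B.end());
    Out.push_back(-1);              // copy a single element
    Out.emplace_back(0);            // construct an element in place
    return Out;
  }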
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isVoidTy() const
Return true if this is 'void'.
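A hypothetical helper showing how the Type queries above are typically combined: produce an integer type with twice the bit width of a given integer type.

  #include "llvm/IR/Type.h"
  #include <cassert>
  using namespace llvm;

  static IntegerType *getDoubleWidthIntTy(Type *Ty) {
    assert(Ty->isIntegerTy() && "expected an integer type");
    return Type::getIntNTy(Ty->getContext(), 2 * Ty->getScalarSizeInBits());
  }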
value_op_iterator value_op_end()
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
value_op_iterator value_op_begin()
void execute(VPTransformState &State) override
Generate the active lane mask phi of the vector loop.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
RecipeListTy & getRecipeList()
Returns a reference to the list of recipes.
void insert(VPRecipeBase *Recipe, iterator InsertPt)
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenMemoryRecipe.
VPValue * getIncomingValue(unsigned Idx) const
Return incoming value number Idx.
unsigned getNumIncomingValues() const
Return the number of incoming values, taking into account when normalized the first incoming value wi...
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
const VPBlocksTy & getPredecessors() const
void printAsOperand(raw_ostream &OS, bool PrintType=false) const
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPBranchOnMaskRecipe.
void execute(VPTransformState &State) override
Generate the extraction of the appropriate bit from the block mask and the conditional branch.
VPlan-based builder utility analogous to IRBuilder.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
This class augments a recipe with a set of VPValues defined by the recipe.
LLVM_ABI_FOR_TEST void dump() const
Dump the VPDef to stderr (for debugging).
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
ArrayRef< VPValue * > definedValues()
Returns an ArrayRef of the values defined by the VPDef.
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
VPValue * getVPValue(unsigned I)
Returns the VPValue with index I defined by the VPDef.
unsigned getVPDefID() const
VPValue * getStepValue() const
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStartValue() const
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void decompose()
Insert the recipes of the expression back into the VPlan, directly before the current recipe.
bool isSingleScalar() const
Returns true if the result of this VPExpressionRecipe is a single-scalar.
bool mayHaveSideEffects() const
Returns true if this expression contains recipes that may have side effects.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Compute the cost of this recipe either using a recipe's specialized implementation or using the legac...
bool mayReadOrWriteMemory() const
Returns true if this expression contains recipes that may read from or write to memory.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Produce a vectorized histogram operation.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPHistogramRecipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getMask() const
Return the mask operand if one was provided, or a null pointer if all lanes should be executed uncond...
Class to record and manage LLVM IR flags.
LLVM_ABI_FOR_TEST bool flagsValidForOpcode(unsigned Opcode) const
Returns true if the set flags are valid for Opcode.
CmpInst::Predicate CmpPredicate
void printFlags(raw_ostream &O) const
bool hasFastMathFlags() const
Returns true if the recipe has fast-math flags.
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const
CmpInst::Predicate getPredicate() const
bool hasNoSignedWrap() const
void intersectFlags(const VPIRFlags &Other)
Only keep flags also present in Other.
GEPNoWrapFlags getGEPNoWrapFlags() const
bool hasPredicate() const
Returns true if the recipe has a comparison predicate.
DisjointFlagsTy DisjointFlags
bool hasNoUnsignedWrap() const
NonNegFlagsTy NonNegFlags
void applyFlags(Instruction &I) const
Apply the IR flags to I.
Instruction & getInstruction() const
void extractLastLaneOfLastPartOfFirstOperand(VPBuilder &Builder)
Update the recipe's first operand to the last lane of the last part of the operand using Builder.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPIRInstruction.
VPIRInstruction(Instruction &I)
VPIRInstruction::create() should be used to create VPIRInstructions, as subclasses may need to be cre...
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the instruction.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPInstruction.
bool doesGeneratePerAllLanes() const
Returns true if this VPInstruction generates scalar values for all lanes.
@ ExtractLane
Extracts a single lane (first operand) from a set of vector operands.
@ ComputeAnyOfResult
Compute the final result of a AnyOf reduction with select(cmp(),x,y), where one of (x,...
@ WideIVStep
Scale the first operand (vector step) by the second operand (scalar-step).
@ ExtractPenultimateElement
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
@ Unpack
Extracts all lanes from its (non-scalable) vector operand.
@ FirstOrderRecurrenceSplice
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
@ BuildVector
Creates a fixed-width vector containing all operands.
@ BuildStructVector
Given operands of (the same) struct type, creates a struct of fixed- width vectors each containing a ...
@ VScale
Returns the value for vscale.
@ CanonicalIVIncrementForPart
@ CalculateTripCountMinusVF
bool opcodeMayReadOrWriteFromMemory() const
Returns true if the underlying opcode may read from or write to memory.
LLVM_DUMP_METHOD void dump() const
Print the VPInstruction to dbgs() (for debugging).
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the VPInstruction to O.
StringRef getName() const
Returns the symbolic name assigned to the VPInstruction.
unsigned getOpcode() const
VPInstruction(unsigned Opcode, ArrayRef< VPValue * > Operands, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
bool isVectorToScalar() const
Returns true if this VPInstruction produces a scalar value from a vector, e.g.
bool isSingleScalar() const
Returns true if this VPInstruction's operands are single scalars and the result is also a single scal...
void execute(VPTransformState &State) override
Generate the instruction.
bool usesFirstPartOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first part of operand Op.
bool needsMaskForGaps() const
Return true if the access needs a mask because of the gaps.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this recipe.
Instruction * getInsertPos() const
const InterleaveGroup< Instruction > * getInterleaveGroup() const
VPValue * getMask() const
Return the mask used by this recipe.
ArrayRef< VPValue * > getStoredValues() const
Return the VPValues stored by this interleave group.
VPValue * getAddr() const
Return the address accessed by this recipe.
VPValue * getEVL() const
The VPValue of the explicit vector length.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getNumStoreOperands() const override
Returns the number of stored operands of this interleave group.
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getNumStoreOperands() const override
Returns the number of stored operands of this interleave group.
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
In what follows, the term "input IR" refers to code that is fed into the vectorizer whereas the term ...
static VPLane getLastLaneForVF(const ElementCount &VF)
static VPLane getLaneFromEnd(const ElementCount &VF, unsigned Offset)
static VPLane getFirstLane()
virtual const VPRecipeBase * getAsRecipe() const =0
Return a VPRecipeBase* to the current object.
virtual unsigned getNumIncoming() const
Returns the number of incoming values, which also equals the number of incoming blocks.
void removeIncomingValueFor(VPBlockBase *IncomingBlock) const
Removes the incoming value for IncomingBlock, which must be a predecessor.
const VPBasicBlock * getIncomingBlock(unsigned Idx) const
Returns the incoming block with index Idx.
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
VPValue * getIncomingValue(unsigned Idx) const
Returns the incoming VPValue with index Idx.
void printPhiOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the recipe.
void execute(VPTransformState &State) override
Generates phi nodes for live-outs (from a replicate region) as needed to retain SSA form.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
bool mayReadFromMemory() const
Returns true if the recipe may read from memory.
bool mayHaveSideEffects() const
Returns true if the recipe may have side-effects.
virtual void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const =0
Each concrete VPRecipe prints itself, without printing common information, like debug info or metadat...
VPRegionBlock * getRegion()
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override final
Print the recipe, delegating to printRecipe().
bool isPhi() const
Returns true for PHI-like recipes.
bool mayWriteToMemory() const
Returns true if the recipe may write to memory.
virtual InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const
Compute the cost of this recipe either using a recipe's specialized implementation or using the legac...
VPBasicBlock * getParent()
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
void insertAfter(VPRecipeBase *InsertPos)
Insert an unlinked Recipe into a basic block immediately after the specified Recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this recipe, taking into account if the cost computation should be skipped and the...
bool isScalarCast() const
Return true if the recipe is a scalar cast.
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
void moveAfter(VPRecipeBase *MovePos)
Unlink this recipe from its current VPBasicBlock and insert it into the VPBasicBlock that MovePos liv...
VPRecipeBase(const unsigned char SC, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown())
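A rough sketch of the recipe insertion/moving API above; this assumes the internal VPlan.h header and is illustrative only:

  // Assumes #include "VPlan.h" (an internal header of the vectorizer).
  static void moveRecipeBefore(llvm::VPRecipeBase *R, llvm::VPRecipeBase *Anchor) {
    if (R->getParent())
      R->removeFromParent(); // unlink from the current block without deleting
    R->insertBefore(Anchor); // re-insert immediately before Anchor
  }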
void execute(VPTransformState &State) override
Generate the reduction in the loop.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getEVL() const
The VPValue of the explicit vector length.
unsigned getVFScaleFactor() const
Get the factor that the VF of this recipe's output should be scaled by, or 1 if it isn't scaled.
bool isInLoop() const
Returns true if the phi is part of an in-loop reduction.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool isConditional() const
Return true if the in-loop reduction is conditional.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of VPReductionRecipe.
VPValue * getVecOp() const
The VPValue of the vector value to be reduced.
VPValue * getCondOp() const
The VPValue of the condition for the block.
RecurKind getRecurrenceKind() const
Return the recurrence kind for the in-loop reduction.
bool isPartialReduction() const
Returns true if the reduction outputs a vector with a scaled down VF.
VPValue * getChainOp() const
The VPValue of the scalar Chain being accumulated.
bool isInLoop() const
Returns true if the reduction is in-loop.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the reduction in the loop.
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
bool isReplicator() const
An indicator whether this region is to generate multiple replicated instances of output IR correspond...
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
void execute(VPTransformState &State) override
Generate replicas of the desired Ingredient.
bool isSingleScalar() const
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPReplicateRecipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getOpcode() const
bool shouldPack() const
Returns true if the recipe is used by a widened recipe via an intervening VPPredInstPHIRecipe.
VPValue * getStepValue() const
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the scalarized versions of the phi node as needed by their users.
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
LLVM_ABI_FOR_TEST LLVM_DUMP_METHOD void dump() const
Print this VPSingleDefRecipe to dbgs() (for debugging).
VPSingleDefRecipe(const unsigned char SC, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown())
This class can be used to assign names to VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
Helper to access the operand that contains the unroll part for this recipe after unrolling.
VPValue * getUnrollPartOperand(const VPUser &U) const
Return the VPValue operand containing the unroll part or null if there is no such operand.
unsigned getUnrollPart(const VPUser &U) const
Return the unroll part.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
void printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the operands to O.
void setOperand(unsigned I, VPValue *New)
unsigned getNumOperands() const
operand_iterator op_begin()
VPValue * getOperand(unsigned N) const
virtual bool usesFirstLaneOnly(const VPValue *Op) const
Returns true if the VPUser only uses the first lane of operand Op.
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into,...
bool isDefinedOutsideLoopRegions() const
Returns true if the VPValue is defined outside any loop.
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
friend class VPExpressionRecipe
void printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const
bool hasMoreThanOneUniqueUser() const
Returns true if the value has more than one unique user.
Value * getLiveInIRValue() const
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
VPValue(const unsigned char SC, Value *UV=nullptr, VPDef *Def=nullptr)
void replaceAllUsesWith(VPValue *New)
user_iterator user_begin()
unsigned getNumUsers() const
bool isLiveIn() const
Returns true if this VPValue is a live-in, i.e. defined outside the VPlan.
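A hypothetical helper over the VPValue API above (again assuming the internal VPlan headers): replace all uses of a value only when it is actually defined by a recipe in the plan.

  // Assumes the internal VPlan / VPlanValue headers of the vectorizer.
  static bool replaceIfRecipeDefined(llvm::VPValue *Old, llvm::VPValue *New) {
    if (Old->isLiveIn() || !Old->getDefiningRecipe())
      return false; // live-ins are defined outside the VPlan; leave them alone
    Old->replaceAllUsesWith(New);
    return true;
  }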
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getSourceElementType() const
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
Function * getCalledScalarFunction() const
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenCallRecipe.
void execute(VPTransformState &State) override
Produce a widened version of the call instruction.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate a canonical vector induction variable of the vector loop, with start = {<Part*VF,...
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getResultType() const
Returns the result type of the cast.
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Produce widened copies of the cast.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenCastRecipe.
void execute(VPTransformState &State) override
Generate the gep nodes.
Type * getSourceElementType() const
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
VPValue * getStepValue()
Returns the step value of the induction.
TruncInst * getTruncInst()
Returns the first defined value as a TruncInst, if it is one, or nullptr otherwise.
Type * getScalarType() const
Returns the scalar type of the induction.
bool isCanonical() const
Returns true if the induction is canonical, i.e.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Intrinsic::ID getVectorIntrinsicID() const
Return the ID of the intrinsic.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
StringRef getIntrinsicName() const
Return the name of the intrinsic as a string.
LLVM_ABI_FOR_TEST bool usesFirstLaneOnly(const VPValue *Op) const override
Returns true if the VPUser only uses the first lane of operand Op.
Type * getResultType() const
Return the scalar return type of the intrinsic.
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Produce a widened version of the vector intrinsic.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this vector intrinsic.
bool IsMasked
Whether the memory access is masked.
bool Reverse
Whether the consecutive accessed addresses are in reverse order.
bool isConsecutive() const
Return whether the loaded-from / stored-to addresses are consecutive.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenMemoryRecipe.
bool Consecutive
Whether the accessed addresses are consecutive.
VPValue * getMask() const
Return the mask used by this recipe.
Align Alignment
Alignment information for this memory access.
VPValue * getAddr() const
Return the address accessed by this recipe.
bool isReverse() const
Return whether the consecutive loaded/stored addresses are in reverse order.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool onlyScalarsGenerated(bool IsScalable)
Returns true if only scalar values will be generated.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenRecipe.
void execute(VPTransformState &State) override
Produce a widened instruction using the opcode and operands of the recipe, processing State....
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
void mutateType(Type *Ty)
Mutate the type of this Value to be of the specified type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
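A minimal sketch (hypothetical helper) combining VectorType::get with the ElementCount arithmetic above to halve a vector type's element count:

  #include "llvm/IR/DerivedTypes.h"
  #include <cassert>
  using namespace llvm;

  static VectorType *getHalfWidthVectorTy(VectorType *VTy) {
    ElementCount EC = VTy->getElementCount();
    assert(EC.getKnownMinValue() % 2 == 0 && "expected an even element count");
    // Works for both fixed (<4 x i32> -> <2 x i32>) and scalable vectors.
    return VectorType::get(VTy->getElementType(), EC.divideCoefficientBy(2));
  }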
const ParentTy * getParent() const
self_iterator getIterator()
typename base_list_type::iterator iterator
iterator erase(iterator where)
pointer remove(iterator &IT)
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
LLVM_ABI Intrinsic::ID getDeinterleaveIntrinsicID(unsigned Factor)
Returns the corresponding llvm.vector.deinterleaveN intrinsic for factor N.
LLVM_ABI StringRef getBaseName(ID id)
Return the LLVM name for an intrinsic, without encoded types for overloading, such as "llvm....
bool match(Val *V, const Pattern &P)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
GEPLikeRecipe_match< Op0_t, Op1_t > m_GetElementPtr(const Op0_t &Op0, const Op1_t &Op1)
class_match< VPValue > m_VPValue()
Match an arbitrary VPValue and ignore it.
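A sketch of the IR-level pattern matchers listed above (the VPlan-specific m_VPValue / m_GetElementPtr variants live in the vectorizer's VPlanPatternMatch.h and follow the same style):

  #include "llvm/IR/PatternMatch.h"
  using namespace llvm;
  using namespace llvm::PatternMatch;

  static bool isLogicalAndOfCompares(Value *V) {
    // Matches both 'and i1 %a, %b' and the 'select i1 %a, i1 %b, i1 false' form.
    return match(V, m_LogicalAnd(m_Cmp(), m_Cmp()));
  }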
NodeAddr< DefNode * > Def
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or on...
bool onlyFirstPartUsed(const VPValue *Def)
Returns true if only the first part of Def is used.
bool onlyFirstLaneUsed(const VPValue *Def)
Returns true if only the first lane of Def is used.
bool onlyScalarValuesUsed(const VPValue *Def)
Returns true if only scalar values of Def are used by all users.
const SCEV * getSCEVExprForVPValue(const VPValue *V, ScalarEvolution &SE, const Loop *L=nullptr)
Return the SCEV expression for V.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ABI Value * createSimpleReduction(IRBuilderBase &B, Value *Src, RecurKind RdxKind)
Create a reduction of the given vector.
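A minimal sketch of createSimpleReduction (declared in llvm/Transforms/Utils/LoopUtils.h; path assumed): reduce a vector value with an integer add recurrence.

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/Transforms/Utils/LoopUtils.h" // assumed location of the helper
  using namespace llvm;

  static Value *emitAddReduction(IRBuilderBase &B, Value *VecSrc) {
    return createSimpleReduction(B, VecSrc, RecurKind::Add);
  }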
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
LLVM_ABI Value * createFindLastIVReduction(IRBuilderBase &B, Value *Src, RecurKind RdxKind, Value *Start, Value *Sentinel)
Create a reduction of the given vector Src for a reduction of the kind RecurKind::FindLastIV.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
void interleaveComma(const Container &c, StreamT &os, UnaryFunctor each_fn)
auto cast_or_null(const Y &Val)
LLVM_ABI Value * concatenateVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vecs)
Concatenate a list of vectors.
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
bool isa_and_nonnull(const Y &Val)
LLVM_ABI Value * createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left, Value *Right)
Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
auto dyn_cast_or_null(const Y &Val)
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Constant * createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF, const InterleaveGroup< Instruction > &Group)
Create a mask that filters the members of an interleave group where there are gaps.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
auto reverse(ContainerTy &&C)
LLVM_ABI llvm::SmallVector< int, 16 > createReplicatedMask(unsigned ReplicationFactor, unsigned VF)
Create a mask with replicated elements.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elem...
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
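For reference, a small sketch of what the three mask helpers above produce (the values in the comments follow their documented behavior; the declarations live in llvm/Analysis/VectorUtils.h):

  #include "llvm/Analysis/VectorUtils.h"
  using namespace llvm;

  static void demoShuffleMasks() {
    SmallVector<int, 16> Stride = createStrideMask(0, 2, 4);  // {0, 2, 4, 6}
    SmallVector<int, 16> Repl   = createReplicatedMask(2, 4); // {0, 0, 1, 1, 2, 2, 3, 3}
    SmallVector<int, 16> Ileave = createInterleaveMask(4, 2); // {0, 4, 1, 5, 2, 6, 3, 7}
    (void)Stride; (void)Repl; (void)Ileave;
  }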
RecurKind
These are the kinds of recurrences that we support.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Sub
Subtraction of integers.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic has a scalar operand.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity for the @llvm.vector....
DWARFExpression::Operation Op
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
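A hedged sketch of the VF helpers above (getRuntimeVF and createStepForVF are declared alongside the other vectorizer utilities; the exact header is assumed here): materialize VF and 3 * VF as IR values.

  #include "llvm/IR/IRBuilder.h"
  // Plus the header declaring getRuntimeVF / createStepForVF (assumed).
  using namespace llvm;

  static Value *emitThreeTimesVF(IRBuilderBase &B, ElementCount VF) {
    Type *I64 = B.getInt64Ty();
    Value *RuntimeVF = getRuntimeVF(B, I64, VF);    // constant, or vscale-based
    (void)RuntimeVF;
    return createStepForVF(B, I64, VF, /*Step=*/3); // 3 * VF
  }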
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
LLVM_ABI Value * createOrderedReduction(IRBuilderBase &B, RecurKind RdxKind, Value *Src, Value *Start)
Create an ordered reduction intrinsic using the given recurrence kind RdxKind.
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
LLVM_ABI Value * createAnyOfReduction(IRBuilderBase &B, Value *Src, Value *InitVal, PHINode *OrigPhi)
Create a reduction of the given vector Src for a reduction of kind RecurKind::AnyOf.
LLVM_ABI bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic is overloaded on the type of the operand at index OpdI...
This struct is a compact representation of a valid (non-zero power of two) alignment.
Struct to hold various analysis needed for cost computations.
void execute(VPTransformState &State) override
Generate the phi nodes.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this first-order recurrence phi recipe.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
An overlay for VPIRInstructions wrapping PHI nodes enabling convenient use cast/dyn_cast/isa and exec...
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
void execute(VPTransformState &State) override
Generate the instruction.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
A pure-virtual common base class for recipes defining a single VPValue and using IR flags.
InstructionCost getCostForRecipeWithOpcode(unsigned Opcode, ElementCount VF, VPCostContext &Ctx) const
Compute the cost for this recipe for VF, using Opcode and Ctx.
VPRecipeWithIRFlags(const unsigned char SC, ArrayRef< VPValue * > Operands, const VPIRFlags &Flags, DebugLoc DL=DebugLoc::getUnknown())
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Generate the wide load or gather.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenLoadEVLRecipe.
VPValue * getEVL() const
Return the EVL operand.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate a wide load or gather.
VPValue * getCond() const
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenSelectRecipe.
void execute(VPTransformState &State) override
Produce a widened version of the select instruction.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStoredValue() const
Return the value stored by this recipe.
LLVM_ABI_FOR_TEST void execute(VPTransformState &State) override
Generate the wide store or scatter.
LLVM_ABI_FOR_TEST void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenStoreEVLRecipe.
VPValue * getEVL() const
Return the EVL operand.
void execute(VPTransformState &State) override
Generate a wide store or scatter.
void printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStoredValue() const
Return the value stored by this recipe.