57 if (!VPBB->getParent())
60 auto EndIter = Term ? Term->getIterator() : VPBB->end();
65 VPValue *VPV = Ingredient.getVPSingleValue();
81 *Load, Ingredient.getOperand(0),
nullptr ,
82 false , *VPI, Ingredient.getDebugLoc());
85 *Store, Ingredient.getOperand(1), Ingredient.getOperand(0),
86 nullptr ,
false , *VPI,
87 Ingredient.getDebugLoc());
90 Ingredient.getDebugLoc());
102 if (VectorID == Intrinsic::experimental_noalias_scope_decl)
107 if (VectorID == Intrinsic::assume ||
108 VectorID == Intrinsic::lifetime_end ||
109 VectorID == Intrinsic::lifetime_start ||
110 VectorID == Intrinsic::sideeffect ||
111 VectorID == Intrinsic::pseudoprobe) {
116 const bool IsSingleScalar = VectorID != Intrinsic::assume &&
117 VectorID != Intrinsic::pseudoprobe;
121 Ingredient.getDebugLoc());
124 *CI, VectorID,
drop_end(Ingredient.operands()), CI->getType(),
125 VPIRFlags(*CI), *VPI, CI->getDebugLoc());
129 CI->getOpcode(), Ingredient.getOperand(0), CI->getType(), CI,
133 *VPI, Ingredient.getDebugLoc());
137 "inductions must be created earlier");
146 "Only recpies with zero or one defined values expected");
147 Ingredient.eraseFromParent();
164 if (
A->getOpcode() != Instruction::Store ||
165 B->getOpcode() != Instruction::Store)
175 const APInt *Distance;
181 Type *TyA = TypeInfo.inferScalarType(
A->getOperand(0));
183 Type *TyB = TypeInfo.inferScalarType(
B->getOperand(0));
189 uint64_t MaxStoreSize = std::max(SizeA, SizeB);
191 auto VFs =
B->getParent()->getPlan()->vectorFactors();
195 return Distance->
abs().
uge(
203 : ExcludeRecipes(ExcludeRecipes), GroupLeader(GroupLeader), PSE(PSE),
204 L(L), TypeInfo(TypeInfo) {}
211 return ExcludeRecipes.contains(&R) ||
212 (Store && isNoAliasViaDistance(Store, &GroupLeader));
225 std::optional<SinkStoreInfo> SinkInfo = {}) {
226 bool CheckReads = SinkInfo.has_value();
233 if (SinkInfo && SinkInfo->shouldSkip(R))
237 if (!
R.mayWriteToMemory() && !(CheckReads &&
R.mayReadFromMemory()))
255template <
unsigned Opcode>
260 static_assert(Opcode == Instruction::Load || Opcode == Instruction::Store,
261 "Only Load and Store opcodes supported");
262 constexpr bool IsLoad = (Opcode == Instruction::Load);
269 if (!RepR || RepR->getOpcode() != Opcode || !FilterFn(RepR))
273 VPValue *Addr = RepR->getOperand(IsLoad ? 0 : 1);
276 RecipesByAddress[AddrSCEV].push_back(RepR);
281 for (
auto &Group :
Groups) {
296 auto InsertIfValidSinkCandidate = [ScalarVFOnly, &WorkList](
308 if (Candidate->getParent() == SinkTo ||
313 if (!ScalarVFOnly && RepR->isSingleScalar())
316 WorkList.
insert({SinkTo, Candidate});
328 for (
auto &Recipe : *VPBB)
330 InsertIfValidSinkCandidate(VPBB,
Op);
334 for (
unsigned I = 0;
I != WorkList.
size(); ++
I) {
337 std::tie(SinkTo, SinkCandidate) = WorkList[
I];
342 auto UsersOutsideSinkTo =
344 return cast<VPRecipeBase>(U)->getParent() != SinkTo;
346 if (
any_of(UsersOutsideSinkTo, [SinkCandidate](
VPUser *U) {
347 return !U->usesFirstLaneOnly(SinkCandidate);
350 bool NeedsDuplicating = !UsersOutsideSinkTo.empty();
352 if (NeedsDuplicating) {
356 if (
auto *SinkCandidateRepR =
362 nullptr , *SinkCandidateRepR,
366 Clone = SinkCandidate->
clone();
376 InsertIfValidSinkCandidate(SinkTo,
Op);
386 if (!EntryBB || EntryBB->size() != 1 ||
396 if (EntryBB->getNumSuccessors() != 2)
401 if (!Succ0 || !Succ1)
404 if (Succ0->getNumSuccessors() + Succ1->getNumSuccessors() != 1)
406 if (Succ0->getSingleSuccessor() == Succ1)
408 if (Succ1->getSingleSuccessor() == Succ0)
425 if (!Region1->isReplicator())
427 auto *MiddleBasicBlock =
429 if (!MiddleBasicBlock || !MiddleBasicBlock->empty())
434 if (!Region2 || !Region2->isReplicator())
439 if (!Mask1 || Mask1 != Mask2)
442 assert(Mask1 && Mask2 &&
"both region must have conditions");
448 if (TransformedRegions.
contains(Region1))
455 if (!Then1 || !Then2)
475 VPValue *Phi1ToMoveV = Phi1ToMove.getVPSingleValue();
481 if (Phi1ToMove.getVPSingleValue()->getNumUsers() == 0) {
482 Phi1ToMove.eraseFromParent();
485 Phi1ToMove.moveBefore(*Merge2, Merge2->begin());
499 TransformedRegions.
insert(Region1);
502 return !TransformedRegions.
empty();
510 std::string RegionName = (
Twine(
"pred.") + Instr->getOpcodeName()).str();
511 assert(Instr->getParent() &&
"Predicated instruction not in any basic block");
512 auto *BlockInMask = PredRecipe->
getMask();
533 Region->setParent(ParentRegion);
539 RecipeWithoutMask->getDebugLoc());
540 Exiting->appendRecipe(PHIRecipe);
553 if (RepR->isPredicated())
572 if (ParentRegion && ParentRegion->
getExiting() == CurrentBlock)
584 if (!VPBB->getParent())
588 if (!PredVPBB || PredVPBB->getNumSuccessors() != 1 ||
597 R.moveBefore(*PredVPBB, PredVPBB->
end());
599 auto *ParentRegion = VPBB->getParent();
600 if (ParentRegion && ParentRegion->getExiting() == VPBB)
601 ParentRegion->setExiting(PredVPBB);
605 return !WorkList.
empty();
612 bool ShouldSimplify =
true;
613 while (ShouldSimplify) {
629 if (!
IV ||
IV->getTruncInst())
644 for (
auto *U : FindMyCast->
users()) {
646 if (UserCast && UserCast->getUnderlyingValue() == IRCast) {
647 FoundUserCast = UserCast;
654 FindMyCast = FoundUserCast;
656 if (FindMyCast !=
IV)
671 Builder.createDerivedIV(Kind, FPBinOp, StartV, CanonicalIV, Step);
681 BaseIV = Builder.createScalarCast(Instruction::Trunc, BaseIV, TruncTy,
DL);
687 if (ResultTy != StepTy) {
694 Builder.setInsertPoint(VecPreheader);
695 Step = Builder.createScalarCast(Instruction::Trunc, Step, ResultTy,
DL);
697 return Builder.createScalarIVSteps(InductionOpcode, FPBinOp, BaseIV, Step,
723 WideCanIV->getDebugLoc(), Builder));
724 WideCanIV->eraseFromParent();
736 if (!WidenIV || !WidenIV->isCanonical())
740 WidenIV->dropPoisonGeneratingFlags();
741 WideCanIV->replaceAllUsesWith(WidenIV);
742 WideCanIV->eraseFromParent();
751 if (PHICost > BroadcastCost)
760 unsigned RegClass =
TTI.getRegisterClassForType(
true, VecTy);
774 WideCanIV->getDebugLoc());
775 NewWideIV->insertBefore(&*Header->getFirstNonPhi());
776 WideCanIV->replaceAllUsesWith(NewWideIV);
777 WideCanIV->eraseFromParent();
785 bool IsConditionalAssume = RepR && RepR->isPredicated() &&
787 if (IsConditionalAssume)
790 if (R.mayHaveSideEffects())
794 return all_of(R.definedValues(),
795 [](
VPValue *V) { return V->getNumUsers() == 0; });
815 VPUser *PhiUser = PhiR->getSingleUser();
821 PhiR->replaceAllUsesWith(Start);
822 PhiR->eraseFromParent();
830 for (
unsigned I = 0;
I !=
Users.size(); ++
I) {
833 Users.insert_range(V->users());
835 return Users.takeVector();
849 nullptr, StartV, StepV, PtrIV->
getDebugLoc(), Builder);
886 Def->getNumUsers() == 0 || !Def->getUnderlyingValue() ||
887 (RepR && (RepR->isSingleScalar() || RepR->isPredicated())))
900 Def->operands(),
true,
902 Clone->insertAfter(Def);
903 Def->replaceAllUsesWith(Clone);
914 PtrIV->replaceAllUsesWith(PtrAdd);
921 if (HasOnlyVectorVFs &&
none_of(WideIV->users(), [WideIV](
VPUser *U) {
922 return U->usesScalars(WideIV);
928 Plan,
ID.getKind(),
ID.getInductionOpcode(),
930 WideIV->getTruncInst(), WideIV->getStartValue(), WideIV->getStepValue(),
931 WideIV->getDebugLoc(), Builder);
934 if (!HasOnlyVectorVFs) {
936 "plans containing a scalar VF cannot also include scalable VFs");
937 WideIV->replaceAllUsesWith(Steps);
940 WideIV->replaceUsesWithIf(Steps,
941 [WideIV, HasScalableVF](
VPUser &U,
unsigned) {
943 return U.usesFirstLaneOnly(WideIV);
944 return U.usesScalars(WideIV);
960 return (IntOrFpIV && IntOrFpIV->getTruncInst()) ? nullptr : WideIV;
965 if (!Def || Def->getNumOperands() != 2)
973 auto IsWideIVInc = [&]() {
974 auto &
ID = WideIV->getInductionDescriptor();
977 VPValue *IVStep = WideIV->getStepValue();
978 switch (
ID.getInductionOpcode()) {
979 case Instruction::Add:
981 case Instruction::FAdd:
983 case Instruction::FSub:
986 case Instruction::Sub: {
1006 return IsWideIVInc() ? WideIV :
nullptr;
1025 if (WideIntOrFp && WideIntOrFp->getTruncInst())
1036 VPValue *FirstActiveLane =
B.createFirstActiveLane(Mask,
DL);
1038 FirstActiveLane =
B.createScalarZExtOrTrunc(FirstActiveLane, CanonicalIVType,
1039 FirstActiveLaneType,
DL);
1040 VPValue *EndValue =
B.createAdd(CanonicalIV, FirstActiveLane,
DL);
1045 if (Incoming != WideIV) {
1047 EndValue =
B.createAdd(EndValue, One,
DL);
1050 if (!WideIntOrFp || !WideIntOrFp->isCanonical()) {
1052 VPIRValue *Start = WideIV->getStartValue();
1053 VPValue *Step = WideIV->getStepValue();
1054 EndValue =
B.createDerivedIV(
1056 Start, EndValue, Step);
1071 if (WideIntOrFp && WideIntOrFp->getTruncInst())
1078 if (!WideIntOrFp || !WideIntOrFp->isCanonical()) {
1081 Start, VectorTC, Step);
1110 assert(EndValue &&
"Must have computed the end value up front");
1115 if (Incoming != WideIV)
1127 auto *Zero = Plan.
getZero(StepTy);
1128 return B.createPtrAdd(EndValue,
B.createSub(Zero, Step),
1133 return B.createNaryOp(
1134 ID.getInductionBinOp()->getOpcode() == Instruction::FAdd
1136 : Instruction::FAdd,
1137 {EndValue, Step}, {ID.getInductionBinOp()->getFastMathFlags()});
1149 VPBuilder VectorPHBuilder(VectorPH, VectorPH->begin());
1158 WideIV, VectorPHBuilder, TypeInfo, ResumeTC))
1159 EndValues[WideIV] = EndValue;
1169 R.getVPSingleValue()->replaceAllUsesWith(EndValue);
1170 R.eraseFromParent();
1179 for (
auto [Idx, PredVPBB] :
enumerate(ExitVPBB->getPredecessors())) {
1181 if (PredVPBB == MiddleVPBB)
1183 Plan, TypeInfo, ExitIRI->getOperand(Idx), EndValues, PSE);
1186 Plan, TypeInfo, ExitIRI->getOperand(Idx), PSE);
1188 ExitIRI->setOperand(Idx, Escape);
1205 const auto &[V, Inserted] = SCEV2VPV.
try_emplace(ExpR->getSCEV(), ExpR);
1208 ExpR->replaceAllUsesWith(V->second);
1209 ExpR->eraseFromParent();
1218 while (!WorkList.
empty()) {
1220 if (!Seen.
insert(Cur).second)
1228 R->eraseFromParent();
1235static std::optional<std::pair<bool, unsigned>>
1238 std::optional<std::pair<bool, unsigned>>>(R)
1241 [](
auto *
I) {
return std::make_pair(
false,
I->getOpcode()); })
1243 return std::make_pair(
true,
I->getVectorIntrinsicID());
1245 .Case<VPVectorPointerRecipe, VPPredInstPHIRecipe, VPScalarIVStepsRecipe>(
1251 I->getVPRecipeID());
1253 .
Default([](
auto *) {
return std::nullopt; });
1271 Value *V =
Op->getUnderlyingValue();
1277 auto FoldToIRValue = [&]() ->
Value * {
1279 if (OpcodeOrIID->first) {
1280 if (R.getNumOperands() != 2)
1282 unsigned ID = OpcodeOrIID->second;
1283 return Folder.FoldBinaryIntrinsic(
ID,
Ops[0],
Ops[1],
1286 unsigned Opcode = OpcodeOrIID->second;
1295 return Folder.FoldSelect(
Ops[0],
Ops[1],
1298 return Folder.FoldBinOp(Instruction::BinaryOps::Xor,
Ops[0],
1300 case Instruction::Select:
1301 return Folder.FoldSelect(
Ops[0],
Ops[1],
Ops[2]);
1302 case Instruction::ICmp:
1303 case Instruction::FCmp:
1306 case Instruction::GetElementPtr: {
1309 return Folder.FoldGEP(
GEP->getSourceElementType(),
Ops[0],
1319 case Instruction::ExtractElement:
1326 if (
Value *V = FoldToIRValue())
1327 return R.getParent()->getPlan()->getOrAddLiveIn(V);
1333 VPlan *Plan = Def->getParent()->getPlan();
1339 return Def->replaceAllUsesWith(V);
1345 PredPHI->replaceAllUsesWith(
Op);
1358 bool CanCreateNewRecipe =
1365 if (TruncTy == ATy) {
1366 Def->replaceAllUsesWith(
A);
1375 : Instruction::ZExt;
1378 if (
auto *UnderlyingExt = Def->getOperand(0)->getUnderlyingValue()) {
1380 Ext->setUnderlyingValue(UnderlyingExt);
1382 Def->replaceAllUsesWith(Ext);
1384 auto *Trunc = Builder.createWidenCast(Instruction::Trunc,
A, TruncTy);
1385 Def->replaceAllUsesWith(Trunc);
1393 for (
VPUser *U :
A->users()) {
1395 for (
VPValue *VPV : R->definedValues())
1409 Def->replaceAllUsesWith(
X);
1410 Def->eraseFromParent();
1416 return Def->replaceAllUsesWith(
1421 return Def->replaceAllUsesWith(
X);
1425 return Def->replaceAllUsesWith(
1430 return Def->replaceAllUsesWith(
1435 return Def->replaceAllUsesWith(
X);
1439 return Def->replaceAllUsesWith(Plan->
getFalse());
1443 return Def->replaceAllUsesWith(
X);
1446 if (CanCreateNewRecipe &&
1451 (!Def->getOperand(0)->hasMoreThanOneUniqueUser() ||
1452 !Def->getOperand(1)->hasMoreThanOneUniqueUser()))
1453 return Def->replaceAllUsesWith(
1454 Builder.createLogicalAnd(
X, Builder.createOr(
Y, Z)));
1459 return Def->replaceAllUsesWith(Def->getOperand(1));
1464 return Def->replaceAllUsesWith(Builder.createLogicalAnd(
X,
Y));
1468 return Def->replaceAllUsesWith(Plan->
getFalse());
1471 return Def->replaceAllUsesWith(
X);
1475 if (CanCreateNewRecipe &&
1477 return Def->replaceAllUsesWith(Builder.createNot(
C));
1481 Def->setOperand(0,
C);
1482 Def->setOperand(1,
Y);
1483 Def->setOperand(2,
X);
1488 return Def->replaceAllUsesWith(
A);
1491 return Def->replaceAllUsesWith(
A);
1494 return Def->replaceAllUsesWith(
1501 return Def->replaceAllUsesWith(
1503 Def->getDebugLoc(),
"", NW));
1506 if (CanCreateNewRecipe &&
1514 ->hasNoSignedWrap()};
1515 return Def->replaceAllUsesWith(
1516 Builder.createSub(
X,
Y, Def->getDebugLoc(),
"", NW));
1522 return Def->replaceAllUsesWith(Builder.createNaryOp(
1524 {A, Plan->getConstantInt(APC->getBitWidth(), APC->exactLogBase2())},
1529 return Def->replaceAllUsesWith(Builder.createNaryOp(
1531 {A, Plan->getConstantInt(APC->getBitWidth(), APC->exactLogBase2())},
1536 return Def->replaceAllUsesWith(
A);
1551 R->setOperand(1,
Y);
1552 R->setOperand(2,
X);
1556 R->replaceAllUsesWith(Cmp);
1561 if (!Cmp->getDebugLoc() && Def->getDebugLoc())
1562 Cmp->setDebugLoc(Def->getDebugLoc());
1574 if (
Op->getNumUsers() > 1 ||
1578 }
else if (!UnpairedCmp) {
1579 UnpairedCmp =
Op->getDefiningRecipe();
1583 UnpairedCmp =
nullptr;
1590 if (NewOps.
size() < Def->getNumOperands()) {
1592 return Def->replaceAllUsesWith(NewAnyOf);
1599 if (CanCreateNewRecipe &&
1605 return Def->replaceAllUsesWith(NewCmp);
1613 return Def->replaceAllUsesWith(Def->getOperand(1));
1619 X = Builder.createWidenCast(Instruction::Trunc,
X, WideStepTy);
1620 Def->replaceAllUsesWith(
X);
1630 Def->setOperand(1, Def->getOperand(0));
1631 Def->setOperand(0,
Y);
1638 return Def->replaceAllUsesWith(Def->getOperand(0));
1644 Def->replaceAllUsesWith(
1645 BuildVector->getOperand(BuildVector->getNumOperands() - 1));
1649 return Def->replaceAllUsesWith(
A);
1655 Def->replaceAllUsesWith(
1656 BuildVector->getOperand(BuildVector->getNumOperands() - 2));
1663 Def->replaceAllUsesWith(BuildVector->getOperand(Idx));
1668 Def->replaceAllUsesWith(
1678 "broadcast operand must be single-scalar");
1679 Def->setOperand(0,
C);
1684 if (Def->getNumOperands() == 1) {
1685 Def->replaceAllUsesWith(Def->getOperand(0));
1690 Phi->replaceAllUsesWith(Phi->getOperand(0));
1696 if (Def->getNumOperands() == 1 &&
1698 return Def->replaceAllUsesWith(IRV);
1711 return Def->replaceAllUsesWith(
A);
1714 Def->replaceAllUsesWith(Builder.createNaryOp(
1715 Instruction::ExtractElement, {A, LaneToExtract}, Def->getDebugLoc()));
1729 auto *IVInc = Def->getOperand(0);
1730 if (IVInc->getNumUsers() == 2) {
1735 if (Phi->getNumUsers() == 1 || (Phi->getNumUsers() == 2 && Inc)) {
1736 Def->replaceAllUsesWith(IVInc);
1738 Inc->replaceAllUsesWith(Phi);
1739 Phi->setOperand(0,
Y);
1755 Steps->replaceAllUsesWith(Steps->getOperand(0));
1763 Def->replaceUsesWithIf(StartV, [](
const VPUser &U,
unsigned Idx) {
1765 return PhiR && PhiR->isInLoop();
1771 Def->replaceAllUsesWith(
A);
1777 return U->usesScalars(A) || Def == U;
1779 return Def->replaceAllUsesWith(
A);
1783 return Def->replaceAllUsesWith(
A);
1810 while (!Worklist.
empty()) {
1819 R->replaceAllUsesWith(
1820 Builder.createLogicalAnd(HeaderMask, Builder.createLogicalAnd(
X,
Y)));
1834 if (RepR && (RepR->isSingleScalar() || RepR->isPredicated()))
1838 if (RepR && RepR->getOpcode() == Instruction::Store &&
1841 RepOrWidenR->getUnderlyingInstr(), RepOrWidenR->operands(),
1842 true ,
nullptr , *RepR ,
1843 *RepR , RepR->getDebugLoc());
1844 Clone->insertBefore(RepOrWidenR);
1846 VPValue *ExtractOp = Clone->getOperand(0);
1852 Clone->setOperand(0, ExtractOp);
1853 RepR->eraseFromParent();
1862 auto IntroducesBCastOf = [](
const VPValue *
Op) {
1871 return !U->usesScalars(
Op);
1875 if (
any_of(RepOrWidenR->users(), IntroducesBCastOf(RepOrWidenR)) &&
1878 make_filter_range(Op->users(), not_equal_to(RepOrWidenR)),
1879 IntroducesBCastOf(Op)))
1883 auto *IRV = dyn_cast<VPIRValue>(Op);
1884 bool LiveInNeedsBroadcast = IRV && !isa<Constant>(IRV->getValue());
1885 auto *OpR = dyn_cast<VPReplicateRecipe>(Op);
1886 return LiveInNeedsBroadcast || (OpR && OpR->isSingleScalar());
1891 RepOrWidenR->getUnderlyingInstr(), RepOrWidenR->operands(),
1892 true ,
nullptr, *RepOrWidenR);
1893 Clone->insertBefore(RepOrWidenR);
1894 RepOrWidenR->replaceAllUsesWith(Clone);
1896 RepOrWidenR->eraseFromParent();
1932 if (Blend->isNormalized() || !
match(Blend->getMask(0),
m_False()))
1933 UniqueValues.
insert(Blend->getIncomingValue(0));
1934 for (
unsigned I = 1;
I != Blend->getNumIncomingValues(); ++
I)
1936 UniqueValues.
insert(Blend->getIncomingValue(
I));
1938 if (UniqueValues.
size() == 1) {
1939 Blend->replaceAllUsesWith(*UniqueValues.
begin());
1940 Blend->eraseFromParent();
1944 if (Blend->isNormalized())
1950 unsigned StartIndex = 0;
1951 for (
unsigned I = 0;
I != Blend->getNumIncomingValues(); ++
I) {
1956 if (Mask->getNumUsers() == 1 && !
match(Mask,
m_False())) {
1963 OperandsWithMask.
push_back(Blend->getIncomingValue(StartIndex));
1965 for (
unsigned I = 0;
I != Blend->getNumIncomingValues(); ++
I) {
1966 if (
I == StartIndex)
1968 OperandsWithMask.
push_back(Blend->getIncomingValue(
I));
1969 OperandsWithMask.
push_back(Blend->getMask(
I));
1974 OperandsWithMask, *Blend, Blend->getDebugLoc());
1975 NewBlend->insertBefore(&R);
1977 VPValue *DeadMask = Blend->getMask(StartIndex);
1979 Blend->eraseFromParent();
1984 if (NewBlend->getNumOperands() == 3 &&
1986 VPValue *Inc0 = NewBlend->getOperand(0);
1987 VPValue *Inc1 = NewBlend->getOperand(1);
1988 VPValue *OldMask = NewBlend->getOperand(2);
1989 NewBlend->setOperand(0, Inc1);
1990 NewBlend->setOperand(1, Inc0);
1991 NewBlend->setOperand(2, NewMask);
2018 APInt MaxVal = AlignedTC - 1;
2021 unsigned NewBitWidth =
2027 bool MadeChange =
false;
2036 if (!WideIV || !WideIV->isCanonical() ||
2037 WideIV->hasMoreThanOneUniqueUser() ||
2038 NewIVTy == WideIV->getScalarType())
2043 VPUser *SingleUser = WideIV->getSingleUser();
2051 auto *NewStart = Plan.
getZero(NewIVTy);
2052 WideIV->setStartValue(NewStart);
2054 WideIV->setStepValue(NewStep);
2061 Cmp->setOperand(1, NewBTC);
2075 return any_of(
Cond->getDefiningRecipe()->operands(), [&Plan, BestVF, BestUF,
2077 return isConditionTrueViaVFAndUF(C, Plan, BestVF, BestUF, PSE);
2091 const SCEV *VectorTripCount =
2096 "Trip count SCEV must be computable");
2117 auto *Term = &ExitingVPBB->
back();
2130 for (
unsigned Part = 0; Part < UF; ++Part) {
2136 Extracts[Part] = Ext;
2148 match(Phi->getBackedgeValue(),
2150 assert(Index &&
"Expected index from ActiveLaneMask instruction");
2167 "Expected one VPActiveLaneMaskPHIRecipe for each unroll part");
2174 "Expected incoming values of Phi to be ActiveLaneMasks");
2179 EntryALM->setOperand(2, ALMMultiplier);
2180 LoopALM->setOperand(2, ALMMultiplier);
2184 ExtractFromALM(EntryALM, EntryExtracts);
2189 ExtractFromALM(LoopALM, LoopExtracts);
2191 Not->setOperand(0, LoopExtracts[0]);
2194 for (
unsigned Part = 0; Part < UF; ++Part) {
2195 Phis[Part]->setStartValue(EntryExtracts[Part]);
2196 Phis[Part]->setBackedgeValue(LoopExtracts[Part]);
2209 auto *Term = &ExitingVPBB->
back();
2221 const SCEV *VectorTripCount =
2227 "Trip count SCEV must be computable");
2246 Term->setOperand(1, Plan.
getTrue());
2251 {}, Term->getDebugLoc());
2253 Term->eraseFromParent();
2288 R.getVPSingleValue()->replaceAllUsesWith(Trunc);
2298 assert(Plan.
hasVF(BestVF) &&
"BestVF is not available in Plan");
2299 assert(Plan.
hasUF(BestUF) &&
"BestUF is not available in Plan");
2317 RecurKind RK = PhiR->getRecurrenceKind();
2324 RecWithFlags->dropPoisonGeneratingFlags();
2330struct VPCSEDenseMapInfo :
public DenseMapInfo<VPSingleDefRecipe *> {
2332 return Def == getEmptyKey() || Def == getTombstoneKey();
2343 return GEP->getSourceElementType();
2346 .Case<VPVectorPointerRecipe, VPWidenGEPRecipe>(
2347 [](
auto *
I) {
return I->getSourceElementType(); })
2348 .
Default([](
auto *) {
return nullptr; });
2352 static bool canHandle(
const VPSingleDefRecipe *Def) {
2361 if (!
C || (!
C->first && (
C->second == Instruction::InsertValue ||
2362 C->second == Instruction::ExtractValue)))
2368 return !
Def->mayReadFromMemory();
2372 static unsigned getHashValue(
const VPSingleDefRecipe *Def) {
2373 const VPlan *Plan =
Def->getParent()->getPlan();
2374 VPTypeAnalysis TypeInfo(*Plan);
2377 getGEPSourceElementType(Def), TypeInfo.inferScalarType(Def),
2380 if (RFlags->hasPredicate())
2383 return hash_combine(Result, SIVSteps->getInductionOpcode());
2388 static bool isEqual(
const VPSingleDefRecipe *L,
const VPSingleDefRecipe *R) {
2391 if (
L->getVPRecipeID() !=
R->getVPRecipeID() ||
2393 getGEPSourceElementType(L) != getGEPSourceElementType(R) ||
2395 !
equal(
L->operands(),
R->operands()))
2398 "must have valid opcode info for both recipes");
2400 if (LFlags->hasPredicate() &&
2401 LFlags->getPredicate() !=
2405 if (LSIV->getInductionOpcode() !=
2411 const VPRegionBlock *RegionL =
L->getRegion();
2412 const VPRegionBlock *RegionR =
R->getRegion();
2415 L->getParent() !=
R->getParent())
2417 const VPlan *Plan =
L->getParent()->getPlan();
2418 VPTypeAnalysis TypeInfo(*Plan);
2419 return TypeInfo.inferScalarType(L) == TypeInfo.inferScalarType(R);
2435 if (!Def || !VPCSEDenseMapInfo::canHandle(Def))
2439 if (!VPDT.
dominates(V->getParent(), VPBB))
2444 Def->replaceAllUsesWith(V);
2475 "Expected vector prehader's successor to be the vector loop region");
2483 return !Op->isDefinedOutsideLoopRegions();
2486 R.moveBefore(*Preheader, Preheader->
end());
2504 assert(!RepR->isPredicated() &&
2505 "Expected prior transformation of predicated replicates to "
2506 "replicate regions");
2511 if (!RepR->isSingleScalar())
2523 if (
any_of(Def->users(), [&SinkBB, &LoopRegion](
VPUser *U) {
2524 auto *UserR = cast<VPRecipeBase>(U);
2525 VPBasicBlock *Parent = UserR->getParent();
2527 if (SinkBB && SinkBB != Parent)
2532 return UserR->isPhi() || Parent->getEnclosingLoopRegion() ||
2533 Parent->getSinglePredecessor() != LoopRegion;
2543 "Defining block must dominate sink block");
2569 VPValue *ResultVPV = R.getVPSingleValue();
2571 unsigned NewResSizeInBits = MinBWs.
lookup(UI);
2572 if (!NewResSizeInBits)
2585 (void)OldResSizeInBits;
2593 VPW->dropPoisonGeneratingFlags();
2595 if (OldResSizeInBits != NewResSizeInBits &&
2599 Instruction::ZExt, ResultVPV, OldResTy,
nullptr,
2601 Ext->insertAfter(&R);
2603 Ext->setOperand(0, ResultVPV);
2604 assert(OldResSizeInBits > NewResSizeInBits &&
"Nothing to shrink?");
2607 "Only ICmps should not need extending the result.");
2617 for (
unsigned Idx = StartIdx; Idx != R.getNumOperands(); ++Idx) {
2618 auto *
Op = R.getOperand(Idx);
2619 unsigned OpSizeInBits =
2621 if (OpSizeInBits == NewResSizeInBits)
2623 assert(OpSizeInBits > NewResSizeInBits &&
"nothing to truncate");
2624 auto [ProcessedIter, IterIsEmpty] = ProcessedTruncs.
try_emplace(
Op);
2626 R.setOperand(Idx, ProcessedIter->second);
2634 Builder.setInsertPoint(&R);
2636 Builder.createWidenCast(Instruction::Trunc,
Op, NewResTy);
2637 ProcessedIter->second = NewOp;
2638 R.setOperand(Idx, NewOp);
2646 std::optional<VPDominatorTree> VPDT;
2663 assert(VPBB->getNumSuccessors() == 2 &&
2664 "Two successors expected for BranchOnCond");
2665 unsigned RemovedIdx;
2676 "There must be a single edge between VPBB and its successor");
2684 VPBB->back().eraseFromParent();
2696 if (Reachable.contains(
B))
2707 for (
VPValue *Def : R.definedValues())
2708 Def->replaceAllUsesWith(&Tmp);
2709 R.eraseFromParent();
2766 DebugLoc DL = CanonicalIVIncrement->getDebugLoc();
2777 auto *EntryIncrement = Builder.createOverflowingOp(
2779 DL,
"index.part.next");
2785 {EntryIncrement, TC, ALMMultiplier},
DL,
2786 "active.lane.mask.entry");
2793 LaneMaskPhi->insertBefore(*HeaderVPBB, HeaderVPBB->begin());
2798 Builder.setInsertPoint(OriginalTerminator);
2799 auto *InLoopIncrement = Builder.createOverflowingOp(
2801 {CanonicalIVIncrement, &Plan.
getVF()}, {
false,
false},
DL);
2803 {InLoopIncrement, TC, ALMMultiplier},
DL,
2804 "active.lane.mask.next");
2809 auto *NotMask = Builder.createNot(ALM,
DL);
2816 bool UseActiveLaneMaskForControlFlow) {
2820 assert(WideCanonicalIV &&
2821 "Must have widened canonical IV when tail folding!");
2824 if (UseActiveLaneMaskForControlFlow) {
2833 nullptr,
"active.lane.mask");
2849 template <
typename OpTy>
bool match(OpTy *V)
const {
2860template <
typename Op0_t,
typename Op1_t>
2879 VPValue *Addr, *Mask, *EndPtr;
2882 auto AdjustEndPtr = [&CurRecipe, &EVL](
VPValue *EndPtr) {
2884 EVLEndPtr->insertBefore(&CurRecipe);
2885 EVLEndPtr->setOperand(1, &EVL);
2889 auto GetVPReverse = [&CurRecipe, &EVL, &TypeInfo, Plan,
2894 Intrinsic::experimental_vp_reverse, {V, Plan->
getTrue(), &EVL},
2896 Reverse->insertBefore(&CurRecipe);
2900 if (
match(&CurRecipe,
2911 Mask = GetVPReverse(Mask);
2912 Addr = AdjustEndPtr(EndPtr);
2915 LoadR->insertBefore(&CurRecipe);
2917 Intrinsic::experimental_vp_reverse, {LoadR, Plan->
getTrue(), &EVL},
2925 StoredVal, EVL, Mask);
2927 if (
match(&CurRecipe,
2931 Mask = GetVPReverse(Mask);
2932 Addr = AdjustEndPtr(EndPtr);
2933 StoredVal = GetVPReverse(ReversedVal);
2935 StoredVal, EVL, Mask);
2939 if (Rdx->isConditional() &&
2944 if (Interleave->getMask() &&
2949 if (
match(&CurRecipe,
2958 Intrinsic::vp_merge, {Mask,
LHS,
RHS, &EVL},
2972 if (
match(&CurRecipe,
2986 VPValue *HeaderMask =
nullptr, *EVL =
nullptr;
2991 HeaderMask = R.getVPSingleValue();
3003 NewR->insertBefore(R);
3004 for (
auto [Old, New] :
3005 zip_equal(R->definedValues(), NewR->definedValues()))
3006 Old->replaceAllUsesWith(New);
3020 Merge->insertBefore(LogicalAnd);
3021 LogicalAnd->replaceAllUsesWith(
Merge);
3029 R->eraseFromParent();
3046 "User of VF that we can't transform to EVL.");
3056 "Only users of VFxUF should be VPWidenPointerInductionRecipe and the "
3057 "increment of the canonical induction.");
3073 MaxEVL = Builder.createScalarZExtOrTrunc(
3077 Builder.setInsertPoint(Header, Header->getFirstNonPhi());
3078 VPValue *PrevEVL = Builder.createScalarPhi(
3092 Intrinsic::experimental_vp_splice,
3093 {V1, V2, Imm, Plan.
getTrue(), PrevEVL, &EVL},
3097 R.getVPSingleValue()->replaceAllUsesWith(VPSplice);
3110 if (match(&R, m_ComputeReductionResult(m_Select(m_Specific(HeaderMask),
3111 m_VPValue(), m_VPValue()))))
3112 return R.getOperand(0)->getDefiningRecipe()->getRegion() ==
3113 Plan.getVectorLoopRegion();
3125 VPValue *EVLMask = Builder.createICmp(
3185 VPlan &Plan,
const std::optional<unsigned> &MaxSafeElements) {
3197 auto *CurrentIteration =
3199 CurrentIteration->insertBefore(*Header, Header->begin());
3200 VPBuilder Builder(Header, Header->getFirstNonPhi());
3203 VPPhi *AVLPhi = Builder.createScalarPhi(
3207 if (MaxSafeElements) {
3217 Builder.setInsertPoint(CanonicalIVIncrement);
3221 OpVPEVL = Builder.createScalarZExtOrTrunc(
3222 OpVPEVL, CanIVTy, I32Ty, CanonicalIVIncrement->getDebugLoc());
3224 auto *NextIter = Builder.createAdd(
3225 OpVPEVL, CurrentIteration, CanonicalIVIncrement->getDebugLoc(),
3226 "current.iteration.next", CanonicalIVIncrement->getNoWrapFlags());
3227 CurrentIteration->addOperand(NextIter);
3231 "avl.next", {
true,
false});
3239 CanonicalIV->replaceAllUsesWith(CurrentIteration);
3240 CanonicalIVIncrement->setOperand(0, CanonicalIV);
3254 assert(!CurrentIteration &&
3255 "Found multiple CurrentIteration. Only one expected");
3256 CurrentIteration = PhiR;
3260 if (!CurrentIteration)
3271 CurrentIteration->
getDebugLoc(),
"current.iteration.iv");
3280 CanIVInc->eraseFromParent();
3289 if (Header->empty())
3298 if (!
match(EVLPhi->getBackedgeValue(),
3311 [[maybe_unused]]
bool FoundAVLNext =
3314 assert(FoundAVLNext &&
"Didn't find AVL backedge?");
3322 [[maybe_unused]]
bool FoundIncrement =
match(
3329 "Expected BranchOnCond with ICmp comparing CanIV + VFxUF with vector "
3334 LatchBr->setOperand(
3345 return R->getRegion() ||
3349 for (
const SCEV *Stride : StridesMap.
values()) {
3352 const APInt *StrideConst;
3375 RewriteMap[StrideV] = PSE.
getSCEV(StrideV);
3382 const SCEV *ScevExpr = ExpSCEV->getSCEV();
3385 if (NewSCEV != ScevExpr) {
3387 ExpSCEV->replaceAllUsesWith(NewExp);
3398 auto CollectPoisonGeneratingInstrsInBackwardSlice([&](
VPRecipeBase *Root) {
3403 while (!Worklist.
empty()) {
3406 if (!Visited.
insert(CurRec).second)
3428 RecWithFlags->isDisjoint()) {
3431 Builder.createAdd(
A,
B, RecWithFlags->getDebugLoc());
3432 New->setUnderlyingValue(RecWithFlags->getUnderlyingValue());
3433 RecWithFlags->replaceAllUsesWith(New);
3434 RecWithFlags->eraseFromParent();
3437 RecWithFlags->dropPoisonGeneratingFlags();
3442 assert((!Instr || !Instr->hasPoisonGeneratingFlags()) &&
3443 "found instruction with poison generating flags not covered by "
3444 "VPRecipeWithIRFlags");
3449 if (
VPRecipeBase *OpDef = Operand->getDefiningRecipe())
3457 auto IsNotHeaderMask = [&Plan](
VPValue *Mask) {
3469 VPRecipeBase *AddrDef = WidenRec->getAddr()->getDefiningRecipe();
3470 if (AddrDef && WidenRec->isConsecutive() &&
3471 IsNotHeaderMask(WidenRec->getMask()))
3472 CollectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
3474 VPRecipeBase *AddrDef = InterleaveRec->getAddr()->getDefiningRecipe();
3475 if (AddrDef && IsNotHeaderMask(InterleaveRec->getMask()))
3476 CollectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
3486 const bool &EpilogueAllowed) {
3487 if (InterleaveGroups.empty())
3497 IRMemberToRecipe[&MemR.getIngredient()] = &MemR;
3504 for (
const auto *IG : InterleaveGroups) {
3509 return !IRMemberToRecipe.contains(Member);
3513 auto *Start = IRMemberToRecipe.
lookup(IG->getMember(0));
3517 StoredValues.
push_back(StoreR->getStoredValue());
3518 for (
unsigned I = 1;
I < IG->getFactor(); ++
I) {
3524 StoredValues.
push_back(StoreR->getStoredValue());
3528 bool NeedsMaskForGaps =
3529 (IG->requiresScalarEpilogue() && !EpilogueAllowed) ||
3530 (!StoredValues.
empty() && !IG->isFull());
3533 auto *InsertPos = IRMemberToRecipe.
lookup(IRInsertPos);
3541 VPValue *Addr = Start->getAddr();
3550 assert(IG->getIndex(IRInsertPos) != 0 &&
3551 "index of insert position shouldn't be zero");
3555 IG->getIndex(IRInsertPos),
3559 Addr =
B.createNoWrapPtrAdd(InsertPos->getAddr(), OffsetVPV, NW);
3565 if (IG->isReverse()) {
3568 -(int64_t)IG->getFactor(), NW, InsertPos->getDebugLoc());
3569 ReversePtr->insertBefore(InsertPos);
3573 InsertPos->getMask(), NeedsMaskForGaps,
3574 InterleaveMD, InsertPos->getDebugLoc());
3575 VPIG->insertBefore(InsertPos);
3578 for (
unsigned i = 0; i < IG->getFactor(); ++i)
3581 if (!Member->getType()->isVoidTy()) {
3640 AddOp = Instruction::Add;
3641 MulOp = Instruction::Mul;
3643 AddOp =
ID.getInductionOpcode();
3644 MulOp = Instruction::FMul;
3652 Step = Builder.createScalarCast(Instruction::Trunc, Step, Ty,
DL);
3653 Start = Builder.createScalarCast(Instruction::Trunc, Start, Ty,
DL);
3662 Init = Builder.createWidenCast(Instruction::UIToFP,
Init, StepTy);
3667 Init = Builder.createNaryOp(MulOp, {
Init, SplatStep}, Flags);
3668 Init = Builder.createNaryOp(AddOp, {SplatStart,
Init}, Flags,
3684 Builder.setInsertPoint(R->getParent(), std::next(R->getIterator()));
3688 VF = Builder.createScalarCast(Instruction::CastOps::UIToFP, VF, StepTy,
3691 VF = Builder.createScalarZExtOrTrunc(VF, StepTy,
3694 Inc = Builder.createNaryOp(MulOp, {Step, VF}, Flags);
3701 auto *
Next = Builder.createNaryOp(AddOp, {Prev, Inc}, Flags,
3704 WidePHI->addOperand(
Next);
3732 VPlan *Plan = R->getParent()->getPlan();
3733 VPValue *Start = R->getStartValue();
3734 VPValue *Step = R->getStepValue();
3735 VPValue *VF = R->getVFValue();
3737 assert(R->getInductionDescriptor().getKind() ==
3739 "Not a pointer induction according to InductionDescriptor!");
3742 "Recipe should have been replaced");
3748 VPPhi *ScalarPtrPhi = Builder.createScalarPhi(Start,
DL,
"pointer.phi");
3752 Builder.setInsertPoint(R->getParent(), R->getParent()->getFirstNonPhi());
3755 Offset = Builder.createOverflowingOp(Instruction::Mul, {
Offset, Step});
3757 Builder.createWidePtrAdd(ScalarPtrPhi,
Offset,
DL,
"vector.gep");
3758 R->replaceAllUsesWith(PtrAdd);
3763 VF = Builder.createScalarZExtOrTrunc(VF, StepTy, TypeInfo.
inferScalarType(VF),
3765 VPValue *Inc = Builder.createOverflowingOp(Instruction::Mul, {Step, VF});
3768 Builder.createPtrAdd(ScalarPtrPhi, Inc,
DL,
"ptr.ind");
3776 VPValue *Step = R->getStepValue();
3777 VPValue *Index = R->getIndex();
3781 ? Builder.createScalarSExtOrTrunc(
3783 : Builder.createScalarCast(Instruction::SIToFP, Index, StepTy,
3785 switch (R->getInductionKind()) {
3788 "Index type does not match StartValue type");
3789 return R->replaceAllUsesWith(Builder.createAdd(
3790 Start, Builder.createOverflowingOp(Instruction::Mul, {Index, Step})));
3793 return R->replaceAllUsesWith(Builder.createPtrAdd(
3794 Start, Builder.createOverflowingOp(Instruction::Mul, {Index, Step})));
3799 (FPBinOp->
getOpcode() == Instruction::FAdd ||
3800 FPBinOp->
getOpcode() == Instruction::FSub) &&
3801 "Original BinOp should be defined for FP induction");
3803 VPValue *
FMul = Builder.createNaryOp(Instruction::FMul, {Step, Index}, FMF);
3804 return R->replaceAllUsesWith(
3805 Builder.createNaryOp(FPBinOp->
getOpcode(), {Start, FMul}, FMF));
3818 if (!R->isReplicator())
3822 R->dissolveToCFGLoop();
3843 assert(Br->getNumOperands() == 2 &&
3844 "BranchOnTwoConds must have exactly 2 conditions");
3848 assert(Successors.size() == 3 &&
3849 "BranchOnTwoConds must have exactly 3 successors");
3854 VPValue *Cond0 = Br->getOperand(0);
3855 VPValue *Cond1 = Br->getOperand(1);
3860 !BrOnTwoCondsBB->
getParent() &&
"regions must already be dissolved");
3873 Br->eraseFromParent();
3896 WidenIVR->replaceAllUsesWith(PtrAdd);
3915 for (
unsigned I = 1;
I != Blend->getNumIncomingValues(); ++
I)
3916 Select = Builder.createSelect(Blend->getMask(
I),
3917 Blend->getIncomingValue(
I),
Select,
3918 R.getDebugLoc(),
"predphi", *Blend);
3919 Blend->replaceAllUsesWith(
Select);
3924 if (!VEPR->getOffset()) {
3926 "Expected unroller to have materialized offset for UF != 1");
3927 VEPR->materializeOffset();
3942 for (
VPValue *
Op : LastActiveL->operands()) {
3943 VPValue *NotMask = Builder.createNot(
Op, LastActiveL->getDebugLoc());
3948 VPValue *FirstInactiveLane = Builder.createFirstActiveLane(
3949 NotMasks, LastActiveL->getDebugLoc(),
"first.inactive.lane");
3955 Builder.createSub(FirstInactiveLane, One,
3956 LastActiveL->getDebugLoc(),
"last.active.lane");
3966 assert(VPI->isMasked() &&
3967 "Unmasked MaskedCond should be simplified earlier");
3968 VPI->replaceAllUsesWith(Builder.createNaryOp(
3980 Instruction::Add, VPI->operands(), VPI->getNoWrapFlags(),
3981 VPI->getDebugLoc());
3982 VPI->replaceAllUsesWith(
Add);
3991 DebugLoc DL = BranchOnCountInst->getDebugLoc();
3994 ToRemove.push_back(BranchOnCountInst);
4009 ? Instruction::UIToFP
4010 : Instruction::Trunc;
4011 VectorStep = Builder.createWidenCast(CastOp, VectorStep, IVTy);
4017 Builder.createWidenCast(Instruction::Trunc, ScalarStep, IVTy);
4023 MulOpc = Instruction::FMul;
4024 Flags = VPI->getFastMathFlags();
4026 MulOpc = Instruction::Mul;
4031 MulOpc, {VectorStep, ScalarStep}, Flags, R.getDebugLoc());
4033 VPI->replaceAllUsesWith(VectorStep);
4039 R->eraseFromParent();
4047 struct EarlyExitInfo {
4058 if (Pred == MiddleVPBB)
4063 VPValue *CondOfEarlyExitingVPBB;
4064 [[maybe_unused]]
bool Matched =
4065 match(EarlyExitingVPBB->getTerminator(),
4067 assert(Matched &&
"Terminator must be BranchOnCond");
4071 VPBuilder EarlyExitingBuilder(EarlyExitingVPBB->getTerminator());
4072 auto *CondToEarlyExit = EarlyExitingBuilder.
createNaryOp(
4074 TrueSucc == ExitBlock
4075 ? CondOfEarlyExitingVPBB
4076 : EarlyExitingBuilder.
createNot(CondOfEarlyExitingVPBB));
4082 "exit condition must dominate the latch");
4091 assert(!Exits.
empty() &&
"must have at least one early exit");
4098 for (
const auto &[Num, VPB] :
enumerate(RPOT))
4100 llvm::sort(Exits, [&RPOIdx](
const EarlyExitInfo &
A,
const EarlyExitInfo &
B) {
4101 return RPOIdx[
A.EarlyExitingVPBB] < RPOIdx[
B.EarlyExitingVPBB];
4107 for (
unsigned I = 0;
I + 1 < Exits.
size(); ++
I)
4108 for (
unsigned J =
I + 1; J < Exits.
size(); ++J)
4110 Exits[
I].EarlyExitingVPBB) &&
4111 "RPO sort must place dominating exits before dominated ones");
4117 VPValue *Combined = Exits[0].CondToExit;
4118 for (
const EarlyExitInfo &Info :
drop_begin(Exits))
4119 Combined = Builder.createLogicalOr(Combined, Info.CondToExit);
4125 "Early exit store masking not implemented");
4129 for (
unsigned Idx = 0; Idx != Exits.
size(); ++Idx) {
4133 VectorEarlyExitVPBBs[Idx] = VectorEarlyExitVPBB;
4141 Exits.
size() == 1 ? VectorEarlyExitVPBBs[0]
4175 for (
auto [Exit, VectorEarlyExitVPBB] :
4176 zip_equal(Exits, VectorEarlyExitVPBBs)) {
4177 auto &[EarlyExitingVPBB, EarlyExitVPBB,
_] = Exit;
4189 ExitIRI->getIncomingValueForBlock(EarlyExitingVPBB);
4190 VPValue *NewIncoming = IncomingVal;
4192 VPBuilder EarlyExitBuilder(VectorEarlyExitVPBB);
4197 ExitIRI->removeIncomingValueFor(EarlyExitingVPBB);
4198 ExitIRI->addOperand(NewIncoming);
4201 EarlyExitingVPBB->getTerminator()->eraseFromParent();
4235 bool IsLastDispatch = (
I + 2 == Exits.
size());
4237 IsLastDispatch ? VectorEarlyExitVPBBs.
back()
4243 VectorEarlyExitVPBBs[
I]->setPredecessors({CurrentBB});
4246 CurrentBB = FalseBB;
4253 "Unexpected terminator");
4254 auto *IsLatchExitTaken =
4256 LatchExitingBranch->getOperand(1));
4258 DebugLoc LatchDL = LatchExitingBranch->getDebugLoc();
4259 LatchExitingBranch->eraseFromParent();
4260 Builder.setInsertPoint(LatchVPBB);
4262 {IsAnyExitTaken, IsLatchExitTaken}, LatchDL);
4264 LatchVPBB->
setSuccessors({DispatchVPBB, MiddleVPBB, HeaderVPBB});
4274 Type *RedTy = Ctx.Types.inferScalarType(Red);
4275 VPValue *VecOp = Red->getVecOp();
4277 assert(!Red->isPartialReduction() &&
4278 "This path does not support partial reductions");
4281 auto IsExtendedRedValidAndClampRange =
4294 "getExtendedReductionCost only supports integer types");
4295 ExtRedCost = Ctx.TTI.getExtendedReductionCost(
4296 Opcode, ExtOpc == Instruction::CastOps::ZExt, RedTy, SrcVecTy,
4297 Red->getFastMathFlags(),
CostKind);
4298 return ExtRedCost.
isValid() && ExtRedCost < ExtCost + RedCost;
4306 IsExtendedRedValidAndClampRange(
4309 Ctx.Types.inferScalarType(
A)))
4328 if (Opcode != Instruction::Add && Opcode != Instruction::Sub &&
4329 Opcode != Instruction::FAdd)
4332 assert(!Red->isPartialReduction() &&
4333 "This path does not support partial reductions");
4334 Type *RedTy = Ctx.Types.inferScalarType(Red);
4337 auto IsMulAccValidAndClampRange =
4344 Ext0 ? Ctx.Types.inferScalarType(Ext0->getOperand(0)) : RedTy;
4350 (Ext0->getOpcode() != Ext1->getOpcode() ||
4351 Ext0->getOpcode() == Instruction::CastOps::FPExt))
4355 !Ext0 || Ext0->getOpcode() == Instruction::CastOps::ZExt;
4357 MulAccCost = Ctx.TTI.getMulAccReductionCost(IsZExt, Opcode, RedTy,
4364 ExtCost += Ext0->computeCost(VF, Ctx);
4366 ExtCost += Ext1->computeCost(VF, Ctx);
4368 ExtCost += OuterExt->computeCost(VF, Ctx);
4370 return MulAccCost.
isValid() &&
4371 MulAccCost < ExtCost + MulCost + RedCost;
4376 VPValue *VecOp = Red->getVecOp();
4414 Builder.createWidenCast(Instruction::CastOps::Trunc, ValB, NarrowTy);
4415 Type *WideTy = Ctx.Types.inferScalarType(ExtA);
4416 ValB = ExtB = Builder.createWidenCast(ExtOpc, Trunc, WideTy);
4417 Mul->setOperand(1, ExtB);
4427 ExtendAndReplaceConstantOp(RecipeA, RecipeB,
B,
Mul);
4432 IsMulAccValidAndClampRange(
Mul, RecipeA, RecipeB,
nullptr)) {
4439 if (!
Sub && IsMulAccValidAndClampRange(
Mul,
nullptr,
nullptr,
nullptr))
4456 ExtendAndReplaceConstantOp(Ext0, Ext1,
B,
Mul);
4465 (Ext->getOpcode() == Ext0->getOpcode() || Ext0 == Ext1) &&
4466 Ext0->getOpcode() == Ext1->getOpcode() &&
4467 IsMulAccValidAndClampRange(
Mul, Ext0, Ext1, Ext) &&
Mul->hasOneUse()) {
4469 Ext0->getOpcode(), Ext0->getOperand(0), Ext->getResultType(),
nullptr,
4470 *Ext0, *Ext0, Ext0->getDebugLoc());
4471 NewExt0->insertBefore(Ext0);
4476 Ext->getResultType(),
nullptr, *Ext1,
4477 *Ext1, Ext1->getDebugLoc());
4480 Mul->setOperand(0, NewExt0);
4481 Mul->setOperand(1, NewExt1);
4482 Red->setOperand(1,
Mul);
4496 assert(!Red->isPartialReduction() &&
4497 "This path does not support partial reductions");
4500 auto IP = std::next(Red->getIterator());
4501 auto *VPBB = Red->getParent();
4511 Red->replaceAllUsesWith(AbstractR);
4541 for (
VPValue *VPV : VPValues) {
4550 if (
User->usesScalars(VPV))
4553 HoistPoint = HoistBlock->
begin();
4557 "All users must be in the vector preheader or dominated by it");
4562 VPV->replaceUsesWithIf(Broadcast,
4563 [VPV, Broadcast](
VPUser &U,
unsigned Idx) {
4564 return Broadcast != &U && !U.usesScalars(VPV);
4575 return CommonMetadata;
4578template <
unsigned Opcode>
4583 static_assert(Opcode == Instruction::Load || Opcode == Instruction::Store,
4584 "Only Load and Store opcodes supported");
4585 constexpr bool IsLoad = (Opcode == Instruction::Load);
4591 return TypeInfo.
inferScalarType(IsLoad ? Recipe : Recipe->getOperand(0));
4596 for (
auto Recipes :
Groups) {
4597 if (Recipes.size() < 2)
4605 VPValue *MaskI = RecipeI->getMask();
4606 Type *TypeI = GetLoadStoreValueType(RecipeI);
4612 bool HasComplementaryMask =
false;
4617 VPValue *MaskJ = RecipeJ->getMask();
4618 Type *TypeJ = GetLoadStoreValueType(RecipeJ);
4619 if (TypeI == TypeJ) {
4629 if (HasComplementaryMask) {
4630 assert(Group.
size() >= 2 &&
"must have at least 2 entries");
4640template <
typename InstType>
4658 for (
auto &Group :
Groups) {
4678 return R->isSingleScalar() == IsSingleScalar;
4680 "all members in group must agree on IsSingleScalar");
4685 LoadWithMinAlign->getUnderlyingInstr(), {EarliestLoad->getOperand(0)},
4686 IsSingleScalar,
nullptr, *EarliestLoad, CommonMetadata);
4688 UnpredicatedLoad->insertBefore(EarliestLoad);
4692 Load->replaceAllUsesWith(UnpredicatedLoad);
4693 Load->eraseFromParent();
4703 if (!StoreLoc || !StoreLoc->AATags.Scope)
4709 StoresToSink.
end());
4713 SinkStoreInfo SinkInfo(StoresToSinkSet, *StoresToSink[0], PSE, L, TypeInfo);
4727 for (
auto &Group :
Groups) {
4740 VPValue *SelectedValue = Group[0]->getOperand(0);
4743 bool IsSingleScalar = Group[0]->isSingleScalar();
4744 for (
unsigned I = 1;
I < Group.size(); ++
I) {
4745 assert(IsSingleScalar == Group[
I]->isSingleScalar() &&
4746 "all members in group must agree on IsSingleScalar");
4747 VPValue *Mask = Group[
I]->getMask();
4749 SelectedValue = Builder.createSelect(Mask,
Value, SelectedValue,
4758 StoreWithMinAlign->getUnderlyingInstr(),
4759 {SelectedValue, LastStore->getOperand(1)}, IsSingleScalar,
4760 nullptr, *LastStore, CommonMetadata);
4761 UnpredicatedStore->insertBefore(*InsertBB, LastStore->
getIterator());
4765 Store->eraseFromParent();
4772 assert(Plan.
hasVF(BestVF) &&
"BestVF is not available in Plan");
4773 assert(Plan.
hasUF(BestUF) &&
"BestUF is not available in Plan");
4837 auto UsesVectorOrInsideReplicateRegion = [DefR, LoopRegion](
VPUser *U) {
4839 return !U->usesScalars(DefR) || ParentRegion != LoopRegion;
4846 none_of(DefR->users(), UsesVectorOrInsideReplicateRegion))
4856 DefR->replaceUsesWithIf(
4857 BuildVector, [BuildVector, &UsesVectorOrInsideReplicateRegion](
4859 return &U != BuildVector && UsesVectorOrInsideReplicateRegion(&U);
4873 for (
VPValue *Def : R.definedValues()) {
4886 auto IsCandidateUnpackUser = [Def](
VPUser *U) {
4888 return U->usesScalars(Def) &&
4891 if (
none_of(Def->users(), IsCandidateUnpackUser))
4898 Unpack->insertAfter(&R);
4899 Def->replaceUsesWithIf(Unpack,
4900 [&IsCandidateUnpackUser](
VPUser &U,
unsigned) {
4901 return IsCandidateUnpackUser(&U);
4910 bool RequiresScalarEpilogue,
VPValue *Step,
4911 std::optional<uint64_t> MaxRuntimeStep) {
4922 assert(StepR->getParent() == VectorPHVPBB &&
4923 "Step must be defined in VectorPHVPBB");
4925 InsertPt = std::next(StepR->getIterator());
4927 VPBuilder Builder(VectorPHVPBB, InsertPt);
4933 if (!RequiresScalarEpilogue &&
match(TC,
m_APInt(TCVal)) && MaxRuntimeStep &&
4945 if (TailByMasking) {
4946 TC = Builder.createAdd(
4957 Builder.createNaryOp(Instruction::URem, {TC, Step},
4966 if (RequiresScalarEpilogue) {
4968 "requiring scalar epilogue is not supported with fail folding");
4971 R = Builder.createSelect(IsZero, Step, R);
4985 "VF and VFxUF must be materialized together");
4997 Builder.createElementCount(TCTy, VFEC * Plan.
getConcreteUF());
5004 VPValue *RuntimeVF = Builder.createElementCount(TCTy, VFEC);
5008 BC, [&VF](
VPUser &U,
unsigned) {
return !U.usesScalars(&VF); });
5012 VPValue *MulByUF = Builder.createOverflowingOp(
5024 BasicBlock *EntryBB = Entry->getIRBasicBlock();
5032 const SCEV *Expr = ExpSCEV->getSCEV();
5035 ExpandedSCEVs[ExpSCEV->getSCEV()] = Res;
5040 ExpSCEV->eraseFromParent();
5043 "VPExpandSCEVRecipes must be at the beginning of the entry block, "
5044 "before any VPIRInstructions");
5047 auto EI = Entry->begin();
5057 return ExpandedSCEVs;
5069 VPValue *OpV,
unsigned Idx,
bool IsScalable) {
5073 return Member0Op == OpV;
5077 return !IsScalable && !W->getMask() && W->isConsecutive() &&
5080 return IR->getInterleaveGroup()->isFull() &&
IR->getVPValue(Idx) == OpV;
5097 for (
unsigned Idx = 0; Idx != WideMember0->getNumOperands(); ++Idx) {
5100 OpsI.
push_back(
Op->getDefiningRecipe()->getOperand(Idx));
5105 if (
any_of(
enumerate(OpsI), [WideMember0, Idx, IsScalable](
const auto &
P) {
5106 const auto &[
OpIdx, OpV] =
P;
5121 if (!InterleaveR || InterleaveR->
getMask())
5122 return std::nullopt;
5124 Type *GroupElementTy =
nullptr;
5128 [&TypeInfo, GroupElementTy](
VPValue *
Op) {
5129 return TypeInfo.inferScalarType(Op) == GroupElementTy;
5131 return std::nullopt;
5136 [&TypeInfo, GroupElementTy](
VPValue *
Op) {
5137 return TypeInfo.inferScalarType(Op) == GroupElementTy;
5139 return std::nullopt;
5143 if (IG->getFactor() != IG->getNumMembers())
5144 return std::nullopt;
5150 assert(
Size.isScalable() == VF.isScalable() &&
5151 "if Size is scalable, VF must be scalable and vice versa");
5152 return Size.getKnownMinValue();
5156 unsigned MinVal = VF.getKnownMinValue();
5158 if (IG->getFactor() == MinVal && GroupSize == GetVectorBitWidthForVF(VF))
5161 return std::nullopt;
5169 return RepR && RepR->isSingleScalar();
5176 auto *R = V->getDefiningRecipe();
5185 for (
unsigned Idx = 0,
E = WideMember0->getNumOperands(); Idx !=
E; ++Idx)
5186 WideMember0->setOperand(
5195 auto *LI =
cast<LoadInst>(LoadGroup->getInterleaveGroup()->getInsertPos());
5197 LoadGroup->getMask(),
true,
5198 {}, LoadGroup->getDebugLoc());
5199 L->insertBefore(LoadGroup);
5205 assert(RepR->isSingleScalar() && RepR->getOpcode() == Instruction::Load &&
5206 "must be a single scalar load");
5207 NarrowedOps.
insert(RepR);
5212 VPValue *PtrOp = WideLoad->getAddr();
5214 PtrOp = VecPtr->getOperand(0);
5219 nullptr, {}, *WideLoad);
5220 N->insertBefore(WideLoad);
5225std::unique_ptr<VPlan>
5245 "unexpected branch-on-count");
5249 std::optional<ElementCount> VFToOptimize;
5263 if (R.mayWriteToMemory() && !InterleaveR)
5269 return any_of(V->users(), [&](VPUser *U) {
5270 auto *UR = cast<VPRecipeBase>(U);
5271 return UR->getParent()->getParent() != VectorLoop;
5288 std::optional<ElementCount> NarrowedVF =
5290 if (!NarrowedVF || (VFToOptimize && NarrowedVF != VFToOptimize))
5292 VFToOptimize = NarrowedVF;
5295 if (InterleaveR->getStoredValues().empty())
5300 auto *Member0 = InterleaveR->getStoredValues()[0];
5310 VPRecipeBase *DefR = Op.value()->getDefiningRecipe();
5313 auto *IR = dyn_cast<VPInterleaveRecipe>(DefR);
5314 return IR && IR->getInterleaveGroup()->isFull() &&
5315 IR->getVPValue(Op.index()) == Op.value();
5324 VFToOptimize->isScalable()))
5329 if (StoreGroups.empty())
5333 bool RequiresScalarEpilogue =
5344 std::unique_ptr<VPlan> NewPlan;
5346 NewPlan = std::unique_ptr<VPlan>(Plan.
duplicate());
5347 Plan.
setVF(*VFToOptimize);
5348 NewPlan->removeVF(*VFToOptimize);
5354 for (
auto *StoreGroup : StoreGroups) {
5361 StoreGroup->getDebugLoc());
5362 S->insertBefore(StoreGroup);
5363 StoreGroup->eraseFromParent();
5369 Type *CanIVTy = VectorLoop->getCanonicalIVType();
5375 if (VFToOptimize->isScalable()) {
5378 Step = PHBuilder.createOverflowingOp(Instruction::Mul, {VScale,
UF},
5386 materializeVectorTripCount(Plan, VectorPH,
false,
5387 RequiresScalarEpilogue, Step);
5392 removeDeadRecipes(Plan);
5395 "All VPVectorPointerRecipes should have been removed");
5411 "must have a BranchOnCond");
5414 if (VF.
isScalable() && VScaleForTuning.has_value())
5415 VectorStep *= *VScaleForTuning;
5416 assert(VectorStep > 0 &&
"trip count should not be zero");
5420 MiddleTerm->setMetadata(LLVMContext::MD_prof, BranchWeights);
5440 "Cannot handle loops with uncountable early exits");
5447 assert(RecurSplice &&
"expected FirstOrderRecurrenceSplice");
5454 if (
any_of(RecurSplice->users(),
5455 [](
VPUser *U) { return !cast<VPRecipeBase>(U)->getRegion(); }) &&
5536 {},
"vector.recur.extract.for.phi");
5539 ExitPhi->replaceUsesOfWith(ExtractR, PenultimateElement);
5553 VPValue *WidenIVCandidate = BinOp->getOperand(0);
5554 VPValue *InvariantCandidate = BinOp->getOperand(1);
5556 std::swap(WidenIVCandidate, InvariantCandidate);
5570 auto *ClonedOp = BinOp->
clone();
5571 if (ClonedOp->getOperand(0) == WidenIV) {
5572 ClonedOp->setOperand(0, ScalarIV);
5574 assert(ClonedOp->getOperand(1) == WidenIV &&
"one operand must be WideIV");
5575 ClonedOp->setOperand(1, ScalarIV);
5590 auto CheckSentinel = [&SE](
const SCEV *IVSCEV,
5591 bool UseMax) -> std::optional<APSInt> {
5593 for (
bool Signed : {
true,
false}) {
5602 return std::nullopt;
5610 PhiR->getRecurrenceKind()))
5619 VPValue *BackedgeVal = PhiR->getBackedgeValue();
5633 !
match(FindLastSelect,
5642 IVOfExpressionToSink ? IVOfExpressionToSink : FindLastExpression, PSE,
5648 "IVOfExpressionToSink not being an AddRec must imply "
5649 "FindLastExpression not being an AddRec.");
5660 std::optional<APSInt> SentinelVal = CheckSentinel(IVSCEV, UseMax);
5661 bool UseSigned = SentinelVal && SentinelVal->isSigned();
5668 if (IVOfExpressionToSink) {
5669 const SCEV *FindLastExpressionSCEV =
5671 if (
match(FindLastExpressionSCEV,
5674 if (
auto NewSentinel =
5675 CheckSentinel(FindLastExpressionSCEV, NewUseMax)) {
5678 SentinelVal = *NewSentinel;
5679 UseSigned = NewSentinel->isSigned();
5681 IVSCEV = FindLastExpressionSCEV;
5682 IVOfExpressionToSink =
nullptr;
5692 if (AR->hasNoSignedWrap())
5694 else if (AR->hasNoUnsignedWrap())
5704 VPValue *NewFindLastSelect = BackedgeVal;
5706 if (!SentinelVal || IVOfExpressionToSink) {
5709 DebugLoc DL = FindLastSelect->getDefiningRecipe()->getDebugLoc();
5710 VPBuilder LoopBuilder(FindLastSelect->getDefiningRecipe());
5711 if (FindLastSelect->getDefiningRecipe()->getOperand(1) == PhiR)
5712 SelectCond = LoopBuilder.
createNot(SelectCond);
5719 if (SelectCond !=
Cond || IVOfExpressionToSink) {
5722 IVOfExpressionToSink ? IVOfExpressionToSink : FindLastExpression,
5731 VPIRFlags Flags(MinMaxKind,
false,
false,
5737 NewFindLastSelect, Flags, ExitDL);
5740 VPValue *VectorRegionExitingVal = ReducedIV;
5741 if (IVOfExpressionToSink)
5742 VectorRegionExitingVal =
5744 ReducedIV, IVOfExpressionToSink);
5747 VPValue *StartVPV = PhiR->getStartValue();
5754 NewRdxResult = MiddleBuilder.
createSelect(Cmp, VectorRegionExitingVal,
5764 AnyOfPhi->insertAfter(PhiR);
5771 OrVal, VectorRegionExitingVal, StartVPV, ExitDL);
5784 PhiR->hasUsesOutsideReductionChain());
5785 NewPhiR->insertBefore(PhiR);
5786 PhiR->replaceAllUsesWith(NewPhiR);
5787 PhiR->eraseFromParent();
5794struct ReductionExtend {
5795 Type *SrcType =
nullptr;
5796 ExtendKind Kind = ExtendKind::PR_None;
5802struct ExtendedReductionOperand {
5806 ReductionExtend ExtendA, ExtendB;
5814struct VPPartialReductionChain {
5817 VPWidenRecipe *ReductionBinOp =
nullptr;
5819 ExtendedReductionOperand ExtendedOp;
5826 unsigned AccumulatorOpIdx;
5827 unsigned ScaleFactor;
5840 if (!
Op->hasOneUse() ||
5846 auto *Trunc = Builder.createWidenCast(Instruction::CastOps::Trunc,
5847 Op->getOperand(1), NarrowTy);
5849 Op->setOperand(1, Builder.createWidenCast(ExtOpc, Trunc, WideTy));
5858 auto *
Sub =
Op->getOperand(0)->getDefiningRecipe();
5860 assert(Ext->getOpcode() ==
5862 "Expected both the LHS and RHS extends to be the same");
5863 bool IsSigned = Ext->getOpcode() == Instruction::SExt;
5866 auto *FreezeX = Builder.insert(
new VPWidenRecipe(Instruction::Freeze, {
X}));
5867 auto *FreezeY = Builder.insert(
new VPWidenRecipe(Instruction::Freeze, {
Y}));
5868 auto *
Max = Builder.insert(
5870 {FreezeX, FreezeY}, SrcTy));
5871 auto *Min = Builder.insert(
5873 {FreezeX, FreezeY}, SrcTy));
5876 return Builder.createWidenCast(Instruction::CastOps::ZExt, AbsDiff,
5889 if (!
Mul->hasOneUse() ||
5890 (Ext->getOpcode() != MulLHS->getOpcode() && MulLHS != MulRHS) ||
5891 MulLHS->getOpcode() != MulRHS->getOpcode())
5894 Mul->setOperand(0, Builder.createWidenCast(MulLHS->getOpcode(),
5895 MulLHS->getOperand(0),
5896 Ext->getResultType()));
5897 Mul->setOperand(1, MulLHS == MulRHS
5898 ?
Mul->getOperand(0)
5899 : Builder.createWidenCast(MulRHS->getOpcode(),
5900 MulRHS->getOperand(0),
5901 Ext->getResultType()));
5910 VPValue *VecOp = Red->getVecOp();
5944static void transformToPartialReduction(
const VPPartialReductionChain &Chain,
5952 WidenRecipe->
getOperand(1 - Chain.AccumulatorOpIdx));
5968 if (WidenRecipe->
getOpcode() == Instruction::Sub &&
5976 Builder.insert(NegRecipe);
5977 ExtendedOp = NegRecipe;
5981 "FSub chain reduction isn't supported");
5984 ExtendedOp = optimizeExtendsForPartialReduction(ExtendedOp, TypeInfo);
5994 assert((!ExitValue || IsLastInChain) &&
5995 "if we found ExitValue, it must match RdxPhi's backedge value");
6006 PartialRed->insertBefore(WidenRecipe);
6014 E->insertBefore(WidenRecipe);
6015 PartialRed->replaceAllUsesWith(
E);
6028 auto *NewScaleFactor = Plan.
getConstantInt(32, Chain.ScaleFactor);
6029 StartInst->setOperand(2, NewScaleFactor);
6037 VPValue *OldStartValue = StartInst->getOperand(0);
6038 StartInst->setOperand(0, StartInst->getOperand(1));
6042 assert(RdxResult &&
"Could not find reduction result");
6045 unsigned SubOpc = Chain.RK ==
RecurKind::FSub ? Instruction::BinaryOps::FSub
6046 : Instruction::BinaryOps::Sub;
6052 [&NewResult](
VPUser &U,
unsigned Idx) {
return &
U != NewResult; });
6058 const VPPartialReductionChain &Link,
6061 const ExtendedReductionOperand &ExtendedOp = Link.ExtendedOp;
6062 std::optional<unsigned> BinOpc = std::nullopt;
6064 if (ExtendedOp.ExtendB.Kind != ExtendKind::PR_None)
6065 BinOpc = ExtendedOp.ExtendsUser->
getOpcode();
6067 std::optional<llvm::FastMathFlags>
Flags;
6071 auto GetLinkOpcode = [&Link]() ->
unsigned {
6074 return Instruction::Add;
6076 return Instruction::FAdd;
6078 return Link.ReductionBinOp->
getOpcode();
6083 GetLinkOpcode(), ExtendedOp.ExtendA.SrcType, ExtendedOp.ExtendB.SrcType,
6084 RdxType, VF, ExtendedOp.ExtendA.Kind, ExtendedOp.ExtendB.Kind, BinOpc,
6107static std::optional<ExtendedReductionOperand>
6111 "Op should be operand of UpdateR");
6119 if (
Op->hasOneUse() &&
6129 if (LHSInputType != RHSInputType ||
6130 LHSExt->getOpcode() != RHSExt->getOpcode())
6131 return std::nullopt;
6134 return ExtendedReductionOperand{
6136 {LHSInputType, getPartialReductionExtendKind(LHSExt)},
6140 std::optional<TTI::PartialReductionExtendKind> OuterExtKind;
6143 VPValue *CastSource = CastRecipe->getOperand(0);
6144 OuterExtKind = getPartialReductionExtendKind(CastRecipe);
6154 if (UpdateR->
getOpcode() == Instruction::Sub)
6155 return std::nullopt;
6156 }
else if (UpdateR->
getOpcode() == Instruction::Add ||
6157 UpdateR->
getOpcode() == Instruction::FAdd) {
6161 return ExtendedReductionOperand{
6168 if (!
Op->hasOneUse())
6169 return std::nullopt;
6174 return std::nullopt;
6184 return std::nullopt;
6188 ExtendKind LHSExtendKind = getPartialReductionExtendKind(LHSCast);
6191 const APInt *RHSConst =
nullptr;
6197 return std::nullopt;
6201 if (Cast && OuterExtKind &&
6202 getPartialReductionExtendKind(Cast) != OuterExtKind)
6203 return std::nullopt;
6205 Type *RHSInputType = LHSInputType;
6206 ExtendKind RHSExtendKind = LHSExtendKind;
6209 RHSExtendKind = getPartialReductionExtendKind(RHSCast);
6212 return ExtendedReductionOperand{
6213 MulOp, {LHSInputType, LHSExtendKind}, {RHSInputType, RHSExtendKind}};
6220static std::optional<SmallVector<VPPartialReductionChain>>
6228 return std::nullopt;
6239 VPValue *CurrentValue = ExitValue;
6240 while (CurrentValue != RedPhiR) {
6243 return std::nullopt;
6250 std::optional<ExtendedReductionOperand> ExtendedOp =
6251 matchExtendedReductionOperand(UpdateR,
Op, TypeInfo);
6253 ExtendedOp = matchExtendedReductionOperand(UpdateR, PrevValue, TypeInfo);
6255 return std::nullopt;
6259 Type *ExtSrcType = ExtendedOp->ExtendA.SrcType;
6262 return std::nullopt;
6267 VPPartialReductionChain Link(
6268 {UpdateR, *ExtendedOp, RK,
6272 CurrentValue = PrevValue;
6277 std::reverse(Chain.
begin(), Chain.
end());
6296 if (
auto Chains = getScaledReductions(RedPhiR, CostCtx,
Range))
6297 ChainsByPhi.
try_emplace(RedPhiR, std::move(*Chains));
6300 if (ChainsByPhi.
empty())
6307 for (
const auto &[
_, Chains] : ChainsByPhi)
6308 for (
const VPPartialReductionChain &Chain : Chains) {
6309 PartialReductionOps.
insert(Chain.ExtendedOp.ExtendsUser);
6310 ScaledReductionMap[Chain.ReductionBinOp] = Chain.ScaleFactor;
6316 auto ExtendUsersValid = [&](
VPValue *Ext) {
6318 return PartialReductionOps.contains(cast<VPRecipeBase>(U));
6322 auto IsProfitablePartialReductionChainForVF =
6329 for (
const VPPartialReductionChain &Link : Chain) {
6330 const ExtendedReductionOperand &ExtendedOp = Link.ExtendedOp;
6331 InstructionCost LinkCost = getPartialReductionLinkCost(CostCtx, Link, VF);
6335 PartialCost += LinkCost;
6336 RegularCost += Link.ReductionBinOp->
computeCost(VF, CostCtx);
6338 if (ExtendedOp.ExtendB.Kind != ExtendKind::PR_None)
6339 RegularCost += ExtendedOp.ExtendsUser->
computeCost(VF, CostCtx);
6342 RegularCost += Extend->computeCost(VF, CostCtx);
6344 return PartialCost.
isValid() && PartialCost < RegularCost;
6352 for (
auto &[RedPhiR, Chains] : ChainsByPhi) {
6353 for (
const VPPartialReductionChain &Chain : Chains) {
6354 if (!
all_of(Chain.ExtendedOp.ExtendsUser->operands(), ExtendUsersValid)) {
6358 auto UseIsValid = [&, RedPhiR = RedPhiR](
VPUser *U) {
6360 return PhiR == RedPhiR;
6362 return Chain.ScaleFactor == ScaledReductionMap.
lookup_or(R, 0) ||
6368 if (!
all_of(Chain.ReductionBinOp->users(), UseIsValid)) {
6377 auto *RepR = dyn_cast<VPReplicateRecipe>(U);
6378 return RepR && RepR->getOpcode() == Instruction::Store;
6389 return IsProfitablePartialReductionChainForVF(Chains, VF);
6395 for (
auto &[Phi, Chains] : ChainsByPhi)
6396 for (
const VPPartialReductionChain &Chain : Chains)
6397 transformToPartialReduction(Chain, CostCtx.
Types, Plan, Phi);
6411 if (VPI && VPI->getUnderlyingValue() &&
6423 New->insertBefore(VPI);
6424 if (VPI->getOpcode() == Instruction::Load)
6425 VPI->replaceAllUsesWith(New->getVPSingleValue());
6426 VPI->eraseFromParent();
6431 FinalRedStoresBuilder))
6440 ReplaceWith(Histogram);
6448 ReplaceWith(Recipe);
6471 if (VPI->mayHaveSideEffects())
6475 if (VPI->isMasked() && !VPI->isSafeToSpeculativelyExecute())
6480 if (VPI->getOpcode() == Instruction::Add &&
6489 I, VPI->operandsWithoutMask(),
true,
6490 nullptr, *VPI, *VPI, VPI->getDebugLoc());
6491 Recipe->insertBefore(VPI);
6492 VPI->replaceAllUsesWith(Recipe);
6493 VPI->eraseFromParent();
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefInfo InstSet & ToRemove
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static bool isSentinel(const DWARFDebugNames::AttributeEncoding &AE)
iv Induction Variable Users
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Legalize the Machine IR a function s Machine IR
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first DebugLoc that has line number information, given a range of instructions.
This file provides utility analysis objects describing memory locations.
MachineInstr unsigned OpIdx
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > & Cond
This is the interface for a metadata-based scoped no-alias analysis.
This file defines generic set operations that may be used on set's of different types,...
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type ...
This file implements dominator tree analysis for a single level of a VPlan's H-CFG.
This file contains the declarations of different VPlan-related auxiliary helpers.
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
static const X86InstrFMA3Group Groups[]
static const uint32_t IV[8]
Helper for extra no-alias checks via known-safe recipe and SCEV.
SinkStoreInfo(const SmallPtrSetImpl< VPRecipeBase * > &ExcludeRecipes, VPReplicateRecipe &GroupLeader, PredicatedScalarEvolution &PSE, const Loop &L, VPTypeAnalysis &TypeInfo)
bool shouldSkip(VPRecipeBase &R) const
Return true if R should be skipped during alias checking, either because it's in the exclude set or b...
Class for arbitrary precision integers.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
uint64_t getZExtValue() const
Get zero extended value.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt abs() const
Get the absolute value.
unsigned getBitWidth() const
Return the number of bits in the APInt.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
An arbitrary precision integer that knows its signedness.
static APSInt getMinValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the minimum integer value with the given bit width and signedness.
static APSInt getMaxValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the maximum integer value with the given bit width and signedness.
@ NoAlias
The two locations do not alias at all.
Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
Get the last element.
const T & front() const
Get the first element.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
This class represents a function call, abstracting a target machine's calling convention.
@ ICMP_ULT
unsigned less than
@ ICMP_ULE
unsigned less or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
This class represents a range of values.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
static DebugLoc getCompilerGenerated()
static DebugLoc getUnknown()
ValueT lookup(const_arg_type_t< KeyT > Val) const
Return the entry for the specified key, or a default constructed value if no such entry exists.
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
ValueT lookup_or(const_arg_type_t< KeyT > Val, U &&Default) const
bool dominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
dominates - Returns true iff A dominates B.
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getScalable(ScalarTy MinVal)
constexpr bool isScalar() const
Exactly one element.
Utility class for floating point operations which can have information about relaxed accuracy require...
FastMathFlags getFastMathFlags() const
Convenience function for getting all the fast-math flags.
Convenience struct for specifying and reasoning about fast-math flags.
Represents flags for the getelementptr instruction/expression.
GEPNoWrapFlags withoutNoUnsignedWrap() const
static GEPNoWrapFlags none()
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
A struct for saving information about induction variables.
static LLVM_ABI InductionDescriptor getCanonicalIntInduction(Type *Ty, ScalarEvolution &SE)
Returns the canonical integer induction for type Ty with start = 0 and step = 1.
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_NoInduction
Not an induction variable.
@ IK_FpInduction
Floating point induction variable.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
InstSimplifyFolder - Use InstructionSimplify to fold operations to existing values.
static InstructionCost getInvalid(CostType Val=0)
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
The group of interleaved loads/stores sharing the same stride and close to each other.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
Represents a single loop in the control flow graph.
LLVM_ABI MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight, bool IsExpected=false)
Return metadata containing two branch weights.
This class implements a map that also provides access to all stored values in a deterministic order.
ValueT lookup(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
Representation for a specific memory location.
AAMDNodes AATags
The metadata nodes which describes the aliasing of the location (each member is null if that kind of ...
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Post-order traversal of a graph.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
unsigned getOpcode() const
static bool isFindLastRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
RegionT * getParent() const
Get the parent of the Region.
This class uses information about analyze scalars to rewrite expressions in canonical form.
LLVM_ABI Value * expandCodeFor(SCEVUse SH, Type *Ty, BasicBlock::iterator I)
Insert code to directly compute the specified SCEV expression into the program.
static const SCEV * rewrite(const SCEV *Scev, ScalarEvolution &SE, ValueToSCEVMapTy &Map)
This class represents an analyzed expression in the program.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
The main scalar evolution driver.
LLVM_ABI const SCEV * getUDivExpr(SCEVUse LHS, SCEVUse RHS)
Get a canonical unsigned division expression, or something simpler if possible.
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
LLVM_ABI const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getMinusSCEV(SCEVUse LHS, SCEVUse RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
LLVM_ABI bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
static LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB)
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Provides information about what library functions are available for the current target.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionalit...
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
bool isStructTy() const
True if this is an instance of StructType.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
A recipe for generating the active lane mask for the vector loop that is used to predicate the vector...
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
void appendRecipe(VPRecipeBase *Recipe)
Augment the existing recipes of a VPBasicBlock with an additional Recipe as the last recipe.
RecipeListTy::iterator iterator
Instruction iterators...
iterator begin()
Recipe iterator methods.
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
VPBasicBlock * splitAt(iterator SplitAt)
Split current block at SplitAt by inserting a new block between the current block and its successors ...
VPRecipeBase * getTerminator()
If the block has multiple successors, return the branch recipe terminating the block.
const VPRecipeBase & back() const
A recipe for vectorizing a phi-node as a sequence of mask-based select instructions.
VPValue * getMask(unsigned Idx) const
Return mask number Idx.
unsigned getNumIncomingValues() const
Return the number of incoming values, taking into account when normalized the first incoming value wi...
void setMask(unsigned Idx, VPValue *V)
Set mask number Idx to V.
bool isNormalized() const
A normalized blend is one that has an odd number of operands, whereby the first operand does not have...
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
void setSuccessors(ArrayRef< VPBlockBase * > NewSuccs)
Set each VPBasicBlock in NewSuccss as successor of this VPBlockBase.
VPRegionBlock * getParent()
const VPBasicBlock * getExitingBasicBlock() const
size_t getNumSuccessors() const
void setPredecessors(ArrayRef< VPBlockBase * > NewPreds)
Set each VPBasicBlock in NewPreds as predecessor of this VPBlockBase.
const VPBlocksTy & getPredecessors() const
const std::string & getName() const
void clearSuccessors()
Remove all the successors of this block.
VPBlockBase * getSinglePredecessor() const
const VPBasicBlock * getEntryBasicBlock() const
VPBlockBase * getSingleHierarchicalPredecessor()
VPBlockBase * getSingleSuccessor() const
const VPBlocksTy & getSuccessors() const
static void insertOnEdge(VPBlockBase *From, VPBlockBase *To, VPBlockBase *BlockPtr)
Inserts BlockPtr on the edge between From and To.
static bool isLatch(const VPBlockBase *VPB, const VPDominatorTree &VPDT)
Returns true if VPB is a loop latch, using isHeader().
static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse, VPBlockBase *BlockPtr)
Insert disconnected VPBlockBases IfTrue and IfFalse after BlockPtr.
static void connectBlocks(VPBlockBase *From, VPBlockBase *To, unsigned PredIdx=-1u, unsigned SuccIdx=-1u)
Connect VPBlockBases From and To bi-directionally.
static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To)
Disconnect VPBlockBases From and To bi-directionally.
static auto blocksOnly(T &&Range)
Return an iterator range over Range which only includes BlockTy blocks.
static void transferSuccessors(VPBlockBase *Old, VPBlockBase *New)
Transfer successors from Old to New. New must have no successors.
static SmallVector< VPBasicBlock * > blocksInSingleSuccessorChainBetween(VPBasicBlock *FirstBB, VPBasicBlock *LastBB)
Returns the blocks between FirstBB and LastBB, where FirstBB to LastBB forms a single-successor chain.
A recipe for generating conditional branches on the bits of a mask.
RAII object that stores the current insertion point and restores it when the object is destroyed.
VPlan-based builder utility analogous to IRBuilder.
VPInstruction * createFirstActiveLane(ArrayRef< VPValue * > Masks, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
VPInstruction * createOr(VPValue *LHS, VPValue *RHS, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
VPValue * createScalarZExtOrTrunc(VPValue *Op, Type *ResultTy, Type *SrcTy, DebugLoc DL)
VPInstruction * createNot(VPValue *Operand, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
VPInstruction * createAnyOfReduction(VPValue *ChainOp, VPValue *TrueVal, VPValue *FalseVal, DebugLoc DL=DebugLoc::getUnknown())
Create an AnyOf reduction pattern: or-reduce ChainOp, freeze the result, then select between TrueVal ...
VPInstruction * createLogicalAnd(VPValue *LHS, VPValue *RHS, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
VPDerivedIVRecipe * createDerivedIV(InductionDescriptor::InductionKind Kind, FPMathOperator *FPBinOp, VPIRValue *Start, VPValue *Current, VPValue *Step)
Convert the input value Current to the corresponding value of an induction with Start and Step values...
VPInstruction * createScalarCast(Instruction::CastOps Opcode, VPValue *Op, Type *ResultTy, DebugLoc DL, const VPIRMetadata &Metadata={})
VPWidenPHIRecipe * createWidenPhi(ArrayRef< VPValue * > IncomingValues, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
static VPBuilder getToInsertAfter(VPRecipeBase *R)
Create a VPBuilder to insert after R.
VPPhi * createScalarPhi(ArrayRef< VPValue * > IncomingValues, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="", const VPIRFlags &Flags={})
VPInstruction * createICmp(CmpInst::Predicate Pred, VPValue *A, VPValue *B, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
Create a new ICmp VPInstruction with predicate Pred and operands A and B.
VPInstruction * createSelect(VPValue *Cond, VPValue *TrueVal, VPValue *FalseVal, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="", const VPIRFlags &Flags={})
void setInsertPoint(VPBasicBlock *TheBB)
This specifies that created VPInstructions should be appended to the end of the specified block.
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
A recipe for generating the phi node tracking the current scalar iteration index.
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
VPValue * getVPValue(unsigned I)
Returns the VPValue with index I defined by the VPDef.
ArrayRef< VPRecipeValue * > definedValues()
Returns an ArrayRef of the values defined by the VPDef.
A recipe for converting the input value IV value to the corresponding value of an IV with different s...
Template specialization of the standard LLVM dominator tree utility for VPBlockBases.
bool properlyDominates(const VPRecipeBase *A, const VPRecipeBase *B)
A recipe to combine multiple recipes into a single 'expression' recipe, which should be considered a ...
A recipe representing a sequence of load -> update -> store as part of a histogram operation.
A special type of VPBasicBlock that wraps an existing IR basic block.
Class to record and manage LLVM IR flags.
static VPIRFlags getDefaultFlags(unsigned Opcode)
Returns default flags for Opcode for opcodes that support it, asserts otherwise.
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const
static LLVM_ABI_FOR_TEST VPIRInstruction * create(Instruction &I)
Create a new VPIRPhi for I, if it is a PHINode, otherwise create a VPIRInstruction.
This is a concrete Recipe that models a single VPlan-level instruction.
@ ExtractLane
Extracts a single lane (first operand) from a set of vector operands.
@ ExtractPenultimateElement
@ Unpack
Extracts all lanes from its (non-scalable) vector operand.
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
@ BuildVector
Creates a fixed-width vector containing all operands.
@ BuildStructVector
Given operands of (the same) struct type, creates a struct of fixed- width vectors each containing a ...
@ CanonicalIVIncrementForPart
@ ComputeReductionResult
Reduce the operands to the final reduction result using the operation specified via the operation's V...
const InterleaveGroup< Instruction > * getInterleaveGroup() const
VPValue * getMask() const
Return the mask used by this recipe.
ArrayRef< VPValue * > getStoredValues() const
Return the VPValues stored by this interleave group.
A recipe for interleaved memory operations with vector-predication intrinsics.
VPInterleaveRecipe is a recipe for transforming an interleave group of load or stores into one wide l...
VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when control converges back from ...
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
VPBasicBlock * getParent()
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
void insertAfter(VPRecipeBase *InsertPos)
Insert an unlinked Recipe into a basic block immediately after the specified Recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipies from IR instructions.
VPHistogramRecipe * widenIfHistogram(VPInstruction *VPI)
If VPI represents a histogram operation (as determined by LoopVectorizationLegality) make that safe f...
VPRecipeBase * tryToWidenMemory(VPInstruction *VPI, VFRange &Range)
Check if the load or store instruction VPI should widened for Range.Start and potentially masked.
bool replaceWithFinalIfReductionStore(VPInstruction *VPI, VPBuilder &FinalRedStoresBuilder)
If VPI is a store of a reduction into an invariant address, delete it.
VPReplicateRecipe * handleReplication(VPInstruction *VPI, VFRange &Range)
Build a VPReplicationRecipe for VPI.
A recipe to represent inloop reduction operations with vector-predication intrinsics,...
A recipe for handling reduction phis.
void setVFScaleFactor(unsigned ScaleFactor)
Set the VFScaleFactor for this reduction phi.
unsigned getVFScaleFactor() const
Get the factor that the VF of this recipe's output should be scaled by, or 1 if it isn't scaled.
RecurKind getRecurrenceKind() const
Returns the recurrence kind of the reduction.
A recipe to represent inloop, ordered or partial reduction operations.
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
const VPBlockBase * getEntry() const
bool isReplicator() const
An indicator whether this region is to generate multiple replicated instances of output IR correspond...
VPInstruction * getOrCreateCanonicalIVIncrement()
Get the canonical IV increment instruction if it exists.
void setExiting(VPBlockBase *ExitingBlock)
Set ExitingBlock as the exiting VPBlockBase of this VPRegionBlock.
Type * getCanonicalIVType() const
Return the type of the canonical IV for loop regions.
bool hasCanonicalIVNUW() const
Indicates if NUW is set for the canonical IV increment, for loop regions.
void clearCanonicalIVNUW(VPInstruction *Increment)
Unsets NUW for the canonical IV increment Increment, for loop regions.
VPRegionValue * getCanonicalIV()
Return the canonical induction variable of the region, null for replicating regions.
const VPBlockBase * getExiting() const
VPBasicBlock * getPreheaderVPBB()
Returns the pre-header VPBasicBlock of the loop region.
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
bool isSingleScalar() const
bool isPredicated() const
VPValue * getMask()
Return the mask of a predicated VPReplicateRecipe.
A recipe for handling phi nodes of integer and floating-point inductions, producing their scalar valu...
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
VPSingleDefRecipe * clone() override=0
Clone the current recipe.
An analysis for type-inference for VPValues.
LLVMContext & getContext()
Return the LLVMContext used by the analysis.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
void setOperand(unsigned I, VPValue *New)
unsigned getNumOperands() const
VPValue * getOperand(unsigned N) const
void addOperand(VPValue *Operand)
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into,...
Value * getLiveInIRValue() const
Return the underlying IR value for a VPIRValue.
bool isDefinedOutsideLoopRegions() const
Returns true if the VPValue is defined outside any loop.
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
void setUnderlyingValue(Value *Val)
void replaceAllUsesWith(VPValue *New)
unsigned getNumUsers() const
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplac...
A recipe to compute a pointer to the last element of each part of a widened memory access for widened...
VPWidenCastRecipe is a recipe to create vector cast instructions.
Instruction::CastOps getOpcode() const
A recipe for handling GEP instructions.
Base class for widened induction (VPWidenIntOrFpInductionRecipe and VPWidenPointerInductionRecipe),...
VPIRValue * getStartValue() const
Returns the start value of the induction.
VPValue * getStepValue()
Returns the step value of the induction.
const InductionDescriptor & getInductionDescriptor() const
Returns the induction descriptor for the recipe.
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
VPIRValue * getStartValue() const
Returns the start value of the induction.
VPValue * getSplatVFValue() const
If the recipe has been unrolled, return the VPValue for the induction increment, otherwise return nul...
VPValue * getLastUnrolledPartOperand()
Returns the VPValue representing the value of this induction at the last unrolled part,...
A recipe for widening vector intrinsics.
A common base class for widening memory operations.
A recipe for widened phis.
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenRecipe.
VPWidenRecipe * clone() override
Clone the current recipe.
unsigned getOpcode() const
VPlan models a candidate for vectorization, encoding various decisions take to produce efficient outp...
VPIRValue * getLiveIn(Value *V) const
Return the live-in VPIRValue for V, if there is one or nullptr otherwise.
bool hasVF(ElementCount VF) const
const DataLayout & getDataLayout() const
LLVMContext & getContext() const
VPBasicBlock * getEntry()
bool hasScalableVF() const
VPValue * getTripCount() const
The trip count of the original loop.
VPValue * getOrCreateBackedgeTakenCount()
The backedge taken count of the original loop.
iterator_range< SmallSetVector< ElementCount, 2 >::iterator > vectorFactors() const
Returns an iterator range over all VFs of the plan.
VPIRValue * getFalse()
Return a VPIRValue wrapping i1 false.
VPSymbolicValue & getVFxUF()
Returns VF * UF of the vector loop region.
VPIRValue * getAllOnesValue(Type *Ty)
Return a VPIRValue wrapping the AllOnes value of type Ty.
VPRegionBlock * createReplicateRegion(VPBlockBase *Entry, VPBlockBase *Exiting, const std::string &Name="")
Create a new replicate region with Entry, Exiting and Name.
auto getLiveIns() const
Return the list of live-in VPValues available in the VPlan.
bool hasUF(unsigned UF) const
ArrayRef< VPIRBasicBlock * > getExitBlocks() const
Return an ArrayRef containing VPIRBasicBlocks wrapping the exit blocks of the original scalar loop.
VPSymbolicValue & getVectorTripCount()
The vector trip count.
VPValue * getBackedgeTakenCount() const
VPIRValue * getOrAddLiveIn(Value *V)
Gets the live-in VPIRValue for V or adds a new live-in (if none exists yet) for V.
VPIRValue * getZero(Type *Ty)
Return a VPIRValue wrapping the null value of type Ty.
void setVF(ElementCount VF)
bool isUnrolled() const
Returns true if the VPlan already has been unrolled, i.e.
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
unsigned getConcreteUF() const
Returns the concrete UF of the plan, after unrolling.
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
VPBasicBlock * getMiddleBlock()
Returns the 'middle' block of the plan, that is the block that selects whether to execute the scalar ...
VPBasicBlock * createVPBasicBlock(const Twine &Name, VPRecipeBase *Recipe=nullptr)
Create a new VPBasicBlock with Name and containing Recipe if present.
VPIRValue * getTrue()
Return a VPIRValue wrapping i1 true.
VPBasicBlock * getVectorPreheader() const
Returns the preheader of the vector loop region, if one exists, or null otherwise.
VPSymbolicValue & getUF()
Returns the UF of the vector loop region.
bool hasScalarVFOnly() const
VPBasicBlock * getScalarPreheader() const
Return the VPBasicBlock for the preheader of the scalar loop.
VPSymbolicValue & getVF()
Returns the VF of the vector loop region.
bool hasScalarTail() const
Returns true if the scalar tail may execute after the vector loop, i.e.
LLVM_ABI_FOR_TEST VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
VPIRValue * getConstantInt(Type *Ty, uint64_t Val, bool IsSigned=false)
Return a VPIRValue wrapping a ConstantInt with the given type and value.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
iterator_range< user_iterator > users()
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
constexpr ScalarTy getFixedValue() const
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A unsign-divided by B, rounded by the given rounding mode.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
match_isa< To... > m_Isa()
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
auto m_Cmp()
Matches any compare instruction and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_MaskedStore(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
Matches MaskedStore Intrinsic.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
LogicalOp_match< LHS, RHS, Instruction::And > m_LogicalAnd(const LHS &L, const RHS &R)
Matches L && R either in the form of L & R or L ?
BinaryOp_match< LHS, RHS, Instruction::FMul > m_FMul(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
match_deferred< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
auto match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_MaskedLoad(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
Matches MaskedLoad Intrinsic.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
SpecificCmpClass_match< LHS, RHS, CmpInst > m_SpecificCmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)
Matches FAdd with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::And, true > m_c_LogicalAnd(const LHS &L, const RHS &R)
Matches L && R with LHS and RHS in either order.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
auto m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
bool match(const SCEV *S, const Pattern &P)
SCEVAffineAddRec_match< Op0_t, Op1_t, match_isa< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
VPInstruction_match< VPInstruction::ExtractLastLane, VPInstruction_match< VPInstruction::ExtractLastPart, Op0_t > > m_ExtractLastLaneOfLastPart(const Op0_t &Op0)
AllRecipe_commutative_match< Instruction::And, Op0_t, Op1_t > m_c_BinaryAnd(const Op0_t &Op0, const Op1_t &Op1)
Match a binary AND operation.
AllRecipe_match< Instruction::Or, Op0_t, Op1_t > m_BinaryOr(const Op0_t &Op0, const Op1_t &Op1)
Match a binary OR operation.
VPInstruction_match< VPInstruction::AnyOf > m_AnyOf()
AllRecipe_commutative_match< Instruction::Or, Op0_t, Op1_t > m_c_BinaryOr(const Op0_t &Op0, const Op1_t &Op1)
VPInstruction_match< VPInstruction::ComputeReductionResult, Op0_t > m_ComputeReductionResult(const Op0_t &Op0)
auto m_WidenAnyExtend(const Op0_t &Op0)
match_bind< VPIRValue > m_VPIRValue(VPIRValue *&V)
Match a VPIRValue.
VPInstruction_match< VPInstruction::StepVector > m_StepVector()
auto m_VPPhi(const Op0_t &Op0, const Op1_t &Op1)
VPInstruction_match< VPInstruction::BranchOnTwoConds > m_BranchOnTwoConds()
AllRecipe_match< Opcode, Op0_t, Op1_t > m_Binary(const Op0_t &Op0, const Op1_t &Op1)
VPInstruction_match< VPInstruction::LastActiveLane, Op0_t > m_LastActiveLane(const Op0_t &Op0)
auto m_WidenIntrinsic(const T &...Ops)
VPInstruction_match< VPInstruction::ExitingIVValue, Op0_t > m_ExitingIVValue(const Op0_t &Op0)
VPInstruction_match< Instruction::ExtractElement, Op0_t, Op1_t > m_ExtractElement(const Op0_t &Op0, const Op1_t &Op1)
specific_intval< 1 > m_False()
VPInstruction_match< VPInstruction::ExtractLastLane, Op0_t > m_ExtractLastLane(const Op0_t &Op0)
VPInstruction_match< VPInstruction::ActiveLaneMask, Op0_t, Op1_t, Op2_t > m_ActiveLaneMask(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2)
match_bind< VPSingleDefRecipe > m_VPSingleDefRecipe(VPSingleDefRecipe *&V)
Match a VPSingleDefRecipe, capturing if we match.
VPInstruction_match< VPInstruction::BranchOnCount > m_BranchOnCount()
auto m_GetElementPtr(const Op0_t &Op0, const Op1_t &Op1)
specific_intval< 1 > m_True()
auto m_VPValue()
Match an arbitrary VPValue and ignore it.
VectorEndPointerRecipe_match< Op0_t, Op1_t > m_VecEndPtr(const Op0_t &Op0, const Op1_t &Op1)
VPInstruction_match< VPInstruction::ExtractLastPart, Op0_t > m_ExtractLastPart(const Op0_t &Op0)
VPInstruction_match< VPInstruction::Broadcast, Op0_t > m_Broadcast(const Op0_t &Op0)
VPInstruction_match< VPInstruction::ExplicitVectorLength, Op0_t > m_EVL(const Op0_t &Op0)
VPInstruction_match< VPInstruction::BuildVector > m_BuildVector()
BuildVector is matches only its opcode, w/o matching its operands as the number of operands is not fi...
VPInstruction_match< VPInstruction::ExtractPenultimateElement, Op0_t > m_ExtractPenultimateElement(const Op0_t &Op0)
match_bind< VPInstruction > m_VPInstruction(VPInstruction *&V)
Match a VPInstruction, capturing if we match.
VPInstruction_match< VPInstruction::FirstActiveLane, Op0_t > m_FirstActiveLane(const Op0_t &Op0)
auto m_DerivedIV(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2)
VPInstruction_match< VPInstruction::BranchOnCond > m_BranchOnCond()
VPInstruction_match< VPInstruction::ExtractLane, Op0_t, Op1_t > m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1)
VPInstruction_match< VPInstruction::Reverse, Op0_t > m_Reverse(const Op0_t &Op0)
NodeAddr< DefNode * > Def
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or on...
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr)
Get or create a VPValue that corresponds to the expansion of Expr.
bool cannotHoistOrSinkRecipe(const VPRecipeBase &R, bool Sinking=false)
Return true if we do not know how to (mechanically) hoist or sink R.
VPInstruction * findComputeReductionResult(VPReductionPHIRecipe *PhiR)
Find the ComputeReductionResult recipe for PhiR, looking through selects inserted for predicated redu...
VPInstruction * findCanonicalIVIncrement(VPlan &Plan)
Find the canonical IV increment of Plan's vector loop region.
std::optional< MemoryLocation > getMemoryLocation(const VPRecipeBase &R)
Return a MemoryLocation for R with noalias metadata populated from R, if the recipe is supported and ...
bool onlyFirstLaneUsed(const VPValue *Def)
Returns true if only the first lane of Def is used.
VPRecipeBase * findRecipe(VPValue *Start, PredT Pred)
Search Start's users for a recipe satisfying Pred, looking through recipes with definitions.
VPSingleDefRecipe * findHeaderMask(VPlan &Plan)
Collect the header mask with the pattern: (ICMP_ULE, WideCanonicalIV, backedge-taken-count) TODO: Int...
bool onlyScalarValuesUsed(const VPValue *Def)
Returns true if only scalar values of Def are used by all users.
static VPRecipeBase * findUserOf(VPValue *V, const MatchT &P)
If V is used by a recipe matching pattern P, return it.
bool isUniformAcrossVFsAndUFs(const VPValue *V)
Checks if V is uniform across all VF lanes and UF parts.
const SCEV * getSCEVExprForVPValue(const VPValue *V, PredicatedScalarEvolution &PSE, const Loop *L=nullptr)
Return the SCEV expression for V.
bool isHeaderMask(const VPValue *V, const VPlan &Plan)
Return true if V is a header mask in Plan.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
constexpr auto not_equal_to(T &&Arg)
Functor variant of std::not_equal_to that can be used as a UnaryPredicate in functional algorithms li...
void stable_sort(R &&Range)
auto min_element(R &&Range)
Provide wrappers to std::min_element which take ranges instead of having to pass begin/end explicitly...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
DenseMap< const Value *, const SCEV * > ValueToSCEVMapTy
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
constexpr from_range_t from_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
auto cast_or_null(const Y &Val)
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing t...
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like a...
SmallVector< VPRegisterUsage, 8 > calculateRegisterUsageForPlan(VPlan &Plan, ArrayRef< ElementCount > VFs, const TargetTransformInfo &TTI, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Estimate the register usage for Plan and vectorization factors in VFs by calculating the highest numb...
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI_FOR_TEST cl::opt< bool > EnableWideActiveLaneMask
UncountableExitStyle
Different methods of handling early exits.
@ ReadOnly
No side effects to worry about, so we can process any uncountable exits in the loop and branch either...
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
iterator_range< filter_iterator< detail::IterOfRange< RangeT >, PredicateT > > make_filter_range(RangeT &&Range, PredicateT Pred)
Convenience function that takes a range of elements and a predicate, and return a new filter_iterator...
bool canConstantBeExtended(const APInt *C, Type *NarrowType, TTI::PartialReductionExtendKind ExtKind)
Check if a constant CI can be safely treated as having been extended from a narrower type with the gi...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
RecurKind
These are the kinds of recurrences that we support.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ FindIV
FindIV reduction with select(icmp(),x,y) where one of (x,y) is a loop induction variable (increasing ...
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ FSub
Subtraction of floats.
@ FAddChainWithSubs
A chain of fadds and fsubs.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Sub
Subtraction of integers.
@ AddChainWithSubs
A chain of adds and subs.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about an recurrence kind, return the identity for the @llvm.vector....
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the specified block at the specified instruction.
FunctionAddr VTableAddr Next
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
ArrayRef(const T &OneElt) -> ArrayRef< T >
auto make_second_range(ContainerTy &&c)
Given a container of pairs, return a range over the second elements.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
@ Default
The result value is uniform if and only if all operands are uniform.
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
RemoveMask_match(const Op0_t &In, Op1_t &Out)
bool match(OpTy *V) const
MDNode * Scope
The tag for alias scope specification (used with noalias).
This struct is a compact representation of a valid (non-zero power of two) alignment.
An information struct used to provide DenseMap with the various necessary components for a given valu...
This reduction is unordered with the partial result scaled down by some factor.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
Struct to hold various analysis needed for cost computations.
TargetTransformInfo::TargetCostKind CostKind
const TargetTransformInfo & TTI
A VPValue representing a live-in from the input IR or a constant.
Type * getType() const
Returns the type of the underlying IR value.
A struct that represents some properties of the register usage of a loop.
SmallMapVector< unsigned, unsigned, 4 > MaxLocalUsers
Holds the maximum number of concurrent live intervals in the loop.
InstructionCost spillCost(const TargetTransformInfo &TTI, TargetTransformInfo::TargetCostKind CostKind, unsigned OverrideMaxNumRegs=0) const
Calculate the estimated cost of any spills due to using more registers than the number available for ...
A symbolic live-in VPValue, used for values like vector trip count, VF, and VFxUF.
bool isMaterialized() const
Returns true if this symbolic value has been materialized.
A recipe for widening load operations with vector-predication intrinsics, using the address to load f...
A recipe for widening load operations, using the address to load from and an optional mask.
A recipe for widening store operations with vector-predication intrinsics, using the value to store,...
A recipe for widening store operations, using the stored value, the address to store to and an option...