32#if LLVM_ENABLE_ABI_BREAKING_CHECKS
33#define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
35#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
42 cl::desc(
"When performing SCEV expansion only if it is cheap to do, this "
43 "controls the budget that is considered cheap (default = 4)"));
57 NUW = OBO->hasNoUnsignedWrap();
58 NSW = OBO->hasNoSignedWrap();
61 Exact = PEO->isExact();
65 NNeg = PNI->hasNonNeg();
67 NUW = TI->hasNoUnsignedWrap();
68 NSW = TI->hasNoSignedWrap();
78 I->setHasNoUnsignedWrap(
NUW);
79 I->setHasNoSignedWrap(
NSW);
88 I->setHasNoUnsignedWrap(
NUW);
89 I->setHasNoSignedWrap(
NSW);
114 Value *Ret =
nullptr;
119 if (U->getType() != Ty)
128 if (IP->getParent() == CI->
getParent() && &*BIP != CI &&
138 SCEVInsertPointGuard Guard(Builder,
this);
139 Builder.SetInsertPoint(&*IP);
140 Ret = Builder.CreateCast(
Op, V, Ty,
V->getName());
157 IP =
II->getNormalDest()->begin();
165 IP = MustDominate->
getParent()->getFirstInsertionPt();
167 assert(!IP->isEHPad() &&
"unexpected eh pad!");
183 while (!WorkList.
empty()) {
192 InsertedValues.erase(
I);
193 InsertedPostIncValues.erase(
I);
195 I->eraseFromParent();
200SCEVExpander::GetOptimalInsertionPointForCastOf(
Value *V)
const {
219 "Expected the cast argument to be a global/constant");
220 return Builder.GetInsertBlock()
223 .getFirstInsertionPt();
231 assert((
Op == Instruction::BitCast ||
232 Op == Instruction::PtrToInt ||
233 Op == Instruction::IntToPtr) &&
234 "InsertNoopCastOfTo cannot perform non-noop casts!");
235 assert(SE.getTypeSizeInBits(
V->getType()) == SE.getTypeSizeInBits(Ty) &&
236 "InsertNoopCastOfTo cannot change sizes!");
243 if (
Op == Instruction::IntToPtr) {
245 if (DL.isNonIntegralPointerType(PtrTy))
249 if (
Op == Instruction::BitCast) {
250 if (
V->getType() == Ty)
258 if ((
Op == Instruction::PtrToInt ||
Op == Instruction::IntToPtr) &&
259 SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(
V->getType())) {
261 if ((CI->
getOpcode() == Instruction::PtrToInt ||
262 CI->
getOpcode() == Instruction::IntToPtr) &&
263 SE.getTypeSizeInBits(CI->
getType()) ==
267 if ((
CE->getOpcode() == Instruction::PtrToInt ||
268 CE->getOpcode() == Instruction::IntToPtr) &&
269 SE.getTypeSizeInBits(
CE->getType()) ==
270 SE.getTypeSizeInBits(
CE->getOperand(0)->getType()))
271 return CE->getOperand(0);
279 return ReuseOrCreateCast(V, Ty,
Op, GetOptimalInsertionPointForCastOf(V));
295 unsigned ScanLimit = 6;
299 if (IP != BlockBegin) {
301 for (; ScanLimit; --IP, --ScanLimit) {
316 if (IP->getOpcode() == (
unsigned)Opcode && IP->getOperand(0) ==
LHS &&
317 IP->getOperand(1) ==
RHS && !canGenerateIncompatiblePoison(&*IP))
319 if (IP == BlockBegin)
break;
324 DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
325 SCEVInsertPointGuard Guard(Builder,
this);
329 while (
const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
330 if (!
L->isLoopInvariant(
LHS) || !
L->isLoopInvariant(
RHS))
break;
332 if (!Preheader)
break;
340 Builder.SetCurrentDebugLocation(Loc);
345 if (LSRMode && !PostIncLoops.empty() &&
346 all_of(PostIncLoops, [&](
const Loop *L) {
347 return !
L->contains(Builder.GetInsertBlock());
351 BO->setHasNoUnsignedWrap();
353 BO->setHasNoSignedWrap();
354 return Builder.Insert(BO);
356 return Builder.CreateNoWrapBinOp(Opcode,
LHS,
RHS, IsNUW, IsNSW);
394 : GEPNoWrapFlags::
none();
399 return Builder.CreatePtrAdd(CLHS, CRHS,
"", NW);
402 unsigned ScanLimit = 6;
406 if (IP != BlockBegin) {
408 for (; ScanLimit; --IP, --ScanLimit) {
410 if (
GEP->getPointerOperand() == V &&
411 GEP->getSourceElementType() == Builder.getInt8Ty() &&
412 GEP->getOperand(1) == Idx) {
414 GEP->setNoWrapFlags(
GEP->getNoWrapFlags() & NW);
418 if (IP == BlockBegin)
break;
423 SCEVInsertPointGuard Guard(Builder,
this);
426 while (
const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
427 if (!
L->isLoopInvariant(V) || !
L->isLoopInvariant(Idx))
break;
429 if (!Preheader)
break;
436 return Builder.CreatePtrAdd(V, Idx,
"scevgep", NW);
446 if (
A->contains(
B))
return B;
447 if (
B->contains(
A))
return A;
448 if (DT.
dominates(
A->getHeader(),
B->getHeader()))
return B;
449 if (DT.
dominates(
B->getHeader(),
A->getHeader()))
return A;
455const Loop *SCEVExpander::getRelevantLoop(
const SCEV *S) {
457 auto Pair = RelevantLoops.try_emplace(S);
459 return Pair.first->second;
479 const Loop *
L =
nullptr;
484 return RelevantLoops[S] =
L;
489 return Pair.first->second = SE.LI.getLoopFor(
I->getParent());
505 explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
507 bool operator()(std::pair<const Loop *, const SCEV *>
LHS,
508 std::pair<const Loop *, const SCEV *>
RHS)
const {
515 if (
LHS.first !=
RHS.first)
521 if (
LHS.second->isNonConstantNegative()) {
522 if (!
RHS.second->isNonConstantNegative())
524 }
else if (
RHS.second->isNonConstantNegative())
536 const SCEV *URemLHS =
nullptr;
537 const SCEV *URemRHS =
nullptr;
550 for (
const SCEV *
Op :
reverse(S->operands()))
551 OpsAndLoops.
push_back(std::make_pair(getRelevantLoop(
Op),
Op));
559 Value *Sum =
nullptr;
560 for (
auto I = OpsAndLoops.
begin(),
E = OpsAndLoops.
end();
I !=
E;) {
561 const Loop *CurLoop =
I->first;
562 const SCEV *
Op =
I->second;
570 assert(!
Op->getType()->isPointerTy() &&
"Only first op can be pointer");
575 for (;
I !=
E &&
I->first == CurLoop; ++
I) {
578 const SCEV *
X =
I->second;
581 X = SE.getSCEV(
U->getValue());
584 Sum = expandAddToGEP(SE.getAddExpr(NewOps), Sum, S.
getNoWrapFlags());
585 }
else if (
Op->isNonConstantNegative()) {
587 Value *
W = expand(SE.getNegativeSCEV(
Op));
607 Type *Ty = S->getType();
612 for (
const SCEV *
Op :
reverse(S->operands()))
613 OpsAndLoops.
push_back(std::make_pair(getRelevantLoop(
Op),
Op));
620 Value *Prod =
nullptr;
621 auto I = OpsAndLoops.
begin();
626 const auto ExpandOpBinPowN = [
this, &
I, &OpsAndLoops]() {
636 while (
E != OpsAndLoops.
end() && *
I == *
E &&
Exponent != MaxExponent) {
640 assert(
Exponent > 0 &&
"Trying to calculate a zeroth exponent of operand?");
648 for (uint64_t BinExp = 2; BinExp <=
Exponent; BinExp <<= 1) {
659 assert(Result &&
"Nothing was expanded?");
663 while (
I != OpsAndLoops.
end()) {
666 Prod = ExpandOpBinPowN();
667 }
else if (
I->second->isAllOnesValue()) {
674 Value *
W = ExpandOpBinPowN();
683 if (
RHS->logBase2() ==
RHS->getBitWidth() - 1)
685 Prod = InsertBinop(Instruction::Shl, Prod,
686 ConstantInt::get(Ty,
RHS->logBase2()), NWFlags,
701 const APInt &
RHS = SC->getAPInt();
702 if (
RHS.isPowerOf2())
703 return InsertBinop(Instruction::LShr,
LHS,
704 ConstantInt::get(SC->getType(),
RHS.logBase2()),
708 const SCEV *RHSExpr = S->getRHS();
711 bool GuaranteedNotPoison =
712 ScalarEvolution::isGuaranteedNotToBePoison(RHSExpr);
713 if (!GuaranteedNotPoison)
714 RHS = Builder.CreateFreeze(
RHS);
719 if (!SE.isKnownNonZero(RHSExpr) || !GuaranteedNotPoison)
720 RHS = Builder.CreateIntrinsic(
RHS->
getType(), Intrinsic::umax,
721 {RHS, ConstantInt::get(RHS->getType(), 1)});
724 SE.isKnownNonZero(S->getRHS()));
737 if (L == IVIncInsertLoop) {
740 if (!SE.DT.dominates(OInst, IVIncInsertPos))
754 return isNormalAddRecExprPHI(PN, IncV, L);
769 if (IncV == InsertPos)
776 case Instruction::Add:
777 case Instruction::Sub: {
779 if (!OInst || SE.DT.dominates(OInst, InsertPos))
783 case Instruction::BitCast:
785 case Instruction::GetElementPtr:
790 if (!SE.DT.dominates(OInst, InsertPos))
816 if (Builder.GetInsertPoint() == It)
817 Builder.SetInsertPoint(&*NewInsertPt);
818 for (
auto *InsertPtGuard : InsertPointGuards)
819 if (InsertPtGuard->GetInsertPoint() == It)
820 InsertPtGuard->SetInsertPoint(NewInsertPt);
827 bool RecomputePoisonFlags) {
832 I->dropPoisonGeneratingFlags();
834 if (
auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
836 BO->setHasNoUnsignedWrap(
838 BO->setHasNoSignedWrap(
843 if (SE.DT.dominates(IncV, InsertPos)) {
844 if (RecomputePoisonFlags)
845 FixupPoisonFlags(IncV);
855 if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
867 if (SE.DT.dominates(IncV, InsertPos))
871 fixupInsertPoints(
I);
873 if (RecomputePoisonFlags)
896 (IVOper =
getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
913 IncV = Builder.CreatePtrAdd(PN, StepV,
"scevgep");
916 Builder.CreateSub(PN, StepV, Twine(IVName) +
".iv.next") :
917 Builder.CreateAdd(PN, StepV, Twine(IVName) +
".iv.next");
929 Type *PhiTy = Phi->getType();
943 if (Phi == Requested) {
966 const SCEV *ExtendAfterOp =
968 return ExtendAfterOp == OpAfterExtend;
980 const SCEV *ExtendAfterOp =
982 return ExtendAfterOp == OpAfterExtend;
989SCEVExpander::getAddRecExprPHILiterally(
const SCEVAddRecExpr *Normalized,
992 assert((!IVIncInsertLoop || IVIncInsertPos) &&
993 "Uninitialized insert position");
998 PHINode *AddRecPhiMatch =
nullptr;
1005 bool TryNonMatchingSCEV =
1007 SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
1009 for (PHINode &PN :
L->getHeader()->phis()) {
1010 if (!SE.isSCEVable(PN.
getType()))
1017 DebugType,
dbgs() <<
"One incomplete PHI is found: " << PN <<
"\n");
1025 bool IsMatchingSCEV = PhiSCEV == Normalized;
1029 if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1040 if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
1043 if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
1048 if (IsMatchingSCEV) {
1052 AddRecPhiMatch = &PN;
1058 if ((!TruncTy || InvertStep) &&
1062 AddRecPhiMatch = &PN;
1064 TruncTy = Normalized->
getType();
1068 if (AddRecPhiMatch) {
1071 InsertedValues.insert(AddRecPhiMatch);
1073 rememberInstruction(IncV);
1075 ReusedValues.insert(AddRecPhiMatch);
1076 ReusedValues.insert(IncV);
1077 return AddRecPhiMatch;
1082 SCEVInsertPointGuard Guard(Builder,
this);
1092 PostIncLoops.
clear();
1095 assert(
L->getLoopPreheader() &&
1096 "Can't expand add recurrences without a loop preheader!");
1098 expand(Normalized->
getStart(),
L->getLoopPreheader()->getTerminator());
1115 Step = SE.getNegativeSCEV(Step);
1117 Value *StepV = expand(Step,
L->getHeader()->getFirstInsertionPt());
1122 bool IncrementIsNUW = !useSubtract &&
IsIncrementNUW(SE, Normalized);
1123 bool IncrementIsNSW = !useSubtract &&
IsIncrementNSW(SE, Normalized);
1127 Builder.SetInsertPoint(Header, Header->begin());
1129 Builder.CreatePHI(ExpandTy,
pred_size(Header), Twine(IVName) +
".iv");
1134 if (!
L->contains(Pred)) {
1143 IVIncInsertPos : Pred->getTerminator();
1144 Builder.SetInsertPoint(InsertPos);
1145 Value *IncV = expandIVInc(PN, StepV, L, useSubtract);
1158 PostIncLoops = SavedPostIncLoops;
1162 InsertedValues.
insert(PN);
1163 InsertedIVs.push_back(PN);
1169 const Loop *
L = S->getLoop();
1173 const SCEVAddRecExpr *Normalized = S;
1174 if (PostIncLoops.count(L)) {
1181 [[maybe_unused]]
const SCEV *
Start = Normalized->
getStart();
1183 assert(SE.properlyDominates(Start,
L->getHeader()) &&
1184 "Start does not properly dominate loop header");
1185 assert(SE.dominates(Step,
L->getHeader()) &&
"Step not dominate loop header");
1189 Type *TruncTy =
nullptr;
1190 bool InvertStep =
false;
1191 PHINode *PN = getAddRecExprPHILiterally(Normalized, L, TruncTy, InvertStep);
1195 if (!PostIncLoops.count(L))
1200 assert(LatchBlock &&
"PostInc mode requires a unique loop latch!");
1208 if (!S->hasNoUnsignedWrap())
1209 I->setHasNoUnsignedWrap(
false);
1210 if (!S->hasNoSignedWrap())
1211 I->setHasNoSignedWrap(
false);
1219 &*Builder.GetInsertPoint())) {
1232 Step = SE.getNegativeSCEV(Step);
1236 SCEVInsertPointGuard Guard(Builder,
this);
1237 StepV = expand(Step,
L->getHeader()->getFirstInsertionPt());
1239 Result = expandIVInc(PN, StepV, L, useSubtract);
1247 if (TruncTy !=
Result->getType())
1248 Result = Builder.CreateTrunc(Result, TruncTy);
1252 Result = Builder.CreateSub(expand(Normalized->
getStart()), Result);
1259 Type *STy = S->getType();
1260 const Loop *
L = S->getLoop();
1263 !SE.DT.dominates(EB, Builder.GetInsertBlock()))
1268 auto CanReuse = [&](
const SCEV *ExitSCEV) ->
const SCEV * {
1271 const SCEV *Diff = SE.getMinusSCEV(S, ExitSCEV);
1272 const SCEV *
Op = Diff;
1282 for (
auto &PN : EB->
phis()) {
1283 if (!SE.isSCEVable(PN.
getType()))
1285 auto *ExitSCEV = SE.getSCEV(&PN);
1289 const SCEV *Diff =
nullptr;
1291 DL.getAddressType(PhiTy) == STy) {
1293 const SCEV *AddrSCEV = SE.getPtrToAddrExpr(ExitSCEV);
1294 Diff = CanReuse(AddrSCEV);
1296 const SCEV *IntSCEV = SE.getPtrToIntExpr(ExitSCEV, STy);
1297 Diff = CanReuse(IntSCEV);
1299 }
else if (STy == PhiTy) {
1300 Diff = CanReuse(ExitSCEV);
1306 "difference must be of integer type");
1307 Value *DiffV = expand(Diff);
1308 Value *BaseV = fixupLCSSAFormFor(&PN);
1311 return Builder.CreatePtrAdd(BaseV, DiffV);
1312 BaseV = Builder.CreatePtrToAddr(BaseV);
1314 return Builder.CreateAdd(BaseV, DiffV);
1331 if (!CanonicalMode || (S->getNumOperands() > 2))
1332 return expandAddRecExprLiterally(S);
1334 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1335 const Loop *
L = S->getLoop();
1338 PHINode *CanonicalIV =
nullptr;
1339 if (PHINode *PN =
L->getCanonicalInductionVariable())
1340 if (SE.getTypeSizeInBits(PN->
getType()) >= SE.getTypeSizeInBits(Ty))
1346 SE.getTypeSizeInBits(CanonicalIV->
getType()) > SE.getTypeSizeInBits(Ty) &&
1347 !S->getType()->isPointerTy()) {
1349 for (
unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1350 NewOps[i] = SE.getAnyExtendExpr(S->getOperand(i), CanonicalIV->
getType());
1355 &*Builder.GetInsertPoint())
1356 : Builder.GetInsertPoint();
1357 V = expand(SE.getTruncateExpr(SE.getUnknown(V), Ty), NewInsertPt);
1363 if (
Value *V = tryToReuseLCSSAPhi(S))
1367 if (!S->getStart()->isZero()) {
1369 Value *StartV = expand(SE.getPointerBase(S));
1370 return expandAddToGEP(SE.removePointerBase(S), StartV,
1375 NewOps[0] = SE.getConstant(Ty, 0);
1383 const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
1384 const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
1385 return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1396 rememberInstruction(CanonicalIV);
1398 SmallPtrSet<BasicBlock *, 4> PredSeen;
1399 Constant *One = ConstantInt::get(Ty, 1);
1402 if (!PredSeen.
insert(HP).second) {
1409 if (
L->contains(HP)) {
1416 rememberInstruction(
Add);
1425 if (S->isAffine() && S->getOperand(1)->isOne()) {
1426 assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->
getType()) &&
1427 "IVs with types different from the canonical IV should "
1428 "already have been handled!");
1437 expand(SE.getTruncateOrNoop(
1438 SE.getMulExpr(SE.getUnknown(CanonicalIV),
1439 SE.getNoopOrAnyExtend(S->getOperand(1),
1447 const SCEV *IH = SE.getUnknown(CanonicalIV);
1450 const SCEV *NewS = S;
1451 const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->
getType());
1458 const SCEV *
T = SE.getTruncateOrNoop(V, Ty);
1463 Value *
V = expand(S->getOperand());
1464 Type *Ty = S->getType();
1469 for (User *U :
V->users()) {
1471 if (CI && CI->
getType() == Ty &&
1472 (CI->
getOpcode() == CastInst::PtrToAddr ||
1473 CI->
getOpcode() == CastInst::PtrToInt) &&
1474 &*BIP != CI && SE.DT.dominates(CI, &*BIP))
1478 return ReuseOrCreateCast(V, Ty, CastInst::PtrToAddr,
1479 GetOptimalInsertionPointForCastOf(V));
1483 Value *
V = expand(S->getOperand());
1484 return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt,
1485 GetOptimalInsertionPointForCastOf(V));
1489 Value *
V = expand(S->getOperand());
1490 return Builder.CreateTrunc(V, S->getType());
1495 Value *
V = expand(S->getOperand());
1496 return Builder.CreateZExt(V, S->getType(),
"",
1497 SE.isKnownNonNegative(S->getOperand()));
1502 Value *
V = expand(S->getOperand());
1503 return Builder.CreateSExt(V, S->getType());
1508 bool IsSequential) {
1509 bool PrevSafeMode = SafeUDivMode;
1510 SafeUDivMode |= IsSequential;
1511 Value *
LHS = expand(S->getOperand(S->getNumOperands() - 1));
1514 LHS = Builder.CreateFreeze(
LHS);
1515 for (
int i = S->getNumOperands() - 2; i >= 0; --i) {
1516 SafeUDivMode = (IsSequential && i != 0) || PrevSafeMode;
1517 Value *
RHS = expand(S->getOperand(i));
1518 if (IsSequential && i != 0)
1519 RHS = Builder.CreateFreeze(
RHS);
1522 Sel = Builder.CreateIntrinsic(IntrinID, {Ty}, {
LHS,
RHS},
1527 Sel = Builder.CreateSelect(ICmp,
LHS,
RHS, Name);
1531 SafeUDivMode = PrevSafeMode;
1536 return expandMinMaxExpr(S, Intrinsic::smax,
"smax");
1540 return expandMinMaxExpr(S, Intrinsic::umax,
"umax");
1544 return expandMinMaxExpr(S, Intrinsic::smin,
"smin");
1548 return expandMinMaxExpr(S, Intrinsic::umin,
"umin");
1551Value *SCEVExpander::visitSequentialUMinExpr(
1553 return expandMinMaxExpr(S, Intrinsic::umin,
"umin",
1558 return Builder.CreateVScale(S->getType());
1569 Value *V = expand(SH);
1571 if (Ty && Ty != V->getType()) {
1572 assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->
getType()) &&
1573 "non-trivial casts should be done with the SCEVs directly!");
1574 V = InsertNoopCastOfTo(V, Ty);
1579Value *SCEVExpander::FindValueInExprValueMap(
1591 for (
Value *V : SE.getSCEVValues(S)) {
1608 DropPoisonGeneratingInsts.
clear();
1626 auto SafeToHoist = [](
const SCEV *S) {
1631 return SC->getValue()->isZero();
1641 if (SafeToHoist(S)) {
1642 for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1643 L =
L->getParentLoop()) {
1644 if (SE.isLoopInvariant(S, L)) {
1646 if (BasicBlock *Preheader =
L->getLoopPreheader()) {
1652 InsertPt =
L->getHeader()->getFirstInsertionPt();
1658 if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1659 InsertPt =
L->getHeader()->getFirstInsertionPt();
1661 while (InsertPt != Builder.GetInsertPoint() &&
1663 InsertPt = std::next(InsertPt);
1671 auto I = InsertedExpressions.find(std::make_pair(S, &*InsertPt));
1672 if (
I != InsertedExpressions.end())
1675 SCEVInsertPointGuard Guard(Builder,
this);
1676 Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
1680 Value *
V = FindValueInExprValueMap(S, &*InsertPt, DropPoisonGeneratingInsts);
1683 V = fixupLCSSAFormFor(V);
1685 for (Instruction *
I : DropPoisonGeneratingInsts) {
1687 I->dropPoisonGeneratingAnnotations();
1691 if (
auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
1693 BO->setHasNoUnsignedWrap(
1695 BO->setHasNoSignedWrap(
1699 auto *Src = NNI->getOperand(0);
1702 DL).value_or(
false))
1703 NNI->setNonNeg(
true);
1713 InsertedExpressions[std::make_pair(S, &*InsertPt)] =
V;
1717void SCEVExpander::rememberInstruction(
Value *
I) {
1718 auto DoInsert = [
this](
Value *
V) {
1719 if (!PostIncLoops.empty())
1720 InsertedPostIncValues.insert(V);
1722 InsertedValues.insert(V);
1729 OrigFlags.try_emplace(
I, PoisonFlags(
I));
1732void SCEVExpander::replaceCongruentIVInc(
1743 if (!OrigInc || !IsomorphicInc)
1750 bool Chained = ChainedPhis.contains(Phi);
1751 if (!(Chained || isExpandedAddRecExprPHI(OrigPhi, OrigInc, L)) &&
1752 (Chained || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1767 const SCEV *TruncExpr =
1768 SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->
getType());
1769 if (OrigInc == IsomorphicInc || TruncExpr != SE.getSCEV(IsomorphicInc) ||
1770 !SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc))
1773 bool BothHaveNUW =
false;
1774 bool BothHaveNSW =
false;
1777 if (OBOIncV && OBOIsomorphic) {
1779 OBOIncV->hasNoUnsignedWrap() && OBOIsomorphic->hasNoUnsignedWrap();
1781 OBOIncV->hasNoSignedWrap() && OBOIsomorphic->hasNoSignedWrap();
1794 "Should only replace an increment with a wider one.");
1795 if (BothHaveNUW || BothHaveNSW) {
1801 dbgs() <<
"INDVARS: Eliminated congruent iv.inc: "
1802 << *IsomorphicInc <<
'\n');
1803 Value *NewInc = OrigInc;
1807 IP = PN->
getParent()->getFirstInsertionPt();
1812 Builder.SetCurrentDebugLocation(IsomorphicInc->
getDebugLoc());
1814 Builder.CreateTruncOrBitCast(OrigInc, IsomorphicInc->
getType(), IVName);
1839 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
1840 return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
1841 return RHS->getType()->getPrimitiveSizeInBits().getFixedValue() <
1842 LHS->getType()->getPrimitiveSizeInBits().getFixedValue();
1845 unsigned NumElim = 0;
1853 if (!SE.isSCEVable(PN->
getType()))
1858 return Const->getValue();
1863 if (
Value *V = SimplifyPHINode(Phi)) {
1864 if (V->getType() != Phi->getType())
1866 SE.forgetValue(Phi);
1867 Phi->replaceAllUsesWith(V);
1871 dbgs() <<
"INDVARS: Eliminated constant iv: " << *Phi
1876 if (!SE.isSCEVable(Phi->getType()))
1879 PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1882 if (Phi->getType()->isIntegerTy() &&
TTI &&
1883 TTI->isTruncateFree(Phi->getType(), Phis.
back()->getType())) {
1887 const SCEV *PhiExpr = SE.getSCEV(Phi);
1891 const SCEV *TruncExpr =
1892 SE.getTruncateExpr(PhiExpr, Phis.
back()->getType());
1893 ExprToIVMap[TruncExpr] = Phi;
1904 replaceCongruentIVInc(Phi, OrigPhiRef, L, DT, DeadInsts);
1906 dbgs() <<
"INDVARS: Eliminated congruent iv: " << *Phi
1909 DebugType,
dbgs() <<
"INDVARS: Original iv: " << *OrigPhiRef <<
'\n');
1911 Value *NewIV = OrigPhiRef;
1912 if (OrigPhiRef->
getType() != Phi->getType()) {
1914 L->getHeader()->getFirstInsertionPt());
1915 Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
1916 NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
1918 Phi->replaceAllUsesWith(NewIV);
1930 L->getExitingBlocks(ExitingBlocks);
1937 if (!
match(BB->getTerminator(),
1942 if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
1945 if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
1954 return FindValueInExprValueMap(S, At, DropPoisonGeneratingInsts) !=
nullptr;
1965 struct OperationIndices {
1966 OperationIndices(
unsigned Opc,
size_t min,
size_t max) :
1967 Opcode(
Opc), MinIdx(
min), MaxIdx(
max) { }
1980 return TTI.getCastInstrCost(Opcode, S->getType(),
1981 S->getOperand(0)->getType(),
1985 auto ArithCost = [&](
unsigned Opcode,
unsigned NumRequired,
1986 unsigned MinIdx = 0,
1989 return NumRequired *
1990 TTI.getArithmeticInstrCost(Opcode, S->getType(),
CostKind);
1993 auto CmpSelCost = [&](
unsigned Opcode,
unsigned NumRequired,
unsigned MinIdx,
1996 Type *OpType = S->getType();
1997 return NumRequired *
TTI.getCmpSelInstrCost(
2002 switch (S->getSCEVType()) {
2010 Cost = CastCost(Instruction::PtrToAddr);
2013 Cost = CastCost(Instruction::PtrToInt);
2016 Cost = CastCost(Instruction::Trunc);
2019 Cost = CastCost(Instruction::ZExt);
2022 Cost = CastCost(Instruction::SExt);
2025 unsigned Opcode = Instruction::UDiv;
2027 if (SC->getAPInt().isPowerOf2())
2028 Opcode = Instruction::LShr;
2029 Cost = ArithCost(Opcode, 1);
2033 Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
2044 unsigned OpCode = Instruction::Mul;
2045 if (S->getNumOperands() == 2)
2047 if (SC->getAPInt().isAllOnes())
2048 OpCode = Instruction::Sub;
2049 else if (SC->getAPInt().isPowerOf2())
2050 OpCode = Instruction::Shl;
2052 Cost = ArithCost(OpCode, S->getNumOperands() - 1);
2062 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
2063 Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
2064 switch (S->getSCEVType()) {
2068 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 0);
2069 Cost += ArithCost(Instruction::Or,
2070 S->getNumOperands() > 2 ? S->getNumOperands() - 2 : 0);
2071 Cost += CmpSelCost(Instruction::Select, 1, 0, 1);
2076 "Unhandled SCEV expression type?");
2083 unsigned NumRecurrences = S->getNumOperands() - 1;
2084 Cost +=
TTI.getCFInstrCost(Instruction::PHI,
CostKind) * NumRecurrences;
2086 TTI.getArithmeticInstrCost(Instruction::Add, S->getType(),
CostKind) *
2089 Worklist.
emplace_back(Instruction::PHI, 0, S->getOperand(0));
2091 for (
const SCEV *
Op : S->operands().drop_front())
2097 for (
auto &CostOp : Operations) {
2098 for (
auto SCEVOp :
enumerate(S->operands())) {
2100 size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
2101 size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
2108bool SCEVExpander::isHighCostExpansionHelper(
2116 const SCEV *S = WorkItem.
S;
2127 L->getHeader()->getParent()->hasMinSize()
2146 return Cost > Budget;
2167 SE.getAddExpr(S, SE.getConstant(S->
getType(), 1)), &At, L))
2182 "Nary expr should have more than 1 operand.");
2187 return Cost > Budget;
2191 "Polynomial should be at least linear");
2194 return Cost > Budget;
2203 switch (Pred->getKind()) {
2218 Value *Expr0 = expand(Pred->getLHS(), IP);
2219 Value *Expr1 = expand(Pred->getRHS(), IP);
2221 Builder.SetInsertPoint(IP);
2223 auto *
I = Builder.CreateICmp(InvPred, Expr0, Expr1,
"ident.check");
2230 "non-affine expression");
2234 const SCEV *ExitCount =
2235 SE.getPredicatedSymbolicMaxBackedgeTakenCount(AR->
getLoop(), Pred);
2243 unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->
getType());
2244 unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2251 Builder.SetInsertPoint(
Loc);
2252 Value *TripCountVal = expand(ExitCount,
Loc);
2257 Value *StepValue = expand(Step,
Loc);
2258 Value *NegStepValue = expand(SE.getNegativeSCEV(Step),
Loc);
2259 Value *StartValue = expand(Start,
Loc);
2264 Builder.SetInsertPoint(
Loc);
2267 Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2277 auto ComputeEndCheck = [&]() ->
Value * {
2279 Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2281 CallInst *
Mul = Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, Ty,
2282 {AbsStep, TruncTripCount},
2284 Value *MulV = Builder.CreateExtractValue(
Mul, 0,
"mul.result");
2285 Value *OfMul = Builder.CreateExtractValue(
Mul, 1,
"mul.overflow");
2288 bool NeedPosCheck = !SE.isKnownNegative(Step);
2289 bool NeedNegCheck = !SE.isKnownPositive(Step);
2292 Value *NegMulV = Builder.CreateNeg(MulV);
2294 Add = Builder.CreatePtrAdd(StartValue, MulV);
2296 Sub = Builder.CreatePtrAdd(StartValue, NegMulV);
2299 Add = Builder.CreateAdd(StartValue, MulV);
2301 Sub = Builder.CreateSub(StartValue, MulV);
2304 Value *EndCompareLT =
nullptr;
2305 Value *EndCompareGT =
nullptr;
2306 Value *EndCheck =
nullptr;
2308 EndCheck = EndCompareLT = Builder.CreateICmp(
2311 EndCheck = EndCompareGT = Builder.CreateICmp(
2313 if (NeedPosCheck && NeedNegCheck) {
2315 EndCheck = Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2317 return Builder.CreateOr(EndCheck, OfMul);
2319 Value *EndCheck = ComputeEndCheck();
2324 if (SrcBits > DstBits) {
2326 auto *BackedgeCheck =
2328 ConstantInt::get(
Loc->getContext(), MaxVal));
2329 BackedgeCheck = Builder.CreateAnd(
2332 EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2341 Value *NSSWCheck =
nullptr, *NUSWCheck =
nullptr;
2351 if (NUSWCheck && NSSWCheck)
2352 return Builder.CreateOr(NUSWCheck, NSSWCheck);
2367 for (
const auto *Pred : Union->getPredicates()) {
2369 Builder.SetInsertPoint(IP);
2374 return Builder.CreateOr(Checks);
2377Value *SCEVExpander::fixupLCSSAFormFor(
Value *V) {
2379 if (!PreserveLCSSA || !DefI)
2385 if (!DefLoop || UseLoop == DefLoop || DefLoop->
contains(UseLoop))
2396 if (DefI->getType()->isIntegerTy())
2410 for (
PHINode *PN : InsertedPHIs)
2411 rememberInstruction(PN);
2412 for (
PHINode *PN : PHIsToRemove) {
2415 InsertedValues.erase(PN);
2416 InsertedPostIncValues.erase(PN);
2420 return User->getOperand(0);
2442struct SCEVFindUnsafe {
2443 ScalarEvolution &SE;
2445 bool IsUnsafe =
false;
2447 SCEVFindUnsafe(ScalarEvolution &SE,
bool CanonicalMode)
2448 : SE(SE), CanonicalMode(CanonicalMode) {}
2450 bool follow(
const SCEV *S) {
2460 if (!AR->getLoop()->getLoopPreheader() &&
2461 (!CanonicalMode || !AR->isAffine())) {
2468 bool isDone()
const {
return IsUnsafe; }
2473 SCEVFindUnsafe Search(SE, CanonicalMode);
2475 return !Search.IsUnsafe;
2506 for (
auto [
I, Flags] : Expander.OrigFlags)
2509 auto InsertedInstructions = Expander.getAllInsertedInstructions();
2512 InsertedInstructions);
2522 [&InsertedSet](
Value *U) {
2523 return InsertedSet.contains(cast<Instruction>(U));
2525 "removed instruction should only be used by instructions inserted "
2526 "during expansion");
2528 assert(!
I->getType()->isVoidTy() &&
2529 "inserted instruction should have non-void types");
2531 I->eraseFromParent();
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
static const Loop * PickMostRelevantLoop(const Loop *A, const Loop *B, DominatorTree &DT)
PickMostRelevantLoop - Given two loops pick the one that's most relevant for SCEV expansion.
static InstructionCost costAndCollectOperands(const SCEVOperand &WorkItem, const TargetTransformInfo &TTI, TargetTransformInfo::TargetCostKind CostKind, SmallVectorImpl< SCEVOperand > &Worklist)
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
static bool canBeCheaplyTransformed(ScalarEvolution &SE, const SCEVAddRecExpr *Phi, const SCEVAddRecExpr *Requested, bool &InvertStep)
Check whether we can cheaply express the requested SCEV in terms of the available PHI SCEV by truncat...
#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
This class represents an incoming formal argument to a Function.
LLVM Basic Block Representation.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
@ ICMP_SLT
signed less than
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
static GEPNoWrapFlags noUnsignedWrap()
static GEPNoWrapFlags none()
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ABI void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
LLVM_ABI bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
bool isComplete() const
If the PHI node is complete which means all of its parent's predecessors have incoming value in this ...
Value * getIncomingValueForBlock(const BasicBlock *BB) const
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
This node represents a polynomial recurrence on the trip count of the specified loop.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
const Loop * getLoop() const
SCEVUse getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
LLVM_ABI Value * generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc, bool Signed)
Generates code that evaluates if the AR expression will overflow.
LLVM_ABI bool hasRelatedExistingExpansion(const SCEV *S, const Instruction *At, Loop *L)
Determine whether there is an existing expansion of S that can be reused.
SmallVector< Instruction *, 32 > getAllInsertedInstructions() const
Return a vector containing all instructions inserted during expansion.
LLVM_ABI bool isSafeToExpand(const SCEV *S) const
Return true if the given expression is safe to expand in the sense that all materialized values are s...
LLVM_ABI bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint) const
Return true if the given expression is safe to expand in the sense that all materialized values are d...
LLVM_ABI unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT, SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetTransformInfo *TTI=nullptr)
replace congruent phis with their most canonical representative.
LLVM_ABI Value * expandUnionPredicate(const SCEVUnionPredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LLVM_ABI bool hoistIVInc(Instruction *IncV, Instruction *InsertPos, bool RecomputePoisonFlags=false)
Utility for hoisting IncV (with all subexpressions required for its computation) before InsertPos.
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
static LLVM_ABI bool canReuseFlagsFromOriginalIVInc(PHINode *OrigPhi, PHINode *WidePhi, Instruction *OrigInc, Instruction *WideInc)
Return true if both increments directly increment the corresponding IV PHI nodes and have the same op...
LLVM_ABI Value * expandCodeFor(SCEVUse SH, Type *Ty, BasicBlock::iterator I)
Insert code to directly compute the specified SCEV expression into the program.
LLVM_ABI Value * expandComparePredicate(const SCEVComparePredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LLVM_ABI Value * expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LLVM_ABI Instruction * getIVIncOperand(Instruction *IncV, Instruction *InsertPos, bool allowScale)
Return the induction variable increment's IV operand.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
LLVM_ABI BasicBlock::iterator findInsertPointAfter(Instruction *I, Instruction *MustDominate) const
Returns a suitable insert point after I, that dominates MustDominate.
void setInsertPoint(Instruction *IP)
Set the current insertion point.
This class represents an assumption made using SCEV expressions which can be checked at run-time.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents an assumption made on an AddRec expression.
This class represents an analyzed expression in the program.
SCEVNoWrapFlags NoWrapFlags
static constexpr auto FlagNUW
static constexpr auto FlagAnyWrap
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
static constexpr auto FlagNSW
LLVM_ABI ArrayRef< SCEVUse > operands() const
Return operands of this SCEV expression.
SCEVTypes getSCEVType() const
static constexpr auto FlagNW
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
The main scalar evolution driver.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getMinusSCEV(SCEVUse LHS, SCEVUse RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags Mask)
Convenient NoWrapFlags manipulation.
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< user_iterator > users()
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr bool any(E Val)
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
bool match(Val *V, const Pattern &P)
match_bind< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
auto m_BasicBlock()
Match an arbitrary basic block value and ignore it.
auto m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
brc_match< Cond_t, match_bind< BasicBlock >, match_bind< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
SCEVUnaryExpr_match< SCEVPtrToAddrExpr, Op0_t > m_scev_PtrToAddr(const Op0_t &Op0)
match_bind< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
SCEVUnaryExpr_match< SCEVPtrToIntExpr, Op0_t > m_scev_PtrToInt(const Op0_t &Op0)
match_bind< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
SCEVURem_match< Op0_t, Op1_t > m_scev_URem(Op0_t LHS, Op1_t RHS, ScalarEvolution &SE)
Match the mathematical pattern A - (A / B) * B, where A and B can be arbitrary expressions.
@ CE
Windows NT (Windows on ARM)
initializer< Ty > init(const Ty &Val)
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
auto pred_end(const MachineBasicBlock *BB)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
constexpr from_range_t from_range
constexpr NextUseDistance min(NextUseDistance A, NextUseDistance B)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
auto pred_size(const MachineBasicBlock *BB)
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
auto reverse(ContainerTy &&C)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI cl::opt< unsigned > SCEVCheapExpansionBudget
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI const SCEV * normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops, ScalarEvolution &SE, bool CheckInvertible=true)
Normalize S to be post-increment for all loops present in Loops.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
constexpr NextUseDistance max(NextUseDistance A, NextUseDistance B)
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
PredIterator< BasicBlock, Value::user_iterator > pred_iterator
constexpr unsigned BitWidth
LLVM_ABI bool formLCSSAForInstructions(SmallVectorImpl< Instruction * > &Worklist, const DominatorTree &DT, const LoopInfo &LI, ScalarEvolution *SE, SmallVectorImpl< PHINode * > *PHIsToRemove=nullptr, SmallVectorImpl< PHINode * > *InsertedPHIs=nullptr)
Ensures LCSSA form for every instruction from the Worklist in the scope of innermost containing loop.
auto pred_begin(const MachineBasicBlock *BB)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
SmallPtrSet< const Loop *, 2 > PostIncLoopSet
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
SCEVUseT< const SCEV * > SCEVUse
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
LLVM_ABI void apply(Instruction *I)
LLVM_ABI PoisonFlags(const Instruction *I)
struct for holding enough information to help calculate the cost of the given SCEV when expanded into...
const SCEV * S
The SCEV operand to be costed.
unsigned ParentOpcode
LLVM instruction opcode that uses the operand.
int OperandIdx
The use index of an expanded instruction.
SCEVNoWrapFlags getNoWrapFlags(SCEVNoWrapFlags Mask=SCEVNoWrapFlags::NoWrapMask) const
Return the no-wrap flags for this SCEVUse, which is the union of the use-specific flags and the under...