85#include "llvm/Config/llvm-config.h"
136using namespace PatternMatch;
137using namespace SCEVPatternMatch;
139#define DEBUG_TYPE "scalar-evolution"
142 "Number of loop exits with predictable exit counts");
144 "Number of loop exits without predictable exit counts");
146 "Number of loops with trip counts computed by force");
148#ifdef EXPENSIVE_CHECKS
156 cl::desc(
"Maximum number of iterations SCEV will "
157 "symbolically execute a constant "
163 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
166 cl::desc(
"Enable stricter verification with -verify-scev is passed"));
170 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
175 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
180 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
184 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
185 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
189 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
190 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
194 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
195 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
200 cl::desc(
"Maximum depth of recursive arithmetics"),
204 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
209 cl::desc(
"Maximum depth of recursive SExt/ZExt/Trunc"),
214 cl::desc(
"Max coefficients in AddRec during evolving"),
219 cl::desc(
"Size of the expression which is considered huge"),
224 cl::desc(
"Threshold for switching to iteratively computing SCEV ranges"),
228 "scalar-evolution-max-loop-guard-collection-depth",
cl::Hidden,
229 cl::desc(
"Maximum depth for recrusive loop guard collection"),
cl::init(1));
234 cl::desc(
"When printing analysis, include information on every instruction"));
237 "scalar-evolution-use-expensive-range-sharpening",
cl::Hidden,
239 cl::desc(
"Use more powerful methods of sharpening expression ranges. May "
240 "be costly in terms of compile time"));
243 "scalar-evolution-max-scc-analysis-depth",
cl::Hidden,
244 cl::desc(
"Maximum amount of nodes to process while searching SCEVUnknown "
245 "Phi strongly connected components"),
250 cl::desc(
"Handle <= and >= in finite loops"),
254 "scalar-evolution-use-context-for-no-wrap-flag-strenghening",
cl::Hidden,
255 cl::desc(
"Infer nuw/nsw flags using context where suitable"),
266#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
276 cast<SCEVConstant>(
this)->getValue()->printAsOperand(
OS,
false);
284 OS <<
"(ptrtoint " << *
Op->getType() <<
" " << *
Op <<
" to "
285 << *PtrToInt->
getType() <<
")";
291 OS <<
"(trunc " << *
Op->getType() <<
" " << *
Op <<
" to "
298 OS <<
"(zext " << *
Op->getType() <<
" " << *
Op <<
" to "
305 OS <<
"(sext " << *
Op->getType() <<
" " << *
Op <<
" to "
334 const char *OpStr =
nullptr;
347 OpStr =
" umin_seq ";
353 ListSeparator LS(OpStr);
377 cast<SCEVUnknown>(
this)->getValue()->printAsOperand(
OS,
false);
380 OS <<
"***COULDNOTCOMPUTE***";
389 return cast<SCEVConstant>(
this)->getType();
391 return cast<SCEVVScale>(
this)->getType();
396 return cast<SCEVCastExpr>(
this)->getType();
398 return cast<SCEVAddRecExpr>(
this)->getType();
400 return cast<SCEVMulExpr>(
this)->getType();
405 return cast<SCEVMinMaxExpr>(
this)->getType();
407 return cast<SCEVSequentialMinMaxExpr>(
this)->getType();
409 return cast<SCEVAddExpr>(
this)->getType();
411 return cast<SCEVUDivExpr>(
this)->getType();
413 return cast<SCEVUnknown>(
this)->getType();
430 return cast<SCEVCastExpr>(
this)->operands();
439 return cast<SCEVNAryExpr>(
this)->operands();
441 return cast<SCEVUDivExpr>(
this)->operands();
456 if (!
Mul)
return false;
460 if (!SC)
return false;
463 return SC->getAPInt().isNegative();
478 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
480 UniqueSCEVs.InsertNode(S, IP);
499 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
502 UniqueSCEVs.InsertNode(S, IP);
521 "Must be a non-bit-width-changing pointer-to-integer cast!");
533 "Cannot truncate non-integer value!");
540 "Cannot zero extend non-integer value!");
547 "Cannot sign extend non-integer value!");
550void SCEVUnknown::deleted() {
552 SE->forgetMemoizedResults(
this);
555 SE->UniqueSCEVs.RemoveNode(
this);
561void SCEVUnknown::allUsesReplacedWith(
Value *New) {
563 SE->forgetMemoizedResults(
this);
566 SE->UniqueSCEVs.RemoveNode(
this);
588 if (LIsPointer != RIsPointer)
589 return (
int)LIsPointer - (int)RIsPointer;
594 return (
int)LID - (int)RID;
597 if (
const auto *LA = dyn_cast<Argument>(LV)) {
598 const auto *
RA = cast<Argument>(RV);
599 unsigned LArgNo = LA->getArgNo(), RArgNo =
RA->getArgNo();
600 return (
int)LArgNo - (int)RArgNo;
603 if (
const auto *LGV = dyn_cast<GlobalValue>(LV)) {
604 const auto *RGV = cast<GlobalValue>(RV);
606 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
607 auto LT = GV->getLinkage();
614 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
615 return LGV->getName().compare(RGV->getName());
620 if (
const auto *LInst = dyn_cast<Instruction>(LV)) {
621 const auto *RInst = cast<Instruction>(RV);
626 if (LParent != RParent) {
629 if (LDepth != RDepth)
630 return (
int)LDepth - (int)RDepth;
634 unsigned LNumOps = LInst->getNumOperands(),
635 RNumOps = RInst->getNumOperands();
636 if (LNumOps != RNumOps)
637 return (
int)LNumOps - (int)RNumOps;
639 for (
unsigned Idx :
seq(LNumOps)) {
655static std::optional<int>
666 return (
int)LType - (int)RType;
696 unsigned LBitWidth = LA.
getBitWidth(), RBitWidth =
RA.getBitWidth();
697 if (LBitWidth != RBitWidth)
698 return (
int)LBitWidth - (int)RBitWidth;
699 return LA.
ult(
RA) ? -1 : 1;
703 const auto *LTy = cast<IntegerType>(cast<SCEVVScale>(
LHS)->
getType());
704 const auto *RTy = cast<IntegerType>(cast<SCEVVScale>(
RHS)->
getType());
705 return LTy->getBitWidth() - RTy->getBitWidth();
716 if (LLoop != RLoop) {
718 assert(LHead != RHead &&
"Two loops share the same header?");
722 "No dominance between recurrences used by one SCEV?");
745 unsigned LNumOps = LOps.
size(), RNumOps = ROps.
size();
746 if (LNumOps != RNumOps)
747 return (
int)LNumOps - (int)RNumOps;
749 for (
unsigned i = 0; i != LNumOps; ++i) {
776 if (Ops.
size() < 2)
return;
783 return Complexity && *Complexity < 0;
785 if (Ops.
size() == 2) {
789 if (IsLessComplex(
RHS,
LHS))
796 return IsLessComplex(
LHS,
RHS);
803 for (
unsigned i = 0, e = Ops.
size(); i != e-2; ++i) {
804 const SCEV *S = Ops[i];
809 for (
unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
814 if (i == e-2)
return;
836template <
typename FoldT,
typename IsIdentityT,
typename IsAbsorberT>
840 IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
844 if (
const auto *
C = dyn_cast<SCEVConstant>(
Op)) {
848 Folded = cast<SCEVConstant>(
857 assert(Folded &&
"Must have folded value");
861 if (Folded && IsAbsorber(Folded->
getAPInt()))
865 if (Folded && !IsIdentity(Folded->
getAPInt()))
868 return Ops.
size() == 1 ? Ops[0] :
nullptr;
943 APInt OddFactorial(W, 1);
945 for (
unsigned i = 3; i <= K; ++i) {
948 OddFactorial *= (i >> TwoFactors);
952 unsigned CalculationBits = W +
T;
966 for (
unsigned i = 1; i != K; ++i) {
999 for (
unsigned i = 1, e =
Operands.size(); i != e; ++i) {
1004 if (isa<SCEVCouldNotCompute>(Coeff))
1019 "getLosslessPtrToIntExpr() should self-recurse at most once.");
1023 if (!
Op->getType()->isPointerTy())
1034 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1053 if (
auto *U = dyn_cast<SCEVUnknown>(
Op)) {
1058 if (isa<ConstantPointerNull>(U->getValue()))
1064 SCEV *S =
new (SCEVAllocator)
1066 UniqueSCEVs.InsertNode(S, IP);
1071 assert(
Depth == 0 &&
"getLosslessPtrToIntExpr() should not self-recurse for "
1072 "non-SCEVUnknown's.");
1084 class SCEVPtrToIntSinkingRewriter
1092 SCEVPtrToIntSinkingRewriter
Rewriter(SE);
1102 return Base::visit(S);
1107 bool Changed =
false;
1117 bool Changed =
false;
1127 "Should only reach pointer-typed SCEVUnknown's.");
1133 const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(
Op, *
this);
1135 "We must have succeeded in sinking the cast, "
1136 "and ending up with an integer-typed expression!");
1144 if (isa<SCEVCouldNotCompute>(IntOp))
1153 "This is not a truncating conversion!");
1155 "This is not a conversion to a SCEVable type!");
1156 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1164 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1186 UniqueSCEVs.InsertNode(S, IP);
1195 if (isa<SCEVAddExpr>(
Op) || isa<SCEVMulExpr>(
Op)) {
1196 auto *CommOp = cast<SCEVCommutativeExpr>(
Op);
1198 unsigned numTruncs = 0;
1199 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1202 if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
1203 isa<SCEVTruncateExpr>(S))
1207 if (numTruncs < 2) {
1208 if (isa<SCEVAddExpr>(
Op))
1210 if (isa<SCEVMulExpr>(
Op))
1217 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1224 for (
const SCEV *
Op : AddRec->operands())
1239 UniqueSCEVs.InsertNode(S, IP);
1279struct ExtendOpTraitsBase {
1285template <
typename ExtendOp>
struct ExtendOpTraits {
1301 static const GetExtendExprTy GetExtendExpr;
1303 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1310const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1317 static const GetExtendExprTy GetExtendExpr;
1319 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1326const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1338template <
typename ExtendOpTy>
1341 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1342 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1349 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
1358 for (
auto It = DiffOps.
begin(); It != DiffOps.
end(); ++It)
1371 auto PreStartFlags =
1389 const SCEV *OperandExtendedStart =
1391 (SE->*GetExtendExpr)(Step, WideTy,
Depth));
1392 if ((SE->*GetExtendExpr)(Start, WideTy,
Depth) == OperandExtendedStart) {
1404 const SCEV *OverflowLimit =
1405 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1407 if (OverflowLimit &&
1415template <
typename ExtendOpTy>
1419 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1421 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE,
Depth);
1427 (SE->*GetExtendExpr)(PreStart, Ty,
Depth));
1462template <
typename ExtendOpTy>
1463bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1466 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1472 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
1478 for (
unsigned Delta : {-2, -1, 1, 2}) {
1483 ID.AddPointer(PreStart);
1484 ID.AddPointer(Step);
1492 if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
1495 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1496 DeltaS, &Pred,
this);
1514 const unsigned BitWidth =
C.getBitWidth();
1532 const APInt &ConstantStart,
1551 auto &UserIDs = FoldCacheUser[
I.first->second];
1552 assert(
count(UserIDs,
ID) == 1 &&
"unexpected duplicates in UserIDs");
1553 for (
unsigned I = 0;
I != UserIDs.size(); ++
I)
1554 if (UserIDs[
I] ==
ID) {
1559 I.first->second = S;
1561 FoldCacheUser[S].push_back(
ID);
1567 "This is not an extending conversion!");
1569 "This is not a conversion to a SCEVable type!");
1570 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1574 auto Iter = FoldCache.find(
ID);
1575 if (Iter != FoldCache.end())
1576 return Iter->second;
1579 if (!isa<SCEVZeroExtendExpr>(S))
1587 "This is not an extending conversion!");
1589 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1606 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1610 UniqueSCEVs.InsertNode(S, IP);
1619 const SCEV *
X = ST->getOperand();
1633 if (AR->isAffine()) {
1634 const SCEV *Start = AR->getStart();
1635 const SCEV *Step = AR->getStepRecurrence(*
this);
1637 const Loop *L = AR->getLoop();
1641 if (AR->hasNoUnsignedWrap()) {
1643 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1657 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1662 const SCEV *CastedMaxBECount =
1666 if (MaxBECount == RecastedMaxBECount) {
1676 const SCEV *WideMaxBECount =
1678 const SCEV *OperandExtendedAdd =
1684 if (ZAdd == OperandExtendedAdd) {
1688 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1695 OperandExtendedAdd =
1701 if (ZAdd == OperandExtendedAdd) {
1706 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1721 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1724 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1726 if (AR->hasNoUnsignedWrap()) {
1732 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1749 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1760 if (
const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1761 const APInt &
C = SC->getAPInt();
1765 const SCEV *SResidual =
1774 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1777 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1793 if (
auto *Div = dyn_cast<SCEVUDivExpr>(
Op))
1797 if (
auto *SA = dyn_cast<SCEVAddExpr>(
Op)) {
1799 if (SA->hasNoUnsignedWrap()) {
1803 for (
const auto *
Op : SA->operands())
1816 if (
const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1820 const SCEV *SResidual =
1830 if (
auto *SM = dyn_cast<SCEVMulExpr>(
Op)) {
1832 if (SM->hasNoUnsignedWrap()) {
1836 for (
const auto *
Op : SM->operands())
1853 if (SM->getNumOperands() == 2)
1854 if (
auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
1855 if (MulLHS->getAPInt().isPowerOf2())
1856 if (
auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
1858 MulLHS->getAPInt().logBase2();
1870 if (isa<SCEVUMinExpr>(
Op) || isa<SCEVUMaxExpr>(
Op)) {
1871 auto *
MinMax = cast<SCEVMinMaxExpr>(
Op);
1873 for (
auto *Operand :
MinMax->operands())
1875 if (isa<SCEVUMinExpr>(
MinMax))
1881 if (
auto *
MinMax = dyn_cast<SCEVSequentialMinMaxExpr>(
Op)) {
1882 assert(isa<SCEVSequentialUMinExpr>(
MinMax) &&
"Not supported!");
1884 for (
auto *Operand :
MinMax->operands())
1891 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1894 UniqueSCEVs.InsertNode(S, IP);
1902 "This is not an extending conversion!");
1904 "This is not a conversion to a SCEVable type!");
1905 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1909 auto Iter = FoldCache.find(
ID);
1910 if (Iter != FoldCache.end())
1911 return Iter->second;
1914 if (!isa<SCEVSignExtendExpr>(S))
1922 "This is not an extending conversion!");
1924 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1946 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1951 UniqueSCEVs.InsertNode(S, IP);
1960 const SCEV *
X = ST->getOperand();
1969 if (
auto *SA = dyn_cast<SCEVAddExpr>(
Op)) {
1971 if (SA->hasNoSignedWrap()) {
1975 for (
const auto *
Op : SA->operands())
1989 if (
const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1993 const SCEV *SResidual =
2007 if (AR->isAffine()) {
2008 const SCEV *Start = AR->getStart();
2009 const SCEV *Step = AR->getStepRecurrence(*
this);
2011 const Loop *L = AR->getLoop();
2015 if (AR->hasNoSignedWrap()) {
2017 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2031 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
2037 const SCEV *CastedMaxBECount =
2041 if (MaxBECount == RecastedMaxBECount) {
2051 const SCEV *WideMaxBECount =
2053 const SCEV *OperandExtendedAdd =
2059 if (SAdd == OperandExtendedAdd) {
2063 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
2070 OperandExtendedAdd =
2076 if (SAdd == OperandExtendedAdd) {
2088 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
2096 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2098 if (AR->hasNoSignedWrap()) {
2104 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2112 if (
const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2113 const APInt &
C = SC->getAPInt();
2117 const SCEV *SResidual =
2126 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2129 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2142 if (isa<SCEVSMinExpr>(
Op) || isa<SCEVSMaxExpr>(
Op)) {
2143 auto *
MinMax = cast<SCEVMinMaxExpr>(
Op);
2145 for (
auto *Operand :
MinMax->operands())
2147 if (isa<SCEVSMinExpr>(
MinMax))
2154 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2157 UniqueSCEVs.InsertNode(S, IP);
2183 "This is not an extending conversion!");
2185 "This is not a conversion to a SCEVable type!");
2190 if (SC->getAPInt().isNegative())
2195 const SCEV *NewOp =
T->getOperand();
2203 if (!isa<SCEVZeroExtendExpr>(ZExt))
2208 if (!isa<SCEVSignExtendExpr>(SExt))
2214 for (
const SCEV *
Op : AR->operands())
2220 if (isa<SCEVSMaxExpr>(
Op))
2253 APInt &AccumulatedConstant,
2256 bool Interesting =
false;
2260 while (
const SCEVConstant *
C = dyn_cast<SCEVConstant>(Ops[i])) {
2263 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->isZero())
2265 AccumulatedConstant += Scale *
C->getAPInt();
2270 for (; i != Ops.
size(); ++i) {
2272 if (
Mul && isa<SCEVConstant>(
Mul->getOperand(0))) {
2274 Scale * cast<SCEVConstant>(
Mul->getOperand(0))->getAPInt();
2275 if (
Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(
Mul->getOperand(1))) {
2280 Add->operands(), NewScale, SE);
2286 auto Pair = M.insert({Key, NewScale});
2290 Pair.first->second += NewScale;
2298 std::pair<DenseMap<const SCEV *, APInt>::iterator,
bool> Pair =
2299 M.insert({Ops[i], Scale});
2303 Pair.first->second += Scale;
2322 case Instruction::Add:
2325 case Instruction::Sub:
2328 case Instruction::Mul:
2338 auto *NarrowTy = cast<IntegerType>(
LHS->
getType());
2342 const SCEV *
A = (this->*Extension)(
2344 const SCEV *LHSB = (this->*Extension)(
LHS, WideTy, 0);
2345 const SCEV *RHSB = (this->*Extension)(
RHS, WideTy, 0);
2353 if (BinOp == Instruction::Mul)
2355 auto *RHSC = dyn_cast<SCEVConstant>(
RHS);
2359 APInt C = RHSC->getAPInt();
2360 unsigned NumBits =
C.getBitWidth();
2361 bool IsSub = (BinOp == Instruction::Sub);
2362 bool IsNegativeConst = (
Signed &&
C.isNegative());
2364 bool OverflowDown = IsSub ^ IsNegativeConst;
2366 if (IsNegativeConst) {
2379 APInt Limit = Min + Magnitude;
2385 APInt Limit = Max - Magnitude;
2390std::optional<SCEV::NoWrapFlags>
2395 return std::nullopt;
2404 bool Deduced =
false;
2406 if (OBO->
getOpcode() != Instruction::Add &&
2409 return std::nullopt;
2418 false,
LHS,
RHS, CtxI)) {
2432 return std::nullopt;
2442 using namespace std::placeholders;
2449 assert(CanAnalyze &&
"don't call from other places!");
2456 auto IsKnownNonNegative = [&](
const SCEV *S) {
2466 if (SignOrUnsignWrap != SignOrUnsignMask &&
2468 isa<SCEVConstant>(Ops[0])) {
2473 return Instruction::Add;
2475 return Instruction::Mul;
2481 const APInt &
C = cast<SCEVConstant>(Ops[0])->getAPInt();
2486 Opcode,
C, OBO::NoSignedWrap);
2494 Opcode,
C, OBO::NoUnsignedWrap);
2504 Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))
2510 if (
auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0]))
2511 if (UDiv->getOperand(1) == Ops[1])
2513 if (
auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1]))
2514 if (UDiv->getOperand(1) == Ops[0])
2530 "only nuw or nsw allowed");
2532 if (Ops.
size() == 1)
return Ops[0];
2535 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
2537 "SCEVAddExpr operand types don't match!");
2540 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2545 [](
const APInt &C1,
const APInt &C2) {
return C1 + C2; },
2546 [](
const APInt &
C) {
return C.isZero(); },
2547 [](
const APInt &
C) {
return false; });
2551 unsigned Idx = isa<SCEVConstant>(Ops[0]) ? 1 : 0;
2560 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2565 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2566 Add->setNoWrapFlags(ComputeFlags(Ops));
2573 Type *Ty = Ops[0]->getType();
2574 bool FoundMatch =
false;
2575 for (
unsigned i = 0, e = Ops.
size(); i != e-1; ++i)
2576 if (Ops[i] == Ops[i+1]) {
2579 while (i+Count != e && Ops[i+Count] == Ops[i])
2584 if (Ops.
size() == Count)
2588 --i; e -= Count - 1;
2598 auto FindTruncSrcType = [&]() ->
Type * {
2603 if (
auto *
T = dyn_cast<SCEVTruncateExpr>(Ops[
Idx]))
2604 return T->getOperand()->getType();
2605 if (
const auto *
Mul = dyn_cast<SCEVMulExpr>(Ops[
Idx])) {
2606 const auto *LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2607 if (
const auto *
T = dyn_cast<SCEVTruncateExpr>(LastOp))
2608 return T->getOperand()->getType();
2612 if (
auto *SrcType = FindTruncSrcType()) {
2617 for (
const SCEV *
Op : Ops) {
2619 if (
T->getOperand()->getType() != SrcType) {
2626 }
else if (
const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(
Op)) {
2628 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2630 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2631 if (
T->getOperand()->getType() != SrcType) {
2636 }
else if (
const auto *
C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2654 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2659 if (Ops.
size() == 2) {
2663 const SCEV *
A = Ops[0];
2664 const SCEV *
B = Ops[1];
2665 auto *AddExpr = dyn_cast<SCEVAddExpr>(
B);
2666 auto *
C = dyn_cast<SCEVConstant>(
A);
2667 if (AddExpr &&
C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
2668 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
2669 auto C2 =
C->getAPInt();
2672 APInt ConstAdd = C1 + C2;
2673 auto AddFlags = AddExpr->getNoWrapFlags();
2699 if (Ops.
size() == 2) {
2701 if (
Mul &&
Mul->getNumOperands() == 2 &&
2702 Mul->getOperand(0)->isAllOnesValue()) {
2705 if (matchURem(
Mul->getOperand(1),
X,
Y) &&
X == Ops[1]) {
2717 bool DeletedAdd =
false;
2731 CommonFlags =
maskFlags(CommonFlags,
Add->getNoWrapFlags());
2747 if (
Idx < Ops.
size() && isa<SCEVMulExpr>(Ops[
Idx])) {
2754 struct APIntCompare {
2763 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2764 for (
const SCEV *NewOp : NewOps)
2765 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2768 if (AccumulatedConstant != 0)
2770 for (
auto &MulOp : MulOpLists) {
2771 if (MulOp.first == 1) {
2773 }
else if (MulOp.first != 0) {
2782 if (Ops.
size() == 1)
2791 for (;
Idx < Ops.
size() && isa<SCEVMulExpr>(Ops[
Idx]); ++
Idx) {
2793 for (
unsigned MulOp = 0, e =
Mul->getNumOperands(); MulOp != e; ++MulOp) {
2794 const SCEV *MulOpSCEV =
Mul->getOperand(MulOp);
2795 if (isa<SCEVConstant>(MulOpSCEV))
2797 for (
unsigned AddOp = 0, e = Ops.
size(); AddOp != e; ++AddOp)
2798 if (MulOpSCEV == Ops[AddOp]) {
2800 const SCEV *InnerMul =
Mul->getOperand(MulOp == 0);
2801 if (
Mul->getNumOperands() != 2) {
2805 Mul->operands().take_front(MulOp));
2813 if (Ops.
size() == 2)
return OuterMul;
2826 for (
unsigned OtherMulIdx =
Idx+1;
2827 OtherMulIdx < Ops.
size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2829 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2833 OMulOp != e; ++OMulOp)
2834 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2836 const SCEV *InnerMul1 =
Mul->getOperand(MulOp == 0);
2837 if (
Mul->getNumOperands() != 2) {
2839 Mul->operands().take_front(MulOp));
2846 OtherMul->
operands().take_front(OMulOp));
2851 const SCEV *InnerMulSum =
2855 if (Ops.
size() == 2)
return OuterMul;
2872 for (;
Idx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[
Idx]); ++
Idx) {
2878 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
2886 if (!LIOps.
empty()) {
2911 auto *DefI = getDefiningScopeBound(LIOps);
2913 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
2925 if (Ops.
size() == 1)
return NewRec;
2928 for (
unsigned i = 0;; ++i)
2929 if (Ops[i] == AddRec) {
2939 for (
unsigned OtherIdx =
Idx+1;
2940 OtherIdx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2945 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2947 "AddRecExprs are not sorted in reverse dominance order?");
2948 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2951 for (; OtherIdx != Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2953 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2954 if (OtherAddRec->getLoop() == AddRecLoop) {
2955 for (
unsigned i = 0, e = OtherAddRec->getNumOperands();
2957 if (i >= AddRecOps.
size()) {
2958 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
2962 AddRecOps[i], OtherAddRec->getOperand(i)};
2965 Ops.
erase(Ops.
begin() + OtherIdx); --OtherIdx;
2980 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2988 for (
const SCEV *
Op : Ops)
2992 static_cast<SCEVAddExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
2995 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2996 S =
new (SCEVAllocator)
2998 UniqueSCEVs.InsertNode(S, IP);
3010 for (
const SCEV *
Op : Ops)
3018 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3019 S =
new (SCEVAllocator)
3021 UniqueSCEVs.InsertNode(S, IP);
3022 LoopUsers[
L].push_back(S);
3034 for (
const SCEV *
Op : Ops)
3038 static_cast<SCEVMulExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3041 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3042 S =
new (SCEVAllocator)
SCEVMulExpr(
ID.Intern(SCEVAllocator),
3044 UniqueSCEVs.InsertNode(S, IP);
3053 if (j > 1 && k / j != i) Overflow =
true;
3069 if (n == 0 || n == k)
return 1;
3070 if (k > n)
return 0;
3076 for (
uint64_t i = 1; i <= k; ++i) {
3077 r =
umul_ov(r, n-(i-1), Overflow);
3086 struct FindConstantInAddMulChain {
3087 bool FoundConstant =
false;
3089 bool follow(
const SCEV *S) {
3090 FoundConstant |= isa<SCEVConstant>(S);
3091 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
3094 bool isDone()
const {
3095 return FoundConstant;
3099 FindConstantInAddMulChain
F;
3101 ST.visitAll(StartExpr);
3102 return F.FoundConstant;
3110 "only nuw or nsw allowed");
3112 if (Ops.
size() == 1)
return Ops[0];
3114 Type *ETy = Ops[0]->getType();
3116 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
3118 "SCEVMulExpr operand types don't match!");
3123 [](
const APInt &C1,
const APInt &C2) {
return C1 * C2; },
3124 [](
const APInt &
C) {
return C.isOne(); },
3125 [](
const APInt &
C) {
return C.isZero(); });
3136 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3141 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3142 Mul->setNoWrapFlags(ComputeFlags(Ops));
3146 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3147 if (Ops.
size() == 2) {
3164 if (Ops[0]->isAllOnesValue()) {
3169 bool AnyFolded =
false;
3170 for (
const SCEV *AddOp :
Add->operands()) {
3173 if (!isa<SCEVMulExpr>(
Mul)) AnyFolded =
true;
3178 }
else if (
const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3197 AddRec->getNoWrapFlags(FlagsMask));
3210 bool DeletedMul =
false;
3235 for (;
Idx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[
Idx]); ++
Idx) {
3240 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
3248 if (!LIOps.
empty()) {
3261 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
3277 if (Ops.
size() == 1)
return NewRec;
3280 for (
unsigned i = 0;; ++i)
3281 if (Ops[i] == AddRec) {
3302 bool OpsModified =
false;
3303 for (
unsigned OtherIdx =
Idx+1;
3304 OtherIdx != Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3307 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3317 bool Overflow =
false;
3323 SmallVector <const SCEV *, 7> SumOps;
3324 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3328 z < ze && !Overflow; ++z) {
3331 if (LargerThan64Bits)
3332 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3334 Coeff = Coeff1*Coeff2;
3349 if (Ops.
size() == 2)
return NewAddRec;
3350 Ops[
Idx] = NewAddRec;
3351 Ops.
erase(Ops.
begin() + OtherIdx); --OtherIdx;
3353 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3367 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3375 "SCEVURemExpr operand types don't match!");
3380 if (RHSC->getValue()->isOne())
3384 if (RHSC->getAPInt().isPowerOf2()) {
3403 "SCEVUDivExpr operand can't be pointer!");
3405 "SCEVUDivExpr operand types don't match!");
3412 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3420 if (RHSC->getValue()->isOne())
3425 if (!RHSC->getValue()->isZero()) {
3430 unsigned LZ = RHSC->getAPInt().countl_zero();
3434 if (!RHSC->getAPInt().isPowerOf2())
3440 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*
this))) {
3442 const APInt &StepInt = Step->getAPInt();
3443 const APInt &DivInt = RHSC->getAPInt();
3444 if (!StepInt.
urem(DivInt) &&
3450 for (
const SCEV *
Op : AR->operands())
3457 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3458 if (StartC && !DivInt.
urem(StepInt) &&
3464 const APInt &StartRem = StartInt.
urem(StepInt);
3465 if (StartRem != 0) {
3466 const SCEV *NewLHS =
3469 if (
LHS != NewLHS) {
3479 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3488 for (
const SCEV *
Op : M->operands())
3492 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3493 const SCEV *
Op = M->getOperand(i);
3495 if (!isa<SCEVUDivExpr>(Div) &&
getMulExpr(Div, RHSC) ==
Op) {
3505 if (
auto *DivisorConstant =
3506 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3507 bool Overflow =
false;
3509 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3520 for (
const SCEV *
Op :
A->operands())
3524 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3526 if (isa<SCEVUDivExpr>(
Op) ||
3531 if (
Operands.size() ==
A->getNumOperands())
3538 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3543 if (
const auto *AE = dyn_cast<SCEVAddExpr>(
LHS);
3544 AE && AE->getNumOperands() == 2) {
3545 if (
const auto *VC = dyn_cast<SCEVConstant>(AE->getOperand(0))) {
3546 const APInt &NegC = VC->getAPInt();
3548 const auto *MME = dyn_cast<SCEVSMaxExpr>(AE->getOperand(1));
3549 if (MME && MME->getNumOperands() == 2 &&
3550 isa<SCEVConstant>(MME->getOperand(0)) &&
3551 cast<SCEVConstant>(MME->getOperand(0))->getAPInt() == -NegC &&
3552 MME->getOperand(1) ==
RHS)
3561 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3564 UniqueSCEVs.InsertNode(S, IP);
3594 if (!
Mul || !
Mul->hasNoUnsignedWrap())
3600 if (
const auto *LHSCst = dyn_cast<SCEVConstant>(
Mul->getOperand(0))) {
3601 if (LHSCst == RHSCst) {
3609 APInt Factor =
gcd(LHSCst, RHSCst);
3612 cast<SCEVConstant>(
getConstant(LHSCst->getAPInt().udiv(Factor)));
3614 cast<SCEVConstant>(
getConstant(RHSCst->getAPInt().udiv(Factor)));
3620 Mul = dyn_cast<SCEVMulExpr>(
LHS);
3627 for (
int i = 0, e =
Mul->getNumOperands(); i != e; ++i) {
3628 if (
Mul->getOperand(i) ==
RHS) {
3646 if (
const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3647 if (StepChrec->getLoop() == L) {
3666 "SCEVAddRecExpr operand types don't match!");
3667 assert(!
Op->getType()->isPointerTy() &&
"Step must be integer");
3671 "SCEVAddRecExpr operand is not available at loop entry!");
3689 const Loop *NestedLoop = NestedAR->getLoop();
3690 if (L->contains(NestedLoop)
3695 Operands[0] = NestedAR->getStart();
3699 bool AllInvariant =
all_of(
3711 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *
Op) {
3722 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3732 return getOrCreateAddRecExpr(
Operands, L, Flags);
3749 auto *GEPI = dyn_cast<Instruction>(
GEP);
3750 if (!GEPI || !isSCEVExprNeverPoison(GEPI))
3761 bool FirstIter =
true;
3763 for (
const SCEV *IndexExpr : IndexExprs) {
3765 if (
StructType *STy = dyn_cast<StructType>(CurTy)) {
3767 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
3768 unsigned FieldNo = Index->getZExtValue();
3770 Offsets.push_back(FieldOffset);
3773 CurTy = STy->getTypeAtIndex(Index);
3777 assert(isa<PointerType>(CurTy) &&
3778 "The first index of a GEP indexes a pointer");
3779 CurTy =
GEP->getSourceElementType();
3790 const SCEV *LocalOffset =
getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3791 Offsets.push_back(LocalOffset);
3796 if (Offsets.empty())
3809 "GEP should not change type mid-flight.");
// NOTE(review): Excerpt of ScalarEvolution::findExistingSCEVInCache with
// several lines dropped by the extraction (the FoldingSetNodeID ID and
// void *IP declarations, and the ID.AddPointer(Op) loop body are not
// visible). Looks up an existing uniqued SCEV node keyed by (Kind, Ops)
// in UniqueSCEVs; returns null when absent. Code kept byte-identical.
3813SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3816 ID.AddInteger(SCEVType);
3817 for (
const SCEV *
Op : Ops)
3820 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
// NOTE(review): Fragmentary excerpt of ScalarEvolution::getMinMaxExpr
// (original lines ~3830-3950); most interior lines were dropped by the
// extraction. Code kept byte-identical; only comments added.
// Preconditions: a min/max kind and a non-empty, type-consistent operand
// list; a single operand is returned unchanged.
3830 assert(SCEVMinMaxExpr::isMinMaxType(Kind) &&
"Not a SCEVMinMaxExpr!");
3831 assert(!Ops.
empty() &&
"Cannot get empty (u|s)(min|max)!");
3832 if (Ops.
size() == 1)
return Ops[0];
3835 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i) {
3837 "Operand types don't match!");
3840 "min/max should be consistently pointerish");
// Saturation/identity tests on a constant C: which extreme value (signed
// vs. unsigned min/max) short-circuits the expression. The surrounding
// switch over Kind is elided here.
3866 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3868 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3873 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3875 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
// Fast path: an identical expression is already uniqued.
3881 if (
const SCEV *S = findExistingSCEVInCache(Kind, Ops)) {
// Find the first operand of the same kind (operands are kept sorted by
// SCEV type) and flatten nested min/max of the same kind.
3887 while (
Idx < Ops.
size() && Ops[
Idx]->getSCEVType() < Kind)
3893 bool DeletedAny =
false;
3894 while (Ops[
Idx]->getSCEVType() == Kind) {
// Deduplicate adjacent operands and drop those proven redundant by the
// ordering predicates (FirstPred/SecondPred defined in elided lines).
3914 for (
unsigned i = 0, e = Ops.
size() - 1; i != e; ++i) {
3915 if (Ops[i] == Ops[i + 1] ||
3916 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
3922 }
else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
3931 if (Ops.
size() == 1)
return Ops[0];
3933 assert(!Ops.
empty() &&
"Reduced smax down to nothing!");
// Build and unique the resulting node (FoldingSetNodeID / allocator
// boilerplate partly elided).
3938 ID.AddInteger(Kind);
3939 for (
const SCEV *
Op : Ops)
3942 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3944 return ExistingSCEV;
3946 std::uninitialized_copy(Ops.
begin(), Ops.
end(), O);
3947 SCEV *S =
new (SCEVAllocator)
3950 UniqueSCEVs.InsertNode(S, IP);
// NOTE(review): Fragmentary excerpt of the file-local class
// SCEVSequentialMinMaxDeduplicatingVisitor (original lines ~3957-4070);
// many member declarations are elided by the extraction. A SCEVVisitor
// returning std::optional<const SCEV *>: nullopt signals "operand was a
// duplicate, drop it". Code kept byte-identical; only comments added.
3957class SCEVSequentialMinMaxDeduplicatingVisitor final
3958 :
public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
3959 std::optional<const SCEV *>> {
3960 using RetVal = std::optional<const SCEV *>;
// Recursion is limited to expressions of the root's kind (or its
// non-sequential counterpart).
3968 bool canRecurseInto(
SCEVTypes Kind)
const {
3971 return RootKind == Kind || NonSequentialRootKind == Kind;
// Shared handler for all five min/max visit methods below: re-visits the
// operand list and rebuilds the expression only when something changed.
3974 RetVal visitAnyMinMaxExpr(
const SCEV *S) {
3975 assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) &&
3976 "Only for min/max expressions.");
3979 if (!canRecurseInto(Kind))
3982 auto *NAry = cast<SCEVNAryExpr>(S);
3984 bool Changed =
visit(Kind, NAry->operands(), NewOps);
3989 return std::nullopt;
3991 return isa<SCEVSequentialMinMaxExpr>(S)
// Top-level visit: an operand already seen (SeenOps) is reported as a
// duplicate via nullopt.
3998 if (!SeenOps.
insert(S).second)
3999 return std::nullopt;
4000 return Base::visit(S);
// Constructor initializer list (parameters elided by the extraction).
4006 : SE(SE), RootKind(RootKind),
4007 NonSequentialRootKind(
// Operand-list driver: rebuilds Ops, reporting whether anything was
// dropped or replaced (loop body partly elided).
4013 bool Changed =
false;
4017 for (
const SCEV *
Op : OrigOps) {
4026 NewOps = std::move(Ops);
// Leaf/opaque expression kinds pass through unchanged.
4032 RetVal visitVScale(
const SCEVVScale *VScale) {
return VScale; }
4042 RetVal visitAddExpr(
const SCEVAddExpr *Expr) {
return Expr; }
4044 RetVal visitMulExpr(
const SCEVMulExpr *Expr) {
return Expr; }
4046 RetVal visitUDivExpr(
const SCEVUDivExpr *Expr) {
return Expr; }
4048 RetVal visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
return Expr; }
// The five min/max kinds (smax/umax/smin/umin/umin_seq — method names
// elided by the extraction) all defer to visitAnyMinMaxExpr.
4051 return visitAnyMinMaxExpr(Expr);
4055 return visitAnyMinMaxExpr(Expr);
4059 return visitAnyMinMaxExpr(Expr);
4063 return visitAnyMinMaxExpr(Expr);
4067 return visitAnyMinMaxExpr(Expr);
4070 RetVal visitUnknown(
const SCEVUnknown *Expr) {
return Expr; }
// NOTE(review): Excerpt of the file-local SCEVPoisonCollector (original
// lines ~4114-4131) used with SCEVTraversal to gather SCEVUnknowns that
// may be poison; some members (e.g. the MaybePoison set used by callers
// below) are elided by the extraction. Code kept byte-identical.
4114struct SCEVPoisonCollector {
4115 bool LookThroughMaybePoisonBlocking;
4117 SCEVPoisonCollector(
bool LookThroughMaybePoisonBlocking)
4118 : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
// follow() decides whether traversal descends into S; when not looking
// through poison-blocking ops the walk stops at them (condition elided).
4120 bool follow(
const SCEV *S) {
4121 if (!LookThroughMaybePoisonBlocking &&
4125 if (
auto *SU = dyn_cast<SCEVUnknown>(S)) {
// Never terminates the traversal early.
4131 bool isDone()
const {
return false; }
// NOTE(review): Fragments of two poison-reasoning helpers (original lines
// ~4141-4162). PC1/PC2 belong to what appears to be impliesPoison(): the
// first pass looks through poison-blocking ops (true), the second does
// not (false); PC (line 4162) belongs to a separate sibling function.
// Almost all surrounding logic is elided. Code kept byte-identical.
4141 SCEVPoisonCollector PC1(
true);
// If nothing in the source expression can be poison, the implication
// holds trivially.
4146 if (PC1.MaybePoison.empty())
4152 SCEVPoisonCollector PC2(
false);
4162 SCEVPoisonCollector PC(
false);
// NOTE(review): Fragmentary excerpt of a worklist walk over instruction
// operands checking poison propagation — presumably
// ScalarEvolution::canReuseInstruction (original lines ~4185-4221);
// confirm against the full source. Code kept byte-identical.
4185 while (!Worklist.
empty()) {
4187 if (!Visited.
insert(V).second)
// Bail out once too many values have been inspected (hard cap of 16).
4191 if (Visited.
size() > 16)
// Values already known poison-relevant, or proven not poison, need no
// further processing.
4196 if (PoisonVals.
contains(V) || ::isGuaranteedNotToBePoison(V))
4199 auto *
I = dyn_cast<Instruction>(V);
// `or disjoint` is a poison-generating annotation handled specially.
4206 if (
auto *PDI = dyn_cast<PossiblyDisjointInst>(
I))
4207 if (PDI->isDisjoint())
// llvm.vscale calls are handled specially as well.
4213 if (
auto *
II = dyn_cast<IntrinsicInst>(
I);
4214 II &&
II->getIntrinsicID() == Intrinsic::vscale)
// Instructions carrying poison-generating flags/metadata (nuw, nsw,
// exact, inbounds, !nonnull, ...) are recorded (action elided).
4221 if (
I->hasPoisonGeneratingAnnotations())
// NOTE(review): Fragmentary excerpt of
// ScalarEvolution::getSequentialMinMaxExpr (original lines ~4233-4338);
// most interior lines elided by the extraction. Mirrors getMinMaxExpr but
// must preserve operand order and UB/poison semantics (umin_seq). Code
// kept byte-identical; only comments added.
4233 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4234 "Not a SCEVSequentialMinMaxExpr!");
4235 assert(!Ops.
empty() &&
"Cannot get empty (u|s)(min|max)!");
4236 if (Ops.
size() == 1)
4240 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i) {
4242 "Operand types don't match!");
4245 "min/max should be consistently pointerish");
// Fast path: already uniqued.
4253 if (
const SCEV *S = findExistingSCEVInCache(Kind, Ops))
// Order-preserving deduplication of repeated operands.
4260 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*
this, Kind);
4261 bool Changed = Deduplicator.visit(Kind, Ops, Ops);
// Flatten directly-nested sequential min/max of the same kind.
4270 bool DeletedAny =
false;
4272 if (Ops[
Idx]->getSCEVType() != Kind) {
4276 const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[
Idx]);
4279 SMME->operands().end());
4287 const SCEV *SaturationPoint;
// Later operands can only be dropped when they cannot cause UB, since a
// sequential min/max stops evaluating at the saturation point.
4298 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i) {
4299 if (!isGuaranteedNotToCauseUB(Ops[i]))
4316 if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) {
// Build and unique the resulting node (boilerplate partly elided).
4325 ID.AddInteger(Kind);
4326 for (
const SCEV *
Op : Ops)
4329 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4331 return ExistingSCEV;
4334 std::uninitialized_copy(Ops.
begin(), Ops.
end(), O);
4335 SCEV *S =
new (SCEVAllocator)
4338 UniqueSCEVs.InsertNode(S, IP);
// NOTE(review): Two isolated fragments from the size/offset expression
// builders (original lines 4386 and 4407) — line 4386 presumably from
// getSizeOfExpr's scalable-size handling, line 4407 an assertion in
// getOffsetOfExpr. Everything around them is elided. Code kept
// byte-identical.
4386 if (
Size.isScalable())
4407 "Cannot get offset for structure containing scalable vector types");
// NOTE(review): Fragmentary excerpt of ScalarEvolution::getUnknown
// (original lines ~4421-4429); FoldingSetNodeID setup and the node
// allocation are elided by the extraction. Returns the uniqued
// SCEVUnknown for value V, creating and linking it into the
// FirstUnknown list when absent. Code kept byte-identical.
4421 if (
SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP)) {
// Cache-consistency check: the uniqued node must still wrap V.
4422 assert(cast<SCEVUnknown>(S)->getValue() == V &&
4423 "Stale SCEVUnknown in uniquing map!");
4428 FirstUnknown = cast<SCEVUnknown>(S);
4429 UniqueSCEVs.InsertNode(S, IP);
4477 bool PreciseA, PreciseB;
4478 auto *ScopeA = getDefiningScopeBound({
A}, PreciseA);