31#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
32#define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
34#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
41 cl::desc(
"When performing SCEV expansion only if it is cheap to do, this "
42 "controls the budget that is considered cheap (default = 4)"));
44using namespace PatternMatch;
66 for (
User *U : V->users()) {
67 if (U->getType() != Ty)
69 CastInst *CI = dyn_cast<CastInst>(U);
75 if (IP->getParent() == CI->
getParent() && &*BIP != CI &&
84 SCEVInsertPointGuard Guard(Builder,
this);
92 assert(!isa<Instruction>(Ret) ||
93 SE.DT.
dominates(cast<Instruction>(Ret), &*BIP));
102 if (
auto *II = dyn_cast<InvokeInst>(
I))
103 IP = II->getNormalDest()->begin();
105 while (isa<PHINode>(IP))
108 if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
110 }
else if (isa<CatchSwitchInst>(IP)) {
113 assert(!IP->isEHPad() &&
"unexpected eh pad!");
126SCEVExpander::GetOptimalInsertionPointForCastOf(
Value *V)
const {
129 if (
Argument *
A = dyn_cast<Argument>(V)) {
131 while ((isa<BitCastInst>(IP) &&
132 isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
133 cast<BitCastInst>(IP)->getOperand(0) !=
A) ||
134 isa<DbgInfoIntrinsic>(IP))
145 assert(isa<Constant>(V) &&
146 "Expected the cast argument to be a global/constant");
158 assert((Op == Instruction::BitCast ||
159 Op == Instruction::PtrToInt ||
160 Op == Instruction::IntToPtr) &&
161 "InsertNoopCastOfTo cannot perform non-noop casts!");
163 "InsertNoopCastOfTo cannot change sizes!");
170 if (Op == Instruction::IntToPtr) {
171 auto *PtrTy = cast<PointerType>(Ty);
173 auto *Int8PtrTy = Builder.
getInt8PtrTy(PtrTy->getAddressSpace());
175 "alloc size of i8 must by 1 byte for the GEP to be correct");
182 if (Op == Instruction::BitCast) {
183 if (
V->getType() == Ty)
185 if (
CastInst *CI = dyn_cast<CastInst>(V)) {
191 if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
193 if (
CastInst *CI = dyn_cast<CastInst>(V))
194 if ((CI->
getOpcode() == Instruction::PtrToInt ||
195 CI->
getOpcode() == Instruction::IntToPtr) &&
200 if ((
CE->getOpcode() == Instruction::PtrToInt ||
201 CE->getOpcode() == Instruction::IntToPtr) &&
204 return CE->getOperand(0);
212 return ReuseOrCreateCast(V, Ty, Op, GetOptimalInsertionPointForCastOf(V));
222 if (
Constant *CLHS = dyn_cast<Constant>(LHS))
223 if (
Constant *CRHS = dyn_cast<Constant>(RHS))
228 unsigned ScanLimit = 6;
232 if (IP != BlockBegin) {
234 for (; ScanLimit; --IP, --ScanLimit) {
237 if (isa<DbgInfoIntrinsic>(IP))
242 if (isa<OverflowingBinaryOperator>(
I)) {
250 if (isa<PossiblyExactOperator>(
I) &&
I->isExact())
254 if (IP->getOpcode() == (
unsigned)Opcode && IP->getOperand(0) ==
LHS &&
255 IP->getOperand(1) ==
RHS && !canGenerateIncompatiblePoison(&*IP))
257 if (IP == BlockBegin)
break;
263 SCEVInsertPointGuard Guard(Builder,
this);
268 if (!
L->isLoopInvariant(LHS) || !
L->isLoopInvariant(RHS))
break;
270 if (!Preheader)
break;
313 if (
const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
323 Remainder, SE.
getConstant(
C->getAPInt().srem(FC->getAPInt())));
331 if (
const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
334 if (
const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor))
335 if (
const SCEVConstant *
C = dyn_cast<SCEVConstant>(M->getOperand(0)))
336 if (!
C->getAPInt().srem(FC->getAPInt())) {
338 NewMulOps[0] = SE.
getConstant(
C->getAPInt().sdiv(FC->getAPInt()));
346 const SCEV *Step =
A->getStepRecurrence(SE);
352 const SCEV *Start =
A->getStart();
370 unsigned NumAddRecs = 0;
371 for (
unsigned i = Ops.
size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
401 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
403 const SCEV *Start =
A->getStart();
404 if (Start->isZero())
break;
407 A->getStepRecurrence(SE),
413 e +=
Add->getNumOperands();
418 if (!AddRecs.
empty()) {
453Value *SCEVExpander::expandAddToGEP(
const SCEV *
const *op_begin,
454 const SCEV *
const *op_end,
460 bool AnyNonZeroIndices =
false;
469 if (!PTy->isOpaque()) {
475 Type *ElTy = PTy->getNonOpaquePointerElementType();
484 for (
const SCEV *Op : Ops) {
491 AnyNonZeroIndices =
true;
499 if (!ScaledOps.
empty()) {
513 : expandCodeForImpl(SE.getAddExpr(ScaledOps), Ty);
517 while (
StructType *STy = dyn_cast<StructType>(ElTy)) {
518 bool FoundFieldNo =
false;
520 if (STy->getNumElements() == 0)
break;
528 uint64_t FullOffset =
C->getValue()->getZExtValue();
533 ElTy = STy->getTypeAtIndex(ElIdx);
536 AnyNonZeroIndices =
true;
544 ElTy = STy->getTypeAtIndex(0u);
550 if (
ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
551 ElTy = ATy->getElementType();
564 if (!AnyNonZeroIndices) {
566 if (!PTy->isOpaque())
567 V = InsertNoopCastOfTo(V,
570 assert(!isa<Instruction>(V) ||
577 if (
Constant *CLHS = dyn_cast<Constant>(V))
582 unsigned ScanLimit = 6;
586 if (IP != BlockBegin) {
588 for (; ScanLimit; --IP, --ScanLimit) {
591 if (isa<DbgInfoIntrinsic>(IP))
593 if (IP->getOpcode() == Instruction::GetElementPtr &&
594 IP->getOperand(0) == V && IP->getOperand(1) ==
Idx &&
595 cast<GEPOperator>(&*IP)->getSourceElementType() ==
598 if (IP == BlockBegin)
break;
603 SCEVInsertPointGuard Guard(Builder,
this);
607 if (!
L->isLoopInvariant(V) || !
L->isLoopInvariant(
Idx))
break;
609 if (!Preheader)
break;
620 SCEVInsertPointGuard Guard(Builder,
this);
624 if (!
L->isLoopInvariant(V))
break;
626 bool AnyIndexNotLoopInvariant =
any_of(
627 GepIndices, [L](
Value *Op) {
return !
L->isLoopInvariant(Op); });
629 if (AnyIndexNotLoopInvariant)
633 if (!Preheader)
break;
643 if (
V->getType() != PTy)
644 Casted = InsertNoopCastOfTo(Casted, PTy);
646 Casted, GepIndices,
"scevgep");
655 const SCEV *
const Ops[1] = {
Op};
656 return expandAddToGEP(Ops, Ops + 1, PTy, Ty, V);
666 if (
A->contains(
B))
return B;
667 if (
B->contains(
A))
return A;
668 if (DT.
dominates(
A->getHeader(),
B->getHeader()))
return B;
669 if (DT.
dominates(
B->getHeader(),
A->getHeader()))
return A;
675const Loop *SCEVExpander::getRelevantLoop(
const SCEV *S) {
677 auto Pair = RelevantLoops.insert(std::make_pair(S,
nullptr));
679 return Pair.first->second;
698 const Loop *
L =
nullptr;
703 return RelevantLoops[S] =
L;
707 if (
const Instruction *
I = dyn_cast<Instruction>(
U->getValue()))
708 return Pair.first->second = SE.LI.
getLoopFor(
I->getParent());
726 bool operator()(std::pair<const Loop *, const SCEV *>
LHS,
727 std::pair<const Loop *, const SCEV *>
RHS)
const {
734 if (
LHS.first !=
RHS.first)
740 if (
LHS.second->isNonConstantNegative()) {
741 if (!
RHS.second->isNonConstantNegative())
743 }
else if (
RHS.second->isNonConstantNegative())
762 OpsAndLoops.
push_back(std::make_pair(getRelevantLoop(Op), Op));
770 Value *Sum =
nullptr;
771 for (
auto I = OpsAndLoops.
begin(),
E = OpsAndLoops.
end();
I !=
E;) {
772 const Loop *CurLoop =
I->first;
781 assert(!
Op->getType()->isPointerTy() &&
"Only first op can be pointer");
786 for (;
I !=
E &&
I->first == CurLoop; ++
I) {
789 const SCEV *
X =
I->second;
791 if (!isa<Instruction>(
U->getValue()))
795 Sum = expandAddToGEP(NewOps.
begin(), NewOps.
end(), PTy, Ty, Sum);
796 }
else if (
Op->isNonConstantNegative()) {
799 Sum = InsertNoopCastOfTo(Sum, Ty);
805 Value *
W = expandCodeForImpl(Op, Ty);
806 Sum = InsertNoopCastOfTo(Sum, Ty);
808 if (isa<Constant>(Sum))
std::swap(Sum, W);
825 OpsAndLoops.
push_back(std::make_pair(getRelevantLoop(Op), Op));
832 Value *Prod =
nullptr;
833 auto I = OpsAndLoops.
begin();
838 const auto ExpandOpBinPowN = [
this, &
I, &OpsAndLoops, &Ty]() {
848 while (
E != OpsAndLoops.end() && *
I == *
E &&
Exponent != MaxExponent) {
852 assert(
Exponent > 0 &&
"Trying to calculate a zeroth exponent of operand?");
856 Value *
P = expandCodeForImpl(
I->second, Ty);
871 assert(Result &&
"Nothing was expanded?");
875 while (
I != OpsAndLoops.end()) {
878 Prod = ExpandOpBinPowN();
879 }
else if (
I->second->isAllOnesValue()) {
881 Prod = InsertNoopCastOfTo(Prod, Ty);
887 Value *
W = ExpandOpBinPowN();
888 Prod = InsertNoopCastOfTo(Prod, Ty);
890 if (isa<Constant>(Prod))
std::swap(Prod, W);
897 if (
RHS->logBase2() ==
RHS->getBitWidth() - 1)
899 Prod = InsertBinop(Instruction::Shl, Prod,
903 Prod = InsertBinop(Instruction::Mul, Prod, W, S->
getNoWrapFlags(),
918 if (
RHS.isPowerOf2())
919 return InsertBinop(Instruction::LShr, LHS,
934 (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
939 if (L == IVIncInsertLoop) {
941 if (
Instruction *OInst = dyn_cast<Instruction>(Op))
942 if (!SE.DT.
dominates(OInst, IVIncInsertPos))
946 IncV = dyn_cast<Instruction>(IncV->
getOperand(0));
956 return isNormalAddRecExprPHI(PN, IncV, L);
971 if (IncV == InsertPos)
978 case Instruction::Add:
979 case Instruction::Sub: {
981 if (!OInst || SE.DT.
dominates(OInst, InsertPos))
982 return dyn_cast<Instruction>(IncV->
getOperand(0));
985 case Instruction::BitCast:
986 return dyn_cast<Instruction>(IncV->
getOperand(0));
987 case Instruction::GetElementPtr:
989 if (isa<Constant>(U))
991 if (
Instruction *OInst = dyn_cast<Instruction>(U)) {
1005 unsigned AS = cast<PointerType>(IncV->
getType())->getAddressSpace();
1011 return dyn_cast<Instruction>(IncV->
getOperand(0));
1027 for (
auto *InsertPtGuard : InsertPointGuards)
1028 if (InsertPtGuard->GetInsertPoint() == It)
1029 InsertPtGuard->SetInsertPoint(NewInsertPt);
1036 bool RecomputePoisonFlags) {
1040 I->dropPoisonGeneratingFlags();
1041 if (
auto *OBO = dyn_cast<OverflowingBinaryOperator>(
I))
1043 auto *BO = cast<BinaryOperator>(
I);
1052 if (RecomputePoisonFlags)
1053 FixupPoisonFlags(IncV);
1059 if (isa<PHINode>(InsertPos) ||
1079 fixupInsertPoints(
I);
1080 I->moveBefore(InsertPos);
1081 if (RecomputePoisonFlags)
1082 FixupPoisonFlags(
I);
1095 (IVOper =
getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
1112 PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
1115 if (!isa<ConstantInt>(StepV))
1117 GEPPtrTy->getAddressSpace());
1118 IncV = expandAddToGEP(SE.
getSCEV(StepV), GEPPtrTy, IntTy, PN);
1122 IncV = useSubtract ?
1151 if (Phi == Requested) {
1166 if (!isa<IntegerType>(AR->
getType()))
1174 const SCEV *ExtendAfterOp =
1176 return ExtendAfterOp == OpAfterExtend;
1180 if (!isa<IntegerType>(AR->
getType()))
1188 const SCEV *ExtendAfterOp =
1190 return ExtendAfterOp == OpAfterExtend;
1197SCEVExpander::getAddRecExprPHILiterally(
const SCEVAddRecExpr *Normalized,
1203 assert((!IVIncInsertLoop||IVIncInsertPos) &&
"Uninitialized insert position");
1208 PHINode *AddRecPhiMatch =
nullptr;
1215 bool TryNonMatchingSCEV =
1219 for (
PHINode &PN :
L->getHeader()->phis()) {
1227 DebugType,
dbgs() <<
"One incomplete PHI is found: " << PN <<
"\n");
1235 bool IsMatchingSCEV = PhiSCEV == Normalized;
1239 if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1250 if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
1253 if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
1258 if (IsMatchingSCEV) {
1262 AddRecPhiMatch = &PN;
1268 if ((!TruncTy || InvertStep) &&
1272 AddRecPhiMatch = &PN;
1278 if (AddRecPhiMatch) {
1281 InsertedValues.insert(AddRecPhiMatch);
1283 rememberInstruction(IncV);
1285 ReusedValues.
insert(AddRecPhiMatch);
1286 ReusedValues.
insert(IncV);
1287 return AddRecPhiMatch;
1292 SCEVInsertPointGuard Guard(Builder,
this);
1302 PostIncLoops.
clear();
1305 assert(
L->getLoopPreheader() &&
1306 "Can't expand add recurrences without a loop preheader!");
1308 expandCodeForImpl(Normalized->
getStart(), ExpandTy,
1309 L->getLoopPreheader()->getTerminator());
1313 assert(!isa<Instruction>(StartV) ||
1327 Value *StepV = expandCodeForImpl(
1328 Step, IntTy, &*
L->getHeader()->getFirstInsertionPt());
1333 bool IncrementIsNUW = !useSubtract &&
IsIncrementNUW(SE, Normalized);
1334 bool IncrementIsNSW = !useSubtract &&
IsIncrementNSW(SE, Normalized);
1341 Twine(IVName) +
".iv");
1348 if (!
L->contains(Pred)) {
1359 Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1361 if (isa<OverflowingBinaryOperator>(IncV)) {
1363 cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
1365 cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
1372 PostIncLoops = SavedPostIncLoops;
1376 InsertedValues.insert(PN);
1377 InsertedIVs.push_back(PN);
1389 if (PostIncLoops.
count(L)) {
1397 const SCEV *PostLoopOffset =
nullptr;
1399 PostLoopOffset = Start;
1401 Normalized = cast<SCEVAddRecExpr>(
1409 const SCEV *PostLoopScale =
nullptr;
1411 PostLoopScale = Step;
1413 if (!Start->isZero()) {
1416 assert(!PostLoopOffset &&
"Start not-null but PostLoopOffset set?");
1417 PostLoopOffset = Start;
1422 Start, Step, Normalized->
getLoop(),
1428 Type *ExpandTy = PostLoopScale ? IntTy : STy;
1431 Type *AddRecPHIExpandTy =
1436 Type *TruncTy =
nullptr;
1437 bool InvertStep =
false;
1438 PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
1439 IntTy, TruncTy, InvertStep);
1443 if (!PostIncLoops.
count(L))
1448 assert(LatchBlock &&
"PostInc mode requires a unique loop latch!");
1454 if (isa<OverflowingBinaryOperator>(Result)) {
1455 auto *
I = cast<Instruction>(Result);
1457 I->setHasNoUnsignedWrap(
false);
1459 I->setHasNoSignedWrap(
false);
1465 if (isa<Instruction>(Result) &&
1466 !SE.DT.
dominates(cast<Instruction>(Result),
1484 SCEVInsertPointGuard Guard(Builder,
this);
1485 StepV = expandCodeForImpl(
1486 Step, IntTy, &*
L->getHeader()->getFirstInsertionPt());
1488 Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1500 if (TruncTy !=
Result->getType())
1506 expandCodeForImpl(Normalized->
getStart(), TruncTy), Result);
1510 if (PostLoopScale) {
1511 assert(S->
isAffine() &&
"Can't linearly scale non-affine recurrences.");
1512 Result = InsertNoopCastOfTo(Result, IntTy);
1514 expandCodeForImpl(PostLoopScale, IntTy));
1518 if (PostLoopOffset) {
1519 if (
PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
1520 if (
Result->getType()->isIntegerTy()) {
1521 Value *
Base = expandCodeForImpl(PostLoopOffset, ExpandTy);
1524 Result = expandAddToGEP(PostLoopOffset, PTy, IntTy, Result);
1527 Result = InsertNoopCastOfTo(Result, IntTy);
1529 Result, expandCodeForImpl(PostLoopOffset, IntTy));
1548 return expandAddRecExprLiterally(S);
1554 PHINode *CanonicalIV =
nullptr;
1555 if (
PHINode *PN =
L->getCanonicalInductionVariable())
1580 assert(StartV->
getType() == PTy &&
"Pointer type mismatch for GEP!");
1595 return expand(SE.
getAddExpr(AddExprLHS, AddExprRHS));
1606 rememberInstruction(CanonicalIV);
1612 if (!PredSeen.
insert(HP).second) {
1619 if (
L->contains(HP)) {
1626 rememberInstruction(
Add);
1637 "IVs with types different from the canonical IV should "
1638 "already have been handled!");
1660 const SCEV *NewS = S;
1662 if (isa<SCEVAddRecExpr>(Ext))
1665 const SCEV *
V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1675 return ReuseOrCreateCast(V, S->
getType(), CastInst::PtrToInt,
1676 GetOptimalInsertionPointForCastOf(V));
1681 Value *
V = expandCodeForImpl(
1689 Value *
V = expandCodeForImpl(
1697 Value *
V = expandCodeForImpl(
1705 bool IsSequential) {
1712 if (IsSequential && i != 0)
1729 return expandMinMaxExpr(S, Intrinsic::smax,
"smax");
1733 return expandMinMaxExpr(S, Intrinsic::umax,
"umax");
1737 return expandMinMaxExpr(S, Intrinsic::smin,
"smin");
1741 return expandMinMaxExpr(S, Intrinsic::umin,
"umin");
1745 return expandMinMaxExpr(S, Intrinsic::umin,
"umin",
true);
1752Value *SCEVExpander::expandCodeForImpl(
const SCEV *SH,
Type *Ty,
1755 Value *
V = expandCodeForImpl(SH, Ty);
1759Value *SCEVExpander::expandCodeForImpl(
const SCEV *SH,
Type *Ty) {
1765 "non-trivial casts should be done with the SCEVs directly!");
1766 V = InsertNoopCastOfTo(V, Ty);
1771Value *SCEVExpander::FindValueInExprValueMap(
const SCEV *S,
1779 if (isa<SCEVConstant>(S))
1785 for (
Value *V : SE.getSCEVValues(S)) {
1791 if (S->
getType() ==
V->getType() &&
1806Value *SCEVExpander::expand(
const SCEV *S) {
1813 auto SafeToHoist = [](
const SCEV *S) {
1815 if (
const auto *
D = dyn_cast<SCEVUDivExpr>(S)) {
1816 if (
const auto *SC = dyn_cast<SCEVConstant>(
D->getRHS()))
1818 return SC->getValue()->isZero();
1828 if (SafeToHoist(S)) {
1830 L =
L->getParentLoop()) {
1833 if (
BasicBlock *Preheader =
L->getLoopPreheader())
1839 InsertPt = &*
L->getHeader()->getFirstInsertionPt();
1845 InsertPt = &*
L->getHeader()->getFirstInsertionPt();
1849 isa<DbgInfoIntrinsic>(InsertPt))) {
1858 auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1859 if (
I != InsertedExpressions.end())
1862 SCEVInsertPointGuard Guard(Builder,
this);
1866 Value *
V = FindValueInExprValueMap(S, InsertPt);
1869 V = fixupLCSSAFormFor(V);
1875 if (
auto *
I = dyn_cast<Instruction>(V))
1877 I->dropPoisonGeneratingFlags();
1885 InsertedExpressions[std::make_pair(S, InsertPt)] =
V;
1889void SCEVExpander::rememberInstruction(
Value *
I) {
1890 auto DoInsert = [
this](
Value *
V) {
1891 if (!PostIncLoops.
empty())
1892 InsertedPostIncValues.insert(V);
1894 InsertedValues.insert(V);
1911 for (
PHINode &PN : L->getHeader()->phis())
1925 unsigned NumElim = 0;
1935 auto *Const = dyn_cast<SCEVConstant>(SE.
getSCEV(PN));
1938 return Const->getValue();
1943 if (
Value *V = SimplifyPHINode(Phi)) {
1944 if (V->getType() != Phi->getType())
1947 Phi->replaceAllUsesWith(V);
1951 dbgs() <<
"INDVARS: Eliminated constant iv: " << *Phi
1962 if (Phi->getType()->isIntegerTy() &&
TTI &&
1966 const SCEV *TruncExpr =
1968 ExprToIVMap[TruncExpr] = Phi;
1978 if (
BasicBlock *LatchBlock = L->getLoopLatch()) {
1982 dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
1984 if (OrigInc && IsomorphicInc) {
1988 if (OrigPhiRef->
getType() == Phi->getType() &&
1989 !(ChainedPhis.count(Phi) ||
1990 isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
1991 (ChainedPhis.count(Phi) ||
1992 isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
2005 const SCEV *TruncExpr =
2007 if (OrigInc != IsomorphicInc &&
2008 TruncExpr == SE.
getSCEV(IsomorphicInc) &&
2012 DebugType,
dbgs() <<
"INDVARS: Eliminated congruent iv.inc: "
2013 << *IsomorphicInc <<
'\n');
2014 Value *NewInc = OrigInc;
2017 if (
PHINode *PN = dyn_cast<PHINode>(OrigInc))
2024 NewInc =
Builder.CreateTruncOrBitCast(
2025 OrigInc, IsomorphicInc->
getType(), IVName);
2033 dbgs() <<
"INDVARS: Eliminated congruent iv: " << *Phi
2036 DebugType,
dbgs() <<
"INDVARS: Original iv: " << *OrigPhiRef <<
'\n');
2038 Value *NewIV = OrigPhiRef;
2039 if (OrigPhiRef->
getType() != Phi->getType()) {
2040 IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
2041 Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
2042 NewIV =
Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->
getType(), IVName);
2044 Phi->replaceAllUsesWith(NewIV);
2056 L->getExitingBlocks(ExitingBlocks);
2063 if (!
match(BB->getTerminator(),
2079 return FindValueInExprValueMap(S, At);
2087 const T *S = cast<T>(WorkItem.
S);
2090 struct OperationIndices {
2091 OperationIndices(
unsigned Opc,
size_t min,
size_t max) :
2092 Opcode(Opc), MinIdx(
min), MaxIdx(
max) { }
2106 S->getOperand(0)->getType(),
2110 auto ArithCost = [&](
unsigned Opcode,
unsigned NumRequired,
2111 unsigned MinIdx = 0,
2114 return NumRequired *
2118 auto CmpSelCost = [&](
unsigned Opcode,
unsigned NumRequired,
unsigned MinIdx,
2121 Type *OpType = S->getType();
2127 switch (S->getSCEVType()) {
2135 Cost = CastCost(Instruction::PtrToInt);
2138 Cost = CastCost(Instruction::Trunc);
2141 Cost = CastCost(Instruction::ZExt);
2144 Cost = CastCost(Instruction::SExt);
2147 unsigned Opcode = Instruction::UDiv;
2148 if (
auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
2149 if (SC->getAPInt().isPowerOf2())
2150 Opcode = Instruction::LShr;
2151 Cost = ArithCost(Opcode, 1);
2155 Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
2161 Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
2170 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
2171 Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
2172 switch (S->getSCEVType()) {
2176 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 0);
2177 Cost += ArithCost(Instruction::Or,
2178 S->getNumOperands() > 2 ? S->getNumOperands() - 2 : 0);
2179 Cost += CmpSelCost(Instruction::Select, 1, 0, 1);
2183 assert(!isa<SCEVSequentialMinMaxExpr>(S) &&
2184 "Unhandled SCEV expression type?");
2193 return !Op->isZero();
2196 assert(NumTerms >= 1 &&
"Polynominal should have at least one term.");
2197 assert(!(*std::prev(S->operands().end()))->isZero() &&
2198 "Last operand should not be zero");
2201 int NumNonZeroDegreeNonOneTerms =
2203 auto *SConst = dyn_cast<SCEVConstant>(Op);
2204 return !SConst || SConst->getAPInt().ugt(1);
2213 ArithCost(Instruction::Mul, NumNonZeroDegreeNonOneTerms);
2214 Cost = AddCost + MulCost;
2217 int PolyDegree = S->getNumOperands() - 1;
2218 assert(PolyDegree >= 1 &&
"Should be at least affine.");
2226 Cost += MulCost * (PolyDegree - 1);
2231 for (
auto &CostOp : Operations) {
2232 for (
auto SCEVOp :
enumerate(S->operands())) {
2234 size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
2235 size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
2236 Worklist.
emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
2242bool SCEVExpander::isHighCostExpansionHelper(
2250 const SCEV *S = WorkItem.
S;
2252 if (!isa<SCEVConstant>(S) && !Processed.
insert(S).second)
2261 L->getHeader()->getParent()->hasMinSize()
2276 const APInt &
Imm = cast<SCEVConstant>(S)->getAPInt();
2280 return Cost > Budget;
2287 costAndCollectOperands<SCEVCastExpr>(WorkItem,
TTI,
CostKind, Worklist);
2304 costAndCollectOperands<SCEVUDivExpr>(WorkItem,
TTI,
CostKind, Worklist);
2314 assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
2315 "Nary expr should have more than 1 operand.");
2319 costAndCollectOperands<SCEVNAryExpr>(WorkItem,
TTI,
CostKind, Worklist);
2320 return Cost > Budget;
2323 assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
2324 "Polynomial should be at least linear");
2325 Cost += costAndCollectOperands<SCEVAddRecExpr>(
2327 return Cost > Budget;
2342 auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2358 auto *
I = Builder.
CreateICmp(InvPred, Expr0, Expr1,
"ident.check");
2365 "non-affine expression");
2369 const SCEV *ExitCount =
2372 assert(!isa<SCEVCouldNotCompute>(ExitCount) &&
"Invalid loop count");
2388 Value *TripCountVal = expandCodeForImpl(ExitCount, CountTy, Loc);
2393 Value *StepValue = expandCodeForImpl(Step, Ty, Loc);
2394 Value *NegStepValue =
2396 Value *StartValue = expandCodeForImpl(Start, ARTy, Loc);
2414 auto ComputeEndCheck = [&]() ->
Value * {
2422 Value *MulV, *OfMul;
2423 if (Step->
isOne()) {
2427 MulV = TruncTripCount;
2431 Intrinsic::umul_with_overflow, Ty);
2433 Builder.
CreateCall(MulF, {AbsStep, TruncTripCount},
"mul");
2438 Value *
Add =
nullptr, *Sub =
nullptr;
2442 if (
PointerType *ARPtrTy = dyn_cast<PointerType>(ARTy)) {
2443 StartValue = InsertNoopCastOfTo(
2444 StartValue, Builder.
getInt8PtrTy(ARPtrTy->getAddressSpace()));
2454 Sub = Builder.
CreateSub(StartValue, MulV);
2457 Value *EndCompareLT =
nullptr;
2458 Value *EndCompareGT =
nullptr;
2459 Value *EndCheck =
nullptr;
2461 EndCheck = EndCompareLT = Builder.
CreateICmp(
2464 EndCheck = EndCompareGT = Builder.
CreateICmp(
2466 if (NeedPosCheck && NeedNegCheck) {
2468 EndCheck = Builder.
CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2470 return Builder.
CreateOr(EndCheck, OfMul);
2472 Value *EndCheck = ComputeEndCheck();
2479 auto *BackedgeCheck =
2485 EndCheck = Builder.
CreateOr(EndCheck, BackedgeCheck);
2493 const auto *
A = cast<SCEVAddRecExpr>(Pred->
getExpr());
2494 Value *NSSWCheck =
nullptr, *NUSWCheck =
nullptr;
2504 if (NUSWCheck && NSSWCheck)
2505 return Builder.
CreateOr(NUSWCheck, NSSWCheck);
2520 for (
const auto *Pred : Union->getPredicates()) {
2530Value *SCEVExpander::fixupLCSSAFormFor(
Value *V) {
2531 auto *DefI = dyn_cast<Instruction>(V);
2532 if (!PreserveLCSSA || !DefI)
2538 if (!DefLoop || UseLoop == DefLoop || DefLoop->
contains(UseLoop))
2549 if (DefI->getType()->isIntegerTy())
2555 auto RemoveUserOnExit =
2562 for (
PHINode *PN : PHIsToRemove) {
2565 InsertedValues.erase(PN);
2566 InsertedPostIncValues.erase(PN);
2592struct SCEVFindUnsafe {
2595 bool IsUnsafe =
false;
2598 : SE(SE), CanonicalMode(CanonicalMode) {}
2600 bool follow(
const SCEV *S) {
2608 const SCEV *Step = AR->getStepRecurrence(SE);
2609 if (!AR->isAffine() && !SE.
dominates(Step, AR->getLoop()->getHeader())) {
2616 if (!AR->getLoop()->getLoopPreheader() &&
2617 (!CanonicalMode || !AR->isAffine())) {
2624 bool isDone()
const {
return IsUnsafe; }
2629 SCEVFindUnsafe Search(SE, CanonicalMode);
2631 return !Search.IsUnsafe;
2649 if (
const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
2664 InsertedInstructions.end());
2674 [&InsertedSet](
Value *U) {
2675 return InsertedSet.contains(cast<Instruction>(U));
2677 "removed instruction should only be used by instructions inserted "
2678 "during expansion");
2680 assert(!
I->getType()->isVoidTy() &&
2681 "inserted instruction should have non-void types");
2683 I->eraseFromParent();
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
static const Loop * PickMostRelevantLoop(const Loop *A, const Loop *B, DominatorTree &DT)
PickMostRelevantLoop - Given two loops pick the one that's most relevant for SCEV expansion.
static InstructionCost costAndCollectOperands(const SCEVOperand &WorkItem, const TargetTransformInfo &TTI, TargetTransformInfo::TargetCostKind CostKind, SmallVectorImpl< SCEVOperand > &Worklist)
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
static void SimplifyAddOperands(SmallVectorImpl< const SCEV * > &Ops, Type *Ty, ScalarEvolution &SE)
SimplifyAddOperands - Sort and simplify a list of add operands.
static bool canBeCheaplyTransformed(ScalarEvolution &SE, const SCEVAddRecExpr *Phi, const SCEVAddRecExpr *Requested, bool &InvertStep)
Check whether we can cheaply express the requested SCEV in terms of the available PHI SCEV by truncat...
#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder, const SCEV *Factor, ScalarEvolution &SE, const DataLayout &DL)
FactorOutConstant - Test if S is divisible by Factor, using signed division.
static void SplitAddRecs(SmallVectorImpl< const SCEV * > &Ops, Type *Ty, ScalarEvolution &SE)
SplitAddRecs - Flatten a list of add operands, moving addrec start values out to the top level.
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallSet class.
Class for arbitrary precision integers.
APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
This class represents an incoming formal argument to a Function.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), Instruction *InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
A constant value that is initialized with an expression using other constant values.
static Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
static ConstantInt * getFalse(LLVMContext &Context)
This is an important base class in LLVM.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Type * getIndexType(Type *PtrTy) const
Returns the type of a GEP index.
bool isNonIntegralPointerType(PointerType *PT) const
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
const BasicBlock & getEntryBlock() const
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateVScale(Constant *Scaling, const Twine &Name="")
Create a call to llvm.vscale, multiplied by Scaling.
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateFreeze(Value *V, const Twine &Name="")
BasicBlock * GetInsertBlock() const
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
InstTy * Insert(InstTy *I, const Twine &Name="") const
Insert and return the specified instruction.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
PointerType * getInt8PtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer to an 8-bit integer value.
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", bool IsInBounds=false)
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
const BasicBlock * getParent() const
const Function * getFunction() const
Return the function this instruction belongs to.
bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getHeader() const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
bool replacementPreservesLCSSAForm(Instruction *From, Value *To)
Returns true if replacing From with To everywhere is guaranteed to preserve LCSSA form.
bool movementPreservesLCSSAForm(Instruction *Inst, Instruction *NewLoc)
Checks if moving a specific instruction can break LCSSA in any loop.
Represents a single loop in the control flow graph.
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
bool isComplete() const
If the PHI node is complete which means all of its parent's predecessors have incoming value in this ...
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Value * getIncomingValueForBlock(const BasicBlock *BB) const
Class to represent pointers.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStart() const
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
const Loop * getLoop() const
const SCEV * getOperand() const
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
const SCEV * getRHS() const
Returns the right hand side of the predicate.
ICmpInst::Predicate getPredicate() const
const SCEV * getLHS() const
Returns the left hand side of the predicate.
This class represents a constant integer value.
Value * generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc, bool Signed)
Generates code that evaluates if the AR expression will overflow.
SmallVector< Instruction *, 32 > getAllInsertedInstructions() const
Return a vector containing all instructions inserted during expansion.
bool isSafeToExpand(const SCEV *S) const
Return true if the given expression is safe to expand in the sense that all materialized values are s...
bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint) const
Return true if the given expression is safe to expand in the sense that all materialized values are d...
unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT, SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetTransformInfo *TTI=nullptr)
replace congruent phis with their most canonical representative.
Value * getRelatedExistingExpansion(const SCEV *S, const Instruction *At, Loop *L)
Try to find the ValueOffsetPair for S.
Value * expandUnionPredicate(const SCEVUnionPredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
bool hoistIVInc(Instruction *IncV, Instruction *InsertPos, bool RecomputePoisonFlags=false)
Utility for hoisting IncV (with all subexpressions required for its computation) before InsertPos.
void clear()
Erase the contents of the InsertedExpressions map so that users trying to expand the same expression ...
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
Value * expandComparePredicate(const SCEVComparePredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
Value * expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
Instruction * getIVIncOperand(Instruction *IncV, Instruction *InsertPos, bool allowScale)
Return the induction variable increment's IV operand.
BasicBlock::iterator findInsertPointAfter(Instruction *I, Instruction *MustDominate) const
Returns a suitable insert point after I, that dominates MustDominate.
void setInsertPoint(Instruction *IP)
Set the current insertion point.
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
const SCEV * getOperand(unsigned i) const
ArrayRef< const SCEV * > operands() const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
SCEVPredicateKind getKind() const
This class represents a cast from a pointer to a pointer-sized integer value.
This class represents a signed maximum selection.
This class represents a signed minimum selection.
This class represents a sequential/in-order unsigned minimum selection.
This class represents a sign extension of a small integer value to a larger integer value.
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
const SCEV * getLHS() const
const SCEV * getRHS() const
This class represents an unsigned maximum selection.
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
ArrayRef< const SCEV * > operands() const
Return operands of this SCEV expression.
bool isOne() const
Return true if the expression is a constant one.
bool isZero() const
Return true if the expression is a constant zero.
bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
SCEVTypes getSCEVType() const
Type * getType() const
Return the LLVM type of this SCEV expression.
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
The main scalar evolution driver.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVector< const SCEVPredicate *, 4 > &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
std::optional< SCEV::NoWrapFlags > getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO)
Parse NSW/NUW flags from add/sub/mul IR binary operation Op into SCEV no-wrap flags,...
const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV dominate the specified basic block.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
const SCEV * getUnknown(Value *V)
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVMContext & getContext() const
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
uint64_t getSizeInBytes() const
uint64_t getElementOffset(unsigned Idx) const
unsigned getElementContainingOffset(uint64_t Offset) const
Given a valid byte offset into the structure, returns the structure index that contains it.
Class to represent struct types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
static PointerType * getInt1PtrTy(LLVMContext &C, unsigned AS=0)
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
iterator_range< value_op_iterator > operand_values()
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
LLVMContext & getContext() const
All values hold a context through their type.
constexpr ScalarTy getFixedValue() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
@ CE
Windows NT (Windows on ARM)
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A,...
const SCEV * normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops, ScalarEvolution &SE)
Normalize S to be post-increment for all loops present in Loops.
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Interval::pred_iterator pred_end(Interval *I)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
bool programUndefinedIfPoison(const Instruction *Inst)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
cl::opt< unsigned > SCEVCheapExpansionBudget
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
bool formLCSSAForInstructions(SmallVectorImpl< Instruction * > &Worklist, const DominatorTree &DT, const LoopInfo &LI, ScalarEvolution *SE, IRBuilderBase &Builder, SmallVectorImpl< PHINode * > *PHIsToRemove=nullptr)
Ensures LCSSA form for every instruction from the Worklist in the scope of innermost containing loop.
@ Mul
Product of integers.
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
constexpr unsigned BitWidth
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
struct for holding enough information to help calculate the cost of the given SCEV when expanded into...
const SCEV * S
The SCEV operand to be costed.
unsigned ParentOpcode
LLVM instruction opcode that uses the operand.
int OperandIdx
The use index of an expanded instruction.
Value * visit(const SCEV *S)