#define DEBUG_TYPE "loop-reduce"

    cl::desc("Enable LSR phi elimination"));

    cl::desc("Add instruction count to a LSR cost model"));

    cl::desc("Narrow LSR complex solution using"
             " expectation of registers number"));

    cl::desc("Narrow LSR search space by filtering non-optimal formulae"
             " with the same ScaledReg and Scale"));

    cl::desc("A flag that overrides the target's preferred addressing mode."),
               "Prefer pre-indexed addressing mode"),
               "Prefer post-indexed addressing mode"),

    cl::init(std::numeric_limits<uint16_t>::max()),
    cl::desc("LSR search space complexity limit"));

    cl::desc("The limit on recursion depth for LSRs setup cost"));

    cl::desc("Attempt to drop solution if it is less profitable"));

    cl::desc("Enable analysis of vscale-relative immediates in LSR"));

    cl::desc("Avoid using scaled registers with vscale-relative addressing"));

    cl::desc("Stress test LSR IV chains"));
      std::numeric_limits<unsigned>::max();

  Type *MemTy = nullptr;

  MemAccessTy() = default;
  MemAccessTy(Type *Ty, unsigned AS) : MemTy(Ty), AddrSpace(AS) {}

    return MemTy == Other.MemTy && AddrSpace == Other.AddrSpace;

  static MemAccessTy getUnknown(LLVMContext &Ctx,
                                unsigned AS = UnknownAddressSpace) {
    return MemAccessTy(Type::getVoidTy(Ctx), AS);
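  // Note: MemAccessTy pairs the accessed memory type with its address space;
  // getUnknown() uses a void type so that accesses of unknown type still
  // carry an address space for the legality queries below.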
  SmallBitVector UsedByIndices;

  void print(raw_ostream &OS) const;
  constexpr Immediate(ScalarTy MinVal, bool Scalable)
      : FixedOrScalableQuantity(MinVal, Scalable) {}

  constexpr Immediate(const FixedOrScalableQuantity<Immediate, int64_t> &V)
      : FixedOrScalableQuantity(V) {}

  constexpr Immediate() = delete;

  static constexpr Immediate getFixed(ScalarTy MinVal) {
    return {MinVal, false};
  static constexpr Immediate getScalable(ScalarTy MinVal) {
    return {MinVal, true};
  static constexpr Immediate get(ScalarTy MinVal, bool Scalable) {
    return {MinVal, Scalable};
  static constexpr Immediate getZero() { return {0, false}; }
  static constexpr Immediate getFixedMin() {
    return {std::numeric_limits<int64_t>::min(), false};
  static constexpr Immediate getFixedMax() {
    return {std::numeric_limits<int64_t>::max(), false};
  static constexpr Immediate getScalableMin() {
    return {std::numeric_limits<int64_t>::min(), true};
  static constexpr Immediate getScalableMax() {
    return {std::numeric_limits<int64_t>::max(), true};

  constexpr bool isLessThanZero() const { return Quantity < 0; }
  constexpr bool isGreaterThanZero() const { return Quantity > 0; }

  constexpr bool isCompatibleImmediate(const Immediate &Imm) const {
    return isZero() || Imm.isZero() || Imm.Scalable == Scalable;

  constexpr bool isMin() const {
    return Quantity == std::numeric_limits<ScalarTy>::min();
  constexpr bool isMax() const {
    return Quantity == std::numeric_limits<ScalarTy>::max();

  constexpr Immediate addUnsigned(const Immediate &RHS) const {
    assert(isCompatibleImmediate(RHS) && "Incompatible Immediates");
    ScalarTy Value = (uint64_t)Quantity + RHS.getKnownMinValue();
    return {Value, Scalable || RHS.isScalable()};

  constexpr Immediate subUnsigned(const Immediate &RHS) const {
    assert(isCompatibleImmediate(RHS) && "Incompatible Immediates");
    ScalarTy Value = (uint64_t)Quantity - RHS.getKnownMinValue();
    return {Value, Scalable || RHS.isScalable()};

  constexpr Immediate mulUnsigned(const ScalarTy RHS) const {
    ScalarTy Value = (uint64_t)Quantity * RHS;
    return {Value, Scalable};

  const SCEV *getSCEV(ScalarEvolution &SE, Type *Ty) const {

  const SCEV *getNegativeSCEV(ScalarEvolution &SE, Type *Ty) const {
    const SCEV *NegS = SE.getConstant(Ty, -(uint64_t)Quantity);

  const SCEV *getUnknownSCEV(ScalarEvolution &SE, Type *Ty) const {
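  // Note: the *Unsigned helpers above deliberately do their arithmetic
  // through uint64_t so that overflow wraps rather than being undefined, and
  // they propagate the scalable flag from either operand; callers are
  // expected to have checked isCompatibleImmediate() first.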
struct KeyOrderTargetImmediate {
  bool operator()(const Immediate &LHS, const Immediate &RHS) const {
    if (LHS.isScalable() && !RHS.isScalable())
    if (!LHS.isScalable() && RHS.isScalable())
    return LHS.getKnownMinValue() < RHS.getKnownMinValue();

struct KeyOrderSizeTAndImmediate {
  bool operator()(const std::pair<size_t, Immediate> &LHS,
                  const std::pair<size_t, Immediate> &RHS) const {
    size_t LSize = LHS.first;
    size_t RSize = RHS.first;
      return LSize < RSize;
    return KeyOrderTargetImmediate()(LHS.second, RHS.second);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  OS << "[NumUses=" << UsedByIndices.count() << ']';
  using RegUsesTy = DenseMap<const SCEV *, RegSortData>;

  RegUsesTy RegUsesMap;

  void countRegister(const SCEV *Reg, size_t LUIdx);
  void dropRegister(const SCEV *Reg, size_t LUIdx);
  void swapAndDropUse(size_t LUIdx, size_t LastLUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;
RegUseTracker::countRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair = RegUsesMap.try_emplace(Reg);
  RegSortData &RSD = Pair.first->second;
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);

RegUseTracker::dropRegister(const SCEV *Reg, size_t LUIdx) {
  RegUsesTy::iterator It = RegUsesMap.find(Reg);
  assert(It != RegUsesMap.end());
  RegSortData &RSD = It->second;
  RSD.UsedByIndices.reset(LUIdx);

RegUseTracker::swapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
  assert(LUIdx <= LastLUIdx);

  for (auto &Pair : RegUsesMap) {
    SmallBitVector &UsedByIndices = Pair.second.UsedByIndices;
    if (LUIdx < UsedByIndices.size())
      UsedByIndices[LUIdx] =
          LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : false;
    UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
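// Note: swapAndDropUse mirrors a swap-with-last-and-pop deletion of an
// LSRUse: bit LUIdx takes the value that bit LastLUIdx had, and the vector is
// then truncated so the dropped use's slot disappears.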
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  if (I == RegUsesMap.end())
  const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  assert(I != RegUsesMap.end() && "Unknown register!");
  return I->second.UsedByIndices;

void RegUseTracker::clear() {
  GlobalValue *BaseGV = nullptr;

  Immediate BaseOffset = Immediate::getZero();

  bool HasBaseReg = false;

  const SCEV *ScaledReg = nullptr;

  Immediate UnfoldedOffset = Immediate::getZero();

  void initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);

  void canonicalize(const Loop &L);

  bool hasZeroEnd() const;

  bool countsDownToZero() const;

  size_t getNumRegs() const;

  void deleteBaseReg(const SCEV *&S);

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
    for (const SCEV *S : Add->operands())

  const SCEV *Start, *Step;

    if (Mul->getOperand(0)->isAllOnesValue()) {

  for (const SCEV *S : MyGood)
  for (const SCEV *S : MyBad)

void Formula::initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
    BaseRegs.push_back(Sum);
    BaseRegs.push_back(Sum);

bool Formula::isCanonical(const Loop &L) const {
  assert((Scale == 0 || ScaledReg) &&
         "ScaledReg must be non-null if Scale is non-zero");
    return BaseRegs.size() <= 1;

  if (Scale == 1 && BaseRegs.empty())

  return none_of(BaseRegs, [&L](const SCEV *S) {

void Formula::canonicalize(const Loop &L) {
  if (BaseRegs.empty()) {
    assert(ScaledReg && "Expected 1*reg => reg");
    assert(Scale == 1 && "Expected 1*reg => reg");
    BaseRegs.push_back(ScaledReg);

    ScaledReg = BaseRegs.pop_back_val();

  auto I = find_if(BaseRegs, [&L](const SCEV *S) {
  if (I != BaseRegs.end())
bool Formula::unscale() {
  BaseRegs.push_back(ScaledReg);

bool Formula::hasZeroEnd() const {
  if (UnfoldedOffset || BaseOffset)
  if (BaseRegs.size() != 1 || ScaledReg)

bool Formula::countsDownToZero() const {
  assert(BaseRegs.size() == 1 && "hasZeroEnd should mean one BaseReg");
  const APInt *StepInt;

size_t Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();

Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :

void Formula::deleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())

bool Formula::referencesReg(const SCEV *S) const {

bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
  for (const SCEV *BaseReg : BaseRegs)
    if (RegUses.isRegUsedByUsesOtherThan(BaseReg, LUIdx))
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Formula::print(raw_ostream &OS) const {
  if (BaseOffset.isNonZero()) {
  for (const SCEV *BaseReg : BaseRegs) {
    OS << "reg(" << *BaseReg << ')';
  if (HasBaseReg && BaseRegs.empty()) {
    OS << "**error: HasBaseReg**";
  } else if (!HasBaseReg && !BaseRegs.empty()) {
    OS << "**error: !HasBaseReg**";
    OS << Scale << "*reg(";
  if (UnfoldedOffset.isNonZero()) {
    if (!First) OS << " + ";
    OS << "imm(" << UnfoldedOffset << ')';
                               bool IgnoreSignificantBits = false) {
    if (RA.isAllOnes()) {
      if (LHS->getType()->isPointerTy())
      const APInt &LA = C->getAPInt();
    if ((IgnoreSignificantBits || isAddRecSExtable(AR, SE)) && AR->isAffine()) {
                                IgnoreSignificantBits);
      if (!Step) return nullptr;
                                 IgnoreSignificantBits);
      if (!Start) return nullptr;
      for (const SCEV *S : Add->operands()) {
        if (!Op) return nullptr;
      for (const SCEV *S : Mul->operands()) {
                                IgnoreSignificantBits)) {

    if (C->getSignificantBits() <= 64) {
      return Immediate::getFixed(C->getSExtValue());
  if (Result.isNonZero())
  if (Result.isNonZero())
        return Immediate::getScalable(C->getSExtValue());
  return Immediate::getZero();
    if (SI->getPointerOperand() == OperandVal)
    switch (II->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::prefetch:
    case Intrinsic::masked_load:
      if (II->getArgOperand(0) == OperandVal)
    case Intrinsic::masked_store:
      if (II->getArgOperand(1) == OperandVal)
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (II->getArgOperand(0) == OperandVal ||
          II->getArgOperand(1) == OperandVal)
      if (TTI.getTgtMemIntrinsic(II, IntrInfo)) {
        if (IntrInfo.PtrVal == OperandVal)
    if (RMW->getPointerOperand() == OperandVal)
    if (CmpX->getPointerOperand() == OperandVal)

  MemAccessTy AccessTy = MemAccessTy::getUnknown(Inst->getContext());
      AccessTy.MemTy = Ty;
    AccessTy.AddrSpace = SI->getPointerAddressSpace();
    AccessTy.AddrSpace = LI->getPointerAddressSpace();
    AccessTy.AddrSpace = RMW->getPointerAddressSpace();
    AccessTy.AddrSpace = CmpX->getPointerAddressSpace();
    switch (II->getIntrinsicID()) {
    case Intrinsic::prefetch:
    case Intrinsic::memset:
      AccessTy.AddrSpace =
          II->getArgOperand(0)->getType()->getPointerAddressSpace();
      AccessTy.MemTy = OperandVal->getType();
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      AccessTy.MemTy = OperandVal->getType();
    case Intrinsic::masked_load:
      AccessTy.AddrSpace =
          II->getArgOperand(0)->getType()->getPointerAddressSpace();
    case Intrinsic::masked_store:
      AccessTy.AddrSpace =
          II->getArgOperand(1)->getType()->getPointerAddressSpace();
      if (TTI.getTgtMemIntrinsic(II, IntrInfo) && IntrInfo.PtrVal) {
  if (!Processed.insert(S).second)
    for (const SCEV *S : Add->operands()) {
    const SCEV *Op0, *Op1;
    Value *UVal = U->getValue();
    if (UI && UI->getOpcode() == Instruction::Mul &&
                             const LSRUse &LU, const Formula &F);
                                   const LSRUse &LU, const Formula &F,

  const Loop *L = nullptr;
  ScalarEvolution *SE = nullptr;
  const TargetTransformInfo *TTI = nullptr;
  TargetTransformInfo::LSRCost C;

  Cost(const Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI,
        L(L), SE(&SE), TTI(&TTI), AMK(AMK) {

    return ((C.Insns | C.NumRegs | C.AddRecCost | C.NumIVMuls | C.NumBaseAdds
             | C.ImmCost | C.SetupCost | C.ScaleCost) != ~0u)
        || ((C.Insns & C.NumRegs & C.AddRecCost & C.NumIVMuls & C.NumBaseAdds
             & C.ImmCost & C.SetupCost & C.ScaleCost) == ~0u);

    return C.NumRegs == ~0u;

  void RateFormula(const Formula &F, SmallPtrSetImpl<const SCEV *> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs, const LSRUse &LU,
                   bool HardwareLoopProfitable,
                   SmallPtrSetImpl<const SCEV *> *LoserRegs = nullptr);

  void print(raw_ostream &OS) const;

  void RateRegister(const Formula &F, const SCEV *Reg,
                    SmallPtrSetImpl<const SCEV *> &Regs, const LSRUse &LU,
                    bool HardwareLoopProfitable);
  void RatePrimaryRegister(const Formula &F, const SCEV *Reg,
                           SmallPtrSetImpl<const SCEV *> &Regs,
                           const LSRUse &LU, bool HardwareLoopProfitable,
                           SmallPtrSetImpl<const SCEV *> *LoserRegs);
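// Note: Cost is a thin wrapper around TargetTransformInfo::LSRCost. A formula
// becomes a "loser" once its components are saturated (NumRegs == ~0u), and
// the validity check above requires the components to be either all
// saturated or none of them saturated.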
  Value *OperandValToReplace = nullptr;

  Immediate Offset = Immediate::getZero();

  LSRFixup() = default;

  bool isUseFullyOutsideLoop(const Loop *L) const;

  void print(raw_ostream &OS) const;
  DenseSet<SmallVector<const SCEV *, 4>> Uniquifier;

  using SCEVUseKindPair = PointerIntPair<const SCEV *, 2, KindType>;

  MemAccessTy AccessTy;

  Immediate MinOffset = Immediate::getFixedMax();
  Immediate MaxOffset = Immediate::getFixedMin();

  bool AllFixupsOutsideLoop = true;

  bool AllFixupsUnconditional = true;

  bool RigidFormula = false;

  Type *WidestFixupType = nullptr;

  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, MemAccessTy AT) : Kind(K), AccessTy(AT) {}

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());

  void pushFixup(LSRFixup &f) {
    if (Immediate::isKnownGT(f.Offset, MaxOffset))
      MaxOffset = f.Offset;
    if (Immediate::isKnownLT(f.Offset, MinOffset))
      MinOffset = f.Offset;

  bool HasFormulaWithSameRegs(const Formula &F) const;
  float getNotSelectedProbability(const SCEV *Reg) const;
  bool InsertFormula(const Formula &F, const Loop &L);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses);

  void print(raw_ostream &OS) const;
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 GlobalValue *BaseGV, Immediate BaseOffset,
                                 bool HasBaseReg, int64_t Scale,
                                 Instruction *Fixup = nullptr);

                         [&](unsigned i, const SCEV *Reg) {
                           return i + getSetupCost(Reg, Depth - 1);
void Cost::RateRegister(const Formula &F, const SCEV *Reg,
                        SmallPtrSetImpl<const SCEV *> &Regs, const LSRUse &LU,
                        bool HardwareLoopProfitable) {
    if (AR->getLoop() != L) {
      if (!AR->getLoop()->contains(L)) {

    unsigned LoopCost = 1;
                        F.BaseOffset.isFixed() &&
                        *Step == F.BaseOffset.getFixedValue();
      if ((CanPreIndex || CanPostIndex) && LU.AllFixupsUnconditional)
      if (LU.Kind == LSRUse::ICmpZero && F.countsDownToZero() &&
          HardwareLoopProfitable)
    C.AddRecCost += LoopCost;

      if (!Regs.count(AR->getOperand(1))) {
        RateRegister(F, AR->getOperand(1), Regs, LU, HardwareLoopProfitable);

  C.SetupCost = std::min<unsigned>(C.SetupCost, 1 << 16);
void Cost::RatePrimaryRegister(const Formula &F, const SCEV *Reg,
                               SmallPtrSetImpl<const SCEV *> &Regs,
                               const LSRUse &LU, bool HardwareLoopProfitable,
                               SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  if (LoserRegs && LoserRegs->count(Reg)) {
    RateRegister(F, Reg, Regs, LU, HardwareLoopProfitable);
    if (LoserRegs && isLoser())
void Cost::RateFormula(const Formula &F, SmallPtrSetImpl<const SCEV *> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const LSRUse &LU, bool HardwareLoopProfitable,
                       SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  assert(F.isCanonical(*L) && "Cost is accurate only for canonical formula");
  unsigned PrevAddRecCost = C.AddRecCost;
  unsigned PrevNumRegs = C.NumRegs;
  unsigned PrevNumBaseAdds = C.NumBaseAdds;
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
    RatePrimaryRegister(F, ScaledReg, Regs, LU, HardwareLoopProfitable,
  for (const SCEV *BaseReg : F.BaseRegs) {
    if (VisitedRegs.count(BaseReg)) {
    RatePrimaryRegister(F, BaseReg, Regs, LU, HardwareLoopProfitable,

  size_t NumBaseParts = F.getNumRegs();
  if (NumBaseParts > 1)

  C.NumBaseAdds += (F.UnfoldedOffset.isNonZero());

  for (const LSRFixup &Fixup : LU.Fixups) {
    if (Fixup.Offset.isCompatibleImmediate(F.BaseOffset)) {
      Immediate Offset = Fixup.Offset.addUnsigned(F.BaseOffset);
    } else if (Offset.isNonZero())
          APInt(64, Offset.getKnownMinValue(), true).getSignificantBits();

    if (LU.Kind == LSRUse::Address && Offset.isNonZero() &&

  if (C.NumRegs > TTIRegNum) {
    if (PrevNumRegs > TTIRegNum)
      C.Insns += (C.NumRegs - PrevNumRegs);
      C.Insns += (C.NumRegs - TTIRegNum);

  if (LU.Kind == LSRUse::ICmpZero && !F.hasZeroEnd() &&

  C.Insns += (C.AddRecCost - PrevAddRecCost);

  if (LU.Kind != LSRUse::ICmpZero)
    C.Insns += C.NumBaseAdds - PrevNumBaseAdds;

  C.Insns = std::numeric_limits<unsigned>::max();
  C.NumRegs = std::numeric_limits<unsigned>::max();
  C.AddRecCost = std::numeric_limits<unsigned>::max();
  C.NumIVMuls = std::numeric_limits<unsigned>::max();
  C.NumBaseAdds = std::numeric_limits<unsigned>::max();
  C.ImmCost = std::numeric_limits<unsigned>::max();
  C.SetupCost = std::numeric_limits<unsigned>::max();
  C.ScaleCost = std::numeric_limits<unsigned>::max();
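// Note: saturating every cost component to its maximum value is how a formula
// is marked as a loser; isLoser() only needs to test NumRegs because all of
// the fields are set together.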
bool Cost::isLess(const Cost &Other) const {
      C.Insns != Other.C.Insns)
    return C.Insns < Other.C.Insns;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  OS << C.Insns << " instruction" << (C.Insns == 1 ? " " : "s ");
  OS << C.NumRegs << " reg" << (C.NumRegs == 1 ? "" : "s");
  if (C.AddRecCost != 0)
    OS << ", with addrec cost " << C.AddRecCost;
  if (C.NumIVMuls != 0)
    OS << ", plus " << C.NumIVMuls << " IV mul"
       << (C.NumIVMuls == 1 ? "" : "s");
  if (C.NumBaseAdds != 0)
    OS << ", plus " << C.NumBaseAdds << " base add"
       << (C.NumBaseAdds == 1 ? "" : "s");
  if (C.ScaleCost != 0)
    OS << ", plus " << C.ScaleCost << " scale cost";
    OS << ", plus " << C.ImmCost << " imm cost";
  if (C.SetupCost != 0)
    OS << ", plus " << C.SetupCost << " setup cost";
bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (PN->getIncomingValue(i) == OperandValToReplace &&
          L->contains(PN->getIncomingBlock(i)))

  return !L->contains(UserInst);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRFixup::print(raw_ostream &OS) const {
    Store->getOperand(0)->printAsOperand(OS, false);

  OS << ", OperandValToReplace=";

  for (const Loop *PIL : PostIncLoops) {
    OS << ", PostIncLoop=";
    PIL->getHeader()->printAsOperand(OS, false);

    OS << ", Offset=" << Offset;
bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
  if (F.ScaledReg) Key.push_back(F.ScaledReg);

float LSRUse::getNotSelectedProbability(const SCEV *Reg) const {
  for (const Formula &F : Formulae)
    if (F.referencesReg(Reg))
  return ((float)(Formulae.size() - FNum)) / Formulae.size();

bool LSRUse::InsertFormula(const Formula &F, const Loop &L) {
  assert(F.isCanonical(L) && "Invalid canonical representation");

  if (!Formulae.empty() && RigidFormula)

  if (F.ScaledReg) Key.push_back(F.ScaledReg);

  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
  for (const SCEV *BaseReg : F.BaseRegs)
    assert(!BaseReg->isZero() && "Zero allocated in a base register!");

  Formulae.push_back(F);

void LSRUse::DeleteFormula(Formula &F) {
  if (&F != &Formulae.back())
  Formulae.pop_back();

void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
  SmallPtrSet<const SCEV *, 4> OldRegs = std::move(Regs);
  for (const Formula &F : Formulae) {
    if (F.ScaledReg) Regs.insert(F.ScaledReg);

  for (const SCEV *S : OldRegs)
      RegUses.dropRegister(S, LUIdx);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
    OS << "Address of ";
      OS << *AccessTy.MemTy;
    OS << " in addrspace(" << AccessTy.AddrSpace << ')';

  OS << ", Offsets={";
  bool NeedComma = false;
  for (const LSRFixup &Fixup : Fixups) {
    if (NeedComma) OS << ',';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";

  if (AllFixupsUnconditional)
    OS << ", all-fixups-unconditional";

  if (WidestFixupType)
    OS << ", widest fixup type: " << *WidestFixupType;
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 bool HasBaseReg, int64_t Scale,
  case LSRUse::Address: {
    int64_t FixedOffset =
        BaseOffset.isScalable() ? 0 : BaseOffset.getFixedValue();
    int64_t ScalableOffset =
        BaseOffset.isScalable() ? BaseOffset.getKnownMinValue() : 0;
    return TTI.isLegalAddressingMode(AccessTy.MemTy, BaseGV, FixedOffset,
                                     HasBaseReg, Scale, AccessTy.AddrSpace,
                                     Fixup, ScalableOffset);

  case LSRUse::ICmpZero:
    if (Scale != 0 && HasBaseReg && BaseOffset.isNonZero())

    if (Scale != 0 && Scale != -1)

    if (BaseOffset.isNonZero()) {
      if (BaseOffset.isScalable())

      BaseOffset = BaseOffset.getFixed(-(uint64_t)BaseOffset.getFixedValue());
      return TTI.isLegalICmpImmediate(BaseOffset.getFixedValue());

    return !BaseGV && Scale == 0 && BaseOffset.isZero();

  case LSRUse::Special:
    return !BaseGV && (Scale == 0 || Scale == -1) && BaseOffset.isZero();

                                 Immediate MinOffset, Immediate MaxOffset,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 bool HasBaseReg, int64_t Scale) {
  if (BaseOffset.isNonZero() &&
      (BaseOffset.isScalable() != MinOffset.isScalable() ||
       BaseOffset.isScalable() != MaxOffset.isScalable()))

  int64_t Base = BaseOffset.getKnownMinValue();
  int64_t Min = MinOffset.getKnownMinValue();
  int64_t Max = MaxOffset.getKnownMinValue();
  MinOffset = Immediate::get((uint64_t)Base + Min, MinOffset.isScalable());
  MaxOffset = Immediate::get((uint64_t)Base + Max, MaxOffset.isScalable());

                              HasBaseReg, Scale) &&

                                 Immediate MinOffset, Immediate MaxOffset,
                                 LSRUse::KindType Kind, MemAccessTy AccessTy,
                                 const Formula &F, const Loop &L) {
  assert((F.isCanonical(L) || F.Scale != 0));
                              F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale);

                       Immediate MaxOffset, LSRUse::KindType Kind,
                       Immediate BaseOffset, bool HasBaseReg, int64_t Scale) {
                              BaseOffset, HasBaseReg, Scale) ||
                               BaseGV, BaseOffset, true, 0));

                       Immediate MaxOffset, LSRUse::KindType Kind,
                       MemAccessTy AccessTy, const Formula &F) {
  return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV,
                    F.BaseOffset, F.HasBaseReg, F.Scale);

    return TTI.isLegalAddScalableImmediate(Offset.getKnownMinValue());

  return TTI.isLegalAddImmediate(Offset.getFixedValue());
                                 const LSRUse &LU, const Formula &F) {
  if (LU.Kind == LSRUse::Address && TTI.LSRWithInstrQueries()) {
    for (const LSRFixup &Fixup : LU.Fixups)
                                (F.BaseOffset + Fixup.Offset), F.HasBaseReg,
                                F.Scale, Fixup.UserInst))
                              LU.AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg,

                                     const LSRUse &LU, const Formula &F,
    return F.Scale != 1;

  case LSRUse::Address: {
    int64_t ScalableMin = 0, ScalableMax = 0, FixedMin = 0, FixedMax = 0;
    if (F.BaseOffset.isScalable()) {
      ScalableMin = (F.BaseOffset + LU.MinOffset).getKnownMinValue();
      ScalableMax = (F.BaseOffset + LU.MaxOffset).getKnownMinValue();
      FixedMin = (F.BaseOffset + LU.MinOffset).getFixedValue();
      FixedMax = (F.BaseOffset + LU.MaxOffset).getFixedValue();
                                    F.HasBaseReg, F.Scale, LU.AccessTy.AddrSpace);
                                    F.HasBaseReg, F.Scale, LU.AccessTy.AddrSpace);
           "Legal addressing mode has an illegal cost!");
    return std::max(ScaleCostMinOffset, ScaleCostMaxOffset);

  case LSRUse::ICmpZero:
  case LSRUse::Special:
                             LSRUse::KindType Kind, MemAccessTy AccessTy,
  if (BaseOffset.isZero() && !BaseGV)

  int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  if (!HasBaseReg && Scale == 1) {

  if (HasBaseReg && BaseOffset.isNonZero() && Kind != LSRUse::ICmpZero &&

                             Immediate MaxOffset, LSRUse::KindType Kind,
                             MemAccessTy AccessTy, const SCEV *S,
  if (S->isZero()) return true;
  if (!S->isZero()) return false;

  if (BaseOffset.isZero() && !BaseGV)

  if (BaseOffset.isScalable())

  int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
                              BaseOffset, HasBaseReg, Scale);
  const SCEV *IncExpr;

  IVInc(Instruction *U, Value *O, const SCEV *E)
      : UserInst(U), IVOperand(O), IncExpr(E) {}

  const SCEV *ExprBase = nullptr;

  IVChain() = default;
  IVChain(const IVInc &Head, const SCEV *Base)
      : Incs(1, Head), ExprBase(Base) {}

  const_iterator begin() const {
    return std::next(Incs.begin());
  const_iterator end() const {

  bool hasIncs() const { return Incs.size() >= 2; }

  bool isProfitableIncrement(const SCEV *OperExpr,
                             const SCEV *IncExpr,
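  // Note: begin() deliberately skips Incs[0], the chain head, so that
  // iteration visits only the increments; hasIncs() therefore requires at
  // least two entries (a head plus one increment).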
  SmallPtrSet<Instruction*, 4> FarUsers;
  SmallPtrSet<Instruction*, 4> NearUsers;

  ScalarEvolution &SE;
  AssumptionCache &AC;
  TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  MemorySSAUpdater *MSSAU;
  bool HardwareLoopProfitable = false;

  SetVector<int64_t, SmallVector<int64_t, 8>, SmallSet<int64_t, 8>> Factors;

  SmallSetVector<Type *, 4> Types;

  RegUseTracker RegUses;

  static const unsigned MaxChains = 8;

  SmallPtrSet<Use*, MaxChains> IVIncSet;

  SmallVector<llvm::WeakVH, 2> ScalarEvolutionIVs;

  SmallSetVector<Instruction *, 4> InsertedNonLCSSAInsts;
  void OptimizeShadowIV();
  bool FindIVUserForCond(Instruction *Cond, IVStrideUse *&CondUse);
  void OptimizeLoopTermCond();

  void ChainInstruction(Instruction *UserInst, Instruction *IVOper,
                        SmallVectorImpl<ChainUsers> &ChainUsersVec);
  void FinalizeChain(IVChain &Chain);
  void CollectChains();
  void GenerateIVChain(const IVChain &Chain,
                       SmallVectorImpl<WeakTrackingVH> &DeadInsts);

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  using UseMapTy = DenseMap<LSRUse::SCEVUseKindPair, size_t>;

  bool reconcileNewOffset(LSRUse &LU, Immediate NewOffset, bool HasBaseReg,
                          LSRUse::KindType Kind, MemAccessTy AccessTy);

  std::pair<size_t, Immediate> getUse(const SCEV *&Expr, LSRUse::KindType Kind,
                                      MemAccessTy AccessTy);

  void DeleteUse(LSRUse &LU, size_t LUIdx);

  LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);

  void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);
  bool IsFixupExecutedEachIncrement(const LSRFixup &LF) const;

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);
  void GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
                                  size_t Idx, bool IsScaledReg = false);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
                                   const Formula &Base, size_t Idx,
                                   bool IsScaledReg = false);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsetsImpl(LSRUse &LU, unsigned LUIdx,
                                   const Formula &Base,
                                   const SmallVectorImpl<Immediate> &Worklist,
                                   size_t Idx, bool IsScaledReg = false);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();

  size_t EstimateSearchSpaceComplexity() const;
  void NarrowSearchSpaceByDetectingSupersets();
  void NarrowSearchSpaceByCollapsingUnrolledCode();
  void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  void NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
  void NarrowSearchSpaceByFilterPostInc();
  void NarrowSearchSpaceByDeletingCostlyFormulas();
  void NarrowSearchSpaceByPickingWinnerRegs();
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

                  const SmallVectorImpl<Instruction *> &Inputs) const;
                  const LSRUse &LU) const;

  Value *Expand(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
                SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
  void RewriteForPHI(PHINode *PN, const LSRUse &LU, const LSRFixup &LF,
                     SmallVectorImpl<WeakTrackingVH> &DeadInsts);
  void Rewrite(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
               SmallVectorImpl<WeakTrackingVH> &DeadInsts);
  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution);

  LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, DominatorTree &DT,
              LoopInfo &LI, const TargetTransformInfo &TTI,
              AssumptionCache &AC, TargetLibraryInfo &TLI,
              MemorySSAUpdater *MSSAU);

  bool getChanged() const { return Changed; }
  const SmallVectorImpl<WeakVH> &getScalarEvolutionIVs() const {
    return ScalarEvolutionIVs;

  void print_factors_and_types(raw_ostream &OS) const;
  void print_fixups(raw_ostream &OS) const;
  void print_uses(raw_ostream &OS) const;
  void print(raw_ostream &OS) const;
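// Note: the declarations above follow the phases of the pass: early IR
// rewrites (OptimizeShadowIV, OptimizeLoopTermCond), collection of fixups and
// IV chains, formula generation (the Generate* family), search-space
// narrowing (the NarrowSearchSpace* family), solving, and finally expansion
// and rewriting of the chosen solution.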
void LSRInstance::OptimizeShadowIV() {
    Type *DestTy = nullptr;
    bool IsSigned = false;

      DestTy = UCast->getDestTy();
      DestTy = SCast->getDestTy();
    if (!DestTy) continue;

    if (Mantissa == -1) continue;

    unsigned Entry, Latch;

    if (!Init) continue;
    Constant *NewInit = ConstantFP::get(DestTy, IsSigned ?

    BinaryOperator *Incr =
    if (!Incr) continue;
    if (Incr->getOpcode() != Instruction::Add
        && Incr->getOpcode() != Instruction::Sub)

    ConstantInt *C = nullptr;

    if (!C->getValue().isStrictlyPositive())

    Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
        Incr->getOpcode() == Instruction::Add ? Instruction::FAdd
                                              : Instruction::FSub,
bool LSRInstance::FindIVUserForCond(Instruction *Cond, IVStrideUse *&CondUse) {
  for (IVStrideUse &U : IU)
    if (U.getUser() == Cond) {
Instruction *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse *&CondUse) {
  const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount);
  if (IterationCount != SE.getSCEV(Sel)) return Cond;

  const SCEVNAryExpr *Max = nullptr;
    Pred = ICmpInst::ICMP_SLE;
    Pred = ICmpInst::ICMP_SLT;
    Pred = ICmpInst::ICMP_ULT;

  if (Max->getNumOperands() != 2)

  const SCEV *MaxLHS = Max->getOperand(0);
  const SCEV *MaxRHS = Max->getOperand(1);
      (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One)))

         "Loop condition operand is an addrec in a different loop!");

  Value *NewRHS = nullptr;
  if (ICmpInst::isTrueWhenEqual(Pred)) {
      if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS)
        NewRHS = BO->getOperand(0);
      if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS)
        NewRHS = BO->getOperand(0);
      NewRHS = SU->getValue();

  ICmpInst *NewCond = new ICmpInst(Cond->getIterator(), Pred,
                                   Cond->getOperand(0), NewRHS, "scmp");

  Cond->replaceAllUsesWith(NewCond);

  Cond->eraseFromParent();
  if (Cmp->use_empty()) {
    Cmp->eraseFromParent();
LSRInstance::OptimizeLoopTermCond() {
  SmallPtrSet<Instruction *, 4> PostIncs;

  SmallVector<BasicBlock*, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (BasicBlock *ExitingBlock : ExitingBlocks) {
    IVStrideUse *CondUse = nullptr;
    if (!FindIVUserForCond(Cond, CondUse))

      Cond = OptimizeMax(Cmp, CondUse);

    if (!DT.dominates(ExitingBlock, LatchBlock))

    if (LatchBlock != ExitingBlock)
      for (const IVStrideUse &UI : IU)
        if (&UI != CondUse &&

          const SCEV *A = IU.getStride(*CondUse, L);
          const SCEV *B = IU.getStride(UI, L);
          if (!A || !B) continue;

            if (const SCEVConstant *D =
              const ConstantInt *C = D->getValue();
              if (C->isOne() || C->isMinusOne())
                goto decline_post_inc;
              if (C->getValue().getSignificantBits() >= 64 ||
                  C->getValue().isMinSignedValue())
                goto decline_post_inc;
              MemAccessTy AccessTy =
              int64_t Scale = C->getSExtValue();
                                            AccessTy.AddrSpace))
                goto decline_post_inc;
                                            AccessTy.AddrSpace))
                goto decline_post_inc;

    LLVM_DEBUG(dbgs() << "  Change loop exiting icmp to use postinc iv: "

      if (Cond->hasOneUse()) {
        Cond->setName(L->getHeader()->getName() + ".termcond");

  IVIncInsertPos = L->getLoopLatch()->getTerminator();
  for (Instruction *Inst : PostIncs)
bool LSRInstance::reconcileNewOffset(LSRUse &LU, Immediate NewOffset,
                                     bool HasBaseReg, LSRUse::KindType Kind,
                                     MemAccessTy AccessTy) {
  Immediate NewMinOffset = LU.MinOffset;
  Immediate NewMaxOffset = LU.MaxOffset;
  MemAccessTy NewAccessTy = AccessTy;

  if (LU.Kind != Kind)

  if (Kind == LSRUse::Address) {
    if (AccessTy.MemTy != LU.AccessTy.MemTy) {
      NewAccessTy = MemAccessTy::getUnknown(AccessTy.MemTy->getContext(),
                                            AccessTy.AddrSpace);

  if (Immediate::isKnownLT(NewOffset, LU.MinOffset)) {
                          LU.MaxOffset - NewOffset, HasBaseReg))
    NewMinOffset = NewOffset;
  } else if (Immediate::isKnownGT(NewOffset, LU.MaxOffset)) {
                          NewOffset - LU.MinOffset, HasBaseReg))
    NewMaxOffset = NewOffset;

  if (NewAccessTy.MemTy && NewAccessTy.MemTy->isVoidTy() &&
      (NewMinOffset.isScalable() || NewMaxOffset.isScalable()))

  LU.MinOffset = NewMinOffset;
  LU.MaxOffset = NewMaxOffset;
  LU.AccessTy = NewAccessTy;
std::pair<size_t, Immediate> LSRInstance::getUse(const SCEV *&Expr,
                                                 LSRUse::KindType Kind,
                                                 MemAccessTy AccessTy) {
  const SCEV *Copy = Expr;
    Offset = Immediate::getFixed(0);

  std::pair<UseMapTy::iterator, bool> P =
      UseMap.try_emplace(LSRUse::SCEVUseKindPair(Expr, Kind));
    size_t LUIdx = P.first->second;
    LSRUse &LU = Uses[LUIdx];
    if (reconcileNewOffset(LU, Offset, true, Kind, AccessTy))
      return std::make_pair(LUIdx, Offset);

  size_t LUIdx = Uses.size();
  P.first->second = LUIdx;
  Uses.push_back(LSRUse(Kind, AccessTy));
  LSRUse &LU = Uses[LUIdx];

  return std::make_pair(LUIdx, Offset);

void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
  if (&LU != &Uses.back())
  RegUses.swapAndDropUse(LUIdx, Uses.size());
LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
                                       const LSRUse &OrigLU) {
  for (LSRUse &LU : Uses) {
    if (&LU != &OrigLU &&
        LU.Kind != LSRUse::ICmpZero &&
        LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
        LU.WidestFixupType == OrigLU.WidestFixupType &&
        LU.HasFormulaWithSameRegs(OrigF)) {
      for (const Formula &F : LU.Formulae) {
        if (F.BaseRegs == OrigF.BaseRegs &&
            F.ScaledReg == OrigF.ScaledReg &&
            F.BaseGV == OrigF.BaseGV &&
            F.Scale == OrigF.Scale &&
            F.UnfoldedOffset == OrigF.UnfoldedOffset) {
          if (F.BaseOffset.isZero())
2906 SmallSetVector<const SCEV *, 4> Strides;
2910 for (
const IVStrideUse &U : IU) {
2911 const SCEV *Expr = IU.getExpr(U);
2929 }
while (!Worklist.
empty());
2933 for (SmallSetVector<const SCEV *, 4>::const_iterator
2935 for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
2936 std::next(
I); NewStrideIter !=
E; ++NewStrideIter) {
2937 const SCEV *OldStride = *
I;
2938 const SCEV *NewStride = *NewStrideIter;
2948 if (
const SCEVConstant *Factor =
2951 if (Factor->getAPInt().getSignificantBits() <= 64 && !Factor->isZero())
2952 Factors.insert(Factor->getAPInt().getSExtValue());
2953 }
else if (
const SCEVConstant *Factor =
2957 if (Factor->getAPInt().getSignificantBits() <= 64 && !Factor->isZero())
2958 Factors.insert(Factor->getAPInt().getSExtValue());
2964 if (Types.size() == 1)
2976 for(; OI != OE; ++OI) {
2995 return Trunc->getOperand(0);
3028 if (SubExpr->getSCEVType() ==
scAddExpr)
3031 if (SubExpr->getSCEVType() !=
scMulExpr)
bool IVChain::isProfitableIncrement(const SCEV *OperExpr,
                                    const SCEV *IncExpr,
                                    ScalarEvolution &SE) {
  SmallPtrSet<const SCEV*, 8> Processed;

  if (!Chain.hasIncs())

  if (!Users.empty()) {
    LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n";
               for (Instruction *Inst : Users) {
                 dbgs() << "  " << *Inst << "\n";
               });
  assert(!Chain.Incs.empty() && "empty IV chains are not allowed");

      && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) {

  const SCEV *LastIncExpr = nullptr;
  unsigned NumConstIncrements = 0;
  unsigned NumVarIncrements = 0;
  unsigned NumReusedIncrements = 0;

  if (TTI.isProfitableLSRChainElement(Chain.Incs[0].UserInst))

  for (const IVInc &Inc : Chain) {
    if (TTI.isProfitableLSRChainElement(Inc.UserInst))
    if (Inc.IncExpr->isZero())
      ++NumConstIncrements;
    if (Inc.IncExpr == LastIncExpr)
      ++NumReusedIncrements;
    LastIncExpr = Inc.IncExpr;

  if (NumConstIncrements > 1)
  cost += NumVarIncrements;
  cost -= NumReusedIncrements;

  LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost
void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
                                   SmallVectorImpl<ChainUsers> &ChainUsersVec) {
  const SCEV *const OperExpr = SE.getSCEV(NextIV);
  const SCEV *const OperExprBase = getExprBase(OperExpr);

  unsigned ChainIdx = 0, NChains = IVChainVec.size();
  const SCEV *LastIncExpr = nullptr;
  for (; ChainIdx < NChains; ++ChainIdx) {
    IVChain &Chain = IVChainVec[ChainIdx];

    const SCEV *PrevExpr = SE.getSCEV(PrevIV);
    const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr);

    if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) {
      LastIncExpr = IncExpr;

  if (ChainIdx == NChains) {
    LastIncExpr = OperExpr;
    IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr),
    ChainUsersVec.resize(NChains);
    LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst
                      << ") IV=" << *LastIncExpr << "\n");
    LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst
                      << ") IV+" << *LastIncExpr << "\n");
    IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr));
  IVChain &Chain = IVChainVec[ChainIdx];

  SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers;
  if (!LastIncExpr->isZero()) {
    ChainUsersVec[ChainIdx].FarUsers.insert_range(NearUsers);

  for (User *U : IVOper->users()) {
    IVChain::const_iterator IncIter = Chain.Incs.begin();
    IVChain::const_iterator IncEnd = Chain.Incs.end();
    for( ; IncIter != IncEnd; ++IncIter) {
      if (IncIter->UserInst == OtherUse)
    if (IncIter != IncEnd)
        && IU.isIVUserOrOperand(OtherUse)) {
    NearUsers.insert(OtherUse);

  ChainUsersVec[ChainIdx].FarUsers.erase(UserInst);
void LSRInstance::CollectChains() {
  SmallVector<BasicBlock *,8> LatchPath;
       Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) {

  for (BasicBlock *BB : reverse(LatchPath)) {
    for (Instruction &I : *BB) {
      for (unsigned ChainIdx = 0, NChains = IVChainVec.size();
           ChainIdx < NChains; ++ChainIdx) {
        ChainUsersVec[ChainIdx].NearUsers.erase(&I);

      SmallPtrSet<Instruction*, 4> UniqueOperands;
      while (IVOpIter != IVOpEnd) {
        if (UniqueOperands.insert(IVOpInst).second)
          ChainInstruction(&I, IVOpInst, ChainUsersVec);
        IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);

  for (PHINode &PN : L->getHeader()->phis()) {
      ChainInstruction(&PN, IncV, ChainUsersVec);

  unsigned ChainIdx = 0;
  for (unsigned UsersIdx = 0, NChains = IVChainVec.size();
       UsersIdx < NChains; ++UsersIdx) {
                             ChainUsersVec[UsersIdx].FarUsers, SE, TTI))
    if (ChainIdx != UsersIdx)
      IVChainVec[ChainIdx] = IVChainVec[UsersIdx];
    FinalizeChain(IVChainVec[ChainIdx]);
  IVChainVec.resize(ChainIdx);
void LSRInstance::FinalizeChain(IVChain &Chain) {
  assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
  LLVM_DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");

  for (const IVInc &Inc : Chain) {
    auto UseI = find(Inc.UserInst->operands(), Inc.IVOperand);
    assert(UseI != Inc.UserInst->op_end() && "cannot find IV operand");
    IVIncSet.insert(UseI);

  Immediate IncOffset = Immediate::getZero();
        C->getSignificantBits() > 64)
      IncOffset = Immediate::getScalable(C->getSExtValue());
void LSRInstance::GenerateIVChain(const IVChain &Chain,
                                  SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  const IVInc &Head = Chain.Incs[0];

  Value *IVSrc = nullptr;
  while (IVOpIter != IVOpEnd) {
    if (SE.getSCEV(*IVOpIter) == Head.IncExpr
        || SE.getSCEV(IVSrc) == Head.IncExpr) {
    IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
  if (IVOpIter == IVOpEnd) {
    LLVM_DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n");
  assert(IVSrc && "Failed to find IV chain source");

  const SCEV *LeftOverExpr = nullptr;
  const SCEV *Accum = SE.getZero(IntTy);

  for (const IVInc &Inc : Chain) {
      InsertPt = L->getLoopLatch()->getTerminator();

    Value *IVOper = IVSrc;
    if (!Inc.IncExpr->isZero()) {
      LeftOverExpr = LeftOverExpr ?
        SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr;

      bool FoundBase = false;
      for (auto [MapScev, MapIVOper] : reverse(Bases)) {
        const SCEV *Remainder = SE.getMinusSCEV(Accum, MapScev);
        if (!Remainder->isZero()) {
          Value *IncV = Rewriter.expandCodeFor(Remainder, IntTy, InsertPt);
          const SCEV *IVOperExpr =
          IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt);

      if (!FoundBase && LeftOverExpr && !LeftOverExpr->isZero()) {
        Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt);
        IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt);

      assert(IVTy == IVOper->getType() && "inconsistent IV increment type");

      LeftOverExpr = nullptr;

    if (IVTy != OperTy) {
             "cannot extend a chained IV");
      IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain");
    Inc.UserInst->replaceUsesOfWith(Inc.IVOperand, IVOper);

  for (PHINode &Phi : L->getHeader()->phis()) {
        Phi.getIncomingValueForBlock(L->getLoopLatch()));
    Value *IVOper = IVSrc;
    if (IVTy != PostIncTy) {
      IRBuilder<> Builder(L->getLoopLatch()->getTerminator());
      Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc());
      IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain");
    Phi.replaceUsesOfWith(PostIncV, IVOper);
void LSRInstance::CollectFixupsAndInitialFormulae() {
  BranchInst *ExitBranch = nullptr;
  bool SaveCmp = TTI.canSaveCmp(L, &ExitBranch, &SE, &LI, &DT, &AC, &TLI);

  SmallPtrSet<const SCEV *, 16> Regs;
  DenseSet<const SCEV *> VisitedRegs;
  DenseSet<size_t> VisitedLSRUse;

  for (const IVStrideUse &U : IU) {
    assert(UseI != UserInst->op_end() && "cannot find IV operand");
    if (IVIncSet.count(UseI)) {
      LLVM_DEBUG(dbgs() << "Use is in profitable chain: " << **UseI << '\n');

    LSRUse::KindType Kind = LSRUse::Basic;
    MemAccessTy AccessTy;
      Kind = LSRUse::Address;

    const SCEV *S = IU.getExpr(U);
      if (CI->isEquality()) {
        Value *NV = CI->getOperand(1);
        if (NV == U.getOperandValToReplace()) {
          CI->setOperand(1, CI->getOperand(0));
          CI->setOperand(0, NV);
          NV = CI->getOperand(1);
            (!NV->getType()->isPointerTy() ||
          Kind = LSRUse::ICmpZero;
        } else if (L->isLoopInvariant(NV) &&
                   !NV->getType()->isPointerTy()) {
          Kind = LSRUse::ICmpZero;

        for (size_t i = 0, e = Factors.size(); i != e; ++i)
          if (Factors[i] != -1)
            Factors.insert(-(uint64_t)Factors[i]);

    std::pair<size_t, Immediate> P = getUse(S, Kind, AccessTy);
    size_t LUIdx = P.first;
    LSRUse &LU = Uses[LUIdx];

    LSRFixup &LF = LU.getNewFixup();
    LF.UserInst = UserInst;
    LF.OperandValToReplace = U.getOperandValToReplace();
    LF.PostIncLoops = TmpPostIncLoops;
    LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
    LU.AllFixupsUnconditional &= IsFixupExecutedEachIncrement(LF);

    if (!VisitedLSRUse.count(LUIdx) && !LF.isUseFullyOutsideLoop(L)) {
      F.initialMatch(S, L, SE);
      BaselineCost.RateFormula(F, Regs, VisitedRegs, LU,
                               HardwareLoopProfitable);
      VisitedLSRUse.insert(LUIdx);

    if (!LU.WidestFixupType ||
      LU.WidestFixupType = LF.OperandValToReplace->getType();

    if (LU.Formulae.empty()) {
      InsertInitialFormula(S, LU, LUIdx);
      CountRegisters(LU.Formulae.back(), LUIdx);
void LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU,
    LU.RigidFormula = true;

  F.initialMatch(S, L, SE);
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Initial formula already exists!"); (void)Inserted;

LSRInstance::InsertSupplementalFormula(const SCEV *S,
                                       LSRUse &LU, size_t LUIdx) {
  F.BaseRegs.push_back(S);
  F.HasBaseReg = true;
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;

void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
    RegUses.countRegister(F.ScaledReg, LUIdx);
  for (const SCEV *BaseReg : F.BaseRegs)
    RegUses.countRegister(BaseReg, LUIdx);

bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
         "Formula is illegal");

  if (!LU.InsertFormula(F, *L))

  CountRegisters(F, LUIdx);

bool LSRInstance::IsFixupExecutedEachIncrement(const LSRFixup &LF) const {
LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
  SmallPtrSet<const SCEV *, 32> Visited;

  while (!Worklist.empty()) {
    if (!Visited.insert(S).second)

      const Value *V = US->getValue();
        if (L->contains(Inst)) continue;
      for (const Use &U : V->uses()) {
        if (UserInst->getParent()->getParent() != L->getHeader()->getParent())

          bool HasIncompatibleEHPTerminatedBlock = false;
          for (unsigned int I = 0; I < PhiNode->getNumIncomingValues(); I++) {
            if (PhiNode->getIncomingValue(I) == ExpectedValue) {
              if (PhiNode->getIncomingBlock(I)->getTerminator()->isEHPad()) {
                HasIncompatibleEHPTerminatedBlock = true;
          if (HasIncompatibleEHPTerminatedBlock) {

          unsigned OtherIdx = !U.getOperandNo();
          Value *OtherOp = ICI->getOperand(OtherIdx);

        std::pair<size_t, Immediate> P =
            getUse(S, LSRUse::Basic, MemAccessTy());
        size_t LUIdx = P.first;
        LSRUse &LU = Uses[LUIdx];
        LSRFixup &LF = LU.getNewFixup();
        LF.UserInst = const_cast<Instruction *>(UserInst);
        LF.OperandValToReplace = U;
        LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
        LU.AllFixupsUnconditional &= IsFixupExecutedEachIncrement(LF);
        if (!LU.WidestFixupType ||
          LU.WidestFixupType = LF.OperandValToReplace->getType();
        InsertSupplementalFormula(US, LU, LUIdx);
        CountRegisters(LU.Formulae.back(), Uses.size() - 1);
                            unsigned Depth = 0) {
    for (const SCEV *S : Add->operands()) {
    const SCEV *Start, *Step;
      if (Start->isZero())
        Remainder = nullptr;
      if (Remainder != Start) {

                                 LSRUse &LU, const SCEV *S, const Loop *L,
  if (LU.Kind != LSRUse::Address ||
      !LU.AccessTy.getType()->isIntOrIntVectorTy())

  if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, S->getType()) ||
void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
                                             const Formula &Base,
                                             unsigned Depth, size_t Idx,
  const SCEV *BaseReg = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];

  const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE);

  if (AddOps.size() == 1)

                          LU.AccessTy, *J, Base.getNumRegs() > 1))

    InnerAddOps.append(std::next(J), std::as_const(AddOps).end());

    if (InnerAddOps.size() == 1 &&
                          LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1))

    const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);

      if (F.UnfoldedOffset.isNonZero() && F.UnfoldedOffset.isScalable())
          Immediate::getFixed((uint64_t)F.UnfoldedOffset.getFixedValue() +
        F.ScaledReg = nullptr;
        F.BaseRegs.erase(F.BaseRegs.begin() + Idx);
    } else if (IsScaledReg)
      F.ScaledReg = InnerSum;
      F.BaseRegs[Idx] = InnerSum;

        Immediate::getFixed((uint64_t)F.UnfoldedOffset.getFixedValue() +
      F.BaseRegs.push_back(*J);

    if (InsertFormula(LU, LUIdx, F))
      GenerateReassociations(LU, LUIdx, LU.Formulae.back(),
void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
  assert(Base.isCanonical(*L) && "Input must be in the canonical form");

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
    GenerateReassociationsImpl(LU, LUIdx, Base, Depth, i);

  if (Base.Scale == 1)
    GenerateReassociationsImpl(LU, LUIdx, Base, Depth,
void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
  if (Base.BaseRegs.size() + (Base.Scale == 1) +
      (Base.UnfoldedOffset.isNonZero()) <=

  Formula NewBase = Base;
  NewBase.BaseRegs.clear();
  Type *CombinedIntegerType = nullptr;
  for (const SCEV *BaseReg : Base.BaseRegs) {
      if (!CombinedIntegerType)
      Ops.push_back(BaseReg);
      NewBase.BaseRegs.push_back(BaseReg);

  if (Ops.size() == 0)

  auto GenerateFormula = [&](const SCEV *Sum) {
    Formula F = NewBase;
    F.BaseRegs.push_back(Sum);
    (void)InsertFormula(LU, LUIdx, F);

  if (Ops.size() > 1) {

  if (NewBase.UnfoldedOffset.isNonZero() && NewBase.UnfoldedOffset.isFixed()) {
    assert(CombinedIntegerType && "Missing a type for the unfolded offset");
        NewBase.UnfoldedOffset.getFixedValue(), true));
    NewBase.UnfoldedOffset = Immediate::getFixed(0);
void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
                                              const Formula &Base, size_t Idx,
  const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
  if (G->isZero() || !GV)
  if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
    F.BaseRegs[Idx] = G;
  (void)InsertFormula(LU, LUIdx, F);

void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
  if (Base.BaseGV) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
    GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, i);
  if (Base.Scale == 1)
    GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, -1,
void LSRInstance::GenerateConstantOffsetsImpl(
    LSRUse &LU, unsigned LUIdx, const Formula &Base,
    const SmallVectorImpl<Immediate> &Worklist, size_t Idx, bool IsScaledReg) {

  auto GenerateOffset = [&](const SCEV *G, Immediate Offset) {
    if (!Base.BaseOffset.isCompatibleImmediate(Offset))
    F.BaseOffset = Base.BaseOffset.subUnsigned(Offset);

    if (isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) {
      const SCEV *NewOffset = Offset.getSCEV(SE, G->getType());
        F.ScaledReg = nullptr;
        F.deleteBaseReg(F.BaseRegs[Idx]);
    } else if (IsScaledReg)
      F.BaseRegs[Idx] = NewG;

    (void)InsertFormula(LU, LUIdx, F);

  const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];

    const APInt *StepInt;
    for (Immediate Offset : Worklist) {
        Offset = Immediate::getFixed(Offset.getFixedValue() - Step);

  for (Immediate Offset : Worklist)

  if (G->isZero() || Imm.isZero() ||
      !Base.BaseOffset.isCompatibleImmediate(Imm))
  F.BaseOffset = F.BaseOffset.addUnsigned(Imm);
  if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
    F.BaseRegs[Idx] = G;

  (void)InsertFormula(LU, LUIdx, F);

void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
  if (LU.MaxOffset != LU.MinOffset)

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
    GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, i);
  if (Base.Scale == 1)
    GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, -1,
void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
  if (LU.Kind != LSRUse::ICmpZero) return;

  if (LU.MinOffset != LU.MaxOffset) return;

  if (Base.ScaledReg && Base.ScaledReg->getType()->isPointerTy())
  for (const SCEV *BaseReg : Base.BaseRegs)
    if (BaseReg->getType()->isPointerTy())
  assert(!Base.BaseGV && "ICmpZero use is not legal!");

  for (int64_t Factor : Factors) {
    if (Base.BaseOffset.isMin() && Factor == -1)
    if (Base.BaseOffset.isNonZero() && Base.BaseOffset.isScalable())
    Immediate NewBaseOffset = Base.BaseOffset.mulUnsigned(Factor);
    assert(Factor != 0 && "Zero factor not expected!");
    if (NewBaseOffset.getFixedValue() / Factor !=
        Base.BaseOffset.getFixedValue())

    Immediate Offset = LU.MinOffset;
    if (Offset.isMin() && Factor == -1)
    if (Offset.getFixedValue() / Factor != LU.MinOffset.getFixedValue())

    F.BaseOffset = NewBaseOffset;

    F.BaseOffset = F.BaseOffset.addUnsigned(Offset).subUnsigned(LU.MinOffset);
    const SCEV *FactorS = SE.getConstant(IntTy, Factor);
    for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {

    if (F.UnfoldedOffset.isNonZero()) {
      if (F.UnfoldedOffset.isMin() && Factor == -1)
      F.UnfoldedOffset = F.UnfoldedOffset.mulUnsigned(Factor);
      if (F.UnfoldedOffset.getFixedValue() / Factor !=
          Base.UnfoldedOffset.getFixedValue())
                                IntTy, F.UnfoldedOffset.getFixedValue()))

    (void)InsertFormula(LU, LUIdx, F);
void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
  if (Base.Scale != 0 && !Base.unscale())

  assert(Base.Scale == 0 && "unscale did not did its job!");

  for (int64_t Factor : Factors) {
    Base.Scale = Factor;
    Base.HasBaseReg = Base.BaseRegs.size() > 1;

    if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
      if (LU.Kind == LSRUse::Basic &&
          isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LSRUse::Special,
                     LU.AccessTy, Base) &&
          LU.AllFixupsOutsideLoop)
        LU.Kind = LSRUse::Special;

    if (LU.Kind == LSRUse::ICmpZero && !Base.HasBaseReg &&
        Base.BaseOffset.isZero() && !Base.BaseGV)

    for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
      if (AR && (AR->getLoop() == L || LU.AllFixupsOutsideLoop)) {
        const SCEV *FactorS = SE.getConstant(IntTy, Factor);
        if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true))
          if (!Quotient->isZero()) {
            F.ScaledReg = Quotient;
            F.deleteBaseReg(F.BaseRegs[i]);
            if (F.Scale == 1 && (F.BaseRegs.empty() ||
                                 (AR->getLoop() != L && LU.AllFixupsOutsideLoop)))
            if (F.Scale == 1 && LU.AllFixupsOutsideLoop)
            (void)InsertFormula(LU, LUIdx, F);
  const SCEV *Result = nullptr;
  for (auto &L : Loops) {
    if (!New || (Result && New != Result))
  assert(Result && "failed to create expression");
void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
  if (Base.BaseGV) return;

  if (Base.ScaledReg && Base.ScaledReg->getType()->isPointerTy())
             [](const SCEV *S) { return S->getType()->isPointerTy(); }))

  for (auto &LF : LU.Fixups)
    Loops.push_back(LF.PostIncLoops);

  for (Type *SrcTy : Types) {
      const SCEV *NewScaledReg =
      if (!NewScaledReg || NewScaledReg->isZero())
      F.ScaledReg = NewScaledReg;

      bool HasZeroBaseReg = false;
      for (const SCEV *&BaseReg : F.BaseRegs) {
        const SCEV *NewBaseReg =
        if (!NewBaseReg || NewBaseReg->isZero()) {
          HasZeroBaseReg = true;

      if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))

      (void)InsertFormula(LU, LUIdx, F);
4497 const SCEV *OrigReg;
4499 WorkItem(
size_t LI, Immediate
I,
const SCEV *R)
4500 : LUIdx(LI),
Imm(
I), OrigReg(
R) {}
4502 void print(raw_ostream &OS)
const;
4508#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4509void WorkItem::print(raw_ostream &OS)
const {
4510 OS <<
"in formulae referencing " << *OrigReg <<
" in use " << LUIdx
4511 <<
" , add offset " <<
Imm;
void LSRInstance::GenerateCrossUseConstantOffsets() {
  using ImmMapTy = std::map<Immediate, const SCEV *, KeyOrderTargetImmediate>;

  DenseMap<const SCEV *, ImmMapTy> Map;
  DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
  // ...
  for (const SCEV *Use : RegUses) {
    // ...
    auto Pair = Map.try_emplace(Reg);
    // ...
    Pair.first->second.insert(std::make_pair(Imm, Use));
    UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(Use);
  }

  // ...
  SmallSet<std::pair<size_t, Immediate>, 32, KeyOrderSizeTAndImmediate>
      UniqueItems;
  for (const SCEV *Reg : Sequence) {
    const ImmMapTy &Imms = Map.find(Reg)->second;
    // ...
    if (Imms.size() == 1)
      continue;

    LLVM_DEBUG(/* ... */ for (const auto &Entry : Imms) dbgs()
                   << ' ' << Entry.first /* ... */);

    for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end(); J != JE;
         ++J) {
      const SCEV *OrigReg = J->second;
      Immediate JImm = J->first;
      const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);

      if (/* ... */ UsedByIndicesMap[Reg].count() == 1) {
        // ...
        continue;
      }

      // Only probe a handful of other offsets: the first, the last, and one
      // near the middle of the range.
      Immediate First = Imms.begin()->first;
      Immediate Last = std::prev(Imms.end())->first;
      if (!First.isCompatibleImmediate(Last)) {
        // ...
        continue;
      }
      bool Scalable = First.isScalable() || Last.isScalable();
      int64_t FI = First.getKnownMinValue();
      int64_t LI = Last.getKnownMinValue();
      int64_t Avg = (FI & LI) + ((FI ^ LI) >> 1);
      Avg = Avg + ((FI ^ LI) & ((uint64_t)Avg >> 63));
      ImmMapTy::const_iterator OtherImms[] = {
          Imms.begin(), std::prev(Imms.end()),
          Imms.lower_bound(Immediate::get(Avg, Scalable))};
      for (const auto &M : OtherImms) {
        if (M == J || M == JE)
          continue;
        if (!JImm.isCompatibleImmediate(M->first))
          continue;
        Immediate Imm = JImm.subUnsigned(M->first);
        for (unsigned LUIdx : UsedByIndices.set_bits())
          if (UniqueItems.insert(std::make_pair(LUIdx, Imm)).second)
            WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
      }
    }
  }

  Map.clear();
  UsedByIndicesMap.clear();
  UniqueItems.clear();

  // ...
  for (const WorkItem &WI : WorkItems) {
    size_t LUIdx = WI.LUIdx;
    LSRUse &LU = Uses[LUIdx];
    Immediate Imm = WI.Imm;
    const SCEV *OrigReg = WI.OrigReg;
    // ...
    const SCEV *NegImmS = Imm.getNegativeSCEV(SE, IntTy);
    // ...
    for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
      Formula F = LU.Formulae[L];
      // ...
      if (F.ScaledReg == OrigReg) {
        if (!F.BaseOffset.isCompatibleImmediate(Imm))
          continue;
        Immediate Offset = F.BaseOffset.addUnsigned(Imm.mulUnsigned(F.Scale));
        const SCEV *S = Offset.getNegativeSCEV(SE, IntTy);
        if (F.referencesReg(S))
          continue;
        Formula NewF = F;
        NewF.BaseOffset = Offset;
        if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
                        NewF))
          continue;
        NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);
        // ...
        if (NewF.BaseOffset.isNonZero() && NewF.BaseOffset.isScalable())
          continue;
        if (/* ... */ C->getValue()->isNegative() !=
                (NewF.BaseOffset.isLessThanZero()) &&
            (C->getAPInt().abs() * APInt(BitWidth, F.Scale))
                .ule(std::abs(NewF.BaseOffset.getFixedValue())))
          continue;
        // ...
        NewF.canonicalize(*this->L);
        (void)InsertFormula(LU, LUIdx, NewF);
      } else {
        for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
          // ...
          if (BaseReg != OrigReg)
            continue;
          // ...
          if (!NewF.BaseOffset.isCompatibleImmediate(Imm) ||
              !NewF.UnfoldedOffset.isCompatibleImmediate(Imm) ||
              !NewF.BaseOffset.isCompatibleImmediate(NewF.UnfoldedOffset))
            continue;
          NewF.BaseOffset = NewF.BaseOffset.addUnsigned(Imm);
          if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset,
                          LU.Kind, LU.AccessTy, NewF)) {
            // ...
            Immediate NewUnfoldedOffset = NewF.UnfoldedOffset.addUnsigned(Imm);
            // ...
            NewF.UnfoldedOffset = NewUnfoldedOffset;
          }
          NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);
          // ...
          for (const SCEV *NewReg : NewF.BaseRegs) {
            // ...
            if (NewF.BaseOffset.isNonZero() && NewF.BaseOffset.isScalable())
              // ...
            if ((C->getAPInt() + NewF.BaseOffset.getFixedValue())
                    .slt(std::abs(NewF.BaseOffset.getFixedValue())) &&
                (C->getAPInt() + NewF.BaseOffset.getFixedValue())
                /* ... NewF.BaseOffset.getFixedValue()) */)
              // ...
          }
          // ...
          NewF.canonicalize(*this->L);
          (void)InsertFormula(LU, LUIdx, NewF);
        }
      }
    }
  }
}
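The Avg computation above is the standard trick for averaging two int64_t values without overflowing the intermediate sum; the second line corrects the rounding from toward negative infinity to toward zero when the operands' parities differ. A self-contained sketch of the same trick, written for this note (the __int128 reference value is only there to check the result):

#include <cassert>
#include <cstdint>

// Overflow-free signed midpoint, rounding toward zero, as used when probing
// a middle offset in GenerateCrossUseConstantOffsets.
static int64_t midpointTowardZero(int64_t FI, int64_t LI) {
  int64_t Avg = (FI & LI) + ((FI ^ LI) >> 1);   // rounds toward -inf
  Avg += (FI ^ LI) & ((uint64_t)Avg >> 63);     // fix up negative, odd sums
  return Avg;
}

int main() {
  for (int64_t A : {INT64_MIN, -7, -6, 0, 5, INT64_MAX})
    for (int64_t B : {INT64_MIN, -3, 0, 8, INT64_MAX})
      assert(midpointTowardZero(A, B) ==
             (int64_t)(((__int128)A + (__int128)B) / 2));
  return 0;
}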
void LSRInstance::GenerateAllReuseFormulae() {
  // ...
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateScales(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
  }

  GenerateCrossUseConstantOffsets();

  LLVM_DEBUG(dbgs() << "After generating reuse formulae:\n";
             print_uses(dbgs()));
}
void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;
  SmallPtrSet<const SCEV *, 16> LoserRegs;
  bool ChangedFormulae = false;
  // ...
  using BestFormulaeTy = DenseMap<SmallVector<const SCEV *, 4>, size_t>;
  BestFormulaeTy BestFormulae;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    // ...
    for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
         ++FIdx) {
      Formula &F = LU.Formulae[FIdx];
      // ...
      CostF.RateFormula(F, Regs, VisitedRegs, LU, HardwareLoopProfitable,
                        &LoserRegs);
      if (CostF.isLoser()) {
        // ...
      }
      // ...
      for (const SCEV *Reg : F.BaseRegs) {
        if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
          Key.push_back(Reg);
      }
      if (F.ScaledReg &&
          RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
        Key.push_back(F.ScaledReg);
      // ...
      std::pair<BestFormulaeTy::const_iterator, bool> P =
          BestFormulae.insert(std::make_pair(Key, FIdx));
      if (!P.second) {
        Formula &Best = LU.Formulae[P.first->second];
        Cost CostBest(L, SE, TTI, AMK);
        // ...
        CostBest.RateFormula(Best, Regs, VisitedRegs, LU,
                             HardwareLoopProfitable);
        if (CostF.isLess(CostBest))
          std::swap(F, Best);
        LLVM_DEBUG(/* ... */ dbgs() << " in favor of formula ";
                   Best.print(dbgs()); dbgs() << '\n');
        ChangedFormulae = true;
        // ...
        LU.DeleteFormula(F);
        // ...
      }
    }
    // ...
    LU.RecomputeRegs(LUIdx, RegUses);
  }

  BestFormulae.clear();

  LLVM_DEBUG(/* ... */ dbgs() << "After filtering out undesirable candidates:\n"
             /* ... */);
}
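The filter above keys each formula by the set of registers it shares with other uses and keeps only the cheapest formula per key. A toy version of that pruning, with plain ints standing in for registers and costs (all names here are illustrative, not LSR types):

#include <cstddef>
#include <map>
#include <utility>
#include <vector>

struct ToyFormula {
  std::vector<int> SharedRegs; // registers also used by other uses
  unsigned Cost;               // smaller is better
};

// Keep, for each distinct set of shared registers, only the cheapest formula.
static void filterBySharedRegs(std::vector<ToyFormula> &Formulae) {
  std::map<std::vector<int>, size_t> BestByKey;
  std::vector<ToyFormula> Kept;
  for (const ToyFormula &F : Formulae) {
    auto It = BestByKey.find(F.SharedRegs);
    if (It == BestByKey.end()) {
      BestByKey[F.SharedRegs] = Kept.size();
      Kept.push_back(F);
    } else if (F.Cost < Kept[It->second].Cost) {
      Kept[It->second] = F; // the cheaper formula wins the key
    }
  }
  Formulae = std::move(Kept);
}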
size_t LSRInstance::EstimateSearchSpaceComplexity() const {
  // ...
  for (const LSRUse &LU : Uses) {
    size_t FSize = LU.Formulae.size();
    // ...
  }
  // ...
}
void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
  // ...
  LLVM_DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
                       "which use a superset of registers used by other "
                       "formulae.\n");
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    // ...
    for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
      Formula &F = LU.Formulae[i];
      if (F.BaseOffset.isNonZero() && F.BaseOffset.isScalable())
        continue;
      // ...
      for (SmallVectorImpl<const SCEV *>::const_iterator
               I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) {
        // ... (fold a constant base register into the offset)
          NewF.BaseOffset =
              Immediate::getFixed(NewF.BaseOffset.getFixedValue() +
                                  (uint64_t)C->getValue()->getSExtValue());
          NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                              (I - F.BaseRegs.begin()));
          if (LU.HasFormulaWithSameRegs(NewF)) {
            // ...
            LU.DeleteFormula(F);
            // ...
          }
        // ... (or fold a global value base register into BaseGV)
          NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                              (I - F.BaseRegs.begin()));
          if (LU.HasFormulaWithSameRegs(NewF)) {
            // ...
            LU.DeleteFormula(F);
            // ...
          }
      }
    }
    // ...
    LU.RecomputeRegs(LUIdx, RegUses);
  }
}

void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
  // ...
  LLVM_DEBUG(dbgs() << "The search space is too complex.\n"
                       "Narrowing the search space by assuming that uses "
                       "separated by a constant offset will use the same "
                       "registers.\n");
  // ...
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (const Formula &F : LU.Formulae) {
      if (F.BaseOffset.isZero() || (F.Scale != 0 && F.Scale != 1))
        continue;
      // ...
      LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU);
      if (!LUThatHas)
        continue;
      // ...
      if (!reconcileNewOffset(*LUThatHas, F.BaseOffset, /*HasBaseReg=*/false,
                              LU.Kind, LU.AccessTy))
        continue;
      // ...
      LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;
      LUThatHas->AllFixupsUnconditional &= LU.AllFixupsUnconditional;
      // ...
      for (LSRFixup &Fixup : LU.Fixups) {
        Fixup.Offset += F.BaseOffset;
        LUThatHas->pushFixup(Fixup);
        // ...
      }
      // ...
      for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
        Formula &F = LUThatHas->Formulae[i];
        if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset,
                        LUThatHas->Kind, LUThatHas->AccessTy, F)) {
          // ...
          LUThatHas->DeleteFormula(F);
          // ...
        }
      }
      // ...
      LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);
      // ...
      DeleteUse(LU, LUIdx);
      // ...
    }
  }
}

void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
  // ...
  LLVM_DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
                       "undesirable dedicated registers.\n");
  FilterOutUndesirableDedicatedRegisters();
  // ...
}
void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
  // ...
  LLVM_DEBUG(dbgs() << "The search space is too complex.\n"
                       "Narrowing the search space by choosing the best Formula "
                       "from the Formulae with the same Scale and ScaledReg.\n");
  // ...
  using BestFormulaeTy = DenseMap<std::pair<const SCEV *, int64_t>, size_t>;
  BestFormulaeTy BestFormulae;
  // ...
  bool ChangedFormulae = false;
  // ...
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    // ...
    auto IsBetterThan = [&](Formula &FA, Formula &FB) {
      // ...
      size_t FARegNum = 0;
      for (const SCEV *Reg : FA.BaseRegs) {
        const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
        FARegNum += (NumUses - UsedByIndices.count() + 1);
      }
      size_t FBRegNum = 0;
      for (const SCEV *Reg : FB.BaseRegs) {
        const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
        FBRegNum += (NumUses - UsedByIndices.count() + 1);
      }
      if (FARegNum != FBRegNum)
        return FARegNum < FBRegNum;
      // ...
      CostFA.RateFormula(FA, Regs, VisitedRegs, LU, HardwareLoopProfitable);
      // ...
      CostFB.RateFormula(FB, Regs, VisitedRegs, LU, HardwareLoopProfitable);
      return CostFA.isLess(CostFB);
    };

    // ...
    for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
         ++FIdx) {
      Formula &F = LU.Formulae[FIdx];
      // ...
      auto P = BestFormulae.insert({{F.ScaledReg, F.Scale}, FIdx});
      if (!P.second) {
        Formula &Best = LU.Formulae[P.first->second];
        if (IsBetterThan(F, Best))
          std::swap(F, Best);
        LLVM_DEBUG(/* ... */ dbgs() << " in favor of formula ";
                   Best.print(dbgs()); dbgs() << '\n');
        ChangedFormulae = true;
        // ...
        LU.DeleteFormula(F);
        // ...
      }
    }
    // ...
    LU.RecomputeRegs(LUIdx, RegUses);
  }
  BestFormulae.clear();

  LLVM_DEBUG(/* ... */ dbgs() << "After filtering out undesirable candidates:\n"
             /* ... */);
}

void LSRInstance::NarrowSearchSpaceByFilterPostInc() {
  // ...
  LLVM_DEBUG(dbgs() << "Narrowing the search space by choosing the lowest "
                       "register Formula for PostInc Uses.\n");
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    // ...
    if (LU.Kind != LSRUse::Address)
      continue;
    // ...
    size_t MinRegs = std::numeric_limits<size_t>::max();
    for (const Formula &F : LU.Formulae)
      MinRegs = std::min(F.getNumRegs(), MinRegs);
    // ...
    for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
         ++FIdx) {
      Formula &F = LU.Formulae[FIdx];
      if (F.getNumRegs() > MinRegs) {
        // ...
        LU.DeleteFormula(F);
        // ...
      }
    }
    // ...
    LU.RecomputeRegs(LUIdx, RegUses);
  }
}
void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() {
  // ...
  SmallPtrSet<const SCEV *, 4> UniqRegs;
  // ...
  DenseMap<const SCEV *, float> RegNumMap;
  for (const SCEV *Reg : RegUses) {
    // ...
    for (const LSRUse &LU : Uses) {
      if (!LU.Regs.count(Reg))
        continue;
      float P = LU.getNotSelectedProbability(Reg);
      // ...
    }
    // ...
    RegNumMap.insert(std::make_pair(Reg, PNotSel));
  }

  LLVM_DEBUG(
      dbgs() << "Narrowing the search space by deleting costly formulas\n");

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    // ...
    if (LU.Formulae.size() < 2)
      continue;
    // ...
    float FMinRegNum = LU.Formulae[0].getNumRegs();
    float FMinARegNum = LU.Formulae[0].getNumRegs();
    // ...
    for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
      Formula &F = LU.Formulae[i];
      // ...
      for (const SCEV *BaseReg : F.BaseRegs) {
        if (UniqRegs.count(BaseReg))
          continue;
        FRegNum += RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg);
        // ... FARegNum +=
        //     RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg);
      }
      if (const SCEV *ScaledReg = F.ScaledReg) {
        if (!UniqRegs.count(ScaledReg)) {
          // ... RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg);
          // ... RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg);
        }
      }
      if (FMinRegNum > FRegNum ||
          (FMinRegNum == FRegNum && FMinARegNum > FARegNum)) {
        FMinRegNum = FRegNum;
        FMinARegNum = FARegNum;
        // ...
      }
    }
    LLVM_DEBUG(/* ... */ dbgs() << " with min reg num " << FMinRegNum << '\n');
    // ...
    std::swap(LU.Formulae[MinIdx], LU.Formulae[0]);
    while (LU.Formulae.size() != 1) {
      // ...
      LU.Formulae.pop_back();
    }
    LU.RecomputeRegs(LUIdx, RegUses);
    assert(LU.Formulae.size() == 1 && "Should be exactly 1 min regs formula");
    Formula &F = LU.Formulae[0];
    // ...
  }
}
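The heuristic above scores every formula by an expected register count: a register contributes more to the score when other uses are unlikely to pick it, and only the lowest-scoring formula of each use survives. A simplified sketch of that selection step (the probabilities and the scoring are illustrative, not LSR's exact cost model):

#include <cstddef>
#include <limits>
#include <map>
#include <vector>

struct ScoredFormula {
  std::vector<int> Regs; // registers referenced by the formula
};

// Pick the formula whose registers are, in expectation, least likely to add
// new registers to the final solution.
static size_t pickCheapestFormula(const std::vector<ScoredFormula> &Formulae,
                                  const std::map<int, float> &PNotSelected) {
  size_t BestIdx = 0;
  float BestScore = std::numeric_limits<float>::max();
  for (size_t I = 0; I != Formulae.size(); ++I) {
    float Score = 0;
    for (int R : Formulae[I].Regs) {
      auto It = PNotSelected.find(R);
      // A register that other uses are unlikely to select costs more here.
      Score += (It != PNotSelected.end()) ? It->second : 1.0f;
    }
    if (Score < BestScore) {
      BestScore = Score;
      BestIdx = I;
    }
  }
  return BestIdx;
}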
static bool IsSimplerBaseSCEVForTarget(const TargetTransformInfo &TTI,
                                       ScalarEvolution &SE, const SCEV *Best,
                                       const SCEV *Reg,
                                       MemAccessTy AccessType) {
  // ...
  return TTI.isLegalAddressingMode(AccessType.MemTy, nullptr,
                                   Diff->getSExtValue(),
                                   true, 0, AccessType.AddrSpace) &&
         !TTI.isLegalAddressingMode(AccessType.MemTy, nullptr,
                                    -Diff->getSExtValue(),
                                    true, 0, AccessType.AddrSpace);
}
void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
  // ...
  SmallPtrSet<const SCEV *, 4> Taken;
  // ...
    const SCEV *Best = nullptr;
    unsigned BestNum = 0;
    for (const SCEV *Reg : RegUses) {
      // ...
        BestNum = RegUses.getUsedByIndices(Reg).count();
      // ...
        unsigned Count = RegUses.getUsedByIndices(Reg).count();
        if (Count > BestNum) {
          // ...
        }
        // ...
        if (Count == BestNum) {
          int LUIdx = RegUses.getUsedByIndices(Reg).find_first();
          if (LUIdx >= 0 && Uses[LUIdx].Kind == LSRUse::Address &&
              IsSimplerBaseSCEVForTarget(TTI, SE, Best, Reg,
                                         Uses[LUIdx].AccessTy)) {
            // ...
          }
        }
      // ...
    }
    assert(Best && "Failed to find best LSRUse candidate");
    LLVM_DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
                      << " will yield profitable reuse.\n");
    // ...
    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      if (!LU.Regs.count(Best))
        continue;
      // ...
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        if (!F.referencesReg(Best)) {
          // ...
          LU.DeleteFormula(F);
          // ...
        }
      }
      assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?");
      // ...
      LU.RecomputeRegs(LUIdx, RegUses);
    }
  // ...
}

void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
  NarrowSearchSpaceByDetectingSupersets();
  NarrowSearchSpaceByCollapsingUnrolledCode();
  NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  if (FilterSameScaledReg)
    NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
  NarrowSearchSpaceByFilterPostInc();
  if (LSRExpNarrow)
    NarrowSearchSpaceByDeletingCostlyFormulas();
  else
    NarrowSearchSpaceByPickingWinnerRegs();
}
void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                               Cost &SolutionCost,
                               SmallVectorImpl<const Formula *> &Workspace,
                               const Cost &CurCost,
                               const SmallPtrSet<const SCEV *, 16> &CurRegs,
                               DenseSet<const SCEV *> &VisitedRegs) const {
  // ...
  const LSRUse &LU = Uses[Workspace.size()];
  // ...
  SmallSetVector<const SCEV *, 4> ReqRegs;
  for (const SCEV *S : CurRegs)
    if (LU.Regs.count(S))
      ReqRegs.insert(S);

  SmallPtrSet<const SCEV *, 16> NewRegs;
  Cost NewCost(L, SE, TTI, AMK);
  for (const Formula &F : LU.Formulae) {
    // ...
    int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size());
    for (const SCEV *Reg : ReqRegs) {
      if ((F.ScaledReg && F.ScaledReg == Reg) ||
          /* ... */) {
        // ...
        if (NumReqRegsToFind == 0)
          break;
      }
    }
    if (NumReqRegsToFind != 0) {
      // ...
    }
    // ...
    NewCost.RateFormula(F, NewRegs, VisitedRegs, LU, HardwareLoopProfitable);
    if (NewCost.isLess(SolutionCost)) {
      // ...
      if (Workspace.size() != Uses.size()) {
        // ...
        SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
                     NewRegs, VisitedRegs);
        if (F.getNumRegs() == 1 && Workspace.size() == 1)
          VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
      } else {
        LLVM_DEBUG(/* ... */ dbgs() << ".\nRegs:\n";
                   for (const SCEV *S : NewRegs) dbgs() << "- " << *S << "\n";
                   /* ... */);
        SolutionCost = NewCost;
        Solution = Workspace;
      }
      // ...
    }
  }
}

void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
  // ...
  Cost SolutionCost(L, SE, TTI, AMK);
  SolutionCost.Lose();
  Cost CurCost(L, SE, TTI, AMK);
  SmallPtrSet<const SCEV *, 16> CurRegs;
  DenseSet<const SCEV *> VisitedRegs;
  // ...
  SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
               CurRegs, VisitedRegs);
  if (Solution.empty()) {
    // ...
  }

  LLVM_DEBUG(/* ... */ dbgs() << "The chosen solution requires ";
             SolutionCost.print(dbgs()); dbgs() << ":\n";
             for (size_t i = 0, e = Uses.size(); i != e; ++i) {
               // ...
               Solution[i]->print(dbgs());
               // ...
             });

  const bool EnableDropUnprofitableSolution = [&] {
    // ...
  }();
  // ...
  if (BaselineCost.isLess(SolutionCost)) {
    if (!EnableDropUnprofitableSolution)
      LLVM_DEBUG(
          dbgs() << "Baseline is more profitable than chosen solution, "
                    "add option 'lsr-drop-solution' to drop LSR solution.\n");
    // ... (otherwise report "... solution, dropping LSR solution.\n" and
    //      discard the solution)
  }
}
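SolveRecurse is a depth-first search over the uses: at each level it tries the formulae of one use, preferring those that reuse already-required registers, and prunes any branch whose running cost is no longer less than the best complete solution found so far. A generic sketch of that shape, written for this note (costs are plain unsigned values; the real Cost ordering is multi-dimensional):

#include <cstddef>
#include <vector>

// One candidate per "use"; Cost[i][j] is the cost of picking candidate j for
// use i. Chosen/BestChosen record the per-use picks.
static void solveRecurse(const std::vector<std::vector<unsigned>> &Cost,
                         size_t UseIdx, unsigned RunningCost,
                         std::vector<size_t> &Chosen, unsigned &BestCost,
                         std::vector<size_t> &BestChosen) {
  if (UseIdx == Cost.size()) {
    if (RunningCost < BestCost) {
      BestCost = RunningCost;
      BestChosen = Chosen;
    }
    return;
  }
  for (size_t J = 0; J != Cost[UseIdx].size(); ++J) {
    unsigned NewCost = RunningCost + Cost[UseIdx][J];
    if (NewCost >= BestCost)
      continue; // prune: cannot beat the best complete solution
    Chosen[UseIdx] = J;
    solveRecurse(Cost, UseIdx + 1, NewCost, Chosen, BestCost, BestChosen);
  }
}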
// ...
BasicBlock::iterator
LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
                                 const SmallVectorImpl<Instruction *> &Inputs)
    const {
  // ...
    bool AllDominate = true;
    // ...
    for (Instruction *Inst : Inputs) {
      if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
        AllDominate = false;
        break;
      }
      // ...
      if (Tentative->getParent() == Inst->getParent() &&
          (!BetterPos || !DT.dominates(Inst, BetterPos)))
        // ...
    }
    // ...
    const Loop *IPLoop = LI.getLoopFor(IP->getParent());
    unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;
    // ...
      if (!Rung)
        return IP;
      Rung = Rung->getIDom();
      if (!Rung)
        return IP;
      IDom = Rung->getBlock();
      // ...
      const Loop *IDomLoop = LI.getLoopFor(IDom);
      unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
      if (IDomDepth <= IPLoopDepth &&
          (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
        break;
    // ...

// ... (AdjustInsertPositionForExpand)
  SmallVector<Instruction *, 4> Inputs;
  // ...
  if (LU.Kind == LSRUse::ICmpZero)
    if (Instruction *I = /* ... */)
      // ...
  if (LF.PostIncLoops.count(L)) {
    if (LF.isUseFullyOutsideLoop(L))
      Inputs.push_back(L->getLoopLatch()->getTerminator());
    // ...
  }
  // ...
  for (const Loop *PIL : LF.PostIncLoops) {
    if (PIL == L)
      continue;
    // ...
    if (!ExitingBlocks.empty()) {
      // ...
      for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
        // ...
    }
  }
  // ... "Insertion point must be a normal instruction");
  // ...
  while (IP->isEHPad()) ++IP;
  // ...
  while (Rewriter.isInsertedInstruction(&*IP) && IP != LowestIP)
    // ...
Value *LSRInstance::Expand(const LSRUse &LU, const LSRFixup &LF,
                           const Formula &F, BasicBlock::iterator IP,
                           SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
  if (LU.RigidFormula)
    return LF.OperandValToReplace;

  // ...
  IP = AdjustInsertPositionForExpand(IP, LF, LU);
  // ...
  Rewriter.setPostInc(LF.PostIncLoops);
  // ...
  Type *Ty = F.getType();
  // ...
  for (const SCEV *Reg : F.BaseRegs) {
    assert(!Reg->isZero() && "Zero allocated in a base register!");
    // ...
  }

  // Expand the ScaledReg portion, if any.
  Value *ICmpScaledV = nullptr;
  // ...
    const SCEV *ScaledS = F.ScaledReg;
    // ...
    if (LU.Kind == LSRUse::ICmpZero) {
      // ... "The only scale supported by ICmpZero uses is -1!");
      ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr);
    } else {
      // ...
      if (!Ops.empty() && LU.Kind == LSRUse::Address &&
          /* ... */)
        // ...
      Ops.push_back(ScaledS);
    }
  // ...

  // Expand the immediate portion.
  assert(F.BaseOffset.isCompatibleImmediate(LF.Offset) &&
         "Expanding mismatched offsets\n");
  Immediate Offset = F.BaseOffset.addUnsigned(LF.Offset);
  if (Offset.isNonZero()) {
    if (LU.Kind == LSRUse::ICmpZero) {
      // ...
      ICmpScaledV = ConstantInt::get(IntTy, Offset.getFixedValue());
    } else {
      // ...
      Ops.push_back(Offset.getUnknownSCEV(SE, IntTy));
    }
  }

  // Expand the unfolded offset portion.
  Immediate UnfoldedOffset = F.UnfoldedOffset;
  if (UnfoldedOffset.isNonZero()) {
    // ...
    Ops.push_back(UnfoldedOffset.getUnknownSCEV(SE, IntTy));
  }

  // ...
  const SCEV *FullS = Ops.empty() ? SE.getConstant(IntTy, 0)
                                  : SE.getAddExpr(Ops);
  // ...
  if (LU.Kind == LSRUse::ICmpZero) {
    // ...
    assert(!F.BaseGV && "ICmp does not support folding a global value and "
                        "a scale at the same time!");
    if (F.Scale == -1) {
      if (ICmpScaledV->getType() != OpTy) {
        // ...
      }
      // ...
    } else {
      assert((F.Scale == 0 || F.Scale == 1) &&
             "ICmp does not support folding a global value and "
             "a scale at the same time!");
      // ... ConstantInt::getSigned(..., -(uint64_t)Offset.getFixedValue());
      if (C->getType() != OpTy) {
        // ...
        assert(C && "Cast of ConstantInt should have folded");
      }
      // ...
    }
  }
  // ...
}
void LSRInstance::RewriteForPHI(PHINode *PN, const LSRUse &LU,
                                const LSRFixup &LF, const Formula &F,
                                SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  DenseMap<BasicBlock *, Value *> Inserted;
  // ...
  bool needUpdateFixups = false;
  // ...
      Loop *PNLoop = LI.getLoopFor(Parent);
      if (!PNLoop || Parent != PNLoop->getHeader()) {
        // Split the critical edge before rewriting.
        // ... CriticalEdgeSplittingOptions(&DT, &LI, MSSAU)
        //         .setMergeIdenticalEdges()
        //         .setKeepOneInputPHIs());
        // ...
        DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
        // ...
        if (L->contains(BB) && !L->contains(PN))
          // ...
        needUpdateFixups = true;
      }
      // ...
      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
          // ...
      // ... LF.OperandValToReplace->getType(), "tmp", ...
      // ...
      if (L->contains(I) && !L->contains(BB))
        InsertedNonLCSSAInsts.insert(I);
      // ...
      Pair.first->second = FullV;
  // ...
  if (needUpdateFixups) {
    for (LSRUse &LU : Uses)
      for (LSRFixup &Fixup : LU.Fixups)
        // ...
        if (Fixup.UserInst == PN) {
          // ...
          bool foundInOriginalPHI = false;
          // ...
            if (val == Fixup.OperandValToReplace) {
              foundInOriginalPHI = true;
              // ...
            }
          // ...
          if (foundInOriginalPHI)
            continue;
          // ...
            if (val == Fixup.OperandValToReplace)
              Fixup.UserInst = NewPN;
          // ...
        }
  }
}

void LSRInstance::Rewrite(const LSRUse &LU, const LSRFixup &LF,
                          const Formula &F,
                          SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  // ...
    RewriteForPHI(PN, LU, LF, F, DeadInsts);
  // ...
    if (FullV->getType() != OpTy) {
      // ...
    }
    // ...
    if (LU.Kind == LSRUse::ICmpZero)
      // ...
  // ...
}

static Instruction *getFixupInsertPos(const TargetTransformInfo &TTI,
                                      const LSRFixup &Fixup, const LSRUse &LU,
                                      Instruction *IVIncInsertPos,
                                      DominatorTree &DT) {
  // ...
  if (LU.Kind != LSRUse::Address)
    return IVIncInsertPos;
  // ...
  Type *Ty = I->getType();
  // ...
    return IVIncInsertPos;
  // ...
  return IVIncInsertPos;
}
void LSRInstance::ImplementSolution(
    const SmallVectorImpl<const Formula *> &Solution) {
  // ...
  for (const IVChain &Chain : IVChainVec) {
    // ...
  }
  // ...
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx)
    for (const LSRFixup &Fixup : Uses[LUIdx].Fixups) {
      // ...
      Rewriter.setIVIncInsertPos(L, InsertPos);
      Rewrite(Uses[LUIdx], Fixup, *Solution[LUIdx], DeadInsts);
      // ...
    }

  auto InsertedInsts = InsertedNonLCSSAInsts.takeVector();
  // ...
  for (const IVChain &Chain : IVChainVec) {
    GenerateIVChain(Chain, DeadInsts);
    // ...
  }
  // ...
  for (const WeakVH &IV : Rewriter.getInsertedIVs())
    // ...

  // ... (scan of the header PHIs, matching the IV increment instruction)
  for (PHINode &PN : L->getHeader()->phis()) {
    BinaryOperator *BO = nullptr;
    // ...
    case Instruction::Sub:
      // ...
    case Instruction::Add:
      // ...
    // ...
        [&](Use &U) { return DT.dominates(IVIncInsertPos, U); }))
      // ...
  }
LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE,
                         DominatorTree &DT, LoopInfo &LI,
                         const TargetTransformInfo &TTI, AssumptionCache &AC,
                         TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU)
    : IU(IU), SE(SE), DT(DT), LI(LI), AC(AC), TLI(TLI), TTI(TTI), L(L),
      // ...
      AMK(PreferredAddresingMode.getNumOccurrences() > 0
              ? PreferredAddresingMode
              : TTI.getPreferredAddressingMode(L, &SE)),
      // ...
      BaselineCost(L, SE, TTI, AMK) {
  // ...
  if (!L->isLoopSimplifyForm())
    return;
  // ...
  unsigned NumUsers = 0;
  // ...
    LLVM_DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U
                      << "\n");
    // ...
    auto FirstNonPHI = PN->getParent()->getFirstNonPHIIt();
  // ...
  LLVM_DEBUG(/* ... */ L->getHeader()->printAsOperand(dbgs(), false);
             /* ... */);
  // ...
  HardwareLoopProfitable =
      TTI.isHardwareLoopProfitable(L, SE, AC, &TLI, HWLoopInfo);
  // ...
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  // ...
#endif
  Rewriter.disableCanonicalMode();
  Rewriter.enableLSRMode();
  // ...
  OptimizeLoopTermCond();
  // ...
  if (IU.empty())
    return;
  // ...
  if (!L->isInnermost()) {
    // ...
  }
  // ...
  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();
  // ...
  LLVM_DEBUG(/* ... */ print_uses(dbgs()));
  LLVM_DEBUG(/* ... */ BaselineCost.print(dbgs()); dbgs() << "\n");

  // ...
  GenerateAllReuseFormulae();
  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();
  // ...
  if (Solution.empty())
    return;
  // ...
  for (const LSRUse &LU : Uses) {
    for (const Formula &F : LU.Formulae)
      assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
                        F) &&
             "Illegal formula generated!");
  }
  // ...
  ImplementSolution(Solution);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
  if (Factors.empty() && Types.empty())
    return;

  OS << "LSR has identified the following interesting factors and types: ";
  // ...
  for (int64_t Factor : Factors) {
    if (!First) OS << ", ";
    // ...
    OS << '*' << Factor;
  }
  // ...
  for (Type *Ty : Types) {
    if (!First) OS << ", ";
    // ...
    OS << '(' << *Ty << ')';
  }
  // ...
}

void LSRInstance::print_fixups(raw_ostream &OS) const {
  OS << "LSR is examining the following fixup sites:\n";
  for (const LSRUse &LU : Uses)
    for (const LSRFixup &LF : LU.Fixups) {
      // ...
    }
}

void LSRInstance::print_uses(raw_ostream &OS) const {
  OS << "LSR is examining the following uses:\n";
  for (const LSRUse &LU : Uses) {
    // ...
    for (const Formula &F : LU.Formulae) {
      // ...
    }
  }
}

void LSRInstance::print(raw_ostream &OS) const {
  print_factors_and_types(OS);
  // ...
}
class LoopStrengthReduce : public LoopPass {
  // ...
  LoopStrengthReduce();
  // ...
  bool runOnLoop(Loop *L, LPPassManager &LPM) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

LoopStrengthReduce::LoopStrengthReduce() : LoopPass(ID) {
  // ...
}

void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
  // ...
}

// ... (return type elided by the extraction)
ToDwarfOpIter(SmallVectorImpl<uint64_t> &Expr) {
  llvm::DIExpression::expr_op_iterator Begin =
      llvm::DIExpression::expr_op_iterator(Expr.begin());
  llvm::DIExpression::expr_op_iterator End =
      llvm::DIExpression::expr_op_iterator(Expr.end());
  return {Begin, End};
}
struct SCEVDbgValueBuilder {
  SCEVDbgValueBuilder() = default;
  SCEVDbgValueBuilder(const SCEVDbgValueBuilder &Base) { clone(Base); }

  void clone(const SCEVDbgValueBuilder &Base) {
    LocationOps = Base.LocationOps;
    // ...
  }

  void clear() {
    LocationOps.clear();
    // ...
  }

  SmallVector<Value *, 2> LocationOps;
  // ...

  void pushUInt(uint64_t Operand) { Expr.push_back(Operand); }

  // ...
    unsigned ArgIndex = 0;
    if (It != LocationOps.end()) {
      ArgIndex = std::distance(LocationOps.begin(), It);
    } else {
      ArgIndex = LocationOps.size();
      // ...
    }
  // ...

  void pushValue(const SCEVUnknown *U) {
    // ...
  }

  bool pushConst(const SCEVConstant *C) {
    if (C->getAPInt().getSignificantBits() > 64)
      return false;
    Expr.push_back(llvm::dwarf::DW_OP_consts);
    Expr.push_back(C->getAPInt().getSExtValue());
    return true;
  }

  // ...
    return ToDwarfOpIter(Expr);
  // ...

  bool pushArithmeticExpr(const llvm::SCEVCommutativeExpr *CommExpr,
                          uint64_t DwarfOp) {
    // ... "Expected arithmetic SCEV type");
    // ...
    unsigned EmitOperator = 0;
    for (const auto &Op : CommExpr->operands()) {
      // ...
      if (EmitOperator >= 1)
        pushOperator(DwarfOp);
      // ...
    }
    // ...
  }

  bool pushCast(const llvm::SCEVCastExpr *C, bool IsSigned) {
    const llvm::SCEV *Inner = C->getOperand(0);
    const llvm::Type *Type = C->getType();
    uint64_t ToWidth = Type->getIntegerBitWidth();
    bool Success = pushSCEV(Inner);
    // ... IsSigned ? llvm::dwarf::DW_ATE_signed
    //              : llvm::dwarf::DW_ATE_unsigned};
    for (const auto &Op : CastOps)
      // ...
    return Success;
  }

  bool pushSCEV(const llvm::SCEV *S) {
    // ...
      Success &= pushConst(StartInt);
    // ...
      pushLocation(U->getValue());
    // ...
      Success &= pushArithmeticExpr(MulRec, llvm::dwarf::DW_OP_mul);
    // ...
      Success &= pushSCEV(UDiv->getLHS());
      Success &= pushSCEV(UDiv->getRHS());
      pushOperator(llvm::dwarf::DW_OP_div);
    // ... "Unexpected cast type in SCEV.");
    // ...
      Success &= pushArithmeticExpr(AddExpr, llvm::dwarf::DW_OP_plus);
    // ...
  }
  bool isIdentityFunction(uint64_t Op, const SCEV *S) {
    // ...
    if (C->getAPInt().getSignificantBits() > 64)
      return false;
    int64_t I = C->getAPInt().getSExtValue();
    // ...
    case llvm::dwarf::DW_OP_plus:
    case llvm::dwarf::DW_OP_minus:
      return I == 0;
    case llvm::dwarf::DW_OP_mul:
    case llvm::dwarf::DW_OP_div:
      return I == 1;
    // ...
  }

  bool SCEVToValueExpr(const llvm::SCEVAddRecExpr &SAR, ScalarEvolution &SE) {
    // ...
    if (!isIdentityFunction(llvm::dwarf::DW_OP_mul, Stride)) {
      if (!pushSCEV(Stride))
        return false;
      pushOperator(llvm::dwarf::DW_OP_mul);
    }
    if (!isIdentityFunction(llvm::dwarf::DW_OP_plus, Start)) {
      if (!pushSCEV(Start))
        return false;
      pushOperator(llvm::dwarf::DW_OP_plus);
    }
    // ...
  }
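SCEVToValueExpr rebuilds the value of an affine recurrence {Start,+,Stride} from the iteration count already on the DWARF expression stack: multiply by Stride, then add Start, skipping either step when it is the identity. A hand-rolled sketch of the operator stream this produces, written for this note (constants are arbitrary; only DW_OP_consts / DW_OP_mul / DW_OP_plus from llvm/BinaryFormat/Dwarf.h are assumed):

#include <cstdint>
#include <vector>
#include "llvm/BinaryFormat/Dwarf.h"

// For {16,+,4}: value = IterCount * 4 + 16, assuming the iteration count has
// already been pushed onto the expression stack.
static std::vector<uint64_t> affineValueExpr(int64_t Start, int64_t Stride) {
  std::vector<uint64_t> Ops;
  if (Stride != 1) {
    Ops.push_back(llvm::dwarf::DW_OP_consts);
    Ops.push_back((uint64_t)Stride);
    Ops.push_back(llvm::dwarf::DW_OP_mul);
  }
  if (Start != 0) {
    Ops.push_back(llvm::dwarf::DW_OP_consts);
    Ops.push_back((uint64_t)Start);
    Ops.push_back(llvm::dwarf::DW_OP_plus);
  }
  return Ops;
}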
  void createOffsetExpr(int64_t Offset, Value *OffsetValue) {
    pushLocation(OffsetValue);
    // ...
    LLVM_DEBUG(
        dbgs() << "scev-salvage: Generated IV offset expression. Offset: "
               << std::to_string(Offset) << "\n");
  }

  bool createIterCountExpr(const SCEV *S,
                           const SCEVDbgValueBuilder &IterationCount,
                           ScalarEvolution &SE) {
    // ...
    LLVM_DEBUG(dbgs() << "scev-salvage: Location to salvage SCEV: " << *S
                      << '\n');
    // ...
    if (!Rec->isAffine())
      return false;
    // ...
    clone(IterationCount);
    if (!SCEVToValueExpr(*Rec, SE))
      return false;
    // ...
  }

  bool SCEVToIterCountExpr(const llvm::SCEVAddRecExpr &SAR,
                           ScalarEvolution &SE) {
    // ...
    if (!isIdentityFunction(llvm::dwarf::DW_OP_minus, Start)) {
      if (!pushSCEV(Start))
        return false;
      pushOperator(llvm::dwarf::DW_OP_minus);
    }
    if (!isIdentityFunction(llvm::dwarf::DW_OP_div, Stride)) {
      if (!pushSCEV(Stride))
        return false;
      pushOperator(llvm::dwarf::DW_OP_div);
    }
    // ...
  }

  void appendToVectors(SmallVectorImpl<uint64_t> &DestExpr,
                       SmallVectorImpl<Value *> &DestLocations) {
    assert(!DestLocations.empty() &&
           "Expected the locations vector to contain the IV");
    // ...
    assert(!LocationOps.empty() &&
           "Expected the location ops to contain the IV.");
    // ...
    for (const auto &Op : LocationOps) {
      auto It = find(DestLocations, Op);
      if (It != DestLocations.end()) {
        // Re-use an existing location operand.
        DestIndexMap.push_back(std::distance(DestLocations.begin(), It));
        continue;
      }
      // ...
    }

    for (const auto &Op : expr_ops()) {
      // ...
      Op.appendToVector(DestExpr);
      // ...
      uint64_t NewIndex = DestIndexMap[Op.getArg(0)];
      // ...
    }
  }
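SCEVToIterCountExpr goes the other way: starting from the new induction variable already on the stack, it subtracts Start and divides by Stride to recover the iteration count. Continuing the previous sketch for {16,+,4}, with the same assumptions and includes:

#include <cstdint>
#include <vector>
#include "llvm/BinaryFormat/Dwarf.h"

// For {16,+,4}: itercount = (IV - 16) / 4, with the IV pushed first.
static std::vector<uint64_t> affineIterCountExpr(int64_t Start,
                                                 int64_t Stride) {
  std::vector<uint64_t> Ops;
  if (Start != 0) {
    Ops.push_back(llvm::dwarf::DW_OP_consts);
    Ops.push_back((uint64_t)Start);
    Ops.push_back(llvm::dwarf::DW_OP_minus);
  }
  if (Stride != 1) {
    Ops.push_back(llvm::dwarf::DW_OP_consts);
    Ops.push_back((uint64_t)Stride);
    Ops.push_back(llvm::dwarf::DW_OP_div);
  }
  return Ops;
}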
struct DVIRecoveryRec {
  DVIRecoveryRec(DbgVariableRecord *DVR)
      : DbgRef(DVR), Expr(DVR->getExpression()), HadLocationArgList(false) {}
  // ...
  DbgVariableRecord *DbgRef;
  // ...
  bool HadLocationArgList;
  // ...
  void clear() {
    for (auto &RE : RecoveryExprs)
      // ...
    RecoveryExprs.clear();
  }

  ~DVIRecoveryRec() { clear(); }
};

// ...
  auto expr_ops = ToDwarfOpIter(Expr);
  // ...
  for (auto Op : expr_ops)
    // ...

template <typename T>
// ... (updateDVIWithLocation)
  // ... "contain any DW_OP_llvm_arg operands.");
  // ...

template <typename T>
// ... (updateDVIWithLocations)
  // ... "Expected expression that references DIArglist locations using "
  //     "DW_OP_llvm_arg operands.");
  // ...
  for (Value *V : Locations)
    // ...

// ... (UpdateDbgValue)
  if (NumLLVMArgs == 0) {
    // ...
  }
  // ... "Lone LLVM_arg in a DIExpression should refer to location-op 0.");
static void restorePreTransformState(DVIRecoveryRec &DVIRec) {
  LLVM_DEBUG(dbgs() << "scev-salvage: restore dbg.value to pre-LSR state\n"
                    << "scev-salvage: post-LSR: " << *DbgVal << '\n');
  assert(DVIRec.Expr && "Expected an expression");
  // ...
  if (!DVIRec.HadLocationArgList) {
    assert(DVIRec.LocationOps.size() == 1 &&
           "Unexpected number of location ops.");
    // ...
    Value *CachedValue =
        // ...
    // ...
  } else {
    // ...
    for (WeakVH VH : DVIRec.LocationOps) {
      // ...
    }
    // ...
  }
  LLVM_DEBUG(dbgs() << "scev-salvage: pre-LSR: " << *DbgVal << '\n');
}
static bool SalvageDVI(llvm::Loop *L, ScalarEvolution &SE,
                       llvm::PHINode *LSRInductionVar, DVIRecoveryRec &DVIRec,
                       const SCEV *SCEVInductionVar,
                       SCEVDbgValueBuilder IterCountExpr) {
  // ...
  LocationOpIndexMap.assign(DVIRec.LocationOps.size(), -1);
  // ...
  NewLocationOps.push_back(LSRInductionVar);
  // ...
  for (unsigned i = 0; i < DVIRec.LocationOps.size(); i++) {
    WeakVH VH = DVIRec.LocationOps[i];
    // Re-use the location if its value still exists.
    // ...
      LocationOpIndexMap[i] = NewLocationOps.size() - 1;
      LLVM_DEBUG(/* ... */ dbgs() << " now at index " << LocationOpIndexMap[i]
                                  << "\n");
    // ...
      LLVM_DEBUG(dbgs() << "scev-salvage: SCEV for location at index: " << i
                        << " refers to a location that is now undef or erased. "
                           "Salvage abandoned.\n");
      // ...
    LLVM_DEBUG(dbgs() << "scev-salvage: salvaging location at index " << i
                      << " with SCEV: " << *DVIRec.SCEVs[i] << "\n");
    // ...
    DVIRec.RecoveryExprs[i] = std::make_unique<SCEVDbgValueBuilder>();
    SCEVDbgValueBuilder *SalvageExpr = DVIRec.RecoveryExprs[i].get();
    // ...
    if (std::optional<APInt> Offset =
            /* ... constant difference from the induction variable ... */) {
      if (Offset->getSignificantBits() <= 64)
        SalvageExpr->createOffsetExpr(Offset->getSExtValue(), LSRInductionVar);
      // ...
    } else if (!SalvageExpr->createIterCountExpr(DVIRec.SCEVs[i], IterCountExpr,
                                                 SE))
      return false;
  }
  // ...
    assert(DVIRec.RecoveryExprs.size() == 1 &&
           "Expected only a single recovery expression for an empty "
           /* ... */);
    assert(DVIRec.RecoveryExprs[0] &&
           "Expected a SCEVDbgSalvageBuilder for location 0");
    SCEVDbgValueBuilder *B = DVIRec.RecoveryExprs[0].get();
    B->appendToVectors(NewExpr, NewLocationOps);
  // ...
  for (const auto &Op : DVIRec.Expr->expr_ops()) {
    // ...
    SCEVDbgValueBuilder *DbgBuilder =
        DVIRec.RecoveryExprs[LocationArgIndex].get();
    // ...
      assert(LocationOpIndexMap[Op.getArg(0)] != -1 &&
             "Expected a positive index for the location-op position.");
      NewExpr.push_back(LocationOpIndexMap[Op.getArg(0)]);
    // ...
    DbgBuilder->appendToVectors(NewExpr, NewLocationOps);
  }
  // ...
  LLVM_DEBUG(dbgs() << "scev-salvage: Updated DVI: " << *DVIRec.DbgRef << "\n");
  // ...
}
static void DbgRewriteSalvageableDVIs(
    llvm::Loop *L, ScalarEvolution &SE, llvm::PHINode *LSRInductionVar,
    SmallVector<std::unique_ptr<DVIRecoveryRec>, 2> &DVIToUpdate) {
  if (DVIToUpdate.empty())
    return;
  // ...
  assert(SCEVInductionVar &&
         "Anticipated a SCEV for the post-LSR induction variable");
  // ...
    if (!IVAddRec->isAffine())
      return;
    // ...
    SCEVDbgValueBuilder IterCountExpr;
    IterCountExpr.pushLocation(LSRInductionVar);
    if (!IterCountExpr.SCEVToIterCountExpr(*IVAddRec, SE))
      return;

    LLVM_DEBUG(dbgs() << "scev-salvage: IV SCEV: " << *SCEVInductionVar
                      << '\n');
    // ...
    for (auto &DVIRec : DVIToUpdate) {
      SalvageDVI(L, SE, LSRInductionVar, *DVIRec, SCEVInductionVar,
                 IterCountExpr);
      // ...
    }
  // ...
}
6954 for (
const auto &
B : L->getBlocks()) {
6955 for (
auto &
I : *
B) {
6957 if (!DbgVal.isDbgValue() && !DbgVal.isDbgAssign())
6962 if (DbgVal.isKillLocation())
6967 const auto &HasTranslatableLocationOps =
6969 for (
const auto LocOp : DbgValToTranslate.location_ops()) {
6983 if (!HasTranslatableLocationOps(DbgVal))
6986 std::unique_ptr<DVIRecoveryRec> NewRec =
6987 std::make_unique<DVIRecoveryRec>(&DbgVal);
6991 NewRec->RecoveryExprs.resize(DbgVal.getNumVariableLocationOps());
6992 for (
const auto LocOp : DbgVal.location_ops()) {
6993 NewRec->SCEVs.push_back(SE.
getSCEV(LocOp));
6994 NewRec->LocationOps.push_back(LocOp);
6995 NewRec->HadLocationArgList = DbgVal.hasArgList();
6997 SalvageableDVISCEVs.push_back(std::move(NewRec));
static llvm::PHINode *GetInductionVariable(const Loop &L, ScalarEvolution &SE,
                                           const LSRInstance &LSR) {
  // ...
  auto IsSuitableIV = [&](PHINode *P) {
    // ...
  };

  // Prefer an IV that ScalarEvolutionExpander inserted.
  for (const WeakVH &IV : LSR.getScalarEvolutionIVs()) {
    // ...
    if (IsSuitableIV(P))
      return P;
  }

  for (PHINode &P : L.getHeader()->phis()) {
    if (IsSuitableIV(&P))
      return &P;
  }
  // ...
}
// ... (ReduceLoopStrength)
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  if (MSSA)
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);

  // Run the main LSR transformation.
  const LSRInstance &Reducer =
      LSRInstance(L, IU, SE, DT, LI, TTI, AC, TLI, MSSAU.get());
  Changed |= Reducer.getChanged();
  // ...
  const DataLayout &DL = L->getHeader()->getDataLayout();
  // ...
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  // ...
#endif
  unsigned numFolded = Rewriter.replaceCongruentIVs(L, &DT, DeadInsts, &TTI);
  // ...
  if (L->isRecursivelyLCSSAForm(DT, LI) && L->getExitBlock()) {
    // ...
    const DataLayout &DL = L->getHeader()->getDataLayout();
    // ...
  }
  // ...
  if (SalvageableDVIRecords.empty())
    return Changed;
  // ...
  for (const auto &L : LI) {
    // ...
  }
  // ...
  LLVM_DEBUG(dbgs() << "scev-salvage: SCEV salvaging not possible. An IV "
                       "could not be identified.\n");
  // ...
  for (auto &Rec : SalvageableDVIRecords)
    // ...
  SalvageableDVIRecords.clear();
  // ...
bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  // ...
  auto &IU = getAnalysis<IVUsersWrapperPass>().getIU();
  auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  const auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
      *L->getHeader()->getParent());
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
      *L->getHeader()->getParent());
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
      *L->getHeader()->getParent());
  auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>();
  // ...
    MSSA = &MSSAAnalysis->getMSSA();
  // ...
}

char LoopStrengthReduce::ID = 0;

// ... INITIALIZE_PASS_BEGIN / dependencies ...
INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
                    "Loop Strength Reduction", false, false)
static bool SalvageDVI(llvm::Loop *L, ScalarEvolution &SE, llvm::PHINode *LSRInductionVar, DVIRecoveryRec &DVIRec, const SCEV *SCEVInductionVar, SCEVDbgValueBuilder IterCountExpr)
static cl::opt< bool > DropScaledForVScale("lsr-drop-scaled-reg-for-vscale", cl::Hidden, cl::init(true), cl::desc("Avoid using scaled registers with vscale-relative addressing"))
static Value * getWideOperand(Value *Oper)
IVChain logic must consistently peek base TruncInst operands, so wrap it in a convenient helper.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE)
Return true if the given add can be sign-extended without changing its value.
static bool mayUsePostIncMode(const TargetTransformInfo &TTI, LSRUse &LU, const SCEV *S, const Loop *L, ScalarEvolution &SE)
Return true if the SCEV represents a value that may end up as a post-increment operation.
static void restorePreTransformState(DVIRecoveryRec &DVIRec)
Restore the DVI's pre-LSR arguments. Substitute undef for any erased values.
static Immediate ExtractImmediate(const SCEV *&S, ScalarEvolution &SE)
If S involves the addition of a constant integer value, return that integer value,...
static bool containsAddRecDependentOnLoop(const SCEV *S, const Loop &L)
static User::op_iterator findIVOperand(User::op_iterator OI, User::op_iterator OE, Loop *L, ScalarEvolution &SE)
Helper for CollectChains that finds an IV operand (computed by an AddRec in this loop) within [OI,...
static cl::opt< TTI::AddressingModeKind > PreferredAddresingMode("lsr-preferred-addressing-mode", cl::Hidden, cl::init(TTI::AMK_None), cl::desc("A flag that overrides the target's preferred addressing mode."), cl::values(clEnumValN(TTI::AMK_None, "none", "Don't prefer any addressing mode"), clEnumValN(TTI::AMK_PreIndexed, "preindexed", "Prefer pre-indexed addressing mode"), clEnumValN(TTI::AMK_PostIndexed, "postindexed", "Prefer post-indexed addressing mode"), clEnumValN(TTI::AMK_All, "all", "Consider all addressing modes")))
static bool isLegalUse(const TargetTransformInfo &TTI, Immediate MinOffset, Immediate MaxOffset, LSRUse::KindType Kind, MemAccessTy AccessTy, GlobalValue *BaseGV, Immediate BaseOffset, bool HasBaseReg, int64_t Scale)
Test whether we know how to expand the current formula.
static void DbgGatherSalvagableDVI(Loop *L, ScalarEvolution &SE, SmallVector< std::unique_ptr< DVIRecoveryRec >, 2 > &SalvageableDVISCEVs)
Identify and cache salvageable DVI locations and expressions along with the corresponding SCEV(s).
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE)
Return true if the given mul can be sign-extended without changing its value.
static const unsigned MaxSCEVSalvageExpressionSize
Limit the size of expression that SCEV-based salvaging will attempt to translate into a DIExpression.
static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Return true if this AddRec is already a phi in its loop.
static InstructionCost getScalingFactorCost(const TargetTransformInfo &TTI, const LSRUse &LU, const Formula &F, const Loop &L)
static cl::opt< bool > InsnsCost("lsr-insns-cost", cl::Hidden, cl::init(true), cl::desc("Add instruction count to a LSR cost model"))
static cl::opt< bool > StressIVChain("stress-ivchain", cl::Hidden, cl::init(false), cl::desc("Stress test LSR IV chains"))
static bool isAddressUse(const TargetTransformInfo &TTI, Instruction *Inst, Value *OperandVal)
Returns true if the specified instruction is using the specified value as an address.
static GlobalValue * ExtractSymbol(const SCEV *&S, ScalarEvolution &SE)
If S involves the addition of a GlobalValue address, return that symbol, and mutate S to point to a n...
static void updateDVIWithLocation(T &DbgVal, Value *Location, SmallVectorImpl< uint64_t > &Ops)
Overwrites DVI with the location and Ops as the DIExpression.
static bool isLegalAddImmediate(const TargetTransformInfo &TTI, Immediate Offset)
static cl::opt< cl::boolOrDefault > AllowDropSolutionIfLessProfitable("lsr-drop-solution", cl::Hidden, cl::desc("Attempt to drop solution if it is less profitable"))
static cl::opt< bool > EnableVScaleImmediates("lsr-enable-vscale-immediates", cl::Hidden, cl::init(true), cl::desc("Enable analysis of vscale-relative immediates in LSR"))
static Instruction * getFixupInsertPos(const TargetTransformInfo &TTI, const LSRFixup &Fixup, const LSRUse &LU, Instruction *IVIncInsertPos, DominatorTree &DT)
static const SCEV * getExprBase(const SCEV *S)
Return an approximation of this SCEV expression's "base", or NULL for any constant.
static bool isAlwaysFoldable(const TargetTransformInfo &TTI, LSRUse::KindType Kind, MemAccessTy AccessTy, GlobalValue *BaseGV, Immediate BaseOffset, bool HasBaseReg)
static llvm::PHINode * GetInductionVariable(const Loop &L, ScalarEvolution &SE, const LSRInstance &LSR)
Ideally pick the PHI IV inserted by ScalarEvolutionExpander.
static bool IsSimplerBaseSCEVForTarget(const TargetTransformInfo &TTI, ScalarEvolution &SE, const SCEV *Best, const SCEV *Reg, MemAccessTy AccessType)
static const unsigned MaxIVUsers
MaxIVUsers is an arbitrary threshold that provides an early opportunity for bail out.
static bool isHighCostExpansion(const SCEV *S, SmallPtrSetImpl< const SCEV * > &Processed, ScalarEvolution &SE)
Check if expanding this expression is likely to incur significant cost.
static Value * getValueOrPoison(WeakVH &VH, LLVMContext &C)
Cached location ops may be erased during LSR, in which case a poison is required when restoring from ...
static MemAccessTy getAccessType(const TargetTransformInfo &TTI, Instruction *Inst, Value *OperandVal)
Return the type of the memory being accessed.
static unsigned numLLVMArgOps(SmallVectorImpl< uint64_t > &Expr)
Returns the total number of DW_OP_llvm_arg operands in the expression.
static void DbgRewriteSalvageableDVIs(llvm::Loop *L, ScalarEvolution &SE, llvm::PHINode *LSRInductionVar, SmallVector< std::unique_ptr< DVIRecoveryRec >, 2 > &DVIToUpdate)
Obtain an expression for the iteration count, then attempt to salvage the dbg.value intrinsics.
static cl::opt< bool > EnablePhiElim("enable-lsr-phielim", cl::Hidden, cl::init(true), cl::desc("Enable LSR phi elimination"))
static void UpdateDbgValue(DVIRecoveryRec &DVIRec, SmallVectorImpl< Value * > &NewLocationOps, SmallVectorImpl< uint64_t > &NewExpr)
Write the new expression and new location ops for the dbg.value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Return true if the given addrec can be sign-extended without changing its value.
static void DoInitialMatch(const SCEV *S, Loop *L, SmallVectorImpl< const SCEV * > &Good, SmallVectorImpl< const SCEV * > &Bad, ScalarEvolution &SE)
Recursion helper for initialMatch.
static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, const LSRUse &LU, const Formula &F)
Check if the addressing mode defined by F is completely folded in LU at isel time.
static cl::opt< bool > LSRExpNarrow("lsr-exp-narrow", cl::Hidden, cl::init(false), cl::desc("Narrow LSR complex solution using" " expectation of registers number"))
static cl::opt< bool > FilterSameScaledReg("lsr-filter-same-scaled-reg", cl::Hidden, cl::init(true), cl::desc("Narrow LSR search space by filtering non-optimal formulae" " with the same ScaledReg and Scale"))
static void updateDVIWithLocations(T &DbgVal, SmallVectorImpl< Value * > &Locations, SmallVectorImpl< uint64_t > &Ops)
Overwrite DVI with locations placed into a DIArglist.
static cl::opt< unsigned > ComplexityLimit("lsr-complexity-limit", cl::Hidden, cl::init(std::numeric_limits< uint16_t >::max()), cl::desc("LSR search space complexity limit"))
static bool ReduceLoopStrength(Loop *L, IVUsers &IU, ScalarEvolution &SE, DominatorTree &DT, LoopInfo &LI, const TargetTransformInfo &TTI, AssumptionCache &AC, TargetLibraryInfo &TLI, MemorySSA *MSSA)
static bool isProfitableChain(IVChain &Chain, SmallPtrSetImpl< Instruction * > &Users, ScalarEvolution &SE, const TargetTransformInfo &TTI)
Return true if the number of registers needed for the chain is estimated to be less than the number r...
static const SCEV * CollectSubexprs(const SCEV *S, const SCEVConstant *C, SmallVectorImpl< const SCEV * > &Ops, const Loop *L, ScalarEvolution &SE, unsigned Depth=0)
Split S into subexpressions which can be pulled out into separate registers.
static const SCEV * getExactSDiv(const SCEV *LHS, const SCEV *RHS, ScalarEvolution &SE, bool IgnoreSignificantBits=false)
Return an expression for LHS /s RHS, if it can be determined and if the remainder is known to be zero...
static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst, Value *Operand, const TargetTransformInfo &TTI)
Return true if the IVInc can be folded into an addressing mode.
static const SCEV * getAnyExtendConsideringPostIncUses(ArrayRef< PostIncLoopSet > Loops, const SCEV *Expr, Type *ToTy, ScalarEvolution &SE)
Extend/Truncate Expr to ToTy considering post-inc uses in Loops.
static unsigned getSetupCost(const SCEV *Reg, unsigned Depth)
static cl::opt< unsigned > SetupCostDepthLimit("lsr-setupcost-depth-limit", cl::Hidden, cl::init(7), cl::desc("The limit on recursion depth for LSRs setup cost"))
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
uint64_t IntrinsicInst * II
PowerPC TLS Dynamic Call Fixup
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file defines the PointerIntPair class.
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
SI optimize exec mask operations pre RA
This file implements a set that has insertion order iteration characteristics.
This file implements the SmallBitVector class.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
static const unsigned UnknownAddressSpace
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Virtual Register Rewriter
static const uint32_t IV[8]
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
bool isNegative() const
Determine sign of this APInt.
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
unsigned getSignificantBits() const
Get the minimum bit size for this signed APInt.
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
int64_t getSExtValue() const
Get sign extended value.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
LLVM_ABI AnalysisUsage & addRequiredID(const void *ID)
AnalysisUsage & addPreservedID(const void *ID)
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
InstListType::iterator iterator
Instruction iterators...
void moveBefore(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it into the function that MovePos lives ...
LLVM_ABI bool isLandingPad() const
Return true if this basic block is a landing pad.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
BinaryOps getOpcode() const
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
bool isUnconditional() const
Value * getCondition() const
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
static LLVM_ABI bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI DIArgList * get(LLVMContext &Context, ArrayRef< ValueAsMetadata * > Args)
iterator_range< expr_op_iterator > expr_ops() const
static LLVM_ABI DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
unsigned getNumElements() const
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
LLVM_ABI bool isComplex() const
Return whether the location is computed on the expression stack, meaning it cannot be a simple regist...
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI LLVMContext & getContext()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI bool isKillLocation() const
void setRawLocation(Metadata *NewLocation)
Use of this should generally be avoided; instead, replaceVariableLocationOp and addVariableLocationOp...
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI Instruction * findNearestCommonDominator(Instruction *I1, Instruction *I2) const
Find the nearest instruction I that dominates both I1 and I2, in the sense that a result produced bef...
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
PointerType * getType() const
Global values are always pointers.
IVStrideUse - Keep track of one use of a strided induction variable.
void transformToPostInc(const Loop *L)
transformToPostInc - Transform the expression to post-inc form for the given loop.
Value * getOperandValToReplace() const
getOperandValToReplace - Return the Value of the operand in the user instruction that this IVStrideUs...
void setUser(Instruction *NewUser)
setUser - Assign a new user instruction for this use.
Analysis pass that exposes the IVUsers for a loop.
ilist< IVStrideUse >::const_iterator const_iterator
LLVM_ABI void print(raw_ostream &OS) const
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
LLVM_ABI bool isLifetimeStartOrEnd() const LLVM_READONLY
Return true if the instruction is a llvm.lifetime.start or llvm.lifetime.end marker.
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI Type * getAccessType() const LLVM_READONLY
Return the type this instruction accesses in memory, if any.
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
This class provides an interface for updating the loop pass manager based on mutations to the loop ne...
An instruction for reading from memory.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
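A small sketch of the Loop queries listed above, phrased as a cheap structural check (the predicate itself is hypothetical):

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
using namespace llvm;

static bool isOutermostSingleExitLoop(const Loop &L) {
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L.getExitingBlocks(ExitingBlocks);
  // getLoopDepth() == 1 means L is not nested inside another loop.
  return ExitingBlocks.size() == 1 && L.getLoopDepth() == 1;
}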
The legacy pass manager's analysis pass to compute loop information.
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Represents a single loop in the control flow graph.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
An analysis that produces MemorySSA for a function.
Encapsulates MemorySSA, including all data associated with memory accesses.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
void setIncomingValue(unsigned i, Value *V)
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
static unsigned getIncomingValueNumForOperand(unsigned i)
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
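The PHINode interface above is what a pass uses to materialize a new induction variable in a loop header. A minimal sketch, assuming Preheader and Latch are the header's two predecessors (all names hypothetical):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static PHINode *createIVPhi(Type *Ty, BasicBlock *Header,
                            BasicBlock *Preheader, Value *Start,
                            BasicBlock *Latch, Value *Next) {
  // PHIs must come first in a block, so insert at the header's begin().
  PHINode *PN =
      PHINode::Create(Ty, /*NumReservedValues=*/2, "iv", Header->begin());
  PN->addIncoming(Start, Preheader); // Value on entry to the loop.
  PN->addIncoming(Next, Latch);      // Value from the previous iteration.
  return PN;
}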
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStart() const
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
const Loop * getLoop() const
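LSR reasons about affine add recurrences of the form {Start,+,Step}<L>. A sketch of pulling those pieces out of a SCEVAddRecExpr:

#include "llvm/Analysis/ScalarEvolutionExpressions.h"
using namespace llvm;

static bool decomposeAffine(const SCEV *S, ScalarEvolution &SE,
                            const SCEV *&Start, const SCEV *&Step,
                            const Loop *&L) {
  const auto *AR = dyn_cast<SCEVAddRecExpr>(S);
  if (!AR || !AR->isAffine())
    return false; // Not of the form A + B*x with loop-invariant A and B.
  Start = AR->getStart();
  Step = AR->getStepRecurrence(SE);
  L = AR->getLoop();
  return true;
}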
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
This class uses information about analyze scalars to rewrite expressions in canonical form.
This node represents multiplication of some number of SCEVs.
bool hasNoUnsignedWrap() const
bool hasNoSignedWrap() const
ArrayRef< const SCEV * > operands() const
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents an analyzed expression in the program.
LLVM_ABI ArrayRef< const SCEV * > operands() const
Return operands of this SCEV expression.
unsigned short getExpressionSize() const
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
SCEVTypes getSCEVType() const
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
The main scalar evolution driver.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
LLVM_ABI const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
LLVM_ABI const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
LLVM_ABI bool containsUndefs(const SCEV *S) const
Return true if the SCEV expression contains an undef value.
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getVScale(Type *Ty)
LLVM_ABI bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI const SCEV * getUnknown(Value *V)
LLVM_ABI std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
LLVM_ABI bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool containsErasedValue(const SCEV *S) const
Return true if the SCEV expression contains a Value that has been optimised out and is now a nullptr.
LLVMContext & getContext() const
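Most of the ScalarEvolution entries above combine into one pattern: turn IR values into SCEVs, check loop invariance, and rebuild a recurrence. An illustrative sketch under the assumption that Base and Stride are integer values of the same width (the helper is hypothetical, not this pass's actual formula construction):

#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;

static const SCEV *buildAffineRec(ScalarEvolution &SE, Value *Base,
                                  Value *Stride, const Loop *L) {
  if (!SE.isSCEVable(Base->getType()) ||
      Base->getType() != Stride->getType())
    return nullptr;
  const SCEV *Start = SE.getSCEV(Base);
  const SCEV *Step = SE.getSCEV(Stride);
  // getAddRecExpr requires loop-invariant operands.
  if (!SE.isLoopInvariant(Start, L) || !SE.isLoopInvariant(Step, L))
    return nullptr;
  // {Start,+,Step}<L>; no wrap behaviour is claimed here.
  return SE.getAddRecExpr(Start, Step, L, SCEV::FlagAnyWrap);
}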
size_type size() const
Determine the number of elements in the SetVector.
iterator end()
Get an iterator to the end of the SetVector.
iterator begin()
Get an iterator to the beginning of the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
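SetVector is the usual deduplicating, insertion-order-preserving worklist. A short sketch (hypothetical helper):

#include "llvm/ADT/SetVector.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static void enqueueInstructionUsers(Instruction *Root,
                                    SetVector<Instruction *> &Worklist) {
  for (User *U : Root->users())
    if (auto *I = dyn_cast<Instruction>(U))
      Worklist.insert(I); // Returns false (no-op) if I is already present.
}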
int find_first() const
Returns the index of the first set bit, -1 if none of the bits are set.
iterator_range< const_set_bits_iterator > set_bits() const
int find_next(unsigned Prev) const
Returns the index of the next set bit following the "Prev" bit.
size_type size() const
Returns the number of bits in this bitvector.
void resize(unsigned N, bool t=false)
Grow or shrink the bitvector.
size_type count() const
Returns the number of bits which are set.
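The bit-vector entries above support masks such as a per-use register mask. A sketch of the set-bit iteration idiom:

#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void printSetBits(const SmallBitVector &Mask, raw_ostream &OS) {
  OS << Mask.count() << " of " << Mask.size() << " bits set:";
  for (unsigned Idx : Mask.set_bits()) // Visits only indices that are set.
    OS << ' ' << Idx;
  OS << '\n';
}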
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
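The pointer-set insert/count pair above is the standard visited-set guard when walking def-use chains. Minimal sketch:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Returns true the first time I is seen, false on every later call.
static bool markVisited(SmallPtrSetImpl<const Instruction *> &Visited,
                        const Instruction *I) {
  return Visited.insert(I).second;
}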
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
typename SuperClass::const_iterator const_iterator
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
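SmallVector is the default container throughout; reserve() plus push_back() keeps the common small case off the heap. A sketch (the helper is hypothetical):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static SmallVector<Instruction *, 8> instructionOperandsOf(Instruction &I) {
  SmallVector<Instruction *, 8> Ops;
  Ops.reserve(I.getNumOperands());
  for (Value *Op : I.operands())
    if (auto *OpI = dyn_cast<Instruction>(Op))
      Ops.push_back(OpI);
  return Ops;
}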
static StackOffset get(int64_t Fixed, int64_t Scalable)
An instruction for storing to memory.
Provides information about what library functions are available for the current target.
This class represents a truncation of integer types.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI int getFPMantissaWidth() const
Return the width of the mantissa of this type.
bool isVoidTy() const
Return true if this is 'void'.
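The Type queries above are the kind of checks an address-mode legality test performs on a memory access type. A trivial sketch:

#include "llvm/IR/Type.h"
using namespace llvm;

static unsigned addressSpaceOf(Type *AccessTy) {
  // getPointerAddressSpace() asserts on non-pointer types, so guard first.
  return AccessTy->isPointerTy() ? AccessTy->getPointerAddressSpace() : 0;
}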
void setOperand(unsigned i, Value *Val)
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< use_iterator > uses()
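Once a replacement value exists, the Value interface above finishes the rewrite. A sketch of the replace-then-defer-deletion pattern, with deletion deferred through WeakTrackingVH so later erasures do not leave dangling pointers:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/ValueHandle.h"
using namespace llvm;

static void replaceAndDefer(Value *Old, Value *New,
                            SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  if (Old == New)
    return;
  Old->replaceAllUsesWith(New);
  if (auto *I = dyn_cast<Instruction>(Old))
    DeadInsts.push_back(I); // Cleaned up later by dead-code deletion.
}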
A Value handle that may be null and that nulls itself out when the value it points to is deleted.
int getNumOccurrences() const
std::pair< iterator, bool > insert(const ValueT &V)
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream; it does not support seeking.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
class_match< const SCEVVScale > m_SCEVVScale()
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
class_match< const SCEVConstant > m_SCEVConstant()
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
bool match(const SCEV *S, const Pattern &P)
class_match< const Loop > m_Loop()
cst_pred_ty< is_specific_cst > m_scev_SpecificInt(uint64_t V)
Match an SCEV constant with a plain unsigned integer.
class_match< const SCEV > m_SCEV()
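The m_scev_* entries above belong to the SCEV pattern-matching helpers. Assuming they are declared in ScalarEvolutionPatternMatch.h under the SCEVPatternMatch namespace, a unit-stride check could look like this sketch:

#include "llvm/Analysis/ScalarEvolutionPatternMatch.h"
using namespace llvm;
using namespace llvm::SCEVPatternMatch;

// True if S is an affine add recurrence whose step is the constant 1.
static bool isUnitStrideAddRec(const SCEV *S) {
  return match(S, m_scev_AffineAddRec(m_SCEV(), m_scev_SpecificInt(1)));
}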
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
@ DW_OP_LLVM_convert
Only used in LLVM metadata.
Sequence
A sequence of states that a pointer may go through in which an objc_retain and objc_release are actua...
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< PhiNode * > Phi
NodeAddr< UseNode * > Use
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI iterator begin() const
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
unsigned KindType
For isa, dyn_cast, etc operations on TelemetryInfo.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
bool operator!=(uint64_t V1, const APInt &V2)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI char & LoopSimplifyID
bool isa_and_nonnull(const Y &Val)
bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant bit, stopping at the first 1.
DomTreeNodeBase< BasicBlock > DomTreeNode
AnalysisManager< Loop, LoopStandardAnalysisResults & > LoopAnalysisManager
The loop analysis manager.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Examine each PHI in the given block and delete it if it is dead.
LLVM_ABI void initializeLoopStrengthReducePass(PassRegistry &)
auto reverse(ContainerTy &&C)
LLVM_ABI const SCEV * denormalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops, ScalarEvolution &SE)
Denormalize S to be post-increment for all loops present in Loops.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI void SplitLandingPadPredecessors(BasicBlock *OrigBB, ArrayRef< BasicBlock * > Preds, const char *Suffix, const char *Suffix2, SmallVectorImpl< BasicBlock * > &NewBBs, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method transforms the landing pad, OrigBB, by introducing two new basic blocks into the function...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI const SCEV * normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops, ScalarEvolution &SE, bool CheckInvertible=true)
Normalize S to be post-increment for all loops present in Loops.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
iterator_range(Container &&) -> iterator_range< llvm::detail::IterOfRange< Container > >
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
DWARFExpression::Operation Op
LLVM_ABI Pass * createLoopStrengthReducePass()
LLVM_ABI BasicBlock * SplitCriticalEdge(Instruction *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
If this edge is a critical edge, insert a new node to split the critical edge.
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructionsPermissive(SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
Same functionality as RecursivelyDeleteTriviallyDeadInstructions, but allow instructions that are not...
constexpr unsigned BitWidth
LLVM_ABI bool formLCSSAForInstructions(SmallVectorImpl< Instruction * > &Worklist, const DominatorTree &DT, const LoopInfo &LI, ScalarEvolution *SE, SmallVectorImpl< PHINode * > *PHIsToRemove=nullptr, SmallVectorImpl< PHINode * > *InsertedPHIs=nullptr)
Ensures LCSSA form for every instruction from the Worklist in the scope of innermost containing loop.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
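isa, cast, and dyn_cast are the casting idioms used on SCEV and IR nodes throughout. A sketch on SCEV nodes (the helper itself is hypothetical):

#include "llvm/Analysis/ScalarEvolutionExpressions.h"
using namespace llvm;

static const SCEV *secondMulOperandOrSelf(const SCEV *S) {
  // dyn_cast<X> returns nullptr instead of asserting when S is not an X;
  // isa<X>(S) followed by cast<X>(S) is the equivalent two-step form.
  if (const auto *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getNumOperands() == 2)
      return Mul->getOperand(1);
  return S;
}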
LLVM_ABI PreservedAnalyses getLoopPassPreservedAnalyses()
Returns the minimum set of Analyses that all loop passes must preserve.
SmallPtrSet< const Loop *, 2 > PostIncLoopSet
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI int rewriteLoopExitValues(Loop *L, LoopInfo *LI, TargetLibraryInfo *TLI, ScalarEvolution *SE, const TargetTransformInfo *TTI, SCEVExpander &Rewriter, DominatorTree *DT, ReplaceExitVal ReplaceExitValue, SmallVector< WeakTrackingVH, 16 > &DeadInsts)
If the final value of any expressions that are recurrent in the loop can be computed,...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
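The all_of/any_of/none_of/find/is_contained entries are LLVM's range-based wrappers around the STL algorithms; they take a range instead of an explicit begin/end pair. Two small sketches (hypothetical helpers):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static bool allInBlock(ArrayRef<Instruction *> Insts, const BasicBlock *BB) {
  return all_of(Insts,
                [BB](const Instruction *I) { return I->getParent() == BB; });
}

static bool usesValue(const Instruction &I, const Value *V) {
  return is_contained(I.operands(), V); // Scans I's operand range for V.
}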
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Attributes of a target dependent hardware loop.
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
TargetTransformInfo & TTI
Information about a load/store intrinsic defined by the target.
Value * PtrVal
This is the pointer that the intrinsic is loading from or storing to.