#include "llvm/Config/llvm-config.h"

#define DEBUG_TYPE "memoryssa"

static cl::opt<unsigned>
    MaxCheckLimit("memssa-check-limit", cl::Hidden, cl::init(100),
                  cl::desc("The maximum number of stores/phis MemorySSA "
                           "will consider trying to walk past (default = 100)"));

#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif
// An assembly annotator that prints MemorySSA information as IR comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  // For a basic block: print its MemoryPhi, if any.
  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  // For an instruction: print its MemoryUse or MemoryDef, if any.
  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

// MemorySSAWalkerAnnotatedWriter additionally prints, for each access, the
// clobbering access computed by the walker (excerpt).
MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
    : MSSA(M), Walker(M->getWalker()), BAA(M->getAA()) {}

// In emitInstructionAnnot:
OS << "; " << *MA << "\n";
// ...
OS << " - clobbered by ";
class MemoryLocOrCall {
public:
  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      // A fence has no MemoryLocation; everything else does.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }
  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;
    if (!IsCall)
      return Loc == Other.Loc;
    if (Call->getCalledOperand() != Other.Call->getCalledOperand())
      return false;
    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }
  // getHashValue: hash a call by its callee and arguments, and a non-call by
  // its MemoryLocation (excerpt).
  hash_code hash =
      hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                    MLOC.getCall()->getCalledOperand()));

  for (const Value *Arg : MLOC.getCall()->args())
    hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
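// A minimal usage sketch (hedged; MemlocStackInfo is declared later in this
// file): the DenseMapInfo specialization above lets MemoryLocOrCall key a
// DenseMap, which is how OptimizeUses caches per-location walk state below.
//   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
//   MemoryLocOrCall UseMLOC(MU);            // MU: a MemoryUse being optimized
//   auto &LocInfo = LocStackInfo[UseMLOC];  // created on first lookup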
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;

  // A seq_cst load cannot be moved above an acquire-or-stronger load.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}
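// Illustrative IR (hypothetical): %b below may not be hoisted above %a,
// because %a is an acquire load; two plain unordered loads, by contrast,
// are freely reorderable:
//   %a = load atomic i32, ptr %p acquire, align 4
//   %b = load i32, ptr %q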
template <typename AliasAnalysisType>
static bool instructionClobbersQuery(const MemoryDef *MD,
                                     const MemoryLocation &UseLoc,
                                     const Instruction *UseInst,
                                     AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics show up as affecting memory, but they are just markers.
    switch (II->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
      return false;
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }

  if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
    ModRefInfo I = AA.getModRefInfo(DefInst, CB);
    return isModOrRefSet(I);
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
      return !areLoadsReorderable(UseLoad, DefLoad);

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  return isModSet(I);
}
template <typename AliasAnalysisType>
static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUse *MU,
                                     const MemoryLocOrCall &UseMLOC,
                                     AliasAnalysisType &AA) {
struct UpwardsMemoryQuery {
  bool IsCall = false;                // query started at a call
  MemoryLocation StartingLoc;         // empty if IsCall
  const Instruction *Inst = nullptr;  // the instruction queried about
  const MemoryAccess *OriginalAccess = nullptr;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};
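// Sketch of how a query is formed (hedged): for a load LI, the walker builds
// UpwardsMemoryQuery Q(LI, MSSA.getMemoryAccess(LI)); Q.IsCall is then false
// and Q.StartingLoc is MemoryLocation::get(LI).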
static bool isUseTriviallyOptimizableToLiveOnEntry(BatchAAResults &AA,
                                                   const Instruction *I) {
  // Loads of unchangeable memory can't be clobbered.
  if (auto *LI = dyn_cast<LoadInst>(I))
    return I->hasMetadata(LLVMContext::MD_invariant_load) ||
           !isModSet(AA.getModRefInfoMask(MemoryLocation::get(LI)));
  return false;
}
// checkClobberSanity: verify that Start is clobbered by ClobberAt, and that
// nothing in between can clobber StartLoc (excerpt).
static LLVM_ATTRIBUTE_UNUSED void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, BatchAAResults &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  // Walk all paths in the def chain from Start up to ClobberAt.
  while (!Worklist.empty()) {
    // ...
    if (MA == ClobberAt) {
      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // ClobberAt must be a def that actually clobbers, or liveOnEntry.
        // ...
      }
      continue;
    }

    // We should never hit a clobber before reaching ClobberAt.
    if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
      assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
             "Found clobber before reaching ClobberAt!");
      continue;
    }

    if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
      assert(MU == Start &&
             "Can only find use in def chain if Start is a use");
      continue;
    }

    assert(isa<MemoryPhi>(MA));
    // ...
  }

  // If ClobberAt is a phi, the walk may legitimately have stopped early.
  if (AllowImpreciseClobber)
    return;

  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}
  struct DefPath {
    MemoryLocation Loc;
    MemoryAccess *First;
    MemoryAccess *Last;
    std::optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            std::optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            std::optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

  // In addSearches:
  assert(From->getNumOperands() && "Phi with no operands?");
  struct UpwardsWalkResult {
    // The "Result" of the walk: either a clobber or the last thing we walked.
    MemoryAccess *Result;
    bool IsKnownClobber;
  };

  // Walk to the next Phi or clobber in the def chain starting at Desc.Last,
  // updating Desc.Last as it goes; optionally stop at StopAt/SkipStopAt.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // The budget must be at least 1 for the loop below; temporarily bump a
    // zero budget and restore it before returning.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true};

        if (!--*UpwardWalkLimit)
          return {Current, true};

        if (instructionClobbersQuery(MD, Desc.Loc, Query->Inst, *AA))
          return {MD, true};
      }
    }

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false};
  }
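// Budget semantics in short (hedged sketch; Walker/MA/BAA hypothetical):
// callers pass a walk budget by reference, and when it hits zero the walker
// gives up and conservatively reports the current access as a clobber:
//   unsigned UpwardWalkLimit = MaxCheckLimit;
//   MemoryAccess *C =
//       Walker->getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit);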
  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefs = make_range(
        upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT), upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  // getBlockingAccess: continue each paused search until it hits a clobber
  // that blocks optimization, or reaches StopWhere (excerpt).
  std::optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    while (!PausedSearches.empty()) {
      // ...
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
        // We hit a real clobber; if it doesn't dominate StopWhere, it blocks
        // the whole optimization.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target; pause this path (unless it stopped at the
        // self-access we were told to skip).
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return std::nullopt;
  }
  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.has_value() != O.N.has_value())
        return false;
      return !N || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    std::optional<ListIndex> N;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }
  struct OptznResult {
    TerminatedPath PrimaryClobber;
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N.
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }
769 "Reset the optimization state.");
771 Paths.emplace_back(Loc, Start, Phi, std::nullopt);
774 auto PriorPathsSize =
Paths.size();
780 addSearches(Phi, PausedSearches, 0);
786 auto Dom =
Paths.begin();
787 for (
auto I = std::next(Dom),
E =
Paths.end();
I !=
E; ++
I)
788 if (!MSSA.
dominates(
I->Clobber, Dom->Clobber))
792 std::iter_swap(
Last, Dom);
798 "liveOnEntry wasn't treated as a clobber?");
800 const auto *
Target = getWalkTarget(Current);
803 assert(
all_of(TerminatedPaths, [&](
const TerminatedPath &
P) {
810 if (std::optional<TerminatedPath> Blocker = getBlockingAccess(
811 Target, PausedSearches, NewPaused, TerminatedPaths)) {
815 auto Iter =
find_if(def_path(Blocker->LastNode), [&](
const DefPath &
N) {
816 return defPathIndex(N) < PriorPathsSize;
818 assert(Iter != def_path_iterator());
820 DefPath &CurNode = *Iter;
821 assert(CurNode.Last == Current);
848 TerminatedPath
Result{CurNode.Last, defPathIndex(CurNode)};
855 if (NewPaused.
empty()) {
856 MoveDominatedPathToEnd(TerminatedPaths);
858 return {
Result, std::move(TerminatedPaths)};
863 for (ListIndex Paused : NewPaused) {
864 UpwardsWalkResult WR = walkToPhiOrClobber(
Paths[Paused]);
865 if (WR.IsKnownClobber)
869 DefChainEnd = WR.Result;
872 if (!TerminatedPaths.
empty()) {
878 assert(DefChainEnd &&
"Failed to find dominating phi/liveOnEntry");
883 for (
const TerminatedPath &TP : TerminatedPaths) {
886 if (DT.
dominates(ChainBB, TP.Clobber->getBlock()))
893 if (!Clobbers.
empty()) {
894 MoveDominatedPathToEnd(Clobbers);
896 return {
Result, std::move(Clobbers)};
900 [&](ListIndex
I) {
return Paths[
I].Last == DefChainEnd; }));
903 auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
905 PriorPathsSize =
Paths.size();
906 PausedSearches.
clear();
907 for (ListIndex
I : NewPaused)
908 addSearches(DefChainPhi, PausedSearches,
I);
911 Current = DefChainPhi;
  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, DominatorTree &DT)
      : MSSA(MSSA), DT(DT) {}
  // Finds the nearest clobber for the given query, optimizing phis if
  // possible.
  MemoryAccess *findClobber(BatchAAResults &BAA, MemoryAccess *Start,
                            UpwardsMemoryQuery &Q, unsigned &UpWalkLimit) {
    AA = &BAA;
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist; if handed one, silently grab
    // its def (which also ensures we never cache uses).
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, std::nullopt);
    // Fast path for the common case: no phi optimization necessary.
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, BAA);
#endif
    return Result;
  }
struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};
class ClobberWalkerBase {
  ClobberWalker Walker;
  MemorySSA *MSSA;

public:
  // SkipSelf controls whether the search may stop at the queried access
  // itself; UseInvariantGroup enables the !invariant.group fast path.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, BatchAAResults &,
                                              unsigned &, bool SkipSelf,
                                              bool UseInvariantGroup = true);
};

// CachingWalker simply forwards, never skipping the queried access:
return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false);
// ... with an explicit MemoryLocation:
return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
// ... and without the invariant-group fast path:
return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false, false);
void invalidateInfo(MemoryAccess *MA) override {
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->resetOptimized();
}

// SkipSelfWalker: identical forwarding, but with SkipSelf = true:
return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, true);
return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);

void invalidateInfo(MemoryAccess *MA) override {
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->resetOptimized();
}
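// A minimal usage sketch (hypothetical client code, standard walker API):
// ask the default walker what clobbers a load; LI is a LoadInst* in the
// analyzed function and MSSA a built MemorySSA.
//   MemorySSAWalker *W = MSSA.getWalker();
//   MemoryAccess *Clobber =
//       W->getClobberingMemoryAccess(MSSA.getMemoryAccess(LI));
//   if (MSSA.isLiveOnEntryDef(Clobber))
//     ; // no access in this function clobbers the loaded location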
void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors.
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      bool ReplacementDone = false;
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        if (Phi->getIncomingBlock(I) == BB) {
          Phi->setIncomingValue(I, IncomingVal);
          ReplacementDone = true;
        }
      (void)ReplacementDone;
      assert(ReplacementDone && "Incomplete phi during partial rename");
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;
  // The visited check must happen whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this block; the incoming value is the last def
        // seen in it, if any.
        if (const DefsList *BlockDefs = getBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // Point uses/defs at liveOnEntry; phis are simply erased.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}
MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {
  // Build MemorySSA using a batch alias analysis, which reuses the internal
  // state AA collects across queries.
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA);
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}

MemorySSA::~MemorySSA() {
  // Drop all our references.
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
  if (Res.second)
    Res.first->second = std::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
  if (Res.second)
    Res.first->second = std::make_unique<DefsList>();
  return Res.first->second.get();
}
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker *Walker, BatchAAResults *BAA,
               DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  void optimizeUses();

private:
  // Tracks where a given memory location sits in the version stack.
  struct MemlocStackInfo {
    // Versions of the stack; these increase on pushes and pops.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // Lower bound of stack entries to check, i.e. where the last walk ended.
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // Where the last walk for this location ended.
    unsigned long LastKill;
    bool LastKillValid;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &,
                           unsigned long &, SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  CachingWalker *Walker;
  BatchAAResults *AA;
  DominatorTree *DT;
};
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  // If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // incrementing the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (MU->isOptimized())
      continue;

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, stuff was removed from the top of the stack
    // when changing blocks; the lower bound or last-kill info may need a
    // reset.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us,
      // reset it.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // Only the StackEpoch changed: just check the new things on the stack;
      // the lower bound stays the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
    }

    // At this point, LastKill and LowerBound are in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, the LastKill info is not valid.
      LocInfo.LastKillValid = false;
      continue;
    }

    bool FoundClobberResult = false;
    unsigned UpwardWalkLimit = MaxCheckLimit;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, and go there.
        MemoryAccess *Result =
            Walker->getClobberingMemoryAccessWithoutInvariantGroup(
                MU, *AA, UpwardWalkLimit);
        // We are guaranteed to find it or something is wrong.
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
        FoundClobberResult = true;
        break;
      }
      --UpperBound;
    }

    // Note: phi walking may cause UpperBound to fall below LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      MU->setDefiningAccess(VersionStack[UpperBound], true);
      LocInfo.LastKill = UpperBound;
    } else {
      // We checked all the new entries, so LastKill is still reachable.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}
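// Usage sketch (hedged; standard MemorySSA API): clients that plan many
// walker queries can run the batch optimization above once for the whole
// function:
//   MSSA.ensureOptimizedUses(); // MSSA: a built MemorySSA for the function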
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}

void MemorySSA::placePHINodes(
    const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhis should go.
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks)
    createMemoryPhi(BB);
}
void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
  // "liveOnEntry" represents memory defined before the function begins
  // (arguments, globals); it is not inserted into the IR.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
                                     &StartingPoint, NextID++));

  // Per block, figure out where defs occur and chain the accesses together.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
      if (!MUD)
        continue;
      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDefs/MemoryUses; Visited ends
  // up holding all reachable blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  // Uses in unreachable blocks go to liveOnEntry.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}
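// A minimal sketch of obtaining MemorySSA under the new pass manager
// (ExamplePass is hypothetical; the analysis API is the standard one):
//   PreservedAnalyses ExamplePass::run(Function &F,
//                                      FunctionAnalysisManager &AM) {
//     MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//     MSSA.verifyMemorySSA();
//     return PreservedAnalyses::all();
//   }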
MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
  if (Walker)
    return Walker.get();

  if (!WalkerBase)
    WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);

  Walker = std::make_unique<CachingWalker>(this, WalkerBase.get());
  return Walker.get();
}

MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
  if (SkipWalker)
    return SkipWalker.get();

  if (!WalkerBase)
    WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);

  SkipWalker = std::make_unique<SkipSelfWalker>(this, WalkerBase.get());
  return SkipWalker.get();
}
void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
                                        const BasicBlock *BB,
                                        InsertionPlace Point) {
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // A phi node goes first; anything else goes after any existing phis.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  BlockNumberingValid.erase(BB);
}
void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // Inserting at the end or before a def is easy; before a use, we must
    // hunt for the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end.
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  BlockNumberingValid.erase(BB);
}
void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables, remove from the lists.
  removeFromLists(What, false);
  // Moving a MemoryDef does not implicitly invalidate its optimized state.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    MD->resetOptimized();
  What->setBlock(BB);
}

void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where) {
  prepareForMoveTo(What, BB);
  insertIntoListsBefore(What, BB, Where);
}

void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point) {
  if (isa<MemoryPhi>(What)) {
    assert(Point == Beginning &&
           "Can only move a Phi at the beginning of the block");
    // Update the lookup table entry for the phi's block.
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    (void)Inserted;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  }
  prepareForMoveTo(What, BB);
  insertIntoListsForBlock(What, BB, Point);
}

// In createMemoryPhi:
ValueToMemoryAccess[BB] = Phi;
MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition,
                                               const MemoryUseOrDef *Template,
                                               bool CreationMustSucceed) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
  if (CreationMustSucceed)
    assert(NewAccess != nullptr && "Tried to create a memory access for a "
                                   "non-memory touching instruction");
  if (NewAccess) {
    assert((!Definition || !isa<MemoryUse>(Definition)) &&
           "A use cannot be a defining access");
    NewAccess->setDefiningAccess(Definition);
  }
  return NewAccess;
}
static bool isOrdered(const Instruction *I) {
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isUnordered())
      return true;
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isUnordered())
      return true;
  }
  return false;
}
template <typename AliasAnalysisType>
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
                                           AliasAnalysisType *AAP,
                                           const MemoryUseOrDef *Template) {
  // These intrinsics show up as affecting memory but are only markers;
  // ignore their fake memory dependencies here.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
      return nullptr;
    }
  }

  // Don't model instructions that may not read from or write to memory.
  if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
    return nullptr;

  bool Def, Use;
  if (Template) {
    Def = isa<MemoryDef>(Template);
    Use = isa<MemoryUse>(Template);
#if !defined(NDEBUG)
    ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt);
    bool DefCheck, UseCheck;
    DefCheck = isModSet(ModRef) || isOrdered(I);
    UseCheck = isRefSet(ModRef);
    // Memory accesses should only be reduced: AA may return better results
    // as a consequence of transformations.
    assert((Def == DefCheck || !DefCheck) &&
           "Memory accesses should only be reduced");
    if (!Def && Use != UseCheck) {
      // The new access should not have more power than the template access.
      assert(!UseCheck && "Invalid template");
    }
#endif
  } else {
    // Find out what affect this instruction has on memory.
    ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt);
    Def = isModSet(ModRef) || isOrdered(I);
    Use = isRefSet(ModRef);
  }

  // It's possible for an instruction to not modify memory at all.
  if (!Def && !Use)
    return nullptr;

  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess[I] = MUD;
  return MUD;
}
1801 "Trying to remove memory access that still has uses");
1802 BlockNumbering.erase(MA);
1803 if (
auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1806 if (!isa<MemoryUse>(MA))
1810 if (
const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1815 auto VMA = ValueToMemoryAccess.find(MemoryInst);
1816 if (VMA->second == MA)
1817 ValueToMemoryAccess.erase(VMA);
void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
  BasicBlock *BB = MA->getBlock();
  // The access list owns the reference, so erase from the non-owning defs
  // list first.
  if (!isa<MemoryUse>(MA)) {
    auto DefsIt = PerBlockDefs.find(BB);
    std::unique_ptr<DefsList> &Defs = DefsIt->second;
    Defs->remove(*MA);
    if (Defs->empty())
      PerBlockDefs.erase(DefsIt);
  }

  // erase deletes the access; remove only unlinks it.
  auto AccessIt = PerBlockAccesses.find(BB);
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  if (ShouldDelete)
    Accesses->erase(MA);
  else
    Accesses->remove(MA);

  if (Accesses->empty()) {
    PerBlockAccesses.erase(AccessIt);
    BlockNumberingValid.erase(BB);
  }
}
void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
#endif

void MemorySSA::verifyMemorySSA(VerificationLevel VL) const {
#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
  VL = VerificationLevel::Full;
#endif
#ifndef NDEBUG
  verifyOrderingDominationAndDefUses(F, VL);
  verifyDominationNumbers(F);
  if (VL == VerificationLevel::Full)
    verifyPrevDefInPhis(F);
#endif
}
void MemorySSA::verifyPrevDefInPhis(Function &F) const {
  for (const BasicBlock &BB : F) {
    if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        auto *Pred = Phi->getIncomingBlock(I);
        auto *IncAcc = Phi->getIncomingValue(I);
        // If Pred is reachable, walk up the IDoms looking for the last def,
        // which must match the incoming access.
        if (auto *DTNode = DT->getNode(Pred)) {
          while (DTNode) {
            if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
              auto *LastAcc = &*(--DefList->end());
              assert(LastAcc == IncAcc &&
                     "Incorrect incoming access into phi.");
              break;
            }
            DTNode = DTNode->getIDom();
          }
        }
        // ...
      }
    }
  }
}
void MemorySSA::verifyDominationNumbers(const Function &F) const {
  if (BlockNumberingValid.empty())
    return;

  SmallPtrSet<const BasicBlock *, 16> ValidBlocks(BlockNumberingValid.begin(),
                                                  BlockNumberingValid.end());
  for (const BasicBlock &BB : F) {
    if (!ValidBlocks.count(&BB))
      continue;

    ValidBlocks.erase(&BB);

    const AccessList *Accesses = getBlockAccesses(&BB);
    // An empty block has valid numbering.
    if (!Accesses)
      continue;

    // Block numbering starts at 1.
    unsigned long LastNumber = 0;
    for (const MemoryAccess &MA : *Accesses) {
      auto ThisNumberIter = BlockNumbering.find(&MA);
      assert(ThisNumberIter != BlockNumbering.end() &&
             "MemoryAccess has no domination number in a valid block!");

      unsigned long ThisNumber = ThisNumberIter->second;
      assert(ThisNumber > LastNumber &&
             "Domination numbers should be strictly increasing!");
      LastNumber = ThisNumber;
    }
  }

  assert(ValidBlocks.empty() &&
         "All valid BasicBlocks should exist in F -- dangling pointers?");
}
void MemorySSA::verifyOrderingDominationAndDefUses(Function &F,
                                                   VerificationLevel VL) const {
  // Walk all the blocks, comparing what the lookups think against what the
  // access lists think, including the order of accesses (excerpt).
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  SmallVector<MemoryAccess *, 32> ActualDefs;
  for (BasicBlock &B : F) {
    const AccessList *AL = getBlockAccesses(&B);
    const auto *DL = getBlockDefs(&B);
    MemoryPhi *Phi = getMemoryAccess(&B);
    if (Phi) {
      ActualAccesses.push_back(Phi);
      ActualDefs.push_back(Phi);
      // Verify domination.
      for (const Use &U : Phi->uses()) {
        assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
      }
      // Verify def-uses for full verification.
      if (VL == VerificationLevel::Full) {
        assert(Phi->getNumOperands() == pred_size(&B) &&
               "Incomplete MemoryPhi Node");
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          verifyUseInDefs(Phi->getIncomingValue(I), Phi);
          assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
                 "Incoming phi block not a block predecessor");
        }
      }
    }

    for (Instruction &I : B) {
      MemoryUseOrDef *MA = getMemoryAccess(&I);
      assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
             "We have memory affecting instructions "
             "in this block but they are not in the "
             "access list or defs list");
      if (MA) {
        ActualAccesses.push_back(MA);
        if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
          ActualDefs.push_back(MA);
          for (const Use &U : MD->uses()) {
            assert(dominates(MD, U) &&
                   "Memory Def does not dominate its uses");
          }
        }
      }
    }

    // Either we really have no accesses, or we have both accesses and an
    // access list (and likewise for defs).
    if (!AL && !DL)
      continue;
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    assert((DL || ActualDefs.size() == 0) &&
           "Either we should have a defs list, or we should have no defs");
    assert((!DL || DL->size() == ActualDefs.size()) &&
           "We don't have the same number of defs in the block as on the "
           "def list");
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    ActualAccesses.clear();
    if (DL) {
      auto DLI = DL->begin();
      auto ADI = ActualDefs.begin();
      while (DLI != DL->end() && ADI != ActualDefs.end()) {
        assert(&*DLI == *ADI && "Not the same defs in the same order");
        ++DLI;
        ++ADI;
      }
    }
    ActualDefs.clear();
  }
}
2059 "Null def but use not point to live on entry def");
2062 "Did not find use in def's use list");
// Perform a local numbering on blocks so that instruction ordering can be
// determined in constant time.
void MemorySSA::renumberBlock(const BasicBlock *B) const {
  // The pre-increment ensures the numbers really start at 1.
  unsigned long CurrentNumber = 0;
  const AccessList *AL = getBlockAccesses(B);
  assert(AL != nullptr && "Asking to renumber an empty block");
  for (const auto &I : *AL)
    BlockNumbering[&I] = ++CurrentNumber;
  BlockNumberingValid.insert(B);
}

// Determine, for two memory accesses in the same block, whether Dominator
// dominates Dominatee.
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
                                 const MemoryAccess *Dominatee) const {
  const BasicBlock *DominatorBlock = Dominator->getBlock();
  assert((DominatorBlock == Dominatee->getBlock()) &&
         "Asking for local domination when accesses are in different blocks!");
  // A node dominates itself.
  if (Dominatee == Dominator)
    return true;

  // liveOnEntry is never dominated and dominates everything.
  if (isLiveOnEntryDef(Dominatee))
    return false;
  if (isLiveOnEntryDef(Dominator))
    return true;

  if (!BlockNumberingValid.count(DominatorBlock))
    renumberBlock(DominatorBlock);

  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
  assert(DominatorNum != 0 && "Block was not numbered properly");
  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
  assert(DominateeNum != 0 && "Block was not numbered properly");
  return DominatorNum < DominateeNum;
}
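// Usage sketch (A and B are hypothetical MemoryAccess pointers): clients ask
// the global question through MSSA.dominates(A, B); it falls back to
// locallyDominates() only when both accesses share a block, using the lazy
// numbering above.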
bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const MemoryAccess *Dominatee) const {
  if (Dominator == Dominatee)
    return true;
  // ... otherwise defer to the dominator tree across blocks, or to
  // locallyDominates() within one block.
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const Use &Dominatee) const {
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
    BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
    // The def must dominate the incoming block of the phi.
    if (UseBB != Dominator->getBlock())
      return DT->dominates(Dominator->getBlock(), UseBB);
    // If the blocks are the same, compare locally.
    return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
  }
  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
}
void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}

void MemoryDef::print(raw_ostream &OS) const {
  auto printID = [&OS](MemoryAccess *A) {
    if (A && A->getID())
      OS << A->getID();
    else
      OS << LiveOnEntryStr;
  };

  OS << getID() << " = MemoryDef(";
  printID(getDefiningAccess());
  OS << ")";
  if (isOptimized()) {
    OS << "->";
    printID(getOptimized());
  }
}

void MemoryPhi::print(raw_ostream &OS) const {
  ListSeparator LS(",");
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    // Each operand prints as {block,ID}.
    MemoryAccess *MA = cast<MemoryAccess>(Op);
    OS << LS << '{';
    // ... print the incoming block ...
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}

// MemoryUse::print (excerpt):
MemoryAccess *UO = getDefiningAccess();
if (UO && UO->getID())
  OS << UO->getID();
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemoryAccess::dump() const {
  print(dbgs());
  dbgs() << "\n";
}
#endif

// Bundles the Function with a MemorySSAAnnotatedWriter for the dot-cfg-mssa
// printer.
class DOTFuncMSSAInfo {
  const Function &F;
  MemorySSAAnnotatedWriter MSSAWriter;

public:
  DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
      : F(F), MSSAWriter(&MSSA) {}

  const Function *getFunction() { return &F; }
  MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }
};
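// Example of the printed form these methods produce (illustrative IR):
//   ; 1 = MemoryDef(liveOnEntry)
//   store i32 0, ptr %p
//   ; MemoryUse(1)
//   %v = load i32, ptr %p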
  static std::string getNodeLabel(const BasicBlock *Node,
                                  DOTFuncMSSAInfo *CFGInfo) {
    return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(
        Node, nullptr,
        [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void {
          BB.print(OS, &CFGInfo->getWriter(), true, true);
        },
        // Keep only the comment lines that carry MemorySSA annotations.
        [](std::string &S, unsigned &I, unsigned Idx) -> void {
          std::string Str = S.substr(I, Idx - I);
          StringRef SR = Str;
          if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||
              SR.count("MemoryUse("))
            return;
          DOTGraphTraits<DOTFuncInfo *>::eraseComment(S, I, Idx);
        });
  }

  std::string getNodeAttributes(const BasicBlock *Node,
                                DOTFuncMSSAInfo *CFGInfo) {
    return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos
               ? "style=filled, fillcolor=lightpink"
               : "";
  }
bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  // ...
  return false;
}

// MemorySSAPrinterPass::run (excerpt):
OS << "MemorySSA for function: " << F.getName() << "\n";

// MemorySSAWalkerPrinterPass::run (excerpt):
OS << "MemorySSA (walker) for function: " << F.getName() << "\n";
MemorySSAWalkerAnnotatedWriter Writer(&MSSA);
F.print(OS, &Writer);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc,
    BatchAAResults &BAA, unsigned &UpwardWalkLimit) {
  assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");

  Instruction *I = nullptr;
  if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
    if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
      return StartingUseOrDef;

    I = StartingUseOrDef->getMemoryInst();

    // Conservatively, fences are always clobbers, so don't perform the walk
    // if we hit a fence.
    if (!isa<CallBase>(I) && I->isFenceLike())
      return StartingUseOrDef;
  }

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingAccess;
  Q.StartingLoc = Loc;
  Q.Inst = nullptr;

  // Do not walk to the def of a def here: we were handed something we
  // already believe is the clobbering access.
  MemoryAccess *Clobber =
      Walker.findClobber(BAA, StartingAccess, Q, UpwardWalkLimit);
  LLVM_DEBUG({
    dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
    if (I)
      dbgs() << "  for instruction " << *I << "\n";
    dbgs() << "  is " << *Clobber << "\n";
  });
  return Clobber;
}
static const Instruction *
getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT) {
  if (!I.hasMetadata(LLVMContext::MD_invariant_group) || I.isVolatile())
    return nullptr;

  // Bitcasts and zero GEPs are considered the same pointer value. Start by
  // stripping them, then recursively look at all of the pointer's users.
  Value *PointerOperand = getLoadStorePointerOperand(&I)->stripPointerCasts();

  // It's not safe to walk the use list of a global value: function passes
  // aren't allowed to look outside their functions.
  if (isa<Constant>(PointerOperand))
    return nullptr;

  // Queue of pointers equivalent to the load/store operand.
  SmallVector<const Value *, 8> PointerUsesQueue;
  PointerUsesQueue.push_back(PointerOperand);

  const Instruction *MostDominatingInstruction = &I;

  while (!PointerUsesQueue.empty()) {
    const Value *Ptr = PointerUsesQueue.pop_back_val();
    assert(Ptr && !isa<GlobalValue>(Ptr) &&
           "Null or GlobalValue should not be inserted");

    for (const User *Us : Ptr->users()) {
      auto *U = dyn_cast<Instruction>(Us);
      if (!U || U == &I || !DT.dominates(U, MostDominatingInstruction))
        continue;

      // Add bitcasts and zero GEPs to the queue.
      if (isa<BitCastInst>(U)) {
        PointerUsesQueue.push_back(U);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        if (GEP->hasAllZeroIndices())
          PointerUsesQueue.push_back(U);
        continue;
      }

      // A load/store with invariant.group metadata and the same pointer
      // operand means the pointed-to value did not change.
      if (U->hasMetadata(LLVMContext::MD_invariant_group) &&
          getLoadStorePointerOperand(U) == Ptr && !U->isVolatile()) {
        MostDominatingInstruction = U;
      }
    }
  }
  return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction;
}
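// Illustrative IR (hypothetical): given matching !invariant.group metadata
// on the same pointer, the store below acts as the clobber for the load,
// letting the walker skip everything in between:
//   store i32 42, ptr %p, !invariant.group !0
//   ...
//   %v = load i32, ptr %p, !invariant.group !0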
MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
    MemoryAccess *MA, BatchAAResults &BAA, unsigned &UpwardWalkLimit,
    bool SkipSelf, bool UseInvariantGroup) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  if (UseInvariantGroup) {
    if (auto *I = getInvariantGroupClobberingInstruction(
            *StartingAccess->getMemoryInst(), MSSA->getDomTree())) {
      assert(isa<LoadInst>(I) || isa<StoreInst>(I));

      auto *ClobberMA = MSSA->getMemoryAccess(I);
      assert(ClobberMA);
      if (isa<MemoryUse>(ClobberMA))
        return ClobberMA->getDefiningAccess();
      return ClobberMA;
    }
  }

  bool IsOptimized = false;

  // If this is an already optimized use or def, return the optimized result.
  if (StartingAccess->isOptimized()) {
    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
      return StartingAccess->getOptimized();
    IsOptimized = true;
  }

  const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with a fence: it conservatively clobbers all
  // memory and has no location to disambiguate with.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q(I, StartingAccess);

  if (isUseTriviallyOptimizableToLiveOnEntry(BAA, I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    return LiveOnEntry;
  }

  MemoryAccess *OptimizedAccess;
  if (!IsOptimized) {
    // Start with the thing we already think clobbers this location.
    MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

    // If that is liveOnEntry, we cannot get a better result.
    if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
      StartingAccess->setOptimized(DefiningAccess);
      return DefiningAccess;
    }

    OptimizedAccess =
        Walker.findClobber(BAA, DefiningAccess, Q, UpwardWalkLimit);
    StartingAccess->setOptimized(OptimizedAccess);
  } else
    OptimizedAccess = StartingAccess->getOptimized();

  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");

  MemoryAccess *Result;
  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
      isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
    assert(isa<MemoryDef>(Q.OriginalAccess));
    Q.SkipSelfAccess = true;
    Result = Walker.findClobber(BAA, OptimizedAccess, Q, UpwardWalkLimit);
  } else
    Result = OptimizedAccess;

  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");

  return Result;
}

MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA,
                                                    BatchAAResults &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &, BatchAAResults &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}
bool upward_defs_iterator::IsGuaranteedLoopInvariant(const Value *Ptr) const {
  auto IsGuaranteedLoopInvariantBase = [](const Value *Ptr) {
    Ptr = Ptr->stripPointerCasts();
    if (!isa<Instruction>(Ptr))
      return true;
    return isa<AllocaInst>(Ptr);
  };

  Ptr = Ptr->stripPointerCasts();
  if (auto *I = dyn_cast<Instruction>(Ptr)) {
    if (I->getParent()->isEntryBlock())
      return true;
  }
  if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
           GEP->hasAllConstantIndices();
  }
  return IsGuaranteedLoopInvariantBase(Ptr);
}
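// Illustrative cases (hedged): an alloca, any instruction in the entry
// block, and a GEP with all-constant indices off such a base all count as
// guaranteed loop-invariant pointers here; a GEP with a loop-varying index
// does not.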