#include "llvm/Config/llvm-config.h"

#define DEBUG_TYPE "memoryssa"
77 "will consider trying to walk past (default = 100)"));
#ifdef EXPENSIVE_CHECKS

  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

      OS << "; " << *MA << "\n";

      OS << "; " << *MA << "\n";

  MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
      : MSSA(M), Walker(M->getWalker()), BAA(M->getAA()) {}

      OS << "; " << *MA << "\n";

      OS << " - clobbered by ";

class MemoryLocOrCall {
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

    if (auto *C = dyn_cast<CallBase>(Inst)) {
      if (!isa<FenceInst>(Inst))

    if (IsCall != Other.IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledOperand() != Other.Call->getCalledOperand())
    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());

                                      MLOC.getCall()->getCalledOperand()));
    for (const Value *Arg : MLOC.getCall()->args())

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
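// Illustrative sketch, not part of MemorySSA.cpp: MemoryLocOrCall and the
// DenseMapInfo specialization above let plain memory locations and call sites
// share one DenseMap key type (this is how OptimizeUses keys its per-location
// stack state). A hypothetical helper that counts MemoryUses per location
// could look like this; countUsesPerLocation is an invented name.
static void countUsesPerLocation(const MemorySSA &MSSA, const BasicBlock &BB,
                                 DenseMap<MemoryLocOrCall, unsigned> &Counts) {
  if (const auto *Accesses = MSSA.getBlockAccesses(&BB))
    for (const MemoryAccess &MA : *Accesses)
      if (const auto *MU = dyn_cast<MemoryUse>(&MA))
        ++Counts[MemoryLocOrCall(MU)];
}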
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  if (VolatileUse && VolatileClobber)

  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);

template <typename AliasAnalysisType>
static bool
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  assert(DefInst && "Defining instruction not actually an instruction");

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::allow_runtime_check:
    case Intrinsic::allow_ubsan_check:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:

  if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))

template <typename AliasAnalysisType>
static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
                                     const MemoryLocOrCall &UseMLOC,
                                     AliasAnalysisType &AA) {

struct UpwardsMemoryQuery {
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {

template <typename AliasAnalysisType>
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return I->hasMetadata(LLVMContext::MD_invariant_load) ||

                                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
         "liveOnEntry must clobber itself");

  bool FoundClobber = false;
  while (!Worklist.empty()) {
    if (MA == ClobberAt) {
      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {

    if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
             "Found clobber before reaching ClobberAt!");

    if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
             "Can only find use in def chain if Start is a use");

    assert(isa<MemoryPhi>(MA));

  if (AllowImpreciseClobber)

  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
    std::optional<ListIndex> Previous;

            std::optional<ListIndex> Previous)

            std::optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}

  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

    assert(From->getNumOperands() && "Phi with no operands?");

  struct UpwardsWalkResult {

    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;

      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (!--*UpwardWalkLimit)
          return {Current, true};

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false};

                   ListIndex PriorNode) {

  struct TerminatedPath {

  std::optional<TerminatedPath>
    assert(!PausedSearches.empty() && "No searches to continue?");

    while (!PausedSearches.empty()) {
      DefPath &Node = Paths[PathIndex];

      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)

      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        if (Res.Result != SkipStopWhere)

      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.has_value() != O.N.has_value())
      return !N || *N == *O.N;

    T &curNode() const { return W->Paths[*N]; }

    std::optional<ListIndex> N;

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

    return make_range(def_path_iterator(this, From), def_path_iterator());
                      const_def_path_iterator());

    TerminatedPath PrimaryClobber;

  ListIndex defPathIndex(const DefPath &N) const {
    const DefPath *NP = &N;
           "Out of bounds DefPath!");
    return NP - &Paths.front();

           "Reset the optimization state.");

    auto PriorPathsSize = Paths.size();

    addSearches(Phi, PausedSearches, 0);

      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
      std::iter_swap(Last, Dom);

           "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {

      if (std::optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};

      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        return {Result, std::move(TerminatedPaths)};

      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          DefChainEnd = WR.Result;

      if (!TerminatedPaths.empty()) {

      assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");

        for (const TerminatedPath &TP : TerminatedPaths) {
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))

        if (!Clobbers.empty()) {
          MoveDominatedPathToEnd(Clobbers);
          return {Result, std::move(Clobbers)};

                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);

      Current = DefChainPhi;

  void verifyOptResult(const OptznResult &R) const {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);

  void resetPhiOptznState() {

      : MSSA(MSSA), DT(DT) {}

                            UpwardsMemoryQuery &Q, unsigned &UpWalkLimit) {
    UpwardWalkLimit = &UpWalkLimit;

    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, std::nullopt);
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
struct RenamePassData {
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {

  ClobberWalker Walker;

                                                 bool UseInvariantGroup = true);

    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false);
    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false, false);

    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();

    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, true);
    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);

    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
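// Illustrative sketch, not part of MemorySSA.cpp: the CachingWalker and
// SkipSelfWalker above are what clients reach through MSSA.getWalker() and
// MSSA.getSkipSelfWalker(). A typical query for the access that clobbers a
// load looks like this (describeClobber is an invented helper name):
static void describeClobber(MemorySSA &MSSA, LoadInst &LI) {
  MemoryUseOrDef *Access = MSSA.getMemoryAccess(&LI);
  if (!Access)
    return; // No MemorySSA access for this instruction.
  MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(Access);
  if (MSSA.isLiveOnEntryDef(Clobber))
    dbgs() << "load is only clobbered by function entry\n";
  else
    dbgs() << "nearest dominating clobber: " << *Clobber << "\n";
}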
                                    bool RenameAllUses) {
    auto It = PerBlockAccesses.find(S);
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))

    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      bool ReplacementDone = false;
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        if (Phi->getIncomingBlock(I) == BB) {
          Phi->setIncomingValue(I, IncomingVal);
          ReplacementDone = true;
      (void) ReplacementDone;
      assert(ReplacementDone && "Incomplete phi during partial rename");
      Phi->addIncoming(IncomingVal, BB);

                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  if (It != PerBlockAccesses.end()) {
      if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
        MUD->setDefiningAccess(IncomingVal);
      if (isa<MemoryDef>(&L))

                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");
  if (SkipVisited && AlreadyVisited)
  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);

  while (!WorkStack.empty()) {
    IncomingVal = WorkStack.back().IncomingVal;
    if (ChildIt == Node->end()) {
      ++WorkStack.back().ChildIt;
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
          IncomingVal = &*BlockDefs->rbegin();
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);

void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
         "Reachable block found while handling unreachable blocks");
    auto It = PerBlockAccesses.find(S);
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
      Accesses->erase(AI);

    : DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {
  assert(AA && "No alias analysis?");
  buildMemorySSA(BatchAA);

  for (const auto &Pair : PerBlockAccesses)

  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
    Res.first->second = std::make_unique<AccessList>();
  return Res.first->second.get();

  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
    Res.first->second = std::make_unique<DefsList>();
  return Res.first->second.get();

      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  struct MemlocStackInfo {
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    unsigned long LowerBound;
    unsigned long LastKill;

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,

  CachingWalker *Walker;

void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
  if (Accesses == nullptr)

         !VersionStack.empty() &&
         "Version stack should have liveOnEntry sentinel dominating everything");

    while (VersionStack.back()->getBlock() == BackBlock)

    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (MU->isOptimized())

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
    } else if (LocInfo.StackEpoch != StackEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;

    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;

    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    unsigned long UpperBound = VersionStack.size() - 1;

      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      LocInfo.LastKillValid = false;

    bool FoundClobberResult = false;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
                MU, *AA, UpwardWalkLimit);
        while (VersionStack[UpperBound] != Result) {
        FoundClobberResult = true;

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
        FoundClobberResult = true;

    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      MU->setDefiningAccess(VersionStack[UpperBound], true);
      LocInfo.LastKill = UpperBound;
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);

    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,

void MemorySSA::placePHINodes(
  IDFs.setDefiningBlocks(DefiningBlocks);
  IDFs.calculate(IDFBlocks);
  for (auto &BB : IDFBlocks)
    createMemoryPhi(BB);

                                     &StartingPoint, NextID++));

    bool InsertIntoDef = false;
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);

  placePHINodes(DefiningBlocks);

    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);

    return Walker.get();
    WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);
  Walker = std::make_unique<CachingWalker>(this, WalkerBase.get());
  return Walker.get();

    return SkipWalker.get();
    WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);
  SkipWalker = std::make_unique<SkipSelfWalker>(this, WalkerBase.get());
  return SkipWalker.get();
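// Illustrative sketch, not part of MemorySSA.cpp: in the new pass manager a
// transform obtains the MemorySSA built above (and, through it, the lazily
// created walker) from MemorySSAAnalysis. ExampleMSSAPass is an invented name
// used only for this sketch.
struct ExampleMSSAPass : PassInfoMixin<ExampleMSSAPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
    MemorySSAWalker *Walker = MSSA.getWalker(); // Created on first request.
    (void)Walker;
    // ... use MSSA / Walker, keeping them current via MemorySSAUpdater ...
    PreservedAnalyses PA = PreservedAnalyses::none();
    PA.preserve<MemorySSAAnalysis>(); // Only if the pass keeps MSSA updated.
    return PA;
  }
};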
  auto *Accesses = getOrCreateAccessList(BB);
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        auto *Defs = getOrCreateDefsList(BB);
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
  BlockNumberingValid.erase(BB);

  bool WasEnd = InsertPt == Accesses->end();
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
        Defs->insert(InsertPt->getDefsIterator(), *What);
  BlockNumberingValid.erase(BB);

  if (auto *MD = dyn_cast<MemoryDef>(What))
  prepareForMoveTo(What, BB);

  if (isa<MemoryPhi>(What)) {
           "Can only move a Phi at the beginning of the block");
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  prepareForMoveTo(What, BB);

  ValueToMemoryAccess[BB] = Phi;

                                               bool CreationMustSucceed) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  if (CreationMustSucceed)
    assert(NewAccess != nullptr && "Tried to create a memory access for a "
                                   "non-memory touching instruction");
  assert((!Definition || !isa<MemoryUse>(Definition)) &&
         "A use cannot be a defining access");

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isUnordered())
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isUnordered())

template <typename AliasAnalysisType>
                                           AliasAnalysisType *AAP,
    switch (II->getIntrinsicID()) {
    case Intrinsic::allow_runtime_check:
    case Intrinsic::allow_ubsan_check:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:

  if (!I->mayReadFromMemory() && !I->mayWriteToMemory())

  bool DefCheck, UseCheck;
    assert((Def == DefCheck || !DefCheck) &&
           "Memory accesses should only be reduced");
    if (!Def && Use != UseCheck) {
      assert(!UseCheck && "Invalid template");

    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess[I] = MUD;

         "Trying to remove memory access that still has uses");
  BlockNumbering.erase(MA);
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
  if (!isa<MemoryUse>(MA))
  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    auto VMA = ValueToMemoryAccess.find(MemoryInst);
    if (VMA->second == MA)
      ValueToMemoryAccess.erase(VMA);

  if (!isa<MemoryUse>(MA)) {
    auto DefsIt = PerBlockDefs.find(BB);
    std::unique_ptr<DefsList> &Defs = DefsIt->second;
      PerBlockDefs.erase(DefsIt);

  auto AccessIt = PerBlockAccesses.find(BB);
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
    Accesses->erase(MA);
    Accesses->remove(MA);
  if (Accesses->empty()) {
    PerBlockAccesses.erase(AccessIt);
    BlockNumberingValid.erase(BB);
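// Illustrative sketch, not part of MemorySSA.cpp: passes normally do not call
// removeFromLookups()/removeFromLists() directly; they go through
// MemorySSAUpdater (llvm/Analysis/MemorySSAUpdater.h), which rewires defining
// accesses before delegating to the routines above. A minimal removal of a
// dead store might look like this (removeDeadStore is an invented name):
static void removeDeadStore(MemorySSA &MSSA, StoreInst &SI) {
  MemorySSAUpdater Updater(&MSSA);
  if (MemoryAccess *MA = MSSA.getMemoryAccess(&SI))
    Updater.removeMemoryAccess(MA); // Rewires users, then erases MA.
  SI.eraseFromParent();
}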
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)

    for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
      auto *Pred = Phi->getIncomingBlock(I);
      auto *IncAcc = Phi->getIncomingValue(I);
      if (auto *DTNode = DT->getNode(Pred)) {
          if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
            auto *LastAcc = &*(--DefList->end());
            assert(LastAcc == IncAcc &&
                   "Incorrect incoming access into phi.");
          DTNode = DTNode->getIDom();

  if (BlockNumberingValid.empty())

    if (!ValidBlocks.count(&BB))
    ValidBlocks.erase(&BB);

    unsigned long LastNumber = 0;
      auto ThisNumberIter = BlockNumbering.find(&MA);
      assert(ThisNumberIter != BlockNumbering.end() &&
             "MemoryAccess has no domination number in a valid block!");
      unsigned long ThisNumber = ThisNumberIter->second;
      assert(ThisNumber > LastNumber &&
             "Domination numbers should be strictly increasing!");
      LastNumber = ThisNumber;

         "All valid BasicBlocks should exist in F -- dangling pointers?");

    for (const Use &U : Phi->uses()) {
      assert(dominates(Phi, U) && "Memory PHI does not dominate it's uses");

           "Incomplete MemoryPhi Node");
    for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
      verifyUseInDefs(Phi->getIncomingValue(I), Phi);
             "Incoming phi block not a block predecessor");

    assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
           "We have memory affecting instructions "
           "in this block but they are not in the "
           "access list or defs list");

      for (const Use &U : MD->uses()) {
               "Memory Def does not dominate it's uses");

    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "

           "Either we should have a defs list, or we should have no defs");

           "We don't have the same number of defs in the block as on the "

    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
    ActualAccesses.clear();
      auto ADI = ActualDefs.begin();
      while (DLI != DL->end() && ADI != ActualDefs.end()) {
        assert(&*DLI == *ADI && "Not the same defs in the same order");

           "Null def but use not point to live on entry def");
           "Did not find use in def's use list");

void MemorySSA::renumberBlock(const BasicBlock *B) const {
  unsigned long CurrentNumber = 0;
  assert(AL != nullptr && "Asking to renumber an empty block");
  for (const auto &I : *AL)
    BlockNumbering[&I] = ++CurrentNumber;
  BlockNumberingValid.insert(B);

         "Asking for local domination when accesses are in different blocks!");
  if (Dominatee == Dominator)

  if (!BlockNumberingValid.count(DominatorBlock))
    renumberBlock(DominatorBlock);

  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
  assert(DominatorNum != 0 && "Block was not numbered properly");
  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
  assert(DominateeNum != 0 && "Block was not numbered properly");
  return DominatorNum < DominateeNum;

  if (Dominator == Dominatee)

                          const Use &Dominatee) const {
    BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
    if (UseBB != Dominator->getBlock())
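// Illustrative sketch, not part of MemorySSA.cpp: the queries above combine
// the lazily computed per-block numbering with the dominator tree. A client
// asking whether one memory access is ordered before another simply calls
// dominates() (isOrderedBefore is an invented helper name):
static bool isOrderedBefore(MemorySSA &MSSA, Instruction &A, Instruction &B) {
  MemoryAccess *MA = MSSA.getMemoryAccess(&A);
  MemoryAccess *MB = MSSA.getMemoryAccess(&B);
  // dominates() handles both the same-block case (local numbering, computed
  // by renumberBlock above) and the cross-block case (dominator tree).
  return MA && MB && MSSA.dominates(MA, MB);
}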
  switch (getValueID()) {

  if (A && A->getID())

  OS << getID() << " = MemoryDef(";
  if (isOptimized()) {
    printID(getOptimized());

  ListSeparator LS(",");
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {

  if (unsigned ID = MA->getID())

  if (UO && UO->getID())

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

  MemorySSAAnnotatedWriter MSSAWriter;

      : F(F), MSSAWriter(&MSSA) {}

  MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }

        [](std::string &S, unsigned &I, unsigned Idx) -> void {
          std::string Str = S.substr(I, Idx - I);
          if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||
              SR.count("MemoryUse("))

    return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos
               ? "style=filled, fillcolor=lightpink"

  if (EnsureOptimizedUses)

  OS << "MemorySSA for function: " << F.getName() << "\n";

  OS << "MemorySSA (walker) for function: " << F.getName() << "\n";
  MemorySSAWalkerAnnotatedWriter Writer(&MSSA);
  F.print(OS, &Writer);

  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
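// Illustrative sketch, not part of MemorySSA.cpp: a legacy-pass-manager client
// mirrors the two getAnalysis<>() calls above by requiring
// MemorySSAWrapperPass and pulling the MemorySSA object out of it
// (ExampleLegacyPass is an invented name):
struct ExampleLegacyPass : FunctionPass {
  static char ID;
  ExampleLegacyPass() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequiredTransitive<MemorySSAWrapperPass>();
  }

  bool runOnFunction(Function &F) override {
    MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
    MSSA.verifyMemorySSA(); // e.g. sanity-check the analysis before use
    return false;           // analysis-only, IR unchanged
  }
};
char ExampleLegacyPass::ID = 0;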
  assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");

  if (Loc.Ptr == nullptr)
    return StartingAccess;

  if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
      return StartingUseOrDef;

    I = StartingUseOrDef->getMemoryInst();

    if (!isa<CallBase>(I) && I->isFenceLike())
      return StartingUseOrDef;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingAccess;
  Q.StartingLoc = Loc;

      Walker.findClobber(BAA, StartingAccess, Q, UpwardWalkLimit);

    dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
    dbgs() << "  for instruction " << *I << "\n";
    dbgs() << "  is " << *Clobber << "\n";

  if (!I.hasMetadata(LLVMContext::MD_invariant_group) || I.isVolatile())

  if (isa<Constant>(PointerOperand))

  PointerUsesQueue.push_back(PointerOperand);

  while (!PointerUsesQueue.empty()) {
           "Null or GlobalValue should not be inserted");

    for (const User *Us : Ptr->users()) {
      auto *U = dyn_cast<Instruction>(Us);
      if (!U || U == &I || !DT.dominates(U, MostDominatingInstruction))

      if (isa<BitCastInst>(U)) {

      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        if (GEP->hasAllZeroIndices())

      if (U->hasMetadata(LLVMContext::MD_invariant_group) &&
        MostDominatingInstruction = U;

  return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction;

    bool SkipSelf, bool UseInvariantGroup) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  if (!StartingAccess)

  if (UseInvariantGroup) {
            *StartingAccess->getMemoryInst(), MSSA->getDomTree())) {
      assert(isa<LoadInst>(I) || isa<StoreInst>(I));

      if (isa<MemoryUse>(ClobberMA))
        return ClobberMA->getDefiningAccess();

  bool IsOptimized = false;

  if (StartingAccess->isOptimized()) {
    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
      return StartingAccess->getOptimized();

  const Instruction *I = StartingAccess->getMemoryInst();

  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q(I, StartingAccess);

    StartingAccess->setOptimized(LiveOnEntry);

  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
      StartingAccess->setOptimized(DefiningAccess);
      return DefiningAccess;

        Walker.findClobber(BAA, DefiningAccess, Q, UpwardWalkLimit);
    StartingAccess->setOptimized(OptimizedAccess);
    OptimizedAccess = StartingAccess->getOptimized();

  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");

  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
      isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
    assert(isa<MemoryDef>(Q.OriginalAccess));
    Q.SkipSelfAccess = true;
    Result = Walker.findClobber(BAA, OptimizedAccess, Q, UpwardWalkLimit);
    Result = OptimizedAccess;

  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);

  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();

  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;

bool upward_defs_iterator::IsGuaranteedLoopInvariant(const Value *Ptr) const {
  auto IsGuaranteedLoopInvariantBase = [](const Value *Ptr) {
    Ptr = Ptr->stripPointerCasts();
    if (!isa<Instruction>(Ptr))
    return isa<AllocaInst>(Ptr);

  Ptr = Ptr->stripPointerCasts();
  if (auto *I = dyn_cast<Instruction>(Ptr)) {
    if (I->getParent()->isEntryBlock())

  if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
           GEP->hasAllConstantIndices();

  return IsGuaranteedLoopInvariantBase(Ptr);
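// Illustrative sketch, not part of MemorySSA.cpp: IsGuaranteedLoopInvariant
// above is consulted by upward_defs_iterator when it phi-translates a
// location. A client can enumerate the (MemoryAccess, MemoryLocation) pairs
// that a use's defining access directly depends on via the upward_defs range
// (dumpUpwardDefs is an invented name):
static void dumpUpwardDefs(MemorySSA &MSSA, DominatorTree &DT, LoadInst &LI) {
  MemoryUseOrDef *Access = MSSA.getMemoryAccess(&LI);
  if (!Access)
    return;
  MemoryAccessPair Start = {Access->getDefiningAccess(),
                            MemoryLocation::get(&LI)};
  for (auto I = upward_defs_begin(Start, DT), E = upward_defs_end(); I != E;
       ++I)
    dbgs() << "upward def: " << *I->first << "\n";
}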