#include "llvm/Config/llvm-config.h"

#define DEBUG_TYPE "memoryssa"

static cl::opt<unsigned>
    MaxCheckLimit("memssa-check-limit", cl::Hidden, cl::init(100),
                  cl::desc("The maximum number of stores/phis MemorySSA "
                           "will consider trying to walk past (default = 100)"));

static const char LiveOnEntryStr[] = "liveOnEntry";

// Always verify MemorySSA when expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif
/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

/// An assembly annotator class that additionally prints, for each access, the
/// clobbering access found by a walker query.
class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter {
  MemorySSA *MSSA;
  MemorySSAWalker *Walker;
  BatchAAResults BAA;

public:
  MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
      : MSSA(M), Walker(M->getWalker()), BAA(M->getAA()) {}

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) {
      MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA, BAA);
      OS << "; " << *MA;
      if (Clobber) {
        OS << " - clobbered by ";
        if (MSSA->isLiveOnEntryDef(Clobber))
          OS << LiveOnEntryStr;
        else
          OS << *Clobber;
      }
      OS << "\n";
    }
  }
};
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      // There is no such thing as a MemoryLocation for a fence instruction.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledOperand() != Other.Call->getCalledOperand())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

template <> struct DenseMapInfo<MemoryLocOrCall> {
  // ... empty/tombstone keys elided ...

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledOperand()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};
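// With this DenseMapInfo specialization, MemoryLocOrCall can serve as a hash
// map key: two calls hash and compare equal when they have the same callee
// and argument list, two non-calls when their MemoryLocations match. That is
// how OptimizeUses keys its per-location state further down in this file:
//   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
//   auto &LocInfo = LocStackInfo[MemoryLocOrCall(MU)];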
/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatility alone doesn't forbid the reordering; the atomic
  // orderings do: a seq_cst use may never be hoisted, and nothing may be
  // hoisted above an acquire (or stronger) load.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}
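// Worked example of the rule above: two unordered, non-volatile loads are
// always reorderable (returns true); a seq_cst Use is never hoisted; an
// acquire-or-stronger MayClobber pins everything after it; and the only case
// volatility alone forbids is when *both* loads are volatile.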
template <typename AliasAnalysisType>
static bool instructionClobbersQuery(const MemoryDef *MD,
                                     const MemoryLocation &UseLoc,
                                     const Instruction *UseInst,
                                     AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers.
    switch (II->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
      return false;
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }

  if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
    ModRefInfo I = AA.getModRefInfo(DefInst, CB);
    return isModOrRefSet(I);
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
      return !areLoadsReorderable(UseLoad, DefLoad);

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  return isModSet(I);
}

template <typename AliasAnalysisType>
static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
                                     const MemoryLocOrCall &UseMLOC,
                                     AliasAnalysisType &AA) {
  // ... forwards to the overload above, rebuilding the MemoryLocation (or the
  // call) from UseMLOC ...
}
struct UpwardsMemoryQuery {
  // True if our original query started off as a call.
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination.
  const MemoryAccess *OriginalAccess = nullptr;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {}
};
template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return I->hasMetadata(LLVMContext::MD_invariant_load) ||
           !isModSet(AA.getModRefInfoMask(MemoryLocation::get(LI)));
  }
  return false;
}
/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// inbetween `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, BatchAAResults &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers
    // Start. We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so only evaluate
          // it when FoundClobber is still false.
          if (!FoundClobber)
            FoundClobber =
                instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;
        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      // Add reachable phi predecessors to the worklist.
      assert(isa<MemoryPhi>(MA));
      // ...
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}
/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one place.
class ClobberWalker {
  /// Save a bit of memory by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    std::optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            std::optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            std::optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  DominatorTree &DT;
  BatchAAResults *AA;
  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

  // Phi optimization bookkeeping: paths in progress and phis already visited.
  SmallVector<DefPath, 32> Paths;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }
  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked,
    /// or both.
    MemoryAccess *Result;
    bool IsKnownClobber;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // If the limit is already exhausted, grant one more step so this walk can
    // make progress; the limit is restored to zero afterwards.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true};

        if (!--*UpwardWalkLimit)
          return {Current, true};

        if (instructionClobbersQuery(MD, Desc.Loc, Query->Inst, *AA))
          return {MD, true};
      }
    }

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false};
  }
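  // Note on the walk-limit protocol above: the caller owns the counter (see
  // findClobber below), and every MemoryDef stepped over decrements it. When
  // it hits zero mid-walk the current access is returned with
  // IsKnownClobber=true, i.e. the walker gives a conservative answer rather
  // than paying for an unbounded walk.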
  /// Add a search for the defining access of each of Phi's incoming values to
  /// PausedSearches, recording PriorNode as the predecessor path of each.
  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT);
    auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }
  /// A path that terminated in a clobber, plus the index of the path node
  /// where it happened.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi
  /// optimization target.
  std::optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
                                                 /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off in case we want to
        // continue walking (but do not save the path through the skipped
        // access).
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return std::nullopt;
  }
  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.has_value() != O.N.has_value())
        return false;
      return !N || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    std::optional<ListIndex> N;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }
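  // These ranges iterate a search path from its tip back toward its origin by
  // chasing DefPath::Previous links. tryOptimizePhi uses def_path below to
  // locate where a blocked search started, as shown right after a blocker is
  // found.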
  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N.
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }
764 "Reset the optimization state.");
769 auto PriorPathsSize = Paths.
size();
775 addSearches(Phi, PausedSearches, 0);
781 auto Dom = Paths.
begin();
782 for (
auto I = std::next(Dom),
E = Paths.
end();
I !=
E; ++
I)
783 if (!MSSA.
dominates(
I->Clobber, Dom->Clobber))
787 std::iter_swap(
Last, Dom);
793 "liveOnEntry wasn't treated as a clobber?");
795 const auto *
Target = getWalkTarget(Current);
798 assert(
all_of(TerminatedPaths, [&](
const TerminatedPath &
P) {
805 if (std::optional<TerminatedPath> Blocker = getBlockingAccess(
806 Target, PausedSearches, NewPaused, TerminatedPaths)) {
810 auto Iter =
find_if(def_path(Blocker->LastNode), [&](
const DefPath &
N) {
811 return defPathIndex(N) < PriorPathsSize;
813 assert(Iter != def_path_iterator());
815 DefPath &CurNode = *Iter;
816 assert(CurNode.Last == Current);
843 TerminatedPath
Result{CurNode.Last, defPathIndex(CurNode)};
850 if (NewPaused.
empty()) {
851 MoveDominatedPathToEnd(TerminatedPaths);
853 return {
Result, std::move(TerminatedPaths)};
858 for (ListIndex Paused : NewPaused) {
859 UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
860 if (WR.IsKnownClobber)
864 DefChainEnd = WR.Result;
867 if (!TerminatedPaths.
empty()) {
873 assert(DefChainEnd &&
"Failed to find dominating phi/liveOnEntry");
878 for (
const TerminatedPath &TP : TerminatedPaths) {
881 if (DT.
dominates(ChainBB, TP.Clobber->getBlock()))
888 if (!Clobbers.
empty()) {
889 MoveDominatedPathToEnd(Clobbers);
891 return {
Result, std::move(Clobbers)};
895 [&](ListIndex
I) {
return Paths[
I].Last == DefChainEnd; }));
898 auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
900 PriorPathsSize = Paths.
size();
901 PausedSearches.
clear();
902 for (ListIndex
I : NewPaused)
903 addSearches(DefChainPhi, PausedSearches,
I);
906 Current = DefChainPhi;
  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, DominatorTree &DT)
      : MSSA(MSSA), DT(DT) {}
  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(BatchAAResults &BAA, MemoryAccess *Start,
                            UpwardsMemoryQuery &Q, unsigned &UpWalkLimit) {
    AA = &BAA;
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;
    // Starting limit must be > 0.
    if (!UpWalkLimit)
      UpWalkLimit++;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently
    // grab its def. (This has the nice side-effect of ensuring we never cache
    // uses.)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, std::nullopt);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary).
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, BAA);
#endif
    return Result;
  }
};
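// Callers drive findClobber with a caller-owned walk budget; the pattern used
// by the wrappers later in this file is:
//   unsigned UpwardWalkLimit = MaxCheckLimit;
//   MemoryAccess *Clobber =
//       Walker.findClobber(BAA, DefiningAccess, Q, UpwardWalkLimit);
// The same budget is threaded through nested phi walks, so one query is
// bounded by MaxCheckLimit alias checks in total.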
struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};
/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
class MemorySSA::ClobberWalkerBase {
  ClobberWalker Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, DominatorTree *D) : Walker(*M, *D), MSSA(M) {}

  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &,
                                              BatchAAResults &, unsigned &);
  // Second bool argument defines whether the clobber search should skip the
  // original queried access; walker instantiations decide how to set it.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, BatchAAResults &,
                                              unsigned &, bool,
                                              bool UseInvariantGroup = true);
};

class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
  }
  // This method is not accessible outside of this file.
  MemoryAccess *getClobberingMemoryAccessWithoutInvariantGroup(
      MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false, false);
  }

  // ... overrides without an explicit walk limit default it to MaxCheckLimit ...

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
  }

  // ... overrides without an explicit walk limit default it to MaxCheckLimit ...

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};
void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors.
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      bool ReplacementDone = false;
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        if (Phi->getIncomingBlock(I) == BB) {
          Phi->setIncomingValue(I, IncomingVal);
          ReplacementDone = true;
        }
      (void)ReplacementDone;
      assert(ReplacementDone && "Incomplete phi during partial rename");
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}
/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}
/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");
  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. The incoming value can only
        // change if there is a block def, in which case it's the last def in
        // the block.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}
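// The rename pass above is classic SSA renaming made iterative: WorkStack is
// the explicit recursion stack, and each frame carries (dom-tree node,
// next-child iterator, incoming memory state), so deep dominator trees cannot
// overflow the call stack.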
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all users
    // with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}
MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA);
}
MemorySSA::~MemorySSA() {
  // Drop all our references.
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}
MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<DefsList>();
  return Res.first->second.get();
}
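// Design note: insert(std::make_pair(BB, nullptr)) probes the hash table only
// once; the list is allocated solely when the key was actually new
// (Res.second), avoiding the double lookup of a find-then-insert sequence.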
/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker *Walker, BatchAAResults *BAA,
               DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  void optimizeUses();

private:
  /// This represents where a given memory location is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which
    // DenseMap does.
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    bool LastKillValid;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &,
                           unsigned long &, SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  CachingWalker *Walker;
  BatchAAResults *AA;
  DominatorTree *DT;
};
/// Optimize the uses in a given block. This is basically the SSA renaming
/// algorithm, with one caveat: we are able to use a single stack for all
/// MemoryUses, because the set of *possible* reaching MemoryDefs is the same
/// for every MemoryUse; the *actual* clobbering MemoryDef is just some
/// position in that stack.
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
  // If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (MU->isOptimized())
      continue;

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from top of
    // stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill info is not valid for any
      // definitions below it, so mark it invalid.
      LocInfo.LastKillValid = false;
      continue;
    }

    bool FoundClobberResult = false;
    unsigned UpwardWalkLimit = MaxCheckLimit;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there.
        // The invariant.group handling in MemorySSA is ad-hoc and doesn't
        // support updates, so don't use it to optimize uses.
        MemoryAccess *Result =
            Walker->getClobberingMemoryAccessWithoutInvariantGroup(
                MU, *AA, UpwardWalkLimit);
        // We are guaranteed to find it or something is wrong.
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
        FoundClobberResult = true;
        break;
      }
      --UpperBound;
    }

    // At the end of this loop, UpperBound is either a clobber, or the lower
    // bound. PHI walking may have made it smaller than LastKill, in which
    // case LastKill is still the right answer.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      MU->setDefiningAccess(VersionStack[UpperBound], true);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}
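// In short: the version stack holds every MemoryDef/MemoryPhi on the current
// dominator-tree path, and each use scans downward from the stack top toward
// a cached per-location lower bound. The Stack/Pop epochs detect whether the
// stack changed since the last query for the same location, so unchanged
// regions are never rescanned.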
/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}
void MemorySSA::placePHINodes(
    const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhi's should go.
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks)
    createMemoryPhi(BB);
}
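// This mirrors classic SSA construction: treating every block containing a
// MemoryDef as a definition of the single memory variable, MemoryPhis go
// exactly on the iterated dominance frontier of those blocks.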
void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined
  // before the beginning of the function. We do not actually insert it into
  // the IR.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
                                     &StartingPoint, NextID++));

  // Go through each block, figure out where defs occur, and chain together
  // all the accesses.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
      if (!MUD)
        continue;

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}
MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }

MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
  if (Walker)
    return Walker.get();

  if (!WalkerBase)
    WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);

  Walker = std::make_unique<CachingWalker>(this, WalkerBase.get());
  return Walker.get();
}

MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
  if (SkipWalker)
    return SkipWalker.get();

  if (!WalkerBase)
    WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);

  SkipWalker = std::make_unique<SkipSelfWalker>(this, WalkerBase.get());
  return SkipWalker.get();
}
// This is a helper function used by the creation routines. It places NewAccess
// into the access and defs lists for a given basic block, at the given
// insertion point.
void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
                                        const BasicBlock *BB,
                                        InsertionPlace Point) {
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // If it's a phi node, it goes first, otherwise, it goes after any phi
    // nodes.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  BlockNumberingValid.erase(BB);
}
void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job, just shove
    // it at the end. If we got asked to insert before an existing def, we
    // also get an iterator. If we got asked to insert before a use, we have
    // to hunt for the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end.
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  BlockNumberingValid.erase(BB);
}
void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables, remove from the lists.
  removeFromLists(What, false);

  // Note that moving should implicitly invalidate the optimized state of a
  // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
  // MemoryDef.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    MD->resetOptimized();
  What->setBlock(BB);
}

// Move What before Where in the IR. The end result is that What will belong
// to the right lists and have the right Block set, but will not otherwise be
// correct. It will not have the right defining access, and if it is a def,
// things below it will not properly be updated.
void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where) {
  prepareForMoveTo(What, BB);
  insertIntoListsBefore(What, BB, Where);
}

void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point) {
  if (isa<MemoryPhi>(What)) {
    assert(Point == Beginning &&
           "Can only move a Phi at the beginning of the block");
    // Update lookup table entry.
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    (void)Inserted;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  }

  prepareForMoveTo(What, BB);
  insertIntoListsForBlock(What, BB, Point);
}

MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  // Phi's always are placed at the front of the block.
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  insertIntoListsForBlock(Phi, BB, Beginning);
  ValueToMemoryAccess[BB] = Phi;
  return Phi;
}
MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition,
                                               const MemoryUseOrDef *Template,
                                               bool CreationMustSucceed) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
  if (CreationMustSucceed)
    assert(NewAccess != nullptr && "Tried to create a memory access for a "
                                   "non-memory touching instruction");
  if (NewAccess) {
    assert((!Definition || !isa<MemoryUse>(Definition)) &&
           "A use cannot be a defining access");
    NewAccess->setDefiningAccess(Definition);
  }
  return NewAccess;
}
// Return true if the instruction has ordering constraints.
// Note specifically that this only considers stores and loads because others
// are still considered ModRef by getModRefInfo.
static inline bool isOrdered(const Instruction *I) {
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isUnordered())
      return true;
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isUnordered())
      return true;
  }
  return false;
}
/// Helper function to create new memory accesses.
template <typename AliasAnalysisType>
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
                                           AliasAnalysisType *AAP,
                                           const MemoryUseOrDef *Template) {
  // The assume intrinsic has a control dependency which we model by claiming
  // that it writes arbitrarily. Debuginfo intrinsics may be considered
  // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
  // dependencies here.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
      return nullptr;
    }
  }

  // Using a nonstandard AA pipeline might leave us with unexpected modref
  // results for I, so add a check to not model instructions that may not read
  // from or write to memory. This is necessary for correctness.
  if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
    return nullptr;

  bool Def, Use;
  if (Template) {
    Def = isa<MemoryDef>(Template);
    Use = isa<MemoryUse>(Template);
#if !defined(NDEBUG)
    ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt);
    bool DefCheck, UseCheck;
    DefCheck = isModSet(ModRef) || isOrdered(I);
    UseCheck = isRefSet(ModRef);
    // Memory accesses should only be reduced and can not be augmented.
    assert((Def == DefCheck || !DefCheck) &&
           "Memory accesses should only be reduced");
    if (!Def && Use != UseCheck) {
      // New access should not have more power than the template access.
      assert(!UseCheck && "Invalid template");
    }
#endif
  } else {
    // Find out what effect this instruction has on memory. The isOrdered
    // check ensures that volatiles and atomics are properly handled.
    ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt);
    Def = isModSet(ModRef) || isOrdered(I);
    Use = isRefSet(ModRef);
  }

  // It's possible for an instruction to not modify memory at all. During
  // construction, we ignore them.
  if (!Def && !Use)
    return nullptr;

  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess[I] = MUD;
  return MUD;
}
1796 "Trying to remove memory access that still has uses");
1797 BlockNumbering.erase(MA);
1798 if (
auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1801 if (!isa<MemoryUse>(MA))
1805 if (
const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1810 auto VMA = ValueToMemoryAccess.find(MemoryInst);
1811 if (VMA->second == MA)
1812 ValueToMemoryAccess.erase(VMA);
/// Properly remove MA from all of MemorySSA's lists.
///
/// Because of the way the intrusive list and use lists work, it is important
/// to do removal in the right order. ShouldDelete defaults to true, and will
/// cause the memory access to also be deleted, not just removed.
void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
  BasicBlock *BB = MA->getBlock();
  // The access list owns the reference, so we erase it from the non-owning
  // list first.
  if (!isa<MemoryUse>(MA)) {
    auto DefsIt = PerBlockDefs.find(BB);
    std::unique_ptr<DefsList> &Defs = DefsIt->second;
    Defs->remove(*MA);
    if (Defs->empty())
      PerBlockDefs.erase(DefsIt);
  }

  // The erase call here will delete it. If we don't want it deleted, we call
  // remove instead.
  auto AccessIt = PerBlockAccesses.find(BB);
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  if (ShouldDelete)
    Accesses->erase(MA);
  else
    Accesses->remove(MA);

  if (Accesses->empty()) {
    PerBlockAccesses.erase(AccessIt);
    BlockNumberingValid.erase(BB);
  }
}
void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
#endif

void MemorySSA::verifyMemorySSA(VerificationLevel VL) const {
#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
  VL = VerificationLevel::Full;
#endif

#ifndef NDEBUG
  verifyOrderingDominationAndDefUses(F, VL);
  verifyDominationNumbers(F);
  if (VL == VerificationLevel::Full)
    verifyPrevDefInPhis(F);
#endif
}
void MemorySSA::verifyPrevDefInPhis(Function &F) const {
  for (const BasicBlock &BB : F) {
    if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        auto *Pred = Phi->getIncomingBlock(I);
        auto *IncAcc = Phi->getIncomingValue(I);
        // If Pred has no unreachable predecessors, get the last def looking
        // at IDoms. If, while walking upwards, any block was reachable only
        // from unreachable blocks, the incoming access should be the phi from
        // that path instead.
        if (auto *DTNode = DT->getNode(Pred)) {
          while (DTNode) {
            if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
              auto *LastAcc = &*(--DefList->end());
              assert(LastAcc == IncAcc &&
                     "Incorrect incoming access into phi.");
              (void)IncAcc;
              (void)LastAcc;
              break;
            }
            DTNode = DTNode->getIDom();
          }
        }
        // ... (unreachable-predecessor case elided) ...
      }
    }
  }
}
/// Verify that all of the blocks we believe to have valid domination numbers
/// actually have valid domination numbers.
void MemorySSA::verifyDominationNumbers(const Function &F) const {
  if (BlockNumberingValid.empty())
    return;

  SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
  for (const BasicBlock &BB : F) {
    if (!ValidBlocks.count(&BB))
      continue;

    ValidBlocks.erase(&BB);

    const AccessList *Accesses = getBlockAccesses(&BB);
    // It's correct to say an empty block has valid numbering.
    if (!Accesses)
      continue;

    // Block numbering starts at 1.
    unsigned long LastNumber = 0;
    for (const MemoryAccess &MA : *Accesses) {
      auto ThisNumberIter = BlockNumbering.find(&MA);
      assert(ThisNumberIter != BlockNumbering.end() &&
             "MemoryAccess has no domination number in a valid block!");

      unsigned long ThisNumber = ThisNumberIter->second;
      assert(ThisNumber > LastNumber &&
             "Domination numbers should be strictly increasing!");
      (void)ThisNumber;
      LastNumber = ThisNumber;
    }
  }

  assert(ValidBlocks.empty() &&
         "All valid BasicBlocks should exist in F -- dangling pointers?");
}
/// Verify ordering: the order and existence of MemoryAccesses matches the
/// order and existence of memory affecting instructions.
/// Verify domination: each definition dominates all of its uses.
/// Verify def-uses: for each use, it appears in the appropriate def's use
/// list.
void MemorySSA::verifyOrderingDominationAndDefUses(Function &F,
                                                   VerificationLevel VL) const {
  // Walk all the blocks, comparing what the lookups think and what the access
  // lists think, as well as the order in the blocks vs the order in the lists.
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  SmallVector<MemoryAccess *, 32> ActualDefs;
  for (BasicBlock &B : F) {
    const AccessList *AL = getBlockAccesses(&B);
    const auto *DL = getBlockDefs(&B);
    MemoryPhi *Phi = getMemoryAccess(&B);
    if (Phi) {
      // Verify ordering.
      ActualAccesses.push_back(Phi);
      ActualDefs.push_back(Phi);
      // Verify domination.
      for (const Use &U : Phi->uses()) {
        assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
        (void)U;
      }
      // Verify def-uses for full verify.
      if (VL == VerificationLevel::Full) {
        assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
                                            pred_begin(&B), pred_end(&B))) &&
               "Incomplete MemoryPhi Node");
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          verifyUseInDefs(Phi->getIncomingValue(I), Phi);
          assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
                 "Incoming phi block not a block predecessor");
        }
      }
    }

    for (Instruction &I : B) {
      MemoryUseOrDef *MA = getMemoryAccess(&I);
      assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
             "We have memory affecting instructions "
             "in this block but they are not in the "
             "access list or defs list");
      if (MA) {
        // Verify ordering.
        ActualAccesses.push_back(MA);
        if (auto *MD = dyn_cast<MemoryDef>(MA)) {
          // Verify ordering.
          ActualDefs.push_back(MA);
          // Verify domination.
          for (const Use &U : MD->uses()) {
            assert(dominates(MD, U) &&
                   "Memory Def does not dominate its uses");
            (void)U;
          }
        }
        // Verify def-uses for full verify.
        if (VL == VerificationLevel::Full)
          verifyUseInDefs(MA->getDefiningAccess(), MA);
      }
    }
    // Either we hit the assert, really have no accesses, or we have both
    // accesses and an access list. Same with defs.
    if (!AL && !DL)
      continue;
    // Verify ordering.
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    assert((DL || ActualDefs.size() == 0) &&
           "Either we should have a defs list, or we should have no defs");
    assert((!DL || DL->size() == ActualDefs.size()) &&
           "We don't have the same number of defs in the block as on the "
           "def list");
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    ActualAccesses.clear();
    if (DL) {
      auto DLI = DL->begin();
      auto ADI = ActualDefs.begin();
      while (DLI != DL->end() && ADI != ActualDefs.end()) {
        assert(&*DLI == *ADI && "Not the same defs in the same order");
        ++DLI;
        ++ADI;
      }
    }
    ActualDefs.clear();
  }
}

/// Verify def-uses: walk all the memory accesses and verify that, for each
/// use, it appears in the appropriate def's use list.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
  // The live on entry use may cause us to get a NULL def here.
  if (!Def)
    assert(isLiveOnEntryDef(Use) &&
           "Null def but use not point to live on entry def");
  else
    assert(is_contained(Def->users(), Use) &&
           "Did not find use in def's use list");
}
/// Renumber the accesses in B, starting at 1.
void MemorySSA::renumberBlock(const BasicBlock *B) const {
  // The pre-increment ensures the numbers really start at 1.
  unsigned long CurrentNumber = 0;
  const AccessList *AL = getBlockAccesses(B);
  assert(AL != nullptr && "Asking to renumber an empty block");
  for (const auto &I : *AL)
    BlockNumbering[&I] = ++CurrentNumber;
  BlockNumberingValid.insert(B);
}
2084 "Asking for local domination when accesses are in different blocks!");
2086 if (Dominatee == Dominator)
2099 if (!BlockNumberingValid.count(DominatorBlock))
2100 renumberBlock(DominatorBlock);
2102 unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
2104 assert(DominatorNum != 0 &&
"Block was not numbered properly");
2105 unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2106 assert(DominateeNum != 0 &&
"Block was not numbered properly");
2107 return DominatorNum < DominateeNum;
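// Design note: block numbering is computed lazily (renumberBlock above) and
// invalidated wholesale whenever a block's access list changes (the
// BlockNumberingValid.erase(BB) calls in the insertion/removal paths), so
// local dominance queries stay cheap between mutations.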
bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const MemoryAccess *Dominatee) const {
  if (Dominator == Dominatee)
    return true;

  if (isLiveOnEntryDef(Dominatee))
    return false;

  if (Dominator->getBlock() != Dominatee->getBlock())
    return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
  return locallyDominates(Dominator, Dominatee);
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const Use &Dominatee) const {
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
    BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
    // The def must dominate the incoming block of the phi.
    if (UseBB != Dominator->getBlock())
      return DT->dominates(Dominator->getBlock(), UseBB);
    // If the UseBB and the DefBB are the same, compare locally.
    return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
  }

  // If it's not a PHI node use, the normal dominates can already handle it.
  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
}
void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}

void MemoryDef::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();

  auto printID = [&OS](MemoryAccess *A) {
    if (A && A->getID())
      OS << A->getID();
    else
      OS << LiveOnEntryStr;
  };

  OS << getID() << " = MemoryDef(";
  printID(UO);
  OS << ")";

  if (isOptimized()) {
    OS << "->";
    printID(getOptimized());
  }
}

void MemoryPhi::print(raw_ostream &OS) const {
  ListSeparator LS(",");
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *BB = getIncomingBlock(Op);
    MemoryAccess *MA = cast<MemoryAccess>(Op);

    OS << LS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}

void MemoryUse::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();
  OS << "MemoryUse(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemoryAccess::dump() const {
  print(dbgs());
  dbgs() << "\n";
}
#endif

class DOTFuncMSSAInfo {
private:
  const Function &F;
  MemorySSAAnnotatedWriter MSSAWriter;

public:
  DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
      : F(F), MSSAWriter(&MSSA) {}

  const Function *getFunction() { return &F; }
  MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }
};
  std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo) {
    return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(
        Node, nullptr,
        [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void {
          BB.print(OS, &CFGInfo->getWriter(), true, true);
        },
        [](std::string &S, unsigned &I, unsigned Idx) -> void {
          std::string Str = S.substr(I, Idx - I);
          StringRef SR = Str;
          // Keep lines carrying MemorySSA annotations; erase other comments.
          if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||
              SR.count("MemoryUse("))
            return;
          DOTGraphTraits<DOTFuncInfo *>::eraseComment(S, I, Idx);
        });
  }

  static std::string getEdgeSourceLabel(const BasicBlock *Node,
                                        const_succ_iterator I) {
    return DOTGraphTraits<DOTFuncInfo *>::getEdgeSourceLabel(Node, I);
  }

  /// Display the raw branch weights from PGO.
  std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I,
                                DOTFuncMSSAInfo *CFGInfo) {
    return "";
  }

  std::string getNodeAttributes(const BasicBlock *Node,
                                DOTFuncMSSAInfo *CFGInfo) {
    return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos
               ? "style=filled, fillcolor=lightpink"
               : "";
  }
PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  if (EnsureOptimizedUses)
    MSSA.ensureOptimizedUses();
  if (DotCFGMSSA != "") {
    DOTFuncMSSAInfo CFGInfo(F, MSSA);
    WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
  } else {
    OS << "MemorySSA for function: " << F.getName() << "\n";
    MSSA.print(OS);
  }

  return PreservedAnalyses::all();
}

PreservedAnalyses MemorySSAWalkerPrinterPass::run(Function &F,
                                                  FunctionAnalysisManager &AM) {
  auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  OS << "MemorySSA (walker) for function: " << F.getName() << "\n";
  MemorySSAWalkerAnnotatedWriter Writer(&MSSA);
  F.print(OS, &Writer);

  return PreservedAnalyses::all();
}
bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}
MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc,
    BatchAAResults &BAA, unsigned &UpwardWalkLimit) {
  assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");

  // If the location has no pointer, we cannot say anything meaningful about
  // the walk; return the starting access.
  if (Loc.Ptr == nullptr)
    return StartingAccess;

  Instruction *I = nullptr;
  if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
    if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
      return StartingUseOrDef;

    I = StartingUseOrDef->getMemoryInst();

    // Conservatively, fences are always clobbers, so don't perform the walk
    // if we hit a fence.
    if (!isa<CallBase>(I) && I->isFenceLike())
      return StartingUseOrDef;
  }

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingAccess;
  Q.StartingLoc = Loc;
  Q.Inst = nullptr;
  Q.IsCall = false;

  // Unlike the other overload, do not walk to the def of a def, because we
  // are handed something we already believe is the clobbering access.
  MemoryAccess *Clobber =
      Walker.findClobber(BAA, StartingAccess, Q, UpwardWalkLimit);
  LLVM_DEBUG({
    dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
    if (I)
      dbgs() << "  for instruction " << *I << "\n";
    dbgs() << "  is " << *Clobber << "\n";
  });
  return Clobber;
}
static const Instruction *
getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT) {
  if (!I.hasMetadata(LLVMContext::MD_invariant_group) || I.isVolatile())
    return nullptr;

  // We consider bitcasts and zero GEPs to be the same pointer value. Start by
  // stripping bitcasts and zero GEPs, then we will recursively look at all of
  // the pointer's users to try to find the load/store with invariant.group.
  Value *PointerOperand = getLoadStorePointerOperand(&I)->stripPointerCasts();

  // It's not safe to walk the use list of a global value because function
  // passes aren't allowed to look outside their functions.
  if (isa<Constant>(PointerOperand))
    return nullptr;

  // Queue to process all pointers that are equivalent to the load operand.
  SmallVector<const Value *, 8> PointerUsesQueue;
  PointerUsesQueue.push_back(PointerOperand);

  const Instruction *MostDominatingInstruction = &I;

  while (!PointerUsesQueue.empty()) {
    const Value *Ptr = PointerUsesQueue.pop_back_val();
    assert(Ptr && !isa<GlobalValue>(Ptr) &&
           "Null or GlobalValue should not be inserted");

    for (const User *Us : Ptr->users()) {
      auto *U = dyn_cast<Instruction>(Us);
      if (!U || U == &I || !DT.dominates(U, MostDominatingInstruction))
        continue;

      // Add bitcasts and zero GEPs to queue.
      if (isa<BitCastInst>(U)) {
        PointerUsesQueue.push_back(U);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        if (GEP->hasAllZeroIndices())
          PointerUsesQueue.push_back(U);
        continue;
      }

      // If we hit a load/store with invariant.group metadata and the same
      // pointer operand, we can assume that the value pointed to by the
      // pointer operand didn't change.
      if (U->hasMetadata(LLVMContext::MD_invariant_group) &&
          getLoadStorePointerOperand(U) == Ptr && !U->isVolatile())
        MostDominatingInstruction = U;
    }
  }
  return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction;
}
MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
    MemoryAccess *MA, BatchAAResults &BAA, unsigned &UpwardWalkLimit,
    bool SkipSelf, bool UseInvariantGroup) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  if (UseInvariantGroup) {
    if (auto *I = getInvariantGroupClobberingInstruction(
            *StartingAccess->getMemoryInst(), MSSA->getDomTree())) {
      assert(isa<LoadInst>(I) || isa<StoreInst>(I));

      auto *ClobberMA = MSSA->getMemoryAccess(I);
      assert(ClobberMA);
      if (isa<MemoryUse>(ClobberMA))
        return ClobberMA->getDefiningAccess();
      return ClobberMA;
    }
  }

  bool IsOptimized = false;

  // If this is an already optimized use or def, return the optimized result.
  // Note: Currently, we store the optimized def result in a separate field,
  // since we can't use the defining access.
  if (StartingAccess->isOptimized()) {
    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
      return StartingAccess->getOptimized();
    IsOptimized = true;
  }

  const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with a fence, since they conservatively
  // clobber all memory and have no locations to get pointers from to try to
  // disambiguate.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q(I, StartingAccess);

  if (isUseTriviallyOptimizableToLiveOnEntry(BAA, I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    return LiveOnEntry;
  }

  MemoryAccess *OptimizedAccess;
  if (!IsOptimized) {
    // Start with the thing we already think clobbers this location.
    MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

    // At this point, DefiningAccess may be the live on entry def. If it is,
    // we will not get a better result.
    if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
      StartingAccess->setOptimized(DefiningAccess);
      return DefiningAccess;
    }

    OptimizedAccess =
        Walker.findClobber(BAA, DefiningAccess, Q, UpwardWalkLimit);
    StartingAccess->setOptimized(OptimizedAccess);
  } else
    OptimizedAccess = StartingAccess->getOptimized();

  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");

  MemoryAccess *Result;
  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
      isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
    assert(isa<MemoryDef>(Q.OriginalAccess));
    Q.SkipSelfAccess = true;
    Result = Walker.findClobber(BAA, OptimizedAccess, Q, UpwardWalkLimit);
  } else
    Result = OptimizedAccess;

  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");

  return Result;
}
MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA,
                                                    BatchAAResults &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &, BatchAAResults &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}
bool upward_defs_iterator::IsGuaranteedLoopInvariant(const Value *Ptr) const {
  auto IsGuaranteedLoopInvariantBase = [](const Value *Ptr) {
    Ptr = Ptr->stripPointerCasts();
    if (!isa<Instruction>(Ptr))
      return true;
    return isa<AllocaInst>(Ptr);
  };

  Ptr = Ptr->stripPointerCasts();
  if (auto *I = dyn_cast<Instruction>(Ptr)) {
    if (I->getParent()->isEntryBlock())
      return true;
  }
  if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
           GEP->hasAllConstantIndices();
  }
  return IsGuaranteedLoopInvariantBase(Ptr);
}