24 #define DEBUG_TYPE "memoryssa"
41 auto Cached = CachedPreviousDef.find(BB);
42 if (Cached != CachedPreviousDef.end())
43 return Cached->second;
50 VisitedBlocks.insert(BB);
52 MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
53 CachedPreviousDef.insert({BB, Result});
57 if (VisitedBlocks.count(BB)) {
62 CachedPreviousDef.insert({BB, Result});
66 if (VisitedBlocks.insert(BB).second) {
73 bool UniqueIncomingAccess = true;
77 auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
79 SingleAccess = IncomingAccess;
80 else if (IncomingAccess != SingleAccess)
81 UniqueIncomingAccess = false;
92 auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
94 if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
97 assert(Phi->operands().empty() && "Expected empty Phi");
98 Phi->replaceAllUsesWith(SingleAccess);
101 Result = SingleAccess;
102 } else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
104 Phi = MSSA->createMemoryPhi(BB);
109 if (Phi->getNumOperands() != 0) {
111 if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
119 Phi->addIncoming(&*PhiOps[i++], Pred);
120 InsertedPHIs.push_back(Phi);
126 VisitedBlocks.erase(BB);
127 CachedPreviousDef.insert({BB, Result});
138 if (auto *LocalResult = getPreviousDefInBlock(MA))
141 return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
153 if (!isa<MemoryUse>(MA)) {
156 if (Iter != Defs->rend())
162 if (!isa<MemoryUse>(U))
163 return cast<MemoryAccess>(&U);
179 return &*Defs->rbegin();
182 return getPreviousDefRecursive(BB, CachedPreviousDef);
190 std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
192 if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
193 tryRemoveTrivialPhi(UsePhi);
203 assert(Phi && "Can only remove concrete Phi.");
204 auto OperRange = Phi->operands();
205 return tryRemoveTrivialPhi(Phi, OperRange);
207 template <class RangeType>
211 if (NonOptPhis.count(Phi))
223 Same = cast<MemoryAccess>(&*Op);
235 return recursePhi(Same);
239 VisitedBlocks.clear();
240 InsertedPHIs.clear();
256 if (!RenameUses && !InsertedPHIs.empty()) {
259 assert((!Defs || (++Defs->begin() == Defs->end())) &&
260 "Block may have only a Phi or no defs");
263 if (RenameUses && InsertedPHIs.size()) {
271 if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
272 FirstDef = MD->getDefiningAccess();
278 for (auto &MP : InsertedPHIs)
279 if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
280 MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
290 assert(i != -1 && "Should have found the basic block in the phi");
314 VisitedBlocks.clear();
315 InsertedPHIs.clear();
319 bool DefBeforeSameBlock = false;
321 !(isa<MemoryPhi>(DefBefore) &&
323 DefBeforeSameBlock = true;
329 if (DefBeforeSameBlock) {
333 User *Usr = U.getUser();
334 return !isa<MemoryUse>(Usr) && Usr != MD;
348 unsigned NewPhiIndex = InsertedPHIs.size();
349 if (!DefBeforeSameBlock) {
371 for (const auto &VH : InsertedPHIs)
372 if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
373 DefiningBlocks.insert(RealPHI->getBlock());
379 for (auto *BBIDF : IDFBlocks) {
382 MPhi = MSSA->createMemoryPhi(BBIDF);
385 ExistingPhis.insert(MPhi);
393 NonOptPhis.insert(MPhi);
395 for (auto &MPhi : NewInsertedPHIs) {
396 auto *BBIDF = MPhi->getBlock();
399 MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef), Pred);
405 NewPhiIndex = InsertedPHIs.size();
406 for (auto &MPhi : NewInsertedPHIs) {
407 InsertedPHIs.push_back(&*MPhi);
416 unsigned NewPhiIndexEnd = InsertedPHIs.size();
418 while (!FixupList.empty()) {
419 unsigned StartingPHISize = InsertedPHIs.size();
420 fixupDefs(FixupList);
423 FixupList.append(InsertedPHIs.begin() + StartingPHISize, InsertedPHIs.end());
427 unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
429 tryRemoveTrivialPhis(ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));
441 if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
447 for (auto &MP : InsertedPHIs) {
448 MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
450 MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
454 for (const auto &MP : ExistingPhis) {
455 MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
457 MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
465 for (const auto &Var : Vars) {
466 MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
474 if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
475 NonOptPhis.erase(Phi);
478 if (++DefIter != Defs->end()) {
479 cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef);
493 while (!Worklist.empty()) {
498 auto *FirstDef = &*Defs->begin();
500 assert(!isa<MemoryPhi>(FirstDef) &&
501 "Should have already handled phi nodes!");
505 "Should have dominated the new access");
510 cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
514 for (const auto *S : successors(FixupBlock)) {
522 if (!Seen.insert(S).second)
533 MPhi->unorderedDeleteIncomingBlock(From);
534 tryRemoveTrivialPhi(MPhi);
550 tryRemoveTrivialPhi(MPhi);
561 MA = cast<MemoryAccess>(Arg);
571 bool CloneWasSimplified,
574 if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
577 assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
579 cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
581 if (!CloneWasSimplified)
582 assert(InsnDefining && "Defining instruction cannot be nullptr.");
583 else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
585 auto DefIt = DefMUD->getDefsIterator();
590 "Previous def must exist");
592 &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
597 MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
599 InsnDefining = NewDefPhi;
601 assert(InsnDefining && "Defining instruction cannot be nullptr.");
608 bool CloneWasSimplified) {
623 dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
627 MPhiMap, CloneWasSimplified, MSSA),
628 CloneWasSimplified ? nullptr : MUD,
629 CloneWasSimplified ? false : true);
645 auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
646 bool HasUniqueIncomingValue = true;
648 for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
651 if (IBB != Preheader) {
652 NewMPhi->addIncoming(IV, IBB);
653 if (HasUniqueIncomingValue) {
656 else if (UniqueValue != IV)
657 HasUniqueIncomingValue = false;
664 auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
665 MPhi->setIncomingValue(0, AccFromPreheader);
666 MPhi->setIncomingBlock(0, Preheader);
667 for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
668 MPhi->unorderedDeleteIncoming(I);
669 MPhi->addIncoming(NewMPhi, BEBlock);
673 tryRemoveTrivialPhi(NewMPhi);
679 bool IgnoreIncomingWithNoClones) {
683 assert(Phi && NewPhi && "Invalid Phi nodes.");
687 for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
688 MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
689 BasicBlock *IncBB = Phi->getIncomingBlock(It);
693 else if (IgnoreIncomingWithNoClones)
700 if (!NewPhiBBPreds.count(IncBB))
704 if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
707 assert(IncI && "Found MemoryUseOrDef with no Instruction.");
709 cast_or_null<Instruction>(VMap.lookup(IncI))) {
712 "MemoryUseOrDef cannot be null, all preds processed.");
715 NewPhi->addIncoming(IncMUD, IncBB);
717 MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
719 NewPhi->addIncoming(NewDefPhi, IncBB);
721 NewPhi->addIncoming(IncPhi, IncBB);
725 MPhiMap[Phi] = SingleAccess;
736 "Cloned block should have no accesses");
740 MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
741 MPhiMap[MPhi] = NewPhi;
744 cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
747 for (auto *BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
750 for (auto *BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
753 FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
768 MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
769 cloneUsesAndDefs(BB, P1, VM, MPhiMap, true);
772 template <typename Iter>
773 void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
778 for (auto *Exit : ExitBlocks)
791 privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
798 auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
801 using MappedIteratorType =
804 auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
805 auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
806 privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
814 for (const auto &Update : Updates) {
815 if (Update.getKind() == DT.Insert)
816 InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
818 DeleteUpdates.push_back({DT.Delete, Update.getFrom(), Update.getTo()});
819 RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
823 if (!DeleteUpdates.empty()) {
824 if (!InsertUpdates.empty()) {
856 for (auto &Update : DeleteUpdates)
875 return &*(--Defs->end());
880 for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
895 if (IDom->getBlock() != BB) {
896 BB = IDom->getBlock();
902 assert(Count == 1 && Pred && "Single predecessor expected.");
915 auto FindNearestCommonDominator =
918 for (auto *BB : BBSet)
925 auto GetNoLongerDomBlocks =
928 if (PrevIDom == CurrIDom)
930 BlocksPrevDom.push_back(PrevIDom);
934 if (UpIDom == CurrIDom)
936 BlocksPrevDom.push_back(UpIDom);
961 for (const auto &Edge : Updates) {
963 auto &AddedBlockSet = PredMap[BB].Added;
964 AddedBlockSet.insert(Edge.getFrom());
970 for (auto &BBPredPair : PredMap) {
971 auto *BB = BBPredPair.first;
972 const auto &AddedBlockSet = BBPredPair.second.Added;
973 auto &PrevBlockSet = BBPredPair.second.Prev;
974 for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
975 if (!AddedBlockSet.count(Pi))
976 PrevBlockSet.insert(Pi);
977 EdgeCountMap[{Pi, BB}]++;
980 if (PrevBlockSet.empty()) {
981 assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
984 << "Adding a predecessor to a block with no predecessors. "
985 "This must be an edge added to a new, likely cloned, block. "
986 "Its memory accesses must be already correct, assuming completed "
987 "via the updateExitBlocksForClonedLoop API. "
988 "Assert a single such edge is added so no phi addition or "
989 "additional processing is required.\n");
990 assert(AddedBlockSet.size() == 1 &&
991 "Can only handle adding one predecessor to a new block.");
998 for (auto *BB : NewBlocks)
1006 for (const auto &Edge : Updates) {
1009 InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
1013 for (auto &BBPredPair : PredMap) {
1014 auto *BB = BBPredPair.first;
1015 const auto &PrevBlockSet = BBPredPair.second.Prev;
1016 const auto &AddedBlockSet = BBPredPair.second.Added;
1017 assert(!PrevBlockSet.empty() &&
1018 "At least one previous predecessor must exist.");
1027 for (auto *AddedPred : AddedBlockSet) {
1028 auto *DefPn = GetLastDef(AddedPred);
1029 assert(DefPn != nullptr && "Unable to find last definition.");
1030 LastDefAddedPred[AddedPred] = DefPn;
1037 for (auto *Pred : AddedBlockSet) {
1038 auto *LastDefForPred = LastDefAddedPred[Pred];
1039 for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1045 auto *P1 = *PrevBlockSet.begin();
1050 bool InsertPhi = false;
1051 for (auto LastDefPredPair : LastDefAddedPred)
1052 if (DefP1 != LastDefPredPair.second) {
1068 for (auto *Pred : AddedBlockSet) {
1069 auto *LastDefForPred = LastDefAddedPred[Pred];
1070 for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1073 for (auto *Pred : PrevBlockSet)
1074 for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1081 BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
1082 assert(PrevIDom && "Previous IDom should exists");
1084 assert(NewIDom && "BB should have a new valid idom");
1086 "New idom should dominate old idom");
1087 GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
1090 tryRemoveTrivialPhis(InsertedPhis);
1094 for (auto &VH : InsertedPhis)
1095 if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
1096 BlocksToProcess.push_back(MPhi->getBlock());
1100 if (!BlocksToProcess.empty()) {
1103 BlocksToProcess.end());
1104 IDFs.setDefiningBlocks(DefiningBlocks);
1105 IDFs.calculate(IDFBlocks);
1109 for (auto *BBIDF : IDFBlocks)
1111 auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
1112 InsertedPhis.push_back(IDFPhi);
1113 PhisToFill.insert(IDFPhi);
1116 for (auto *BBIDF : IDFBlocks) {
1118 assert(IDFPhi && "Phi must exist");
1119 if (!PhisToFill.count(IDFPhi)) {
1122 for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
1123 IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
1125 for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BBIDF))
1126 IDFPhi->addIncoming(GetLastDef(Pi), Pi);
1134 for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
1136 for (auto &DefToReplaceUses : *DefsList) {
1137 BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
1140 if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
1141 BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
1142 if (!DT.dominates(DominatingBlock, DominatedBlock))
1143 U.set(GetLastDef(DominatedBlock));
1146 if (!DT.dominates(DominatingBlock, DominatedBlock)) {
1151 assert(IDom && "Block must have a valid IDom.");
1152 U.set(GetLastDef(IDom->getBlock()));
1154 cast<MemoryUseOrDef>(Usr)->resetOptimized();
1161 tryRemoveTrivialPhis(InsertedPhis);
1165 template <class WhereType>
1169 for (auto *U : What->users())
1170 if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
1171 NonOptPhis.insert(PhiUser);
1177 MSSA->moveTo(What, BB, Where);
1180 if (auto *MD = dyn_cast<MemoryDef>(What))
1203 return moveTo(What, BB, Where);
1219 assert(Start->getParent() == To && "Incorrect Start instruction");
1225 auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
1227 auto NextIt = ++MUD->getIterator();
1230 : cast<MemoryUseOrDef>(&*NextIt);
1242 if (Defs && !Defs->empty())
1243 if (auto *Phi = dyn_cast<MemoryPhi>(&*Defs->begin()))
1244 tryRemoveTrivialPhi(Phi);
1251 "To block is expected to be free of MemoryAccesses.");
1252 moveAllAccesses(From, To, Start);
1255 MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
1261 "From block is expected to have a single predecessor (To).");
1262 moveAllAccesses(From, To, Start);
1265 MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
1270 bool IdenticalEdgesWereMerged) {
1272 "Access list should be null for a new block.");
1278 "Should have moved all predecessors.");
1281 assert(!Preds.empty() && "Must be moving at least one predecessor to the "
1282 "new immediate predecessor.");
1283 MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
1287 if (!IdenticalEdgesWereMerged)
1289 "If identical edges were not merged, we cannot have duplicate "
1290 "blocks in the predecessors");
1293 NewPhi->addIncoming(MA, B);
1294 if (!IdenticalEdgesWereMerged)
1300 Phi->addIncoming(NewPhi, New);
1301 tryRemoveTrivialPhi(NewPhi);
1307 "Trying to remove the live on entry def");
1311 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
1319 "We can't delete this memory phi");
1321 NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
1327 if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
1341 assert(NewDefTarget != MA && "Going into an infinite loop");
1344 if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
1345 MUD->resetOptimized();
1347 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
1349 U.set(NewDefTarget);
1359 if (!PhisToCheck.empty()) {
1362 PhisToCheck.clear();
1364 unsigned PhisSize = PhisToOptimize.size();
1365 while (PhisSize-- > 0)
1367 cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
1368 tryRemoveTrivialPhi(MP);
1377 assert(TI && "Basic block expected to have a terminator instruction");
1379 if (!DeadBlocks.count(Succ))
1381 MP->unorderedDeleteIncomingBlock(BB);
1382 tryRemoveTrivialPhi(MP);
1403 for (const auto &VH : UpdatedPHIs)
1404 if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
1405 tryRemoveTrivialPhi(MPhi);
1411 auto BBI = I->getIterator(), BBE = BB->end();
1421 MPhi->unorderedDeleteIncomingBlock(BB);
1426 tryRemoveTrivialPhis(UpdatedPHIs);
1440 "New and old access must be in the same block");
1450 "New and old access must be in the same block");
static MemoryAccess * getNewDefiningAccessForClone(MemoryAccess *MA, const ValueToValueMapTy &VMap, PhiToDefMap &MPhiMap, bool CloneWasSimplified, MemorySSA *MSSA)
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB, MemoryAccess *NewDef)
static MemoryAccess * onlySingleValue(MemoryPhi *MP)
If all arguments of a MemoryPHI are defined by the same incoming argument, return that argument.
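For illustration, the property this helper checks can be restated as a small standalone function (a sketch only; the name and structure below are not taken from the file):

#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

// Sketch: return the unique incoming MemoryAccess of MP, or nullptr as soon as
// two incoming values differ.
static MemoryAccess *uniqueIncomingAccess(MemoryPhi *MP) {
  MemoryAccess *Only = nullptr;
  for (unsigned I = 0, E = MP->getNumIncomingValues(); I != E; ++I) {
    MemoryAccess *V = MP->getIncomingValue(I);
    if (!Only)
      Only = V;
    else if (V != Only)
      return nullptr; // Not a single-valued phi.
  }
  return Only;
}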
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
reverse_iterator rbegin()
bool hasNPredecessors(unsigned N) const
Return true if this block has exactly N predecessors.
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class represents an Operation in the Expression.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
DomTreeNodeBase * getIDom() const
void applyUpdates(ArrayRef< UpdateType > Updates)
Inform the dominator tree about a sequence of CFG edge insertions and deletions and perform a batch u...
static constexpr UpdateKind Delete
static constexpr UpdateKind Insert
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Instruction * findNearestCommonDominator(Instruction *I1, Instruction *I2) const
Find the nearest instruction I that dominates both I1 and I2, in the sense that a result produced bef...
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
void calculate(SmallVectorImpl< NodeTy * > &IDFBlocks)
Calculate iterated dominance frontiers.
void setDefiningBlocks(const SmallPtrSetImpl< NodeTy * > &Blocks)
Give the IDF calculator the set of blocks in which the value is defined.
BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
AllAccessType::reverse_self_iterator getReverseIterator()
DefsOnlyType::self_iterator getDefsIterator()
DefsOnlyType::reverse_self_iterator getReverseDefsIterator()
BasicBlock * getBlock() const
AllAccessType::self_iterator getIterator()
Get the iterators for the all access list and the defs only list We default to the all access list.
Represents a read-write access to memory, whether it is a must-alias, or a may-alias.
Represents phi nodes for memory accesses.
void setIncomingValue(unsigned I, MemoryAccess *V)
iterator_range< block_iterator > blocks()
void addIncoming(MemoryAccess *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
MemoryUseOrDef * createMemoryAccessBefore(Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt)
Create a MemoryAccess in MemorySSA before an existing MemoryAccess.
void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
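A usage sketch (helper name and values are illustrative, not from this file): a transform that has just inserted NewStore into BB typically creates the access with createMemoryAccessInBB, listed further down, and then registers it with insertDef.

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: Defn is the memory state NewStore is known to clobber.
static void addDefForNewStore(MemorySSAUpdater &MSSAU, StoreInst *NewStore,
                              MemoryAccess *Defn, BasicBlock *BB) {
  MemoryAccess *NewMA = MSSAU.createMemoryAccessInBB(
      NewStore, Defn, BB, MemorySSA::BeforeTerminator);
  // insertDef wires the new MemoryDef into the def chain; RenameUses also
  // retargets the uses downstream of it.
  MSSAU.insertDef(cast<MemoryDef>(NewMA), /*RenameUses=*/true);
}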
void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where)
void removeEdge(BasicBlock *From, BasicBlock *To)
Update the MemoryPhi in To following an edge deletion between From and To.
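A minimal sketch of when this is called (names illustrative): after a branch in BB is folded so that it no longer reaches DeadSucc, both the ordinary PHIs and the MemoryPhi in DeadSucc must drop the incoming entry.

#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/IR/BasicBlock.h"
using namespace llvm;

static void dropFoldedEdge(MemorySSAUpdater &MSSAU, BasicBlock *BB,
                           BasicBlock *DeadSucc) {
  DeadSucc->removePredecessor(BB); // IR-level PHI nodes
  MSSAU.removeEdge(BB, DeadSucc);  // MemoryPhi in DeadSucc
}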
void updateForClonedLoop(const LoopBlocksRPO &LoopBlocks, ArrayRef< BasicBlock * > ExitBlocks, const ValueToValueMapTy &VM, bool IgnoreIncomingWithNoClones=false)
Update MemorySSA after a loop was cloned, given the blocks in RPO order, the exit blocks and a 1:1 ma...
MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point)
Create a MemoryAccess in MemorySSA at a specified point in a block.
void changeToUnreachable(const Instruction *I)
Instruction I will be changed to an unreachable.
void removeDuplicatePhiEdgesBetween(const BasicBlock *From, const BasicBlock *To)
Update the MemoryPhi in To to have a single incoming edge from From, following a CFG change that repl...
void updatePhisWhenInsertingUniqueBackedgeBlock(BasicBlock *LoopHeader, BasicBlock *LoopPreheader, BasicBlock *BackedgeBlock)
Update MemorySSA when inserting a unique backedge block for a loop.
void insertUse(MemoryUse *Use, bool RenameUses=false)
void removeBlocks(const SmallSetVector< BasicBlock *, 8 > &DeadBlocks)
Remove all MemoryAcceses in a set of BasicBlocks about to be deleted.
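A sketch of the intended call order, assuming the caller has already rerouted or erased every branch into the dead region:

#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/IR/BasicBlock.h"
using namespace llvm;

// Sketch: drop the MemoryAccesses (and incoming MemoryPhi edges) first, then
// erase the IR blocks themselves.
static void deleteDeadRegion(MemorySSAUpdater &MSSAU,
                             const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
  MSSAU.removeBlocks(DeadBlocks);
  for (BasicBlock *BB : DeadBlocks)
    BB->eraseFromParent();
}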
void moveAllAfterSpliceBlocks(BasicBlock *From, BasicBlock *To, Instruction *Start)
From block was spliced into From and To.
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
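For example (a sketch, helper name invented here): when erasing an instruction, its MemoryAccess should be removed first so MemorySSA never points at a dangling instruction.

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
using namespace llvm;

static void eraseInstructionWithMSSA(MemorySSAUpdater &MSSAU, MemorySSA &MSSA,
                                     Instruction *I) {
  if (MemoryAccess *MA = MSSA.getMemoryAccess(I))
    MSSAU.removeMemoryAccess(MA, /*OptimizePhis=*/true);
  I->eraseFromParent();
}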
void applyInsertUpdates(ArrayRef< CFGUpdate > Updates, DominatorTree &DT)
Apply CFG insert updates, analogous with the DT edge updates.
MemoryUseOrDef * createMemoryAccessAfter(Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt)
Create a MemoryAccess in MemorySSA after an existing MemoryAccess.
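A sketch pairing this with insertDef (names illustrative): NewStore was placed immediately after an existing store, so its access is created right after that store's MemoryDef.

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static void mirrorDuplicatedStore(MemorySSAUpdater &MSSAU, MemorySSA &MSSA,
                                  StoreInst *OldStore, StoreInst *NewStore) {
  auto *OldMA = cast<MemoryDef>(MSSA.getMemoryAccess(OldStore));
  MemoryUseOrDef *NewMA =
      MSSAU.createMemoryAccessAfter(NewStore, /*Definition=*/OldMA,
                                    /*InsertPt=*/OldMA);
  MSSAU.insertDef(cast<MemoryDef>(NewMA), /*RenameUses=*/true);
}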
void updateForClonedBlockIntoPred(BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM)
void applyUpdates(ArrayRef< CFGUpdate > Updates, DominatorTree &DT, bool UpdateDTFirst=false)
Apply CFG updates, analogous with the DT edge updates.
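A sketch of reporting a split edge (block names illustrative). The update list is the same cfg::Update vector the DominatorTree consumes, and with UpdateDTFirst the updater applies it to the DT before using it.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/IR/Dominators.h"
using namespace llvm;

// Sketch: NewBB was inserted on the edge Pred -> Succ.
static void reportSplitEdge(MemorySSAUpdater &MSSAU, DominatorTree &DT,
                            BasicBlock *Pred, BasicBlock *Succ,
                            BasicBlock *NewBB) {
  SmallVector<DominatorTree::UpdateType, 3> Updates;
  Updates.push_back({DominatorTree::Delete, Pred, Succ});
  Updates.push_back({DominatorTree::Insert, Pred, NewBB});
  Updates.push_back({DominatorTree::Insert, NewBB, Succ});
  MSSAU.applyUpdates(Updates, DT, /*UpdateDTFirst=*/true);
}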
void moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To, Instruction *Start)
From block was merged into To.
void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, MemorySSA::InsertionPlace Where)
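Sketch of a hoist (assuming the instruction itself has already been moved to the end of Preheader; names illustrative):

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
using namespace llvm;

// Sketch: move the MemoryUse/MemoryDef of HoistedI to the matching position.
static void moveHoistedAccess(MemorySSAUpdater &MSSAU, MemorySSA &MSSA,
                              Instruction *HoistedI, BasicBlock *Preheader) {
  if (MemoryUseOrDef *MUD = MSSA.getMemoryAccess(HoistedI))
    MSSAU.moveToPlace(MUD, Preheader, MemorySSA::End);
}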
void wireOldPredecessorsToNewImmediatePredecessor(BasicBlock *Old, BasicBlock *New, ArrayRef< BasicBlock * > Preds, bool IdenticalEdgesWereMerged=true)
A new empty BasicBlock (New) now branches directly to Old.
void updateExitBlocksForClonedLoop(ArrayRef< BasicBlock * > ExitBlocks, const ValueToValueMapTy &VMap, DominatorTree &DT)
Update phi nodes in exit block successors following cloning.
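A sketch of the order of calls after cloning a loop body (LoopBlocks, ExitBlocks and VMap are assumed to come from the cloning utility):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

static void updateMSSAAfterLoopClone(MemorySSAUpdater &MSSAU,
                                     const LoopBlocksRPO &LoopBlocks,
                                     ArrayRef<BasicBlock *> ExitBlocks,
                                     const ValueToValueMapTy &VMap,
                                     DominatorTree &DT) {
  // First clone the accesses for the loop blocks, then fix up the phis in the
  // exit block successors.
  MSSAU.updateForClonedLoop(LoopBlocks, ExitBlocks, VMap);
  MSSAU.updateExitBlocksForClonedLoop(ExitBlocks, VMap, DT);
}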
void moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where)
Encapsulates MemorySSA, including all data associated with memory accesses.
void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where)
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, SmallPtrSetImpl< BasicBlock * > &Visited)
bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
AccessList * getWritableBlockAccesses(const BasicBlock *BB) const
void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, InsertionPlace)
InsertionPlace
Used in various insertion functions to specify whether we are talking about the beginning or end of a...
void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, AccessList::iterator)
MemoryUseOrDef * createDefinedAccess(Instruction *, MemoryAccess *, const MemoryUseOrDef *Template=nullptr, bool CreationMustSucceed=true)
DefsList * getWritableBlockDefs(const BasicBlock *BB) const
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
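For example (a sketch): the memory state a load reads is the defining access of its MemoryUse.

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static MemoryAccess *stateReadBy(MemorySSA &MSSA, LoadInst *LI) {
  if (MemoryUseOrDef *MU = MSSA.getMemoryAccess(LI))
    return MU->getDefiningAccess();
  return nullptr; // No access recorded (not expected for a load).
}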
MemoryAccess * getLiveOnEntryDef() const
void removeFromLookups(MemoryAccess *)
Properly remove MA from all of MemorySSA's lookup tables.
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
void removeFromLists(MemoryAccess *, bool ShouldDelete=true)
Properly remove MA from all of MemorySSA's lists.
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Class that has the common methods + fields of memory uses/defs.
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
void setDefiningAccess(MemoryAccess *DMA, bool Optimized=false)
Represents read-only accesses to memory.
iterator end()
Get an iterator to the end of the SetVector.
void clear()
Completely clear the SetVector.
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
bool empty() const
Determine if the SetVector is empty or not.
iterator begin()
Get an iterator to the beginning of the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Value handle that tracks a Value across RAUW.
A Use represents the edge between a Value definition and its users.
void dropAllReferences()
Drop all references to operands.
unsigned getNumOperands() const
static void ValueIsRAUWd(Value *Old, Value *New)
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
bool hasValueHandle() const
Return true if there is a value handle associated with this value.
An intrusive list with ownership and callbacks specified/controlled by ilist_traits,...
A simple intrusive list implementation.
iterator insert(iterator I, reference Node)
Insert a node by reference; never copies.
bool empty() const
Check if the list is empty in constant time.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
auto successors(const MachineBasicBlock *BB)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Interval::pred_iterator pred_end(Interval *I)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
OutputIt copy(R &&Range, OutputIt Out)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
unsigned pred_size(const MachineBasicBlock *BB)