LLVM  14.0.0git
MemorySSAUpdater.cpp
Go to the documentation of this file.
1 //===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------===//
8 //
9 // This file implements the MemorySSAUpdater class.
10 //
11 //===----------------------------------------------------------------===//
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/IR/BasicBlock.h"
20 #include "llvm/IR/DataLayout.h"
21 #include "llvm/IR/Dominators.h"
22 #include "llvm/IR/GlobalVariable.h"
23 #include "llvm/IR/IRBuilder.h"
24 #include "llvm/IR/LLVMContext.h"
25 #include "llvm/IR/Metadata.h"
26 #include "llvm/IR/Module.h"
27 #include "llvm/Support/Debug.h"
29 #include <algorithm>
30 
31 #define DEBUG_TYPE "memoryssa"
32 using namespace llvm;
33 
34 // This is the marker algorithm from "Simple and Efficient Construction of
35 // Static Single Assignment Form"
36 // The simple, non-marker algorithm places phi nodes at any join
37 // Here, we place markers, and only place phi nodes if they end up necessary.
38 // They are only necessary if they break a cycle (IE we recursively visit
39 // ourselves again), or we discover, while getting the value of the operands,
40 // that there are two or more definitions needing to be merged.
41 // This still will leave non-minimal form in the case of irreducible control
42 // flow, where phi nodes may be in cycles with themselves, but unnecessary.
43 MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
44  BasicBlock *BB,
45  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
46  // First, do a cache lookup. Without this cache, certain CFG structures
47  // (like a series of if statements) take exponential time to visit.
48  auto Cached = CachedPreviousDef.find(BB);
49  if (Cached != CachedPreviousDef.end())
50  return Cached->second;
51 
52  // If this method is called from an unreachable block, return LoE.
53  if (!MSSA->DT->isReachableFromEntry(BB))
54  return MSSA->getLiveOnEntryDef();
55 
56  if (BasicBlock *Pred = BB->getUniquePredecessor()) {
57  VisitedBlocks.insert(BB);
58  // Single predecessor case, just recurse, we can only have one definition.
59  MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
60  CachedPreviousDef.insert({BB, Result});
61  return Result;
62  }
63 
64  if (VisitedBlocks.count(BB)) {
65  // We hit our node again, meaning we had a cycle, we must insert a phi
66  // node to break it so we have an operand. The only case this will
67  // insert useless phis is if we have irreducible control flow.
68  MemoryAccess *Result = MSSA->createMemoryPhi(BB);
69  CachedPreviousDef.insert({BB, Result});
70  return Result;
71  }
72 
73  if (VisitedBlocks.insert(BB).second) {
74  // Mark us visited so we can detect a cycle
76 
77  // Recurse to get the values in our predecessors for placement of a
78  // potential phi node. This will insert phi nodes if we cycle in order to
79  // break the cycle and have an operand.
80  bool UniqueIncomingAccess = true;
81  MemoryAccess *SingleAccess = nullptr;
82  for (auto *Pred : predecessors(BB)) {
83  if (MSSA->DT->isReachableFromEntry(Pred)) {
84  auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
85  if (!SingleAccess)
86  SingleAccess = IncomingAccess;
87  else if (IncomingAccess != SingleAccess)
88  UniqueIncomingAccess = false;
89  PhiOps.push_back(IncomingAccess);
90  } else
91  PhiOps.push_back(MSSA->getLiveOnEntryDef());
92  }
93 
94  // Now try to simplify the ops to avoid placing a phi.
95  // This may return null if we never created a phi yet, that's okay
96  MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));
97 
98  // See if we can avoid the phi by simplifying it.
99  auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
100  // If we couldn't simplify, we may have to create a phi
101  if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
102  // A concrete Phi only exists if we created an empty one to break a cycle.
103  if (Phi) {
104  assert(Phi->operands().empty() && "Expected empty Phi");
105  Phi->replaceAllUsesWith(SingleAccess);
106  removeMemoryAccess(Phi);
107  }
108  Result = SingleAccess;
109  } else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
110  if (!Phi)
111  Phi = MSSA->createMemoryPhi(BB);
112 
113  // See if the existing phi operands match what we need.
114  // Unlike normal SSA, we only allow one phi node per block, so we can't just
115  // create a new one.
116  if (Phi->getNumOperands() != 0) {
117  // FIXME: Figure out whether this is dead code and if so remove it.
118  if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
119  // These will have been filled in by the recursive read we did above.
120  llvm::copy(PhiOps, Phi->op_begin());
122  }
123  } else {
124  unsigned i = 0;
125  for (auto *Pred : predecessors(BB))
126  Phi->addIncoming(&*PhiOps[i++], Pred);
127  InsertedPHIs.push_back(Phi);
128  }
129  Result = Phi;
130  }
131 
132  // Set ourselves up for the next variable by resetting visited state.
133  VisitedBlocks.erase(BB);
134  CachedPreviousDef.insert({BB, Result});
135  return Result;
136  }
137  llvm_unreachable("Should have hit one of the three cases above");
138 }
139 
140 // This starts at the memory access, and goes backwards in the block to find the
141 // previous definition. If a definition is not found the block of the access,
142 // it continues globally, creating phi nodes to ensure we have a single
143 // definition.
144 MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
145  if (auto *LocalResult = getPreviousDefInBlock(MA))
146  return LocalResult;
148  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
149 }
150 
// This starts at the memory access, and goes backwards in the block to the find
// the previous definition. If the definition is not found in the block of the
// access, it returns nullptr.
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      // MA is itself on the def list; step one def backwards from it.
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, have to walk the all access iterator.
      // A MemoryUse is not on the def list, so scan the full access list
      // backwards from MA until the first non-use (Def or Phi) is found.
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
      return nullptr;
    }
  }
  return nullptr;
}
177 
178 // This starts at the end of block
179 MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
180  BasicBlock *BB,
181  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
182  auto *Defs = MSSA->getWritableBlockDefs(BB);
183 
184  if (Defs) {
185  CachedPreviousDef.insert({BB, &*Defs->rbegin()});
186  return &*Defs->rbegin();
187  }
188 
189  return getPreviousDefRecursive(BB, CachedPreviousDef);
190 }
191 // Recurse over a set of phi uses to eliminate the trivial ones
192 MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
193  if (!Phi)
194  return nullptr;
195  TrackingVH<MemoryAccess> Res(Phi);
197  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
198  for (auto &U : Uses)
199  if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
200  tryRemoveTrivialPhi(UsePhi);
201  return Res;
202 }
203 
// Eliminate trivial phis
// Phis are trivial if they are defined either by themselves, or all the same
// argument.
// IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
// We recursively try to remove them.
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
  assert(Phi && "Can only remove concrete Phi.");
  // Forward to the range-based overload, using the phi's own operand list.
  auto OperRange = Phi->operands();
  return tryRemoveTrivialPhi(Phi, OperRange);
}
// Range-based core of trivial-phi elimination. Operands may be a speculative
// operand list for a phi that does not exist yet (see
// getPreviousDefRecursive), so Phi may be null.
template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the same or self, good so far
    if (Op == Phi || Op == Same)
      continue;
    // not the same, return the phi since it's not eliminatable by us
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }
  // Never found a non-self reference, the phi is undef
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  // Only a concrete phi can be replaced and removed; with a null Phi the
  // caller just wanted the simplified value.
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}
244 
245 void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
246  InsertedPHIs.clear();
247  MU->setDefiningAccess(getPreviousDef(MU));
248 
249  // In cases without unreachable blocks, because uses do not create new
250  // may-defs, there are only two cases:
251  // 1. There was a def already below us, and therefore, we should not have
252  // created a phi node because it was already needed for the def.
253  //
254  // 2. There is no def below us, and therefore, there is no extra renaming work
255  // to do.
256 
257  // In cases with unreachable blocks, where the unnecessary Phis were
258  // optimized out, adding the Use may re-insert those Phis. Hence, when
259  // inserting Uses outside of the MSSA creation process, and new Phis were
260  // added, rename all uses if we are asked.
261 
262  if (!RenameUses && !InsertedPHIs.empty()) {
263  auto *Defs = MSSA->getBlockDefs(MU->getBlock());
264  (void)Defs;
265  assert((!Defs || (++Defs->begin() == Defs->end())) &&
266  "Block may have only a Phi or no defs");
267  }
268 
269  if (RenameUses && InsertedPHIs.size()) {
271  BasicBlock *StartBlock = MU->getBlock();
272 
273  if (auto *Defs = MSSA->getWritableBlockDefs(StartBlock)) {
274  MemoryAccess *FirstDef = &*Defs->begin();
275  // Convert to incoming value if it's a memorydef. A phi *is* already an
276  // incoming value.
277  if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
278  FirstDef = MD->getDefiningAccess();
279 
280  MSSA->renamePass(MU->getBlock(), FirstDef, Visited);
281  }
282  // We just inserted a phi into this block, so the incoming value will
283  // become the phi anyway, so it does not matter what we pass.
284  for (auto &MP : InsertedPHIs)
285  if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
286  MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
287  }
288 }
289 
290 // Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
292  MemoryAccess *NewDef) {
293  // Replace any operand with us an incoming block with the new defining
294  // access.
295  int i = MP->getBasicBlockIndex(BB);
296  assert(i != -1 && "Should have found the basic block in the phi");
297  // We can't just compare i against getNumOperands since one is signed and the
298  // other not. So use it to index into the block iterator.
299  for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
300  ++BBIter) {
301  if (*BBIter != BB)
302  break;
303  MP->setIncomingValue(i, NewDef);
304  ++i;
305  }
306 }
307 
308 // A brief description of the algorithm:
309 // First, we compute what should define the new def, using the SSA
310 // construction algorithm.
311 // Then, we update the defs below us (and any new phi nodes) in the graph to
312 // point to the correct new defs, to ensure we only have one variable, and no
313 // disconnected stores.
314 void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
315  InsertedPHIs.clear();
316 
317  // See if we had a local def, and if not, go hunting.
318  MemoryAccess *DefBefore = getPreviousDef(MD);
319  bool DefBeforeSameBlock = false;
320  if (DefBefore->getBlock() == MD->getBlock() &&
321  !(isa<MemoryPhi>(DefBefore) &&
322  llvm::is_contained(InsertedPHIs, DefBefore)))
323  DefBeforeSameBlock = true;
324 
325  // There is a def before us, which means we can replace any store/phi uses
326  // of that thing with us, since we are in the way of whatever was there
327  // before.
328  // We now define that def's memorydefs and memoryphis
329  if (DefBeforeSameBlock) {
330  DefBefore->replaceUsesWithIf(MD, [MD](Use &U) {
331  // Leave the MemoryUses alone.
332  // Also make sure we skip ourselves to avoid self references.
333  User *Usr = U.getUser();
334  return !isa<MemoryUse>(Usr) && Usr != MD;
335  // Defs are automatically unoptimized when the user is set to MD below,
336  // because the isOptimized() call will fail to find the same ID.
337  });
338  }
339 
340  // and that def is now our defining access.
341  MD->setDefiningAccess(DefBefore);
342 
343  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());
344 
345  SmallSet<WeakVH, 8> ExistingPhis;
346 
347  // Remember the index where we may insert new phis.
348  unsigned NewPhiIndex = InsertedPHIs.size();
349  if (!DefBeforeSameBlock) {
350  // If there was a local def before us, we must have the same effect it
351  // did. Because every may-def is the same, any phis/etc we would create, it
352  // would also have created. If there was no local def before us, we
353  // performed a global update, and have to search all successors and make
354  // sure we update the first def in each of them (following all paths until
355  // we hit the first def along each path). This may also insert phi nodes.
356  // TODO: There are other cases we can skip this work, such as when we have a
357  // single successor, and only used a straight line of single pred blocks
358  // backwards to find the def. To make that work, we'd have to track whether
359  // getDefRecursive only ever used the single predecessor case. These types
360  // of paths also only exist in between CFG simplifications.
361 
362  // If this is the first def in the block and this insert is in an arbitrary
363  // place, compute IDF and place phis.
364  SmallPtrSet<BasicBlock *, 2> DefiningBlocks;
365 
366  // If this is the last Def in the block, we may need additional Phis.
367  // Compute IDF in all cases, as renaming needs to be done even when MD is
368  // not the last access, because it can introduce a new access past which a
369  // previous access was optimized; that access needs to be reoptimized.
370  DefiningBlocks.insert(MD->getBlock());
371  for (const auto &VH : InsertedPHIs)
372  if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
373  DefiningBlocks.insert(RealPHI->getBlock());
374  ForwardIDFCalculator IDFs(*MSSA->DT);
376  IDFs.setDefiningBlocks(DefiningBlocks);
377  IDFs.calculate(IDFBlocks);
378  SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
379  for (auto *BBIDF : IDFBlocks) {
380  auto *MPhi = MSSA->getMemoryAccess(BBIDF);
381  if (!MPhi) {
382  MPhi = MSSA->createMemoryPhi(BBIDF);
383  NewInsertedPHIs.push_back(MPhi);
384  } else {
385  ExistingPhis.insert(MPhi);
386  }
387  // Add the phis created into the IDF blocks to NonOptPhis, so they are not
388  // optimized out as trivial by the call to getPreviousDefFromEnd below.
389  // Once they are complete, all these Phis are added to the FixupList, and
390  // removed from NonOptPhis inside fixupDefs(). Existing Phis in IDF may
391  // need fixing as well, and potentially be trivial before this insertion,
392  // hence add all IDF Phis. See PR43044.
393  NonOptPhis.insert(MPhi);
394  }
395  for (auto &MPhi : NewInsertedPHIs) {
396  auto *BBIDF = MPhi->getBlock();
397  for (auto *Pred : predecessors(BBIDF)) {
399  MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef), Pred);
400  }
401  }
402 
403  // Re-take the index where we're adding the new phis, because the above call
404  // to getPreviousDefFromEnd, may have inserted into InsertedPHIs.
405  NewPhiIndex = InsertedPHIs.size();
406  for (auto &MPhi : NewInsertedPHIs) {
407  InsertedPHIs.push_back(&*MPhi);
408  FixupList.push_back(&*MPhi);
409  }
410 
411  FixupList.push_back(MD);
412  }
413 
414  // Remember the index where we stopped inserting new phis above, since the
415  // fixupDefs call in the loop below may insert more, that are already minimal.
416  unsigned NewPhiIndexEnd = InsertedPHIs.size();
417 
418  while (!FixupList.empty()) {
419  unsigned StartingPHISize = InsertedPHIs.size();
420  fixupDefs(FixupList);
421  FixupList.clear();
422  // Put any new phis on the fixup list, and process them
423  FixupList.append(InsertedPHIs.begin() + StartingPHISize, InsertedPHIs.end());
424  }
425 
426  // Optimize potentially non-minimal phis added in this method.
427  unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
428  if (NewPhiSize)
429  tryRemoveTrivialPhis(ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));
430 
431  // Now that all fixups are done, rename all uses if we are asked. Skip
432  // renaming for defs in unreachable blocks.
433  BasicBlock *StartBlock = MD->getBlock();
434  if (RenameUses && MSSA->getDomTree().getNode(StartBlock)) {
436  // We are guaranteed there is a def in the block, because we just got it
437  // handed to us in this function.
438  MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
439  // Convert to incoming value if it's a memorydef. A phi *is* already an
440  // incoming value.
441  if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
442  FirstDef = MD->getDefiningAccess();
443 
444  MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
445  // We just inserted a phi into this block, so the incoming value will become
446  // the phi anyway, so it does not matter what we pass.
447  for (auto &MP : InsertedPHIs) {
448  MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
449  if (Phi)
450  MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
451  }
452  // Existing Phi blocks may need renaming too, if an access was previously
453  // optimized and the inserted Defs "covers" the Optimized value.
454  for (auto &MP : ExistingPhis) {
455  MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
456  if (Phi)
457  MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
458  }
459  }
460 }
461 
462 void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
465  for (auto &Var : Vars) {
466  MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
467  if (!NewDef)
468  continue;
469  // First, see if there is a local def after the operand.
470  auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
471  auto DefIter = NewDef->getDefsIterator();
472 
473  // The temporary Phi is being fixed, unmark it for not to optimize.
474  if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
475  NonOptPhis.erase(Phi);
476 
477  // If there is a local def after us, we only have to rename that.
478  if (++DefIter != Defs->end()) {
479  cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef);
480  continue;
481  }
482 
483  // Otherwise, we need to search down through the CFG.
484  // For each of our successors, handle it directly if their is a phi, or
485  // place on the fixup worklist.
486  for (const auto *S : successors(NewDef->getBlock())) {
487  if (auto *MP = MSSA->getMemoryAccess(S))
488  setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
489  else
490  Worklist.push_back(S);
491  }
492 
493  while (!Worklist.empty()) {
494  const BasicBlock *FixupBlock = Worklist.pop_back_val();
495 
496  // Get the first def in the block that isn't a phi node.
497  if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
498  auto *FirstDef = &*Defs->begin();
499  // The loop above and below should have taken care of phi nodes
500  assert(!isa<MemoryPhi>(FirstDef) &&
501  "Should have already handled phi nodes!");
502  // We are now this def's defining access, make sure we actually dominate
503  // it
504  assert(MSSA->dominates(NewDef, FirstDef) &&
505  "Should have dominated the new access");
506 
507  // This may insert new phi nodes, because we are not guaranteed the
508  // block we are processing has a single pred, and depending where the
509  // store was inserted, it may require phi nodes below it.
510  cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
511  return;
512  }
513  // We didn't find a def, so we must continue.
514  for (const auto *S : successors(FixupBlock)) {
515  // If there is a phi node, handle it.
516  // Otherwise, put the block on the worklist
517  if (auto *MP = MSSA->getMemoryAccess(S))
518  setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
519  else {
520  // If we cycle, we should have ended up at a phi node that we already
521  // processed. FIXME: Double check this
522  if (!Seen.insert(S).second)
523  continue;
524  Worklist.push_back(S);
525  }
526  }
527  }
528  }
529 }
530 
532  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
533  MPhi->unorderedDeleteIncomingBlock(From);
534  tryRemoveTrivialPhi(MPhi);
535  }
536 }
537 
539  const BasicBlock *To) {
540  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
541  bool Found = false;
542  MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
543  if (From != B)
544  return false;
545  if (Found)
546  return true;
547  Found = true;
548  return false;
549  });
550  tryRemoveTrivialPhi(MPhi);
551  }
552 }
553 
554 /// If all arguments of a MemoryPHI are defined by the same incoming
555 /// argument, return that argument.
557  MemoryAccess *MA = nullptr;
558 
559  for (auto &Arg : MP->operands()) {
560  if (!MA)
561  MA = cast<MemoryAccess>(Arg);
562  else if (MA != Arg)
563  return nullptr;
564  }
565  return MA;
566 }
567 
569  const ValueToValueMapTy &VMap,
570  PhiToDefMap &MPhiMap,
571  bool CloneWasSimplified,
572  MemorySSA *MSSA) {
573  MemoryAccess *InsnDefining = MA;
574  if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
575  if (!MSSA->isLiveOnEntryDef(DefMUD)) {
576  Instruction *DefMUDI = DefMUD->getMemoryInst();
577  assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
578  if (Instruction *NewDefMUDI =
579  cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
580  InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
581  if (!CloneWasSimplified)
582  assert(InsnDefining && "Defining instruction cannot be nullptr.");
583  else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
584  // The clone was simplified, it's no longer a MemoryDef, look up.
585  auto DefIt = DefMUD->getDefsIterator();
586  // Since simplified clones only occur in single block cloning, a
587  // previous definition must exist, otherwise NewDefMUDI would not
588  // have been found in VMap.
589  assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
590  "Previous def must exist");
591  InsnDefining = getNewDefiningAccessForClone(
592  &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
593  }
594  }
595  }
596  } else {
597  MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
598  if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
599  InsnDefining = NewDefPhi;
600  }
601  assert(InsnDefining && "Defining instruction cannot be nullptr.");
602  return InsnDefining;
603 }
604 
// Create MemoryUses/MemoryDefs in NewBB (a clone of BB) mirroring BB's
// accesses, using VMap to find each instruction's clone and MPhiMap to
// translate defining phis.
void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                                        const ValueToValueMapTy &VMap,
                                        PhiToDefMap &MPhiMap,
                                        bool CloneWasSimplified) {
  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return;
  for (const MemoryAccess &MA : *Acc) {
    if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
      Instruction *Insn = MUD->getMemoryInst();
      // Entry does not exist if the clone of the block did not clone all
      // instructions. This occurs in LoopRotate when cloning instructions
      // from the old header to the old preheader. The cloned instruction may
      // also be a simplified Value, not an Instruction (see LoopRotate).
      // Also in LoopRotate, even when it's an instruction, due to it being
      // simplified, it may be a Use rather than a Def, so we cannot use MUD as
      // template. Calls coming from updateForClonedBlockIntoPred, ensure this.
      if (Instruction *NewInsn =
              dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
        MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
            NewInsn,
            getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
                                         MPhiMap, CloneWasSimplified, MSSA),
            /*Template=*/CloneWasSimplified ? nullptr : MUD,
            /*CreationMustSucceed=*/CloneWasSimplified ? false : true);
        if (NewUseOrDef)
          MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
      }
    }
  }
}
636 
638  BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
639  auto *MPhi = MSSA->getMemoryAccess(Header);
640  if (!MPhi)
641  return;
642 
643  // Create phi node in the backedge block and populate it with the same
644  // incoming values as MPhi. Skip incoming values coming from Preheader.
645  auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
646  bool HasUniqueIncomingValue = true;
647  MemoryAccess *UniqueValue = nullptr;
648  for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
649  BasicBlock *IBB = MPhi->getIncomingBlock(I);
650  MemoryAccess *IV = MPhi->getIncomingValue(I);
651  if (IBB != Preheader) {
652  NewMPhi->addIncoming(IV, IBB);
653  if (HasUniqueIncomingValue) {
654  if (!UniqueValue)
655  UniqueValue = IV;
656  else if (UniqueValue != IV)
657  HasUniqueIncomingValue = false;
658  }
659  }
660  }
661 
662  // Update incoming edges into MPhi. Remove all but the incoming edge from
663  // Preheader. Add an edge from NewMPhi
664  auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
665  MPhi->setIncomingValue(0, AccFromPreheader);
666  MPhi->setIncomingBlock(0, Preheader);
667  for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
668  MPhi->unorderedDeleteIncoming(I);
669  MPhi->addIncoming(NewMPhi, BEBlock);
670 
671  // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will be
672  // replaced with the unique value.
673  tryRemoveTrivialPhi(NewMPhi);
674 }
675 
677  ArrayRef<BasicBlock *> ExitBlocks,
678  const ValueToValueMapTy &VMap,
679  bool IgnoreIncomingWithNoClones) {
680  PhiToDefMap MPhiMap;
681 
682  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
683  assert(Phi && NewPhi && "Invalid Phi nodes.");
684  BasicBlock *NewPhiBB = NewPhi->getBlock();
685  SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
686  pred_end(NewPhiBB));
687  for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
688  MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
689  BasicBlock *IncBB = Phi->getIncomingBlock(It);
690 
691  if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
692  IncBB = NewIncBB;
693  else if (IgnoreIncomingWithNoClones)
694  continue;
695 
696  // Now we have IncBB, and will need to add incoming from it to NewPhi.
697 
698  // If IncBB is not a predecessor of NewPhiBB, then do not add it.
699  // NewPhiBB was cloned without that edge.
700  if (!NewPhiBBPreds.count(IncBB))
701  continue;
702 
703  // Determine incoming value and add it as incoming from IncBB.
704  if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
705  if (!MSSA->isLiveOnEntryDef(IncMUD)) {
706  Instruction *IncI = IncMUD->getMemoryInst();
707  assert(IncI && "Found MemoryUseOrDef with no Instruction.");
708  if (Instruction *NewIncI =
709  cast_or_null<Instruction>(VMap.lookup(IncI))) {
710  IncMUD = MSSA->getMemoryAccess(NewIncI);
711  assert(IncMUD &&
712  "MemoryUseOrDef cannot be null, all preds processed.");
713  }
714  }
715  NewPhi->addIncoming(IncMUD, IncBB);
716  } else {
717  MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
718  if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
719  NewPhi->addIncoming(NewDefPhi, IncBB);
720  else
721  NewPhi->addIncoming(IncPhi, IncBB);
722  }
723  }
724  if (auto *SingleAccess = onlySingleValue(NewPhi)) {
725  MPhiMap[Phi] = SingleAccess;
726  removeMemoryAccess(NewPhi);
727  }
728  };
729 
730  auto ProcessBlock = [&](BasicBlock *BB) {
731  BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
732  if (!NewBlock)
733  return;
734 
735  assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
736  "Cloned block should have no accesses");
737 
738  // Add MemoryPhi.
739  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
740  MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
741  MPhiMap[MPhi] = NewPhi;
742  }
743  // Update Uses and Defs.
744  cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
745  };
746 
747  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
748  ProcessBlock(BB);
749 
750  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
751  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
752  if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
753  FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
754 }
755 
757  BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
758  // All defs/phis from outside BB that are used in BB, are valid uses in P1.
759  // Since those defs/phis must have dominated BB, and also dominate P1.
760  // Defs from BB being used in BB will be replaced with the cloned defs from
761  // VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
762  // incoming def into the Phi from P1.
763  // Instructions cloned into the predecessor are in practice sometimes
764  // simplified, so disable the use of the template, and create an access from
765  // scratch.
766  PhiToDefMap MPhiMap;
767  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
768  MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
769  cloneUsesAndDefs(BB, P1, VM, MPhiMap, /*CloneWasSimplified=*/true);
770 }
771 
772 template <typename Iter>
773 void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
774  ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
775  DominatorTree &DT) {
777  // Update/insert phis in all successors of exit blocks.
778  for (auto *Exit : ExitBlocks)
779  for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
780  if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
781  BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
782  Updates.push_back({DT.Insert, NewExit, ExitSucc});
783  }
784  applyInsertUpdates(Updates, DT);
785 }
786 
788  ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
789  DominatorTree &DT) {
790  const ValueToValueMapTy *const Arr[] = {&VMap};
791  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
792  std::end(Arr), DT);
793 }
794 
796  ArrayRef<BasicBlock *> ExitBlocks,
797  ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
798  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
799  return I.get();
800  };
801  using MappedIteratorType =
803  decltype(GetPtr)>;
804  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
805  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
806  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
807 }
808 
810  DominatorTree &DT, bool UpdateDT) {
811  SmallVector<CFGUpdate, 4> DeleteUpdates;
812  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
813  SmallVector<CFGUpdate, 4> InsertUpdates;
814  for (auto &Update : Updates) {
815  if (Update.getKind() == DT.Insert)
816  InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
817  else {
818  DeleteUpdates.push_back({DT.Delete, Update.getFrom(), Update.getTo()});
819  RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
820  }
821  }
822 
823  if (!DeleteUpdates.empty()) {
824  if (!InsertUpdates.empty()) {
825  if (!UpdateDT) {
827  // Deletes are reversed applied, because this CFGView is pretending the
828  // deletes did not happen yet, hence the edges still exist.
829  DT.applyUpdates(Empty, RevDeleteUpdates);
830  } else {
831  // Apply all updates, with the RevDeleteUpdates as PostCFGView.
832  DT.applyUpdates(Updates, RevDeleteUpdates);
833  }
834 
835  // Note: the MSSA update below doesn't distinguish between a GD with
836  // (RevDelete,false) and (Delete, true), but this matters for the DT
837  // updates above; for "children" purposes they are equivalent; but the
838  // updates themselves convey the desired update, used inside DT only.
839  GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
840  applyInsertUpdates(InsertUpdates, DT, &GD);
841  // Update DT to redelete edges; this matches the real CFG so we can
842  // perform the standard update without a postview of the CFG.
843  DT.applyUpdates(DeleteUpdates);
844  } else {
845  if (UpdateDT)
846  DT.applyUpdates(DeleteUpdates);
847  }
848  } else {
849  if (UpdateDT)
850  DT.applyUpdates(Updates);
852  applyInsertUpdates(InsertUpdates, DT, &GD);
853  }
854 
855  // Update for deleted edges
856  for (auto &Update : DeleteUpdates)
857  removeEdge(Update.getFrom(), Update.getTo());
858 }
859 
861  DominatorTree &DT) {
863  applyInsertUpdates(Updates, DT, &GD);
864 }
865 
867  DominatorTree &DT,
868  const GraphDiff<BasicBlock *> *GD) {
869  // Get recursive last Def, assuming well formed MSSA and updated DT.
870  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
871  while (true) {
873  // Return last Def or Phi in BB, if it exists.
874  if (Defs)
875  return &*(--Defs->end());
876 
877  // Check number of predecessors, we only care if there's more than one.
878  unsigned Count = 0;
879  BasicBlock *Pred = nullptr;
880  for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
881  Pred = Pi;
882  Count++;
883  if (Count == 2)
884  break;
885  }
886 
887  // If BB has multiple predecessors, get last definition from IDom.
888  if (Count != 1) {
889  // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its
890  // DT is invalidated. Return LoE as its last def. This will be added to
891  // MemoryPhi node, and later deleted when the block is deleted.
892  if (!DT.getNode(BB))
893  return MSSA->getLiveOnEntryDef();
894  if (auto *IDom = DT.getNode(BB)->getIDom())
895  if (IDom->getBlock() != BB) {
896  BB = IDom->getBlock();
897  continue;
898  }
899  return MSSA->getLiveOnEntryDef();
900  } else {
901  // Single predecessor, BB cannot be dead. GetLastDef of Pred.
902  assert(Count == 1 && Pred && "Single predecessor expected.");
903  // BB can be unreachable though, return LoE if that is the case.
904  if (!DT.getNode(BB))
905  return MSSA->getLiveOnEntryDef();
906  BB = Pred;
907  }
908  };
909  llvm_unreachable("Unable to get last definition.");
910  };
911 
912  // Get nearest IDom given a set of blocks.
913  // TODO: this can be optimized by starting the search at the node with the
914  // lowest level (highest in the tree).
915  auto FindNearestCommonDominator =
916  [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
917  BasicBlock *PrevIDom = *BBSet.begin();
918  for (auto *BB : BBSet)
919  PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
920  return PrevIDom;
921  };
922 
923  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not
924  // include CurrIDom.
925  auto GetNoLongerDomBlocks =
926  [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
927  SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
928  if (PrevIDom == CurrIDom)
929  return;
930  BlocksPrevDom.push_back(PrevIDom);
931  BasicBlock *NextIDom = PrevIDom;
932  while (BasicBlock *UpIDom =
933  DT.getNode(NextIDom)->getIDom()->getBlock()) {
934  if (UpIDom == CurrIDom)
935  break;
936  BlocksPrevDom.push_back(UpIDom);
937  NextIDom = UpIDom;
938  }
939  };
940 
941  // Map a BB to its predecessors: added + previously existing. To get a
942  // deterministic order, store predecessors as SetVectors. The order in each
943  // will be defined by the order in Updates (fixed) and the order given by
944  // children<> (also fixed). Since we further iterate over these ordered sets,
945  // we lose the information of multiple edges possibly existing between two
946  // blocks, so we'll keep and EdgeCount map for that.
947  // An alternate implementation could keep unordered set for the predecessors,
948  // traverse either Updates or children<> each time to get the deterministic
949  // order, and drop the usage of EdgeCount. This alternate approach would still
950  // require querying the maps for each predecessor, and children<> call has
951  // additional computation inside for creating the snapshot-graph predecessors.
952  // As such, we favor using a little additional storage and less compute time.
953  // This decision can be revisited if we find the alternative more favorable.
954 
955  struct PredInfo {
958  };
960 
961  for (auto &Edge : Updates) {
962  BasicBlock *BB = Edge.getTo();
963  auto &AddedBlockSet = PredMap[BB].Added;
964  AddedBlockSet.insert(Edge.getFrom());
965  }
966 
967  // Store all existing predecessor for each BB, at least one must exist.
970  for (auto &BBPredPair : PredMap) {
971  auto *BB = BBPredPair.first;
972  const auto &AddedBlockSet = BBPredPair.second.Added;
973  auto &PrevBlockSet = BBPredPair.second.Prev;
974  for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
975  if (!AddedBlockSet.count(Pi))
976  PrevBlockSet.insert(Pi);
977  EdgeCountMap[{Pi, BB}]++;
978  }
979 
980  if (PrevBlockSet.empty()) {
981  assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
982  LLVM_DEBUG(
983  dbgs()
984  << "Adding a predecessor to a block with no predecessors. "
985  "This must be an edge added to a new, likely cloned, block. "
986  "Its memory accesses must be already correct, assuming completed "
987  "via the updateExitBlocksForClonedLoop API. "
988  "Assert a single such edge is added so no phi addition or "
989  "additional processing is required.\n");
990  assert(AddedBlockSet.size() == 1 &&
991  "Can only handle adding one predecessor to a new block.");
992  // Need to remove new blocks from PredMap. Remove below to not invalidate
993  // iterator here.
994  NewBlocks.insert(BB);
995  }
996  }
997  // Nothing to process for new/cloned blocks.
998  for (auto *BB : NewBlocks)
999  PredMap.erase(BB);
1000 
1001  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
1002  SmallVector<WeakVH, 8> InsertedPhis;
1003 
1004  // First create MemoryPhis in all blocks that don't have one. Create in the
1005  // order found in Updates, not in PredMap, to get deterministic numbering.
1006  for (auto &Edge : Updates) {
1007  BasicBlock *BB = Edge.getTo();
1008  if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
1009  InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
1010  }
1011 
1012  // Now we'll fill in the MemoryPhis with the right incoming values.
1013  for (auto &BBPredPair : PredMap) {
1014  auto *BB = BBPredPair.first;
1015  const auto &PrevBlockSet = BBPredPair.second.Prev;
1016  const auto &AddedBlockSet = BBPredPair.second.Added;
1017  assert(!PrevBlockSet.empty() &&
1018  "At least one previous predecessor must exist.");
1019 
1020  // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
1021  // keeping this map before the loop. We can reuse already populated entries
1022  // if an edge is added from the same predecessor to two different blocks,
1023  // and this does happen in rotate. Note that the map needs to be updated
1024  // when deleting non-necessary phis below, if the phi is in the map by
1025  // replacing the value with DefP1.
1027  for (auto *AddedPred : AddedBlockSet) {
1028  auto *DefPn = GetLastDef(AddedPred);
1029  assert(DefPn != nullptr && "Unable to find last definition.");
1030  LastDefAddedPred[AddedPred] = DefPn;
1031  }
1032 
1033  MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
1034  // If Phi is not empty, add an incoming edge from each added pred. Must
1035  // still compute blocks with defs to replace for this block below.
1036  if (NewPhi->getNumOperands()) {
1037  for (auto *Pred : AddedBlockSet) {
1038  auto *LastDefForPred = LastDefAddedPred[Pred];
1039  for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1040  NewPhi->addIncoming(LastDefForPred, Pred);
1041  }
1042  } else {
1043  // Pick any existing predecessor and get its definition. All other
1044  // existing predecessors should have the same one, since no phi existed.
1045  auto *P1 = *PrevBlockSet.begin();
1046  MemoryAccess *DefP1 = GetLastDef(P1);
1047 
1048  // Check DefP1 against all Defs in LastDefPredPair. If all the same,
1049  // nothing to add.
1050  bool InsertPhi = false;
1051  for (auto LastDefPredPair : LastDefAddedPred)
1052  if (DefP1 != LastDefPredPair.second) {
1053  InsertPhi = true;
1054  break;
1055  }
1056  if (!InsertPhi) {
1057  // Since NewPhi may be used in other newly added Phis, replace all uses
1058  // of NewPhi with the definition coming from all predecessors (DefP1),
1059  // before deleting it.
1060  NewPhi->replaceAllUsesWith(DefP1);
1061  removeMemoryAccess(NewPhi);
1062  continue;
1063  }
1064 
1065  // Update Phi with new values for new predecessors and old value for all
1066  // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
1067  // sets, the order of entries in NewPhi is deterministic.
1068  for (auto *Pred : AddedBlockSet) {
1069  auto *LastDefForPred = LastDefAddedPred[Pred];
1070  for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1071  NewPhi->addIncoming(LastDefForPred, Pred);
1072  }
1073  for (auto *Pred : PrevBlockSet)
1074  for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1075  NewPhi->addIncoming(DefP1, Pred);
1076  }
1077 
1078  // Get all blocks that used to dominate BB and no longer do after adding
1079  // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
1080  assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
1081  BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
1082  assert(PrevIDom && "Previous IDom should exists");
1083  BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
1084  assert(NewIDom && "BB should have a new valid idom");
1085  assert(DT.dominates(NewIDom, PrevIDom) &&
1086  "New idom should dominate old idom");
1087  GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
1088  }
1089 
1090  tryRemoveTrivialPhis(InsertedPhis);
1091  // Create the set of blocks that now have a definition. We'll use this to
1092  // compute IDF and add Phis there next.
1093  SmallVector<BasicBlock *, 8> BlocksToProcess;
1094  for (auto &VH : InsertedPhis)
1095  if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
1096  BlocksToProcess.push_back(MPhi->getBlock());
1097 
1098  // Compute IDF and add Phis in all IDF blocks that do not have one.
1100  if (!BlocksToProcess.empty()) {
1101  ForwardIDFCalculator IDFs(DT, GD);
1102  SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
1103  BlocksToProcess.end());
1104  IDFs.setDefiningBlocks(DefiningBlocks);
1105  IDFs.calculate(IDFBlocks);
1106 
1107  SmallSetVector<MemoryPhi *, 4> PhisToFill;
1108  // First create all needed Phis.
1109  for (auto *BBIDF : IDFBlocks)
1110  if (!MSSA->getMemoryAccess(BBIDF)) {
1111  auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
1112  InsertedPhis.push_back(IDFPhi);
1113  PhisToFill.insert(IDFPhi);
1114  }
1115  // Then update or insert their correct incoming values.
1116  for (auto *BBIDF : IDFBlocks) {
1117  auto *IDFPhi = MSSA->getMemoryAccess(BBIDF);
1118  assert(IDFPhi && "Phi must exist");
1119  if (!PhisToFill.count(IDFPhi)) {
1120  // Update existing Phi.
1121  // FIXME: some updates may be redundant, try to optimize and skip some.
1122  for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
1123  IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
1124  } else {
1125  for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BBIDF))
1126  IDFPhi->addIncoming(GetLastDef(Pi), Pi);
1127  }
1128  }
1129  }
1130 
1131  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
1132  // longer dominate, replace those with the closest dominating def.
1133  // This will also update optimized accesses, as they're also uses.
1134  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
1135  if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
1136  for (auto &DefToReplaceUses : *DefsList) {
1137  BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
1138  Value::use_iterator UI = DefToReplaceUses.use_begin(),
1139  E = DefToReplaceUses.use_end();
1140  for (; UI != E;) {
1141  Use &U = *UI;
1142  ++UI;
1143  MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
1144  if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
1145  BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
1146  if (!DT.dominates(DominatingBlock, DominatedBlock))
1147  U.set(GetLastDef(DominatedBlock));
1148  } else {
1149  BasicBlock *DominatedBlock = Usr->getBlock();
1150  if (!DT.dominates(DominatingBlock, DominatedBlock)) {
1151  if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
1152  U.set(DomBlPhi);
1153  else {
1154  auto *IDom = DT.getNode(DominatedBlock)->getIDom();
1155  assert(IDom && "Block must have a valid IDom.");
1156  U.set(GetLastDef(IDom->getBlock()));
1157  }
1158  cast<MemoryUseOrDef>(Usr)->resetOptimized();
1159  }
1160  }
1161  }
1162  }
1163  }
1164  }
1165  tryRemoveTrivialPhis(InsertedPhis);
1166 }
1167 
1168 // Move What before Where in the MemorySSA IR.
1169 template <class WhereType>
1170 void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1171  WhereType Where) {
1172  // Mark MemoryPhi users of What not to be optimized.
1173  for (auto *U : What->users())
1174  if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
1175  NonOptPhis.insert(PhiUser);
1176 
1177  // Replace all our users with our defining access.
1178  What->replaceAllUsesWith(What->getDefiningAccess());
1179 
1180  // Let MemorySSA take care of moving it around in the lists.
1181  MSSA->moveTo(What, BB, Where);
1182 
1183  // Now reinsert it into the IR and do whatever fixups needed.
1184  if (auto *MD = dyn_cast<MemoryDef>(What))
1185  insertDef(MD, /*RenameUses=*/true);
1186  else
1187  insertUse(cast<MemoryUse>(What), /*RenameUses=*/true);
1188 
1189  // Clear dangling pointers. We added all MemoryPhi users, but not all
1190  // of them are removed by fixupDefs().
1191  NonOptPhis.clear();
1192 }
1193 
1194 // Move What before Where in the MemorySSA IR.
1196  moveTo(What, Where->getBlock(), Where->getIterator());
1197 }
1198 
1199 // Move What after Where in the MemorySSA IR.
1201  moveTo(What, Where->getBlock(), ++Where->getIterator());
1202 }
1203 
1205  MemorySSA::InsertionPlace Where) {
1206  if (Where != MemorySSA::InsertionPlace::BeforeTerminator)
1207  return moveTo(What, BB, Where);
1208 
1209  if (auto *Where = MSSA->getMemoryAccess(BB->getTerminator()))
1210  return moveBefore(What, Where);
1211  else
1212  return moveTo(What, BB, MemorySSA::InsertionPlace::End);
1213 }
1214 
1215 // All accesses in To used to be in From. Move to end and update access lists.
1216 void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
1217  Instruction *Start) {
1218 
1220  if (!Accs)
1221  return;
1222 
1223  assert(Start->getParent() == To && "Incorrect Start instruction");
1224  MemoryAccess *FirstInNew = nullptr;
1225  for (Instruction &I : make_range(Start->getIterator(), To->end()))
1226  if ((FirstInNew = MSSA->getMemoryAccess(&I)))
1227  break;
1228  if (FirstInNew) {
1229  auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
1230  do {
1231  auto NextIt = ++MUD->getIterator();
1232  MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
1233  ? nullptr
1234  : cast<MemoryUseOrDef>(&*NextIt);
1235  MSSA->moveTo(MUD, To, MemorySSA::End);
1236  // Moving MUD from Accs in the moveTo above, may delete Accs, so we need
1237  // to retrieve it again.
1238  Accs = MSSA->getWritableBlockAccesses(From);
1239  MUD = NextMUD;
1240  } while (MUD);
1241  }
1242 
1243  // If all accesses were moved and only a trivial Phi remains, we try to remove
1244  // that Phi. This is needed when From is going to be deleted.
1245  auto *Defs = MSSA->getWritableBlockDefs(From);
1246  if (Defs && !Defs->empty())
1247  if (auto *Phi = dyn_cast<MemoryPhi>(&*Defs->begin()))
1248  tryRemoveTrivialPhi(Phi);
1249 }
1250 
1252  BasicBlock *To,
1253  Instruction *Start) {
1254  assert(MSSA->getBlockAccesses(To) == nullptr &&
1255  "To block is expected to be free of MemoryAccesses.");
1256  moveAllAccesses(From, To, Start);
1257  for (BasicBlock *Succ : successors(To))
1258  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
1259  MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
1260 }
1261 
1263  Instruction *Start) {
1264  assert(From->getUniquePredecessor() == To &&
1265  "From block is expected to have a single predecessor (To).");
1266  moveAllAccesses(From, To, Start);
1267  for (BasicBlock *Succ : successors(From))
1268  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
1269  MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
1270 }
1271 
1273  BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
1274  bool IdenticalEdgesWereMerged) {
1275  assert(!MSSA->getWritableBlockAccesses(New) &&
1276  "Access list should be null for a new block.");
1277  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
1278  if (!Phi)
1279  return;
1280  if (Old->hasNPredecessors(1)) {
1281  assert(pred_size(New) == Preds.size() &&
1282  "Should have moved all predecessors.");
1283  MSSA->moveTo(Phi, New, MemorySSA::Beginning);
1284  } else {
1285  assert(!Preds.empty() && "Must be moving at least one predecessor to the "
1286  "new immediate predecessor.");
1287  MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
1288  SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
1289  // Currently only support the case of removing a single incoming edge when
1290  // identical edges were not merged.
1291  if (!IdenticalEdgesWereMerged)
1292  assert(PredsSet.size() == Preds.size() &&
1293  "If identical edges were not merged, we cannot have duplicate "
1294  "blocks in the predecessors");
1296  if (PredsSet.count(B)) {
1297  NewPhi->addIncoming(MA, B);
1298  if (!IdenticalEdgesWereMerged)
1299  PredsSet.erase(B);
1300  return true;
1301  }
1302  return false;
1303  });
1304  Phi->addIncoming(NewPhi, New);
1305  tryRemoveTrivialPhi(NewPhi);
1306  }
1307 }
1308 
1310  assert(!MSSA->isLiveOnEntryDef(MA) &&
1311  "Trying to remove the live on entry def");
1312  // We can only delete phi nodes if they have no uses, or we can replace all
1313  // uses with a single definition.
1314  MemoryAccess *NewDefTarget = nullptr;
1315  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
1316  // Note that it is sufficient to know that all edges of the phi node have
1317  // the same argument. If they do, by the definition of dominance frontiers
1318  // (which we used to place this phi), that argument must dominate this phi,
1319  // and thus, must dominate the phi's uses, and so we will not hit the assert
1320  // below.
1321  NewDefTarget = onlySingleValue(MP);
1322  assert((NewDefTarget || MP->use_empty()) &&
1323  "We can't delete this memory phi");
1324  } else {
1325  NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
1326  }
1327 
1328  SmallSetVector<MemoryPhi *, 4> PhisToCheck;
1329 
1330  // Re-point the uses at our defining access
1331  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
1332  // Reset optimized on users of this store, and reset the uses.
1333  // A few notes:
1334  // 1. This is a slightly modified version of RAUW to avoid walking the
1335  // uses twice here.
1336  // 2. If we wanted to be complete, we would have to reset the optimized
1337  // flags on users of phi nodes if doing the below makes a phi node have all
1338  // the same arguments. Instead, we prefer users to removeMemoryAccess those
1339  // phi nodes, because doing it here would be N^3.
1340  if (MA->hasValueHandle())
1341  ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
1342  // Note: We assume MemorySSA is not used in metadata since it's not really
1343  // part of the IR.
1344 
1345  assert(NewDefTarget != MA && "Going into an infinite loop");
1346  while (!MA->use_empty()) {
1347  Use &U = *MA->use_begin();
1348  if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
1349  MUD->resetOptimized();
1350  if (OptimizePhis)
1351  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
1352  PhisToCheck.insert(MP);
1353  U.set(NewDefTarget);
1354  }
1355  }
1356 
1357  // The call below to erase will destroy MA, so we can't change the order we
1358  // are doing things here
1359  MSSA->removeFromLookups(MA);
1360  MSSA->removeFromLists(MA);
1361 
1362  // Optionally optimize Phi uses. This will recursively remove trivial phis.
1363  if (!PhisToCheck.empty()) {
1364  SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
1365  PhisToCheck.end()};
1366  PhisToCheck.clear();
1367 
1368  unsigned PhisSize = PhisToOptimize.size();
1369  while (PhisSize-- > 0)
1370  if (MemoryPhi *MP =
1371  cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
1372  tryRemoveTrivialPhi(MP);
1373  }
1374 }
1375 
1377  const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
1378  // First delete all uses of BB in MemoryPhis.
1379  for (BasicBlock *BB : DeadBlocks) {
1380  Instruction *TI = BB->getTerminator();
1381  assert(TI && "Basic block expected to have a terminator instruction");
1382  for (BasicBlock *Succ : successors(TI))
1383  if (!DeadBlocks.count(Succ))
1384  if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
1385  MP->unorderedDeleteIncomingBlock(BB);
1386  tryRemoveTrivialPhi(MP);
1387  }
1388  // Drop all references of all accesses in BB
1390  for (MemoryAccess &MA : *Acc)
1391  MA.dropAllReferences();
1392  }
1393 
1394  // Next, delete all memory accesses in each block
1395  for (BasicBlock *BB : DeadBlocks) {
1397  if (!Acc)
1398  continue;
1399  for (MemoryAccess &MA : llvm::make_early_inc_range(*Acc)) {
1400  MSSA->removeFromLookups(&MA);
1401  MSSA->removeFromLists(&MA);
1402  }
1403  }
1404 }
1405 
1406 void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
1407  for (auto &VH : UpdatedPHIs)
1408  if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
1409  tryRemoveTrivialPhi(MPhi);
1410 }
1411 
1413  const BasicBlock *BB = I->getParent();
1414  // Remove memory accesses in BB for I and all following instructions.
1415  auto BBI = I->getIterator(), BBE = BB->end();
1416  // FIXME: If this becomes too expensive, iterate until the first instruction
1417  // with a memory access, then iterate over MemoryAccesses.
1418  while (BBI != BBE)
1419  removeMemoryAccess(&*(BBI++));
1420  // Update phis in BB's successors to remove BB.
1421  SmallVector<WeakVH, 16> UpdatedPHIs;
1422  for (const BasicBlock *Successor : successors(BB)) {
1424  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
1425  MPhi->unorderedDeleteIncomingBlock(BB);
1426  UpdatedPHIs.push_back(MPhi);
1427  }
1428  }
1429  // Optimize trivial phis.
1430  tryRemoveTrivialPhis(UpdatedPHIs);
1431 }
1432 
1434  Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
1435  MemorySSA::InsertionPlace Point) {
1436  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
1437  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
1438  return NewAccess;
1439 }
1440 
1442  Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
1443  assert(I->getParent() == InsertPt->getBlock() &&
1444  "New and old access must be in the same block");
1445  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
1446  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
1447  InsertPt->getIterator());
1448  return NewAccess;
1449 }
1450 
1452  Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
1453  assert(I->getParent() == InsertPt->getBlock() &&
1454  "New and old access must be in the same block");
1455  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
1456  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
1457  ++InsertPt->getIterator());
1458  return NewAccess;
1459 }
i
i
Definition: README.txt:29
llvm::BasicBlock::end
iterator end()
Definition: BasicBlock.h:298
llvm::predecessors
pred_range predecessors(BasicBlock *BB)
Definition: CFG.h:127
llvm::MemoryPhi::unorderedDeleteIncomingIf
void unorderedDeleteIncomingIf(Fn &&Pred)
Definition: MemorySSA.h:607
llvm
This file implements support for optimizing divisions by a constant.
Definition: AllocatorList.h:23
llvm::DominatorTreeBase::findNearestCommonDominator
NodeT * findNearestCommonDominator(NodeT *A, NodeT *B) const
Find nearest common dominator basic block for basic block A and B.
Definition: GenericDomTree.h:468
llvm::simple_ilist::empty
LLVM_NODISCARD bool empty() const
Check if the list is empty in constant time.
Definition: simple_ilist.h:131
llvm::MemorySSA::getWritableBlockDefs
DefsList * getWritableBlockDefs(const BasicBlock *BB) const
Definition: MemorySSA.h:812
llvm::make_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Definition: iterator_range.h:53
Metadata.h
llvm::MemorySSAUpdater::createMemoryAccessBefore
MemoryUseOrDef * createMemoryAccessBefore(Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt)
Create a MemoryAccess in MemorySSA before or after an existing MemoryAccess.
Definition: MemorySSAUpdater.cpp:1441
llvm::User::operands
op_range operands()
Definition: User.h:242
MemorySSAUpdater.h
llvm::DenseMapBase::lookup
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:197
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::User::dropAllReferences
void dropAllReferences()
Drop all references to operands.
Definition: User.h:299
getNewDefiningAccessForClone
static MemoryAccess * getNewDefiningAccessForClone(MemoryAccess *MA, const ValueToValueMapTy &VMap, PhiToDefMap &MPhiMap, bool CloneWasSimplified, MemorySSA *MSSA)
Definition: MemorySSAUpdater.cpp:568
llvm::SmallDenseMap
Definition: DenseMap.h:880
llvm::MemorySSA::dominates
bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
Definition: MemorySSA.cpp:2154
llvm::MemorySSA::renamePass
void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, SmallPtrSetImpl< BasicBlock * > &Visited)
Definition: MemorySSA.h:825
llvm::MemorySSAUpdater::removeBlocks
void removeBlocks(const SmallSetVector< BasicBlock *, 8 > &DeadBlocks)
Remove all MemoryAcceses in a set of BasicBlocks about to be deleted.
Definition: MemorySSAUpdater.cpp:1376
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:151
llvm::MemoryAccess::getReverseIterator
AllAccessType::reverse_self_iterator getReverseIterator()
Definition: MemorySSA.h:184
llvm::MemorySSA::insertIntoListsBefore
void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, AccessList::iterator)
Definition: MemorySSA.cpp:1663
Module.h
llvm::sys::path::end
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:233
llvm::MemorySSA::getLiveOnEntryDef
MemoryAccess * getLiveOnEntryDef() const
Definition: MemorySSA.h:746
llvm::sys::path::begin
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:224
llvm::DominatorTreeBase::Insert
static constexpr UpdateKind Insert
Definition: GenericDomTree.h:242
llvm::copy
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1602
llvm::MemorySSAUpdater::moveAllAfterSpliceBlocks
void moveAllAfterSpliceBlocks(BasicBlock *From, BasicBlock *To, Instruction *Start)
From block was spliced into From and To.
Definition: MemorySSAUpdater.cpp:1251
llvm::DominatorTreeBase::getNode
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Definition: GenericDomTree.h:351
llvm::SmallSet
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:134
llvm::mapped_iterator
Definition: STLExtras.h:277
llvm::SmallPtrSet
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:449
llvm::Value::user_begin
user_iterator user_begin()
Definition: Value.h:398
llvm::MemoryPhi
Represents phi nodes for memory accesses.
Definition: MemorySSA.h:484
STLExtras.h
llvm::BasicBlock::hasNPredecessors
bool hasNPredecessors(unsigned N) const
Return true if this block has exactly N predecessors.
Definition: BasicBlock.cpp:290
llvm::SmallVectorImpl::pop_back_val
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:635
llvm::DomTreeNodeBase::getIDom
DomTreeNodeBase * getIDom() const
Definition: GenericDomTree.h:89
llvm::MemoryUse
Represents read-only accesses to memory.
Definition: MemorySSA.h:320
llvm::successors
succ_range successors(Instruction *I)
Definition: CFG.h:262
llvm::IDFCalculatorBase::calculate
void calculate(SmallVectorImpl< NodeTy * > &IDFBlocks)
Calculate iterated dominance frontiers.
Definition: GenericIteratedDominanceFrontier.h:131
llvm::iplist
An intrusive list with ownership and callbacks specified/controlled by ilist_traits,...
Definition: ilist.h:390
llvm::MemorySSAUpdater::applyUpdates
void applyUpdates(ArrayRef< CFGUpdate > Updates, DominatorTree &DT, bool UpdateDTFirst=false)
Apply CFG updates, analogous with the DT edge updates.
Definition: MemorySSAUpdater.cpp:809
llvm::MemorySSAUpdater::createMemoryAccessInBB
MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point)
Create a MemoryAccess in MemorySSA at a specified point in a block, with a specified clobbering defin...
Definition: MemorySSAUpdater.cpp:1433
llvm::MemoryPhi::getIncomingBlock
BasicBlock * getIncomingBlock(unsigned I) const
Return incoming basic block number i.
Definition: MemorySSA.h:545
llvm::DominatorTreeBase::applyUpdates
void applyUpdates(ArrayRef< UpdateType > Updates)
Inform the dominator tree about a sequence of CFG edge insertions and deletions and perform a batch u...
Definition: GenericDomTree.h:544
llvm::ArrayRef::empty
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::MemorySSA::insertIntoListsForBlock
void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, InsertionPlace)
Definition: MemorySSA.cpp:1631
llvm::GraphDiff
Definition: CFGDiff.h:57
Uses
SmallPtrSet< MachineInstr *, 2 > Uses
Definition: ARMLowOverheadLoops.cpp:589
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
llvm::MemoryPhi::getBasicBlockIndex
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
Definition: MemorySSA.h:577
llvm::lltok::equal
@ equal
Definition: LLToken.h:25
llvm::LoopBlocksRPO
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::DominatorTree::dominates
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:115
llvm::MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock
void updatePhisWhenInsertingUniqueBackedgeBlock(BasicBlock *LoopHeader, BasicBlock *LoopPreheader, BasicBlock *BackedgeBlock)
Update MemorySSA when inserting a unique backedge block for a loop.
Definition: MemorySSAUpdater.cpp:637
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:206
FormattedStream.h
llvm::MemorySSAUpdater::insertUse
void insertUse(MemoryUse *Use, bool RenameUses=false)
Definition: MemorySSAUpdater.cpp:245
llvm::SetVector< T, SmallVector< T, N >, SmallDenseSet< T, N > >::begin
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:82
llvm::MemoryAccess::getIterator
AllAccessType::self_iterator getIterator()
Get the iterators for the all access list and the defs only list We default to the all access list.
Definition: MemorySSA.h:178
llvm::Value::use_iterator
use_iterator_impl< Use > use_iterator
Definition: Value.h:354
llvm::MemorySSA::isLiveOnEntryDef
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Definition: MemorySSA.h:742
llvm::MemorySSA::moveTo
void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where)
Definition: MemorySSA.cpp:1707
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::SmallVectorImpl::append
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:648
llvm::User
Definition: User.h:44
llvm::MemorySSA::Beginning
@ Beginning
Definition: MemorySSA.h:793
llvm::BasicBlock::begin
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:296
llvm::MemorySSAUpdater::moveAfter
void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where)
Definition: MemorySSAUpdater.cpp:1200
llvm::MemorySSAUpdater::updateForClonedBlockIntoPred
void updateForClonedBlockIntoPred(BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM)
Definition: MemorySSAUpdater.cpp:756
llvm::MemorySSAUpdater::moveToPlace
void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, MemorySSA::InsertionPlace Where)
Definition: MemorySSAUpdater.cpp:1204
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::MemoryAccess::getReverseDefsIterator
DefsOnlyType::reverse_self_iterator getReverseDefsIterator()
Definition: MemorySSA.h:196
llvm::Instruction
Definition: Instruction.h:45
llvm::MemoryPhi::addIncoming
void addIncoming(MemoryAccess *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Definition: MemorySSA.h:566
llvm::MemorySSA::getBlockAccesses
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
Definition: MemorySSA.h:762
llvm::Use::getUser
User * getUser() const
Returns the User that contains this Use.
Definition: Use.h:73
llvm::MemorySSA::removeFromLookups
void removeFromLookups(MemoryAccess *)
Properly remove MA from all of MemorySSA's lookup tables.
Definition: MemorySSA.cpp:1838
SmallPtrSet.h
llvm::Instruction::getSuccessor
BasicBlock * getSuccessor(unsigned Idx) const
Return the specified successor. This instruction must be a terminator.
Definition: Instruction.cpp:777
llvm::iterator_range::empty
bool empty() const
Definition: iterator_range.h:46
llvm::MemoryPhi::getNumIncomingValues
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: MemorySSA.h:532
llvm::SetVector< T, SmallVector< T, N >, SmallDenseSet< T, N > >::empty
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:72
llvm::MemoryAccess::getBlock
BasicBlock * getBlock() const
Definition: MemorySSA.h:159
LoopIterator.h
llvm::Value::use_empty
bool use_empty() const
Definition: Value.h:345
llvm::MemorySSAUpdater::removeDuplicatePhiEdgesBetween
void removeDuplicatePhiEdgesBetween(const BasicBlock *From, const BasicBlock *To)
Update the MemoryPhi in To to have a single incoming edge from From, following a CFG change that repl...
Definition: MemorySSAUpdater.cpp:538
llvm::ValueHandleBase::ValueIsRAUWd
static void ValueIsRAUWd(Value *Old, Value *New)
Definition: Value.cpp:1190
llvm::MemoryPhi::block_begin
block_iterator block_begin()
Definition: MemorySSA.h:505
Operands
mir Rename Register Operands
Definition: MIRNamerPass.cpp:78
llvm::Value::hasValueHandle
bool hasValueHandle() const
Return true if there is a value handle associated with this value.
Definition: Value.h:555
llvm::MemorySSAUpdater::applyInsertUpdates
void applyInsertUpdates(ArrayRef< CFGUpdate > Updates, DominatorTree &DT)
Apply CFG insert updates, analogous with the DT edge updates.
Definition: MemorySSAUpdater.cpp:860
llvm::Value::user_end
user_iterator user_end()
Definition: Value.h:406
llvm::Use::set
void set(Value *Val)
Definition: Value.h:864
BasicBlock.h
llvm::simple_ilist::begin
iterator begin()
Definition: simple_ilist.h:117
llvm::MemorySSAUpdater::removeEdge
void removeEdge(BasicBlock *From, BasicBlock *To)
Update the MemoryPhi in To following an edge deletion between From and To.
Definition: MemorySSAUpdater.cpp:531
llvm::DomTreeNodeBase::getBlock
NodeT * getBlock() const
Definition: GenericDomTree.h:88
llvm::MemorySSAUpdater::changeToUnreachable
void changeToUnreachable(const Instruction *I)
Instruction I will be changed to an unreachable.
Definition: MemorySSAUpdater.cpp:1412
llvm::MemorySSAUpdater::updateForClonedLoop
void updateForClonedLoop(const LoopBlocksRPO &LoopBlocks, ArrayRef< BasicBlock * > ExitBlocks, const ValueToValueMapTy &VM, bool IgnoreIncomingWithNoClones=false)
Update MemorySSA after a loop was cloned, given the blocks in RPO order, the exit blocks and a 1:1 ma...
Definition: MemorySSAUpdater.cpp:676
llvm::DenseMap
Definition: DenseMap.h:714
llvm::MemorySSA
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition: MemorySSA.h:705
llvm::pdb::Empty
@ Empty
Definition: PDBTypes.h:394
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::MemorySSA::getMemoryAccess
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition: MemorySSA.h:722
llvm::make_early_inc_range
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:576
llvm::is_contained
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:1616
llvm::MemoryDef
Represents a read-write access to memory, whether it is a must-alias, or a may-alias.
Definition: MemorySSA.h:377
llvm::MemorySSA::getDomTree
DominatorTree & getDomTree() const
Definition: MemorySSA.h:730
llvm::TrackingVH
Value handle that tracks a Value across RAUW.
Definition: ValueHandle.h:331
llvm::MemorySSA::InsertionPlace
InsertionPlace
Used in various insertion functions to specify whether we are talking about the beginning or end of a...
Definition: MemorySSA.h:793
IRBuilder.h
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
onlySingleValue
static MemoryAccess * onlySingleValue(MemoryPhi *MP)
If all arguments of a MemoryPHI are defined by the same incoming argument, return that argument.
Definition: MemorySSAUpdater.cpp:556
llvm::DominatorTree::isReachableFromEntry
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:328
llvm::IDFCalculator
Definition: IteratedDominanceFrontier.h:39
llvm::Value::use_begin
use_iterator use_begin()
Definition: Value.h:361
llvm::IDFCalculatorBase::setDefiningBlocks
void setDefiningBlocks(const SmallPtrSetImpl< NodeTy * > &Blocks)
Give the IDF calculator the set of blocks in which the value is defined.
Definition: GenericIteratedDominanceFrontier.h:75
llvm::MemoryPhi::getIncomingValue
MemoryAccess * getIncomingValue(unsigned I) const
Return incoming value number x.
Definition: MemorySSA.h:535
llvm::SmallPtrSetImpl::count
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:382
llvm::SetVector< T, SmallVector< T, N >, SmallDenseSet< T, N > >::insert
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:141
llvm::MemorySSA::createDefinedAccess
MemoryUseOrDef * createDefinedAccess(Instruction *, MemoryAccess *, const MemoryUseOrDef *Template=nullptr, bool CreationMustSucceed=true)
Definition: MemorySSA.cpp:1738
llvm::MemorySSAUpdater::createMemoryAccessAfter
MemoryUseOrDef * createMemoryAccessAfter(Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt)
Definition: MemorySSAUpdater.cpp:1451
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::MemorySSAUpdater::insertDef
void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
Definition: MemorySSAUpdater.cpp:314
DataLayout.h
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:134
llvm::MemorySSA::getBlockDefs
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
Definition: MemorySSA.h:770
llvm::pred_size
unsigned pred_size(const BasicBlock *BB)
Get the number of predecessors of BB.
Definition: CFG.h:124
llvm::Value::replaceAllUsesWith
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:532
llvm::MemoryPhi::block_end
block_iterator block_end()
Definition: MemorySSA.h:513
llvm::MemoryPhi::setIncomingValue
void setIncomingValue(unsigned I, MemoryAccess *V)
Definition: MemorySSA.h:536
llvm::MemorySSA::End
@ End
Definition: MemorySSA.h:793
S
add sub stmia L5 ldr r0 bl L_printf $stub Instead of a and a wouldn t it be better to do three moves *Return an aggregate type is even return S
Definition: README.txt:210
llvm::SmallSet::insert
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:180
llvm::MemoryAccess
Definition: MemorySSA.h:137
llvm::ValueMap< const Value *, WeakTrackingVH >
llvm::DenseMapBase< SmallDenseMap< KeyT, ValueT, 4, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >, KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >::insert
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:207
llvm::BasicBlock::getTerminator
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:152
llvm::SmallPtrSetImplBase::size
size_type size() const
Definition: SmallPtrSet.h:92
llvm::MemorySSAUpdater::updateExitBlocksForClonedLoop
void updateExitBlocksForClonedLoop(ArrayRef< BasicBlock * > ExitBlocks, const ValueToValueMapTy &VMap, DominatorTree &DT)
Update phi nodes in exit block successors following cloning.
Definition: MemorySSAUpdater.cpp:787
llvm::SetVector< T, SmallVector< T, N >, SmallDenseSet< T, N > >::clear
void clear()
Completely clear the SetVector.
Definition: SetVector.h:220
llvm::MemorySSAUpdater::moveBefore
void moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where)
Definition: MemorySSAUpdater.cpp:1195
Insn
SmallVector< AArch64_IMM::ImmInsnModel, 4 > Insn
Definition: AArch64MIPeepholeOpt.cpp:74
llvm::MemoryUseOrDef::getDefiningAccess
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition: MemorySSA.h:257
llvm::Successor
@ Successor
Definition: SIMachineScheduler.h:35
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:324
llvm::ArrayRef::begin
iterator begin() const
Definition: ArrayRef.h:153
llvm::MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor
void wireOldPredecessorsToNewImmediatePredecessor(BasicBlock *Old, BasicBlock *New, ArrayRef< BasicBlock * > Preds, bool IdenticalEdgesWereMerged=true)
A new empty BasicBlock (New) now branches directly to Old.
Definition: MemorySSAUpdater.cpp:1272
llvm::MemorySSAUpdater::moveAllAfterMergeBlocks
void moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To, Instruction *Start)
From block was merged into To.
Definition: MemorySSAUpdater.cpp:1262
GlobalVariable.h
llvm::pred_end
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:112
llvm::DenseMapBase::size
unsigned size() const
Definition: DenseMap.h:100
llvm::SetVector< T, SmallVector< T, N >, SmallDenseSet< T, N > >::count
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition: SetVector.h:215
llvm::MemorySSA::removeFromLists
void removeFromLists(MemoryAccess *, bool ShouldDelete=true)
Properly remove MA from all of MemorySSA's lists.
Definition: MemorySSA.cpp:1865
llvm::MemoryUseOrDef
Class that has the common methods + fields of memory uses/defs.
Definition: MemorySSA.h:247
llvm::SmallVectorImpl::clear
void clear()
Definition: SmallVector.h:585
llvm::MemorySSA::getWritableBlockAccesses
AccessList * getWritableBlockAccesses(const BasicBlock *BB) const
Definition: MemorySSA.h:806
IteratedDominanceFrontier.h
llvm::pred_begin
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:109
llvm::SetVector< T, SmallVector< T, N >, SmallDenseSet< T, N > >::end
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:92
MemorySSA.h
llvm::simple_ilist::end
iterator end()
Definition: simple_ilist.h:119
llvm::User::getNumOperands
unsigned getNumOperands() const
Definition: User.h:191
llvm::MemoryAccess::getDefsIterator
DefsOnlyType::self_iterator getDefsIterator()
Definition: MemorySSA.h:190
llvm::User::op_begin
op_iterator op_begin()
Definition: User.h:234
Dominators.h
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
llvm::simple_ilist
A simple intrusive list implementation.
Definition: simple_ilist.h:78
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::SmallSetVector
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:307
BB
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can lr mov lr str ldr sxth r3 mla r4 and then merge mul and lr str ldr sxth r3 mla r4 It also increase the likelihood the store may become dead bb27 Successors according to LLVM BB
Definition: README.txt:39
setMemoryPhiValueForBlock
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB, MemoryAccess *NewDef)
Definition: MemorySSAUpdater.cpp:291
llvm::Value::replaceUsesWithIf
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition: Value.cpp:540
llvm::ValueMap::lookup
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: ValueMap.h:165
llvm::simple_ilist::insert
iterator insert(iterator I, reference Node)
Insert a node by reference; never copies.
Definition: simple_ilist.h:159
LLVMContext.h
From
BlockVerifier::State From
Definition: BlockVerifier.cpp:55
ProcessBlock
static bool ProcessBlock(BasicBlock &BB, DominatorTree &DT, LoopInfo &LI, AAResults &AA)
Definition: Sink.cpp:179
llvm::MemoryUseOrDef::setDefiningAccess
void setDefiningAccess(MemoryAccess *DMA, bool Optimized=false, Optional< AliasResult > AR=AliasResult(AliasResult::MayAlias))
Definition: MemorySSA.h:299
copy
we should consider alternate ways to model stack dependencies Lots of things could be done in WebAssemblyTargetTransformInfo cpp there are numerous optimization related hooks that can be overridden in WebAssemblyTargetLowering Instead of the OptimizeReturned which should consider preserving the returned attribute through to MachineInstrs and extending the MemIntrinsicResults pass to do this optimization on calls too That would also let the WebAssemblyPeephole pass clean up dead defs for such as it does for stores Consider implementing and or getMachineCombinerPatterns Find a clean way to fix the problem which leads to the Shrink Wrapping pass being run after the WebAssembly PEI pass When setting multiple variables to the same we currently get code like const It could be done with a smaller encoding like local tee $pop5 local copy
Definition: README.txt:101
Debug.h
llvm::Value::users
iterator_range< user_iterator > users()
Definition: Value.h:422
llvm::ArrayRef::end
iterator end() const
Definition: ArrayRef.h:154
llvm::MemorySSAUpdater::removeMemoryAccess
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
Definition: MemorySSAUpdater.cpp:1309
SetVector.h
llvm::DominatorTreeBase::Delete
static constexpr UpdateKind Delete
Definition: GenericDomTree.h:243
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:44
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:364
llvm::User::op_end
op_iterator op_end()
Definition: User.h:236