// Source: LLVM 14.0.0git -- MemorySSAUpdater.cpp (extracted from the doxygen
// "documentation of this file" page; per-line number prefixes removed below).
//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include <algorithm>
30 
31 #define DEBUG_TYPE "memoryssa"
32 using namespace llvm;
33 
34 // This is the marker algorithm from "Simple and Efficient Construction of
35 // Static Single Assignment Form"
36 // The simple, non-marker algorithm places phi nodes at any join
37 // Here, we place markers, and only place phi nodes if they end up necessary.
38 // They are only necessary if they break a cycle (IE we recursively visit
39 // ourselves again), or we discover, while getting the value of the operands,
40 // that there are two or more definitions needing to be merged.
41 // This still will leave non-minimal form in the case of irreducible control
42 // flow, where phi nodes may be in cycles with themselves, but unnecessary.
43 MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
44  BasicBlock *BB,
45  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
46  // First, do a cache lookup. Without this cache, certain CFG structures
47  // (like a series of if statements) take exponential time to visit.
48  auto Cached = CachedPreviousDef.find(BB);
49  if (Cached != CachedPreviousDef.end())
50  return Cached->second;
51 
52  // If this method is called from an unreachable block, return LoE.
53  if (!MSSA->DT->isReachableFromEntry(BB))
54  return MSSA->getLiveOnEntryDef();
55 
56  if (BasicBlock *Pred = BB->getUniquePredecessor()) {
57  VisitedBlocks.insert(BB);
58  // Single predecessor case, just recurse, we can only have one definition.
59  MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
60  CachedPreviousDef.insert({BB, Result});
61  return Result;
62  }
63 
64  if (VisitedBlocks.count(BB)) {
65  // We hit our node again, meaning we had a cycle, we must insert a phi
66  // node to break it so we have an operand. The only case this will
67  // insert useless phis is if we have irreducible control flow.
68  MemoryAccess *Result = MSSA->createMemoryPhi(BB);
69  CachedPreviousDef.insert({BB, Result});
70  return Result;
71  }
72 
73  if (VisitedBlocks.insert(BB).second) {
74  // Mark us visited so we can detect a cycle
76 
77  // Recurse to get the values in our predecessors for placement of a
78  // potential phi node. This will insert phi nodes if we cycle in order to
79  // break the cycle and have an operand.
80  bool UniqueIncomingAccess = true;
81  MemoryAccess *SingleAccess = nullptr;
82  for (auto *Pred : predecessors(BB)) {
83  if (MSSA->DT->isReachableFromEntry(Pred)) {
84  auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
85  if (!SingleAccess)
86  SingleAccess = IncomingAccess;
87  else if (IncomingAccess != SingleAccess)
88  UniqueIncomingAccess = false;
89  PhiOps.push_back(IncomingAccess);
90  } else
91  PhiOps.push_back(MSSA->getLiveOnEntryDef());
92  }
93 
94  // Now try to simplify the ops to avoid placing a phi.
95  // This may return null if we never created a phi yet, that's okay
96  MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));
97 
98  // See if we can avoid the phi by simplifying it.
99  auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
100  // If we couldn't simplify, we may have to create a phi
101  if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
102  // A concrete Phi only exists if we created an empty one to break a cycle.
103  if (Phi) {
104  assert(Phi->operands().empty() && "Expected empty Phi");
105  Phi->replaceAllUsesWith(SingleAccess);
106  removeMemoryAccess(Phi);
107  }
108  Result = SingleAccess;
109  } else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
110  if (!Phi)
111  Phi = MSSA->createMemoryPhi(BB);
112 
113  // See if the existing phi operands match what we need.
114  // Unlike normal SSA, we only allow one phi node per block, so we can't just
115  // create a new one.
116  if (Phi->getNumOperands() != 0) {
117  // FIXME: Figure out whether this is dead code and if so remove it.
118  if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
119  // These will have been filled in by the recursive read we did above.
120  llvm::copy(PhiOps, Phi->op_begin());
122  }
123  } else {
124  unsigned i = 0;
125  for (auto *Pred : predecessors(BB))
126  Phi->addIncoming(&*PhiOps[i++], Pred);
127  InsertedPHIs.push_back(Phi);
128  }
129  Result = Phi;
130  }
131 
132  // Set ourselves up for the next variable by resetting visited state.
133  VisitedBlocks.erase(BB);
134  CachedPreviousDef.insert({BB, Result});
135  return Result;
136  }
137  llvm_unreachable("Should have hit one of the three cases above");
138 }
139 
140 // This starts at the memory access, and goes backwards in the block to find the
141 // previous definition. If a definition is not found the block of the access,
142 // it continues globally, creating phi nodes to ensure we have a single
143 // definition.
144 MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
145  if (auto *LocalResult = getPreviousDefInBlock(MA))
146  return LocalResult;
148  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
149 }
150 
151 // This starts at the memory access, and goes backwards in the block to the find
152 // the previous definition. If the definition is not found in the block of the
153 // access, it returns nullptr.
154 MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
155  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());
156 
157  // It's possible there are no defs, or we got handed the first def to start.
158  if (Defs) {
159  // If this is a def, we can just use the def iterators.
160  if (!isa<MemoryUse>(MA)) {
161  auto Iter = MA->getReverseDefsIterator();
162  ++Iter;
163  if (Iter != Defs->rend())
164  return &*Iter;
165  } else {
166  // Otherwise, have to walk the all access iterator.
167  auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
168  for (auto &U : make_range(++MA->getReverseIterator(), End))
169  if (!isa<MemoryUse>(U))
170  return cast<MemoryAccess>(&U);
171  // Note that if MA comes before Defs->begin(), we won't hit a def.
172  return nullptr;
173  }
174  }
175  return nullptr;
176 }
177 
178 // This starts at the end of block
179 MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
180  BasicBlock *BB,
181  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
182  auto *Defs = MSSA->getWritableBlockDefs(BB);
183 
184  if (Defs) {
185  CachedPreviousDef.insert({BB, &*Defs->rbegin()});
186  return &*Defs->rbegin();
187  }
188 
189  return getPreviousDefRecursive(BB, CachedPreviousDef);
190 }
191 // Recurse over a set of phi uses to eliminate the trivial ones
192 MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
193  if (!Phi)
194  return nullptr;
195  TrackingVH<MemoryAccess> Res(Phi);
197  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
198  for (auto &U : Uses)
199  if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
200  tryRemoveTrivialPhi(UsePhi);
201  return Res;
202 }
203 
204 // Eliminate trivial phis
205 // Phis are trivial if they are defined either by themselves, or all the same
206 // argument.
207 // IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
208 // We recursively try to remove them.
209 MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
210  assert(Phi && "Can only remove concrete Phi.");
211  auto OperRange = Phi->operands();
212  return tryRemoveTrivialPhi(Phi, OperRange);
213 }
214 template <class RangeType>
215 MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
216  RangeType &Operands) {
217  // Bail out on non-opt Phis.
218  if (NonOptPhis.count(Phi))
219  return Phi;
220 
221  // Detect equal or self arguments
222  MemoryAccess *Same = nullptr;
223  for (auto &Op : Operands) {
224  // If the same or self, good so far
225  if (Op == Phi || Op == Same)
226  continue;
227  // not the same, return the phi since it's not eliminatable by us
228  if (Same)
229  return Phi;
230  Same = cast<MemoryAccess>(&*Op);
231  }
232  // Never found a non-self reference, the phi is undef
233  if (Same == nullptr)
234  return MSSA->getLiveOnEntryDef();
235  if (Phi) {
236  Phi->replaceAllUsesWith(Same);
237  removeMemoryAccess(Phi);
238  }
239 
240  // We should only end up recursing in case we replaced something, in which
241  // case, we may have made other Phis trivial.
242  return recursePhi(Same);
243 }
244 
245 void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
246  InsertedPHIs.clear();
247  MU->setDefiningAccess(getPreviousDef(MU));
248 
249  // In cases without unreachable blocks, because uses do not create new
250  // may-defs, there are only two cases:
251  // 1. There was a def already below us, and therefore, we should not have
252  // created a phi node because it was already needed for the def.
253  //
254  // 2. There is no def below us, and therefore, there is no extra renaming work
255  // to do.
256 
257  // In cases with unreachable blocks, where the unnecessary Phis were
258  // optimized out, adding the Use may re-insert those Phis. Hence, when
259  // inserting Uses outside of the MSSA creation process, and new Phis were
260  // added, rename all uses if we are asked.
261 
262  if (!RenameUses && !InsertedPHIs.empty()) {
263  auto *Defs = MSSA->getBlockDefs(MU->getBlock());
264  (void)Defs;
265  assert((!Defs || (++Defs->begin() == Defs->end())) &&
266  "Block may have only a Phi or no defs");
267  }
268 
269  if (RenameUses && InsertedPHIs.size()) {
271  BasicBlock *StartBlock = MU->getBlock();
272 
273  if (auto *Defs = MSSA->getWritableBlockDefs(StartBlock)) {
274  MemoryAccess *FirstDef = &*Defs->begin();
275  // Convert to incoming value if it's a memorydef. A phi *is* already an
276  // incoming value.
277  if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
278  FirstDef = MD->getDefiningAccess();
279 
280  MSSA->renamePass(MU->getBlock(), FirstDef, Visited);
281  }
282  // We just inserted a phi into this block, so the incoming value will
283  // become the phi anyway, so it does not matter what we pass.
284  for (auto &MP : InsertedPHIs)
285  if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
286  MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
287  }
288 }
289 
290 // Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
292  MemoryAccess *NewDef) {
293  // Replace any operand with us an incoming block with the new defining
294  // access.
295  int i = MP->getBasicBlockIndex(BB);
296  assert(i != -1 && "Should have found the basic block in the phi");
297  // We can't just compare i against getNumOperands since one is signed and the
298  // other not. So use it to index into the block iterator.
299  for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
300  ++BBIter) {
301  if (*BBIter != BB)
302  break;
303  MP->setIncomingValue(i, NewDef);
304  ++i;
305  }
306 }
307 
308 // A brief description of the algorithm:
309 // First, we compute what should define the new def, using the SSA
310 // construction algorithm.
311 // Then, we update the defs below us (and any new phi nodes) in the graph to
312 // point to the correct new defs, to ensure we only have one variable, and no
313 // disconnected stores.
314 void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
315  InsertedPHIs.clear();
316 
317  // See if we had a local def, and if not, go hunting.
318  MemoryAccess *DefBefore = getPreviousDef(MD);
319  bool DefBeforeSameBlock = false;
320  if (DefBefore->getBlock() == MD->getBlock() &&
321  !(isa<MemoryPhi>(DefBefore) &&
322  llvm::is_contained(InsertedPHIs, DefBefore)))
323  DefBeforeSameBlock = true;
324 
325  // There is a def before us, which means we can replace any store/phi uses
326  // of that thing with us, since we are in the way of whatever was there
327  // before.
328  // We now define that def's memorydefs and memoryphis
329  if (DefBeforeSameBlock) {
330  DefBefore->replaceUsesWithIf(MD, [MD](Use &U) {
331  // Leave the MemoryUses alone.
332  // Also make sure we skip ourselves to avoid self references.
333  User *Usr = U.getUser();
334  return !isa<MemoryUse>(Usr) && Usr != MD;
335  // Defs are automatically unoptimized when the user is set to MD below,
336  // because the isOptimized() call will fail to find the same ID.
337  });
338  }
339 
340  // and that def is now our defining access.
341  MD->setDefiningAccess(DefBefore);
342 
343  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());
344 
345  SmallSet<WeakVH, 8> ExistingPhis;
346 
347  // Remember the index where we may insert new phis.
348  unsigned NewPhiIndex = InsertedPHIs.size();
349  if (!DefBeforeSameBlock) {
350  // If there was a local def before us, we must have the same effect it
351  // did. Because every may-def is the same, any phis/etc we would create, it
352  // would also have created. If there was no local def before us, we
353  // performed a global update, and have to search all successors and make
354  // sure we update the first def in each of them (following all paths until
355  // we hit the first def along each path). This may also insert phi nodes.
356  // TODO: There are other cases we can skip this work, such as when we have a
357  // single successor, and only used a straight line of single pred blocks
358  // backwards to find the def. To make that work, we'd have to track whether
359  // getDefRecursive only ever used the single predecessor case. These types
360  // of paths also only exist in between CFG simplifications.
361 
362  // If this is the first def in the block and this insert is in an arbitrary
363  // place, compute IDF and place phis.
364  SmallPtrSet<BasicBlock *, 2> DefiningBlocks;
365 
366  // If this is the last Def in the block, we may need additional Phis.
367  // Compute IDF in all cases, as renaming needs to be done even when MD is
368  // not the last access, because it can introduce a new access past which a
369  // previous access was optimized; that access needs to be reoptimized.
370  DefiningBlocks.insert(MD->getBlock());
371  for (const auto &VH : InsertedPHIs)
372  if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
373  DefiningBlocks.insert(RealPHI->getBlock());
374  ForwardIDFCalculator IDFs(*MSSA->DT);
376  IDFs.setDefiningBlocks(DefiningBlocks);
377  IDFs.calculate(IDFBlocks);
378  SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
379  for (auto *BBIDF : IDFBlocks) {
380  auto *MPhi = MSSA->getMemoryAccess(BBIDF);
381  if (!MPhi) {
382  MPhi = MSSA->createMemoryPhi(BBIDF);
383  NewInsertedPHIs.push_back(MPhi);
384  } else {
385  ExistingPhis.insert(MPhi);
386  }
387  // Add the phis created into the IDF blocks to NonOptPhis, so they are not
388  // optimized out as trivial by the call to getPreviousDefFromEnd below.
389  // Once they are complete, all these Phis are added to the FixupList, and
390  // removed from NonOptPhis inside fixupDefs(). Existing Phis in IDF may
391  // need fixing as well, and potentially be trivial before this insertion,
392  // hence add all IDF Phis. See PR43044.
393  NonOptPhis.insert(MPhi);
394  }
395  for (auto &MPhi : NewInsertedPHIs) {
396  auto *BBIDF = MPhi->getBlock();
397  for (auto *Pred : predecessors(BBIDF)) {
399  MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef), Pred);
400  }
401  }
402 
403  // Re-take the index where we're adding the new phis, because the above call
404  // to getPreviousDefFromEnd, may have inserted into InsertedPHIs.
405  NewPhiIndex = InsertedPHIs.size();
406  for (auto &MPhi : NewInsertedPHIs) {
407  InsertedPHIs.push_back(&*MPhi);
408  FixupList.push_back(&*MPhi);
409  }
410 
411  FixupList.push_back(MD);
412  }
413 
414  // Remember the index where we stopped inserting new phis above, since the
415  // fixupDefs call in the loop below may insert more, that are already minimal.
416  unsigned NewPhiIndexEnd = InsertedPHIs.size();
417 
418  while (!FixupList.empty()) {
419  unsigned StartingPHISize = InsertedPHIs.size();
420  fixupDefs(FixupList);
421  FixupList.clear();
422  // Put any new phis on the fixup list, and process them
423  FixupList.append(InsertedPHIs.begin() + StartingPHISize, InsertedPHIs.end());
424  }
425 
426  // Optimize potentially non-minimal phis added in this method.
427  unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
428  if (NewPhiSize)
429  tryRemoveTrivialPhis(ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));
430 
431  // Now that all fixups are done, rename all uses if we are asked. Skip
432  // renaming for defs in unreachable blocks.
433  BasicBlock *StartBlock = MD->getBlock();
434  if (RenameUses && MSSA->getDomTree().getNode(StartBlock)) {
436  // We are guaranteed there is a def in the block, because we just got it
437  // handed to us in this function.
438  MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
439  // Convert to incoming value if it's a memorydef. A phi *is* already an
440  // incoming value.
441  if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
442  FirstDef = MD->getDefiningAccess();
443 
444  MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
445  // We just inserted a phi into this block, so the incoming value will become
446  // the phi anyway, so it does not matter what we pass.
447  for (auto &MP : InsertedPHIs) {
448  MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
449  if (Phi)
450  MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
451  }
452  // Existing Phi blocks may need renaming too, if an access was previously
453  // optimized and the inserted Defs "covers" the Optimized value.
454  for (auto &MP : ExistingPhis) {
455  MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
456  if (Phi)
457  MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
458  }
459  }
460 }
461 
462 void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
465  for (auto &Var : Vars) {
466  MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
467  if (!NewDef)
468  continue;
469  // First, see if there is a local def after the operand.
470  auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
471  auto DefIter = NewDef->getDefsIterator();
472 
473  // The temporary Phi is being fixed, unmark it for not to optimize.
474  if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
475  NonOptPhis.erase(Phi);
476 
477  // If there is a local def after us, we only have to rename that.
478  if (++DefIter != Defs->end()) {
479  cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef);
480  continue;
481  }
482 
483  // Otherwise, we need to search down through the CFG.
484  // For each of our successors, handle it directly if their is a phi, or
485  // place on the fixup worklist.
486  for (const auto *S : successors(NewDef->getBlock())) {
487  if (auto *MP = MSSA->getMemoryAccess(S))
488  setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
489  else
490  Worklist.push_back(S);
491  }
492 
493  while (!Worklist.empty()) {
494  const BasicBlock *FixupBlock = Worklist.back();
495  Worklist.pop_back();
496 
497  // Get the first def in the block that isn't a phi node.
498  if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
499  auto *FirstDef = &*Defs->begin();
500  // The loop above and below should have taken care of phi nodes
501  assert(!isa<MemoryPhi>(FirstDef) &&
502  "Should have already handled phi nodes!");
503  // We are now this def's defining access, make sure we actually dominate
504  // it
505  assert(MSSA->dominates(NewDef, FirstDef) &&
506  "Should have dominated the new access");
507 
508  // This may insert new phi nodes, because we are not guaranteed the
509  // block we are processing has a single pred, and depending where the
510  // store was inserted, it may require phi nodes below it.
511  cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
512  return;
513  }
514  // We didn't find a def, so we must continue.
515  for (const auto *S : successors(FixupBlock)) {
516  // If there is a phi node, handle it.
517  // Otherwise, put the block on the worklist
518  if (auto *MP = MSSA->getMemoryAccess(S))
519  setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
520  else {
521  // If we cycle, we should have ended up at a phi node that we already
522  // processed. FIXME: Double check this
523  if (!Seen.insert(S).second)
524  continue;
525  Worklist.push_back(S);
526  }
527  }
528  }
529  }
530 }
531 
533  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
534  MPhi->unorderedDeleteIncomingBlock(From);
535  tryRemoveTrivialPhi(MPhi);
536  }
537 }
538 
540  const BasicBlock *To) {
541  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
542  bool Found = false;
543  MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
544  if (From != B)
545  return false;
546  if (Found)
547  return true;
548  Found = true;
549  return false;
550  });
551  tryRemoveTrivialPhi(MPhi);
552  }
553 }
554 
555 /// If all arguments of a MemoryPHI are defined by the same incoming
556 /// argument, return that argument.
558  MemoryAccess *MA = nullptr;
559 
560  for (auto &Arg : MP->operands()) {
561  if (!MA)
562  MA = cast<MemoryAccess>(Arg);
563  else if (MA != Arg)
564  return nullptr;
565  }
566  return MA;
567 }
568 
570  const ValueToValueMapTy &VMap,
571  PhiToDefMap &MPhiMap,
572  bool CloneWasSimplified,
573  MemorySSA *MSSA) {
574  MemoryAccess *InsnDefining = MA;
575  if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
576  if (!MSSA->isLiveOnEntryDef(DefMUD)) {
577  Instruction *DefMUDI = DefMUD->getMemoryInst();
578  assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
579  if (Instruction *NewDefMUDI =
580  cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
581  InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
582  if (!CloneWasSimplified)
583  assert(InsnDefining && "Defining instruction cannot be nullptr.");
584  else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
585  // The clone was simplified, it's no longer a MemoryDef, look up.
586  auto DefIt = DefMUD->getDefsIterator();
587  // Since simplified clones only occur in single block cloning, a
588  // previous definition must exist, otherwise NewDefMUDI would not
589  // have been found in VMap.
590  assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
591  "Previous def must exist");
592  InsnDefining = getNewDefiningAccessForClone(
593  &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
594  }
595  }
596  }
597  } else {
598  MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
599  if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
600  InsnDefining = NewDefPhi;
601  }
602  assert(InsnDefining && "Defining instruction cannot be nullptr.");
603  return InsnDefining;
604 }
605 
606 void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
607  const ValueToValueMapTy &VMap,
608  PhiToDefMap &MPhiMap,
609  bool CloneWasSimplified) {
610  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
611  if (!Acc)
612  return;
613  for (const MemoryAccess &MA : *Acc) {
614  if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
615  Instruction *Insn = MUD->getMemoryInst();
616  // Entry does not exist if the clone of the block did not clone all
617  // instructions. This occurs in LoopRotate when cloning instructions
618  // from the old header to the old preheader. The cloned instruction may
619  // also be a simplified Value, not an Instruction (see LoopRotate).
620  // Also in LoopRotate, even when it's an instruction, due to it being
621  // simplified, it may be a Use rather than a Def, so we cannot use MUD as
622  // template. Calls coming from updateForClonedBlockIntoPred, ensure this.
623  if (Instruction *NewInsn =
624  dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
625  MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
626  NewInsn,
627  getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
628  MPhiMap, CloneWasSimplified, MSSA),
629  /*Template=*/CloneWasSimplified ? nullptr : MUD,
630  /*CreationMustSucceed=*/CloneWasSimplified ? false : true);
631  if (NewUseOrDef)
632  MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
633  }
634  }
635  }
636 }
637 
639  BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
640  auto *MPhi = MSSA->getMemoryAccess(Header);
641  if (!MPhi)
642  return;
643 
644  // Create phi node in the backedge block and populate it with the same
645  // incoming values as MPhi. Skip incoming values coming from Preheader.
646  auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
647  bool HasUniqueIncomingValue = true;
648  MemoryAccess *UniqueValue = nullptr;
649  for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
650  BasicBlock *IBB = MPhi->getIncomingBlock(I);
651  MemoryAccess *IV = MPhi->getIncomingValue(I);
652  if (IBB != Preheader) {
653  NewMPhi->addIncoming(IV, IBB);
654  if (HasUniqueIncomingValue) {
655  if (!UniqueValue)
656  UniqueValue = IV;
657  else if (UniqueValue != IV)
658  HasUniqueIncomingValue = false;
659  }
660  }
661  }
662 
663  // Update incoming edges into MPhi. Remove all but the incoming edge from
664  // Preheader. Add an edge from NewMPhi
665  auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
666  MPhi->setIncomingValue(0, AccFromPreheader);
667  MPhi->setIncomingBlock(0, Preheader);
668  for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
669  MPhi->unorderedDeleteIncoming(I);
670  MPhi->addIncoming(NewMPhi, BEBlock);
671 
672  // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will be
673  // replaced with the unique value.
674  tryRemoveTrivialPhi(NewMPhi);
675 }
676 
678  ArrayRef<BasicBlock *> ExitBlocks,
679  const ValueToValueMapTy &VMap,
680  bool IgnoreIncomingWithNoClones) {
681  PhiToDefMap MPhiMap;
682 
683  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
684  assert(Phi && NewPhi && "Invalid Phi nodes.");
685  BasicBlock *NewPhiBB = NewPhi->getBlock();
686  SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
687  pred_end(NewPhiBB));
688  for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
689  MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
690  BasicBlock *IncBB = Phi->getIncomingBlock(It);
691 
692  if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
693  IncBB = NewIncBB;
694  else if (IgnoreIncomingWithNoClones)
695  continue;
696 
697  // Now we have IncBB, and will need to add incoming from it to NewPhi.
698 
699  // If IncBB is not a predecessor of NewPhiBB, then do not add it.
700  // NewPhiBB was cloned without that edge.
701  if (!NewPhiBBPreds.count(IncBB))
702  continue;
703 
704  // Determine incoming value and add it as incoming from IncBB.
705  if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
706  if (!MSSA->isLiveOnEntryDef(IncMUD)) {
707  Instruction *IncI = IncMUD->getMemoryInst();
708  assert(IncI && "Found MemoryUseOrDef with no Instruction.");
709  if (Instruction *NewIncI =
710  cast_or_null<Instruction>(VMap.lookup(IncI))) {
711  IncMUD = MSSA->getMemoryAccess(NewIncI);
712  assert(IncMUD &&
713  "MemoryUseOrDef cannot be null, all preds processed.");
714  }
715  }
716  NewPhi->addIncoming(IncMUD, IncBB);
717  } else {
718  MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
719  if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
720  NewPhi->addIncoming(NewDefPhi, IncBB);
721  else
722  NewPhi->addIncoming(IncPhi, IncBB);
723  }
724  }
725  if (auto *SingleAccess = onlySingleValue(NewPhi)) {
726  MPhiMap[Phi] = SingleAccess;
727  removeMemoryAccess(NewPhi);
728  }
729  };
730 
731  auto ProcessBlock = [&](BasicBlock *BB) {
732  BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
733  if (!NewBlock)
734  return;
735 
736  assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
737  "Cloned block should have no accesses");
738 
739  // Add MemoryPhi.
740  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
741  MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
742  MPhiMap[MPhi] = NewPhi;
743  }
744  // Update Uses and Defs.
745  cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
746  };
747 
748  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
749  ProcessBlock(BB);
750 
751  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
752  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
753  if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
754  FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
755 }
756 
758  BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
759  // All defs/phis from outside BB that are used in BB, are valid uses in P1.
760  // Since those defs/phis must have dominated BB, and also dominate P1.
761  // Defs from BB being used in BB will be replaced with the cloned defs from
762  // VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
763  // incoming def into the Phi from P1.
764  // Instructions cloned into the predecessor are in practice sometimes
765  // simplified, so disable the use of the template, and create an access from
766  // scratch.
767  PhiToDefMap MPhiMap;
768  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
769  MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
770  cloneUsesAndDefs(BB, P1, VM, MPhiMap, /*CloneWasSimplified=*/true);
771 }
772 
773 template <typename Iter>
774 void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
775  ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
776  DominatorTree &DT) {
778  // Update/insert phis in all successors of exit blocks.
779  for (auto *Exit : ExitBlocks)
780  for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
781  if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
782  BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
783  Updates.push_back({DT.Insert, NewExit, ExitSucc});
784  }
785  applyInsertUpdates(Updates, DT);
786 }
787 
789  ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
790  DominatorTree &DT) {
791  const ValueToValueMapTy *const Arr[] = {&VMap};
792  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
793  std::end(Arr), DT);
794 }
795 
797  ArrayRef<BasicBlock *> ExitBlocks,
798  ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
799  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
800  return I.get();
801  };
802  using MappedIteratorType =
804  decltype(GetPtr)>;
805  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
806  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
807  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
808 }
809 
811  DominatorTree &DT, bool UpdateDT) {
812  SmallVector<CFGUpdate, 4> DeleteUpdates;
813  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
814  SmallVector<CFGUpdate, 4> InsertUpdates;
815  for (auto &Update : Updates) {
816  if (Update.getKind() == DT.Insert)
817  InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
818  else {
819  DeleteUpdates.push_back({DT.Delete, Update.getFrom(), Update.getTo()});
820  RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
821  }
822  }
823 
824  if (!DeleteUpdates.empty()) {
825  if (!UpdateDT) {
827  // Deletes are reversed applied, because this CFGView is pretending the
828  // deletes did not happen yet, hence the edges still exist.
829  DT.applyUpdates(Empty, RevDeleteUpdates);
830  } else {
831  // Apply all updates, with the RevDeleteUpdates as PostCFGView.
832  DT.applyUpdates(Updates, RevDeleteUpdates);
833  }
834 
835  // Note: the MSSA update below doesn't distinguish between a GD with
836  // (RevDelete,false) and (Delete, true), but this matters for the DT
837  // updates above; for "children" purposes they are equivalent; but the
838  // updates themselves convey the desired update, used inside DT only.
839  GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
840  applyInsertUpdates(InsertUpdates, DT, &GD);
841  // Update DT to redelete edges; this matches the real CFG so we can perform
842  // the standard update without a postview of the CFG.
843  DT.applyUpdates(DeleteUpdates);
844  } else {
845  if (UpdateDT)
846  DT.applyUpdates(Updates);
848  applyInsertUpdates(InsertUpdates, DT, &GD);
849  }
850 
851  // Update for deleted edges
852  for (auto &Update : DeleteUpdates)
853  removeEdge(Update.getFrom(), Update.getTo());
854 }
855 
857  DominatorTree &DT) {
859  applyInsertUpdates(Updates, DT, &GD);
860 }
861 
863  DominatorTree &DT,
864  const GraphDiff<BasicBlock *> *GD) {
865  // Get recursive last Def, assuming well formed MSSA and updated DT.
866  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
867  while (true) {
869  // Return last Def or Phi in BB, if it exists.
870  if (Defs)
871  return &*(--Defs->end());
872 
873  // Check number of predecessors, we only care if there's more than one.
874  unsigned Count = 0;
875  BasicBlock *Pred = nullptr;
876  for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
877  Pred = Pi;
878  Count++;
879  if (Count == 2)
880  break;
881  }
882 
883  // If BB has multiple predecessors, get last definition from IDom.
884  if (Count != 1) {
885  // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its
886  // DT is invalidated. Return LoE as its last def. This will be added to
887  // MemoryPhi node, and later deleted when the block is deleted.
888  if (!DT.getNode(BB))
889  return MSSA->getLiveOnEntryDef();
890  if (auto *IDom = DT.getNode(BB)->getIDom())
891  if (IDom->getBlock() != BB) {
892  BB = IDom->getBlock();
893  continue;
894  }
895  return MSSA->getLiveOnEntryDef();
896  } else {
897  // Single predecessor, BB cannot be dead. GetLastDef of Pred.
898  assert(Count == 1 && Pred && "Single predecessor expected.");
899  // BB can be unreachable though, return LoE if that is the case.
900  if (!DT.getNode(BB))
901  return MSSA->getLiveOnEntryDef();
902  BB = Pred;
903  }
904  };
905  llvm_unreachable("Unable to get last definition.");
906  };
907 
908  // Get nearest IDom given a set of blocks.
909  // TODO: this can be optimized by starting the search at the node with the
910  // lowest level (highest in the tree).
911  auto FindNearestCommonDominator =
912  [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
913  BasicBlock *PrevIDom = *BBSet.begin();
914  for (auto *BB : BBSet)
915  PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
916  return PrevIDom;
917  };
918 
919  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not
920  // include CurrIDom.
921  auto GetNoLongerDomBlocks =
922  [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
923  SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
924  if (PrevIDom == CurrIDom)
925  return;
926  BlocksPrevDom.push_back(PrevIDom);
927  BasicBlock *NextIDom = PrevIDom;
928  while (BasicBlock *UpIDom =
929  DT.getNode(NextIDom)->getIDom()->getBlock()) {
930  if (UpIDom == CurrIDom)
931  break;
932  BlocksPrevDom.push_back(UpIDom);
933  NextIDom = UpIDom;
934  }
935  };
936 
937  // Map a BB to its predecessors: added + previously existing. To get a
938  // deterministic order, store predecessors as SetVectors. The order in each
939  // will be defined by the order in Updates (fixed) and the order given by
940  // children<> (also fixed). Since we further iterate over these ordered sets,
941  // we lose the information of multiple edges possibly existing between two
942  // blocks, so we'll keep and EdgeCount map for that.
943  // An alternate implementation could keep unordered set for the predecessors,
944  // traverse either Updates or children<> each time to get the deterministic
945  // order, and drop the usage of EdgeCount. This alternate approach would still
946  // require querying the maps for each predecessor, and children<> call has
947  // additional computation inside for creating the snapshot-graph predecessors.
948  // As such, we favor using a little additional storage and less compute time.
949  // This decision can be revisited if we find the alternative more favorable.
950 
951  struct PredInfo {
954  };
956 
957  for (auto &Edge : Updates) {
958  BasicBlock *BB = Edge.getTo();
959  auto &AddedBlockSet = PredMap[BB].Added;
960  AddedBlockSet.insert(Edge.getFrom());
961  }
962 
963  // Store all existing predecessor for each BB, at least one must exist.
966  for (auto &BBPredPair : PredMap) {
967  auto *BB = BBPredPair.first;
968  const auto &AddedBlockSet = BBPredPair.second.Added;
969  auto &PrevBlockSet = BBPredPair.second.Prev;
970  for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
971  if (!AddedBlockSet.count(Pi))
972  PrevBlockSet.insert(Pi);
973  EdgeCountMap[{Pi, BB}]++;
974  }
975 
976  if (PrevBlockSet.empty()) {
977  assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
978  LLVM_DEBUG(
979  dbgs()
980  << "Adding a predecessor to a block with no predecessors. "
981  "This must be an edge added to a new, likely cloned, block. "
982  "Its memory accesses must be already correct, assuming completed "
983  "via the updateExitBlocksForClonedLoop API. "
984  "Assert a single such edge is added so no phi addition or "
985  "additional processing is required.\n");
986  assert(AddedBlockSet.size() == 1 &&
987  "Can only handle adding one predecessor to a new block.");
988  // Need to remove new blocks from PredMap. Remove below to not invalidate
989  // iterator here.
990  NewBlocks.insert(BB);
991  }
992  }
993  // Nothing to process for new/cloned blocks.
994  for (auto *BB : NewBlocks)
995  PredMap.erase(BB);
996 
997  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
998  SmallVector<WeakVH, 8> InsertedPhis;
999 
1000  // First create MemoryPhis in all blocks that don't have one. Create in the
1001  // order found in Updates, not in PredMap, to get deterministic numbering.
1002  for (auto &Edge : Updates) {
1003  BasicBlock *BB = Edge.getTo();
1004  if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
1005  InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
1006  }
1007 
1008  // Now we'll fill in the MemoryPhis with the right incoming values.
1009  for (auto &BBPredPair : PredMap) {
1010  auto *BB = BBPredPair.first;
1011  const auto &PrevBlockSet = BBPredPair.second.Prev;
1012  const auto &AddedBlockSet = BBPredPair.second.Added;
1013  assert(!PrevBlockSet.empty() &&
1014  "At least one previous predecessor must exist.");
1015 
1016  // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
1017  // keeping this map before the loop. We can reuse already populated entries
1018  // if an edge is added from the same predecessor to two different blocks,
1019  // and this does happen in rotate. Note that the map needs to be updated
1020  // when deleting non-necessary phis below, if the phi is in the map by
1021  // replacing the value with DefP1.
1023  for (auto *AddedPred : AddedBlockSet) {
1024  auto *DefPn = GetLastDef(AddedPred);
1025  assert(DefPn != nullptr && "Unable to find last definition.");
1026  LastDefAddedPred[AddedPred] = DefPn;
1027  }
1028 
1029  MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
1030  // If Phi is not empty, add an incoming edge from each added pred. Must
1031  // still compute blocks with defs to replace for this block below.
1032  if (NewPhi->getNumOperands()) {
1033  for (auto *Pred : AddedBlockSet) {
1034  auto *LastDefForPred = LastDefAddedPred[Pred];
1035  for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1036  NewPhi->addIncoming(LastDefForPred, Pred);
1037  }
1038  } else {
1039  // Pick any existing predecessor and get its definition. All other
1040  // existing predecessors should have the same one, since no phi existed.
1041  auto *P1 = *PrevBlockSet.begin();
1042  MemoryAccess *DefP1 = GetLastDef(P1);
1043 
1044  // Check DefP1 against all Defs in LastDefPredPair. If all the same,
1045  // nothing to add.
1046  bool InsertPhi = false;
1047  for (auto LastDefPredPair : LastDefAddedPred)
1048  if (DefP1 != LastDefPredPair.second) {
1049  InsertPhi = true;
1050  break;
1051  }
1052  if (!InsertPhi) {
1053  // Since NewPhi may be used in other newly added Phis, replace all uses
1054  // of NewPhi with the definition coming from all predecessors (DefP1),
1055  // before deleting it.
1056  NewPhi->replaceAllUsesWith(DefP1);
1057  removeMemoryAccess(NewPhi);
1058  continue;
1059  }
1060 
1061  // Update Phi with new values for new predecessors and old value for all
1062  // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
1063  // sets, the order of entries in NewPhi is deterministic.
1064  for (auto *Pred : AddedBlockSet) {
1065  auto *LastDefForPred = LastDefAddedPred[Pred];
1066  for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1067  NewPhi->addIncoming(LastDefForPred, Pred);
1068  }
1069  for (auto *Pred : PrevBlockSet)
1070  for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1071  NewPhi->addIncoming(DefP1, Pred);
1072  }
1073 
1074  // Get all blocks that used to dominate BB and no longer do after adding
1075  // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
1076  assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
1077  BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
1078  assert(PrevIDom && "Previous IDom should exists");
1079  BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
1080  assert(NewIDom && "BB should have a new valid idom");
1081  assert(DT.dominates(NewIDom, PrevIDom) &&
1082  "New idom should dominate old idom");
1083  GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
1084  }
1085 
1086  tryRemoveTrivialPhis(InsertedPhis);
1087  // Create the set of blocks that now have a definition. We'll use this to
1088  // compute IDF and add Phis there next.
1089  SmallVector<BasicBlock *, 8> BlocksToProcess;
1090  for (auto &VH : InsertedPhis)
1091  if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
1092  BlocksToProcess.push_back(MPhi->getBlock());
1093 
1094  // Compute IDF and add Phis in all IDF blocks that do not have one.
1096  if (!BlocksToProcess.empty()) {
1097  ForwardIDFCalculator IDFs(DT, GD);
1098  SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
1099  BlocksToProcess.end());
1100  IDFs.setDefiningBlocks(DefiningBlocks);
1101  IDFs.calculate(IDFBlocks);
1102 
1103  SmallSetVector<MemoryPhi *, 4> PhisToFill;
1104  // First create all needed Phis.
1105  for (auto *BBIDF : IDFBlocks)
1106  if (!MSSA->getMemoryAccess(BBIDF)) {
1107  auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
1108  InsertedPhis.push_back(IDFPhi);
1109  PhisToFill.insert(IDFPhi);
1110  }
1111  // Then update or insert their correct incoming values.
1112  for (auto *BBIDF : IDFBlocks) {
1113  auto *IDFPhi = MSSA->getMemoryAccess(BBIDF);
1114  assert(IDFPhi && "Phi must exist");
1115  if (!PhisToFill.count(IDFPhi)) {
1116  // Update existing Phi.
1117  // FIXME: some updates may be redundant, try to optimize and skip some.
1118  for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
1119  IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
1120  } else {
1121  for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BBIDF))
1122  IDFPhi->addIncoming(GetLastDef(Pi), Pi);
1123  }
1124  }
1125  }
1126 
1127  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
1128  // longer dominate, replace those with the closest dominating def.
1129  // This will also update optimized accesses, as they're also uses.
1130  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
1131  if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
1132  for (auto &DefToReplaceUses : *DefsList) {
1133  BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
1134  Value::use_iterator UI = DefToReplaceUses.use_begin(),
1135  E = DefToReplaceUses.use_end();
1136  for (; UI != E;) {
1137  Use &U = *UI;
1138  ++UI;
1139  MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
1140  if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
1141  BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
1142  if (!DT.dominates(DominatingBlock, DominatedBlock))
1143  U.set(GetLastDef(DominatedBlock));
1144  } else {
1145  BasicBlock *DominatedBlock = Usr->getBlock();
1146  if (!DT.dominates(DominatingBlock, DominatedBlock)) {
1147  if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
1148  U.set(DomBlPhi);
1149  else {
1150  auto *IDom = DT.getNode(DominatedBlock)->getIDom();
1151  assert(IDom && "Block must have a valid IDom.");
1152  U.set(GetLastDef(IDom->getBlock()));
1153  }
1154  cast<MemoryUseOrDef>(Usr)->resetOptimized();
1155  }
1156  }
1157  }
1158  }
1159  }
1160  }
1161  tryRemoveTrivialPhis(InsertedPhis);
1162 }
1163 
1164 // Move What before Where in the MemorySSA IR.
1165 template <class WhereType>
1166 void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1167  WhereType Where) {
1168  // Mark MemoryPhi users of What not to be optimized.
1169  for (auto *U : What->users())
1170  if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
1171  NonOptPhis.insert(PhiUser);
1172 
1173  // Replace all our users with our defining access.
1174  What->replaceAllUsesWith(What->getDefiningAccess());
1175 
1176  // Let MemorySSA take care of moving it around in the lists.
1177  MSSA->moveTo(What, BB, Where);
1178 
1179  // Now reinsert it into the IR and do whatever fixups needed.
1180  if (auto *MD = dyn_cast<MemoryDef>(What))
1181  insertDef(MD, /*RenameUses=*/true);
1182  else
1183  insertUse(cast<MemoryUse>(What), /*RenameUses=*/true);
1184 
1185  // Clear dangling pointers. We added all MemoryPhi users, but not all
1186  // of them are removed by fixupDefs().
1187  NonOptPhis.clear();
1188 }
1189 
1190 // Move What before Where in the MemorySSA IR.
1192  moveTo(What, Where->getBlock(), Where->getIterator());
1193 }
1194 
1195 // Move What after Where in the MemorySSA IR.
1197  moveTo(What, Where->getBlock(), ++Where->getIterator());
1198 }
1199 
1201  MemorySSA::InsertionPlace Where) {
1202  if (Where != MemorySSA::InsertionPlace::BeforeTerminator)
1203  return moveTo(What, BB, Where);
1204 
1205  if (auto *Where = MSSA->getMemoryAccess(BB->getTerminator()))
1206  return moveBefore(What, Where);
1207  else
1208  return moveTo(What, BB, MemorySSA::InsertionPlace::End);
1209 }
1210 
1211 // All accesses in To used to be in From. Move to end and update access lists.
1212 void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
1213  Instruction *Start) {
1214 
1216  if (!Accs)
1217  return;
1218 
1219  assert(Start->getParent() == To && "Incorrect Start instruction");
1220  MemoryAccess *FirstInNew = nullptr;
1221  for (Instruction &I : make_range(Start->getIterator(), To->end()))
1222  if ((FirstInNew = MSSA->getMemoryAccess(&I)))
1223  break;
1224  if (FirstInNew) {
1225  auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
1226  do {
1227  auto NextIt = ++MUD->getIterator();
1228  MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
1229  ? nullptr
1230  : cast<MemoryUseOrDef>(&*NextIt);
1231  MSSA->moveTo(MUD, To, MemorySSA::End);
1232  // Moving MUD from Accs in the moveTo above, may delete Accs, so we need
1233  // to retrieve it again.
1234  Accs = MSSA->getWritableBlockAccesses(From);
1235  MUD = NextMUD;
1236  } while (MUD);
1237  }
1238 
1239  // If all accesses were moved and only a trivial Phi remains, we try to remove
1240  // that Phi. This is needed when From is going to be deleted.
1241  auto *Defs = MSSA->getWritableBlockDefs(From);
1242  if (Defs && !Defs->empty())
1243  if (auto *Phi = dyn_cast<MemoryPhi>(&*Defs->begin()))
1244  tryRemoveTrivialPhi(Phi);
1245 }
1246 
1248  BasicBlock *To,
1249  Instruction *Start) {
1250  assert(MSSA->getBlockAccesses(To) == nullptr &&
1251  "To block is expected to be free of MemoryAccesses.");
1252  moveAllAccesses(From, To, Start);
1253  for (BasicBlock *Succ : successors(To))
1254  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
1255  MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
1256 }
1257 
1259  Instruction *Start) {
1260  assert(From->getUniquePredecessor() == To &&
1261  "From block is expected to have a single predecessor (To).");
1262  moveAllAccesses(From, To, Start);
1263  for (BasicBlock *Succ : successors(From))
1264  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
1265  MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
1266 }
1267 
1269  BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
1270  bool IdenticalEdgesWereMerged) {
1271  assert(!MSSA->getWritableBlockAccesses(New) &&
1272  "Access list should be null for a new block.");
1273  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
1274  if (!Phi)
1275  return;
1276  if (Old->hasNPredecessors(1)) {
1277  assert(pred_size(New) == Preds.size() &&
1278  "Should have moved all predecessors.");
1279  MSSA->moveTo(Phi, New, MemorySSA::Beginning);
1280  } else {
1281  assert(!Preds.empty() && "Must be moving at least one predecessor to the "
1282  "new immediate predecessor.");
1283  MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
1284  SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
1285  // Currently only support the case of removing a single incoming edge when
1286  // identical edges were not merged.
1287  if (!IdenticalEdgesWereMerged)
1288  assert(PredsSet.size() == Preds.size() &&
1289  "If identical edges were not merged, we cannot have duplicate "
1290  "blocks in the predecessors");
1292  if (PredsSet.count(B)) {
1293  NewPhi->addIncoming(MA, B);
1294  if (!IdenticalEdgesWereMerged)
1295  PredsSet.erase(B);
1296  return true;
1297  }
1298  return false;
1299  });
1300  Phi->addIncoming(NewPhi, New);
1301  tryRemoveTrivialPhi(NewPhi);
1302  }
1303 }
1304 
1306  assert(!MSSA->isLiveOnEntryDef(MA) &&
1307  "Trying to remove the live on entry def");
1308  // We can only delete phi nodes if they have no uses, or we can replace all
1309  // uses with a single definition.
1310  MemoryAccess *NewDefTarget = nullptr;
1311  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
1312  // Note that it is sufficient to know that all edges of the phi node have
1313  // the same argument. If they do, by the definition of dominance frontiers
1314  // (which we used to place this phi), that argument must dominate this phi,
1315  // and thus, must dominate the phi's uses, and so we will not hit the assert
1316  // below.
1317  NewDefTarget = onlySingleValue(MP);
1318  assert((NewDefTarget || MP->use_empty()) &&
1319  "We can't delete this memory phi");
1320  } else {
1321  NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
1322  }
1323 
1324  SmallSetVector<MemoryPhi *, 4> PhisToCheck;
1325 
1326  // Re-point the uses at our defining access
1327  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
1328  // Reset optimized on users of this store, and reset the uses.
1329  // A few notes:
1330  // 1. This is a slightly modified version of RAUW to avoid walking the
1331  // uses twice here.
1332  // 2. If we wanted to be complete, we would have to reset the optimized
1333  // flags on users of phi nodes if doing the below makes a phi node have all
1334  // the same arguments. Instead, we prefer users to removeMemoryAccess those
1335  // phi nodes, because doing it here would be N^3.
1336  if (MA->hasValueHandle())
1337  ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
1338  // Note: We assume MemorySSA is not used in metadata since it's not really
1339  // part of the IR.
1340 
1341  assert(NewDefTarget != MA && "Going into an infinite loop");
1342  while (!MA->use_empty()) {
1343  Use &U = *MA->use_begin();
1344  if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
1345  MUD->resetOptimized();
1346  if (OptimizePhis)
1347  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
1348  PhisToCheck.insert(MP);
1349  U.set(NewDefTarget);
1350  }
1351  }
1352 
1353  // The call below to erase will destroy MA, so we can't change the order we
1354  // are doing things here
1355  MSSA->removeFromLookups(MA);
1356  MSSA->removeFromLists(MA);
1357 
1358  // Optionally optimize Phi uses. This will recursively remove trivial phis.
1359  if (!PhisToCheck.empty()) {
1360  SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
1361  PhisToCheck.end()};
1362  PhisToCheck.clear();
1363 
1364  unsigned PhisSize = PhisToOptimize.size();
1365  while (PhisSize-- > 0)
1366  if (MemoryPhi *MP =
1367  cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
1368  tryRemoveTrivialPhi(MP);
1369  }
1370 }
1371 
1373  const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
1374  // First delete all uses of BB in MemoryPhis.
1375  for (BasicBlock *BB : DeadBlocks) {
1376  Instruction *TI = BB->getTerminator();
1377  assert(TI && "Basic block expected to have a terminator instruction");
1378  for (BasicBlock *Succ : successors(TI))
1379  if (!DeadBlocks.count(Succ))
1380  if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
1381  MP->unorderedDeleteIncomingBlock(BB);
1382  tryRemoveTrivialPhi(MP);
1383  }
1384  // Drop all references of all accesses in BB
1386  for (MemoryAccess &MA : *Acc)
1387  MA.dropAllReferences();
1388  }
1389 
1390  // Next, delete all memory accesses in each block
1391  for (BasicBlock *BB : DeadBlocks) {
1393  if (!Acc)
1394  continue;
1395  for (MemoryAccess &MA : llvm::make_early_inc_range(*Acc)) {
1396  MSSA->removeFromLookups(&MA);
1397  MSSA->removeFromLists(&MA);
1398  }
1399  }
1400 }
1401 
1402 void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
1403  for (auto &VH : UpdatedPHIs)
1404  if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
1405  tryRemoveTrivialPhi(MPhi);
1406 }
1407 
1409  const BasicBlock *BB = I->getParent();
1410  // Remove memory accesses in BB for I and all following instructions.
1411  auto BBI = I->getIterator(), BBE = BB->end();
1412  // FIXME: If this becomes too expensive, iterate until the first instruction
1413  // with a memory access, then iterate over MemoryAccesses.
1414  while (BBI != BBE)
1415  removeMemoryAccess(&*(BBI++));
1416  // Update phis in BB's successors to remove BB.
1417  SmallVector<WeakVH, 16> UpdatedPHIs;
1418  for (const BasicBlock *Successor : successors(BB)) {
1420  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
1421  MPhi->unorderedDeleteIncomingBlock(BB);
1422  UpdatedPHIs.push_back(MPhi);
1423  }
1424  }
1425  // Optimize trivial phis.
1426  tryRemoveTrivialPhis(UpdatedPHIs);
1427 }
1428 
1430  Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
1431  MemorySSA::InsertionPlace Point) {
1432  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
1433  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
1434  return NewAccess;
1435 }
1436 
1438  Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
1439  assert(I->getParent() == InsertPt->getBlock() &&
1440  "New and old access must be in the same block");
1441  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
1442  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
1443  InsertPt->getIterator());
1444  return NewAccess;
1445 }
1446 
1448  Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
1449  assert(I->getParent() == InsertPt->getBlock() &&
1450  "New and old access must be in the same block");
1451  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
1452  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
1453  ++InsertPt->getIterator());
1454  return NewAccess;
1455 }
i
i
Definition: README.txt:29
llvm::BasicBlock::end
iterator end()
Definition: BasicBlock.h:298
llvm::predecessors
pred_range predecessors(BasicBlock *BB)
Definition: CFG.h:127
llvm::MemoryPhi::unorderedDeleteIncomingIf
void unorderedDeleteIncomingIf(Fn &&Pred)
Definition: MemorySSA.h:610
llvm
---------------------— PointerInfo ------------------------------------—
Definition: AllocatorList.h:23
llvm::DominatorTreeBase::findNearestCommonDominator
NodeT * findNearestCommonDominator(NodeT *A, NodeT *B) const
Find nearest common dominator basic block for basic block A and B.
Definition: GenericDomTree.h:468
llvm::simple_ilist::empty
LLVM_NODISCARD bool empty() const
Check if the list is empty in constant time.
Definition: simple_ilist.h:131
llvm::MemorySSA::getWritableBlockDefs
DefsList * getWritableBlockDefs(const BasicBlock *BB) const
Definition: MemorySSA.h:814
llvm::make_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Definition: iterator_range.h:53
Metadata.h
llvm::MemorySSAUpdater::createMemoryAccessBefore
MemoryUseOrDef * createMemoryAccessBefore(Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt)
Create a MemoryAccess in MemorySSA before or after an existing MemoryAccess.
Definition: MemorySSAUpdater.cpp:1437
llvm::User::operands
op_range operands()
Definition: User.h:242
MemorySSAUpdater.h
llvm::DenseMapBase::lookup
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:197
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::User::dropAllReferences
void dropAllReferences()
Drop all references to operands.
Definition: User.h:299
getNewDefiningAccessForClone
static MemoryAccess * getNewDefiningAccessForClone(MemoryAccess *MA, const ValueToValueMapTy &VMap, PhiToDefMap &MPhiMap, bool CloneWasSimplified, MemorySSA *MSSA)
Definition: MemorySSAUpdater.cpp:569
llvm::SmallDenseMap
Definition: DenseMap.h:880
llvm::MemorySSA::dominates
bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
Definition: MemorySSA.cpp:2114
llvm::MemorySSA::renamePass
void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, SmallPtrSetImpl< BasicBlock * > &Visited)
Definition: MemorySSA.h:827
llvm::MemorySSAUpdater::removeBlocks
void removeBlocks(const SmallSetVector< BasicBlock *, 8 > &DeadBlocks)
Remove all MemoryAcceses in a set of BasicBlocks about to be deleted.
Definition: MemorySSAUpdater.cpp:1372
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:151
llvm::MemoryAccess::getReverseIterator
AllAccessType::reverse_self_iterator getReverseIterator()
Definition: MemorySSA.h:187
llvm::MemorySSA::insertIntoListsBefore
void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, AccessList::iterator)
Definition: MemorySSA.cpp:1630
Module.h
llvm::sys::path::end
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:233
llvm::MemorySSA::getLiveOnEntryDef
MemoryAccess * getLiveOnEntryDef() const
Definition: MemorySSA.h:749
llvm::sys::path::begin
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:224
llvm::DominatorTreeBase::Insert
static constexpr UpdateKind Insert
Definition: GenericDomTree.h:242
llvm::copy
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1598
llvm::MemorySSAUpdater::moveAllAfterSpliceBlocks
void moveAllAfterSpliceBlocks(BasicBlock *From, BasicBlock *To, Instruction *Start)
From block was spliced into From and To.
Definition: MemorySSAUpdater.cpp:1247
llvm::DominatorTreeBase::getNode
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Definition: GenericDomTree.h:351
llvm::SmallSet
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:134
llvm::mapped_iterator
Definition: STLExtras.h:277
llvm::SmallPtrSet
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:449
llvm::Value::user_begin
user_iterator user_begin()
Definition: Value.h:398
llvm::MemoryPhi
Represents phi nodes for memory accesses.
Definition: MemorySSA.h:487
STLExtras.h
llvm::BasicBlock::hasNPredecessors
bool hasNPredecessors(unsigned N) const
Return true if this block has exactly N predecessors.
Definition: BasicBlock.cpp:286
llvm::DomTreeNodeBase::getIDom
DomTreeNodeBase * getIDom() const
Definition: GenericDomTree.h:89
llvm::MemoryUse
Represents read-only accesses to memory.
Definition: MemorySSA.h:323
llvm::successors
succ_range successors(Instruction *I)
Definition: CFG.h:262
llvm::IDFCalculatorBase::calculate
void calculate(SmallVectorImpl< NodeTy * > &IDFBlocks)
Calculate iterated dominance frontiers.
Definition: GenericIteratedDominanceFrontier.h:131
llvm::iplist
An intrusive list with ownership and callbacks specified/controlled by ilist_traits,...
Definition: ilist.h:390
llvm::MemorySSAUpdater::applyUpdates
void applyUpdates(ArrayRef< CFGUpdate > Updates, DominatorTree &DT, bool UpdateDTFirst=false)
Apply CFG updates, analogous with the DT edge updates.
Definition: MemorySSAUpdater.cpp:810
llvm::MemorySSAUpdater::createMemoryAccessInBB
MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point)
Create a MemoryAccess in MemorySSA at a specified point in a block, with a specified clobbering defin...
Definition: MemorySSAUpdater.cpp:1429
llvm::MemoryPhi::getIncomingBlock
BasicBlock * getIncomingBlock(unsigned I) const
Return incoming basic block number i.
Definition: MemorySSA.h:548
llvm::DominatorTreeBase::applyUpdates
void applyUpdates(ArrayRef< UpdateType > Updates)
Inform the dominator tree about a sequence of CFG edge insertions and deletions and perform a batch u...
Definition: GenericDomTree.h:544
llvm::ArrayRef::empty
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:122
llvm::MemorySSA::insertIntoListsForBlock
void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, InsertionPlace)
Definition: MemorySSA.cpp:1598
llvm::GraphDiff
Definition: CFGDiff.h:57
Uses
SmallPtrSet< MachineInstr *, 2 > Uses
Definition: ARMLowOverheadLoops.cpp:579
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
llvm::MemoryPhi::getBasicBlockIndex
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
Definition: MemorySSA.h:580
llvm::lltok::equal
@ equal
Definition: LLToken.h:25
llvm::LoopBlocksRPO
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::DominatorTree::dominates
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:115
llvm::MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock
void updatePhisWhenInsertingUniqueBackedgeBlock(BasicBlock *LoopHeader, BasicBlock *LoopPreheader, BasicBlock *BackedgeBlock)
Update MemorySSA when inserting a unique backedge block for a loop.
Definition: MemorySSAUpdater.cpp:638
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:206
FormattedStream.h
llvm::MemorySSAUpdater::insertUse
void insertUse(MemoryUse *Use, bool RenameUses=false)
Definition: MemorySSAUpdater.cpp:245
llvm::SetVector< T, SmallVector< T, N >, SmallDenseSet< T, N > >::begin
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:82
llvm::MemoryAccess::getIterator
AllAccessType::self_iterator getIterator()
Get the iterators for the all access list and the defs only list We default to the all access list.
Definition: MemorySSA.h:181
llvm::Value::use_iterator
use_iterator_impl< Use > use_iterator
Definition: Value.h:354
llvm::MemorySSA::isLiveOnEntryDef
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Definition: MemorySSA.h:745
llvm::MemorySSA::moveTo
void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where)
Definition: MemorySSA.cpp:1674
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::SmallVectorImpl::append
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:648
llvm::User
Definition: User.h:44
llvm::MemorySSA::Beginning
@ Beginning
Definition: MemorySSA.h:795
llvm::BasicBlock::begin
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:296
llvm::MemorySSAUpdater::moveAfter
void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where)
Definition: MemorySSAUpdater.cpp:1196
llvm::MemorySSAUpdater::updateForClonedBlockIntoPred
void updateForClonedBlockIntoPred(BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM)
Definition: MemorySSAUpdater.cpp:757
llvm::MemorySSAUpdater::moveToPlace
void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, MemorySSA::InsertionPlace Where)
Definition: MemorySSAUpdater.cpp:1200
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::MemoryAccess::getReverseDefsIterator
DefsOnlyType::reverse_self_iterator getReverseDefsIterator()
Definition: MemorySSA.h:199
llvm::Instruction
Definition: Instruction.h:45
llvm::MemoryPhi::addIncoming
void addIncoming(MemoryAccess *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Definition: MemorySSA.h:569
llvm::MemorySSA::getBlockAccesses
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
Definition: MemorySSA.h:765
llvm::Use::getUser
User * getUser() const
Returns the User that contains this Use.
Definition: Use.h:73
llvm::MemorySSA::removeFromLookups
void removeFromLookups(MemoryAccess *)
Properly remove MA from all of MemorySSA's lookup tables.
Definition: MemorySSA.cpp:1804
SmallPtrSet.h
llvm::Instruction::getSuccessor
BasicBlock * getSuccessor(unsigned Idx) const
Return the specified successor. This instruction must be a terminator.
Definition: Instruction.cpp:777
llvm::iterator_range::empty
bool empty() const
Definition: iterator_range.h:46
llvm::MemoryPhi::getNumIncomingValues
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: MemorySSA.h:535
llvm::SetVector< T, SmallVector< T, N >, SmallDenseSet< T, N > >::empty
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:72
llvm::MemoryAccess::getBlock
BasicBlock * getBlock() const
Definition: MemorySSA.h:162
LoopIterator.h
llvm::Value::use_empty
bool use_empty() const
Definition: Value.h:345
llvm::MemorySSAUpdater::removeDuplicatePhiEdgesBetween
void removeDuplicatePhiEdgesBetween(const BasicBlock *From, const BasicBlock *To)
Update the MemoryPhi in To to have a single incoming edge from From, following a CFG change that repl...
Definition: MemorySSAUpdater.cpp:539
llvm::ValueHandleBase::ValueIsRAUWd
static void ValueIsRAUWd(Value *Old, Value *New)
Definition: Value.cpp:1180
llvm::MemoryPhi::block_begin
block_iterator block_begin()
Definition: MemorySSA.h:508
Operands
mir Rename Register Operands
Definition: MIRNamerPass.cpp:78
llvm::Value::hasValueHandle
bool hasValueHandle() const
Return true if there is a value handle associated with this value.
Definition: Value.h:551
llvm::MemorySSAUpdater::applyInsertUpdates
void applyInsertUpdates(ArrayRef< CFGUpdate > Updates, DominatorTree &DT)
Apply CFG insert updates, analogous with the DT edge updates.
Definition: MemorySSAUpdater.cpp:856
llvm::Value::user_end
user_iterator user_end()
Definition: Value.h:406
llvm::Use::set
void set(Value *Val)
Definition: Value.h:860
BasicBlock.h
llvm::simple_ilist::begin
iterator begin()
Definition: simple_ilist.h:117
llvm::MemorySSAUpdater::removeEdge
void removeEdge(BasicBlock *From, BasicBlock *To)
Update the MemoryPhi in To following an edge deletion between From and To.
Definition: MemorySSAUpdater.cpp:532
llvm::DomTreeNodeBase::getBlock
NodeT * getBlock() const
Definition: GenericDomTree.h:88
llvm::MemorySSAUpdater::changeToUnreachable
void changeToUnreachable(const Instruction *I)
Instruction I will be changed to an unreachable.
Definition: MemorySSAUpdater.cpp:1408
llvm::MemorySSAUpdater::updateForClonedLoop
void updateForClonedLoop(const LoopBlocksRPO &LoopBlocks, ArrayRef< BasicBlock * > ExitBlocks, const ValueToValueMapTy &VM, bool IgnoreIncomingWithNoClones=false)
Update MemorySSA after a loop was cloned, given the blocks in RPO order, the exit blocks and a 1:1 ma...
Definition: MemorySSAUpdater.cpp:677
llvm::DenseMap
Definition: DenseMap.h:714
llvm::MemorySSA
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition: MemorySSA.h:708
llvm::pdb::Empty
@ Empty
Definition: PDBTypes.h:394
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::MemorySSA::getMemoryAccess
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition: MemorySSA.h:725
llvm::make_early_inc_range
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:572
llvm::is_contained
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:1612
llvm::MemoryDef
Represents a read-write access to memory, whether it is a must-alias, or a may-alias.
Definition: MemorySSA.h:380
llvm::MemorySSA::getDomTree
DominatorTree & getDomTree() const
Definition: MemorySSA.h:733
llvm::TrackingVH
Value handle that tracks a Value across RAUW.
Definition: ValueHandle.h:331
llvm::MemorySSA::InsertionPlace
InsertionPlace
Used in various insertion functions to specify whether we are talking about the beginning or end of a...
Definition: MemorySSA.h:795
IRBuilder.h
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
onlySingleValue
static MemoryAccess * onlySingleValue(MemoryPhi *MP)
If all arguments of a MemoryPHI are defined by the same incoming argument, return that argument.
Definition: MemorySSAUpdater.cpp:557
llvm::DominatorTree::isReachableFromEntry
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:328
llvm::IDFCalculator
Definition: IteratedDominanceFrontier.h:39
llvm::Value::use_begin
use_iterator use_begin()
Definition: Value.h:361
llvm::IDFCalculatorBase::setDefiningBlocks
void setDefiningBlocks(const SmallPtrSetImpl< NodeTy * > &Blocks)
Give the IDF calculator the set of blocks in which the value is defined.
Definition: GenericIteratedDominanceFrontier.h:75
llvm::MemoryPhi::getIncomingValue
MemoryAccess * getIncomingValue(unsigned I) const
Return incoming value number x.
Definition: MemorySSA.h:538
llvm::SmallPtrSetImpl::count
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:382
llvm::SetVector< T, SmallVector< T, N >, SmallDenseSet< T, N > >::insert
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:141
llvm::MemorySSA::createDefinedAccess
MemoryUseOrDef * createDefinedAccess(Instruction *, MemoryAccess *, const MemoryUseOrDef *Template=nullptr, bool CreationMustSucceed=true)
Definition: MemorySSA.cpp:1705
llvm::MemorySSAUpdater::createMemoryAccessAfter
MemoryUseOrDef * createMemoryAccessAfter(Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt)
Definition: MemorySSAUpdater.cpp:1447
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::MemorySSAUpdater::insertDef
void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
Definition: MemorySSAUpdater.cpp:314
DataLayout.h
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
llvm::MemorySSA::getBlockDefs
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
Definition: MemorySSA.h:773
llvm::pred_size
unsigned pred_size(const BasicBlock *BB)
Get the number of predecessors of BB.
Definition: CFG.h:124
llvm::Value::replaceAllUsesWith
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:520
llvm::MemoryPhi::block_end
block_iterator block_end()
Definition: MemorySSA.h:516
llvm::MemoryPhi::setIncomingValue
void setIncomingValue(unsigned I, MemoryAccess *V)
Definition: MemorySSA.h:539
llvm::MemorySSA::End
@ End
Definition: MemorySSA.h:795
S
add sub stmia L5 ldr r0 bl L_printf $stub Instead of a and a wouldn t it be better to do three moves *Return an aggregate type is even return S
Definition: README.txt:210
llvm::SmallSet::insert
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:180
llvm::MemoryAccess
Definition: MemorySSA.h:140
llvm::ValueMap< const Value *, WeakTrackingVH >
llvm::DenseMapBase< SmallDenseMap< KeyT, ValueT, 4, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >, KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >::insert
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:207
llvm::BasicBlock::getTerminator
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:148
llvm::SmallPtrSetImplBase::size
size_type size() const
Definition: SmallPtrSet.h:92
llvm::MemorySSAUpdater::updateExitBlocksForClonedLoop
void updateExitBlocksForClonedLoop(ArrayRef< BasicBlock * > ExitBlocks, const ValueToValueMapTy &VMap, DominatorTree &DT)
Update phi nodes in exit block successors following cloning.
Definition: MemorySSAUpdater.cpp:788
llvm::SetVector< T, SmallVector< T, N >, SmallDenseSet< T, N > >::clear
void clear()
Completely clear the SetVector.
Definition: SetVector.h:220
llvm::MemorySSAUpdater::moveBefore
void moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where)
Definition: MemorySSAUpdater.cpp:1191
llvm::MemoryUseOrDef::getDefiningAccess
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition: MemorySSA.h:260
llvm::Successor
@ Successor
Definition: SIMachineScheduler.h:33
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:314
llvm::ArrayRef::begin
iterator begin() const
Definition: ArrayRef.h:153
llvm::MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor
void wireOldPredecessorsToNewImmediatePredecessor(BasicBlock *Old, BasicBlock *New, ArrayRef< BasicBlock * > Preds, bool IdenticalEdgesWereMerged=true)
A new empty BasicBlock (New) now branches directly to Old.
Definition: MemorySSAUpdater.cpp:1268
llvm::MemorySSAUpdater::moveAllAfterMergeBlocks
void moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To, Instruction *Start)
From block was merged into To.
Definition: MemorySSAUpdater.cpp:1258
GlobalVariable.h
llvm::pred_end
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:112
llvm::DenseMapBase::size
unsigned size() const
Definition: DenseMap.h:100
llvm::SetVector< T, SmallVector< T, N >, SmallDenseSet< T, N > >::count
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition: SetVector.h:215
llvm::MemorySSA::removeFromLists
void removeFromLists(MemoryAccess *, bool ShouldDelete=true)
Properly remove MA from all of MemorySSA's lists.
Definition: MemorySSA.cpp:1831
llvm::MemoryUseOrDef
Class that has the common methods + fields of memory uses/defs.
Definition: MemorySSA.h:250
llvm::SmallVectorImpl::clear
void clear()
Definition: SmallVector.h:585
llvm::MemorySSA::getWritableBlockAccesses
AccessList * getWritableBlockAccesses(const BasicBlock *BB) const
Definition: MemorySSA.h:808
IteratedDominanceFrontier.h
llvm::pred_begin
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:109
llvm::SetVector< T, SmallVector< T, N >, SmallDenseSet< T, N > >::end
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:92
MemorySSA.h
llvm::simple_ilist::end
iterator end()
Definition: simple_ilist.h:119
llvm::User::getNumOperands
unsigned getNumOperands() const
Definition: User.h:191
llvm::MemoryAccess::getDefsIterator
DefsOnlyType::self_iterator getDefsIterator()
Definition: MemorySSA.h:193
llvm::User::op_begin
op_iterator op_begin()
Definition: User.h:234
Dominators.h
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
llvm::simple_ilist
A simple intrusive list implementation.
Definition: simple_ilist.h:78
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::SmallSetVector
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:307
BB
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can lr mov lr str ldr sxth r3 mla r4 and then merge mul and lr str ldr sxth r3 mla r4 It also increase the likelihood the store may become dead bb27 Successors according to LLVM BB
Definition: README.txt:39
setMemoryPhiValueForBlock
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB, MemoryAccess *NewDef)
Definition: MemorySSAUpdater.cpp:291
llvm::Value::replaceUsesWithIf
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition: Value.cpp:528
llvm::ValueMap::lookup
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: ValueMap.h:165
llvm::simple_ilist::insert
iterator insert(iterator I, reference Node)
Insert a node by reference; never copies.
Definition: simple_ilist.h:159
LLVMContext.h
From
BlockVerifier::State From
Definition: BlockVerifier.cpp:55
ProcessBlock
static bool ProcessBlock(BasicBlock &BB, DominatorTree &DT, LoopInfo &LI, AAResults &AA)
Definition: Sink.cpp:179
llvm::MemoryUseOrDef::setDefiningAccess
void setDefiningAccess(MemoryAccess *DMA, bool Optimized=false, Optional< AliasResult > AR=AliasResult(AliasResult::MayAlias))
Definition: MemorySSA.h:302
copy
we should consider alternate ways to model stack dependencies Lots of things could be done in WebAssemblyTargetTransformInfo cpp there are numerous optimization related hooks that can be overridden in WebAssemblyTargetLowering Instead of the OptimizeReturned which should consider preserving the returned attribute through to MachineInstrs and extending the MemIntrinsicResults pass to do this optimization on calls too That would also let the WebAssemblyPeephole pass clean up dead defs for such as it does for stores Consider implementing and or getMachineCombinerPatterns Find a clean way to fix the problem which leads to the Shrink Wrapping pass being run after the WebAssembly PEI pass When setting multiple variables to the same we currently get code like const It could be done with a smaller encoding like local tee $pop5 local copy
Definition: README.txt:101
Debug.h
llvm::Value::users
iterator_range< user_iterator > users()
Definition: Value.h:422
llvm::ArrayRef::end
iterator end() const
Definition: ArrayRef.h:154
llvm::MemorySSAUpdater::removeMemoryAccess
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
Definition: MemorySSAUpdater.cpp:1305
SetVector.h
llvm::DominatorTreeBase::Delete
static constexpr UpdateKind Delete
Definition: GenericDomTree.h:243
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:44
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:364
llvm::User::op_end
op_iterator op_end()
Definition: User.h:236