1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the MemorySSA class.
10 //
11 //===----------------------------------------------------------------------===//
12 
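// A typical client obtains the analysis result and queries its walker,
// roughly as in this illustrative sketch (not part of the original file; AM,
// F and I stand for a FunctionAnalysisManager, a Function and an Instruction):
//
//   MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//   MemoryAccess *MA = MSSA.getMemoryAccess(&I);  // may be null
//   MemoryAccess *Clobber =
//       MA ? MSSA.getWalker()->getClobberingMemoryAccess(MA) : nullptr;
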
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <memory>
#include <utility>

60 using namespace llvm;
61 
62 #define DEBUG_TYPE "memoryssa"
63 
static cl::opt<std::string>
    DotCFGMSSA("dot-cfg-mssa",
               cl::value_desc("file name for generated dot file"),
               cl::desc("file name for generated dot file"), cl::init(""));
68 
INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));
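
// This limit is exposed as a (hidden) command-line flag; an invocation such as
// "opt -memssa-check-limit=500 ..." would raise it (illustrative example only).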
86 
87 // Always verify MemorySSA if expensive checking is enabled.
88 #ifdef EXPENSIVE_CHECKS
89 bool llvm::VerifyMemorySSA = true;
90 #else
91 bool llvm::VerifyMemorySSA = false;
92 #endif
/// Enables MemorySSA as a dependency for loop passes in the legacy pass manager.
cl::opt<bool> llvm::EnableMSSALoopDependency(
    "enable-mssa-loop-dependency", cl::Hidden, cl::init(true),
    cl::desc("Enable MemorySSA dependency for loop pass manager"));
97 
static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));
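
// Verification can also be requested explicitly on the command line, e.g.
// "opt -verify-memoryssa -passes='loop-mssa(licm)' ..." (illustrative
// invocation only).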
101 
102 namespace llvm {
103 
104 /// An assembly annotator class to print Memory SSA information in
105 /// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;

  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};
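
// With this writer installed, a print of MemorySSA interleaves accesses with
// the IR, e.g. (illustrative output only):
//
//   ; 1 = MemoryDef(liveOnEntry)
//   store i32 0, i32* %p
//   ; MemoryUse(1)
//   %v = load i32, i32* %p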
126 
127 } // end namespace llvm
128 
129 namespace {
130 
131 /// Our current alias analysis API differentiates heavily between calls and
132 /// non-calls, and functions called on one usually assert on the other.
133 /// This class encapsulates the distinction to simplify other code that wants
134 /// "Memory affecting instructions and related data" to use as a key.
135 /// For example, this class is used as a densemap key in the use optimizer.
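/// A rough sketch of the intended use, mirroring the use optimizer further
/// down in this file (illustrative only):
/// \code
///   MemoryLocOrCall UseMLOC(MU);   // MU is the MemoryUse being optimized
///   auto &LocInfo = LocStackInfo[UseMLOC];
/// \endcode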
136 class MemoryLocOrCall {
137 public:
138  bool IsCall = false;
139 
140  MemoryLocOrCall(MemoryUseOrDef *MUD)
141  : MemoryLocOrCall(MUD->getMemoryInst()) {}
142  MemoryLocOrCall(const MemoryUseOrDef *MUD)
143  : MemoryLocOrCall(MUD->getMemoryInst()) {}
144 
145  MemoryLocOrCall(Instruction *Inst) {
146  if (auto *C = dyn_cast<CallBase>(Inst)) {
147  IsCall = true;
148  Call = C;
149  } else {
150  IsCall = false;
151  // There is no such thing as a memorylocation for a fence inst, and it is
152  // unique in that regard.
153  if (!isa<FenceInst>(Inst))
154  Loc = MemoryLocation::get(Inst);
155  }
156  }
157 
158  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
159 
160  const CallBase *getCall() const {
161  assert(IsCall);
162  return Call;
163  }
164 
165  MemoryLocation getLoc() const {
166  assert(!IsCall);
167  return Loc;
168  }
169 
170  bool operator==(const MemoryLocOrCall &Other) const {
171  if (IsCall != Other.IsCall)
172  return false;
173 
174  if (!IsCall)
175  return Loc == Other.Loc;
176 
177  if (Call->getCalledOperand() != Other.Call->getCalledOperand())
178  return false;
179 
180  return Call->arg_size() == Other.Call->arg_size() &&
181  std::equal(Call->arg_begin(), Call->arg_end(),
182  Other.Call->arg_begin());
183  }
184 
185 private:
186  union {
187  const CallBase *Call;
188  MemoryLocation Loc;
189  };
190 };
191 
192 } // end anonymous namespace
193 
194 namespace llvm {
195 
196 template <> struct DenseMapInfo<MemoryLocOrCall> {
197  static inline MemoryLocOrCall getEmptyKey() {
198  return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
199  }
200 
201  static inline MemoryLocOrCall getTombstoneKey() {
202  return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
203  }
204 
  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledOperand()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }
219 
220  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
221  return LHS == RHS;
222  }
223 };
224 
225 } // end namespace llvm
226 
227 /// This does one-way checks to see if Use could theoretically be hoisted above
228 /// MayClobber. This will not check the other way around.
229 ///
230 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
231 /// MayClobber, with no potentially clobbering operations in between them.
232 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
233 static bool areLoadsReorderable(const LoadInst *Use,
234  const LoadInst *MayClobber) {
235  bool VolatileUse = Use->isVolatile();
236  bool VolatileClobber = MayClobber->isVolatile();
237  // Volatile operations may never be reordered with other volatile operations.
238  if (VolatileUse && VolatileClobber)
239  return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'
243 
244  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
245  // is weaker, it can be moved above other loads. We just need to be sure that
246  // MayClobber isn't an acquire load, because loads can't be moved above
247  // acquire loads.
248  //
249  // Note that this explicitly *does* allow the free reordering of monotonic (or
250  // weaker) loads of the same address.
251  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
255 }
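
// For instance (illustrative restatement): a monotonic load may be freely
// reordered above another monotonic load of the same address, but a seq_cst
// load may not be hoisted above any other load, and no load may be hoisted
// above an acquire (or stronger) load.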
256 
257 namespace {
258 
struct ClobberAlias {
  bool IsClobber;
  Optional<AliasResult> AR;
};
263 
264 } // end anonymous namespace
265 
266 // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
267 // ignored if IsClobber = false.
268 template <typename AliasAnalysisType>
static ClobberAlias
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  Optional<AliasResult> AR;

276  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
277  // These intrinsics will show up as affecting memory, but they are just
278  // markers, mostly.
279  //
280  // FIXME: We probably don't actually want MemorySSA to model these at all
281  // (including creating MemoryAccesses for them): we just end up inventing
282  // clobbers where they don't really exist at all. Please see D43269 for
283  // context.
284  switch (II->getIntrinsicID()) {
285  case Intrinsic::invariant_start:
286  case Intrinsic::invariant_end:
287  case Intrinsic::assume:
288  case Intrinsic::experimental_noalias_scope_decl:
289  return {false, AliasResult(AliasResult::NoAlias)};
290  case Intrinsic::dbg_addr:
291  case Intrinsic::dbg_declare:
292  case Intrinsic::dbg_label:
293  case Intrinsic::dbg_value:
294  llvm_unreachable("debuginfo shouldn't have associated defs!");
295  default:
296  break;
297  }
298  }
299 
  if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
    ModRefInfo I = AA.getModRefInfo(DefInst, CB);
    AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
    return {isModOrRefSet(I), AR};
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad),
              AliasResult(AliasResult::MayAlias)};

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
  return {isModSet(I), AR};
314 }
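
// As an illustration (not from the original comments): if DefInst is
// "store i32 1, i32* %p" and UseLoc describes %p, getModRefInfo reports a Mod
// effect, so the result has IsClobber = true with the alias result reported by
// AA; two loads are instead handled by areLoadsReorderable above.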
315 
316 template <typename AliasAnalysisType>
317 static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
318  const MemoryUseOrDef *MU,
319  const MemoryLocOrCall &UseMLOC,
320  AliasAnalysisType &AA) {
321  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
322  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
326  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
327  AA);
328 }
329 
// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
333  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
334 }
335 
336 namespace {
337 
338 struct UpwardsMemoryQuery {
339  // True if our original query started off as a call
340  bool IsCall = false;
341  // The pointer location we started the query with. This will be empty if
342  // IsCall is true.
343  MemoryLocation StartingLoc;
344  // This is the instruction we were querying about.
345  const Instruction *Inst = nullptr;
346  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias);
  bool SkipSelfAccess = false;
350 
351  UpwardsMemoryQuery() = default;
352 
353  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
354  : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
355  if (!IsCall)
356  StartingLoc = MemoryLocation::get(Inst);
357  }
358 };
359 
360 } // end anonymous namespace
361 
362 template <typename AliasAnalysisType>
363 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
364  const Instruction *I) {
365  // If the memory can't be changed, then loads of the memory can't be
366  // clobbered.
367  if (auto *LI = dyn_cast<LoadInst>(I))
368  return I->hasMetadata(LLVMContext::MD_invariant_load) ||
369  AA.pointsToConstantMemory(MemoryLocation::get(LI));
370  return false;
371 }
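
// e.g. (illustrative IR, not from the original file): a load tagged as
//
//   %v = load i32, i32* %p, !invariant.load !0
//
// or a load that AA can prove reads constant memory gets liveOnEntry as its
// defining access without any walking.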
372 
/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
375 ///
376 /// This is meant to be as simple and self-contained as possible. Because it
377 /// uses no cache, etc., it can be relatively expensive.
378 ///
379 /// \param Start The MemoryAccess that we want to walk from.
380 /// \param ClobberAt A clobber for Start.
381 /// \param StartLoc The MemoryLocation for Start.
382 /// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
383 /// \param Query The UpwardsMemoryQuery we used for our search.
384 /// \param AA The AliasAnalysis we used for our search.
385 /// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
386 
387 template <typename AliasAnalysisType>
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
391  const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
392  bool AllowImpreciseClobber = false) {
393  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
394 
395  if (MSSA.isLiveOnEntryDef(Start)) {
396  assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
397  "liveOnEntry must clobber itself");
398  return;
399  }
400 
  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
405  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
406  // is found, complain.
407  while (!Worklist.empty()) {
408  auto MAP = Worklist.pop_back_val();
409  // All we care about is that nothing from Start to ClobberAt clobbers Start.
410  // We learn nothing from revisiting nodes.
411  if (!VisitedPhis.insert(MAP).second)
412  continue;
413 
414  for (const auto *MA : def_chain(MAP.first)) {
415  if (MA == ClobberAt) {
416  if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
417  // instructionClobbersQuery isn't essentially free, so don't use `|=`,
418  // since it won't let us short-circuit.
419  //
420  // Also, note that this can't be hoisted out of the `Worklist` loop,
421  // since MD may only act as a clobber for 1 of N MemoryLocations.
422  FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
423  if (!FoundClobber) {
424  ClobberAlias CA =
425  instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
426  if (CA.IsClobber) {
427  FoundClobber = true;
428  // Not used: CA.AR;
429  }
430  }
431  }
432  break;
433  }
434 
435  // We should never hit liveOnEntry, unless it's the clobber.
436  assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
437 
438  if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
439  // If Start is a Def, skip self.
440  if (MD == Start)
441  continue;
442 
443  assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
444  .IsClobber &&
445  "Found clobber before reaching ClobberAt!");
446  continue;
447  }
448 
449  if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
450  (void)MU;
451  assert (MU == Start &&
452  "Can only find use in def chain if Start is a use");
453  continue;
454  }
455 
456  assert(isa<MemoryPhi>(MA));
457 
458  // Add reachable phi predecessors
459  for (auto ItB = upward_defs_begin(
460  {const_cast<MemoryAccess *>(MA), MAP.second},
461  MSSA.getDomTree()),
462  ItE = upward_defs_end();
463  ItB != ItE; ++ItB)
464  if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
465  Worklist.emplace_back(*ItB);
466  }
467  }
468 
469  // If the verify is done following an optimization, it's possible that
470  // ClobberAt was a conservative clobbering, that we can now infer is not a
471  // true clobbering access. Don't fail the verify if that's the case.
472  // We do have accesses that claim they're optimized, but could be optimized
473  // further. Updating all these can be expensive, so allow it for now (FIXME).
474  if (AllowImpreciseClobber)
475  return;
476 
477  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
478  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
479  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
480  "ClobberAt never acted as a clobber");
481 }
482 
483 namespace {
484 
485 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
486 /// in one class.
487 template <class AliasAnalysisType> class ClobberWalker {
488  /// Save a few bytes by using unsigned instead of size_t.
489  using ListIndex = unsigned;
490 
491  /// Represents a span of contiguous MemoryDefs, potentially ending in a
492  /// MemoryPhi.
493  struct DefPath {
494  MemoryLocation Loc;
495  // Note that, because we always walk in reverse, Last will always dominate
496  // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    Optional<ListIndex> Previous;
500 
501  DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
502  Optional<ListIndex> Previous)
503  : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
504 
505  DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
506  Optional<ListIndex> Previous)
507  : DefPath(Loc, Init, Init, Previous) {}
508  };
509 
510  const MemorySSA &MSSA;
511  AliasAnalysisType &AA;
512  DominatorTree &DT;
513  UpwardsMemoryQuery *Query;
514  unsigned *UpwardWalkLimit;
515 
516  // Phi optimization bookkeeping:
  // List of DefPath to process during the current phi optimization walk.
  SmallVector<DefPath, 32> Paths;
  // List of visited <Access, Location> pairs; we can skip paths already
  // visited with the same memory location.
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
522  // Record if phi translation has been performed during the current phi
523  // optimization walk, as merging alias results after phi translation can
524  // yield incorrect results. Context in PR46156.
525  bool PerformedPhiTranslation = false;
526 
527  /// Find the nearest def or phi that `From` can legally be optimized to.
528  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
529  assert(From->getNumOperands() && "Phi with no operands?");
530 
    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
534  while ((Node = Node->getIDom())) {
535  auto *Defs = MSSA.getBlockDefs(Node->getBlock());
536  if (Defs)
537  return &*Defs->rbegin();
538  }
539  return Result;
540  }
541 
542  /// Result of calling walkToPhiOrClobber.
543  struct UpwardsWalkResult {
544  /// The "Result" of the walk. Either a clobber, the last thing we walked, or
545  /// both. Include alias info when clobber found.
    MemoryAccess *Result;
    bool IsKnownClobber;
    Optional<AliasResult> AR;
  };
550 
551  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
552  /// This will update Desc.Last as it walks. It will (optionally) also stop at
553  /// StopAt.
554  ///
555  /// This does not test for whether StopAt is a clobber
556  UpwardsWalkResult
557  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
558  const MemoryAccess *SkipStopAt = nullptr) const {
559  assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
560  assert(UpwardWalkLimit && "Need a valid walk limit");
561  bool LimitAlreadyReached = false;
562  // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
563  // it to 1. This will not do any alias() calls. It either returns in the
564  // first iteration in the loop below, or is set back to 0 if all def chains
565  // are free of MemoryDefs.
566  if (!*UpwardWalkLimit) {
567  *UpwardWalkLimit = 1;
568  LimitAlreadyReached = true;
569  }
570 
571  for (MemoryAccess *Current : def_chain(Desc.Last)) {
572  Desc.Last = Current;
573  if (Current == StopAt || Current == SkipStopAt)
574  return {Current, false, AliasResult(AliasResult::MayAlias)};
575 
576  if (auto *MD = dyn_cast<MemoryDef>(Current)) {
577  if (MSSA.isLiveOnEntryDef(MD))
578  return {MD, true, AliasResult(AliasResult::MustAlias)};
579 
580  if (!--*UpwardWalkLimit)
581  return {Current, true, AliasResult(AliasResult::MayAlias)};
582 
583  ClobberAlias CA =
584  instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
585  if (CA.IsClobber)
586  return {MD, true, CA.AR};
587  }
588  }
589 
590  if (LimitAlreadyReached)
591  *UpwardWalkLimit = 0;
592 
593  assert(isa<MemoryPhi>(Desc.Last) &&
594  "Ended at a non-clobber that's not a phi?");
595  return {Desc.Last, false, AliasResult(AliasResult::MayAlias)};
596  }
597 
598  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
599  ListIndex PriorNode) {
600  auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT,
601  &PerformedPhiTranslation);
602  auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
603  for (const MemoryAccessPair &P : UpwardDefs) {
604  PausedSearches.push_back(Paths.size());
605  Paths.emplace_back(P.second, P.first, PriorNode);
606  }
607  }
608 
609  /// Represents a search that terminated after finding a clobber. This clobber
610  /// may or may not be present in the path of defs from LastNode..SearchStart,
611  /// since it may have been retrieved from cache.
612  struct TerminatedPath {
613  MemoryAccess *Clobber;
614  ListIndex LastNode;
615  };
616 
617  /// Get an access that keeps us from optimizing to the given phi.
618  ///
619  /// PausedSearches is an array of indices into the Paths array. Its incoming
620  /// value is the indices of searches that stopped at the last phi optimization
621  /// target. It's left in an unspecified state.
622  ///
623  /// If this returns None, NewPaused is a vector of searches that terminated
624  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
627  SmallVectorImpl<ListIndex> &PausedSearches,
628  SmallVectorImpl<ListIndex> &NewPaused,
629  SmallVectorImpl<TerminatedPath> &Terminated) {
630  assert(!PausedSearches.empty() && "No searches to continue?");
631 
632  // BFS vs DFS really doesn't make a difference here, so just do a DFS with
633  // PausedSearches as our stack.
634  while (!PausedSearches.empty()) {
635  ListIndex PathIndex = PausedSearches.pop_back_val();
636  DefPath &Node = Paths[PathIndex];
637 
638  // If we've already visited this path with this MemoryLocation, we don't
639  // need to do so again.
640  //
641  // NOTE: That we just drop these paths on the ground makes caching
642  // behavior sporadic. e.g. given a diamond:
643  // A
644  // B C
645  // D
646  //
647  // ...If we walk D, B, A, C, we'll only cache the result of phi
648  // optimization for A, B, and D; C will be skipped because it dies here.
649  // This arguably isn't the worst thing ever, since:
650  // - We generally query things in a top-down order, so if we got below D
651  // without needing cache entries for {C, MemLoc}, then chances are
652  // that those cache entries would end up ultimately unused.
653  // - We still cache things for A, so C only needs to walk up a bit.
654  // If this behavior becomes problematic, we can fix without a ton of extra
655  // work.
656  if (!VisitedPhis.insert({Node.Last, Node.Loc}).second) {
657  if (PerformedPhiTranslation) {
658  // If visiting this path performed Phi translation, don't continue,
659  // since it may not be correct to merge results from two paths if one
660  // relies on the phi translation.
661  TerminatedPath Term{Node.Last, PathIndex};
662  return Term;
663  }
664  continue;
665  }
666 
667  const MemoryAccess *SkipStopWhere = nullptr;
668  if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
669  assert(isa<MemoryDef>(Query->OriginalAccess));
670  SkipStopWhere = Query->OriginalAccess;
671  }
672 
673  UpwardsWalkResult Res = walkToPhiOrClobber(Node,
674  /*StopAt=*/StopWhere,
675  /*SkipStopAt=*/SkipStopWhere);
676  if (Res.IsKnownClobber) {
677  assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
678 
679  // If this wasn't a cache hit, we hit a clobber when walking. That's a
680  // failure.
681  TerminatedPath Term{Res.Result, PathIndex};
682  if (!MSSA.dominates(Res.Result, StopWhere))
683  return Term;
684 
685  // Otherwise, it's a valid thing to potentially optimize to.
686  Terminated.push_back(Term);
687  continue;
688  }
689 
690  if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
691  // We've hit our target. Save this path off for if we want to continue
692  // walking. If we are in the mode of skipping the OriginalAccess, and
693  // we've reached back to the OriginalAccess, do not save path, we've
694  // just looped back to self.
695  if (Res.Result != SkipStopWhere)
696  NewPaused.push_back(PathIndex);
697  continue;
698  }
699 
700  assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
701  addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
702  }
703 
704  return None;
705  }
706 
707  template <typename T, typename Walker>
708  struct generic_def_path_iterator
709  : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
710  std::forward_iterator_tag, T *> {
711  generic_def_path_iterator() {}
712  generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
713 
714  T &operator*() const { return curNode(); }
715 
716  generic_def_path_iterator &operator++() {
717  N = curNode().Previous;
718  return *this;
719  }
720 
721  bool operator==(const generic_def_path_iterator &O) const {
722  if (N.hasValue() != O.N.hasValue())
723  return false;
724  return !N.hasValue() || *N == *O.N;
725  }
726 
727  private:
728  T &curNode() const { return W->Paths[*N]; }
729 
    Walker *W = nullptr;
    Optional<ListIndex> N = None;
  };
733 
734  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
735  using const_def_path_iterator =
736  generic_def_path_iterator<const DefPath, const ClobberWalker>;
737 
738  iterator_range<def_path_iterator> def_path(ListIndex From) {
739  return make_range(def_path_iterator(this, From), def_path_iterator());
740  }
741 
742  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
743  return make_range(const_def_path_iterator(this, From),
744  const_def_path_iterator());
745  }
746 
747  struct OptznResult {
748  /// The path that contains our result.
749  TerminatedPath PrimaryClobber;
750  /// The paths that we can legally cache back from, but that aren't
751  /// necessarily the result of the Phi optimization.
752  SmallVector<TerminatedPath, 4> OtherClobbers;
753  };
754 
755  ListIndex defPathIndex(const DefPath &N) const {
756  // The assert looks nicer if we don't need to do &N
757  const DefPath *NP = &N;
758  assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
759  "Out of bounds DefPath!");
760  return NP - &Paths.front();
761  }
762 
763  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
764  /// that act as legal clobbers. Note that this won't return *all* clobbers.
765  ///
766  /// Phi optimization algorithm tl;dr:
767  /// - Find the earliest def/phi, A, we can optimize to
768  /// - Find if all paths from the starting memory access ultimately reach A
769  /// - If not, optimization isn't possible.
770  /// - Otherwise, walk from A to another clobber or phi, A'.
771  /// - If A' is a def, we're done.
772  /// - If A' is a phi, try to optimize it.
773  ///
774  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
775  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
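  ///
  /// A small illustration (not part of the original comment): if every path
  /// from the starting access reaches the same dominating def A without
  /// hitting an aliasing store, the phi is optimized past and A is the
  /// clobber; if one incoming path contains an aliasing store, that store is
  /// returned as the blocking access instead.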
776  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
777  const MemoryLocation &Loc) {
778  assert(Paths.empty() && VisitedPhis.empty() && !PerformedPhiTranslation &&
779  "Reset the optimization state.");
780 
781  Paths.emplace_back(Loc, Start, Phi, None);
782  // Stores how many "valid" optimization nodes we had prior to calling
783  // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
784  auto PriorPathsSize = Paths.size();
785 
786  SmallVector<ListIndex, 16> PausedSearches;
787  SmallVector<ListIndex, 8> NewPaused;
788  SmallVector<TerminatedPath, 4> TerminatedPaths;
789 
790  addSearches(Phi, PausedSearches, 0);
791 
792  // Moves the TerminatedPath with the "most dominated" Clobber to the end of
793  // Paths.
794  auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
795  assert(!Paths.empty() && "Need a path to move");
796  auto Dom = Paths.begin();
797  for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
798  if (!MSSA.dominates(I->Clobber, Dom->Clobber))
799  Dom = I;
800  auto Last = Paths.end() - 1;
801  if (Last != Dom)
802  std::iter_swap(Last, Dom);
803  };
804 
805  MemoryPhi *Current = Phi;
806  while (true) {
807  assert(!MSSA.isLiveOnEntryDef(Current) &&
808  "liveOnEntry wasn't treated as a clobber?");
809 
810  const auto *Target = getWalkTarget(Current);
811  // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
812  // optimization for the prior phi.
813  assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
814  return MSSA.dominates(P.Clobber, Target);
815  }));
816 
817  // FIXME: This is broken, because the Blocker may be reported to be
818  // liveOnEntry, and we'll happily wait for that to disappear (read: never)
819  // For the moment, this is fine, since we do nothing with blocker info.
820  if (Optional<TerminatedPath> Blocker = getBlockingAccess(
821  Target, PausedSearches, NewPaused, TerminatedPaths)) {
822 
823  // Find the node we started at. We can't search based on N->Last, since
824  // we may have gone around a loop with a different MemoryLocation.
825  auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
826  return defPathIndex(N) < PriorPathsSize;
827  });
828  assert(Iter != def_path_iterator());
829 
830  DefPath &CurNode = *Iter;
831  assert(CurNode.Last == Current);
832 
833  // Two things:
834  // A. We can't reliably cache all of NewPaused back. Consider a case
835  // where we have two paths in NewPaused; one of which can't optimize
836  // above this phi, whereas the other can. If we cache the second path
837  // back, we'll end up with suboptimal cache entries. We can handle
838  // cases like this a bit better when we either try to find all
839  // clobbers that block phi optimization, or when our cache starts
840  // supporting unfinished searches.
841  // B. We can't reliably cache TerminatedPaths back here without doing
842  // extra checks; consider a case like:
        //         T
        //        / \
        //       D   C
        //        \ /
        //         S
848  // Where T is our target, C is a node with a clobber on it, D is a
849  // diamond (with a clobber *only* on the left or right node, N), and
850  // S is our start. Say we walk to D, through the node opposite N
851  // (read: ignoring the clobber), and see a cache entry in the top
852  // node of D. That cache entry gets put into TerminatedPaths. We then
853  // walk up to C (N is later in our worklist), find the clobber, and
854  // quit. If we append TerminatedPaths to OtherClobbers, we'll cache
855  // the bottom part of D to the cached clobber, ignoring the clobber
856  // in N. Again, this problem goes away if we start tracking all
857  // blockers for a given phi optimization.
858  TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
859  return {Result, {}};
860  }
861 
862  // If there's nothing left to search, then all paths led to valid clobbers
863  // that we got from our cache; pick the nearest to the start, and allow
864  // the rest to be cached back.
865  if (NewPaused.empty()) {
866  MoveDominatedPathToEnd(TerminatedPaths);
867  TerminatedPath Result = TerminatedPaths.pop_back_val();
868  return {Result, std::move(TerminatedPaths)};
869  }
870 
      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
874  UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
875  if (WR.IsKnownClobber)
876  Clobbers.push_back({WR.Result, Paused});
877  else
878  // Micro-opt: If we hit the end of the chain, save it.
879  DefChainEnd = WR.Result;
880  }
881 
882  if (!TerminatedPaths.empty()) {
883  // If we couldn't find the dominating phi/liveOnEntry in the above loop,
884  // do it now.
885  if (!DefChainEnd)
886  for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
887  DefChainEnd = MA;
888  assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");
889 
890  // If any of the terminated paths don't dominate the phi we'll try to
891  // optimize, we need to figure out what they are and quit.
892  const BasicBlock *ChainBB = DefChainEnd->getBlock();
893  for (const TerminatedPath &TP : TerminatedPaths) {
894  // Because we know that DefChainEnd is as "high" as we can go, we
895  // don't need local dominance checks; BB dominance is sufficient.
896  if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
897  Clobbers.push_back(TP);
898  }
899  }
900 
901  // If we have clobbers in the def chain, find the one closest to Current
902  // and quit.
903  if (!Clobbers.empty()) {
904  MoveDominatedPathToEnd(Clobbers);
905  TerminatedPath Result = Clobbers.pop_back_val();
906  return {Result, std::move(Clobbers)};
907  }
908 
909  assert(all_of(NewPaused,
910  [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
911 
912  // Because liveOnEntry is a clobber, this must be a phi.
913  auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
914 
915  PriorPathsSize = Paths.size();
916  PausedSearches.clear();
917  for (ListIndex I : NewPaused)
918  addSearches(DefChainPhi, PausedSearches, I);
919  NewPaused.clear();
920 
921  Current = DefChainPhi;
922  }
923  }
924 
925  void verifyOptResult(const OptznResult &R) const {
926  assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
927  return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
928  }));
929  }
930 
931  void resetPhiOptznState() {
932  Paths.clear();
933  VisitedPhis.clear();
934  PerformedPhiTranslation = false;
935  }
936 
937 public:
938  ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
939  : MSSA(MSSA), AA(AA), DT(DT) {}
940 
941  AliasAnalysisType *getAA() { return &AA; }
942  /// Finds the nearest clobber for the given query, optimizing phis if
943  /// possible.
944  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
945  unsigned &UpWalkLimit) {
946  Query = &Q;
947  UpwardWalkLimit = &UpWalkLimit;
948  // Starting limit must be > 0.
949  if (!UpWalkLimit)
950  UpWalkLimit++;
951 
952  MemoryAccess *Current = Start;
953  // This walker pretends uses don't exist. If we're handed one, silently grab
954  // its def. (This has the nice side-effect of ensuring we never cache uses)
955  if (auto *MU = dyn_cast<MemoryUse>(Start))
956  Current = MU->getDefiningAccess();
957 
958  DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
959  // Fast path for the overly-common case (no crazy phi optimization
960  // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
964  Result = WalkResult.Result;
965  Q.AR = WalkResult.AR;
966  } else {
967  OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
968  Current, Q.StartingLoc);
969  verifyOptResult(OptRes);
970  resetPhiOptznState();
971  Result = OptRes.PrimaryClobber.Clobber;
972  }
973 
974 #ifdef EXPENSIVE_CHECKS
975  if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
976  checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
977 #endif
978  return Result;
979  }
980 };
981 
982 struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;
986 
987  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
988  MemoryAccess *M)
989  : DTN(D), ChildIt(It), IncomingVal(M) {}
990 
991  void swap(RenamePassData &RHS) {
992  std::swap(DTN, RHS.DTN);
993  std::swap(ChildIt, RHS.ChildIt);
994  std::swap(IncomingVal, RHS.IncomingVal);
995  }
996 };
997 
998 } // end anonymous namespace
999 
1000 namespace llvm {
1001 
1002 template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
1003  ClobberWalker<AliasAnalysisType> Walker;
1004  MemorySSA *MSSA;
1005 
1006 public:
1007  ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
1008  : Walker(*M, *A, *D), MSSA(M) {}
1009 
1010  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
1011  const MemoryLocation &,
1012  unsigned &);
1013  // Third argument (bool), defines whether the clobber search should skip the
1014  // original queried access. If true, there will be a follow-up query searching
1015  // for a clobber access past "self". Note that the Optimized access is not
1016  // updated if a new clobber is found by this SkipSelf search. If this
1017  // additional query becomes heavily used we may decide to cache the result.
1018  // Walker instantiations will decide how to set the SkipSelf bool.
1019  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
1020 };
1021 
1022 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
1023 /// longer does caching on its own, but the name has been retained for the
1024 /// moment.
1025 template <class AliasAnalysisType>
1026 class MemorySSA::CachingWalker final : public MemorySSAWalker {
1027  ClobberWalkerBase<AliasAnalysisType> *Walker;
1028 
1029 public:
1030  CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1031  : MemorySSAWalker(M), Walker(W) {}
1032  ~CachingWalker() override = default;
1033 
  using MemorySSAWalker::getClobberingMemoryAccess;

1036  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1037  return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
1038  }
1039  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1040  const MemoryLocation &Loc,
1041  unsigned &UWL) {
1042  return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1043  }
1044 
1045  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1046  unsigned UpwardWalkLimit = MaxCheckLimit;
1047  return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1048  }
1049  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1050  const MemoryLocation &Loc) override {
1051  unsigned UpwardWalkLimit = MaxCheckLimit;
1052  return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1053  }
1054 
1055  void invalidateInfo(MemoryAccess *MA) override {
1056  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1057  MUD->resetOptimized();
1058  }
1059 };
1060 
1061 template <class AliasAnalysisType>
1062 class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
1063  ClobberWalkerBase<AliasAnalysisType> *Walker;
1064 
1065 public:
1066  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1067  : MemorySSAWalker(M), Walker(W) {}
1068  ~SkipSelfWalker() override = default;
1069 
  using MemorySSAWalker::getClobberingMemoryAccess;

1072  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1073  return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
1074  }
1075  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1076  const MemoryLocation &Loc,
1077  unsigned &UWL) {
1078  return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1079  }
1080 
1081  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1082  unsigned UpwardWalkLimit = MaxCheckLimit;
1083  return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1084  }
1085  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1086  const MemoryLocation &Loc) override {
1087  unsigned UpwardWalkLimit = MaxCheckLimit;
1088  return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1089  }
1090 
1091  void invalidateInfo(MemoryAccess *MA) override {
1092  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1093  MUD->resetOptimized();
1094  }
1095 };
1096 
1097 } // end namespace llvm
1098 
1099 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
1100  bool RenameAllUses) {
1101  // Pass through values to our successors
1102  for (const BasicBlock *S : successors(BB)) {
1103  auto It = PerBlockAccesses.find(S);
1104  // Rename the phi nodes in our successor block
1105  if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1106  continue;
1107  AccessList *Accesses = It->second.get();
1108  auto *Phi = cast<MemoryPhi>(&Accesses->front());
1109  if (RenameAllUses) {
1110  bool ReplacementDone = false;
1111  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
1112  if (Phi->getIncomingBlock(I) == BB) {
1113  Phi->setIncomingValue(I, IncomingVal);
1114  ReplacementDone = true;
1115  }
1116  (void) ReplacementDone;
1117  assert(ReplacementDone && "Incomplete phi during partial rename");
1118  } else
1119  Phi->addIncoming(IncomingVal, BB);
1120  }
1121 }
1122 
1123 /// Rename a single basic block into MemorySSA form.
1124 /// Uses the standard SSA renaming algorithm.
1125 /// \returns The new incoming value.
1126 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
1127  bool RenameAllUses) {
1128  auto It = PerBlockAccesses.find(BB);
1129  // Skip most processing if the list is empty.
1130  if (It != PerBlockAccesses.end()) {
1131  AccessList *Accesses = It->second.get();
1132  for (MemoryAccess &L : *Accesses) {
1133  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
1134  if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
1135  MUD->setDefiningAccess(IncomingVal);
1136  if (isa<MemoryDef>(&L))
1137  IncomingVal = &L;
1138  } else {
1139  IncomingVal = &L;
1140  }
1141  }
1142  }
1143  return IncomingVal;
1144 }
1145 
1146 /// This is the standard SSA renaming algorithm.
1147 ///
1148 /// We walk the dominator tree in preorder, renaming accesses, and then filling
1149 /// in phi nodes in our successors.
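///
/// For example (illustrative summary): after renaming, each MemoryUse and
/// MemoryDef has its defining access set to the nearest dominating access
/// (or liveOnEntry), and each MemoryPhi has one incoming access per
/// predecessor block.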
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
1157  // Note: You can't sink this into the if, because we need it to occur
1158  // regardless of whether we skip blocks or not.
1159  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
1160  if (SkipVisited && AlreadyVisited)
1161  return;
1162 
1163  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
1164  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
1165  WorkStack.push_back({Root, Root->begin(), IncomingVal});
1166 
1167  while (!WorkStack.empty()) {
1168  DomTreeNode *Node = WorkStack.back().DTN;
1169  DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
1170  IncomingVal = WorkStack.back().IncomingVal;
1171 
1172  if (ChildIt == Node->end()) {
1173  WorkStack.pop_back();
1174  } else {
1175  DomTreeNode *Child = *ChildIt;
1176  ++WorkStack.back().ChildIt;
1177  BasicBlock *BB = Child->getBlock();
1178  // Note: You can't sink this into the if, because we need it to occur
1179  // regardless of whether we skip blocks or not.
1180  AlreadyVisited = !Visited.insert(BB).second;
1181  if (SkipVisited && AlreadyVisited) {
1182  // We already visited this during our renaming, which can happen when
1183  // being asked to rename multiple blocks. Figure out the incoming val,
1184  // which is the last def.
1185  // Incoming value can only change if there is a block def, and in that
1186  // case, it's the last block def in the list.
1187  if (auto *BlockDefs = getWritableBlockDefs(BB))
1188  IncomingVal = &*BlockDefs->rbegin();
1189  } else
1190  IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1191  renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1192  WorkStack.push_back({Child, Child->begin(), IncomingVal});
1193  }
1194  }
1195 }
1196 
1197 /// This handles unreachable block accesses by deleting phi nodes in
1198 /// unreachable blocks, and marking all other unreachable MemoryAccess's as
1199 /// being uses of the live on entry definition.
1200 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1201  assert(!DT->isReachableFromEntry(BB) &&
1202  "Reachable block found while handling unreachable blocks");
1203 
1204  // Make sure phi nodes in our reachable successors end up with a
1205  // LiveOnEntryDef for our incoming edge, even though our block is forward
1206  // unreachable. We could just disconnect these blocks from the CFG fully,
1207  // but we do not right now.
1208  for (const BasicBlock *S : successors(BB)) {
1209  if (!DT->isReachableFromEntry(S))
1210  continue;
1211  auto It = PerBlockAccesses.find(S);
1212  // Rename the phi nodes in our successor block
1213  if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1214  continue;
1215  AccessList *Accesses = It->second.get();
1216  auto *Phi = cast<MemoryPhi>(&Accesses->front());
1217  Phi->addIncoming(LiveOnEntryDef.get(), BB);
1218  }
1219 
1220  auto It = PerBlockAccesses.find(BB);
1221  if (It == PerBlockAccesses.end())
1222  return;
1223 
1224  auto &Accesses = It->second;
1225  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1226  auto Next = std::next(AI);
1227  // If we have a phi, just remove it. We are going to replace all
1228  // users with live on entry.
1229  if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1230  UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1231  else
1232  Accesses->erase(AI);
1233  AI = Next;
1234  }
1235 }
1236 
MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1239  SkipWalker(nullptr), NextID(0) {
1240  // Build MemorySSA using a batch alias analysis. This reuses the internal
1241  // state that AA collects during an alias()/getModRefInfo() call. This is
1242  // safe because there are no CFG changes while building MemorySSA and can
1243  // significantly reduce the time spent by the compiler in AA, because we will
1244  // make queries about all the instructions in the Function.
1245  assert(AA && "No alias analysis?");
1246  BatchAAResults BatchAA(*AA);
1247  buildMemorySSA(BatchAA);
  // Intentionally leave AA as nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
1250  this->AA = AA;
1251  // Also create the walker here.
1252  getWalker();
1253 }
1254 
MemorySSA::~MemorySSA() {
  // Drop all our references
1257  for (const auto &Pair : PerBlockAccesses)
1258  for (MemoryAccess &MA : *Pair.second)
1259  MA.dropAllReferences();
1260 }
1261 
1262 MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1263  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1264 
1265  if (Res.second)
1266  Res.first->second = std::make_unique<AccessList>();
1267  return Res.first->second.get();
1268 }
1269 
1270 MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1271  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1272 
1273  if (Res.second)
1274  Res.first->second = std::make_unique<DefsList>();
1275  return Res.first->second.get();
1276 }
1277 
1278 namespace llvm {
1279 
1280 /// This class is a batch walker of all MemoryUse's in the program, and points
1281 /// their defining access at the thing that actually clobbers them. Because it
1282 /// is a batch walker that touches everything, it does not operate like the
1283 /// other walkers. This walker is basically performing a top-down SSA renaming
1284 /// pass, where the version stack is used as the cache. This enables it to be
1285 /// significantly more time and memory efficient than using the regular walker,
1286 /// which is walking bottom-up.
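///
/// A sketch of the idea (illustrative): while walking the dominator tree
/// top-down, the MemoryDefs and MemoryPhis on the path from the root are kept
/// in VersionStack, and each MemoryUse only needs to scan the portion of that
/// stack that has changed since the last use of the same location.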
class MemorySSA::OptimizeUses {
public:
1289  OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
1290  BatchAAResults *BAA, DominatorTree *DT)
1291  : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}
1292 
1293  void optimizeUses();
1294 
1295 private:
1296  /// This represents where a given memorylocation is in the stack.
1297  struct MemlocStackInfo {
1298  // This essentially is keeping track of versions of the stack. Whenever
1299  // the stack changes due to pushes or pops, these versions increase.
1300  unsigned long StackEpoch;
1301  unsigned long PopEpoch;
1302  // This is the lower bound of places on the stack to check. It is equal to
1303  // the place the last stack walk ended.
1304  // Note: Correctness depends on this being initialized to 0, which densemap
1305  // does
1306  unsigned long LowerBound;
1307  const BasicBlock *LowerBoundBlock;
1308  // This is where the last walk for this memory location ended.
1309  unsigned long LastKill;
    bool LastKillValid;
    Optional<AliasResult> AR;
  };
1313 
  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

1318  MemorySSA *MSSA;
1319  CachingWalker<BatchAAResults> *Walker;
1320  BatchAAResults *AA;
1321  DominatorTree *DT;
1322 };
1323 
1324 } // end namespace llvm
1325 
/// Optimize the uses in a given block. This is basically the SSA renaming
1327 /// algorithm, with one caveat: We are able to use a single stack for all
1328 /// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
1329 /// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
1330 /// going to be some position in that stack of possible ones.
1331 ///
1332 /// We track the stack positions that each MemoryLocation needs
1333 /// to check, and last ended at. This is because we only want to check the
1334 /// things that changed since last time. The same MemoryLocation should
1335 /// get clobbered by the same store (getModRefInfo does not use invariantness or
1336 /// things like this, and if they start, we can modify MemoryLocOrCall to
1337 /// include relevant data)
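///
/// A tiny illustration (not part of the original comment): with a version
/// stack of [liveOnEntry, 1 = MemoryDef, 2 = MemoryDef], a MemoryUse whose
/// location is only written by access 1 scans down from the top of the stack,
/// stops at 1, and records that position as its LastKill/LowerBound so later
/// uses of the same location can skip the already-checked entries.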
1338 void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1339  const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  /// If no accesses, nothing to do.
  const AccessList *Accesses = MSSA->getBlockAccesses(BB);
  if (Accesses == nullptr)
1346  return;
1347 
1348  // Pop everything that doesn't dominate the current block off the stack,
1349  // increment the PopEpoch to account for this.
1350  while (true) {
1351  assert(
1352  !VersionStack.empty() &&
1353  "Version stack should have liveOnEntry sentinel dominating everything");
1354  BasicBlock *BackBlock = VersionStack.back()->getBlock();
1355  if (DT->dominates(BackBlock, BB))
1356  break;
1357  while (VersionStack.back()->getBlock() == BackBlock)
1358  VersionStack.pop_back();
1359  ++PopEpoch;
1360  }
1361 
1362  for (MemoryAccess &MA : *Accesses) {
1363  auto *MU = dyn_cast<MemoryUse>(&MA);
1364  if (!MU) {
1365  VersionStack.push_back(&MA);
1366  ++StackEpoch;
1367  continue;
1368  }
1369 
1370  if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1371  MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
1372  continue;
1373  }
1374 
1375  MemoryLocOrCall UseMLOC(MU);
1376  auto &LocInfo = LocStackInfo[UseMLOC];
1377  // If the pop epoch changed, it means we've removed stuff from top of
1378  // stack due to changing blocks. We may have to reset the lower bound or
1379  // last kill info.
1380  if (LocInfo.PopEpoch != PopEpoch) {
1381  LocInfo.PopEpoch = PopEpoch;
1382  LocInfo.StackEpoch = StackEpoch;
1383  // If the lower bound was in something that no longer dominates us, we
1384  // have to reset it.
1385  // We can't simply track stack size, because the stack may have had
1386  // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees. To get the optimal number of queries would
1389  // be to make lowerbound and lastkill a per-loc stack, and pop it until
1390  // the top of that stack dominates us. This does not seem worth it ATM.
1391  // A much cheaper optimization would be to always explore the deepest
1392  // branch of the dominator tree first. This will guarantee this resets on
1393  // the smallest set of blocks.
1394  if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1395  !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1396  // Reset the lower bound of things to check.
1397  // TODO: Some day we should be able to reset to last kill, rather than
1398  // 0.
1399  LocInfo.LowerBound = 0;
1400  LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1401  LocInfo.LastKillValid = false;
1402  }
1403  } else if (LocInfo.StackEpoch != StackEpoch) {
1404  // If all that has changed is the StackEpoch, we only have to check the
1405  // new things on the stack, because we've checked everything before. In
1406  // this case, the lower bound of things to check remains the same.
1407  LocInfo.PopEpoch = PopEpoch;
1408  LocInfo.StackEpoch = StackEpoch;
1409  }
1410  if (!LocInfo.LastKillValid) {
1411  LocInfo.LastKill = VersionStack.size() - 1;
1412  LocInfo.LastKillValid = true;
1413  LocInfo.AR = AliasResult::MayAlias;
1414  }
1415 
1416  // At this point, we should have corrected last kill and LowerBound to be
1417  // in bounds.
1418  assert(LocInfo.LowerBound < VersionStack.size() &&
1419  "Lower bound out of range");
1420  assert(LocInfo.LastKill < VersionStack.size() &&
1421  "Last kill info out of range");
1422  // In any case, the new upper bound is the top of the stack.
1423  unsigned long UpperBound = VersionStack.size() - 1;
1424 
1425  if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1426  LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1427  << *(MU->getMemoryInst()) << ")"
1428  << " because there are "
1429  << UpperBound - LocInfo.LowerBound
1430  << " stores to disambiguate\n");
1431  // Because we did not walk, LastKill is no longer valid, as this may
1432  // have been a kill.
1433  LocInfo.LastKillValid = false;
1434  continue;
1435  }
1436  bool FoundClobberResult = false;
1437  unsigned UpwardWalkLimit = MaxCheckLimit;
1438  while (UpperBound > LocInfo.LowerBound) {
1439  if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1440  // For phis, use the walker, see where we ended up, go there
1441  MemoryAccess *Result =
1442  Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
1443  // We are guaranteed to find it or something is wrong
1444  while (VersionStack[UpperBound] != Result) {
1445  assert(UpperBound != 0);
1446  --UpperBound;
1447  }
1448  FoundClobberResult = true;
1449  break;
1450  }
1451 
1452  MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1453  ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
1454  if (CA.IsClobber) {
1455  FoundClobberResult = true;
1456  LocInfo.AR = CA.AR;
1457  break;
1458  }
1459  --UpperBound;
1460  }
1461 
1462  // Note: Phis always have AliasResult AR set to MayAlias ATM.
1463 
    // At the end of this loop, UpperBound is either a clobber, or lower bound.
    // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
1466  if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1467  // We were last killed now by where we got to
1468  if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
1469  LocInfo.AR = None;
1470  MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
1471  LocInfo.LastKill = UpperBound;
1472  } else {
1473  // Otherwise, we checked all the new ones, and now we know we can get to
1474  // LastKill.
1475  MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
1476  }
1477  LocInfo.LowerBound = VersionStack.size() - 1;
1478  LocInfo.LowerBoundBlock = BB;
1479  }
1480 }
1481 
1482 /// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  VersionStack.push_back(MSSA->getLiveOnEntryDef());
1487 
1488  unsigned long StackEpoch = 1;
1489  unsigned long PopEpoch = 1;
1490  // We perform a non-recursive top-down dominator tree walk.
1491  for (const auto *DomNode : depth_first(DT->getRootNode()))
1492  optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1493  LocStackInfo);
1494 }
1495 
1496 void MemorySSA::placePHINodes(
1497  const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
1498  // Determine where our MemoryPhi's should go
1499  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);
1503 
1504  // Now place MemoryPhi nodes.
1505  for (auto &BB : IDFBlocks)
1506  createMemoryPhi(BB);
1507 }
1508 
1509 void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
1510  // We create an access to represent "live on entry", for things like
1511  // arguments or users of globals, where the memory they use is defined before
1512  // the beginning of the function. We do not actually insert it into the IR.
1513  // We do not define a live on exit for the immediate uses, and thus our
1514  // semantics do *not* imply that something with no immediate uses can simply
1515  // be removed.
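  // In printed form such an access shows up as, e.g. (illustrative only):
  //
  //   ; MemoryUse(liveOnEntry)
  //   %v = load i32, i32* @g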
1516  BasicBlock &StartingPoint = F.getEntryBlock();
1517  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1518  &StartingPoint, NextID++));
1519 
1520  // We maintain lists of memory accesses per-block, trading memory for time. We
1521  // could just look up the memory access for every possible instruction in the
1522  // stream.
1523  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1524  // Go through each block, figure out where defs occur, and chain together all
1525  // the accesses.
1526  for (BasicBlock &B : F) {
1527  bool InsertIntoDef = false;
1528  AccessList *Accesses = nullptr;
1529  DefsList *Defs = nullptr;
1530  for (Instruction &I : B) {
1531  MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
1532  if (!MUD)
1533  continue;
1534 
1535  if (!Accesses)
1536  Accesses = getOrCreateAccessList(&B);
1537  Accesses->push_back(MUD);
1538  if (isa<MemoryDef>(MUD)) {
1539  InsertIntoDef = true;
1540  if (!Defs)
1541  Defs = getOrCreateDefsList(&B);
1542  Defs->push_back(*MUD);
1543  }
1544  }
1545  if (InsertIntoDef)
1546  DefiningBlocks.insert(&B);
1547  }
1548  placePHINodes(DefiningBlocks);
1549 
1550  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1551  // filled in with all blocks.
1552  SmallPtrSet<BasicBlock *, 16> Visited;
1553  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1554 
1555  ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
1556  CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
1557  OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
1558 
1559  // Mark the uses in unreachable blocks as live on entry, so that they go
1560  // somewhere.
1561  for (auto &BB : F)
1562  if (!Visited.count(&BB))
1563  markUnreachableAsLiveOnEntry(&BB);
1564 }
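// Minimal client sketch (assumes a FunctionAnalysisManager FAM with the usual
// analyses registered, a Function &F, and a memory-touching Instruction I;
// illustrative only, not part of this file):
//   MemorySSA &MSSA = FAM.getResult<MemorySSAAnalysis>(F).getMSSA();
//   if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I)) {
//     MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(MA);
//     (void)Clobber; // nearest dominating access that clobbers I's location
//   }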
1565 
1566 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1567 
1568 MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
1569  if (Walker)
1570  return Walker.get();
1571 
1572  if (!WalkerBase)
1573  WalkerBase =
1574  std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1575 
1576  Walker =
1577  std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
1578  return Walker.get();
1579 }
1580 
1581 MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1582  if (SkipWalker)
1583  return SkipWalker.get();
1584 
1585  if (!WalkerBase)
1586  WalkerBase =
1587  std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1588 
1589  SkipWalker =
1590  std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
1591  return SkipWalker.get();
1592  }
1593 
1594 
1595 // This is a helper function used by the creation routines. It places NewAccess
1596 // into the access and defs lists for a given basic block, at the given
1597 // insertion point.
1598 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1599  const BasicBlock *BB,
1600  InsertionPlace Point) {
1601  auto *Accesses = getOrCreateAccessList(BB);
1602  if (Point == Beginning) {
1603  // If it's a phi node, it goes first, otherwise, it goes after any phi
1604  // nodes.
1605  if (isa<MemoryPhi>(NewAccess)) {
1606  Accesses->push_front(NewAccess);
1607  auto *Defs = getOrCreateDefsList(BB);
1608  Defs->push_front(*NewAccess);
1609  } else {
1610  auto AI = find_if_not(
1611  *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1612  Accesses->insert(AI, NewAccess);
1613  if (!isa<MemoryUse>(NewAccess)) {
1614  auto *Defs = getOrCreateDefsList(BB);
1615  auto DI = find_if_not(
1616  *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1617  Defs->insert(DI, *NewAccess);
1618  }
1619  }
1620  } else {
1621  Accesses->push_back(NewAccess);
1622  if (!isa<MemoryUse>(NewAccess)) {
1623  auto *Defs = getOrCreateDefsList(BB);
1624  Defs->push_back(*NewAccess);
1625  }
1626  }
1627  BlockNumberingValid.erase(BB);
1628 }
1629 
1630 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1631  AccessList::iterator InsertPt) {
1632  auto *Accesses = getWritableBlockAccesses(BB);
1633  bool WasEnd = InsertPt == Accesses->end();
1634  Accesses->insert(AccessList::iterator(InsertPt), What);
1635  if (!isa<MemoryUse>(What)) {
1636  auto *Defs = getOrCreateDefsList(BB);
1637  // If we got asked to insert at the end, we have an easy job, just shove it
1638  // at the end. If we got asked to insert before an existing def, we also get
1639  // an iterator. If we got asked to insert before a use, we have to hunt for
1640  // the next def.
1641  if (WasEnd) {
1642  Defs->push_back(*What);
1643  } else if (isa<MemoryDef>(InsertPt)) {
1644  Defs->insert(InsertPt->getDefsIterator(), *What);
1645  } else {
1646  while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1647  ++InsertPt;
1648  // Either we found a def, or we are inserting at the end
1649  if (InsertPt == Accesses->end())
1650  Defs->push_back(*What);
1651  else
1652  Defs->insert(InsertPt->getDefsIterator(), *What);
1653  }
1654  }
1655  BlockNumberingValid.erase(BB);
1656 }
1657 
1658 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
1659  // Keep it in the lookup tables, remove from the lists
1660  removeFromLists(What, false);
1661 
1662  // Note that moving should implicitly invalidate the optimized state of a
1663  // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
1664  // MemoryDef.
1665  if (auto *MD = dyn_cast<MemoryDef>(What))
1666  MD->resetOptimized();
1667  What->setBlock(BB);
1668 }
1669 
1670 // Move What before Where in the IR. The end result is that What will belong to
1671 // the right lists and have the right Block set, but will not otherwise be
1672 // correct. It will not have the right defining access, and if it is a def,
1673 // things below it will not properly be updated.
1674 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1675  AccessList::iterator Where) {
1676  prepareForMoveTo(What, BB);
1677  insertIntoListsBefore(What, BB, Where);
1678 }
1679 
1680 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1681  InsertionPlace Point) {
1682  if (isa<MemoryPhi>(What)) {
1683  assert(Point == Beginning &&
1684  "Can only move a Phi at the beginning of the block");
1685  // Update lookup table entry
1686  ValueToMemoryAccess.erase(What->getBlock());
1687  bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1688  (void)Inserted;
1689  assert(Inserted && "Cannot move a Phi to a block that already has one");
1690  }
1691 
1692  prepareForMoveTo(What, BB);
1693  insertIntoListsForBlock(What, BB, Point);
1694 }
1695 
1696 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1697  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1698  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1699  // Phi's always are placed at the front of the block.
1700  insertIntoListsForBlock(Phi, BB, Beginning);
1701  ValueToMemoryAccess[BB] = Phi;
1702  return Phi;
1703 }
1704 
1705 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1706  MemoryAccess *Definition,
1707  const MemoryUseOrDef *Template,
1708  bool CreationMustSucceed) {
1709  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1710  MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
1711  if (CreationMustSucceed)
1712  assert(NewAccess != nullptr && "Tried to create a memory access for a "
1713  "non-memory touching instruction");
1714  if (NewAccess) {
1715  assert((!Definition || !isa<MemoryUse>(Definition)) &&
1716  "A use cannot be a defining access");
1717  NewAccess->setDefiningAccess(Definition);
1718  }
1719  return NewAccess;
1720 }
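// Callers outside this file usually reach createDefinedAccess through
// MemorySSAUpdater. A hedged sketch (assumes an updater MSSAU and a newly
// inserted store NewSI in block BB; the variable names are illustrative):
//   MemoryAccess *NewMA = MSSAU.createMemoryAccessInBB(
//       NewSI, /*Definition=*/nullptr, BB, MemorySSA::End);
// The updater then takes care of wiring the new access into the def chain.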
1721 
1722 // Return true if the instruction has ordering constraints.
1723 // Note specifically that this only considers stores and loads
1724 // because others are still considered ModRef by getModRefInfo.
1725 static inline bool isOrdered(const Instruction *I) {
1726  if (auto *SI = dyn_cast<StoreInst>(I)) {
1727  if (!SI->isUnordered())
1728  return true;
1729  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1730  if (!LI->isUnordered())
1731  return true;
1732  }
1733  return false;
1734 }
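// Example of the effect (illustrative): a volatile load such as
//   %v = load volatile i32, i32* %p
// is not "unordered", so createNewAccess below models it as a MemoryDef even
// though it only reads memory; this keeps some relative ordering between
// volatile accesses visible in the def chain.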
1735 
1736 /// Helper function to create new memory accesses
1737 template <typename AliasAnalysisType>
1738 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1739  AliasAnalysisType *AAP,
1740  const MemoryUseOrDef *Template) {
1741  // The assume intrinsic has a control dependency which we model by claiming
1742  // that it writes arbitrarily. Debuginfo intrinsics may be considered
1743  // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
1744  // dependencies here.
1745  // FIXME: Replace this special casing with a more accurate modelling of
1746  // assume's control dependency.
1747  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1748  switch (II->getIntrinsicID()) {
1749  default:
1750  break;
1751  case Intrinsic::assume:
1752  case Intrinsic::experimental_noalias_scope_decl:
1753  return nullptr;
1754  }
1755  }
1756 
1757  // Using a nonstandard AA pipelines might leave us with unexpected modref
1758  // results for I, so add a check to not model instructions that may not read
1759  // from or write to memory. This is necessary for correctness.
1760  if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
1761  return nullptr;
1762 
1763  bool Def, Use;
1764  if (Template) {
1765  Def = isa<MemoryDef>(Template);
1766  Use = isa<MemoryUse>(Template);
1767 #if !defined(NDEBUG)
1768  ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1769  bool DefCheck, UseCheck;
1770  DefCheck = isModSet(ModRef) || isOrdered(I);
1771  UseCheck = isRefSet(ModRef);
1772  assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1773 #endif
1774  } else {
1775  // Find out what effect this instruction has on memory.
1776  ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1777  // The isOrdered check is used to ensure that volatiles end up as defs
1778  // (atomics end up as ModRef right now anyway). Until we separate the
1779  // ordering chain from the memory chain, this enables people to see at least
1780  // some relative ordering to volatiles. Note that getClobberingMemoryAccess
1781  // will still give an answer that bypasses other volatile loads. TODO:
1782  // Separate memory aliasing and ordering into two different chains so that
1783  // we can precisely represent both "what memory will this read/write/is
1784  // clobbered by" and "what instructions can I move this past".
1785  Def = isModSet(ModRef) || isOrdered(I);
1786  Use = isRefSet(ModRef);
1787  }
1788 
1789  // It's possible for an instruction to not modify memory at all. During
1790  // construction, we ignore them.
1791  if (!Def && !Use)
1792  return nullptr;
1793 
1794  MemoryUseOrDef *MUD;
1795  if (Def)
1796  MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1797  else
1798  MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1799  ValueToMemoryAccess[I] = MUD;
1800  return MUD;
1801 }
1802 
1803 /// Properly remove \p MA from all of MemorySSA's lookup tables.
1804 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1805  assert(MA->use_empty() &&
1806  "Trying to remove memory access that still has uses");
1807  BlockNumbering.erase(MA);
1808  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1809  MUD->setDefiningAccess(nullptr);
1810  // Invalidate our walker's cache if necessary
1811  if (!isa<MemoryUse>(MA))
1812  getWalker()->invalidateInfo(MA);
1813 
1814  Value *MemoryInst;
1815  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1816  MemoryInst = MUD->getMemoryInst();
1817  else
1818  MemoryInst = MA->getBlock();
1819 
1820  auto VMA = ValueToMemoryAccess.find(MemoryInst);
1821  if (VMA->second == MA)
1822  ValueToMemoryAccess.erase(VMA);
1823 }
1824 
1825 /// Properly remove \p MA from all of MemorySSA's lists.
1826 ///
1827 /// Because of the way the intrusive list and use lists work, it is important to
1828 /// do removal in the right order.
1829 /// ShouldDelete defaults to true, and will cause the memory access to also be
1830 /// deleted, not just removed.
1831 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1832  BasicBlock *BB = MA->getBlock();
1833  // The access list owns the reference, so we erase it from the non-owning list
1834  // first.
1835  if (!isa<MemoryUse>(MA)) {
1836  auto DefsIt = PerBlockDefs.find(BB);
1837  std::unique_ptr<DefsList> &Defs = DefsIt->second;
1838  Defs->remove(*MA);
1839  if (Defs->empty())
1840  PerBlockDefs.erase(DefsIt);
1841  }
1842 
1843  // The erase call here will delete it. If we don't want it deleted, we call
1844  // remove instead.
1845  auto AccessIt = PerBlockAccesses.find(BB);
1846  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1847  if (ShouldDelete)
1848  Accesses->erase(MA);
1849  else
1850  Accesses->remove(MA);
1851 
1852  if (Accesses->empty()) {
1853  PerBlockAccesses.erase(AccessIt);
1854  BlockNumberingValid.erase(BB);
1855  }
1856 }
1857 
1858 void MemorySSA::print(raw_ostream &OS) const {
1859  MemorySSAAnnotatedWriter Writer(this);
1860  F.print(OS, &Writer);
1861 }
1862 
1863 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1864 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1865 #endif
1866 
1867 void MemorySSA::verifyMemorySSA() const {
1868  verifyOrderingDominationAndDefUses(F);
1869  verifyDominationNumbers(F);
1870  verifyPrevDefInPhis(F);
1871  // Previously, the verification used to also verify that the clobberingAccess
1872  // cached by MemorySSA is the same as the clobberingAccess found at a later
1873  // query to AA. This does not hold true in general due to the current fragility
1874  // of BasicAA which has arbitrary caps on the things it analyzes before giving
1875  // up. As a result, transformations that are correct will lead to BasicAA
1876  // returning different Alias answers before and after that transformation.
1877  // Invalidating MemorySSA is not an option, as the results in BasicAA can be so
1878  // random, in the worst case we'd need to rebuild MemorySSA from scratch after
1879  // every transformation, which defeats the purpose of using it. For such an
1880  // example, see test4 added in D51960.
1881 }
1882 
1883 void MemorySSA::verifyPrevDefInPhis(Function &F) const {
1884 #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
1885  for (const BasicBlock &BB : F) {
1886  if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
1887  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1888  auto *Pred = Phi->getIncomingBlock(I);
1889  auto *IncAcc = Phi->getIncomingValue(I);
1890  // If Pred has no unreachable predecessors, get last def looking at
1891  // IDoms. If, while walking IDoms, any of these has an unreachable
1892  // predecessor, then the incoming def can be any access.
1893  if (auto *DTNode = DT->getNode(Pred)) {
1894  while (DTNode) {
1895  if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
1896  auto *LastAcc = &*(--DefList->end());
1897  assert(LastAcc == IncAcc &&
1898  "Incorrect incoming access into phi.");
1899  break;
1900  }
1901  DTNode = DTNode->getIDom();
1902  }
1903  } else {
1904  // If Pred has unreachable predecessors, but has at least a Def, the
1905  // incoming access can be the last Def in Pred, or it could have been
1906  // optimized to LoE. After an update, though, the LoE may have been
1907  // replaced by another access, so IncAcc may be any access.
1908  // If Pred has unreachable predecessors and no Defs, incoming access
1909  // should be LoE; However, after an update, it may be any access.
1910  }
1911  }
1912  }
1913  }
1914 #endif
1915 }
1916 
1917 /// Verify that all of the blocks we believe to have valid domination numbers
1918 /// actually have valid domination numbers.
1919 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1920 #ifndef NDEBUG
1921  if (BlockNumberingValid.empty())
1922  return;
1923 
1924  SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1925  for (const BasicBlock &BB : F) {
1926  if (!ValidBlocks.count(&BB))
1927  continue;
1928 
1929  ValidBlocks.erase(&BB);
1930 
1931  const AccessList *Accesses = getBlockAccesses(&BB);
1932  // It's correct to say an empty block has valid numbering.
1933  if (!Accesses)
1934  continue;
1935 
1936  // Block numbering starts at 1.
1937  unsigned long LastNumber = 0;
1938  for (const MemoryAccess &MA : *Accesses) {
1939  auto ThisNumberIter = BlockNumbering.find(&MA);
1940  assert(ThisNumberIter != BlockNumbering.end() &&
1941  "MemoryAccess has no domination number in a valid block!");
1942 
1943  unsigned long ThisNumber = ThisNumberIter->second;
1944  assert(ThisNumber > LastNumber &&
1945  "Domination numbers should be strictly increasing!");
1946  LastNumber = ThisNumber;
1947  }
1948  }
1949 
1950  assert(ValidBlocks.empty() &&
1951  "All valid BasicBlocks should exist in F -- dangling pointers?");
1952 #endif
1953 }
1954 
1955 /// Verify ordering: the order and existence of MemoryAccesses matches the
1956 /// order and existence of memory affecting instructions.
1957 /// Verify domination: each definition dominates all of its uses.
1958 /// Verify def-uses: the immediate use information - walk all the memory
1959 /// accesses and verify that, for each use, it appears in the appropriate
1960 /// def's use list.
1961 void MemorySSA::verifyOrderingDominationAndDefUses(Function &F) const {
1962 #if !defined(NDEBUG)
1963  // Walk all the blocks, comparing what the lookups think and what the access
1964  // lists think, as well as the order in the blocks vs the order in the access
1965  // lists.
1966  SmallVector<MemoryAccess *, 32> ActualAccesses;
1967  SmallVector<MemoryAccess *, 32> ActualDefs;
1968  for (BasicBlock &B : F) {
1969  const AccessList *AL = getBlockAccesses(&B);
1970  const auto *DL = getBlockDefs(&B);
1971  MemoryPhi *Phi = getMemoryAccess(&B);
1972  if (Phi) {
1973  // Verify ordering.
1974  ActualAccesses.push_back(Phi);
1975  ActualDefs.push_back(Phi);
1976  // Verify domination
1977  for (const Use &U : Phi->uses())
1978  assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
1979 #if defined(EXPENSIVE_CHECKS)
1980  // Verify def-uses.
1981  assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1982  pred_begin(&B), pred_end(&B))) &&
1983  "Incomplete MemoryPhi Node");
1984  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1985  verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1986  assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
1987  "Incoming phi block not a block predecessor");
1988  }
1989 #endif
1990  }
1991 
1992  for (Instruction &I : B) {
1993  MemoryUseOrDef *MA = getMemoryAccess(&I);
1994  assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1995  "We have memory affecting instructions "
1996  "in this block but they are not in the "
1997  "access list or defs list");
1998  if (MA) {
1999  // Verify ordering.
2000  ActualAccesses.push_back(MA);
2001  if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
2002  // Verify ordering.
2003  ActualDefs.push_back(MA);
2004  // Verify domination.
2005  for (const Use &U : MD->uses())
2006  assert(dominates(MD, U) &&
2007  "Memory Def does not dominate its uses");
2008  }
2009 #if defined(EXPENSIVE_CHECKS)
2010  // Verify def-uses.
2011  verifyUseInDefs(MA->getDefiningAccess(), MA);
2012 #endif
2013  }
2014  }
2015  // Either we hit the assert, really have no accesses, or we have both
2016  // accesses and an access list. Same with defs.
2017  if (!AL && !DL)
2018  continue;
2019  // Verify ordering.
2020  assert(AL->size() == ActualAccesses.size() &&
2021  "We don't have the same number of accesses in the block as on the "
2022  "access list");
2023  assert((DL || ActualDefs.size() == 0) &&
2024  "Either we should have a defs list, or we should have no defs");
2025  assert((!DL || DL->size() == ActualDefs.size()) &&
2026  "We don't have the same number of defs in the block as on the "
2027  "def list");
2028  auto ALI = AL->begin();
2029  auto AAI = ActualAccesses.begin();
2030  while (ALI != AL->end() && AAI != ActualAccesses.end()) {
2031  assert(&*ALI == *AAI && "Not the same accesses in the same order");
2032  ++ALI;
2033  ++AAI;
2034  }
2035  ActualAccesses.clear();
2036  if (DL) {
2037  auto DLI = DL->begin();
2038  auto ADI = ActualDefs.begin();
2039  while (DLI != DL->end() && ADI != ActualDefs.end()) {
2040  assert(&*DLI == *ADI && "Not the same defs in the same order");
2041  ++DLI;
2042  ++ADI;
2043  }
2044  }
2045  ActualDefs.clear();
2046  }
2047 #endif
2048 }
2049 
2050 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
2051 /// appears in the use list of \p Def.
2052 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
2053 #ifndef NDEBUG
2054  // The live on entry use may cause us to get a NULL def here
2055  if (!Def)
2056  assert(isLiveOnEntryDef(Use) &&
2057  "Null def but use does not point to live on entry def");
2058  else
2059  assert(is_contained(Def->users(), Use) &&
2060  "Did not find use in def's use list");
2061 #endif
2062 }
2063 
2064 /// Perform a local numbering on blocks so that instruction ordering can be
2065 /// determined in constant time.
2066 /// TODO: We currently just number in order. If we numbered by N, we could
2067 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
2068 /// log2(N) sequences of mixed before and after) without needing to invalidate
2069 /// the numbering.
2070 void MemorySSA::renumberBlock(const BasicBlock *B) const {
2071  // The pre-increment ensures the numbers really start at 1.
2072  unsigned long CurrentNumber = 0;
2073  const AccessList *AL = getBlockAccesses(B);
2074  assert(AL != nullptr && "Asking to renumber an empty block");
2075  for (const auto &I : *AL)
2076  BlockNumbering[&I] = ++CurrentNumber;
2077  BlockNumberingValid.insert(B);
2078 }
2079 
2080 /// Determine, for two memory accesses in the same block,
2081 /// whether \p Dominator dominates \p Dominatee.
2082 /// \returns True if \p Dominator dominates \p Dominatee.
2083 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
2084  const MemoryAccess *Dominatee) const {
2085  const BasicBlock *DominatorBlock = Dominator->getBlock();
2086 
2087  assert((DominatorBlock == Dominatee->getBlock()) &&
2088  "Asking for local domination when accesses are in different blocks!");
2089  // A node dominates itself.
2090  if (Dominatee == Dominator)
2091  return true;
2092 
2093  // When Dominatee is defined on function entry, it is not dominated by another
2094  // memory access.
2095  if (isLiveOnEntryDef(Dominatee))
2096  return false;
2097 
2098  // When Dominator is defined on function entry, it dominates the other memory
2099  // access.
2100  if (isLiveOnEntryDef(Dominator))
2101  return true;
2102 
2103  if (!BlockNumberingValid.count(DominatorBlock))
2104  renumberBlock(DominatorBlock);
2105 
2106  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
2107  // All numbers start with 1
2108  assert(DominatorNum != 0 && "Block was not numbered properly");
2109  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2110  assert(DominateeNum != 0 && "Block was not numbered properly");
2111  return DominatorNum < DominateeNum;
2112 }
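// Worked example (hypothetical block, not from this file): with the accesses
//   1 = MemoryDef(liveOnEntry)   ; number 1
//   MemoryUse(1)                 ; number 2
//   2 = MemoryDef(1)             ; number 3
// renumberBlock assigns 1, 2, 3 in list order, so locallyDominates(def 1, the
// use) is true while locallyDominates(def 2, the use) is false.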
2113 
2114 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2115  const MemoryAccess *Dominatee) const {
2116  if (Dominator == Dominatee)
2117  return true;
2118 
2119  if (isLiveOnEntryDef(Dominatee))
2120  return false;
2121 
2122  if (Dominator->getBlock() != Dominatee->getBlock())
2123  return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2124  return locallyDominates(Dominator, Dominatee);
2125 }
2126 
2127 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2128  const Use &Dominatee) const {
2129  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2130  BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2131  // The def must dominate the incoming block of the phi.
2132  if (UseBB != Dominator->getBlock())
2133  return DT->dominates(Dominator->getBlock(), UseBB);
2134  // If the UseBB and the DefBB are the same, compare locally.
2135  return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2136  }
2137  // If it's not a PHI node use, the normal dominates can already handle it.
2138  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2139 }
2140 
2141 const static char LiveOnEntryStr[] = "liveOnEntry";
2142 
2143 void MemoryAccess::print(raw_ostream &OS) const {
2144  switch (getValueID()) {
2145  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2146  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2147  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2148  }
2149  llvm_unreachable("invalid value id");
2150 }
2151 
2152 void MemoryDef::print(raw_ostream &OS) const {
2153  MemoryAccess *UO = getDefiningAccess();
2154 
2155  auto printID = [&OS](MemoryAccess *A) {
2156  if (A && A->getID())
2157  OS << A->getID();
2158  else
2159  OS << LiveOnEntryStr;
2160  };
2161 
2162  OS << getID() << " = MemoryDef(";
2163  printID(UO);
2164  OS << ")";
2165 
2166  if (isOptimized()) {
2167  OS << "->";
2168  printID(getOptimized());
2169 
2170  if (Optional<AliasResult> AR = getOptimizedAccessType())
2171  OS << " " << *AR;
2172  }
2173 }
2174 
2175 void MemoryPhi::print(raw_ostream &OS) const {
2176  ListSeparator LS(",");
2177  OS << getID() << " = MemoryPhi(";
2178  for (const auto &Op : operands()) {
2179  BasicBlock *BB = getIncomingBlock(Op);
2180  MemoryAccess *MA = cast<MemoryAccess>(Op);
2181 
2182  OS << LS << '{';
2183  if (BB->hasName())
2184  OS << BB->getName();
2185  else
2186  BB->printAsOperand(OS, false);
2187  OS << ',';
2188  if (unsigned ID = MA->getID())
2189  OS << ID;
2190  else
2191  OS << LiveOnEntryStr;
2192  OS << '}';
2193  }
2194  OS << ')';
2195 }
2196 
2197 void MemoryUse::print(raw_ostream &OS) const {
2198  MemoryAccess *UO = getDefiningAccess();
2199  OS << "MemoryUse(";
2200  if (UO && UO->getID())
2201  OS << UO->getID();
2202  else
2203  OS << LiveOnEntryStr;
2204  OS << ')';
2205 
2206  if (Optional<AliasResult> AR = getOptimizedAccessType())
2207  OS << " " << *AR;
2208 }
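// Taken together, these print routines produce annotations of the form shown
// below (illustrative output as emitted by the annotated writer; exact IDs and
// block names depend on the input):
//   ; 1 = MemoryDef(liveOnEntry)
//   ; MemoryUse(1) MustAlias
//   ; 2 = MemoryPhi({entry,liveOnEntry},{loop.body,1})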
2209 
2210 void MemoryAccess::dump() const {
2211 // Cannot completely remove virtual function even in release mode.
2212 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2213  print(dbgs());
2214  dbgs() << "\n";
2215 #endif
2216 }
2217 
2218 char MemorySSAPrinterLegacyPass::ID = 0;
2219 
2220 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
2221  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
2222 }
2223 
2224 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
2225  AU.setPreservesAll();
2226  AU.addRequired<MemorySSAWrapperPass>();
2227 }
2228 
2229 class DOTFuncMSSAInfo {
2230 private:
2231  const Function &F;
2232  MemorySSAAnnotatedWriter MSSAWriter;
2233 
2234 public:
2235  DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
2236  : F(F), MSSAWriter(&MSSA) {}
2237 
2238  const Function *getFunction() { return &F; }
2239  MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }
2240 };
2241 
2242 namespace llvm {
2243 
2244 template <>
2245 struct GraphTraits<DOTFuncMSSAInfo *> : public GraphTraits<const BasicBlock *> {
2246  static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo) {
2247  return &(CFGInfo->getFunction()->getEntryBlock());
2248  }
2249 
2250  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
2251  using nodes_iterator = pointer_iterator<Function::const_iterator>;
2252 
2253  static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo) {
2254  return nodes_iterator(CFGInfo->getFunction()->begin());
2255  }
2256 
2257  static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo) {
2258  return nodes_iterator(CFGInfo->getFunction()->end());
2259  }
2260 
2261  static size_t size(DOTFuncMSSAInfo *CFGInfo) {
2262  return CFGInfo->getFunction()->size();
2263  }
2264 };
2265 
2266 template <>
2267 struct DOTGraphTraits<DOTFuncMSSAInfo *> : public DefaultDOTGraphTraits {
2268 
2269  DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
2270 
2271  static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo) {
2272  return "MSSA CFG for '" + CFGInfo->getFunction()->getName().str() +
2273  "' function";
2274  }
2275 
2276  std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo) {
2277  return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(
2278  Node, nullptr,
2279  [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void {
2280  BB.print(OS, &CFGInfo->getWriter(), true, true);
2281  },
2282  [](std::string &S, unsigned &I, unsigned Idx) -> void {
2283  std::string Str = S.substr(I, Idx - I);
2284  StringRef SR = Str;
2285  if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||
2286  SR.count("MemoryUse("))
2287  return;
2288  DOTGraphTraits<DOTFuncInfo *>::eraseComment(S, I, Idx);
2289  });
2290  }
2291 
2292  static std::string getEdgeSourceLabel(const BasicBlock *Node,
2293  const_succ_iterator I) {
2294  return DOTGraphTraits<DOTFuncInfo *>::getEdgeSourceLabel(Node, I);
2295  }
2296 
2297  /// Display the raw branch weights from PGO.
2298  std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I,
2299  DOTFuncMSSAInfo *CFGInfo) {
2300  return "";
2301  }
2302 
2303  std::string getNodeAttributes(const BasicBlock *Node,
2304  DOTFuncMSSAInfo *CFGInfo) {
2305  return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos
2306  ? "style=filled, fillcolor=lightpink"
2307  : "";
2308  }
2309 };
2310 
2311 } // namespace llvm
2312 
2313 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
2314  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2315  if (DotCFGMSSA != "") {
2316  DOTFuncMSSAInfo CFGInfo(F, MSSA);
2317  WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
2318  } else
2319  MSSA.print(dbgs());
2320 
2321  if (VerifyMemorySSA)
2322  MSSA.verifyMemorySSA();
2323  return false;
2324 }
2325 
2326 AnalysisKey MemorySSAAnalysis::Key;
2327 
2328 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2329  FunctionAnalysisManager &AM) {
2330  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2331  auto &AA = AM.getResult<AAManager>(F);
2332  return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT));
2333 }
2334 
2335 bool MemorySSAAnalysis::Result::invalidate(
2336  Function &F, const PreservedAnalyses &PA,
2337  FunctionAnalysisManager::Invalidator &Inv) {
2338  auto PAC = PA.getChecker<MemorySSAAnalysis>();
2339  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2340  Inv.invalidate<AAManager>(F, PA) ||
2341  Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2342 }
2343 
2344 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2345  FunctionAnalysisManager &AM) {
2346  auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2347  if (DotCFGMSSA != "") {
2348  DOTFuncMSSAInfo CFGInfo(F, MSSA);
2349  WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
2350  } else {
2351  OS << "MemorySSA for function: " << F.getName() << "\n";
2352  MSSA.print(OS);
2353  }
2354 
2355  return PreservedAnalyses::all();
2356 }
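// A hedged usage sketch for the printer path above (pass and flag names are
// assumed to be registered in the usual way; output goes to the chosen stream):
//   opt -passes='print<memoryssa>' -disable-output test.ll
//   opt -passes='print<memoryssa>' -dot-cfg-mssa=out.dot -disable-output test.ll
// The first form prints the annotated function; the second writes the annotated
// CFG to a dot file via the DOTFuncMSSAInfo machinery defined earlier.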
2357 
2358 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2359  FunctionAnalysisManager &AM) {
2360  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2361 
2362  return PreservedAnalyses::all();
2363 }
2364 
2365 char MemorySSAWrapperPass::ID = 0;
2366 
2367 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2368  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2369 }
2370 
2371 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2372 
2373 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2374  AU.setPreservesAll();
2375  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2376  AU.addRequiredTransitive<AAResultsWrapperPass>();
2377 }
2378 
2379 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2380  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2381  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2382  MSSA.reset(new MemorySSA(F, &AA, &DT));
2383  return false;
2384 }
2385 
2386 void MemorySSAWrapperPass::verifyAnalysis() const {
2387  if (VerifyMemorySSA)
2388  MSSA->verifyMemorySSA();
2389 }
2390 
2391 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2392  MSSA->print(OS);
2393 }
2394 
2395 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2396 
2397 /// Walk the use-def chains starting at \p StartingAccess and find
2398 /// the MemoryAccess that actually clobbers Loc.
2399 ///
2400 /// \returns our clobbering memory access
2401 template <typename AliasAnalysisType>
2402 MemoryAccess *
2403 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2404  MemoryAccess *StartingAccess, const MemoryLocation &Loc,
2405  unsigned &UpwardWalkLimit) {
2406  assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");
2407 
2408  Instruction *I = nullptr;
2409  if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
2410  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2411  return StartingUseOrDef;
2412 
2413  I = StartingUseOrDef->getMemoryInst();
2414 
2415  // Conservatively, fences are always clobbers, so don't perform the walk if
2416  // we hit a fence.
2417  if (!isa<CallBase>(I) && I->isFenceLike())
2418  return StartingUseOrDef;
2419  }
2420 
2421  UpwardsMemoryQuery Q;
2422  Q.OriginalAccess = StartingAccess;
2423  Q.StartingLoc = Loc;
2424  Q.Inst = nullptr;
2425  Q.IsCall = false;
2426 
2427  // Unlike the other function, do not walk to the def of a def, because we are
2428  // handed something we already believe is the clobbering access.
2429  // We never set SkipSelf to true in Q in this method.
2430  MemoryAccess *Clobber =
2431  Walker.findClobber(StartingAccess, Q, UpwardWalkLimit);
2432  LLVM_DEBUG({
2433  dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
2434  if (I)
2435  dbgs() << " for instruction " << *I << "\n";
2436  dbgs() << " is " << *Clobber << "\n";
2437  });
2438  return Clobber;
2439 }
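// Client-side sketch of the location-based query above (assumes a MemorySSA
// &MSSA and a LoadInst *LI whose access already exists; illustrative only):
//   MemoryUseOrDef *Acc = MSSA.getMemoryAccess(LI);
//   MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(
//       Acc->getDefiningAccess(), MemoryLocation::get(LI));
// Unlike the instruction-based overload, this starts from an access the caller
// already believes clobbers the location and walks upward from there.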
2440 
2441 template <typename AliasAnalysisType>
2442 MemoryAccess *
2443 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2444  MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) {
2445  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2446  // If this is a MemoryPhi, we can't do anything.
2447  if (!StartingAccess)
2448  return MA;
2449 
2450  bool IsOptimized = false;
2451 
2452  // If this is an already optimized use or def, return the optimized result.
2453  // Note: Currently, we store the optimized def result in a separate field,
2454  // since we can't use the defining access.
2455  if (StartingAccess->isOptimized()) {
2456  if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2457  return StartingAccess->getOptimized();
2458  IsOptimized = true;
2459  }
2460 
2461  const Instruction *I = StartingAccess->getMemoryInst();
2462  // We can't sanely do anything with a fence, since they conservatively clobber
2463  // all memory, and have no locations to get pointers from to try to
2464  // disambiguate.
2465  if (!isa<CallBase>(I) && I->isFenceLike())
2466  return StartingAccess;
2467 
2468  UpwardsMemoryQuery Q(I, StartingAccess);
2469 
2470  if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
2471  MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2472  StartingAccess->setOptimized(LiveOnEntry);
2473  StartingAccess->setOptimizedAccessType(None);
2474  return LiveOnEntry;
2475  }
2476 
2477  MemoryAccess *OptimizedAccess;
2478  if (!IsOptimized) {
2479  // Start with the thing we already think clobbers this location
2480  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2481 
2482  // At this point, DefiningAccess may be the live on entry def.
2483  // If it is, we will not get a better result.
2484  if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2485  StartingAccess->setOptimized(DefiningAccess);
2486  StartingAccess->setOptimizedAccessType(None);
2487  return DefiningAccess;
2488  }
2489 
2490  OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2491  StartingAccess->setOptimized(OptimizedAccess);
2492  if (MSSA->isLiveOnEntryDef(OptimizedAccess))
2493  StartingAccess->setOptimizedAccessType(None);
2494  else if (Q.AR && *Q.AR == AliasResult::MustAlias)
2495  StartingAccess->setOptimizedAccessType(
2496  AliasResult(AliasResult::MustAlias));
2497  } else
2498  OptimizedAccess = StartingAccess->getOptimized();
2499 
2500  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2501  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2502  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2503  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2504 
2505  MemoryAccess *Result;
2506  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2507  isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
2508  assert(isa<MemoryDef>(Q.OriginalAccess));
2509  Q.SkipSelfAccess = true;
2510  Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
2511  } else
2512  Result = OptimizedAccess;
2513 
2514  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2515  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2516 
2517  return Result;
2518 }
2519 
2520 MemoryAccess *
2521 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2522  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2523  return Use->getDefiningAccess();
2524  return MA;
2525 }
2526 
2527 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2528  MemoryAccess *StartingAccess, const MemoryLocation &) {
2529  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2530  return Use->getDefiningAccess();
2531  return StartingAccess;
2532 }
2533 
2534 void MemoryPhi::deleteMe(DerivedUser *Self) {
2535  delete static_cast<MemoryPhi *>(Self);
2536 }
2537 
2538 void MemoryDef::deleteMe(DerivedUser *Self) {
2539  delete static_cast<MemoryDef *>(Self);
2540 }
2541 
2542 void MemoryUse::deleteMe(DerivedUser *Self) {
2543  delete static_cast<MemoryUse *>(Self);
2544 }
2545 
2546 bool upward_defs_iterator::IsGuaranteedLoopInvariant(Value *Ptr) const {
2547  auto IsGuaranteedLoopInvariantBase = [](Value *Ptr) {
2548  Ptr = Ptr->stripPointerCasts();
2549  if (!isa<Instruction>(Ptr))
2550  return true;
2551  return isa<AllocaInst>(Ptr);
2552  };
2553 
2554  Ptr = Ptr->stripPointerCasts();
2555  if (auto *I = dyn_cast<Instruction>(Ptr)) {
2556  if (I->getParent()->isEntryBlock())
2557  return true;
2558  }
2559  if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
2560  return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
2561  GEP->hasAllConstantIndices();
2562  }
2563  return IsGuaranteedLoopInvariantBase(Ptr);
2564 }
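// Illustrative examples for the predicate above (hypothetical IR outside the
// entry block):
//   %g = getelementptr i32, i32* %a, i64 1   ; invariant if %a is an alloca
//   %p = getelementptr i32, i32* %q, i64 %i  ; not guaranteed: non-constant index
// Non-instruction pointers (arguments, globals) and instructions defined in the
// entry block are always treated as guaranteed loop invariant.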
isOrdered
static bool isOrdered(const Instruction *I)
Definition: MemorySSA.cpp:1725
llvm::SuccIterator
Definition: CFG.h:139
llvm::PreservedAnalyses
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:155
llvm::BatchAAResults
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
Definition: AliasAnalysis.h:896
llvm::AAManager
A manager for alias analyses.
Definition: AliasAnalysis.h:1233
llvm::GraphTraits< DOTFuncMSSAInfo * >::getEntryNode
static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo)
Definition: MemorySSA.cpp:2246
llvm::predecessors
pred_range predecessors(BasicBlock *BB)
Definition: CFG.h:127
llvm::AliasResult::MayAlias
@ MayAlias
The two locations may or may not alias.
Definition: AliasAnalysis.h:102
LLVM_DUMP_METHOD
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition: Compiler.h:491
llvm::MemoryLocation::get
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
Definition: MemoryLocation.cpp:37
llvm
---------------------— PointerInfo ------------------------------------—
Definition: AllocatorList.h:23
llvm::tgtok::Def
@ Def
Definition: TGLexer.h:50
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::AArch64CC::AL
@ AL
Definition: AArch64BaseInfo.h:269
llvm::MemorySSA::getWritableBlockDefs
DefsList * getWritableBlockDefs(const BasicBlock *BB) const
Definition: MemorySSA.h:814
llvm::make_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Definition: iterator_range.h:53
llvm::SmallPtrSetImpl::erase
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
Definition: SmallPtrSet.h:378
Optional.h
llvm::StringRef::count
LLVM_NODISCARD size_t count(char C) const
Return the number of occurrences of C in the string.
Definition: StringRef.h:489
print
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
Definition: ArchiveWriter.cpp:147
llvm::Function::end
iterator end()
Definition: Function.h:780
llvm::AnalysisInfoMixin< MemorySSAAnalysis >::ID
static AnalysisKey * ID()
Returns an opaque, unique ID for this analysis type.
Definition: PassManager.h:407
llvm::isModOrRefSet
LLVM_NODISCARD bool isModOrRefSet(const ModRefInfo MRI)
Definition: AliasAnalysis.h:189
IntrinsicInst.h
AtomicOrdering.h
llvm::AnalysisManager::getResult
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:769
VerifyMemorySSAX
static cl::opt< bool, true > VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA), cl::Hidden, cl::desc("Enable verification of MemorySSA."))
llvm::MemorySSAAnnotatedWriter::MemorySSAAnnotatedWriter
MemorySSAAnnotatedWriter(const MemorySSA *M)
Definition: MemorySSA.cpp:112
llvm::Function
Definition: Function.h:61
llvm::cl::location
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:459
llvm::AnalysisManager::Invalidator::invalidate
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition: PassManager.h:674
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
Pass.h
llvm::MemoryDef::resetOptimized
void resetOptimized()
Definition: MemorySSA.h:413
llvm::raw_string_ostream
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:623
llvm::Target
Target - Wrapper for Target specific information.
Definition: TargetRegistry.h:137
llvm::MemorySSA::DefsList
simple_ilist< MemoryAccess, ilist_tag< MSSAHelpers::DefsOnlyTag > > DefsList
Definition: MemorySSA.h:760
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::MemorySSA::OptimizeUses::OptimizeUses
OptimizeUses(MemorySSA *MSSA, CachingWalker< BatchAAResults > *Walker, BatchAAResults *BAA, DominatorTree *DT)
Definition: MemorySSA.cpp:1289
llvm::RISCVFenceField::W
@ W
Definition: RISCVBaseInfo.h:181
llvm::User::dropAllReferences
void dropAllReferences()
Drop all references to operands.
Definition: User.h:299
llvm::isa
LLVM_NODISCARD bool isa(const Y &Val)
Definition: Casting.h:141
TemplateParamKind::Template
@ Template
llvm::DOTGraphTraits< DOTFuncMSSAInfo * >::getGraphName
static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo)
Definition: MemorySSA.cpp:2271
ErrorHandling.h
llvm::Function::getEntryBlock
const BasicBlock & getEntryBlock() const
Definition: Function.h:762
llvm::AllAnalysesOn
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition: PassManager.h:93
llvm::MemorySSA::dominates
bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
Definition: MemorySSA.cpp:2114
llvm::MemoryAccess::getID
unsigned getID() const
Used for debugging and tracking things about MemoryAccesses.
Definition: MemorySSA.h:669
llvm::MemorySSA::renamePass
void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, SmallPtrSetImpl< BasicBlock * > &Visited)
Definition: MemorySSA.h:827
llvm::AtomicOrdering::SequentiallyConsistent
@ SequentiallyConsistent
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:151
stores
hexagon widen stores
Definition: HexagonStoreWidening.cpp:118
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:143
llvm::DenseMapBase::erase
bool erase(const KeyT &Val)
Definition: DenseMap.h:302
llvm::MemorySSAWalker::MemorySSAWalker
MemorySSAWalker(MemorySSA *)
Definition: MemorySSA.cpp:2395
to
Should compile to
Definition: README.txt:449
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:321
llvm::MemorySSAWrapperPass::ID
static char ID
Definition: MemorySSA.h:969
llvm::MemorySSA::insertIntoListsBefore
void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, AccessList::iterator)
Definition: MemorySSA.cpp:1630
DenseMap.h
llvm::MemorySSA::getLiveOnEntryDef
MemoryAccess * getLiveOnEntryDef() const
Definition: MemorySSA.h:749
llvm::DominatorTreeBase::getNode
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Definition: GenericDomTree.h:351
llvm::Optional
Definition: APInt.h:33
T
#define T
Definition: Mips16ISelLowering.cpp:341
llvm::M68kBeads::Term
@ Term
Definition: M68kBaseInfo.h:71
llvm::AliasResult
The possible results of an alias query.
Definition: AliasAnalysis.h:81
llvm::SmallPtrSet
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:449
DOTFuncMSSAInfo
Definition: MemorySSA.cpp:2229
Hashing.h
llvm::MemorySSAAnnotatedWriter::emitInstructionAnnot
void emitInstructionAnnot(const Instruction *I, formatted_raw_ostream &OS) override
emitInstructionAnnot - This may be implemented to emit a string right before an instruction is emitte...
Definition: MemorySSA.cpp:120
llvm::MemoryPhi
Represents phi nodes for memory accesses.
Definition: MemorySSA.h:487
llvm::MemoryUse::print
void print(raw_ostream &OS) const
Definition: MemorySSA.cpp:2197
STLExtras.h
llvm::EnableMSSALoopDependency
cl::opt< bool > EnableMSSALoopDependency
Enables memory ssa as a dependency for loop passes.
llvm::MemorySSA::~MemorySSA
~MemorySSA()
Definition: MemorySSA.cpp:1255
llvm::SmallVectorImpl::pop_back_val
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:635
llvm::MemoryUse
Represents read-only accesses to memory.
Definition: MemorySSA.h:323
llvm::successors
succ_range successors(Instruction *I)
Definition: CFG.h:262
LLVM_ATTRIBUTE_UNUSED
#define LLVM_ATTRIBUTE_UNUSED
Definition: Compiler.h:188
llvm::DoNothingMemorySSAWalker::getClobberingMemoryAccess
MemoryAccess * getClobberingMemoryAccess(MemoryAccess *) override
Does the same thing as getClobberingMemoryAccess(const Instruction *I), but takes a MemoryAccess inst...
Definition: MemorySSA.cpp:2521
memssa
early cse memssa
Definition: EarlyCSE.cpp:1734
llvm::MemorySSA::verifyPrevDefInPhis
void verifyPrevDefInPhis(Function &F) const
Definition: MemorySSA.cpp:1883
llvm::detail::DenseSetImpl::insert
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
Use.h
llvm::MemorySSA::print
void print(raw_ostream &) const
Definition: MemorySSA.cpp:1858
llvm::iplist
An intrusive list with ownership and callbacks specified/controlled by ilist_traits,...
Definition: ilist.h:390
llvm::MemoryPhi::getIncomingBlock
BasicBlock * getIncomingBlock(unsigned I) const
Return incoming basic block number i.
Definition: MemorySSA.h:548
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:122
DepthFirstIterator.h
llvm::MemorySSAPrinterLegacyPass::ID
static char ID
Definition: MemorySSA.h:921
llvm::MemorySSA::insertIntoListsForBlock
void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, InsertionPlace)
Definition: MemorySSA.cpp:1598
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::RISCVFenceField::R
@ R
Definition: RISCVBaseInfo.h:180
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
llvm::lltok::equal
@ equal
Definition: LLToken.h:25
AliasAnalysis.h
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::DominatorTree::dominates
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:115
llvm::isAtLeastOrStrongerThan
bool isAtLeastOrStrongerThan(AtomicOrdering AO, AtomicOrdering Other)
Definition: AtomicOrdering.h:105
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:206
Instruction.h
CommandLine.h
FormattedStream.h
llvm::DenseMapInfo
Definition: APInt.h:34
llvm::all_of
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1547
llvm::MemorySSAWrapperPass::MemorySSAWrapperPass
MemorySSAWrapperPass()
Definition: MemorySSA.cpp:2367
memoryssa
memoryssa
Definition: MemorySSA.cpp:73
llvm::DOTGraphTraits< DOTFuncMSSAInfo * >::getNodeAttributes
std::string getNodeAttributes(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo)
Definition: MemorySSA.cpp:2303
llvm::MemorySSAPrinterPass::run
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Definition: MemorySSA.cpp:2344
llvm::MemorySSAWrapperPass
Legacy analysis pass which computes MemorySSA.
Definition: MemorySSA.h:965
llvm::DominatorTreeBase::getRootNode
DomTreeNodeBase< NodeT > * getRootNode()
getRootNode - This returns the entry node for the CFG of the function.
Definition: GenericDomTree.h:370
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition: PassRegistry.cpp:31
llvm::maximum
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2018 maximum semantics.
Definition: APFloat.h:1333
llvm::AAResults
Definition: AliasAnalysis.h:456
llvm::MemorySSA::isLiveOnEntryDef
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Definition: MemorySSA.h:745
llvm::MemorySSA::moveTo
void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where)
Definition: MemorySSA.cpp:1674
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::MemorySSAWrapperPass::releaseMemory
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
Definition: MemorySSA.cpp:2371
llvm::MemorySSAAnalysis::Result
Definition: MemorySSA.h:935
Intrinsics.h
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::DenseMapInfo< MemoryLocOrCall >::isEqual
static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS)
Definition: MemorySSA.cpp:220
llvm::ARM_PROC::A
@ A
Definition: ARMBaseInfo.h:34
llvm::MemorySSA::Beginning
@ Beginning
Definition: MemorySSA.h:795
llvm::MemorySSAPrinterLegacyPass::runOnFunction
bool runOnFunction(Function &) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
Definition: MemorySSA.cpp:2313
llvm::MemorySSAWrapperPass::print
void print(raw_ostream &OS, const Module *M=nullptr) const override
print - Print out the internal state of the pass.
Definition: MemorySSA.cpp:2391
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
llvm::AMDGPU::PALMD::Key
Key
PAL metadata keys.
Definition: AMDGPUMetadata.h:481
llvm::MemorySSAWalker::invalidateInfo
virtual void invalidateInfo(MemoryAccess *)
Given a memory access, invalidate anything this walker knows about that access.
Definition: MemorySSA.h:1054
llvm::Value::uses
iterator_range< use_iterator > uses()
Definition: Value.h:377
DenseSet.h
false
Definition: StackSlotColoring.cpp:142
llvm::MemorySSAPrinterLegacyPass::getAnalysisUsage
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
Definition: MemorySSA.cpp:2224
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
First
into llvm powi allowing the code generator to produce balanced multiplication trees First
Definition: README.txt:54
opt
arm prera ldst opt
Definition: ARMLoadStoreOptimizer.cpp:2191
llvm::Instruction
Definition: Instruction.h:45
llvm::MemorySSAAnalysis::run
Result run(Function &F, FunctionAnalysisManager &AM)
Definition: MemorySSA.cpp:2328
llvm::MemoryPhi::addIncoming
void addIncoming(MemoryAccess *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Definition: MemorySSA.h:569
llvm::DominatorTreeWrapperPass
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:281
llvm::AtomicOrdering::Acquire
@ Acquire
llvm::raw_ostream
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
llvm::MemorySSA::getBlockAccesses
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
Definition: MemorySSA.h:765
llvm::Use::getUser
User * getUser() const
Returns the User that contains this Use.
Definition: Use.h:73
llvm::MemorySSA::removeFromLookups
void removeFromLookups(MemoryAccess *)
Properly remove MA from all of MemorySSA's lookup tables.
Definition: MemorySSA.cpp:1804
SmallPtrSet.h
llvm::AnalysisManager::Invalidator
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:656
llvm::MemoryPhi::getNumIncomingValues
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: MemorySSA.h:535
llvm::MemoryUseOrDef::getMemoryInst
Instruction * getMemoryInst() const
Get the instruction that this MemoryUse represents.
Definition: MemorySSA.h:257
llvm::initializeMemorySSAPrinterLegacyPassPass
void initializeMemorySSAPrinterLegacyPassPass(PassRegistry &)
llvm::MCID::Call
@ Call
Definition: MCInstrDesc.h:153
llvm::isModSet
LLVM_NODISCARD bool isModSet(const ModRefInfo MRI)
Definition: AliasAnalysis.h:196
llvm::StringRef::str
LLVM_NODISCARD std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:245
llvm::None
const NoneType None
Definition: None.h:23
llvm::MemoryAccess::getBlock
BasicBlock * getBlock() const
Definition: MemorySSA.h:162
llvm::MemorySSAPrinterLegacyPass
Definition: MemorySSA.h:914
llvm::Value::use_empty
bool use_empty() const
Definition: Value.h:345
llvm::MemorySSAVerifierPass::run
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Definition: MemorySSA.cpp:2358
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:58
will
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can lr mov lr str ldr sxth r3 mla r4 and then merge mul and lr str ldr sxth r3 mla r4 It also increase the likelihood the store may become dead bb27 Successors according to LLVM ID Predecessors according to mbb< bb27, 0x8b0a7c0 > Note ADDri is not a two address instruction its result reg1037 is an operand of the PHI node in bb76 and its operand reg1039 is the result of the PHI node We should treat it as a two address code and make sure the ADDri is scheduled after any node that reads reg1039 Use info(i.e. register scavenger) to assign it a free register to allow reuse the collector could move the objects and invalidate the derived pointer This is bad enough in the first but safe points can crop up unpredictably **array_addr i32 n y store obj obj **nth_el If the i64 division is lowered to a then a safe point will(must) appear for the call site. If a collection occurs
llvm::MemorySSA::locallyDominates
bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in the same basic block, determine whether MemoryAccess A dominates MemoryA...
Definition: MemorySSA.cpp:2083
llvm::DOTGraphTraits
DOTGraphTraits - Template class that can be specialized to customize how graphs are converted to 'dot...
Definition: DOTGraphTraits.h:161
AssemblyAnnotationWriter.h
llvm::MemoryDef::print
void print(raw_ostream &OS) const
Definition: MemorySSA.cpp:2152
llvm::DOTGraphTraits< DOTFuncMSSAInfo * >::getNodeLabel
std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo)
Definition: MemorySSA.cpp:2276
llvm::DenseSet< ConstMemoryAccessPair >
llvm::MemorySSAWalker::getClobberingMemoryAccess
MemoryAccess * getClobberingMemoryAccess(const Instruction *I)
Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...
Definition: MemorySSA.h:1025
llvm::pointer_iterator
Definition: iterator.h:320
BasicBlock.h
llvm::cl::opt
Definition: CommandLine.h:1422
llvm::MemorySSAWrapperPass::runOnFunction
bool runOnFunction(Function &) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
Definition: MemorySSA.cpp:2379
SSA
Memory SSA
Definition: MemorySSA.cpp:73
llvm::RISCVFenceField::O
@ O
Definition: RISCVBaseInfo.h:179
llvm::MemorySSA::dump
void dump() const
Definition: MemorySSA.cpp:1864
CFGPrinter.h
llvm::DomTreeNodeBase::getBlock
NodeT * getBlock() const
Definition: GenericDomTree.h:88
llvm::GraphTraits< DOTFuncMSSAInfo * >::nodes_end
static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo)
Definition: MemorySSA.cpp:2257
D
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
llvm::MemoryAccess::print
void print(raw_ostream &OS) const
Definition: MemorySSA.cpp:2143
INITIALIZE_PASS_DEPENDENCY
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
llvm::iplist_impl< simple_ilist< T, Options... >, ilist_traits< T > >::iterator
base_list_type::iterator iterator
Definition: ilist.h:177
move
compiles ldr LCPI1_0 ldr ldr mov lsr tst moveq r1 ldr LCPI1_1 and r0 bx lr It would be better to do something like to fold the shift into the conditional move
Definition: README.txt:546
llvm::def_chain
iterator_range< def_chain_iterator< T > > def_chain(T MA, MemoryAccess *UpTo=nullptr)
Definition: MemorySSA.h:1329
llvm::MemorySSA::OptimizeUses::optimizeUses
void optimizeUses()
Optimize uses to point to their actual clobbering definitions.
Definition: MemorySSA.cpp:1483
MemoryLocation.h
llvm::DenseMap
Definition: DenseMap.h:714
llvm::AnalysisKey
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: PassManager.h:72
llvm::DOTGraphTraits< DOTFuncMSSAInfo * >::getEdgeAttributes
std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I, DOTFuncMSSAInfo *CFGInfo)
Display the raw branch weights from PGO.
Definition: MemorySSA.cpp:2298
llvm::MemorySSA
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition: MemorySSA.h:708
iterator.h
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::MemorySSA::getMemoryAccess
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition: MemorySSA.h:725
StringExtras.h
llvm::iterator_facade_base
CRTP base class which implements the entire standard iterator facade in terms of a minimal subset of ...
Definition: iterator.h:66
llvm::detail::DenseSetImpl::empty
bool empty() const
Definition: DenseSet.h:80
DOTFuncMSSAInfo::getWriter
MemorySSAAnnotatedWriter & getWriter()
Definition: MemorySSA.cpp:2239
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
llvm::is_contained
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:1612
llvm::DOTGraphTraits< DOTFuncMSSAInfo * >::getEdgeSourceLabel
static std::string getEdgeSourceLabel(const BasicBlock *Node, const_succ_iterator I)
Definition: MemorySSA.cpp:2292
llvm::MemoryDef
Represents a read-write access to memory, whether it is a must-alias, or a may-alias.
Definition: MemorySSA.h:380
llvm::MemorySSA::getDomTree
DominatorTree & getDomTree() const
Definition: MemorySSA.h:733
llvm::DenseMapBase::find
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:150
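A generic illustration (toy key/value types, not tied to this file) of the find-then-insert pattern these DenseMap methods support:

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/BasicBlock.h"
using namespace llvm;

// Look up a per-block counter, creating a zero entry on first use.
// insert() returns {iterator, bool}; .first points at the (new or old) entry.
static unsigned &countFor(DenseMap<const BasicBlock *, unsigned> &Counts,
                          const BasicBlock *BB) {
  auto It = Counts.find(BB);
  if (It != Counts.end())
    return It->second;
  return Counts.insert({BB, 0u}).first->second;
}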
llvm::MemorySSA::InsertionPlace
InsertionPlace
Used in various insertion functions to specify whether we are talking about the beginning or end of a...
Definition: MemorySSA.h:795
llvm::ModRefInfo
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition: AliasAnalysis.h:148
llvm::formatted_raw_ostream
formatted_raw_ostream - A raw_ostream that wraps another one and keeps track of line and column posit...
Definition: FormattedStream.h:30
llvm::MachineBasicBlock::size
unsigned size() const
Definition: MachineBasicBlock.h:239
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:840
llvm::upward_defs_begin
upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT, bool *PerformedPhiTranslation=nullptr)
Definition: MemorySSA.h:1273
llvm::operator==
bool operator==(uint64_t V1, const APInt &V2)
Definition: APInt.h:2030
llvm::DominatorTree::isReachableFromEntry
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:328
llvm::IDFCalculator
Definition: IteratedDominanceFrontier.h:39
llvm::WriteGraph
raw_ostream & WriteGraph(raw_ostream &O, const GraphType &G, bool ShortNames=false, const Twine &Title="")
Definition: GraphWriter.h:307
iterator_range.h
llvm::GraphTraits< const BasicBlock * >
Definition: CFG.h:310
llvm::GraphTraits< DOTFuncMSSAInfo * >::size
static size_t size(DOTFuncMSSAInfo *CFGInfo)
Definition: MemorySSA.cpp:2261
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
llvm::MemorySSA::verifyMemorySSA
void verifyMemorySSA() const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
Definition: MemorySSA.cpp:1867
llvm::MemorySSAAnalysis
An analysis that produces MemorySSA for a function.
Definition: MemorySSA.h:926
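A minimal sketch (the pass name MSSAConsumerPass is hypothetical) of how a new-pass-manager function pass obtains MemorySSA from this analysis and optionally verifies it:

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
using namespace llvm;

struct MSSAConsumerPass : PassInfoMixin<MSSAConsumerPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
    if (VerifyMemorySSA)          // -verify-memoryssa / EXPENSIVE_CHECKS builds
      MSSA.verifyMemorySSA();
    // ... query MSSA.getWalker(), MSSA.getMemoryAccess(...), etc. ...
    return PreservedAnalyses::all();  // analysis-only: nothing is transformed
  }
};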
llvm::MemorySSA::getWalker
MemorySSAWalker * getWalker()
Definition: MemorySSA.cpp:1566
llvm::MemoryPhi::getIncomingValue
MemoryAccess * getIncomingValue(unsigned I) const
Return incoming value number x.
Definition: MemorySSA.h:538
llvm::MemorySSAWrapperPass::getAnalysisUsage
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
Definition: MemorySSA.cpp:2373
llvm::SmallPtrSetImpl::count
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:382
llvm::detail::DenseSetImpl::clear
void clear()
Definition: DenseSet.h:92
llvm::MemorySSA::createDefinedAccess
MemoryUseOrDef * createDefinedAccess(Instruction *, MemoryAccess *, const MemoryUseOrDef *Template=nullptr, bool CreationMustSucceed=true)
Definition: MemorySSA.cpp:1705
llvm::operator*
APInt operator*(APInt a, uint64_t RHS)
Definition: APInt.h:2154
llvm::MemoryAccessPair
std::pair< MemoryAccess *, MemoryLocation > MemoryAccessPair
Definition: MemorySSA.h:1075
llvm::MemorySSA::verifyOrderingDominationAndDefUses
void verifyOrderingDominationAndDefUses(Function &F) const
Verify ordering: the order and existence of MemoryAccesses matches the order and existence of memory ...
Definition: MemorySSA.cpp:1961
None.h
llvm::sys::Memory
This class provides various memory handling functions that manipulate MemoryBlock instances.
Definition: Memory.h:52
llvm::DenseMapInfo< MemoryLocOrCall >::getHashValue
static unsigned getHashValue(const MemoryLocOrCall &MLOC)
Definition: MemorySSA.cpp:205
llvm::MemorySSA::getSkipSelfWalker
MemorySSAWalker * getSkipSelfWalker()
Definition: MemorySSA.cpp:1581
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
Printer
Memory SSA Printer
Definition: MemorySSA.cpp:80
llvm::simple_ilist::rbegin
reverse_iterator rbegin()
Definition: simple_ilist.h:121
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
llvm::MemorySSA::getBlockDefs
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
Definition: MemorySSA.h:773
llvm::AliasResult::NoAlias
@ NoAlias
The two locations do not alias at all.
Definition: AliasAnalysis.h:99
llvm::DefaultDOTGraphTraits::getEdgeSourceLabel
static std::string getEdgeSourceLabel(const void *, EdgeIter)
getEdgeSourceLabel - If you want to label the edge source itself, implement this method.
Definition: DOTGraphTraits.h:107
Compiler.h
llvm::MemoryPhi::setIncomingValue
void setIncomingValue(unsigned I, MemoryAccess *V)
Definition: MemorySSA.h:539
llvm::DomTreeNodeBase::begin
iterator begin()
Definition: GenericDomTree.h:75
llvm::AliasResult::MustAlias
@ MustAlias
The two locations precisely alias each other.
Definition: AliasAnalysis.h:106
llvm::LoadInst::getOrdering
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:232
llvm::MemoryAccess
Definition: MemorySSA.h:140
llvm::DomTreeNodeBase< BasicBlock >
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:297
llvm::LoadInst
An instruction for reading from memory.
Definition: Instructions.h:175
llvm::DenseMapBase::insert
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:207
llvm::Init
Definition: Record.h:271
llvm::Function::begin
iterator begin()
Definition: Function.h:778
llvm::DenseMapInfo< MemoryLocOrCall >::getEmptyKey
static MemoryLocOrCall getEmptyKey()
Definition: MemorySSA.cpp:197
MaxCheckLimit
static cl::opt< unsigned > MaxCheckLimit("memssa-check-limit", cl::Hidden, cl::init(100), cl::desc("The maximum number of stores/phis MemorySSA" "will consider trying to walk past (default = 100)"))
llvm::Value::stripPointerCasts
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:675
llvm::find_if
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1574
llvm::MemoryAccess::setBlock
void setBlock(BasicBlock *BB)
Used by MemorySSA to change the block of a MemoryAccess when it is moved.
Definition: MemorySSA.h:215
llvm::MemorySSA::OptimizeUses
This class is a batch walker of all MemoryUse's in the program, and points their defining access at t...
Definition: MemorySSA.cpp:1287
llvm::depth_first
iterator_range< df_iterator< T > > depth_first(const T &G)
Definition: DepthFirstIterator.h:229
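A small sketch of the depth_first adaptor over the CFG (counting blocks reachable from the entry block; the helper countReachableBlocks is illustrative only):

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Function.h"
using namespace llvm;

// Visit blocks reachable from the entry block in depth-first preorder.
static unsigned countReachableBlocks(Function &F) {
  unsigned N = 0;
  for (BasicBlock *BB : depth_first(&F.getEntryBlock())) {
    (void)BB;
    ++N;
  }
  return N;
}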
isUseTriviallyOptimizableToLiveOnEntry
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA, const Instruction *I)
Definition: MemorySSA.cpp:363
DOTFuncMSSAInfo::getFunction
const Function * getFunction()
Definition: MemorySSA.cpp:2238
llvm::AnalysisUsage::setPreservesAll
void setPreservesAll()
Set by analyses that do not transform their input at all.
Definition: PassAnalysisSupport.h:130
llvm::GraphTraits< DOTFuncMSSAInfo * >::nodes_begin
static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo)
Definition: MemorySSA.cpp:2253
llvm::MemoryUseOrDef::getDefiningAccess
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition: MemorySSA.h:260
llvm::MemoryPhi::print
void print(raw_ostream &OS) const
Definition: MemorySSA.cpp:2175
llvm::DenseMapBase::end
iterator end()
Definition: DenseMap.h:83
llvm::isMustSet
LLVM_NODISCARD bool isMustSet(const ModRefInfo MRI)
Definition: AliasAnalysis.h:202
areLoadsReorderable
static bool areLoadsReorderable(const LoadInst *Use, const LoadInst *MayClobber)
This does one-way checks to see if Use could theoretically be hoisted above MayClobber.
Definition: MemorySSA.cpp:233
llvm::PreservedAnalyses::all
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:161
llvm::AssemblyAnnotationWriter
Definition: AssemblyAnnotationWriter.h:27
checkClobberSanity
static LLVM_ATTRIBUTE_UNUSED void checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt, const MemoryLocation &StartLoc, const MemorySSA &MSSA, const UpwardsMemoryQuery &Query, AliasAnalysisType &AA, bool AllowImpreciseClobber=false)
Verifies that Start is clobbered by ClobberAt, and that nothing in between Start and ClobberAt can clo...
Definition: MemorySSA.cpp:389
llvm::pred_end
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:112
Casting.h
LiveOnEntryStr
const static char LiveOnEntryStr[]
Definition: MemorySSA.cpp:2141
Function.h
llvm::MemorySSAWrapperPass::verifyAnalysis
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by an analysis pass to check state of analysis infor...
Definition: MemorySSA.cpp:2386
PassManager.h
instructionClobbersQuery
static ClobberAlias instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc, const Instruction *UseInst, AliasAnalysisType &AA)
Definition: MemorySSA.cpp:270
llvm::MemorySSA::removeFromLists
void removeFromLists(MemoryAccess *, bool ShouldDelete=true)
Properly remove MA from all of MemorySSA's lists.
Definition: MemorySSA.cpp:1831
llvm::MemoryUseOrDef
Class that has the common methods + fields of memory uses/defs.
Definition: MemorySSA.h:250
llvm::cl::value_desc
Definition: CommandLine.h:424
llvm::SmallVectorImpl::clear
void clear()
Definition: SmallVector.h:585
llvm::MemoryAccess::dump
void dump() const
Definition: MemorySSA.cpp:2210
llvm::MemorySSA::getWritableBlockAccesses
AccessList * getWritableBlockAccesses(const BasicBlock *BB) const
Definition: MemorySSA.h:808
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:45
IteratedDominanceFrontier.h
llvm::pred_begin
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:109
llvm::DominatorTreeAnalysis
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:252
MemorySSA.h
llvm::DomTreeNodeBase< BasicBlock >::const_iterator
typename SmallVector< DomTreeNodeBase *, 4 >::const_iterator const_iterator
Definition: GenericDomTree.h:73
Instructions.h
llvm::User::getNumOperands
unsigned getNumOperands() const
Definition: User.h:191
llvm::LoadInst::isVolatile
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:212
SmallVector.h
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:268
llvm::hash_combine
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition: Hashing.h:604
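An illustrative sketch (the key type BlockOffsetKey is made up for the example) of the common pattern of hashing a composite key with hash_combine inside a DenseMapInfo specialization:

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"

struct BlockOffsetKey {
  const void *Block;
  unsigned Offset;
};

namespace llvm {
template <> struct DenseMapInfo<BlockOffsetKey> {
  static BlockOffsetKey getEmptyKey() { return {nullptr, ~0u}; }
  static BlockOffsetKey getTombstoneKey() { return {nullptr, ~0u - 1}; }
  static unsigned getHashValue(const BlockOffsetKey &K) {
    // hash_combine folds all fields into one hash_code; narrow to unsigned.
    return static_cast<unsigned>(hash_combine(K.Block, K.Offset));
  }
  static bool isEqual(const BlockOffsetKey &L, const BlockOffsetKey &R) {
    return L.Block == R.Block && L.Offset == R.Offset;
  }
};
} // namespace llvm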
llvm::isRefSet
LLVM_NODISCARD bool isRefSet(const ModRefInfo MRI)
Definition: AliasAnalysis.h:199
Dominators.h
llvm::SmallPtrSetImplBase::empty
LLVM_NODISCARD bool empty() const
Definition: SmallPtrSet.h:91
llvm::AAResultsWrapperPass
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Definition: AliasAnalysis.h:1281
llvm::initializeMemorySSAWrapperPassPass
void initializeMemorySSAWrapperPassPass(PassRegistry &)
llvm::iterator_range
A range adaptor for a pair of iterators.
Definition: iterator_range.h:30
llvm::MemorySSA::MemorySSA
MemorySSA(Function &, AliasAnalysis *, DominatorTree *)
Definition: MemorySSA.cpp:1237
llvm::simple_ilist
A simple intrusive list implementation.
Definition: simple_ilist.h:78
DOTFuncMSSAInfo::DOTFuncMSSAInfo
DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
Definition: MemorySSA.cpp:2235
llvm::MemorySSA::AccessList
iplist< MemoryAccess, ilist_tag< MSSAHelpers::AllAccessTag > > AccessList
Definition: MemorySSA.h:758
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::DefaultDOTGraphTraits
DefaultDOTGraphTraits - This class provides the default implementations of all of the DOTGraphTraits ...
Definition: DOTGraphTraits.h:28
DenseMapInfo.h
llvm::MemorySSAAnalysis::Result::invalidate
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
Definition: LoopAnalysisManager.cpp:31
llvm::MemorySSAAnnotatedWriter::emitBasicBlockStartAnnot
void emitBasicBlockStartAnnot(const BasicBlock *BB, formatted_raw_ostream &OS) override
emitBasicBlockStartAnnot - This may be implemented to emit a string right after the basic block label...
Definition: MemorySSA.cpp:114
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1161
llvm::AnalysisUsage::addRequiredTransitive
AnalysisUsage & addRequiredTransitive()
Definition: PassAnalysisSupport.h:81
llvm::SmallPtrSetImpl
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:343
llvm::PreservedAnalyses::getChecker
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Definition: PassManager.h:313
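A hedged sketch of how an analysis result typically consults this checker during invalidation (simplified; the helper resultIsStale is hypothetical, and a full invalidate() would also re-check its own dependencies via the Invalidator):

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
using namespace llvm;

// The result stays valid only if MemorySSAAnalysis itself, or the
// "all analyses on Function" set, was marked preserved.
static bool resultIsStale(const PreservedAnalyses &PA) {
  auto PAC = PA.getChecker<MemorySSAAnalysis>();
  return !PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>();
}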
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition: InstructionSimplify.h:44
llvm::MemorySSA::verifyDominationNumbers
void verifyDominationNumbers(const Function &F) const
Verify that all of the blocks we believe to have valid domination numbers actually have valid dominat...
Definition: MemorySSA.cpp:1919
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
INITIALIZE_PASS_BEGIN
INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false, true) INITIALIZE_PASS_END(MemorySSAWrapperPass
llvm::upward_defs_end
upward_defs_iterator upward_defs_end()
Definition: MemorySSA.h:1278
llvm::MemorySSAUtil::defClobbersUseOrDef
static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, AliasAnalysis &AA)
Definition: MemorySSA.cpp:331
llvm::AnalysisUsage::addRequired
AnalysisUsage & addRequired()
Definition: PassAnalysisSupport.h:75
llvm::GraphTraits
Definition: GraphTraits.h:35
llvm::VerifyMemorySSA
bool VerifyMemorySSA
Enables verification of MemorySSA.
Definition: MemorySSA.cpp:91
LLVMContext.h
llvm::MemorySSAWalker
This is the generic walker interface for walkers of MemorySSA.
Definition: MemorySSA.h:996
llvm::cl::desc
Definition: CommandLine.h:414
llvm::MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass
MemorySSAPrinterLegacyPass()
Definition: MemorySSA.cpp:2220
raw_ostream.h
llvm::MemoryUseOrDef::setDefiningAccess
void setDefiningAccess(MemoryAccess *DMA, bool Optimized=false, Optional< AliasResult > AR=AliasResult(AliasResult::MayAlias))
Definition: MemorySSA.h:302
llvm::DenseMapInfo< MemoryLocOrCall >::getTombstoneKey
static MemoryLocOrCall getTombstoneKey()
Definition: MemorySSA.cpp:201
InitializePasses.h
llvm::Value
LLVM Value Representation.
Definition: Value.h:75
DotCFGMSSA
static cl::opt< std::string > DotCFGMSSA("dot-cfg-mssa", cl::value_desc("file name for generated dot file"), cl::desc("file name for generated dot file"), cl::init(""))
Debug.h
llvm::DerivedUser
Extension point for the Value hierarchy.
Definition: DerivedUser.h:27
llvm::find_if_not
auto find_if_not(R &&Range, UnaryPredicate P)
Definition: STLExtras.h:1579
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:270
llvm::DOTGraphTraits< DOTFuncMSSAInfo * >::DOTGraphTraits
DOTGraphTraits(bool IsSimple=false)
Definition: MemorySSA.cpp:2269
llvm::ModRefInfo::ModRef
@ ModRef
The access may reference and may modify the value stored in memory.
llvm::MemoryLocation
Representation for a specific memory location.
Definition: MemoryLocation.h:209
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:44
llvm::MemorySSAAnnotatedWriter
An assembly annotator class to print Memory SSA information in comments.
Definition: MemorySSA.cpp:106
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:908
llvm::Function::size
size_t size() const
Definition: Function.h:783
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:364
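A tiny sketch of the visited-set idiom this return value enables (generic example; markVisited is not a function from this file):

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

// insert() returns {iterator, bool}; the bool is true only on first insertion,
// so it doubles as an "already visited?" test.
static bool markVisited(SmallPtrSet<const MemoryAccess *, 8> &Visited,
                        const MemoryAccess *MA) {
  return Visited.insert(MA).second;
}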
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:38
llvm::hash_code
An opaque object representing a hash code.
Definition: Hashing.h:72