File: lib/Analysis/MemorySSA.cpp
Warning: line 1139, column 5: Value stored to 'Walker' is never read
1 | //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===// |
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | // This file implements the MemorySSA class. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "llvm/Analysis/MemorySSA.h" |
15 | #include "llvm/ADT/DenseMap.h" |
16 | #include "llvm/ADT/DenseMapInfo.h" |
17 | #include "llvm/ADT/DenseSet.h" |
18 | #include "llvm/ADT/DepthFirstIterator.h" |
19 | #include "llvm/ADT/Hashing.h" |
20 | #include "llvm/ADT/None.h" |
21 | #include "llvm/ADT/Optional.h" |
22 | #include "llvm/ADT/STLExtras.h" |
23 | #include "llvm/ADT/SmallPtrSet.h" |
24 | #include "llvm/ADT/SmallVector.h" |
25 | #include "llvm/ADT/iterator.h" |
26 | #include "llvm/ADT/iterator_range.h" |
27 | #include "llvm/Analysis/AliasAnalysis.h" |
28 | #include "llvm/Analysis/IteratedDominanceFrontier.h" |
29 | #include "llvm/Analysis/MemoryLocation.h" |
30 | #include "llvm/Config/llvm-config.h" |
31 | #include "llvm/IR/AssemblyAnnotationWriter.h" |
32 | #include "llvm/IR/BasicBlock.h" |
33 | #include "llvm/IR/CallSite.h" |
34 | #include "llvm/IR/Dominators.h" |
35 | #include "llvm/IR/Function.h" |
36 | #include "llvm/IR/Instruction.h" |
37 | #include "llvm/IR/Instructions.h" |
38 | #include "llvm/IR/IntrinsicInst.h" |
39 | #include "llvm/IR/Intrinsics.h" |
40 | #include "llvm/IR/LLVMContext.h" |
41 | #include "llvm/IR/PassManager.h" |
42 | #include "llvm/IR/Use.h" |
43 | #include "llvm/Pass.h" |
44 | #include "llvm/Support/AtomicOrdering.h" |
45 | #include "llvm/Support/Casting.h" |
46 | #include "llvm/Support/CommandLine.h" |
47 | #include "llvm/Support/Compiler.h" |
48 | #include "llvm/Support/Debug.h" |
49 | #include "llvm/Support/ErrorHandling.h" |
50 | #include "llvm/Support/FormattedStream.h" |
51 | #include "llvm/Support/raw_ostream.h" |
52 | #include <algorithm> |
53 | #include <cassert> |
54 | #include <iterator> |
55 | #include <memory> |
56 | #include <utility> |
57 | |
58 | using namespace llvm; |
59 | |
60 | #define DEBUG_TYPE"memoryssa" "memoryssa" |
61 | |
62 | INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false, |
63 | true) |
64 | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) |
65 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) |
66 | INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false, |
67 | true) |
68 | |
69 | INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa", |
70 | "Memory SSA Printer", false, false) |
71 | INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) |
72 | INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa", |
73 | "Memory SSA Printer", false, false) |
74 | |
75 | static cl::opt<unsigned> MaxCheckLimit( |
76 | "memssa-check-limit", cl::Hidden, cl::init(100), |
77 | cl::desc("The maximum number of stores/phis MemorySSA" |
78 | "will consider trying to walk past (default = 100)")); |
79 | |
80 | // Always verify MemorySSA if expensive checking is enabled. |
81 | #ifdef EXPENSIVE_CHECKS |
82 | bool llvm::VerifyMemorySSA = true; |
83 | #else |
84 | bool llvm::VerifyMemorySSA = false; |
85 | #endif |
86 | static cl::opt<bool, true> |
87 | VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA), |
88 | cl::Hidden, cl::desc("Enable verification of MemorySSA.")); |
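// Example (editor's note, not part of the original source): both flags above
// can be exercised from 'opt', e.g.
//
//   opt -disable-output -passes='print<memoryssa>' -verify-memoryssa \
//       -memssa-check-limit=50 input.ll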
89 | |
90 | namespace llvm { |
91 | |
92 | /// An assembly annotator class to print Memory SSA information in |
93 | /// comments. |
94 | class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter { |
95 | friend class MemorySSA; |
96 | |
97 | const MemorySSA *MSSA; |
98 | |
99 | public: |
100 | MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {} |
101 | |
102 | void emitBasicBlockStartAnnot(const BasicBlock *BB, |
103 | formatted_raw_ostream &OS) override { |
104 | if (MemoryAccess *MA = MSSA->getMemoryAccess(BB)) |
105 | OS << "; " << *MA << "\n"; |
106 | } |
107 | |
108 | void emitInstructionAnnot(const Instruction *I, |
109 | formatted_raw_ostream &OS) override { |
110 | if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) |
111 | OS << "; " << *MA << "\n"; |
112 | } |
113 | }; |
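// Example (editor's illustration): with this annotator installed, printed IR
// carries MemorySSA information in comments, roughly:
//
//   ; 1 = MemoryDef(liveOnEntry)
//   store i8 0, i8* %p
//   ; MemoryUse(1)
//   %v = load i8, i8* %p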
114 | |
115 | } // end namespace llvm |
116 | |
117 | namespace { |
118 | |
119 | /// Our current alias analysis API differentiates heavily between calls and |
120 | /// non-calls, and functions called on one usually assert on the other. |
121 | /// This class encapsulates the distinction to simplify other code that wants |
122 | /// "Memory affecting instructions and related data" to use as a key. |
123 | /// For example, this class is used as a DenseMap key in the use optimizer. |
124 | class MemoryLocOrCall { |
125 | public: |
126 | bool IsCall = false; |
127 | |
128 | MemoryLocOrCall(MemoryUseOrDef *MUD) |
129 | : MemoryLocOrCall(MUD->getMemoryInst()) {} |
130 | MemoryLocOrCall(const MemoryUseOrDef *MUD) |
131 | : MemoryLocOrCall(MUD->getMemoryInst()) {} |
132 | |
133 | MemoryLocOrCall(Instruction *Inst) { |
134 | if (ImmutableCallSite(Inst)) { |
135 | IsCall = true; |
136 | CS = ImmutableCallSite(Inst); |
137 | } else { |
138 | IsCall = false; |
139 | // There is no such thing as a MemoryLocation for a fence inst, and it is |
140 | // unique in that regard. |
141 | if (!isa<FenceInst>(Inst)) |
142 | Loc = MemoryLocation::get(Inst); |
143 | } |
144 | } |
145 | |
146 | explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {} |
147 | |
148 | ImmutableCallSite getCS() const { |
149 | assert(IsCall); |
150 | return CS; |
151 | } |
152 | |
153 | MemoryLocation getLoc() const { |
154 | assert(!IsCall); |
155 | return Loc; |
156 | } |
157 | |
158 | bool operator==(const MemoryLocOrCall &Other) const { |
159 | if (IsCall != Other.IsCall) |
160 | return false; |
161 | |
162 | if (!IsCall) |
163 | return Loc == Other.Loc; |
164 | |
165 | if (CS.getCalledValue() != Other.CS.getCalledValue()) |
166 | return false; |
167 | |
168 | return CS.arg_size() == Other.CS.arg_size() && |
169 | std::equal(CS.arg_begin(), CS.arg_end(), Other.CS.arg_begin()); |
170 | } |
171 | |
172 | private: |
173 | union { |
174 | ImmutableCallSite CS; |
175 | MemoryLocation Loc; |
176 | }; |
177 | }; |
178 | |
179 | } // end anonymous namespace |
180 | |
181 | namespace llvm { |
182 | |
183 | template <> struct DenseMapInfo<MemoryLocOrCall> { |
184 | static inline MemoryLocOrCall getEmptyKey() { |
185 | return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey()); |
186 | } |
187 | |
188 | static inline MemoryLocOrCall getTombstoneKey() { |
189 | return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey()); |
190 | } |
191 | |
192 | static unsigned getHashValue(const MemoryLocOrCall &MLOC) { |
193 | if (!MLOC.IsCall) |
194 | return hash_combine( |
195 | MLOC.IsCall, |
196 | DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc())); |
197 | |
198 | hash_code hash = |
199 | hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue( |
200 | MLOC.getCS().getCalledValue())); |
201 | |
202 | for (const Value *Arg : MLOC.getCS().args()) |
203 | hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg)); |
204 | return hash; |
205 | } |
206 | |
207 | static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) { |
208 | return LHS == RHS; |
209 | } |
210 | }; |
211 | |
212 | } // end namespace llvm |
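// Example (editor's sketch): the specialization above is what allows
// MemoryLocOrCall to serve as a DenseMap key, e.g. to cache one clobber per
// location-or-call (the names below are hypothetical):
//
//   DenseMap<MemoryLocOrCall, MemoryAccess *> Cache;
//   Cache[MemoryLocOrCall(MUD)] = Clobber; // MUD: some MemoryUseOrDef *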
213 | |
214 | /// This does one-way checks to see if Use could theoretically be hoisted above |
215 | /// MayClobber. This will not check the other way around. |
216 | /// |
217 | /// This assumes that, for the purposes of MemorySSA, Use comes directly after |
218 | /// MayClobber, with no potentially clobbering operations in between them. |
219 | /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.) |
220 | static bool areLoadsReorderable(const LoadInst *Use, |
221 | const LoadInst *MayClobber) { |
222 | bool VolatileUse = Use->isVolatile(); |
223 | bool VolatileClobber = MayClobber->isVolatile(); |
224 | // Volatile operations may never be reordered with other volatile operations. |
225 | if (VolatileUse && VolatileClobber) |
226 | return false; |
227 | // Otherwise, volatile doesn't matter here. From the language reference: |
228 | // 'optimizers may change the order of volatile operations relative to |
229 | // non-volatile operations.' |
230 | |
231 | // If a load is seq_cst, it cannot be moved above other loads. If its ordering |
232 | // is weaker, it can be moved above other loads. We just need to be sure that |
233 | // MayClobber isn't an acquire load, because loads can't be moved above |
234 | // acquire loads. |
235 | // |
236 | // Note that this explicitly *does* allow the free reordering of monotonic (or |
237 | // weaker) loads of the same address. |
238 | bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent; |
239 | bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(), |
240 | AtomicOrdering::Acquire); |
241 | return !(SeqCstUse || MayClobberIsAcquire); |
242 | } |
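// Example (editor's illustration): given
//
//   %a = load atomic i32, i32* %p acquire, align 4    ; MayClobber
//   %b = load atomic i32, i32* %p monotonic, align 4  ; Use
//
// this returns false, because no load may be hoisted above an acquire load;
// two monotonic loads of %p, by contrast, are freely reorderable.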
243 | |
244 | namespace { |
245 | |
246 | struct ClobberAlias { |
247 | bool IsClobber; |
248 | Optional<AliasResult> AR; |
249 | }; |
250 | |
251 | } // end anonymous namespace |
252 | |
253 | // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being |
254 | // ignored if IsClobber = false. |
255 | static ClobberAlias instructionClobbersQuery(const MemoryDef *MD, |
256 | const MemoryLocation &UseLoc, |
257 | const Instruction *UseInst, |
258 | AliasAnalysis &AA) { |
259 | Instruction *DefInst = MD->getMemoryInst(); |
260 | assert(DefInst && "Defining instruction not actually an instruction"); |
261 | ImmutableCallSite UseCS(UseInst); |
262 | Optional<AliasResult> AR; |
263 | |
264 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) { |
265 | // These intrinsics will show up as affecting memory, but they are just |
266 | // markers, mostly. |
267 | // |
268 | // FIXME: We probably don't actually want MemorySSA to model these at all |
269 | // (including creating MemoryAccesses for them): we just end up inventing |
270 | // clobbers where they don't really exist at all. Please see D43269 for |
271 | // context. |
272 | switch (II->getIntrinsicID()) { |
273 | case Intrinsic::lifetime_start: |
274 | if (UseCS) |
275 | return {false, NoAlias}; |
276 | AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc); |
277 | return {AR != NoAlias, AR}; |
278 | case Intrinsic::lifetime_end: |
279 | case Intrinsic::invariant_start: |
280 | case Intrinsic::invariant_end: |
281 | case Intrinsic::assume: |
282 | return {false, NoAlias}; |
283 | default: |
284 | break; |
285 | } |
286 | } |
287 | |
288 | if (UseCS) { |
289 | ModRefInfo I = AA.getModRefInfo(DefInst, UseCS); |
290 | AR = isMustSet(I) ? MustAlias : MayAlias; |
291 | return {isModOrRefSet(I), AR}; |
292 | } |
293 | |
294 | if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) |
295 | if (auto *UseLoad = dyn_cast<LoadInst>(UseInst)) |
296 | return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias}; |
297 | |
298 | ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc); |
299 | AR = isMustSet(I) ? MustAlias : MayAlias; |
300 | return {isModSet(I), AR}; |
301 | } |
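// Example (editor's illustration): for
//
//   call void @llvm.lifetime.start.p0i8(i64 4, i8* %buf)  ; the MemoryDef
//   %v = load i8, i8* %buf                                ; the use
//
// the query above reports a clobber only if %buf aliases the load's location;
// lifetime.end, invariant.start/end, and assume are never reported as
// clobbers here.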
302 | |
303 | static ClobberAlias instructionClobbersQuery(MemoryDef *MD, |
304 | const MemoryUseOrDef *MU, |
305 | const MemoryLocOrCall &UseMLOC, |
306 | AliasAnalysis &AA) { |
307 | // FIXME: This is a temporary hack to allow a single instructionClobbersQuery |
308 | // to exist while MemoryLocOrCall is pushed through places. |
309 | if (UseMLOC.IsCall) |
310 | return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(), |
311 | AA); |
312 | return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(), |
313 | AA); |
314 | } |
315 | |
316 | // Return true when MD may alias MU, return false otherwise. |
317 | bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, |
318 | AliasAnalysis &AA) { |
319 | return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber; |
320 | } |
321 | |
322 | namespace { |
323 | |
324 | struct UpwardsMemoryQuery { |
325 | // True if our original query started off as a call |
326 | bool IsCall = false; |
327 | // The pointer location we started the query with. This will be empty if |
328 | // IsCall is true. |
329 | MemoryLocation StartingLoc; |
330 | // This is the instruction we were querying about. |
331 | const Instruction *Inst = nullptr; |
332 | // The MemoryAccess we actually got called with, used to test local domination |
333 | const MemoryAccess *OriginalAccess = nullptr; |
334 | Optional<AliasResult> AR = MayAlias; |
335 | |
336 | UpwardsMemoryQuery() = default; |
337 | |
338 | UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access) |
339 | : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) { |
340 | if (!IsCall) |
341 | StartingLoc = MemoryLocation::get(Inst); |
342 | } |
343 | }; |
344 | |
345 | } // end anonymous namespace |
346 | |
347 | static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc, |
348 | AliasAnalysis &AA) { |
349 | Instruction *Inst = MD->getMemoryInst(); |
350 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { |
351 | switch (II->getIntrinsicID()) { |
352 | case Intrinsic::lifetime_end: |
353 | return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc); |
354 | default: |
355 | return false; |
356 | } |
357 | } |
358 | return false; |
359 | } |
360 | |
361 | static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA, |
362 | const Instruction *I) { |
363 | // If the memory can't be changed, then loads of the memory can't be |
364 | // clobbered. |
365 | return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) || |
366 | AA.pointsToConstantMemory(cast<LoadInst>(I)-> |
367 | getPointerOperand())); |
368 | } |
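// Example (editor's illustration): a load tagged !invariant.load, or one
// whose pointer provably refers to constant memory, is trivially
// live-on-entry:
//
//   %v = load i32, i32* %p, align 4, !invariant.load !0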
369 | |
370 | /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing |
371 | /// in between `Start` and `ClobberAt` can clobber `Start`. |
372 | /// |
373 | /// This is meant to be as simple and self-contained as possible. Because it |
374 | /// uses no cache, etc., it can be relatively expensive. |
375 | /// |
376 | /// \param Start The MemoryAccess that we want to walk from. |
377 | /// \param ClobberAt A clobber for Start. |
378 | /// \param StartLoc The MemoryLocation for Start. |
379 | /// \param MSSA The MemorySSA instance that Start and ClobberAt belong to. |
380 | /// \param Query The UpwardsMemoryQuery we used for our search. |
381 | /// \param AA The AliasAnalysis we used for our search. |
382 | /// \param AllowImpreciseClobber Always false, unless we do relaxed verify. |
383 | static void |
384 | checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt, |
385 | const MemoryLocation &StartLoc, const MemorySSA &MSSA, |
386 | const UpwardsMemoryQuery &Query, AliasAnalysis &AA, |
387 | bool AllowImpreciseClobber = false) { |
388 | assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?"); |
389 | |
390 | if (MSSA.isLiveOnEntryDef(Start)) { |
391 | assert(MSSA.isLiveOnEntryDef(ClobberAt) && |
392 | "liveOnEntry must clobber itself"); |
393 | return; |
394 | } |
395 | |
396 | bool FoundClobber = false; |
397 | DenseSet<ConstMemoryAccessPair> VisitedPhis; |
398 | SmallVector<ConstMemoryAccessPair, 8> Worklist; |
399 | Worklist.emplace_back(Start, StartLoc); |
400 | // Walk all paths from Start to ClobberAt, while looking for clobbers. If one |
401 | // is found, complain. |
402 | while (!Worklist.empty()) { |
403 | auto MAP = Worklist.pop_back_val(); |
404 | // All we care about is that nothing from Start to ClobberAt clobbers Start. |
405 | // We learn nothing from revisiting nodes. |
406 | if (!VisitedPhis.insert(MAP).second) |
407 | continue; |
408 | |
409 | for (const auto *MA : def_chain(MAP.first)) { |
410 | if (MA == ClobberAt) { |
411 | if (const auto *MD = dyn_cast<MemoryDef>(MA)) { |
412 | // instructionClobbersQuery isn't essentially free, so don't use `|=`, |
413 | // since it won't let us short-circuit. |
414 | // |
415 | // Also, note that this can't be hoisted out of the `Worklist` loop, |
416 | // since MD may only act as a clobber for 1 of N MemoryLocations. |
417 | FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD); |
418 | if (!FoundClobber) { |
419 | ClobberAlias CA = |
420 | instructionClobbersQuery(MD, MAP.second, Query.Inst, AA); |
421 | if (CA.IsClobber) { |
422 | FoundClobber = true; |
423 | // Not used: CA.AR; |
424 | } |
425 | } |
426 | } |
427 | break; |
428 | } |
429 | |
430 | // We should never hit liveOnEntry, unless it's the clobber. |
431 | assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?"); |
432 | |
433 | if (const auto *MD = dyn_cast<MemoryDef>(MA)) { |
434 | // If Start is a Def, skip self. |
435 | if (MD == Start) |
436 | continue; |
437 | |
438 | assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) |
439 | .IsClobber && |
440 | "Found clobber before reaching ClobberAt!"); |
441 | continue; |
442 | } |
443 | |
444 | if (const auto *MU = dyn_cast<MemoryUse>(MA)) { |
445 | (void)MU; |
446 | assert(MU == Start && |
447 | "Can only find use in def chain if Start is a use"); |
448 | continue; |
449 | } |
450 | |
451 | assert(isa<MemoryPhi>(MA))((isa<MemoryPhi>(MA)) ? static_cast<void> (0) : __assert_fail ("isa<MemoryPhi>(MA)", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Analysis/MemorySSA.cpp" , 451, __PRETTY_FUNCTION__)); |
452 | Worklist.append( |
453 | upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}), |
454 | upward_defs_end()); |
455 | } |
456 | } |
457 | |
458 | // If the verify is done following an optimization, it's possible that |
459 | // ClobberAt was a conservative clobbering, that we can now infer is not a |
460 | // true clobbering access. Don't fail the verify if that's the case. |
461 | // We do have accesses that claim they're optimized, but could be optimized |
462 | // further. Updating all these can be expensive, so allow it for now (FIXME). |
463 | if (AllowImpreciseClobber) |
464 | return; |
465 | |
466 | // If ClobberAt is a MemoryPhi, we can assume something above it acted as a |
467 | // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point. |
468 | assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&(((isa<MemoryPhi>(ClobberAt) || FoundClobber) && "ClobberAt never acted as a clobber") ? static_cast<void> (0) : __assert_fail ("(isa<MemoryPhi>(ClobberAt) || FoundClobber) && \"ClobberAt never acted as a clobber\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Analysis/MemorySSA.cpp" , 469, __PRETTY_FUNCTION__)) |
469 | "ClobberAt never acted as a clobber")(((isa<MemoryPhi>(ClobberAt) || FoundClobber) && "ClobberAt never acted as a clobber") ? static_cast<void> (0) : __assert_fail ("(isa<MemoryPhi>(ClobberAt) || FoundClobber) && \"ClobberAt never acted as a clobber\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Analysis/MemorySSA.cpp" , 469, __PRETTY_FUNCTION__)); |
470 | } |
471 | |
472 | namespace { |
473 | |
474 | /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up |
475 | /// in one class. |
476 | class ClobberWalker { |
477 | /// Save a few bytes by using unsigned instead of size_t. |
478 | using ListIndex = unsigned; |
479 | |
480 | /// Represents a span of contiguous MemoryDefs, potentially ending in a |
481 | /// MemoryPhi. |
482 | struct DefPath { |
483 | MemoryLocation Loc; |
484 | // Note that, because we always walk in reverse, Last will always dominate |
485 | // First. Also note that First and Last are inclusive. |
486 | MemoryAccess *First; |
487 | MemoryAccess *Last; |
488 | Optional<ListIndex> Previous; |
489 | |
490 | DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last, |
491 | Optional<ListIndex> Previous) |
492 | : Loc(Loc), First(First), Last(Last), Previous(Previous) {} |
493 | |
494 | DefPath(const MemoryLocation &Loc, MemoryAccess *Init, |
495 | Optional<ListIndex> Previous) |
496 | : DefPath(Loc, Init, Init, Previous) {} |
497 | }; |
498 | |
499 | const MemorySSA &MSSA; |
500 | AliasAnalysis &AA; |
501 | DominatorTree &DT; |
502 | UpwardsMemoryQuery *Query; |
503 | |
504 | // Phi optimization bookkeeping |
505 | SmallVector<DefPath, 32> Paths; |
506 | DenseSet<ConstMemoryAccessPair> VisitedPhis; |
507 | |
508 | /// Find the nearest def or phi that `From` can legally be optimized to. |
509 | const MemoryAccess *getWalkTarget(const MemoryPhi *From) const { |
510 | assert(From->getNumOperands() && "Phi with no operands?")((From->getNumOperands() && "Phi with no operands?" ) ? static_cast<void> (0) : __assert_fail ("From->getNumOperands() && \"Phi with no operands?\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Analysis/MemorySSA.cpp" , 510, __PRETTY_FUNCTION__)); |
511 | |
512 | BasicBlock *BB = From->getBlock(); |
513 | MemoryAccess *Result = MSSA.getLiveOnEntryDef(); |
514 | DomTreeNode *Node = DT.getNode(BB); |
515 | while ((Node = Node->getIDom())) { |
516 | auto *Defs = MSSA.getBlockDefs(Node->getBlock()); |
517 | if (Defs) |
518 | return &*Defs->rbegin(); |
519 | } |
520 | return Result; |
521 | } |
522 | |
523 | /// Result of calling walkToPhiOrClobber. |
524 | struct UpwardsWalkResult { |
525 | /// The "Result" of the walk. Either a clobber, the last thing we walked, or |
526 | /// both. Include alias info when clobber found. |
527 | MemoryAccess *Result; |
528 | bool IsKnownClobber; |
529 | Optional<AliasResult> AR; |
530 | }; |
531 | |
532 | /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last. |
533 | /// This will update Desc.Last as it walks. It will (optionally) also stop at |
534 | /// StopAt. |
535 | /// |
536 | /// This does not test for whether StopAt is a clobber. |
537 | UpwardsWalkResult |
538 | walkToPhiOrClobber(DefPath &Desc, |
539 | const MemoryAccess *StopAt = nullptr) const { |
540 | assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world")((!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world" ) ? static_cast<void> (0) : __assert_fail ("!isa<MemoryUse>(Desc.Last) && \"Uses don't exist in my world\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Analysis/MemorySSA.cpp" , 540, __PRETTY_FUNCTION__)); |
541 | |
542 | for (MemoryAccess *Current : def_chain(Desc.Last)) { |
543 | Desc.Last = Current; |
544 | if (Current == StopAt) |
545 | return {Current, false, MayAlias}; |
546 | |
547 | if (auto *MD = dyn_cast<MemoryDef>(Current)) { |
548 | if (MSSA.isLiveOnEntryDef(MD)) |
549 | return {MD, true, MustAlias}; |
550 | ClobberAlias CA = |
551 | instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA); |
552 | if (CA.IsClobber) |
553 | return {MD, true, CA.AR}; |
554 | } |
555 | } |
556 | |
557 | assert(isa<MemoryPhi>(Desc.Last) &&((isa<MemoryPhi>(Desc.Last) && "Ended at a non-clobber that's not a phi?" ) ? static_cast<void> (0) : __assert_fail ("isa<MemoryPhi>(Desc.Last) && \"Ended at a non-clobber that's not a phi?\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Analysis/MemorySSA.cpp" , 558, __PRETTY_FUNCTION__)) |
558 | "Ended at a non-clobber that's not a phi?")((isa<MemoryPhi>(Desc.Last) && "Ended at a non-clobber that's not a phi?" ) ? static_cast<void> (0) : __assert_fail ("isa<MemoryPhi>(Desc.Last) && \"Ended at a non-clobber that's not a phi?\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Analysis/MemorySSA.cpp" , 558, __PRETTY_FUNCTION__)); |
559 | return {Desc.Last, false, MayAlias}; |
560 | } |
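// Example (editor's note): the walk above terminates in one of three shapes:
//
//   {StopAt, false, MayAlias}  -- reached the caller-provided stopping point
//   {Def,    true,  AR}        -- found a known clobber (incl. liveOnEntry)
//   {Phi,    false, MayAlias}  -- hit a MemoryPhi without finding a clobber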
561 | |
562 | void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches, |
563 | ListIndex PriorNode) { |
564 | auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}), |
565 | upward_defs_end()); |
566 | for (const MemoryAccessPair &P : UpwardDefs) { |
567 | PausedSearches.push_back(Paths.size()); |
568 | Paths.emplace_back(P.second, P.first, PriorNode); |
569 | } |
570 | } |
571 | |
572 | /// Represents a search that terminated after finding a clobber. This clobber |
573 | /// may or may not be present in the path of defs from LastNode..SearchStart, |
574 | /// since it may have been retrieved from cache. |
575 | struct TerminatedPath { |
576 | MemoryAccess *Clobber; |
577 | ListIndex LastNode; |
578 | }; |
579 | |
580 | /// Get an access that keeps us from optimizing to the given phi. |
581 | /// |
582 | /// PausedSearches is an array of indices into the Paths array. Its incoming |
583 | /// value is the indices of searches that stopped at the last phi optimization |
584 | /// target. It's left in an unspecified state. |
585 | /// |
586 | /// If this returns None, NewPaused is a vector of searches that terminated |
587 | /// at StopWhere. Otherwise, NewPaused is left in an unspecified state. |
588 | Optional<TerminatedPath> |
589 | getBlockingAccess(const MemoryAccess *StopWhere, |
590 | SmallVectorImpl<ListIndex> &PausedSearches, |
591 | SmallVectorImpl<ListIndex> &NewPaused, |
592 | SmallVectorImpl<TerminatedPath> &Terminated) { |
593 | assert(!PausedSearches.empty() && "No searches to continue?"); |
594 | |
595 | // BFS vs DFS really doesn't make a difference here, so just do a DFS with |
596 | // PausedSearches as our stack. |
597 | while (!PausedSearches.empty()) { |
598 | ListIndex PathIndex = PausedSearches.pop_back_val(); |
599 | DefPath &Node = Paths[PathIndex]; |
600 | |
601 | // If we've already visited this path with this MemoryLocation, we don't |
602 | // need to do so again. |
603 | // |
604 | // NOTE: That we just drop these paths on the ground makes caching |
605 | // behavior sporadic. e.g. given a diamond: |
606 | // A |
607 | // B C |
608 | // D |
609 | // |
610 | // ...If we walk D, B, A, C, we'll only cache the result of phi |
611 | // optimization for A, B, and D; C will be skipped because it dies here. |
612 | // This arguably isn't the worst thing ever, since: |
613 | // - We generally query things in a top-down order, so if we got below D |
614 | // without needing cache entries for {C, MemLoc}, then chances are |
615 | // that those cache entries would end up ultimately unused. |
616 | // - We still cache things for A, so C only needs to walk up a bit. |
617 | // If this behavior becomes problematic, we can fix without a ton of extra |
618 | // work. |
619 | if (!VisitedPhis.insert({Node.Last, Node.Loc}).second) |
620 | continue; |
621 | |
622 | UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere); |
623 | if (Res.IsKnownClobber) { |
624 | assert(Res.Result != StopWhere); |
625 | // If this wasn't a cache hit, we hit a clobber when walking. That's a |
626 | // failure. |
627 | TerminatedPath Term{Res.Result, PathIndex}; |
628 | if (!MSSA.dominates(Res.Result, StopWhere)) |
629 | return Term; |
630 | |
631 | // Otherwise, it's a valid thing to potentially optimize to. |
632 | Terminated.push_back(Term); |
633 | continue; |
634 | } |
635 | |
636 | if (Res.Result == StopWhere) { |
637 | // We've hit our target. Save this path off for if we want to continue |
638 | // walking. |
639 | NewPaused.push_back(PathIndex); |
640 | continue; |
641 | } |
642 | |
643 | assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber"); |
644 | addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex); |
645 | } |
646 | |
647 | return None; |
648 | } |
649 | |
650 | template <typename T, typename Walker> |
651 | struct generic_def_path_iterator |
652 | : public iterator_facade_base<generic_def_path_iterator<T, Walker>, |
653 | std::forward_iterator_tag, T *> { |
654 | generic_def_path_iterator() = default; |
655 | generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {} |
656 | |
657 | T &operator*() const { return curNode(); } |
658 | |
659 | generic_def_path_iterator &operator++() { |
660 | N = curNode().Previous; |
661 | return *this; |
662 | } |
663 | |
664 | bool operator==(const generic_def_path_iterator &O) const { |
665 | if (N.hasValue() != O.N.hasValue()) |
666 | return false; |
667 | return !N.hasValue() || *N == *O.N; |
668 | } |
669 | |
670 | private: |
671 | T &curNode() const { return W->Paths[*N]; } |
672 | |
673 | Walker *W = nullptr; |
674 | Optional<ListIndex> N = None; |
675 | }; |
676 | |
677 | using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>; |
678 | using const_def_path_iterator = |
679 | generic_def_path_iterator<const DefPath, const ClobberWalker>; |
680 | |
681 | iterator_range<def_path_iterator> def_path(ListIndex From) { |
682 | return make_range(def_path_iterator(this, From), def_path_iterator()); |
683 | } |
684 | |
685 | iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const { |
686 | return make_range(const_def_path_iterator(this, From), |
687 | const_def_path_iterator()); |
688 | } |
689 | |
690 | struct OptznResult { |
691 | /// The path that contains our result. |
692 | TerminatedPath PrimaryClobber; |
693 | /// The paths that we can legally cache back from, but that aren't |
694 | /// necessarily the result of the Phi optimization. |
695 | SmallVector<TerminatedPath, 4> OtherClobbers; |
696 | }; |
697 | |
698 | ListIndex defPathIndex(const DefPath &N) const { |
699 | // The assert looks nicer if we don't need to do &N |
700 | const DefPath *NP = &N; |
701 | assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&((!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() && "Out of bounds DefPath!" ) ? static_cast<void> (0) : __assert_fail ("!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() && \"Out of bounds DefPath!\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Analysis/MemorySSA.cpp" , 702, __PRETTY_FUNCTION__)) |
702 | "Out of bounds DefPath!")((!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() && "Out of bounds DefPath!" ) ? static_cast<void> (0) : __assert_fail ("!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() && \"Out of bounds DefPath!\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Analysis/MemorySSA.cpp" , 702, __PRETTY_FUNCTION__)); |
703 | return NP - &Paths.front(); |
704 | } |
705 | |
706 | /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths |
707 | /// that act as legal clobbers. Note that this won't return *all* clobbers. |
708 | /// |
709 | /// Phi optimization algorithm tl;dr: |
710 | /// - Find the earliest def/phi, A, we can optimize to |
711 | /// - Find if all paths from the starting memory access ultimately reach A |
712 | /// - If not, optimization isn't possible. |
713 | /// - Otherwise, walk from A to another clobber or phi, A'. |
714 | /// - If A' is a def, we're done. |
715 | /// - If A' is a phi, try to optimize it. |
716 | /// |
717 | /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path |
718 | /// terminates when a MemoryAccess that clobbers said MemoryLocation is found. |
719 | OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start, |
720 | const MemoryLocation &Loc) { |
721 | assert(Paths.empty() && VisitedPhis.empty() && |
722 | "Reset the optimization state."); |
723 | |
724 | Paths.emplace_back(Loc, Start, Phi, None); |
725 | // Stores how many "valid" optimization nodes we had prior to calling |
726 | // addSearches/getBlockingAccess. Necessary for caching if we had a blocker. |
727 | auto PriorPathsSize = Paths.size(); |
728 | |
729 | SmallVector<ListIndex, 16> PausedSearches; |
730 | SmallVector<ListIndex, 8> NewPaused; |
731 | SmallVector<TerminatedPath, 4> TerminatedPaths; |
732 | |
733 | addSearches(Phi, PausedSearches, 0); |
734 | |
735 | // Moves the TerminatedPath with the "most dominated" Clobber to the end of |
736 | // Paths. |
737 | auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) { |
738 | assert(!Paths.empty() && "Need a path to move")((!Paths.empty() && "Need a path to move") ? static_cast <void> (0) : __assert_fail ("!Paths.empty() && \"Need a path to move\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Analysis/MemorySSA.cpp" , 738, __PRETTY_FUNCTION__)); |
739 | auto Dom = Paths.begin(); |
740 | for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I) |
741 | if (!MSSA.dominates(I->Clobber, Dom->Clobber)) |
742 | Dom = I; |
743 | auto Last = Paths.end() - 1; |
744 | if (Last != Dom) |
745 | std::iter_swap(Last, Dom); |
746 | }; |
747 | |
748 | MemoryPhi *Current = Phi; |
749 | while (true) { |
750 | assert(!MSSA.isLiveOnEntryDef(Current) && |
751 | "liveOnEntry wasn't treated as a clobber?"); |
752 | |
753 | const auto *Target = getWalkTarget(Current); |
754 | // If a TerminatedPath doesn't dominate Target, then it wasn't a legal |
755 | // optimization for the prior phi. |
756 | assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) { |
757 | return MSSA.dominates(P.Clobber, Target); |
758 | })); |
759 | |
760 | // FIXME: This is broken, because the Blocker may be reported to be |
761 | // liveOnEntry, and we'll happily wait for that to disappear (read: never) |
762 | // For the moment, this is fine, since we do nothing with blocker info. |
763 | if (Optional<TerminatedPath> Blocker = getBlockingAccess( |
764 | Target, PausedSearches, NewPaused, TerminatedPaths)) { |
765 | |
766 | // Find the node we started at. We can't search based on N->Last, since |
767 | // we may have gone around a loop with a different MemoryLocation. |
768 | auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) { |
769 | return defPathIndex(N) < PriorPathsSize; |
770 | }); |
771 | assert(Iter != def_path_iterator()); |
772 | |
773 | DefPath &CurNode = *Iter; |
774 | assert(CurNode.Last == Current); |
775 | |
776 | // Two things: |
777 | // A. We can't reliably cache all of NewPaused back. Consider a case |
778 | // where we have two paths in NewPaused; one of which can't optimize |
779 | // above this phi, whereas the other can. If we cache the second path |
780 | // back, we'll end up with suboptimal cache entries. We can handle |
781 | // cases like this a bit better when we either try to find all |
782 | // clobbers that block phi optimization, or when our cache starts |
783 | // supporting unfinished searches. |
784 | // B. We can't reliably cache TerminatedPaths back here without doing |
785 | // extra checks; consider a case like: |
786 | // T |
787 | // / \ |
788 | // D C |
789 | // \ / |
790 | // S |
791 | // Where T is our target, C is a node with a clobber on it, D is a |
792 | // diamond (with a clobber *only* on the left or right node, N), and |
793 | // S is our start. Say we walk to D, through the node opposite N |
794 | // (read: ignoring the clobber), and see a cache entry in the top |
795 | // node of D. That cache entry gets put into TerminatedPaths. We then |
796 | // walk up to C (N is later in our worklist), find the clobber, and |
797 | // quit. If we append TerminatedPaths to OtherClobbers, we'll cache |
798 | // the bottom part of D to the cached clobber, ignoring the clobber |
799 | // in N. Again, this problem goes away if we start tracking all |
800 | // blockers for a given phi optimization. |
801 | TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)}; |
802 | return {Result, {}}; |
803 | } |
804 | |
805 | // If there's nothing left to search, then all paths led to valid clobbers |
806 | // that we got from our cache; pick the nearest to the start, and allow |
807 | // the rest to be cached back. |
808 | if (NewPaused.empty()) { |
809 | MoveDominatedPathToEnd(TerminatedPaths); |
810 | TerminatedPath Result = TerminatedPaths.pop_back_val(); |
811 | return {Result, std::move(TerminatedPaths)}; |
812 | } |
813 | |
814 | MemoryAccess *DefChainEnd = nullptr; |
815 | SmallVector<TerminatedPath, 4> Clobbers; |
816 | for (ListIndex Paused : NewPaused) { |
817 | UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]); |
818 | if (WR.IsKnownClobber) |
819 | Clobbers.push_back({WR.Result, Paused}); |
820 | else |
821 | // Micro-opt: If we hit the end of the chain, save it. |
822 | DefChainEnd = WR.Result; |
823 | } |
824 | |
825 | if (!TerminatedPaths.empty()) { |
826 | // If we couldn't find the dominating phi/liveOnEntry in the above loop, |
827 | // do it now. |
828 | if (!DefChainEnd) |
829 | for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target))) |
830 | DefChainEnd = MA; |
831 | |
832 | // If any of the terminated paths don't dominate the phi we'll try to |
833 | // optimize, we need to figure out what they are and quit. |
834 | const BasicBlock *ChainBB = DefChainEnd->getBlock(); |
835 | for (const TerminatedPath &TP : TerminatedPaths) { |
836 | // Because we know that DefChainEnd is as "high" as we can go, we |
837 | // don't need local dominance checks; BB dominance is sufficient. |
838 | if (DT.dominates(ChainBB, TP.Clobber->getBlock())) |
839 | Clobbers.push_back(TP); |
840 | } |
841 | } |
842 | |
843 | // If we have clobbers in the def chain, find the one closest to Current |
844 | // and quit. |
845 | if (!Clobbers.empty()) { |
846 | MoveDominatedPathToEnd(Clobbers); |
847 | TerminatedPath Result = Clobbers.pop_back_val(); |
848 | return {Result, std::move(Clobbers)}; |
849 | } |
850 | |
851 | assert(all_of(NewPaused, |
852 | [&](ListIndex I) { return Paths[I].Last == DefChainEnd; })); |
853 | |
854 | // Because liveOnEntry is a clobber, this must be a phi. |
855 | auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd); |
856 | |
857 | PriorPathsSize = Paths.size(); |
858 | PausedSearches.clear(); |
859 | for (ListIndex I : NewPaused) |
860 | addSearches(DefChainPhi, PausedSearches, I); |
861 | NewPaused.clear(); |
862 | |
863 | Current = DefChainPhi; |
864 | } |
865 | } |
866 | |
867 | void verifyOptResult(const OptznResult &R) const { |
868 | assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) { |
869 | return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber); |
870 | })); |
871 | } |
872 | |
873 | void resetPhiOptznState() { |
874 | Paths.clear(); |
875 | VisitedPhis.clear(); |
876 | } |
877 | |
878 | public: |
879 | ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT) |
880 | : MSSA(MSSA), AA(AA), DT(DT) {} |
881 | |
882 | /// Finds the nearest clobber for the given query, optimizing phis if |
883 | /// possible. |
884 | MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) { |
885 | Query = &Q; |
886 | |
887 | MemoryAccess *Current = Start; |
888 | // This walker pretends uses don't exist. If we're handed one, silently grab |
889 | // its def. (This has the nice side-effect of ensuring we never cache uses) |
890 | if (auto *MU = dyn_cast<MemoryUse>(Start)) |
891 | Current = MU->getDefiningAccess(); |
892 | |
893 | DefPath FirstDesc(Q.StartingLoc, Current, Current, None); |
894 | // Fast path for the overly-common case (no crazy phi optimization |
895 | // necessary) |
896 | UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc); |
897 | MemoryAccess *Result; |
898 | if (WalkResult.IsKnownClobber) { |
899 | Result = WalkResult.Result; |
900 | Q.AR = WalkResult.AR; |
901 | } else { |
902 | OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last), |
903 | Current, Q.StartingLoc); |
904 | verifyOptResult(OptRes); |
905 | resetPhiOptznState(); |
906 | Result = OptRes.PrimaryClobber.Clobber; |
907 | } |
908 | |
909 | #ifdef EXPENSIVE_CHECKS |
910 | checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA); |
911 | #endif |
912 | return Result; |
913 | } |
914 | |
915 | void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); } |
916 | }; |
917 | |
918 | struct RenamePassData { |
919 | DomTreeNode *DTN; |
920 | DomTreeNode::const_iterator ChildIt; |
921 | MemoryAccess *IncomingVal; |
922 | |
923 | RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It, |
924 | MemoryAccess *M) |
925 | : DTN(D), ChildIt(It), IncomingVal(M) {} |
926 | |
927 | void swap(RenamePassData &RHS) { |
928 | std::swap(DTN, RHS.DTN); |
929 | std::swap(ChildIt, RHS.ChildIt); |
930 | std::swap(IncomingVal, RHS.IncomingVal); |
931 | } |
932 | }; |
933 | |
934 | } // end anonymous namespace |
935 | |
936 | namespace llvm { |
937 | |
938 | /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no |
939 | /// longer does caching on its own, but the name has been retained for the |
940 | /// moment. |
941 | class MemorySSA::CachingWalker final : public MemorySSAWalker { |
942 | ClobberWalker Walker; |
943 | |
944 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &); |
945 | |
946 | public: |
947 | CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *); |
948 | ~CachingWalker() override = default; |
949 | |
950 | using MemorySSAWalker::getClobberingMemoryAccess; |
951 | |
952 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override; |
953 | MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, |
954 | const MemoryLocation &) override; |
955 | void invalidateInfo(MemoryAccess *) override; |
956 | |
957 | void verify(const MemorySSA *MSSA) override { |
958 | MemorySSAWalker::verify(MSSA); |
959 | Walker.verify(MSSA); |
960 | } |
961 | }; |
962 | |
963 | } // end namespace llvm |
964 | |
965 | void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal, |
966 | bool RenameAllUses) { |
967 | // Pass through values to our successors |
968 | for (const BasicBlock *S : successors(BB)) { |
969 | auto It = PerBlockAccesses.find(S); |
970 | // Rename the phi nodes in our successor block |
971 | if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) |
972 | continue; |
973 | AccessList *Accesses = It->second.get(); |
974 | auto *Phi = cast<MemoryPhi>(&Accesses->front()); |
975 | if (RenameAllUses) { |
976 | int PhiIndex = Phi->getBasicBlockIndex(BB); |
977 | assert(PhiIndex != -1 && "Incomplete phi during partial rename"); |
978 | Phi->setIncomingValue(PhiIndex, IncomingVal); |
979 | } else |
980 | Phi->addIncoming(IncomingVal, BB); |
981 | } |
982 | } |
983 | |
984 | /// Rename a single basic block into MemorySSA form. |
985 | /// Uses the standard SSA renaming algorithm. |
986 | /// \returns The new incoming value. |
987 | MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal, |
988 | bool RenameAllUses) { |
989 | auto It = PerBlockAccesses.find(BB); |
990 | // Skip most processing if the list is empty. |
991 | if (It != PerBlockAccesses.end()) { |
992 | AccessList *Accesses = It->second.get(); |
993 | for (MemoryAccess &L : *Accesses) { |
994 | if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) { |
995 | if (MUD->getDefiningAccess() == nullptr || RenameAllUses) |
996 | MUD->setDefiningAccess(IncomingVal); |
997 | if (isa<MemoryDef>(&L)) |
998 | IncomingVal = &L; |
999 | } else { |
1000 | IncomingVal = &L; |
1001 | } |
1002 | } |
1003 | } |
1004 | return IncomingVal; |
1005 | } |
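// Example (editor's illustration): for a block whose access list is
// [MemoryUse U1, MemoryDef D, MemoryUse U2], this wires U1 and D to the
// incoming value, makes D the new incoming value, wires U2 to D, and returns
// D as the incoming value for successor phis.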
1006 | |
1007 | /// This is the standard SSA renaming algorithm. |
1008 | /// |
1009 | /// We walk the dominator tree in preorder, renaming accesses, and then filling |
1010 | /// in phi nodes in our successors. |
1011 | void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal, |
1012 | SmallPtrSetImpl<BasicBlock *> &Visited, |
1013 | bool SkipVisited, bool RenameAllUses) { |
1014 | SmallVector<RenamePassData, 32> WorkStack; |
1015 | // Skip everything if we already renamed this block and we are skipping. |
1016 | // Note: You can't sink this into the if, because we need it to occur |
1017 | // regardless of whether we skip blocks or not. |
1018 | bool AlreadyVisited = !Visited.insert(Root->getBlock()).second; |
1019 | if (SkipVisited && AlreadyVisited) |
1020 | return; |
1021 | |
1022 | IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses); |
1023 | renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses); |
1024 | WorkStack.push_back({Root, Root->begin(), IncomingVal}); |
1025 | |
1026 | while (!WorkStack.empty()) { |
1027 | DomTreeNode *Node = WorkStack.back().DTN; |
1028 | DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt; |
1029 | IncomingVal = WorkStack.back().IncomingVal; |
1030 | |
1031 | if (ChildIt == Node->end()) { |
1032 | WorkStack.pop_back(); |
1033 | } else { |
1034 | DomTreeNode *Child = *ChildIt; |
1035 | ++WorkStack.back().ChildIt; |
1036 | BasicBlock *BB = Child->getBlock(); |
1037 | // Note: You can't sink this into the if, because we need it to occur |
1038 | // regardless of whether we skip blocks or not. |
1039 | AlreadyVisited = !Visited.insert(BB).second; |
1040 | if (SkipVisited && AlreadyVisited) { |
1041 | // We already visited this during our renaming, which can happen when |
1042 | // being asked to rename multiple blocks. Figure out the incoming val, |
1043 | // which is the last def. |
1044 | // Incoming value can only change if there is a block def, and in that |
1045 | // case, it's the last block def in the list. |
1046 | if (auto *BlockDefs = getWritableBlockDefs(BB)) |
1047 | IncomingVal = &*BlockDefs->rbegin(); |
1048 | } else |
1049 | IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses); |
1050 | renameSuccessorPhis(BB, IncomingVal, RenameAllUses); |
1051 | WorkStack.push_back({Child, Child->begin(), IncomingVal}); |
1052 | } |
1053 | } |
1054 | } |
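// Illustrative sketch (not in the original source): on a diamond CFG
//   A -> B, A -> C, B -> D, C -> D
// where only B contains a store, the preorder walk renames A, then renames B
// and C each starting from A's incoming value; renameSuccessorPhis fills D's
// MemoryPhi operand for each incoming edge as its predecessor is processed.
// When D (a dominator-tree child of A) is itself renamed, the MemoryPhi
// becomes the incoming value for D's body.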
1055 | |
1056 | /// This handles unreachable block accesses by deleting phi nodes in |
1057 | /// unreachable blocks, and marking all other unreachable MemoryAccesses as
1058 | /// being uses of the live on entry definition. |
1059 | void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) { |
1060 | assert(!DT->isReachableFromEntry(BB) &&
1061 |        "Reachable block found while handling unreachable blocks");
1062 | |
1063 | // Make sure phi nodes in our reachable successors end up with a |
1064 | // LiveOnEntryDef for our incoming edge, even though our block is forward |
1065 | // unreachable. We could just disconnect these blocks from the CFG fully, |
1066 | // but we do not right now. |
1067 | for (const BasicBlock *S : successors(BB)) { |
1068 | if (!DT->isReachableFromEntry(S)) |
1069 | continue; |
1070 | auto It = PerBlockAccesses.find(S); |
1071 | // Rename the phi nodes in our successor block |
1072 | if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) |
1073 | continue; |
1074 | AccessList *Accesses = It->second.get(); |
1075 | auto *Phi = cast<MemoryPhi>(&Accesses->front()); |
1076 | Phi->addIncoming(LiveOnEntryDef.get(), BB); |
1077 | } |
1078 | |
1079 | auto It = PerBlockAccesses.find(BB); |
1080 | if (It == PerBlockAccesses.end()) |
1081 | return; |
1082 | |
1083 | auto &Accesses = It->second; |
1084 | for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) { |
1085 | auto Next = std::next(AI); |
1086 | // If we have a phi, just remove it. We are going to replace all |
1087 | // users with live on entry. |
1088 | if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI)) |
1089 | UseOrDef->setDefiningAccess(LiveOnEntryDef.get()); |
1090 | else |
1091 | Accesses->erase(AI); |
1092 | AI = Next; |
1093 | } |
1094 | } |
1095 | |
1096 | MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT) |
1097 | : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr), |
1098 | NextID(0) { |
1099 | buildMemorySSA(); |
1100 | } |
1101 | |
1102 | MemorySSA::~MemorySSA() { |
1103 | // Drop all our references |
1104 | for (const auto &Pair : PerBlockAccesses) |
1105 | for (MemoryAccess &MA : *Pair.second) |
1106 | MA.dropAllReferences(); |
1107 | } |
1108 | |
1109 | MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) { |
1110 | auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr)); |
1111 | |
1112 | if (Res.second) |
1113 | Res.first->second = llvm::make_unique<AccessList>(); |
1114 | return Res.first->second.get(); |
1115 | } |
1116 | |
1117 | MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) { |
1118 | auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr)); |
1119 | |
1120 | if (Res.second) |
1121 | Res.first->second = llvm::make_unique<DefsList>(); |
1122 | return Res.first->second.get(); |
1123 | } |
1124 | |
1125 | namespace llvm { |
1126 | |
1127 | /// This class is a batch walker of all MemoryUse's in the program, and points |
1128 | /// their defining access at the thing that actually clobbers them. Because it |
1129 | /// is a batch walker that touches everything, it does not operate like the |
1130 | /// other walkers. This walker is basically performing a top-down SSA renaming |
1131 | /// pass, where the version stack is used as the cache. This enables it to be |
1132 | /// significantly more time and memory efficient than using the regular walker, |
1133 | /// which is walking bottom-up. |
1134 | class MemorySSA::OptimizeUses { |
1135 | public: |
1136 | OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA, |
1137 | DominatorTree *DT) |
1138 | : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) { |
1139 | Walker = MSSA->getWalker(); |
Value stored to 'Walker' is never read | |
1140 | } |
1141 | |
1142 | void optimizeUses(); |
1143 | |
1144 | private: |
1145 | /// This represents where a given MemoryLocation is in the stack.
1146 | struct MemlocStackInfo { |
1147 | // This essentially is keeping track of versions of the stack. Whenever |
1148 | // the stack changes due to pushes or pops, these versions increase. |
1149 | unsigned long StackEpoch; |
1150 | unsigned long PopEpoch; |
1151 | // This is the lower bound of places on the stack to check. It is equal to |
1152 | // the place the last stack walk ended. |
1153 | // Note: Correctness depends on this being initialized to 0, which DenseMap
1154 | // does.
1155 | unsigned long LowerBound; |
1156 | const BasicBlock *LowerBoundBlock; |
1157 | // This is where the last walk for this memory location ended. |
1158 | unsigned long LastKill; |
1159 | bool LastKillValid; |
1160 | Optional<AliasResult> AR; |
1161 | }; |
1162 | |
1163 | void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &, |
1164 | SmallVectorImpl<MemoryAccess *> &, |
1165 | DenseMap<MemoryLocOrCall, MemlocStackInfo> &); |
1166 | |
1167 | MemorySSA *MSSA; |
1168 | MemorySSAWalker *Walker; |
1169 | AliasAnalysis *AA; |
1170 | DominatorTree *DT; |
1171 | }; |
1172 | |
1173 | } // end namespace llvm |
1174 | |
1175 | /// Optimize the uses in a given block. This is basically the SSA renaming
1176 | /// algorithm, with one caveat: We are able to use a single stack for all |
1177 | /// MemoryUses. This is because the set of *possible* reaching MemoryDefs is |
1178 | /// the same for every MemoryUse. The *actual* clobbering MemoryDef is just |
1179 | /// going to be some position in that stack of possible ones. |
1180 | /// |
1181 | /// For each MemoryLocation, we track the stack position it needs to start
1182 | /// checking from and where its last walk ended, because we only want to
1183 | /// check the things that changed since last time. The same MemoryLocation
1184 | /// should get clobbered by the same store (getModRefInfo does not use
1185 | /// invariantness or things like this; if it starts to, we can modify
1186 | /// MemoryLocOrCall to include the relevant data).
1187 | void MemorySSA::OptimizeUses::optimizeUsesInBlock( |
1188 | const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch, |
1189 | SmallVectorImpl<MemoryAccess *> &VersionStack, |
1190 | DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) { |
1191 | |
1192 | // If there are no accesses, there is nothing to do.
1193 | MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB); |
1194 | if (Accesses == nullptr) |
1195 | return; |
1196 | |
1197 | // Pop everything that doesn't dominate the current block off the stack, |
1198 | // increment the PopEpoch to account for this. |
1199 | while (true) { |
1200 | assert(
1201 |     !VersionStack.empty() &&
1202 |     "Version stack should have liveOnEntry sentinel dominating everything");
1203 | BasicBlock *BackBlock = VersionStack.back()->getBlock(); |
1204 | if (DT->dominates(BackBlock, BB)) |
1205 | break; |
1206 | while (VersionStack.back()->getBlock() == BackBlock) |
1207 | VersionStack.pop_back(); |
1208 | ++PopEpoch; |
1209 | } |
1210 | |
1211 | for (MemoryAccess &MA : *Accesses) { |
1212 | auto *MU = dyn_cast<MemoryUse>(&MA); |
1213 | if (!MU) { |
1214 | VersionStack.push_back(&MA); |
1215 | ++StackEpoch; |
1216 | continue; |
1217 | } |
1218 | |
1219 | if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) { |
1220 | MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None); |
1221 | continue; |
1222 | } |
1223 | |
1224 | MemoryLocOrCall UseMLOC(MU); |
1225 | auto &LocInfo = LocStackInfo[UseMLOC]; |
1226 | // If the pop epoch changed, it means we've removed entries from the top of
1227 | // the stack due to changing blocks. We may have to reset the lower bound or
1228 | // last kill info.
1229 | if (LocInfo.PopEpoch != PopEpoch) { |
1230 | LocInfo.PopEpoch = PopEpoch; |
1231 | LocInfo.StackEpoch = StackEpoch; |
1232 | // If the lower bound was in something that no longer dominates us, we |
1233 | // have to reset it. |
1234 | // We can't simply track stack size, because the stack may have had |
1235 | // pushes/pops in the meantime. |
1236 | // XXX: This is non-optimal, but is only slower in cases with heavily
1237 | // branching dominator trees. Getting the optimal number of queries would
1238 | // require making LowerBound and LastKill a per-loc stack, popped until
1239 | // the top of that stack dominates us. This does not seem worth it ATM.
1240 | // A much cheaper optimization would be to always explore the deepest |
1241 | // branch of the dominator tree first. This will guarantee this resets on |
1242 | // the smallest set of blocks. |
1243 | if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB && |
1244 | !DT->dominates(LocInfo.LowerBoundBlock, BB)) { |
1245 | // Reset the lower bound of things to check. |
1246 | // TODO: Some day we should be able to reset to last kill, rather than |
1247 | // 0. |
1248 | LocInfo.LowerBound = 0; |
1249 | LocInfo.LowerBoundBlock = VersionStack[0]->getBlock(); |
1250 | LocInfo.LastKillValid = false; |
1251 | } |
1252 | } else if (LocInfo.StackEpoch != StackEpoch) { |
1253 | // If all that has changed is the StackEpoch, we only have to check the |
1254 | // new things on the stack, because we've checked everything before. In |
1255 | // this case, the lower bound of things to check remains the same. |
1256 | LocInfo.PopEpoch = PopEpoch; |
1257 | LocInfo.StackEpoch = StackEpoch; |
1258 | } |
1259 | if (!LocInfo.LastKillValid) { |
1260 | LocInfo.LastKill = VersionStack.size() - 1; |
1261 | LocInfo.LastKillValid = true; |
1262 | LocInfo.AR = MayAlias; |
1263 | } |
1264 | |
1265 | // At this point, we should have corrected last kill and LowerBound to be |
1266 | // in bounds. |
1267 | assert(LocInfo.LowerBound < VersionStack.size() &&
1268 |        "Lower bound out of range");
1269 | assert(LocInfo.LastKill < VersionStack.size() &&
1270 |        "Last kill info out of range");
1271 | // In any case, the new upper bound is the top of the stack. |
1272 | unsigned long UpperBound = VersionStack.size() - 1; |
1273 | |
1274 | if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) { |
1275 | LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1276 |                   << *(MU->getMemoryInst()) << ")"
1277 |                   << " because there are "
1278 |                   << UpperBound - LocInfo.LowerBound
1279 |                   << " stores to disambiguate\n");
1280 | // Because we did not walk, LastKill is no longer valid, as this may |
1281 | // have been a kill. |
1282 | LocInfo.LastKillValid = false; |
1283 | continue; |
1284 | } |
1285 | bool FoundClobberResult = false; |
1286 | while (UpperBound > LocInfo.LowerBound) { |
1287 | if (isa<MemoryPhi>(VersionStack[UpperBound])) { |
1288 | // For phis, use the walker, see where we ended up, go there |
1289 | Instruction *UseInst = MU->getMemoryInst(); |
1290 | MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst); |
1291 | // We are guaranteed to find it or something is wrong |
1292 | while (VersionStack[UpperBound] != Result) { |
1293 | assert(UpperBound != 0);
1294 | --UpperBound; |
1295 | } |
1296 | FoundClobberResult = true; |
1297 | break; |
1298 | } |
1299 | |
1300 | MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]); |
1301 | // If the lifetime of the pointer ends at this instruction, it's live on |
1302 | // entry. |
1303 | if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) { |
1304 | // Reset UpperBound to liveOnEntryDef's place in the stack |
1305 | UpperBound = 0; |
1306 | FoundClobberResult = true; |
1307 | LocInfo.AR = MustAlias; |
1308 | break; |
1309 | } |
1310 | ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA); |
1311 | if (CA.IsClobber) { |
1312 | FoundClobberResult = true; |
1313 | LocInfo.AR = CA.AR; |
1314 | break; |
1315 | } |
1316 | --UpperBound; |
1317 | } |
1318 | |
1319 | // Note: Phis always have AliasResult AR set to MayAlias ATM. |
1320 | |
1321 | // At the end of this loop, UpperBound is either a clobber or the lower
1322 | // bound. PHI walking may cause it to be < LowerBound, and even < LastKill.
1323 | if (FoundClobberResult || UpperBound < LocInfo.LastKill) { |
1324 | // Our last kill is now wherever we ended up.
1325 | if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound])) |
1326 | LocInfo.AR = None; |
1327 | MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR); |
1328 | LocInfo.LastKill = UpperBound; |
1329 | } else { |
1330 | // Otherwise, we checked all the new ones, and now we know we can get to |
1331 | // LastKill. |
1332 | MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR); |
1333 | } |
1334 | LocInfo.LowerBound = VersionStack.size() - 1; |
1335 | LocInfo.LowerBoundBlock = BB; |
1336 | } |
1337 | } |
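// Illustrative sketch (not in the original source) of the epoch bookkeeping
// above: pushing two MemoryDefs bumps StackEpoch twice, so a MemoryUse of the
// same location seen afterwards only rescans the two new entries (its
// LowerBound is unchanged). Moving to a sibling block pops those defs and
// bumps PopEpoch, which triggers the LowerBound/LastKill reset when the old
// lower bound block no longer dominates the current block.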
1338 | |
1339 | /// Optimize uses to point to their actual clobbering definitions. |
1340 | void MemorySSA::OptimizeUses::optimizeUses() { |
1341 | SmallVector<MemoryAccess *, 16> VersionStack; |
1342 | DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo; |
1343 | VersionStack.push_back(MSSA->getLiveOnEntryDef()); |
1344 | |
1345 | unsigned long StackEpoch = 1; |
1346 | unsigned long PopEpoch = 1; |
1347 | // We perform a non-recursive top-down dominator tree walk. |
1348 | for (const auto *DomNode : depth_first(DT->getRootNode())) |
1349 | optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack, |
1350 | LocStackInfo); |
1351 | } |
1352 | |
1353 | void MemorySSA::placePHINodes( |
1354 | const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) { |
1355 | // Determine where our MemoryPhis should go
1356 | ForwardIDFCalculator IDFs(*DT); |
1357 | IDFs.setDefiningBlocks(DefiningBlocks); |
1358 | SmallVector<BasicBlock *, 32> IDFBlocks; |
1359 | IDFs.calculate(IDFBlocks); |
1360 | |
1361 | // Now place MemoryPhi nodes. |
1362 | for (auto &BB : IDFBlocks) |
1363 | createMemoryPhi(BB); |
1364 | } |
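// Illustrative sketch (not in the original source): for a diamond CFG
//   A -> B, A -> C, B -> D, C -> D
// with a MemoryDef only in B, the iterated dominance frontier of {B} is {D},
// so a single MemoryPhi is created at the start of D to merge B's def with
// the value flowing in from C.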
1365 | |
1366 | void MemorySSA::buildMemorySSA() { |
1367 | // We create an access to represent "live on entry", for things like |
1368 | // arguments or users of globals, where the memory they use is defined before |
1369 | // the beginning of the function. We do not actually insert it into the IR. |
1370 | // We do not define a live on exit for the immediate uses, and thus our |
1371 | // semantics do *not* imply that something with no immediate uses can simply |
1372 | // be removed. |
1373 | BasicBlock &StartingPoint = F.getEntryBlock(); |
1374 | LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr, |
1375 | &StartingPoint, NextID++)); |
1376 | |
1377 | // We maintain lists of memory accesses per-block, trading memory for time. We |
1378 | // could just look up the memory access for every possible instruction in the |
1379 | // stream. |
1380 | SmallPtrSet<BasicBlock *, 32> DefiningBlocks; |
1381 | // Go through each block, figure out where defs occur, and chain together all |
1382 | // the accesses. |
1383 | for (BasicBlock &B : F) { |
1384 | bool InsertIntoDef = false; |
1385 | AccessList *Accesses = nullptr; |
1386 | DefsList *Defs = nullptr; |
1387 | for (Instruction &I : B) { |
1388 | MemoryUseOrDef *MUD = createNewAccess(&I); |
1389 | if (!MUD) |
1390 | continue; |
1391 | |
1392 | if (!Accesses) |
1393 | Accesses = getOrCreateAccessList(&B); |
1394 | Accesses->push_back(MUD); |
1395 | if (isa<MemoryDef>(MUD)) { |
1396 | InsertIntoDef = true; |
1397 | if (!Defs) |
1398 | Defs = getOrCreateDefsList(&B); |
1399 | Defs->push_back(*MUD); |
1400 | } |
1401 | } |
1402 | if (InsertIntoDef) |
1403 | DefiningBlocks.insert(&B); |
1404 | } |
1405 | placePHINodes(DefiningBlocks); |
1406 | |
1407 | // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get |
1408 | // filled in with all blocks. |
1409 | SmallPtrSet<BasicBlock *, 16> Visited; |
1410 | renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited); |
1411 | |
1412 | CachingWalker *Walker = getWalkerImpl(); |
1413 | |
1414 | OptimizeUses(this, Walker, AA, DT).optimizeUses(); |
1415 | |
1416 | // Mark the uses in unreachable blocks as live on entry, so that they go |
1417 | // somewhere. |
1418 | for (auto &BB : F) |
1419 | if (!Visited.count(&BB)) |
1420 | markUnreachableAsLiveOnEntry(&BB); |
1421 | } |
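// Illustrative only, not part of the original source: a minimal sketch of how
// a client could query the structure built above, using only APIs that appear
// elsewhere in this file (getMemoryAccess, getWalker,
// getClobberingMemoryAccess, isLiveOnEntryDef). Kept under #if 0 since it is
// an example, not code that belongs to MemorySSA itself.
#if 0
static void printClobbers(MemorySSA &MSSA, Function &F) {
  MemorySSAWalker *Walker = MSSA.getWalker();
  for (BasicBlock &BB : F)
    for (Instruction &I : BB)
      if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I)) {
        // Ask the walker for the access that actually clobbers I.
        MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(&I);
        dbgs() << *MA << " is clobbered by " << *Clobber
               << (MSSA.isLiveOnEntryDef(Clobber) ? " (liveOnEntry)" : "")
               << "\n";
      }
}
#endif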
1422 | |
1423 | MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); } |
1424 | |
1425 | MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() { |
1426 | if (Walker) |
1427 | return Walker.get(); |
1428 | |
1429 | Walker = llvm::make_unique<CachingWalker>(this, AA, DT); |
1430 | return Walker.get(); |
1431 | } |
1432 | |
1433 | // This is a helper function used by the creation routines. It places NewAccess |
1434 | // into the access and defs lists for a given basic block, at the given |
1435 | // insertion point. |
1436 | void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess, |
1437 | const BasicBlock *BB, |
1438 | InsertionPlace Point) { |
1439 | auto *Accesses = getOrCreateAccessList(BB); |
1440 | if (Point == Beginning) { |
1441 | // If it's a phi node, it goes first, otherwise, it goes after any phi |
1442 | // nodes. |
1443 | if (isa<MemoryPhi>(NewAccess)) { |
1444 | Accesses->push_front(NewAccess); |
1445 | auto *Defs = getOrCreateDefsList(BB); |
1446 | Defs->push_front(*NewAccess); |
1447 | } else { |
1448 | auto AI = find_if_not( |
1449 | *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); |
1450 | Accesses->insert(AI, NewAccess); |
1451 | if (!isa<MemoryUse>(NewAccess)) { |
1452 | auto *Defs = getOrCreateDefsList(BB); |
1453 | auto DI = find_if_not( |
1454 | *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); |
1455 | Defs->insert(DI, *NewAccess); |
1456 | } |
1457 | } |
1458 | } else { |
1459 | Accesses->push_back(NewAccess); |
1460 | if (!isa<MemoryUse>(NewAccess)) { |
1461 | auto *Defs = getOrCreateDefsList(BB); |
1462 | Defs->push_back(*NewAccess); |
1463 | } |
1464 | } |
1465 | BlockNumberingValid.erase(BB); |
1466 | } |
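// Illustrative sketch (not in the original source): inserting a MemoryDef at
// Beginning of a block that already holds [MemoryPhi, MemoryUse] lands it
// between the two -- after all phis in both the access list and the defs
// list -- giving [MemoryPhi, MemoryDef, MemoryUse].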
1467 | |
1468 | void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB, |
1469 | AccessList::iterator InsertPt) { |
1470 | auto *Accesses = getWritableBlockAccesses(BB); |
1471 | bool WasEnd = InsertPt == Accesses->end(); |
1472 | Accesses->insert(AccessList::iterator(InsertPt), What); |
1473 | if (!isa<MemoryUse>(What)) { |
1474 | auto *Defs = getOrCreateDefsList(BB); |
1475 | // If we got asked to insert at the end, we have an easy job: just shove it
1476 | // at the end. If we got asked to insert before an existing def, its
1477 | // defs-list iterator is directly available. If we got asked to insert
1478 | // before a use, we have to hunt for the next def.
1479 | if (WasEnd) { |
1480 | Defs->push_back(*What); |
1481 | } else if (isa<MemoryDef>(InsertPt)) { |
1482 | Defs->insert(InsertPt->getDefsIterator(), *What); |
1483 | } else { |
1484 | while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt)) |
1485 | ++InsertPt; |
1486 | // Either we found a def, or we are inserting at the end |
1487 | if (InsertPt == Accesses->end()) |
1488 | Defs->push_back(*What); |
1489 | else |
1490 | Defs->insert(InsertPt->getDefsIterator(), *What); |
1491 | } |
1492 | } |
1493 | BlockNumberingValid.erase(BB); |
1494 | } |
1495 | |
1496 | void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) { |
1497 | // Keep it in the lookup tables, remove from the lists |
1498 | removeFromLists(What, false); |
1499 | |
1500 | // Note that moving should implicitly invalidate the optimized state of a |
1501 | // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a |
1502 | // MemoryDef. |
1503 | if (auto *MD = dyn_cast<MemoryDef>(What)) |
1504 | MD->resetOptimized(); |
1505 | What->setBlock(BB); |
1506 | } |
1507 | |
1508 | // Move What before Where in the IR. The end result is that What will belong to |
1509 | // the right lists and have the right Block set, but will not otherwise be |
1510 | // correct. It will not have the right defining access, and if it is a def, |
1511 | // things below it will not properly be updated. |
1512 | void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB, |
1513 | AccessList::iterator Where) { |
1514 | prepareForMoveTo(What, BB); |
1515 | insertIntoListsBefore(What, BB, Where); |
1516 | } |
1517 | |
1518 | void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB, |
1519 | InsertionPlace Point) { |
1520 | if (isa<MemoryPhi>(What)) { |
1521 | assert(Point == Beginning &&
1522 |        "Can only move a Phi at the beginning of the block");
1523 | // Update lookup table entry |
1524 | ValueToMemoryAccess.erase(What->getBlock()); |
1525 | bool Inserted = ValueToMemoryAccess.insert({BB, What}).second; |
1526 | (void)Inserted; |
1527 | assert(Inserted && "Cannot move a Phi to a block that already has one");
1528 | } |
1529 | |
1530 | prepareForMoveTo(What, BB); |
1531 | insertIntoListsForBlock(What, BB, Point); |
1532 | } |
1533 | |
1534 | MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) { |
1535 | assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1536 | MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++); |
1537 | // Phis are always placed at the front of the block.
1538 | insertIntoListsForBlock(Phi, BB, Beginning); |
1539 | ValueToMemoryAccess[BB] = Phi; |
1540 | return Phi; |
1541 | } |
1542 | |
1543 | MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I, |
1544 | MemoryAccess *Definition, |
1545 | const MemoryUseOrDef *Template) { |
1546 | assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1547 | MemoryUseOrDef *NewAccess = createNewAccess(I, Template); |
1548 | assert(
1549 |     NewAccess != nullptr &&
1550 |     "Tried to create a memory access for a non-memory touching instruction");
1551 | NewAccess->setDefiningAccess(Definition); |
1552 | return NewAccess; |
1553 | } |
1554 | |
1555 | // Return true if the instruction has ordering constraints. |
1556 | // Note specifically that this only considers stores and loads |
1557 | // because others are still considered ModRef by getModRefInfo. |
1558 | static inline bool isOrdered(const Instruction *I) { |
1559 | if (auto *SI = dyn_cast<StoreInst>(I)) { |
1560 | if (!SI->isUnordered()) |
1561 | return true; |
1562 | } else if (auto *LI = dyn_cast<LoadInst>(I)) { |
1563 | if (!LI->isUnordered()) |
1564 | return true; |
1565 | } |
1566 | return false; |
1567 | } |
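// Illustrative examples (not in the original source): a plain `load i32,
// i32* %p` is unordered, so isOrdered returns false; both `load atomic i32,
// i32* %p seq_cst, align 4` and a volatile load fail isUnordered(), so
// isOrdered returns true and createNewAccess below models them as MemoryDefs.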
1568 | |
1569 | /// Helper function to create new memory accesses |
1570 | MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I, |
1571 | const MemoryUseOrDef *Template) { |
1572 | // The assume intrinsic has a control dependency which we model by claiming |
1573 | // that it writes arbitrarily. Ignore that fake memory dependency here. |
1574 | // FIXME: Replace this special casing with a more accurate modelling of |
1575 | // assume's control dependency. |
1576 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) |
1577 | if (II->getIntrinsicID() == Intrinsic::assume) |
1578 | return nullptr; |
1579 | |
1580 | bool Def, Use; |
1581 | if (Template) { |
1582 | Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr; |
1583 | Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr; |
1584 | #if !defined(NDEBUG) |
1585 | ModRefInfo ModRef = AA->getModRefInfo(I, None); |
1586 | bool DefCheck, UseCheck; |
1587 | DefCheck = isModSet(ModRef) || isOrdered(I); |
1588 | UseCheck = isRefSet(ModRef); |
1589 | assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1590 | #endif |
1591 | } else { |
1592 | // Find out what effect this instruction has on memory.
1593 | ModRefInfo ModRef = AA->getModRefInfo(I, None); |
1594 | // The isOrdered check is used to ensure that volatiles end up as defs |
1595 | // (atomics end up as ModRef right now anyway). Until we separate the |
1596 | // ordering chain from the memory chain, this enables people to see at least |
1597 | // some relative ordering to volatiles. Note that getClobberingMemoryAccess |
1598 | // will still give an answer that bypasses other volatile loads. TODO: |
1599 | // Separate memory aliasing and ordering into two different chains so that |
1600 | // we can precisely represent both "what memory will this read/write/is |
1601 | // clobbered by" and "what instructions can I move this past". |
1602 | Def = isModSet(ModRef) || isOrdered(I); |
1603 | Use = isRefSet(ModRef); |
1604 | } |
1605 | |
1606 | // It's possible for an instruction to not modify memory at all. Such
1607 | // instructions are ignored during construction.
1608 | if (!Def && !Use) |
1609 | return nullptr; |
1610 | |
1611 | MemoryUseOrDef *MUD; |
1612 | if (Def) |
1613 | MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++); |
1614 | else |
1615 | MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent()); |
1616 | ValueToMemoryAccess[I] = MUD; |
1617 | return MUD; |
1618 | } |
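// Illustrative sketch (not in the original source) of the classification this
// function performs: a store is Mod, so it becomes a MemoryDef; a plain load,
// or a call to a readonly function, is Ref only, so it becomes a MemoryUse;
// an instruction that touches no memory (e.g. `add`) yields nullptr, as does
// llvm.assume via the special case above.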
1619 | |
1620 | /// Returns true if \p Replacer dominates \p Replacee.
1621 | bool MemorySSA::dominatesUse(const MemoryAccess *Replacer, |
1622 | const MemoryAccess *Replacee) const { |
1623 | if (isa<MemoryUseOrDef>(Replacee)) |
1624 | return DT->dominates(Replacer->getBlock(), Replacee->getBlock()); |
1625 | const auto *MP = cast<MemoryPhi>(Replacee); |
1626 | // For a phi node, the use occurs in the predecessor block of the phi node. |
1627 | // Since Replacee may occur multiple times in the phi node, we have to check
1628 | // each operand to ensure Replacer dominates each place Replacee occurs.
1629 | for (const Use &Arg : MP->operands()) { |
1630 | if (Arg.get() != Replacee && |
1631 | !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg))) |
1632 | return false; |
1633 | } |
1634 | return true; |
1635 | } |
1636 | |
1637 | /// Properly remove \p MA from all of MemorySSA's lookup tables. |
1638 | void MemorySSA::removeFromLookups(MemoryAccess *MA) { |
1639 | assert(MA->use_empty() &&
1640 |        "Trying to remove memory access that still has uses");
1641 | BlockNumbering.erase(MA); |
1642 | if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) |
1643 | MUD->setDefiningAccess(nullptr); |
1644 | // Invalidate our walker's cache if necessary |
1645 | if (!isa<MemoryUse>(MA)) |
1646 | Walker->invalidateInfo(MA); |
1647 | |
1648 | Value *MemoryInst; |
1649 | if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) |
1650 | MemoryInst = MUD->getMemoryInst(); |
1651 | else |
1652 | MemoryInst = MA->getBlock(); |
1653 | |
1654 | auto VMA = ValueToMemoryAccess.find(MemoryInst); |
1655 | if (VMA->second == MA) |
1656 | ValueToMemoryAccess.erase(VMA); |
1657 | } |
1658 | |
1659 | /// Properly remove \p MA from all of MemorySSA's lists. |
1660 | /// |
1661 | /// Because of the way the intrusive list and use lists work, it is important to |
1662 | /// do removal in the right order. |
1663 | /// ShouldDelete defaults to true, and will cause the memory access to also be |
1664 | /// deleted, not just removed. |
1665 | void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) { |
1666 | BasicBlock *BB = MA->getBlock(); |
1667 | // The access list owns the reference, so we erase it from the non-owning list |
1668 | // first. |
1669 | if (!isa<MemoryUse>(MA)) { |
1670 | auto DefsIt = PerBlockDefs.find(BB); |
1671 | std::unique_ptr<DefsList> &Defs = DefsIt->second; |
1672 | Defs->remove(*MA); |
1673 | if (Defs->empty()) |
1674 | PerBlockDefs.erase(DefsIt); |
1675 | } |
1676 | |
1677 | // The erase call here will delete it. If we don't want it deleted, we call |
1678 | // remove instead. |
1679 | auto AccessIt = PerBlockAccesses.find(BB); |
1680 | std::unique_ptr<AccessList> &Accesses = AccessIt->second; |
1681 | if (ShouldDelete) |
1682 | Accesses->erase(MA); |
1683 | else |
1684 | Accesses->remove(MA); |
1685 | |
1686 | if (Accesses->empty()) { |
1687 | PerBlockAccesses.erase(AccessIt); |
1688 | BlockNumberingValid.erase(BB); |
1689 | } |
1690 | } |
1691 | |
1692 | void MemorySSA::print(raw_ostream &OS) const { |
1693 | MemorySSAAnnotatedWriter Writer(this); |
1694 | F.print(OS, &Writer); |
1695 | } |
1696 | |
1697 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) |
1698 | LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1699 | #endif |
1700 | |
1701 | void MemorySSA::verifyMemorySSA() const { |
1702 | verifyDefUses(F); |
1703 | verifyDomination(F); |
1704 | verifyOrdering(F); |
1705 | verifyDominationNumbers(F); |
1706 | Walker->verify(this); |
1707 | verifyClobberSanity(F); |
1708 | } |
1709 | |
1710 | /// Check sanity of the clobbering instruction for access MA. |
1711 | void MemorySSA::checkClobberSanityAccess(const MemoryAccess *MA) const { |
1712 | if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) { |
1713 | if (!MUD->isOptimized()) |
1714 | return; |
1715 | auto *I = MUD->getMemoryInst(); |
1716 | auto Loc = MemoryLocation::getOrNone(I); |
1717 | if (Loc == None) |
1718 | return; |
1719 | auto *Clobber = MUD->getOptimized(); |
1720 | UpwardsMemoryQuery Q(I, MUD); |
1721 | checkClobberSanity(MUD, Clobber, *Loc, *this, Q, *AA, true); |
1722 | } |
1723 | } |
1724 | |
1725 | void MemorySSA::verifyClobberSanity(const Function &F) const { |
1726 | #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS) |
1727 | for (const BasicBlock &BB : F) { |
1728 | const AccessList *Accesses = getBlockAccesses(&BB); |
1729 | if (!Accesses) |
1730 | continue; |
1731 | for (const MemoryAccess &MA : *Accesses) |
1732 | checkClobberSanityAccess(&MA); |
1733 | } |
1734 | #endif |
1735 | } |
1736 | |
1737 | /// Verify that all of the blocks we believe to have valid domination numbers |
1738 | /// actually have valid domination numbers. |
1739 | void MemorySSA::verifyDominationNumbers(const Function &F) const { |
1740 | #ifndef NDEBUG |
1741 | if (BlockNumberingValid.empty()) |
1742 | return; |
1743 | |
1744 | SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid; |
1745 | for (const BasicBlock &BB : F) { |
1746 | if (!ValidBlocks.count(&BB)) |
1747 | continue; |
1748 | |
1749 | ValidBlocks.erase(&BB); |
1750 | |
1751 | const AccessList *Accesses = getBlockAccesses(&BB); |
1752 | // It's correct to say an empty block has valid numbering. |
1753 | if (!Accesses) |
1754 | continue; |
1755 | |
1756 | // Block numbering starts at 1. |
1757 | unsigned long LastNumber = 0; |
1758 | for (const MemoryAccess &MA : *Accesses) { |
1759 | auto ThisNumberIter = BlockNumbering.find(&MA); |
1760 | assert(ThisNumberIter != BlockNumbering.end() &&
1761 |        "MemoryAccess has no domination number in a valid block!");
1762 | |
1763 | unsigned long ThisNumber = ThisNumberIter->second; |
1764 | assert(ThisNumber > LastNumber &&
1765 |        "Domination numbers should be strictly increasing!");
1766 | LastNumber = ThisNumber; |
1767 | } |
1768 | } |
1769 | |
1770 | assert(ValidBlocks.empty() &&
1771 |        "All valid BasicBlocks should exist in F -- dangling pointers?");
1772 | #endif |
1773 | } |
1774 | |
1775 | /// Verify that the order and existence of MemoryAccesses matches the |
1776 | /// order and existence of memory affecting instructions. |
1777 | void MemorySSA::verifyOrdering(Function &F) const { |
1778 | #ifndef NDEBUG |
1779 | // Walk all the blocks, comparing what the lookups think and what the access |
1780 | // lists think, as well as the order in the blocks vs the order in the access |
1781 | // lists. |
1782 | SmallVector<MemoryAccess *, 32> ActualAccesses; |
1783 | SmallVector<MemoryAccess *, 32> ActualDefs; |
1784 | for (BasicBlock &B : F) { |
1785 | const AccessList *AL = getBlockAccesses(&B); |
1786 | const auto *DL = getBlockDefs(&B); |
1787 | MemoryAccess *Phi = getMemoryAccess(&B); |
1788 | if (Phi) { |
1789 | ActualAccesses.push_back(Phi); |
1790 | ActualDefs.push_back(Phi); |
1791 | } |
1792 | |
1793 | for (Instruction &I : B) { |
1794 | MemoryAccess *MA = getMemoryAccess(&I); |
1795 | assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1796 |        "We have memory affecting instructions "
1797 |        "in this block but they are not in the "
1798 |        "access list or defs list");
1799 | if (MA) { |
1800 | ActualAccesses.push_back(MA); |
1801 | if (isa<MemoryDef>(MA)) |
1802 | ActualDefs.push_back(MA); |
1803 | } |
1804 | } |
1805 | // Either we hit the assert, really have no accesses, or we have both |
1806 | // accesses and an access list. |
1807 | // Same with defs. |
1808 | if (!AL && !DL) |
1809 | continue; |
1810 | assert(AL->size() == ActualAccesses.size() &&
1811 |        "We don't have the same number of accesses in the block as on the "
1812 |        "access list");
1813 | assert((DL || ActualDefs.size() == 0) &&
1814 |        "Either we should have a defs list, or we should have no defs");
1815 | assert((!DL || DL->size() == ActualDefs.size()) &&
1816 |        "We don't have the same number of defs in the block as on the "
1817 |        "def list");
1818 | auto ALI = AL->begin(); |
1819 | auto AAI = ActualAccesses.begin(); |
1820 | while (ALI != AL->end() && AAI != ActualAccesses.end()) { |
1821 | assert(&*ALI == *AAI && "Not the same accesses in the same order");
1822 | ++ALI; |
1823 | ++AAI; |
1824 | } |
1825 | ActualAccesses.clear(); |
1826 | if (DL) { |
1827 | auto DLI = DL->begin(); |
1828 | auto ADI = ActualDefs.begin(); |
1829 | while (DLI != DL->end() && ADI != ActualDefs.end()) { |
1830 | assert(&*DLI == *ADI && "Not the same defs in the same order");
1831 | ++DLI; |
1832 | ++ADI; |
1833 | } |
1834 | } |
1835 | ActualDefs.clear(); |
1836 | } |
1837 | #endif |
1838 | } |
1839 | |
1840 | /// Verify the domination properties of MemorySSA by checking that each |
1841 | /// definition dominates all of its uses. |
1842 | void MemorySSA::verifyDomination(Function &F) const { |
1843 | #ifndef NDEBUG |
1844 | for (BasicBlock &B : F) { |
1845 | // Phi nodes are attached to basic blocks |
1846 | if (MemoryPhi *MP = getMemoryAccess(&B)) |
1847 | for (const Use &U : MP->uses()) |
1848 | assert(dominates(MP, U) && "Memory PHI does not dominate its uses");
1849 | |
1850 | for (Instruction &I : B) { |
1851 | MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I)); |
1852 | if (!MD) |
1853 | continue; |
1854 | |
1855 | for (const Use &U : MD->uses()) |
1856 | assert(dominates(MD, U) && "Memory Def does not dominate its uses");
1857 | } |
1858 | } |
1859 | #endif |
1860 | } |
1861 | |
1862 | /// Verify the def-use lists in MemorySSA, by verifying that \p Use |
1863 | /// appears in the use list of \p Def. |
1864 | void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const { |
1865 | #ifndef NDEBUG |
1866 | // The live on entry use may cause us to get a NULL def here |
1867 | if (!Def) |
1868 | assert(isLiveOnEntryDef(Use) &&
1869 |        "Null def but use does not point to live on entry def");
1870 | else |
1871 | assert(is_contained(Def->users(), Use) &&
1872 |        "Did not find use in def's use list");
1873 | #endif |
1874 | } |
1875 | |
1876 | /// Verify the immediate use information, by walking all the memory |
1877 | /// accesses and verifying that, for each use, it appears in the |
1878 | /// appropriate def's use list |
1879 | void MemorySSA::verifyDefUses(Function &F) const { |
1880 | #ifndef NDEBUG |
1881 | for (BasicBlock &B : F) { |
1882 | // Phi nodes are attached to basic blocks |
1883 | if (MemoryPhi *Phi = getMemoryAccess(&B)) { |
1884 | assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1885 |            pred_begin(&B), pred_end(&B))) &&
1886 |        "Incomplete MemoryPhi Node");
1887 | for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) { |
1888 | verifyUseInDefs(Phi->getIncomingValue(I), Phi); |
1889 | assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
1890 |            pred_end(&B) &&
1891 |        "Incoming phi block not a block predecessor");
1892 | } |
1893 | } |
1894 | |
1895 | for (Instruction &I : B) { |
1896 | if (MemoryUseOrDef *MA = getMemoryAccess(&I)) { |
1897 | verifyUseInDefs(MA->getDefiningAccess(), MA); |
1898 | } |
1899 | } |
1900 | } |
1901 | #endif |
1902 | } |
1903 | |
1904 | /// Perform a local numbering on blocks so that instruction ordering can be |
1905 | /// determined in constant time. |
1906 | /// TODO: We currently just number in order. If we numbered by N, we could |
1907 | /// allow at least N-1 sequences of insertBefore or insertAfter (and at least |
1908 | /// log2(N) sequences of mixed before and after) without needing to invalidate |
1909 | /// the numbering. |
1910 | void MemorySSA::renumberBlock(const BasicBlock *B) const { |
1911 | // The pre-increment ensures the numbers really start at 1. |
1912 | unsigned long CurrentNumber = 0; |
1913 | const AccessList *AL = getBlockAccesses(B); |
1914 | assert(AL != nullptr && "Asking to renumber an empty block");
1915 | for (const auto &I : *AL) |
1916 | BlockNumbering[&I] = ++CurrentNumber; |
1917 | BlockNumberingValid.insert(B); |
1918 | } |
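// Illustrative sketch (not in the original source): for an access list
// [MemoryPhi, MemoryDef, MemoryUse], this assigns numbers 1, 2, and 3; the
// comparison in locallyDominates below then reduces same-block domination to
// DominatorNum < DominateeNum.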
1919 | |
1920 | /// Determine, for two memory accesses in the same block, |
1921 | /// whether \p Dominator dominates \p Dominatee. |
1922 | /// \returns True if \p Dominator dominates \p Dominatee. |
1923 | bool MemorySSA::locallyDominates(const MemoryAccess *Dominator, |
1924 | const MemoryAccess *Dominatee) const { |
1925 | const BasicBlock *DominatorBlock = Dominator->getBlock(); |
1926 | |
1927 | assert((DominatorBlock == Dominatee->getBlock()) &&
1928 |        "Asking for local domination when accesses are in different blocks!");
1929 | // A node dominates itself. |
1930 | if (Dominatee == Dominator) |
1931 | return true; |
1932 | |
1933 | // When Dominatee is defined on function entry, it is not dominated by another |
1934 | // memory access. |
1935 | if (isLiveOnEntryDef(Dominatee)) |
1936 | return false; |
1937 | |
1938 | // When Dominator is defined on function entry, it dominates the other memory |
1939 | // access. |
1940 | if (isLiveOnEntryDef(Dominator)) |
1941 | return true; |
1942 | |
1943 | if (!BlockNumberingValid.count(DominatorBlock)) |
1944 | renumberBlock(DominatorBlock); |
1945 | |
1946 | unsigned long DominatorNum = BlockNumbering.lookup(Dominator); |
1947 | // All numbers start with 1 |
1948 | assert(DominatorNum != 0 && "Block was not numbered properly")((DominatorNum != 0 && "Block was not numbered properly" ) ? static_cast<void> (0) : __assert_fail ("DominatorNum != 0 && \"Block was not numbered properly\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Analysis/MemorySSA.cpp" , 1948, __PRETTY_FUNCTION__)); |
1949 | unsigned long DominateeNum = BlockNumbering.lookup(Dominatee); |
1950 | assert(DominateeNum != 0 && "Block was not numbered properly")((DominateeNum != 0 && "Block was not numbered properly" ) ? static_cast<void> (0) : __assert_fail ("DominateeNum != 0 && \"Block was not numbered properly\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Analysis/MemorySSA.cpp" , 1950, __PRETTY_FUNCTION__)); |
1951 | return DominatorNum < DominateeNum; |
1952 | } |
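
// Illustrative example (hypothetical accesses): in a block containing
//   1 = MemoryDef(liveOnEntry)   ; numbered 1 by renumberBlock
//   MemoryUse(1)                 ; numbered 2
// locallyDominates(Def, Use) is true because 1 < 2, and
// locallyDominates(Use, Def) is false.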

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const MemoryAccess *Dominatee) const {
  if (Dominator == Dominatee)
    return true;

  if (isLiveOnEntryDef(Dominatee))
    return false;

  if (Dominator->getBlock() != Dominatee->getBlock())
    return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
  return locallyDominates(Dominator, Dominatee);
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const Use &Dominatee) const {
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
    BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
    // The def must dominate the incoming block of the phi.
    if (UseBB != Dominator->getBlock())
      return DT->dominates(Dominator->getBlock(), UseBB);
    // If the UseBB and the DefBB are the same, compare locally.
    return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
  }
  // If it's not a PHI node use, the normal dominates can already handle it.
  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
}
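
// Note the phi special case above (illustrative): a def feeding a MemoryPhi
// operand only needs to dominate that operand's incoming block, so a def in a
// hypothetical %if.then predecessor dominates the phi operand it supplies in
// the join block even when it does not dominate the join block itself.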

const static char LiveOnEntryStr[] = "liveOnEntry";

void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}

void MemoryDef::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();

  auto printID = [&OS](MemoryAccess *A) {
    if (A && A->getID())
      OS << A->getID();
    else
      OS << LiveOnEntryStr;
  };

  OS << getID() << " = MemoryDef(";
  printID(UO);
  OS << ")";

  if (isOptimized()) {
    OS << "->";
    printID(getOptimized());

    if (Optional<AliasResult> AR = getOptimizedAccessType())
      OS << " " << *AR;
  }
}
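
// Sample of the resulting format (IDs are hypothetical): an unoptimized def
// prints as "2 = MemoryDef(1)"; an optimized one appends its cached clobber
// and, when known, the alias kind, e.g. "3 = MemoryDef(2)->1 MustAlias".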

void MemoryPhi::print(raw_ostream &OS) const {
  bool First = true;
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *BB = getIncomingBlock(Op);
    MemoryAccess *MA = cast<MemoryAccess>(Op);
    if (!First)
      OS << ',';
    else
      First = false;

    OS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}
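
// Sample output (block names and IDs are hypothetical): a two-predecessor phi
// prints as "3 = MemoryPhi({entry,liveOnEntry},{if.then,2})".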

void MemoryUse::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();
  OS << "MemoryUse(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';

  if (Optional<AliasResult> AR = getOptimizedAccessType())
    OS << " " << *AR;
}
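
// Sample output (ID is hypothetical): "MemoryUse(1) MustAlias", or
// "MemoryUse(liveOnEntry)" when the defining access is the entry def.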

void MemoryAccess::dump() const {
  // Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  print(dbgs());
  dbgs() << "\n";
#endif
}

char MemorySSAPrinterLegacyPass::ID = 0;

MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MemorySSAWrapperPass>();
}

bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  MSSA.print(dbgs());
  if (VerifyMemorySSA)
    MSSA.verifyMemorySSA();
  return false;
}

AnalysisKey MemorySSAAnalysis::Key;

MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
}
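
// Minimal sketch of a new-pass-manager client (hypothetical `ExamplePass`,
// not part of this file); it obtains MemorySSA exactly as the printer and
// verifier passes below do, through the analysis manager:
//
//   PreservedAnalyses ExamplePass::run(Function &F,
//                                      FunctionAnalysisManager &AM) {
//     MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//     // ... query MSSA here ...
//     return PreservedAnalyses::all();
//   }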

PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  OS << "MemorySSA for function: " << F.getName() << "\n";
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);

  return PreservedAnalyses::all();
}

PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();

  return PreservedAnalyses::all();
}

char MemorySSAWrapperPass::ID = 0;

MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }

void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
}

bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}
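
// Legacy-pass-manager clients mirror MemorySSAPrinterLegacyPass above
// (illustrative, not a requirement of this file): declare the dependency via
// AU.addRequired<MemorySSAWrapperPass>() in getAnalysisUsage(), then fetch
// the analysis with getAnalysis<MemorySSAWrapperPass>().getMSSA() inside
// runOnFunction().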

void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }

void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}

MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}

MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
                                        DominatorTree *D)
    : MemorySSAWalker(M), Walker(*M, *A, *D) {}

void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->resetOptimized();
}

/// Walk the use-def chains starting at \p MA and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
  return Walker.findClobber(StartingAccess, Q);
}

MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
  if (isa<MemoryPhi>(StartingAccess))
    return StartingAccess;

  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
    return StartingUseOrDef;

  Instruction *I = StartingUseOrDef->getMemoryInst();

  // Conservatively, fences are always clobbers, so don't perform the walk if we
  // hit a fence.
  if (!ImmutableCallSite(I) && I->isFenceLike())
    return StartingUseOrDef;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingUseOrDef;
  Q.StartingLoc = Loc;
  Q.Inst = I;
  Q.IsCall = false;

  // Unlike the other function, do not walk to the def of a def, because we are
  // handed something we already believe is the clobbering access.
  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
                                     ? StartingUseOrDef->getDefiningAccess()
                                     : StartingUseOrDef;

  MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *Clobber << "\n");
  return Clobber;
}
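
// Hypothetical usage sketch (`MSSA`, `Access`, and `SomeLoadInst` are assumed
// names, not from this file); this overload asks for the clobber of an
// explicit location rather than of the access's own instruction:
//   MemoryLocation Loc = MemoryLocation::get(SomeLoadInst);
//   MemoryAccess *C =
//       MSSA.getWalker()->getClobberingMemoryAccess(Access, Loc);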

MemoryAccess *
MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  // If this is an already optimized use or def, return the optimized result.
  // Note: Currently, we store the optimized def result in a separate field,
  // since we can't use the defining access.
  if (StartingAccess->isOptimized())
    return StartingAccess->getOptimized();

  const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with a fence, since they conservatively clobber
  // all memory, and have no locations to get pointers from to try to
  // disambiguate.
  if (!ImmutableCallSite(I) && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q(I, StartingAccess);

  if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    StartingAccess->setOptimizedAccessType(None);
    return LiveOnEntry;
  }

  // Start with the thing we already think clobbers this location
  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

  // At this point, DefiningAccess may be the live on entry def.
  // If it is, we will not get a better result.
  if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
    StartingAccess->setOptimized(DefiningAccess);
    StartingAccess->setOptimizedAccessType(None);
    return DefiningAccess;
  }

  MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *DefiningAccess << "\n");
  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *Result << "\n");

  StartingAccess->setOptimized(Result);
  if (MSSA->isLiveOnEntryDef(Result))
    StartingAccess->setOptimizedAccessType(None);
  else if (Q.AR == MustAlias)
    StartingAccess->setOptimizedAccessType(MustAlias);

  return Result;
}
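
// Typical client pattern (illustrative sketch; `MSSA` and `LI` are assumed to
// be a valid MemorySSA instance and a load instruction it tracks):
//   MemoryUseOrDef *MU = MSSA.getMemoryAccess(LI);
//   MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(MU);
// Repeated queries are cheap: the result is cached on the access via
// setOptimized() above, and only recomputed after invalidateInfo().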

MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}

void MemoryPhi::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryPhi *>(Self);
}

void MemoryDef::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryDef *>(Self);
}

void MemoryUse::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryUse *>(Self);
}