LLVM  9.0.0svn
PromoteMemoryToRegister.cpp
Go to the documentation of this file.
1 //===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file promotes memory references to be register references. It promotes
10 // alloca instructions which only have loads and stores as uses. An alloca is
11 // transformed by using iterated dominator frontiers to place PHI nodes, then
12 // traversing the function in depth-first order to rewrite loads and stores as
13 // appropriate.
14 //
15 //===----------------------------------------------------------------------===//
16 
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>
#include <vector>
54 
55 using namespace llvm;
56 
57 #define DEBUG_TYPE "mem2reg"
58 
59 STATISTIC(NumLocalPromoted, "Number of alloca's promoted within one block");
60 STATISTIC(NumSingleStore, "Number of alloca's promoted with a single store");
61 STATISTIC(NumDeadAlloca, "Number of dead alloca's removed");
62 STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");
63 
65  // FIXME: If the memory unit is of pointer or integer type, we can permit
66  // assignments to subsections of the memory unit.
67  unsigned AS = AI->getType()->getAddressSpace();
68 
69  // Only allow direct and non-volatile loads and stores...
70  for (const User *U : AI->users()) {
71  if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
72  // Note that atomic loads can be transformed; atomic semantics do
73  // not have any meaning for a local alloca.
74  if (LI->isVolatile())
75  return false;
76  } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
77  if (SI->getOperand(0) == AI)
78  return false; // Don't allow a store OF the AI, only INTO the AI.
79  // Note that atomic stores can be transformed; atomic semantics do
80  // not have any meaning for a local alloca.
81  if (SI->isVolatile())
82  return false;
83  } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
84  if (!II->isLifetimeStartOrEnd())
85  return false;
86  } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
87  if (BCI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
88  return false;
89  if (!onlyUsedByLifetimeMarkers(BCI))
90  return false;
91  } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
92  if (GEPI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
93  return false;
94  if (!GEPI->hasAllZeroIndices())
95  return false;
96  if (!onlyUsedByLifetimeMarkers(GEPI))
97  return false;
98  } else {
99  return false;
100  }
101  }
102 
103  return true;
104 }
105 
106 namespace {
107 
108 struct AllocaInfo {
109  SmallVector<BasicBlock *, 32> DefiningBlocks;
110  SmallVector<BasicBlock *, 32> UsingBlocks;
111 
112  StoreInst *OnlyStore;
113  BasicBlock *OnlyBlock;
114  bool OnlyUsedInOneBlock;
115 
117 
118  void clear() {
119  DefiningBlocks.clear();
120  UsingBlocks.clear();
121  OnlyStore = nullptr;
122  OnlyBlock = nullptr;
123  OnlyUsedInOneBlock = true;
124  DbgDeclares.clear();
125  }
126 
127  /// Scan the uses of the specified alloca, filling in the AllocaInfo used
128  /// by the rest of the pass to reason about the uses of this alloca.
129  void AnalyzeAlloca(AllocaInst *AI) {
130  clear();
131 
132  // As we scan the uses of the alloca instruction, keep track of stores,
133  // and decide whether all of the loads and stores to the alloca are within
134  // the same basic block.
135  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
136  Instruction *User = cast<Instruction>(*UI++);
137 
138  if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
139  // Remember the basic blocks which define new values for the alloca
140  DefiningBlocks.push_back(SI->getParent());
141  OnlyStore = SI;
142  } else {
143  LoadInst *LI = cast<LoadInst>(User);
144  // Otherwise it must be a load instruction, keep track of variable
145  // reads.
146  UsingBlocks.push_back(LI->getParent());
147  }
148 
149  if (OnlyUsedInOneBlock) {
150  if (!OnlyBlock)
151  OnlyBlock = User->getParent();
152  else if (OnlyBlock != User->getParent())
153  OnlyUsedInOneBlock = false;
154  }
155  }
156 
157  DbgDeclares = FindDbgAddrUses(AI);
158  }
159 };
160 
161 /// Data package used by RenamePass().
162 struct RenamePassData {
163  using ValVector = std::vector<Value *>;
164  using LocationVector = std::vector<DebugLoc>;
165 
166  RenamePassData(BasicBlock *B, BasicBlock *P, ValVector V, LocationVector L)
167  : BB(B), Pred(P), Values(std::move(V)), Locations(std::move(L)) {}
168 
169  BasicBlock *BB;
170  BasicBlock *Pred;
171  ValVector Values;
172  LocationVector Locations;
173 };
174 
175 /// This assigns and keeps a per-bb relative ordering of load/store
176 /// instructions in the block that directly load or store an alloca.
177 ///
178 /// This functionality is important because it avoids scanning large basic
179 /// blocks multiple times when promoting many allocas in the same block.
180 class LargeBlockInfo {
181  /// For each instruction that we track, keep the index of the
182  /// instruction.
183  ///
184  /// The index starts out as the number of the instruction from the start of
185  /// the block.
187 
188 public:
189 
190  /// This code only looks at accesses to allocas.
191  static bool isInterestingInstruction(const Instruction *I) {
192  return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
193  (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
194  }
195 
196  /// Get or calculate the index of the specified instruction.
197  unsigned getInstructionIndex(const Instruction *I) {
198  assert(isInterestingInstruction(I) &&
199  "Not a load/store to/from an alloca?");
200 
201  // If we already have this instruction number, return it.
203  if (It != InstNumbers.end())
204  return It->second;
205 
206  // Scan the whole block to get the instruction. This accumulates
207  // information for every interesting instruction in the block, in order to
208  // avoid gratuitus rescans.
209  const BasicBlock *BB = I->getParent();
210  unsigned InstNo = 0;
211  for (const Instruction &BBI : *BB)
212  if (isInterestingInstruction(&BBI))
213  InstNumbers[&BBI] = InstNo++;
214  It = InstNumbers.find(I);
215 
216  assert(It != InstNumbers.end() && "Didn't insert instruction?");
217  return It->second;
218  }
219 
220  void deleteValue(const Instruction *I) { InstNumbers.erase(I); }
221 
222  void clear() { InstNumbers.clear(); }
223 };
224 
225 struct PromoteMem2Reg {
226  /// The alloca instructions being promoted.
227  std::vector<AllocaInst *> Allocas;
228 
229  DominatorTree &DT;
230  DIBuilder DIB;
231 
232  /// A cache of @llvm.assume intrinsics used by SimplifyInstruction.
233  AssumptionCache *AC;
234 
235  const SimplifyQuery SQ;
236 
237  /// Reverse mapping of Allocas.
239 
240  /// The PhiNodes we're adding.
241  ///
242  /// That map is used to simplify some Phi nodes as we iterate over it, so
243  /// it should have deterministic iterators. We could use a MapVector, but
244  /// since we already maintain a map from BasicBlock* to a stable numbering
245  /// (BBNumbers), the DenseMap is more efficient (also supports removal).
247 
248  /// For each PHI node, keep track of which entry in Allocas it corresponds
249  /// to.
250  DenseMap<PHINode *, unsigned> PhiToAllocaMap;
251 
252  /// For each alloca, we keep track of the dbg.declare intrinsic that
253  /// describes it, if any, so that we can convert it to a dbg.value
254  /// intrinsic if the alloca gets promoted.
256 
257  /// The set of basic blocks the renamer has already visited.
259 
260  /// Contains a stable numbering of basic blocks to avoid non-determinstic
261  /// behavior.
263 
264  /// Lazily compute the number of predecessors a block has.
266 
267 public:
268  PromoteMem2Reg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
269  AssumptionCache *AC)
270  : Allocas(Allocas.begin(), Allocas.end()), DT(DT),
271  DIB(*DT.getRoot()->getParent()->getParent(), /*AllowUnresolved*/ false),
272  AC(AC), SQ(DT.getRoot()->getParent()->getParent()->getDataLayout(),
273  nullptr, &DT, AC) {}
274 
275  void run();
276 
277 private:
278  void RemoveFromAllocasList(unsigned &AllocaIdx) {
279  Allocas[AllocaIdx] = Allocas.back();
280  Allocas.pop_back();
281  --AllocaIdx;
282  }
283 
284  unsigned getNumPreds(const BasicBlock *BB) {
285  unsigned &NP = BBNumPreds[BB];
286  if (NP == 0)
287  NP = pred_size(BB) + 1;
288  return NP - 1;
289  }
290 
291  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
292  const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
293  SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
294  void RenamePass(BasicBlock *BB, BasicBlock *Pred,
295  RenamePassData::ValVector &IncVals,
296  RenamePassData::LocationVector &IncLocs,
297  std::vector<RenamePassData> &Worklist);
298  bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
299 };
300 
301 } // end anonymous namespace
302 
303 /// Given a LoadInst LI this adds assume(LI != null) after it.
305  Function *AssumeIntrinsic =
306  Intrinsic::getDeclaration(LI->getModule(), Intrinsic::assume);
307  ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
309  LoadNotNull->insertAfter(LI);
310  CallInst *CI = CallInst::Create(AssumeIntrinsic, {LoadNotNull});
311  CI->insertAfter(LoadNotNull);
312  AC->registerAssumption(CI);
313 }
314 
316  // Knowing that this alloca is promotable, we know that it's safe to kill all
317  // instructions except for load and store.
318 
319  for (auto UI = AI->user_begin(), UE = AI->user_end(); UI != UE;) {
320  Instruction *I = cast<Instruction>(*UI);
321  ++UI;
322  if (isa<LoadInst>(I) || isa<StoreInst>(I))
323  continue;
324 
325  if (!I->getType()->isVoidTy()) {
326  // The only users of this bitcast/GEP instruction are lifetime intrinsics.
327  // Follow the use/def chain to erase them now instead of leaving it for
328  // dead code elimination later.
329  for (auto UUI = I->user_begin(), UUE = I->user_end(); UUI != UUE;) {
330  Instruction *Inst = cast<Instruction>(*UUI);
331  ++UUI;
332  Inst->eraseFromParent();
333  }
334  }
335  I->eraseFromParent();
336  }
337 }
338 
339 /// Rewrite as many loads as possible given a single store.
340 ///
341 /// When there is only a single store, we can use the domtree to trivially
342 /// replace all of the dominated loads with the stored value. Do so, and return
343 /// true if this has successfully promoted the alloca entirely. If this returns
344 /// false there were some loads which were not dominated by the single store
345 /// and thus must be phi-ed with undef. We fall back to the standard alloca
346 /// promotion algorithm in that case.
347 static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
348  LargeBlockInfo &LBI, const DataLayout &DL,
349  DominatorTree &DT, AssumptionCache *AC) {
350  StoreInst *OnlyStore = Info.OnlyStore;
351  bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
352  BasicBlock *StoreBB = OnlyStore->getParent();
353  int StoreIndex = -1;
354 
355  // Clear out UsingBlocks. We will reconstruct it here if needed.
356  Info.UsingBlocks.clear();
357 
358  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
359  Instruction *UserInst = cast<Instruction>(*UI++);
360  if (UserInst == OnlyStore)
361  continue;
362  LoadInst *LI = cast<LoadInst>(UserInst);
363 
364  // Okay, if we have a load from the alloca, we want to replace it with the
365  // only value stored to the alloca. We can do this if the value is
366  // dominated by the store. If not, we use the rest of the mem2reg machinery
367  // to insert the phi nodes as needed.
368  if (!StoringGlobalVal) { // Non-instructions are always dominated.
369  if (LI->getParent() == StoreBB) {
370  // If we have a use that is in the same block as the store, compare the
371  // indices of the two instructions to see which one came first. If the
372  // load came before the store, we can't handle it.
373  if (StoreIndex == -1)
374  StoreIndex = LBI.getInstructionIndex(OnlyStore);
375 
376  if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
377  // Can't handle this load, bail out.
378  Info.UsingBlocks.push_back(StoreBB);
379  continue;
380  }
381  } else if (!DT.dominates(StoreBB, LI->getParent())) {
382  // If the load and store are in different blocks, use BB dominance to
383  // check their relationships. If the store doesn't dom the use, bail
384  // out.
385  Info.UsingBlocks.push_back(LI->getParent());
386  continue;
387  }
388  }
389 
390  // Otherwise, we *can* safely rewrite this load.
391  Value *ReplVal = OnlyStore->getOperand(0);
392  // If the replacement value is the load, this must occur in unreachable
393  // code.
394  if (ReplVal == LI)
395  ReplVal = UndefValue::get(LI->getType());
396 
397  // If the load was marked as nonnull we don't want to lose
398  // that information when we erase this Load. So we preserve
399  // it with an assume.
400  if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
401  !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
402  addAssumeNonNull(AC, LI);
403 
404  LI->replaceAllUsesWith(ReplVal);
405  LI->eraseFromParent();
406  LBI.deleteValue(LI);
407  }
408 
409  // Finally, after the scan, check to see if the store is all that is left.
410  if (!Info.UsingBlocks.empty())
411  return false; // If not, we'll have to fall back for the remainder.
412 
413  // Record debuginfo for the store and remove the declaration's
414  // debuginfo.
415  for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
416  DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
417  ConvertDebugDeclareToDebugValue(DII, Info.OnlyStore, DIB);
418  DII->eraseFromParent();
419  }
420  // Remove the (now dead) store and alloca.
421  Info.OnlyStore->eraseFromParent();
422  LBI.deleteValue(Info.OnlyStore);
423 
424  AI->eraseFromParent();
425  return true;
426 }
427 
428 /// Many allocas are only used within a single basic block. If this is the
429 /// case, avoid traversing the CFG and inserting a lot of potentially useless
430 /// PHI nodes by just performing a single linear pass over the basic block
431 /// using the Alloca.
432 ///
433 /// If we cannot promote this alloca (because it is read before it is written),
434 /// return false. This is necessary in cases where, due to control flow, the
435 /// alloca is undefined only on some control flow paths. e.g. code like
436 /// this is correct in LLVM IR:
437 /// // A is an alloca with no stores so far
438 /// for (...) {
439 /// int t = *A;
440 /// if (!first_iteration)
441 /// use(t);
442 /// *A = 42;
443 /// }
444 static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
445  LargeBlockInfo &LBI,
446  const DataLayout &DL,
447  DominatorTree &DT,
448  AssumptionCache *AC) {
449  // The trickiest case to handle is when we have large blocks. Because of this,
450  // this code is optimized assuming that large blocks happen. This does not
451  // significantly pessimize the small block case. This uses LargeBlockInfo to
452  // make it efficient to get the index of various operations in the block.
453 
454  // Walk the use-def list of the alloca, getting the locations of all stores.
455  using StoresByIndexTy = SmallVector<std::pair<unsigned, StoreInst *>, 64>;
456  StoresByIndexTy StoresByIndex;
457 
458  for (User *U : AI->users())
459  if (StoreInst *SI = dyn_cast<StoreInst>(U))
460  StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
461 
462  // Sort the stores by their index, making it efficient to do a lookup with a
463  // binary search.
464  llvm::sort(StoresByIndex, less_first());
465 
466  // Walk all of the loads from this alloca, replacing them with the nearest
467  // store above them, if any.
468  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
469  LoadInst *LI = dyn_cast<LoadInst>(*UI++);
470  if (!LI)
471  continue;
472 
473  unsigned LoadIdx = LBI.getInstructionIndex(LI);
474 
475  // Find the nearest store that has a lower index than this load.
476  StoresByIndexTy::iterator I = llvm::lower_bound(
477  StoresByIndex,
478  std::make_pair(LoadIdx, static_cast<StoreInst *>(nullptr)),
479  less_first());
480  if (I == StoresByIndex.begin()) {
481  if (StoresByIndex.empty())
482  // If there are no stores, the load takes the undef value.
484  else
485  // There is no store before this load, bail out (load may be affected
486  // by the following stores - see main comment).
487  return false;
488  } else {
489  // Otherwise, there was a store before this load, the load takes its value.
490  // Note, if the load was marked as nonnull we don't want to lose that
491  // information when we erase it. So we preserve it with an assume.
492  Value *ReplVal = std::prev(I)->second->getOperand(0);
493  if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
494  !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
495  addAssumeNonNull(AC, LI);
496 
497  // If the replacement value is the load, this must occur in unreachable
498  // code.
499  if (ReplVal == LI)
500  ReplVal = UndefValue::get(LI->getType());
501 
502  LI->replaceAllUsesWith(ReplVal);
503  }
504 
505  LI->eraseFromParent();
506  LBI.deleteValue(LI);
507  }
508 
509  // Remove the (now dead) stores and alloca.
510  while (!AI->use_empty()) {
511  StoreInst *SI = cast<StoreInst>(AI->user_back());
512  // Record debuginfo for the store before removing it.
513  for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
514  DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
515  ConvertDebugDeclareToDebugValue(DII, SI, DIB);
516  }
517  SI->eraseFromParent();
518  LBI.deleteValue(SI);
519  }
520 
521  AI->eraseFromParent();
522 
523  // The alloca's debuginfo can be removed as well.
524  for (DbgVariableIntrinsic *DII : Info.DbgDeclares)
525  DII->eraseFromParent();
526 
527  ++NumLocalPromoted;
528  return true;
529 }
530 
531 void PromoteMem2Reg::run() {
532  Function &F = *DT.getRoot()->getParent();
533 
534  AllocaDbgDeclares.resize(Allocas.size());
535 
536  AllocaInfo Info;
537  LargeBlockInfo LBI;
538  ForwardIDFCalculator IDF(DT);
539 
540  for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
541  AllocaInst *AI = Allocas[AllocaNum];
542 
543  assert(isAllocaPromotable(AI) && "Cannot promote non-promotable alloca!");
544  assert(AI->getParent()->getParent() == &F &&
545  "All allocas should be in the same function, which is same as DF!");
546 
548 
549  if (AI->use_empty()) {
550  // If there are no uses of the alloca, just delete it now.
551  AI->eraseFromParent();
552 
553  // Remove the alloca from the Allocas list, since it has been processed
554  RemoveFromAllocasList(AllocaNum);
555  ++NumDeadAlloca;
556  continue;
557  }
558 
559  // Calculate the set of read and write-locations for each alloca. This is
560  // analogous to finding the 'uses' and 'definitions' of each variable.
561  Info.AnalyzeAlloca(AI);
562 
563  // If there is only a single store to this value, replace any loads of
564  // it that are directly dominated by the definition with the value stored.
565  if (Info.DefiningBlocks.size() == 1) {
566  if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
567  // The alloca has been processed, move on.
568  RemoveFromAllocasList(AllocaNum);
569  ++NumSingleStore;
570  continue;
571  }
572  }
573 
574  // If the alloca is only read and written in one basic block, just perform a
575  // linear sweep over the block to eliminate it.
576  if (Info.OnlyUsedInOneBlock &&
577  promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
578  // The alloca has been processed, move on.
579  RemoveFromAllocasList(AllocaNum);
580  continue;
581  }
582 
583  // If we haven't computed a numbering for the BB's in the function, do so
584  // now.
585  if (BBNumbers.empty()) {
586  unsigned ID = 0;
587  for (auto &BB : F)
588  BBNumbers[&BB] = ID++;
589  }
590 
591  // Remember the dbg.declare intrinsic describing this alloca, if any.
592  if (!Info.DbgDeclares.empty())
593  AllocaDbgDeclares[AllocaNum] = Info.DbgDeclares;
594 
595  // Keep the reverse mapping of the 'Allocas' array for the rename pass.
596  AllocaLookup[Allocas[AllocaNum]] = AllocaNum;
597 
598  // At this point, we're committed to promoting the alloca using IDF's, and
599  // the standard SSA construction algorithm. Determine which blocks need PHI
600  // nodes and see if we can optimize out some work by avoiding insertion of
601  // dead phi nodes.
602 
603  // Unique the set of defining blocks for efficient lookup.
604  SmallPtrSet<BasicBlock *, 32> DefBlocks(Info.DefiningBlocks.begin(),
605  Info.DefiningBlocks.end());
606 
607  // Determine which blocks the value is live in. These are blocks which lead
608  // to uses.
609  SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
610  ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);
611 
612  // At this point, we're committed to promoting the alloca using IDF's, and
613  // the standard SSA construction algorithm. Determine which blocks need phi
614  // nodes and see if we can optimize out some work by avoiding insertion of
615  // dead phi nodes.
616  IDF.setLiveInBlocks(LiveInBlocks);
617  IDF.setDefiningBlocks(DefBlocks);
619  IDF.calculate(PHIBlocks);
620  llvm::sort(PHIBlocks, [this](BasicBlock *A, BasicBlock *B) {
621  return BBNumbers.find(A)->second < BBNumbers.find(B)->second;
622  });
623 
624  unsigned CurrentVersion = 0;
625  for (BasicBlock *BB : PHIBlocks)
626  QueuePhiNode(BB, AllocaNum, CurrentVersion);
627  }
628 
629  if (Allocas.empty())
630  return; // All of the allocas must have been trivial!
631 
632  LBI.clear();
633 
634  // Set the incoming values for the basic block to be null values for all of
635  // the alloca's. We do this in case there is a load of a value that has not
636  // been stored yet. In this case, it will get this null value.
637  RenamePassData::ValVector Values(Allocas.size());
638  for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
639  Values[i] = UndefValue::get(Allocas[i]->getAllocatedType());
640 
641  // When handling debug info, treat all incoming values as if they have unknown
642  // locations until proven otherwise.
643  RenamePassData::LocationVector Locations(Allocas.size());
644 
645  // Walks all basic blocks in the function performing the SSA rename algorithm
646  // and inserting the phi nodes we marked as necessary
647  std::vector<RenamePassData> RenamePassWorkList;
648  RenamePassWorkList.emplace_back(&F.front(), nullptr, std::move(Values),
649  std::move(Locations));
650  do {
651  RenamePassData RPD = std::move(RenamePassWorkList.back());
652  RenamePassWorkList.pop_back();
653  // RenamePass may add new worklist entries.
654  RenamePass(RPD.BB, RPD.Pred, RPD.Values, RPD.Locations, RenamePassWorkList);
655  } while (!RenamePassWorkList.empty());
656 
657  // The renamer uses the Visited set to avoid infinite loops. Clear it now.
658  Visited.clear();
659 
660  // Remove the allocas themselves from the function.
661  for (Instruction *A : Allocas) {
662  // If there are any uses of the alloca instructions left, they must be in
663  // unreachable basic blocks that were not processed by walking the dominator
664  // tree. Just delete the users now.
665  if (!A->use_empty())
666  A->replaceAllUsesWith(UndefValue::get(A->getType()));
667  A->eraseFromParent();
668  }
669 
670  // Remove alloca's dbg.declare instrinsics from the function.
671  for (auto &Declares : AllocaDbgDeclares)
672  for (auto *DII : Declares)
673  DII->eraseFromParent();
674 
675  // Loop over all of the PHI nodes and see if there are any that we can get
676  // rid of because they merge all of the same incoming values. This can
677  // happen due to undef values coming into the PHI nodes. This process is
678  // iterative, because eliminating one PHI node can cause others to be removed.
679  bool EliminatedAPHI = true;
680  while (EliminatedAPHI) {
681  EliminatedAPHI = false;
682 
683  // Iterating over NewPhiNodes is deterministic, so it is safe to try to
684  // simplify and RAUW them as we go. If it was not, we could add uses to
685  // the values we replace with in a non-deterministic order, thus creating
686  // non-deterministic def->use chains.
687  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
688  I = NewPhiNodes.begin(),
689  E = NewPhiNodes.end();
690  I != E;) {
691  PHINode *PN = I->second;
692 
693  // If this PHI node merges one value and/or undefs, get the value.
694  if (Value *V = SimplifyInstruction(PN, SQ)) {
695  PN->replaceAllUsesWith(V);
696  PN->eraseFromParent();
697  NewPhiNodes.erase(I++);
698  EliminatedAPHI = true;
699  continue;
700  }
701  ++I;
702  }
703  }
704 
705  // At this point, the renamer has added entries to PHI nodes for all reachable
706  // code. Unfortunately, there may be unreachable blocks which the renamer
707  // hasn't traversed. If this is the case, the PHI nodes may not
708  // have incoming values for all predecessors. Loop over all PHI nodes we have
709  // created, inserting undef values if they are missing any incoming values.
710  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
711  I = NewPhiNodes.begin(),
712  E = NewPhiNodes.end();
713  I != E; ++I) {
714  // We want to do this once per basic block. As such, only process a block
715  // when we find the PHI that is the first entry in the block.
716  PHINode *SomePHI = I->second;
717  BasicBlock *BB = SomePHI->getParent();
718  if (&BB->front() != SomePHI)
719  continue;
720 
721  // Only do work here if there the PHI nodes are missing incoming values. We
722  // know that all PHI nodes that were inserted in a block will have the same
723  // number of incoming values, so we can just check any of them.
724  if (SomePHI->getNumIncomingValues() == getNumPreds(BB))
725  continue;
726 
727  // Get the preds for BB.
729 
730  // Ok, now we know that all of the PHI nodes are missing entries for some
731  // basic blocks. Start by sorting the incoming predecessors for efficient
732  // access.
733  auto CompareBBNumbers = [this](BasicBlock *A, BasicBlock *B) {
734  return BBNumbers.find(A)->second < BBNumbers.find(B)->second;
735  };
736  llvm::sort(Preds, CompareBBNumbers);
737 
738  // Now we loop through all BB's which have entries in SomePHI and remove
739  // them from the Preds list.
740  for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
741  // Do a log(n) search of the Preds list for the entry we want.
743  Preds, SomePHI->getIncomingBlock(i), CompareBBNumbers);
744  assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
745  "PHI node has entry for a block which is not a predecessor!");
746 
747  // Remove the entry
748  Preds.erase(EntIt);
749  }
750 
751  // At this point, the blocks left in the preds list must have dummy
752  // entries inserted into every PHI nodes for the block. Update all the phi
753  // nodes in this block that we are inserting (there could be phis before
754  // mem2reg runs).
755  unsigned NumBadPreds = SomePHI->getNumIncomingValues();
756  BasicBlock::iterator BBI = BB->begin();
757  while ((SomePHI = dyn_cast<PHINode>(BBI++)) &&
758  SomePHI->getNumIncomingValues() == NumBadPreds) {
759  Value *UndefVal = UndefValue::get(SomePHI->getType());
760  for (BasicBlock *Pred : Preds)
761  SomePHI->addIncoming(UndefVal, Pred);
762  }
763  }
764 
765  NewPhiNodes.clear();
766 }
767 
768 /// Determine which blocks the value is live in.
769 ///
770 /// These are blocks which lead to uses. Knowing this allows us to avoid
771 /// inserting PHI nodes into blocks which don't lead to uses (thus, the
772 /// inserted phi nodes would be dead).
774  AllocaInst *AI, AllocaInfo &Info,
775  const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
776  SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
777  // To determine liveness, we must iterate through the predecessors of blocks
778  // where the def is live. Blocks are added to the worklist if we need to
779  // check their predecessors. Start with all the using blocks.
780  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
781  Info.UsingBlocks.end());
782 
783  // If any of the using blocks is also a definition block, check to see if the
784  // definition occurs before or after the use. If it happens before the use,
785  // the value isn't really live-in.
786  for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
787  BasicBlock *BB = LiveInBlockWorklist[i];
788  if (!DefBlocks.count(BB))
789  continue;
790 
791  // Okay, this is a block that both uses and defines the value. If the first
792  // reference to the alloca is a def (store), then we know it isn't live-in.
793  for (BasicBlock::iterator I = BB->begin();; ++I) {
794  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
795  if (SI->getOperand(1) != AI)
796  continue;
797 
798  // We found a store to the alloca before a load. The alloca is not
799  // actually live-in here.
800  LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
801  LiveInBlockWorklist.pop_back();
802  --i;
803  --e;
804  break;
805  }
806 
807  if (LoadInst *LI = dyn_cast<LoadInst>(I))
808  // Okay, we found a load before a store to the alloca. It is actually
809  // live into this block.
810  if (LI->getOperand(0) == AI)
811  break;
812  }
813  }
814 
815  // Now that we have a set of blocks where the phi is live-in, recursively add
816  // their predecessors until we find the full region the value is live.
817  while (!LiveInBlockWorklist.empty()) {
818  BasicBlock *BB = LiveInBlockWorklist.pop_back_val();
819 
820  // The block really is live in here, insert it into the set. If already in
821  // the set, then it has already been processed.
822  if (!LiveInBlocks.insert(BB).second)
823  continue;
824 
825  // Since the value is live into BB, it is either defined in a predecessor or
826  // live into it to. Add the preds to the worklist unless they are a
827  // defining block.
828  for (BasicBlock *P : predecessors(BB)) {
829  // The value is not live into a predecessor if it defines the value.
830  if (DefBlocks.count(P))
831  continue;
832 
833  // Otherwise it is, add to the worklist.
834  LiveInBlockWorklist.push_back(P);
835  }
836  }
837 }
838 
839 /// Queue a phi-node to be added to a basic-block for a specific Alloca.
840 ///
841 /// Returns true if there wasn't already a phi-node for that variable
842 bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
843  unsigned &Version) {
844  // Look up the basic-block in question.
845  PHINode *&PN = NewPhiNodes[std::make_pair(BBNumbers[BB], AllocaNo)];
846 
847  // If the BB already has a phi node added for the i'th alloca then we're done!
848  if (PN)
849  return false;
850 
851  // Create a PhiNode using the dereferenced type... and add the phi-node to the
852  // BasicBlock.
853  PN = PHINode::Create(Allocas[AllocaNo]->getAllocatedType(), getNumPreds(BB),
854  Allocas[AllocaNo]->getName() + "." + Twine(Version++),
855  &BB->front());
856  ++NumPHIInsert;
857  PhiToAllocaMap[PN] = AllocaNo;
858  return true;
859 }
860 
861 /// Update the debug location of a phi. \p ApplyMergedLoc indicates whether to
862 /// create a merged location incorporating \p DL, or to set \p DL directly.
864  bool ApplyMergedLoc) {
865  if (ApplyMergedLoc)
866  PN->applyMergedLocation(PN->getDebugLoc(), DL);
867  else
868  PN->setDebugLoc(DL);
869 }
870 
871 /// Recursively traverse the CFG of the function, renaming loads and
872 /// stores to the allocas which we are promoting.
873 ///
874 /// IncomingVals indicates what value each Alloca contains on exit from the
875 /// predecessor block Pred.
876 void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
877  RenamePassData::ValVector &IncomingVals,
878  RenamePassData::LocationVector &IncomingLocs,
879  std::vector<RenamePassData> &Worklist) {
880 NextIteration:
881  // If we are inserting any phi nodes into this BB, they will already be in the
882  // block.
883  if (PHINode *APN = dyn_cast<PHINode>(BB->begin())) {
884  // If we have PHI nodes to update, compute the number of edges from Pred to
885  // BB.
886  if (PhiToAllocaMap.count(APN)) {
887  // We want to be able to distinguish between PHI nodes being inserted by
888  // this invocation of mem2reg from those phi nodes that already existed in
889  // the IR before mem2reg was run. We determine that APN is being inserted
890  // because it is missing incoming edges. All other PHI nodes being
891  // inserted by this pass of mem2reg will have the same number of incoming
892  // operands so far. Remember this count.
893  unsigned NewPHINumOperands = APN->getNumOperands();
894 
895  unsigned NumEdges = std::count(succ_begin(Pred), succ_end(Pred), BB);
896  assert(NumEdges && "Must be at least one edge from Pred to BB!");
897 
898  // Add entries for all the phis.
899  BasicBlock::iterator PNI = BB->begin();
900  do {
901  unsigned AllocaNo = PhiToAllocaMap[APN];
902 
903  // Update the location of the phi node.
904  updateForIncomingValueLocation(APN, IncomingLocs[AllocaNo],
905  APN->getNumIncomingValues() > 0);
906 
907  // Add N incoming values to the PHI node.
908  for (unsigned i = 0; i != NumEdges; ++i)
909  APN->addIncoming(IncomingVals[AllocaNo], Pred);
910 
911  // The currently active variable for this block is now the PHI.
912  IncomingVals[AllocaNo] = APN;
913  for (DbgVariableIntrinsic *DII : AllocaDbgDeclares[AllocaNo])
914  ConvertDebugDeclareToDebugValue(DII, APN, DIB);
915 
916  // Get the next phi node.
917  ++PNI;
918  APN = dyn_cast<PHINode>(PNI);
919  if (!APN)
920  break;
921 
922  // Verify that it is missing entries. If not, it is not being inserted
923  // by this mem2reg invocation so we want to ignore it.
924  } while (APN->getNumOperands() == NewPHINumOperands);
925  }
926  }
927 
928  // Don't revisit blocks.
929  if (!Visited.insert(BB).second)
930  return;
931 
932  for (BasicBlock::iterator II = BB->begin(); !II->isTerminator();) {
933  Instruction *I = &*II++; // get the instruction, increment iterator
934 
935  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
936  AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
937  if (!Src)
938  continue;
939 
940  DenseMap<AllocaInst *, unsigned>::iterator AI = AllocaLookup.find(Src);
941  if (AI == AllocaLookup.end())
942  continue;
943 
944  Value *V = IncomingVals[AI->second];
945 
946  // If the load was marked as nonnull we don't want to lose
947  // that information when we erase this Load. So we preserve
948  // it with an assume.
949  if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
950  !isKnownNonZero(V, SQ.DL, 0, AC, LI, &DT))
951  addAssumeNonNull(AC, LI);
952 
953  // Anything using the load now uses the current value.
954  LI->replaceAllUsesWith(V);
955  BB->getInstList().erase(LI);
956  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
957  // Delete this instruction and mark the name as the current holder of the
958  // value
959  AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
960  if (!Dest)
961  continue;
962 
963  DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
964  if (ai == AllocaLookup.end())
965  continue;
966 
967  // what value were we writing?
968  unsigned AllocaNo = ai->second;
969  IncomingVals[AllocaNo] = SI->getOperand(0);
970 
971  // Record debuginfo for the store before removing it.
972  IncomingLocs[AllocaNo] = SI->getDebugLoc();
973  for (DbgVariableIntrinsic *DII : AllocaDbgDeclares[ai->second])
975  BB->getInstList().erase(SI);
976  }
977  }
978 
979  // 'Recurse' to our successors.
980  succ_iterator I = succ_begin(BB), E = succ_end(BB);
981  if (I == E)
982  return;
983 
984  // Keep track of the successors so we don't visit the same successor twice
985  SmallPtrSet<BasicBlock *, 8> VisitedSuccs;
986 
987  // Handle the first successor without using the worklist.
988  VisitedSuccs.insert(*I);
989  Pred = BB;
990  BB = *I;
991  ++I;
992 
993  for (; I != E; ++I)
994  if (VisitedSuccs.insert(*I).second)
995  Worklist.emplace_back(*I, Pred, IncomingVals, IncomingLocs);
996 
997  goto NextIteration;
998 }
999 
1001  AssumptionCache *AC) {
1002  // If there is nothing to do, bail out...
1003  if (Allocas.empty())
1004  return;
1005 
1006  PromoteMem2Reg(Allocas, DT, AC).run();
1007 }
auto lower_bound(R &&Range, T &&Value) -> decltype(adl_begin(Range))
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1288
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:67
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
const T & back() const
back - Get the last element.
Definition: ArrayRef.h:157
iterator erase(iterator where)
Definition: ilist.h:265
This class represents lattice values for constants.
Definition: AllocatorList.h:23
iterator begin() const
Definition: ArrayRef.h:136
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
static void removeLifetimeIntrinsicUsers(AllocaInst *AI)
This class represents a function call, abstracting a target machine's calling convention.
A cache of @llvm.assume calls within a function.
STATISTIC(NumFunctions, "Total number of functions")
A debug info location.
Definition: DebugLoc.h:33
An instruction for reading from memory.
Definition: Instructions.h:167
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:30
void calculate(SmallVectorImpl< NodeTy *> &IDFBlocks)
Calculate iterated dominance frontiers.
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:274
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:343
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:268
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:369
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:80
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:96
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
static StringRef getName(Value *V)
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:102
#define F(x, y, z)
Definition: MD5.cpp:55
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:244
This is the common base class for debug info intrinsics for variables.
Definition: IntrinsicInst.h:87
This class represents a no-op cast from one type to another.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:234
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
An instruction for storing to memory.
Definition: Instructions.h:320
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:429
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:144
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1043
Value * getOperand(unsigned i) const
Definition: User.h:169
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
auto count(R &&Range, const E &Element) -> typename std::iterator_traits< decltype(adl_begin(Range))>::difference_type
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition: STLExtras.h:1258
Interval::succ_iterator succ_end(Interval *I)
Definition: Interval.h:105
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:176
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:140
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
Definition: Instructions.h:875
#define P(N)
bool erase(const KeyT &Val)
Definition: DenseMap.h:298
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:318
static void updateForIncomingValueLocation(PHINode *PN, DebugLoc DL, bool ApplyMergedLoc)
Update the debug location of a phi.
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
void setLiveInBlocks(const SmallPtrSetImpl< NodeTy *> &Blocks)
Give the IDF calculator the set of blocks in which the value is live on entry to the block...
static void ComputeLiveInBlocks(const SmallPtrSetImpl< BasicBlock *> &UsingBlocks, const SmallPtrSetImpl< BasicBlock *> &DefBlocks, SmallPtrSetImpl< BasicBlock *> &LiveInBlocks, PredIteratorCache &PredCache)
Given sets of UsingBlocks and DefBlocks, compute the set of LiveInBlocks.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
const Instruction & front() const
Definition: BasicBlock.h:280
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:370
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:112
const Instruction & back() const
Definition: BasicBlock.h:282
This instruction compares its operands according to the predicate given to the constructor.
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:115
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:572
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:381
static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI)
Given a LoadInst LI this adds assume(LI != null) after it.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1424
iterator erase(const_iterator CI)
Definition: SmallVector.h:434
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:219
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1122
void setDefiningBlocks(const SmallPtrSetImpl< NodeTy *> &Blocks)
Give the IDF calculator the set of blocks in which the value is defined.
const InstListType & getInstList() const
Return the underlying instruction list container.
Definition: BasicBlock.h:333
Iterator for intrusive lists based on ilist_node.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:417
TinyPtrVector< DbgVariableIntrinsic * > FindDbgAddrUses(Value *V)
Finds all intrinsics declaring local variables as living in the memory that 'V' points to...
Definition: Local.cpp:1506
static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC)
Many allocas are only used within a single basic block.
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
bool dominates(const Instruction *Def, const Use &U) const
Return true if Def dominates a use in User.
Definition: Dominators.cpp:248
Module.h This file contains the declarations for the Module class.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:63
iterator end() const
Definition: ArrayRef.h:137
void applyMergedLocation(const DILocation *LocA, const DILocation *LocB)
Merge 2 debug locations and apply it to the Instruction.
Definition: DebugInfo.cpp:689
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
pred_range predecessors(BasicBlock *BB)
Definition: CFG.h:124
unsigned getNumIncomingValues() const
Return the number of incoming edges.
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:55
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
iterator_range< user_iterator > users()
Definition: Value.h:399
static void clear(coro::Shape &Shape)
Definition: Coroutines.cpp:211
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:321
void registerAssumption(CallInst *CI)
Add an @llvm.assume intrinsic to this function&#39;s cache.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:106
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction...
Definition: Instruction.cpp:79
unsigned pred_size(const BasicBlock *BB)
Get the number of predecessors of BB.
Definition: CFG.h:121
#define I(x, y, z)
Definition: MD5.cpp:58
iterator end()
Definition: DenseMap.h:108
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:332
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
user_iterator user_begin()
Definition: Value.h:375
const BasicBlock & front() const
Definition: Function.h:687
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC)
Rewrite as many loads as possible given a single store.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:575
LLVM Value Representation.
Definition: Value.h:72
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
===------------------------------------------------------------------===// Dbg Intrinsic utilities ...
Definition: Local.cpp:1306
void PromoteMemToReg(ArrayRef< AllocaInst *> Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
NodeT * getRoot() const
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
const uint64_t Version
Definition: InstrProf.h:984
bool use_empty() const
Definition: Value.h:322
Function object to check whether the first component of a std::pair compares less than the first comp...
Definition: STLExtras.h:966
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:143
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:43
const BasicBlock * getParent() const
Definition: Instruction.h:66
an instruction to allocate memory on the stack
Definition: Instructions.h:59
user_iterator user_end()
Definition: Value.h:383