// LLVM 23.0.0git
// PromoteMemoryToRegister.cpp
// (doxygen navigation residue — commented out so the file stays valid C++)
//===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file promotes memory references to be register references. It promotes
// alloca instructions which only have loads and stores as uses. An alloca is
// transformed by using iterated dominator frontiers to place PHI nodes, then
// traversing the function in depth-first order to rewrite loads and stores as
// appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugProgramInstruction.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>
#include <vector>
56
57using namespace llvm;
58
59#define DEBUG_TYPE "mem2reg"
60
61STATISTIC(NumLocalPromoted, "Number of alloca's promoted within one block");
62STATISTIC(NumSingleStore, "Number of alloca's promoted with a single store");
63STATISTIC(NumDeadAlloca, "Number of dead alloca's removed");
64STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");
65
67 // Only allow direct and non-volatile loads and stores...
68 // All loads and stores must use the same type (determined by the first one
69 // seen). We don't require the type to match the alloca's declared type.
70 Type *ExpectedType = nullptr;
71 for (const User *U : AI->users()) {
72 if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
73 // Note that atomic loads can be transformed; atomic semantics do
74 // not have any meaning for a local alloca.
75 if (LI->isVolatile())
76 return false;
77 if (!ExpectedType)
78 ExpectedType = LI->getType();
79 else if (LI->getType() != ExpectedType)
80 return false;
81 } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
82 if (SI->getValueOperand() == AI)
83 return false; // Don't allow a store OF the AI, only INTO the AI.
84 // Note that atomic stores can be transformed; atomic semantics do
85 // not have any meaning for a local alloca.
86 if (SI->isVolatile())
87 return false;
88 Type *StoreType = SI->getValueOperand()->getType();
89 if (!ExpectedType)
90 ExpectedType = StoreType;
91 else if (StoreType != ExpectedType)
92 return false;
93 } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
94 if (!II->isLifetimeStartOrEnd() && !II->isDroppable() &&
95 II->getIntrinsicID() != Intrinsic::fake_use)
96 return false;
97 } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
99 return false;
100 } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
101 if (!GEPI->hasAllZeroIndices())
102 return false;
104 return false;
105 } else if (const AddrSpaceCastInst *ASCI = dyn_cast<AddrSpaceCastInst>(U)) {
106 if (!onlyUsedByLifetimeMarkers(ASCI))
107 return false;
108 } else {
109 return false;
110 }
111 }
112
113 return true;
114}
115
116namespace {
117
118static void createDebugValue(DIBuilder &DIB, Value *NewValue,
119 DILocalVariable *Variable,
121 DbgVariableRecord *InsertBefore) {
122 // FIXME: Merge these two functions now that DIBuilder supports
123 // DbgVariableRecords. We neeed the API to accept DbgVariableRecords as an
124 // insert point for that to work.
125 (void)DIB;
127 *InsertBefore);
128}
129
130/// Helper for updating assignment tracking debug info when promoting allocas.
131class AssignmentTrackingInfo {
132 /// DbgAssignIntrinsics linked to the alloca with at most one per variable
133 /// fragment. (i.e. not be a comprehensive set if there are multiple
134 /// dbg.assigns for one variable fragment).
136
137public:
138 void init(AllocaInst *AI) {
139 SmallSet<DebugVariable, 2> Vars;
140 for (DbgVariableRecord *DVR : at::getDVRAssignmentMarkers(AI)) {
141 if (Vars.insert(DebugVariable(DVR)).second)
142 DVRAssigns.push_back(DVR);
143 }
144 }
145
146 /// Update assignment tracking debug info given for the to-be-deleted store
147 /// \p ToDelete that stores to this alloca.
148 void updateForDeletedStore(
149 StoreInst *ToDelete, DIBuilder &DIB,
150 SmallPtrSet<DbgVariableRecord *, 8> *DVRAssignsToDelete) const {
151 // There's nothing to do if the alloca doesn't have any variables using
152 // assignment tracking.
153 if (DVRAssigns.empty())
154 return;
155
156 // Insert a dbg.value where the linked dbg.assign is and remember to delete
157 // the dbg.assign later. Demoting to dbg.value isn't necessary for
158 // correctness but does reduce compile time and memory usage by reducing
159 // unnecessary function-local metadata. Remember that we've seen a
160 // dbg.assign for each variable fragment for the untracked store handling
161 // (after this loop).
162 SmallSet<DebugVariableAggregate, 2> VarHasDbgAssignForStore;
163 auto InsertValueForAssign = [&](auto *DbgAssign, auto *&AssignList) {
164 VarHasDbgAssignForStore.insert(DebugVariableAggregate(DbgAssign));
165 AssignList->insert(DbgAssign);
166 createDebugValue(DIB, DbgAssign->getValue(), DbgAssign->getVariable(),
167 DbgAssign->getExpression(), DbgAssign->getDebugLoc(),
168 DbgAssign);
169 };
170 for (auto *Assign : at::getDVRAssignmentMarkers(ToDelete))
171 InsertValueForAssign(Assign, DVRAssignsToDelete);
172
173 // It's possible for variables using assignment tracking to have no
174 // dbg.assign linked to this store. These are variables in DVRAssigns that
175 // are missing from VarHasDbgAssignForStore. Since there isn't a dbg.assign
176 // to mark the assignment - and the store is going to be deleted - insert a
177 // dbg.value to do that now. An untracked store may be either one that
178 // cannot be represented using assignment tracking (non-const offset or
179 // size) or one that is trackable but has had its DIAssignID attachment
180 // dropped accidentally.
181 auto ConvertUnlinkedAssignToValue = [&](DbgVariableRecord *Assign) {
182 if (VarHasDbgAssignForStore.contains(DebugVariableAggregate(Assign)))
183 return;
184 ConvertDebugDeclareToDebugValue(Assign, ToDelete, DIB);
185 };
186 for_each(DVRAssigns, ConvertUnlinkedAssignToValue);
187 }
188
189 /// Update assignment tracking debug info given for the newly inserted PHI \p
190 /// NewPhi.
191 void updateForNewPhi(PHINode *NewPhi, DIBuilder &DIB) const {
192 // Regardless of the position of dbg.assigns relative to stores, the
193 // incoming values into a new PHI should be the same for the (imaginary)
194 // debug-phi.
195 for (auto *DVR : DVRAssigns)
196 ConvertDebugDeclareToDebugValue(DVR, NewPhi, DIB);
197 }
198
199 void clear() { DVRAssigns.clear(); }
200 bool empty() { return DVRAssigns.empty(); }
201};
202
203struct AllocaInfo {
204 using DPUserVec = SmallVector<DbgVariableRecord *, 1>;
205
206 SmallVector<BasicBlock *, 32> DefiningBlocks;
208
209 StoreInst *OnlyStore;
210 BasicBlock *OnlyBlock;
211 bool OnlyUsedInOneBlock;
212
213 /// The type used by all loads/stores of this alloca. This may differ from
214 /// the alloca's declared type if all accesses use a different type.
215 Type *ValueType;
216
217 /// Debug users of the alloca - does not include dbg.assign intrinsics.
218 DPUserVec DPUsers;
219 /// Helper to update assignment tracking debug info.
220 AssignmentTrackingInfo AssignmentTracking;
221
222 void clear() {
223 DefiningBlocks.clear();
224 UsingBlocks.clear();
225 OnlyStore = nullptr;
226 OnlyBlock = nullptr;
227 OnlyUsedInOneBlock = true;
228 ValueType = nullptr;
229 DPUsers.clear();
230 AssignmentTracking.clear();
231 }
232
233 /// Scan the uses of the specified alloca, filling in the AllocaInfo used
234 /// by the rest of the pass to reason about the uses of this alloca.
235 void AnalyzeAlloca(AllocaInst *AI) {
236 clear();
237
238 // As we scan the uses of the alloca instruction, keep track of stores,
239 // and decide whether all of the loads and stores to the alloca are within
240 // the same basic block.
241 for (User *U : AI->users()) {
243
244 if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
245 // Remember the basic blocks which define new values for the alloca
246 DefiningBlocks.push_back(SI->getParent());
247 OnlyStore = SI;
248 if (!ValueType)
249 ValueType = SI->getValueOperand()->getType();
250 else
251 assert(ValueType == SI->getValueOperand()->getType() &&
252 "All stores were checked to have used the same type");
253 } else {
254 LoadInst *LI = cast<LoadInst>(User);
255 // Otherwise it must be a load instruction, keep track of variable
256 // reads.
257 UsingBlocks.push_back(LI->getParent());
258 if (!ValueType)
259 ValueType = LI->getType();
260 else
261 assert(ValueType == LI->getType() &&
262 "All loads where checked to have used the same type");
263 }
264
265 if (OnlyUsedInOneBlock) {
266 if (!OnlyBlock)
267 OnlyBlock = User->getParent();
268 else if (OnlyBlock != User->getParent())
269 OnlyUsedInOneBlock = false;
270 }
271 }
273 findDbgUsers(AI, AllDPUsers);
274 std::copy_if(AllDPUsers.begin(), AllDPUsers.end(),
275 std::back_inserter(DPUsers),
276 [](DbgVariableRecord *DVR) { return !DVR->isDbgAssign(); });
277 AssignmentTracking.init(AI);
278 }
279};
280
281template <typename T> class VectorWithUndo {
284
285public:
286 void undo(size_t S) {
287 assert(S <= Undo.size());
288 while (S < Undo.size()) {
289 Vals[Undo.back().first] = Undo.back().second;
290 Undo.pop_back();
291 }
292 }
293
294 void resize(size_t Sz) { Vals.resize(Sz); }
295
296 size_t undoSize() const { return Undo.size(); }
297
298 const T &operator[](size_t Idx) const { return Vals[Idx]; }
299
300 void set(size_t Idx, const T &Val) {
301 if (Vals[Idx] == Val)
302 return;
303 Undo.emplace_back(Idx, Vals[Idx]);
304 Vals[Idx] = Val;
305 }
306
307 void init(size_t Idx, const T &Val) {
308 assert(Undo.empty());
309 Vals[Idx] = Val;
310 }
311};
312
313/// Data package used by RenamePass().
314struct RenamePassData {
315 RenamePassData(BasicBlock *B, BasicBlock *P, size_t V, size_t L)
316 : BB(B), Pred(P), UndoVals(V), UndoLocs(L) {}
317
318 BasicBlock *BB;
319 BasicBlock *Pred;
320
321 size_t UndoVals;
322 size_t UndoLocs;
323};
324
325/// This assigns and keeps a per-bb relative ordering of load/store
326/// instructions in the block that directly load or store an alloca.
327///
328/// This functionality is important because it avoids scanning large basic
329/// blocks multiple times when promoting many allocas in the same block.
330class LargeBlockInfo {
331 /// For each instruction that we track, keep the index of the
332 /// instruction.
333 ///
334 /// The index starts out as the number of the instruction from the start of
335 /// the block.
336 DenseMap<const Instruction *, unsigned> InstNumbers;
337
338public:
339
340 /// This code only looks at accesses to allocas.
341 static bool isInterestingInstruction(const Instruction *I) {
342 return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
343 (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
344 }
345
346 /// Get or calculate the index of the specified instruction.
347 unsigned getInstructionIndex(const Instruction *I) {
348 assert(isInterestingInstruction(I) &&
349 "Not a load/store to/from an alloca?");
350
351 // If we already have this instruction number, return it.
352 DenseMap<const Instruction *, unsigned>::iterator It = InstNumbers.find(I);
353 if (It != InstNumbers.end())
354 return It->second;
355
356 // Scan the whole block to get the instruction. This accumulates
357 // information for every interesting instruction in the block, in order to
358 // avoid gratuitus rescans.
359 const BasicBlock *BB = I->getParent();
360 unsigned InstNo = 0;
361 for (const Instruction &BBI : *BB)
362 if (isInterestingInstruction(&BBI))
363 InstNumbers[&BBI] = InstNo++;
364 It = InstNumbers.find(I);
365
366 assert(It != InstNumbers.end() && "Didn't insert instruction?");
367 return It->second;
368 }
369
370 void deleteValue(const Instruction *I) { InstNumbers.erase(I); }
371
372 void clear() { InstNumbers.clear(); }
373};
374
375struct PromoteMem2Reg {
376 /// The alloca instructions being promoted.
377 std::vector<AllocaInst *> Allocas;
378
379 DominatorTree &DT;
380 DIBuilder DIB;
381
382 /// A cache of @llvm.assume intrinsics used by SimplifyInstruction.
383 AssumptionCache *AC;
384
385 const SimplifyQuery SQ;
386
387 /// Reverse mapping of Allocas.
388 DenseMap<AllocaInst *, unsigned> AllocaLookup;
389
390 /// The PhiNodes we're adding.
391 ///
392 /// That map is used to simplify some Phi nodes as we iterate over it, so
393 /// it should have deterministic iterators. We could use a MapVector, but
394 /// since basic blocks have numbers, using these are more efficient.
395 DenseMap<std::pair<unsigned, unsigned>, PHINode *> NewPhiNodes;
396
397 /// For each PHI node, keep track of which entry in Allocas it corresponds
398 /// to.
399 DenseMap<PHINode *, unsigned> PhiToAllocaMap;
400
401 /// For each alloca, we keep track of the dbg.declare record that
402 /// describes it, if any, so that we can convert it to a dbg.value
403 /// record if the alloca gets promoted.
405
406 /// For each alloca, keep an instance of a helper class that gives us an easy
407 /// way to update assignment tracking debug info if the alloca is promoted.
409 /// For each alloca, the type used by all loads/stores of this alloca.
410 SmallVector<Type *, 8> AllocaValueTypes;
411 /// A set of dbg.assigns to delete because they've been demoted to
412 /// dbg.values. Call cleanUpDbgAssigns to delete them.
413 SmallPtrSet<DbgVariableRecord *, 8> DVRAssignsToDelete;
414
415 /// The set of basic blocks the renamer has already visited.
416 BitVector Visited;
417
418 /// Lazily compute the number of predecessors a block has, indexed by block
419 /// number.
420 SmallVector<unsigned> BBNumPreds;
421
422 /// The state of incoming values for the current DFS step.
423 VectorWithUndo<Value *> IncomingVals;
424
425 /// The state of incoming locations for the current DFS step.
426 VectorWithUndo<DebugLoc> IncomingLocs;
427
428 // DFS work stack.
430
431 /// Whether the function has the no-signed-zeros-fp-math attribute set.
432 bool NoSignedZeros = false;
433
434public:
435 PromoteMem2Reg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
436 AssumptionCache *AC)
437 : Allocas(Allocas.begin(), Allocas.end()), DT(DT),
438 DIB(*DT.getRoot()->getParent()->getParent(), /*AllowUnresolved*/ false),
439 AC(AC), SQ(DT.getRoot()->getDataLayout(),
440 nullptr, &DT, AC) {}
441
442 void run();
443
444private:
445 void RemoveFromAllocasList(unsigned &AllocaIdx) {
446 Allocas[AllocaIdx] = Allocas.back();
447 Allocas.pop_back();
448 --AllocaIdx;
449 }
450
451 unsigned getNumPreds(const BasicBlock *BB) {
452 // BBNumPreds is resized to getMaxBlockNumber() at the beginning.
453 unsigned &NP = BBNumPreds[BB->getNumber()];
454 if (NP == 0)
455 NP = pred_size(BB) + 1;
456 return NP - 1;
457 }
458
459 void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
460 const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
461 SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
462 void RenamePass(BasicBlock *BB, BasicBlock *Pred);
463 bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
464
465 /// Delete dbg.assigns that have been demoted to dbg.values.
466 void cleanUpDbgAssigns() {
467 for (auto *DVR : DVRAssignsToDelete)
468 DVR->eraseFromParent();
469 DVRAssignsToDelete.clear();
470 }
471
472 void pushToWorklist(BasicBlock *BB, BasicBlock *Pred) {
473 Worklist.emplace_back(BB, Pred, IncomingVals.undoSize(),
474 IncomingLocs.undoSize());
475 }
476
477 RenamePassData popFromWorklist() {
478 RenamePassData R = Worklist.back();
479 Worklist.pop_back();
480 IncomingVals.undo(R.UndoVals);
481 IncomingLocs.undo(R.UndoLocs);
482 return R;
483 }
484};
485
486} // end anonymous namespace
487
488/// Given a LoadInst LI this adds assume(LI != null) after it.
490 Function *AssumeIntrinsic =
491 Intrinsic::getOrInsertDeclaration(LI->getModule(), Intrinsic::assume);
492 ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
494 LoadNotNull->insertAfter(LI->getIterator());
495 CallInst *CI = CallInst::Create(AssumeIntrinsic, {LoadNotNull});
496 CI->insertAfter(LoadNotNull->getIterator());
498}
499
501 const DataLayout &DL, AssumptionCache *AC,
502 const DominatorTree *DT) {
503 if (isa<UndefValue>(Val) && LI->hasMetadata(LLVMContext::MD_noundef)) {
504 // Insert non-terminator unreachable.
505 LLVMContext &Ctx = LI->getContext();
508 /*isVolatile=*/false, Align(1), LI->getIterator());
509 return;
510 }
511
512 // If the load was marked as nonnull we don't want to lose that information
513 // when we erase this Load. So we preserve it with an assume. As !nonnull
514 // returns poison while assume violations are immediate undefined behavior,
515 // we can only do this if the value is known non-poison.
516 if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
517 LI->getMetadata(LLVMContext::MD_noundef) &&
518 !isKnownNonZero(Val, SimplifyQuery(DL, DT, AC, LI)))
519 addAssumeNonNull(AC, LI);
520}
521
523 // Knowing that this alloca is promotable, we know that it's safe to kill all
524 // instructions except for load and store.
525
526 for (Use &U : llvm::make_early_inc_range(AI->uses())) {
527 Instruction *I = cast<Instruction>(U.getUser());
529 continue;
530
531 // Drop the use of AI in droppable instructions.
532 if (I->isDroppable()) {
533 I->dropDroppableUse(U);
534 continue;
535 }
536
537 if (!I->getType()->isVoidTy()) {
538 // The only users of this bitcast/GEP instruction are lifetime intrinsics.
539 // Follow the use/def chain to erase them now instead of leaving it for
540 // dead code elimination later.
541 for (Use &UU : llvm::make_early_inc_range(I->uses())) {
542 Instruction *Inst = cast<Instruction>(UU.getUser());
543
544 // Drop the use of I in droppable instructions.
545 if (Inst->isDroppable()) {
546 Inst->dropDroppableUse(UU);
547 continue;
548 }
549 Inst->eraseFromParent();
550 }
551 }
552 I->eraseFromParent();
553 }
554}
555
556/// Rewrite as many loads as possible given a single store.
557///
558/// When there is only a single store, we can use the domtree to trivially
559/// replace all of the dominated loads with the stored value. Do so, and return
560/// true if this has successfully promoted the alloca entirely. If this returns
561/// false there were some loads which were not dominated by the single store
562/// and thus must be phi-ed with undef. We fall back to the standard alloca
563/// promotion algorithm in that case.
565 AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL,
567 SmallPtrSet<DbgVariableRecord *, 8> *DVRAssignsToDelete) {
568 StoreInst *OnlyStore = Info.OnlyStore;
569 Value *ReplVal = OnlyStore->getOperand(0);
570 // Loads may either load the stored value or uninitialized memory (undef).
571 // If the stored value may be poison, then replacing an uninitialized memory
572 // load with it would be incorrect. If the store dominates the load, we know
573 // it is always initialized.
574 bool RequireDominatingStore =
575 isa<Instruction>(ReplVal) || !isGuaranteedNotToBePoison(ReplVal);
576 BasicBlock *StoreBB = OnlyStore->getParent();
577 int StoreIndex = -1;
578
579 // Clear out UsingBlocks. We will reconstruct it here if needed.
580 Info.UsingBlocks.clear();
581
582 for (User *U : make_early_inc_range(AI->users())) {
583 Instruction *UserInst = cast<Instruction>(U);
584 if (UserInst == OnlyStore)
585 continue;
586 LoadInst *LI = cast<LoadInst>(UserInst);
587
588 // Okay, if we have a load from the alloca, we want to replace it with the
589 // only value stored to the alloca. We can do this if the value is
590 // dominated by the store. If not, we use the rest of the mem2reg machinery
591 // to insert the phi nodes as needed.
592 if (RequireDominatingStore) {
593 if (LI->getParent() == StoreBB) {
594 // If we have a use that is in the same block as the store, compare the
595 // indices of the two instructions to see which one came first. If the
596 // load came before the store, we can't handle it.
597 if (StoreIndex == -1)
598 StoreIndex = LBI.getInstructionIndex(OnlyStore);
599
600 if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
601 // Can't handle this load, bail out.
602 Info.UsingBlocks.push_back(StoreBB);
603 continue;
604 }
605 } else if (!DT.dominates(StoreBB, LI->getParent())) {
606 // If the load and store are in different blocks, use BB dominance to
607 // check their relationships. If the store doesn't dom the use, bail
608 // out.
609 Info.UsingBlocks.push_back(LI->getParent());
610 continue;
611 }
612 }
613
614 // Otherwise, we *can* safely rewrite this load.
615 // If the replacement value is the load, this must occur in unreachable
616 // code.
617 if (ReplVal == LI)
618 ReplVal = PoisonValue::get(LI->getType());
619
620 convertMetadataToAssumes(LI, ReplVal, DL, AC, &DT);
621 LI->replaceAllUsesWith(ReplVal);
622 LI->eraseFromParent();
623 LBI.deleteValue(LI);
624 }
625
626 // Finally, after the scan, check to see if the store is all that is left.
627 if (!Info.UsingBlocks.empty())
628 return false; // If not, we'll have to fall back for the remainder.
629
630 DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
631 // Update assignment tracking info for the store we're going to delete.
632 Info.AssignmentTracking.updateForDeletedStore(Info.OnlyStore, DIB,
633 DVRAssignsToDelete);
634
635 // Record debuginfo for the store and remove the declaration's
636 // debuginfo.
637 for (DbgVariableRecord *DbgItem : Info.DPUsers) {
638 if (DbgItem->isAddressOfVariable()) {
639 ConvertDebugDeclareToDebugValue(DbgItem, Info.OnlyStore, DIB);
640 DbgItem->eraseFromParent();
641 } else if (DbgItem->isValueOfVariable() &&
642 DbgItem->getExpression()->startsWithDeref()) {
643 InsertDebugValueAtStoreLoc(DbgItem, Info.OnlyStore, DIB);
644 DbgItem->eraseFromParent();
645 } else if (DbgItem->getExpression()->startsWithDeref()) {
646 DbgItem->eraseFromParent();
647 }
648 }
649
650 // Remove dbg.assigns linked to the alloca as these are now redundant.
652
653 // Remove the (now dead) store and alloca.
654 Info.OnlyStore->eraseFromParent();
655 LBI.deleteValue(Info.OnlyStore);
656
657 AI->eraseFromParent();
658 return true;
659}
660
661/// Many allocas are only used within a single basic block. If this is the
662/// case, avoid traversing the CFG and inserting a lot of potentially useless
663/// PHI nodes by just performing a single linear pass over the basic block
664/// using the Alloca.
665///
666/// If we cannot promote this alloca (because it is read before it is written),
667/// return false. This is necessary in cases where, due to control flow, the
668/// alloca is undefined only on some control flow paths. e.g. code like
669/// this is correct in LLVM IR:
670/// // A is an alloca with no stores so far
671/// for (...) {
672/// int t = *A;
673/// if (!first_iteration)
674/// use(t);
675/// *A = 42;
676/// }
678 AllocaInst *AI, const AllocaInfo &Info, LargeBlockInfo &LBI,
680 SmallPtrSet<DbgVariableRecord *, 8> *DVRAssignsToDelete) {
681 // The trickiest case to handle is when we have large blocks. Because of this,
682 // this code is optimized assuming that large blocks happen. This does not
683 // significantly pessimize the small block case. This uses LargeBlockInfo to
684 // make it efficient to get the index of various operations in the block.
685
686 // Walk the use-def list of the alloca, getting the locations of all stores.
687 using StoresByIndexTy = SmallVector<std::pair<unsigned, StoreInst *>, 64>;
688 StoresByIndexTy StoresByIndex;
689
690 for (User *U : AI->users())
692 StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
693
694 // Sort the stores by their index, making it efficient to do a lookup with a
695 // binary search.
696 llvm::sort(StoresByIndex, less_first());
697
698 // Walk all of the loads from this alloca, replacing them with the nearest
699 // store above them, if any.
700 for (User *U : make_early_inc_range(AI->users())) {
702 if (!LI)
703 continue;
704
705 unsigned LoadIdx = LBI.getInstructionIndex(LI);
706
707 // Find the nearest store that has a lower index than this load.
708 StoresByIndexTy::iterator I = llvm::lower_bound(
709 StoresByIndex,
710 std::make_pair(LoadIdx, static_cast<StoreInst *>(nullptr)),
711 less_first());
712 Value *ReplVal;
713 if (I == StoresByIndex.begin()) {
714 if (StoresByIndex.empty())
715 // If there are no stores, the load takes the undef value.
716 ReplVal = UndefValue::get(LI->getType());
717 else
718 // There is no store before this load, bail out (load may be affected
719 // by the following stores - see main comment).
720 return false;
721 } else {
722 // Otherwise, there was a store before this load, the load takes its
723 // value.
724 ReplVal = std::prev(I)->second->getOperand(0);
725 }
726
727 convertMetadataToAssumes(LI, ReplVal, DL, AC, &DT);
728
729 // If the replacement value is the load, this must occur in unreachable
730 // code.
731 if (ReplVal == LI)
732 ReplVal = PoisonValue::get(LI->getType());
733
734 LI->replaceAllUsesWith(ReplVal);
735 LI->eraseFromParent();
736 LBI.deleteValue(LI);
737 }
738
739 // Remove the (now dead) stores and alloca.
740 DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
741 while (!AI->use_empty()) {
743 // Update assignment tracking info for the store we're going to delete.
744 Info.AssignmentTracking.updateForDeletedStore(SI, DIB, DVRAssignsToDelete);
745 // Record debuginfo for the store before removing it.
746 for (DbgVariableRecord *DbgItem : Info.DPUsers) {
747 if (DbgItem->isAddressOfVariable()) {
748 ConvertDebugDeclareToDebugValue(DbgItem, SI, DIB);
749 }
750 }
751
752 SI->eraseFromParent();
753 LBI.deleteValue(SI);
754 }
755
756 // Remove dbg.assigns linked to the alloca as these are now redundant.
758 AI->eraseFromParent();
759
760 // The alloca's debuginfo can be removed as well.
761 for (DbgVariableRecord *DbgItem : Info.DPUsers) {
762 if (DbgItem->isAddressOfVariable() ||
763 DbgItem->getExpression()->startsWithDeref())
764 DbgItem->eraseFromParent();
765 }
766
767 ++NumLocalPromoted;
768 return true;
769}
770
771void PromoteMem2Reg::run() {
772 Function &F = *DT.getRoot()->getParent();
773
774 AllocaATInfo.resize(Allocas.size());
775 AllocaDPUsers.resize(Allocas.size());
776 AllocaValueTypes.resize(Allocas.size());
777
778 AllocaInfo Info;
779 LargeBlockInfo LBI;
780 ForwardIDFCalculator IDF(DT);
781
782 NoSignedZeros = F.getFnAttribute("no-signed-zeros-fp-math").getValueAsBool();
783
784 for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
785 AllocaInst *AI = Allocas[AllocaNum];
786
787 assert(isAllocaPromotable(AI) && "Cannot promote non-promotable alloca!");
788 assert(AI->getParent()->getParent() == &F &&
789 "All allocas should be in the same function, which is same as DF!");
790
792
793 if (AI->use_empty()) {
794 // If there are no uses of the alloca, just delete it now.
795 AI->eraseFromParent();
796
797 // Remove the alloca from the Allocas list, since it has been processed
798 RemoveFromAllocasList(AllocaNum);
799 ++NumDeadAlloca;
800 continue;
801 }
802
803 // Calculate the set of read and write-locations for each alloca. This is
804 // analogous to finding the 'uses' and 'definitions' of each variable.
805 Info.AnalyzeAlloca(AI);
806
807 // If there is only a single store to this value, replace any loads of
808 // it that are directly dominated by the definition with the value stored.
809 if (Info.DefiningBlocks.size() == 1) {
810 if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC,
811 &DVRAssignsToDelete)) {
812 // The alloca has been processed, move on.
813 RemoveFromAllocasList(AllocaNum);
814 ++NumSingleStore;
815 continue;
816 }
817 }
818
819 // If the alloca is only read and written in one basic block, just perform a
820 // linear sweep over the block to eliminate it.
821 if (Info.OnlyUsedInOneBlock &&
822 promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC,
823 &DVRAssignsToDelete)) {
824 // The alloca has been processed, move on.
825 RemoveFromAllocasList(AllocaNum);
826 continue;
827 }
828
829 // Initialize BBNumPreds lazily
830 if (BBNumPreds.empty())
831 BBNumPreds.resize(F.getMaxBlockNumber());
832
833 // Remember the dbg.declare record describing this alloca, if any.
834 if (!Info.AssignmentTracking.empty())
835 AllocaATInfo[AllocaNum] = Info.AssignmentTracking;
836 if (!Info.DPUsers.empty())
837 AllocaDPUsers[AllocaNum] = Info.DPUsers;
838 AllocaValueTypes[AllocaNum] = Info.ValueType;
839
840 // Keep the reverse mapping of the 'Allocas' array for the rename pass.
841 AllocaLookup[Allocas[AllocaNum]] = AllocaNum;
842
843 // Unique the set of defining blocks for efficient lookup.
844 SmallPtrSet<BasicBlock *, 32> DefBlocks(llvm::from_range,
845 Info.DefiningBlocks);
846
847 // Determine which blocks the value is live in. These are blocks which lead
848 // to uses.
849 SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
850 ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);
851
852 // At this point, we're committed to promoting the alloca using IDF's, and
853 // the standard SSA construction algorithm. Determine which blocks need phi
854 // nodes and see if we can optimize out some work by avoiding insertion of
855 // dead phi nodes.
856 IDF.setLiveInBlocks(LiveInBlocks);
857 IDF.setDefiningBlocks(DefBlocks);
859 IDF.calculate(PHIBlocks);
860 llvm::sort(PHIBlocks, [](BasicBlock *A, BasicBlock *B) {
861 return A->getNumber() < B->getNumber();
862 });
863
864 unsigned CurrentVersion = 0;
865 for (BasicBlock *BB : PHIBlocks)
866 QueuePhiNode(BB, AllocaNum, CurrentVersion);
867 }
868
869 if (Allocas.empty()) {
870 cleanUpDbgAssigns();
871 return; // All of the allocas must have been trivial!
872 }
873 LBI.clear();
874
875 // Set the incoming values for the basic block to be null values for all of
876 // the alloca's. We do this in case there is a load of a value that has not
877 // been stored yet. In this case, it will get this null value.
878 IncomingVals.resize(Allocas.size());
879 for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
880 IncomingVals.init(i, UndefValue::get(AllocaValueTypes[i]));
881
882 // When handling debug info, treat all incoming values as if they have
883 // compiler-generated (empty) locations, representing the uninitialized
884 // alloca, until proven otherwise.
885 IncomingLocs.resize(Allocas.size());
886 for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
887 IncomingLocs.init(i, DebugLoc::getCompilerGenerated());
888
889 // The renamer uses the Visited set to avoid infinite loops.
890 Visited.resize(F.getMaxBlockNumber(), false);
891
892 // Add the entry block to the worklist, with a null predecessor.
893 pushToWorklist(&F.front(), nullptr);
894
895 do {
896 RenamePassData RPD = popFromWorklist();
897 RenamePass(RPD.BB, RPD.Pred);
898 } while (!Worklist.empty());
899
900 // Remove the allocas themselves from the function.
901 for (Instruction *A : Allocas) {
902 // Remove dbg.assigns linked to the alloca as these are now redundant.
904 // If there are any uses of the alloca instructions left, they must be in
905 // unreachable basic blocks that were not processed by walking the dominator
906 // tree. Just delete the users now.
907 if (!A->use_empty())
908 A->replaceAllUsesWith(PoisonValue::get(A->getType()));
909 A->eraseFromParent();
910 }
911
912 // Remove alloca's dbg.declare intrinsics from the function.
913 for (auto &DbgUsers : AllocaDPUsers) {
914 for (DbgVariableRecord *DbgItem : DbgUsers)
915 if (DbgItem->isAddressOfVariable() ||
916 DbgItem->getExpression()->startsWithDeref())
917 DbgItem->eraseFromParent();
918 }
919
920 // Loop over all of the PHI nodes and see if there are any that we can get
921 // rid of because they merge all of the same incoming values. This can
922 // happen due to undef values coming into the PHI nodes. This process is
923 // iterative, because eliminating one PHI node can cause others to be removed.
924 bool EliminatedAPHI = true;
925 while (EliminatedAPHI) {
926 EliminatedAPHI = false;
927
928 // Iterating over NewPhiNodes is deterministic, so it is safe to try to
929 // simplify and RAUW them as we go. If it was not, we could add uses to
930 // the values we replace with in a non-deterministic order, thus creating
931 // non-deterministic def->use chains.
932 for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
933 I = NewPhiNodes.begin(),
934 E = NewPhiNodes.end();
935 I != E;) {
936 PHINode *PN = I->second;
937
938 // If this PHI node merges one value and/or undefs, get the value.
939 if (Value *V = simplifyInstruction(PN, SQ)) {
940 PN->replaceAllUsesWith(V);
941 PN->eraseFromParent();
942 NewPhiNodes.erase(I++);
943 EliminatedAPHI = true;
944 continue;
945 }
946 ++I;
947 }
948 }
949
950 // At this point, the renamer has added entries to PHI nodes for all reachable
951 // code. Unfortunately, there may be unreachable blocks which the renamer
952 // hasn't traversed. If this is the case, the PHI nodes may not
953 // have incoming values for all predecessors. Loop over all PHI nodes we have
954 // created, inserting poison values if they are missing any incoming values.
955 for (const auto &PhiNode : NewPhiNodes) {
956 // We want to do this once per basic block. As such, only process a block
957 // when we find the PHI that is the first entry in the block.
958 PHINode *SomePHI = PhiNode.second;
959 BasicBlock *BB = SomePHI->getParent();
960 if (&BB->front() != SomePHI)
961 continue;
962
963 // Only do work here if there the PHI nodes are missing incoming values. We
964 // know that all PHI nodes that were inserted in a block will have the same
965 // number of incoming values, so we can just check any of them.
966 if (SomePHI->getNumIncomingValues() == getNumPreds(BB))
967 continue;
968
969 // Get the preds for BB.
970 SmallVector<BasicBlock *, 16> Preds(predecessors(BB));
971
972 // Ok, now we know that all of the PHI nodes are missing entries for some
973 // basic blocks. Start by sorting the incoming predecessors for efficient
974 // access.
975 auto CompareBBNumbers = [](BasicBlock *A, BasicBlock *B) {
976 return A->getNumber() < B->getNumber();
977 };
978 llvm::sort(Preds, CompareBBNumbers);
979
980 // Now we loop through all BB's which have entries in SomePHI and remove
981 // them from the Preds list.
982 for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
983 // Do a log(n) search of the Preds list for the entry we want.
985 Preds, SomePHI->getIncomingBlock(i), CompareBBNumbers);
986 assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
987 "PHI node has entry for a block which is not a predecessor!");
988
989 // Remove the entry
990 Preds.erase(EntIt);
991 }
992
993 // At this point, the blocks left in the preds list must have dummy
994 // entries inserted into every PHI nodes for the block. Update all the phi
995 // nodes in this block that we are inserting (there could be phis before
996 // mem2reg runs).
997 unsigned NumBadPreds = SomePHI->getNumIncomingValues();
998 BasicBlock::iterator BBI = BB->begin();
999 while ((SomePHI = dyn_cast<PHINode>(BBI++)) &&
1000 SomePHI->getNumIncomingValues() == NumBadPreds) {
1001 Value *PoisonVal = PoisonValue::get(SomePHI->getType());
1002 for (BasicBlock *Pred : Preds)
1003 SomePHI->addIncoming(PoisonVal, Pred);
1004 }
1005 }
1006
1007 NewPhiNodes.clear();
1008 cleanUpDbgAssigns();
1009}
1010
1011/// Determine which blocks the value is live in.
1012///
1013/// These are blocks which lead to uses. Knowing this allows us to avoid
1014/// inserting PHI nodes into blocks which don't lead to uses (thus, the
1015/// inserted phi nodes would be dead).
1016void PromoteMem2Reg::ComputeLiveInBlocks(
1017 AllocaInst *AI, AllocaInfo &Info,
1018 const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
1019 SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
1020 // To determine liveness, we must iterate through the predecessors of blocks
1021 // where the def is live. Blocks are added to the worklist if we need to
1022 // check their predecessors. Start with all the using blocks.
1023 SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
1024 Info.UsingBlocks.end());
1025
1026 // If any of the using blocks is also a definition block, check to see if the
1027 // definition occurs before or after the use. If it happens before the use,
1028 // the value isn't really live-in.
1029 for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
1030 BasicBlock *BB = LiveInBlockWorklist[i];
1031 if (!DefBlocks.count(BB))
1032 continue;
1033
1034 // Okay, this is a block that both uses and defines the value. If the first
1035 // reference to the alloca is a def (store), then we know it isn't live-in.
1036 for (BasicBlock::iterator I = BB->begin();; ++I) {
1037 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
1038 if (SI->getOperand(1) != AI)
1039 continue;
1040
1041 // We found a store to the alloca before a load. The alloca is not
1042 // actually live-in here.
1043 LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
1044 LiveInBlockWorklist.pop_back();
1045 --i;
1046 --e;
1047 break;
1048 }
1049
1050 if (LoadInst *LI = dyn_cast<LoadInst>(I))
1051 // Okay, we found a load before a store to the alloca. It is actually
1052 // live into this block.
1053 if (LI->getOperand(0) == AI)
1054 break;
1055 }
1056 }
1057
1058 // Now that we have a set of blocks where the phi is live-in, recursively add
1059 // their predecessors until we find the full region the value is live.
1060 while (!LiveInBlockWorklist.empty()) {
1061 BasicBlock *BB = LiveInBlockWorklist.pop_back_val();
1062
1063 // The block really is live in here, insert it into the set. If already in
1064 // the set, then it has already been processed.
1065 if (!LiveInBlocks.insert(BB).second)
1066 continue;
1067
1068 // Since the value is live into BB, it is either defined in a predecessor or
1069 // live into it to. Add the preds to the worklist unless they are a
1070 // defining block.
1071 for (BasicBlock *P : predecessors(BB)) {
1072 // The value is not live into a predecessor if it defines the value.
1073 if (DefBlocks.count(P))
1074 continue;
1075
1076 // Otherwise it is, add to the worklist.
1077 LiveInBlockWorklist.push_back(P);
1078 }
1079 }
1080}
1081
1082/// Queue a phi-node to be added to a basic-block for a specific Alloca.
1083///
1084/// Returns true if there wasn't already a phi-node for that variable
1085bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
1086 unsigned &Version) {
1087 // Look up the basic-block in question.
1088 PHINode *&PN = NewPhiNodes[std::make_pair(BB->getNumber(), AllocaNo)];
1089
1090 // If the BB already has a phi node added for the i'th alloca then we're done!
1091 if (PN)
1092 return false;
1093
1094 // Create a PhiNode using the type from loads/stores... and add the phi-node
1095 // to the BasicBlock.
1096 PN = PHINode::Create(AllocaValueTypes[AllocaNo], getNumPreds(BB),
1097 Allocas[AllocaNo]->getName() + "." + Twine(Version++));
1098 PN->insertBefore(BB->begin());
1099 ++NumPHIInsert;
1100 PhiToAllocaMap[PN] = AllocaNo;
1101 return true;
1102}
1103
1104/// Update the debug location of a phi. \p ApplyMergedLoc indicates whether to
1105/// create a merged location incorporating \p DL, or to set \p DL directly.
1107 bool ApplyMergedLoc) {
1108 if (ApplyMergedLoc)
1110 else
1111 PN->setDebugLoc(DL);
1112}
1113
/// Recursively traverse the CFG of the function, renaming loads and
/// stores to the allocas which we are promoting.
///
/// IncomingVals indicates what value each Alloca contains on exit from the
/// predecessor block Pred.
///
/// Note: despite the name, traversal is iterative — successors are pushed
/// onto a worklist at the bottom of this function rather than recursed into.
void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred) {
  // If we are inserting any phi nodes into this BB, they will already be in the
  // block.
  if (PHINode *APN = dyn_cast<PHINode>(BB->begin())) {
    // If we have PHI nodes to update, compute the number of edges from Pred to
    // BB.
    if (PhiToAllocaMap.count(APN)) {
      // We want to be able to distinguish between PHI nodes being inserted by
      // this invocation of mem2reg from those phi nodes that already existed in
      // the IR before mem2reg was run. We determine that APN is being inserted
      // because it is missing incoming edges. All other PHI nodes being
      // inserted by this pass of mem2reg will have the same number of incoming
      // operands so far. Remember this count.
      unsigned NewPHINumOperands = APN->getNumOperands();

      // Pred may reach BB along several CFG edges (counted over Pred's
      // successor list); each such edge needs its own incoming entry below.
      unsigned NumEdges = llvm::count(successors(Pred), BB);
      assert(NumEdges && "Must be at least one edge from Pred to BB!");

      // Add entries for all the phis.
      BasicBlock::iterator PNI = BB->begin();
      do {
        unsigned AllocaNo = PhiToAllocaMap[APN];

        // Update the location of the phi node.
        // Merge only once the phi already has incoming values; the first
        // incoming value sets the location directly.
        updateForIncomingValueLocation(APN, IncomingLocs[AllocaNo],
                                       APN->getNumIncomingValues() > 0);

        // Add N incoming values to the PHI node.
        for (unsigned i = 0; i != NumEdges; ++i)
          APN->addIncoming(IncomingVals[AllocaNo], Pred);

        // For the sequence `return X > 0.0 ? X : -X`, it is expected that this
        // results in fabs intrinsic. However, without no-signed-zeros(nsz) flag
        // on the phi node generated at this stage, fabs folding does not
        // happen. So, we try to infer nsz flag from the function attributes to
        // enable this fabs folding.
        if (isa<FPMathOperator>(APN) && NoSignedZeros)
          APN->setHasNoSignedZeros(true);

        // The currently active variable for this block is now the PHI.
        IncomingVals.set(AllocaNo, APN);
        AllocaATInfo[AllocaNo].updateForNewPhi(APN, DIB);
        // Re-point debug records that described the alloca's address at the
        // phi's value.
        for (DbgVariableRecord *DbgItem : AllocaDPUsers[AllocaNo])
          if (DbgItem->isAddressOfVariable())
            ConvertDebugDeclareToDebugValue(DbgItem, APN, DIB);

        // Get the next phi node.
        ++PNI;
        APN = dyn_cast<PHINode>(PNI);
        if (!APN)
          break;

        // Verify that it is missing entries. If not, it is not being inserted
        // by this mem2reg invocation so we want to ignore it.
      } while (APN->getNumOperands() == NewPHINumOperands);
    }
  }

  // Don't revisit blocks. (The phi updates above run even for visited
  // blocks, because every incoming edge must contribute its entries.)
  if (Visited.test(BB->getNumber()))
    return;
  Visited.set(BB->getNumber());

  // Walk the block once, rewriting loads/stores of promoted allocas. The
  // iterator is advanced before any instruction is erased.
  for (BasicBlock::iterator II = BB->begin(); !II->isTerminator();) {
    Instruction *I = &*II++; // get the instruction, increment iterator

    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
      if (!Src)
        continue;

      // Ignore loads from allocas we are not promoting.
      DenseMap<AllocaInst *, unsigned>::iterator AI = AllocaLookup.find(Src);
      if (AI == AllocaLookup.end())
        continue;

      // Preserve !nonnull/!range-style metadata by materializing assumes
      // before the load disappears.
      Value *V = IncomingVals[AI->second];
      convertMetadataToAssumes(LI, V, SQ.DL, AC, &DT);

      // Anything using the load now uses the current value.
      LI->replaceAllUsesWith(V);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Delete this instruction and mark the name as the current holder of the
      // value
      AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
      if (!Dest)
        continue;

      // Ignore stores to allocas we are not promoting.
      DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
      if (ai == AllocaLookup.end())
        continue;

      // what value were we writing?
      unsigned AllocaNo = ai->second;
      IncomingVals.set(AllocaNo, SI->getOperand(0));

      // Record debuginfo for the store before removing it.
      IncomingLocs.set(AllocaNo, SI->getDebugLoc());
      AllocaATInfo[AllocaNo].updateForDeletedStore(SI, DIB,
                                                   &DVRAssignsToDelete);
      for (DbgVariableRecord *DbgItem : AllocaDPUsers[ai->second])
        if (DbgItem->isAddressOfVariable())
          ConvertDebugDeclareToDebugValue(DbgItem, SI, DIB);
      SI->eraseFromParent();
    }
  }

  // 'Recurse' to our successors.

  // Keep track of the successors so we don't visit the same successor twice
  SmallPtrSet<BasicBlock *, 8> VisitedSuccs;

  // Pushed in reverse so that (assuming the worklist pops the last-pushed
  // entry first — see popFromWorklist) successors are processed in order.
  for (BasicBlock *S : reverse(successors(BB)))
    if (VisitedSuccs.insert(S).second)
      pushToWorklist(S, BB);
}
1235
1237 AssumptionCache *AC) {
1238 // If there is nothing to do, bail out...
1239 if (Allocas.empty())
1240 return;
1241
1242 PromoteMem2Reg(Allocas, DT, AC).run();
1243}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis false
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static DeltaTreeNode * getRoot(void *Root)
This file defines the DenseMap class.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define T
uint64_t IntrinsicInst * II
#define P(N)
static void convertMetadataToAssumes(LoadInst *LI, Value *Val, const DataLayout &DL, AssumptionCache *AC, const DominatorTree *DT)
static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC, SmallPtrSet< DbgVariableRecord *, 8 > *DVRAssignsToDelete)
Many allocas are only used within a single basic block.
static void removeIntrinsicUsers(AllocaInst *AI)
static void updateForIncomingValueLocation(PHINode *PN, DebugLoc DL, bool ApplyMergedLoc)
Update the debug location of a phi.
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC, SmallPtrSet< DbgVariableRecord *, 8 > *DVRAssignsToDelete)
Rewrite as many loads as possible given a single store.
static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI)
Given a LoadInst LI this adds assume(LI != null) after it.
static StringRef getName(Value *V)
static void ComputeLiveInBlocks(const SmallPtrSetImpl< BasicBlock * > &UsingBlocks, const SmallPtrSetImpl< BasicBlock * > &DefBlocks, SmallPtrSetImpl< BasicBlock * > &LiveInBlocks, PredIteratorCache &PredCache)
Given sets of UsingBlocks and DefBlocks, compute the set of LiveInBlocks.
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
unsigned getNumber() const
Definition BasicBlock.h:95
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:470
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
const Instruction & front() const
Definition BasicBlock.h:493
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
This class represents a no-op cast from one type to another.
bool test(unsigned Idx) const
Definition BitVector.h:480
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition BitVector.h:360
BitVector & set()
Definition BitVector.h:370
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ ICMP_NE
not equal
Definition InstrTypes.h:698
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
DWARF expression.
LLVM_ABI bool startsWithDeref() const
Return whether the first element a DW_OP_deref.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
LLVM_ABI void eraseFromParent()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
bool isValueOfVariable() const
Determine if this describes the value of a local variable.
bool isAddressOfVariable() const
Does this describe the address of a local variable.
DIExpression * getExpression() const
static LLVM_ABI DbgVariableRecord * createDbgVariableRecord(Value *Location, DILocalVariable *DV, DIExpression *Expr, const DILocation *DI)
A debug info location.
Definition DebugLoc.h:123
static DebugLoc getCompilerGenerated()
Definition DebugLoc.h:162
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool erase(const KeyT &Val)
Definition DenseMap.h:330
iterator begin()
Definition DenseMap.h:78
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition DenseMap.h:174
iterator end()
Definition DenseMap.h:81
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Class representing an expression and its matching format.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
This instruction compares its operands according to the predicate given to the constructor.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI void applyMergedLocation(DebugLoc LocA, DebugLoc LocB)
Merge 2 debug locations and apply it to the Instruction.
LLVM_ABI void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Value * getPointerOperand()
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
bool contains(const T &V) const
Check if the SmallSet contains the given element.
Definition SmallSet.h:228
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:183
typename SuperClass::iterator iterator
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition User.cpp:119
Value * getOperand(unsigned i) const
Definition User.h:207
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
iterator_range< user_iterator > users()
Definition Value.h:426
static LLVM_ABI void dropDroppableUse(Use &U)
Remove the droppable use U.
Definition Value.cpp:226
bool use_empty() const
Definition Value.h:346
iterator_range< use_iterator > uses()
Definition Value.h:380
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
Return a range of dbg_assign records for which Inst performs the assignment they encode.
Definition DebugInfo.h:195
LLVM_ABI void deleteAssignmentMarkers(const Instruction *Inst)
Delete the llvm.dbg.assign intrinsics linked to Inst.
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
constexpr double e
@ User
could "use" a pointer
bool empty() const
Definition BasicBlock.h:101
iterator end() const
Definition BasicBlock.h:89
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
LLVM_ABI iterator begin() const
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1730
LLVM_ABI void PromoteMemToReg(ArrayRef< AllocaInst * > Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
auto successors(const MachineBasicBlock *BB)
LLVM_ABI bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V)
Return true if the only users of this pointer are lifetime markers or droppable instructions.
LLVM_ABI void InsertDebugValueAtStoreLoc(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
===------------------------------------------------------------------—===// Dbg Intrinsic utilities
Definition Local.cpp:1721
constexpr from_range_t from_range
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
auto pred_size(const MachineBasicBlock *BB)
LLVM_ABI bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
FunctionAddr VTableAddr uintptr_t uintptr_t Version
Definition InstrProf.h:302
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1634
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg....
Definition Local.cpp:1675
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
IDFCalculator< false > ForwardIDFCalculator
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
LLVM_ABI bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition STLExtras.h:2042
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition STLExtras.h:2002
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto predecessors(const MachineBasicBlock *BB)
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
const DataLayout & DL
Function object to check whether the first component of a container supported by std::get (like std::...
Definition STLExtras.h:1437