LLVM 18.0.0git
PromoteMemoryToRegister.cpp
Go to the documentation of this file.
1//===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file promotes memory references to be register references. It promotes
10// alloca instructions which only have loads and stores as uses. An alloca is
11// transformed by using iterated dominator frontiers to place PHI nodes, then
12// traversing the function in depth-first order to rewrite loads and stores as
13// appropriate.
14//
15//===----------------------------------------------------------------------===//
16
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/Statistic.h"
23#include "llvm/ADT/Twine.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CFG.h"
30#include "llvm/IR/Constant.h"
31#include "llvm/IR/Constants.h"
32#include "llvm/IR/DIBuilder.h"
33#include "llvm/IR/DebugInfo.h"
34#include "llvm/IR/Dominators.h"
35#include "llvm/IR/Function.h"
36#include "llvm/IR/InstrTypes.h"
37#include "llvm/IR/Instruction.h"
40#include "llvm/IR/Intrinsics.h"
41#include "llvm/IR/LLVMContext.h"
42#include "llvm/IR/Module.h"
43#include "llvm/IR/Type.h"
44#include "llvm/IR/User.h"
48#include <algorithm>
49#include <cassert>
50#include <iterator>
51#include <utility>
52#include <vector>
53
54using namespace llvm;
55
// Debug type tag consumed by the STATISTIC macros below (and LLVM_DEBUG).
56#define DEBUG_TYPE "mem2reg"
57
// Pass statistics; counters are printed when LLVM runs with -stats.
58STATISTIC(NumLocalPromoted, "Number of alloca's promoted within one block");
59STATISTIC(NumSingleStore, "Number of alloca's promoted with a single store");
60STATISTIC(NumDeadAlloca, "Number of dead alloca's removed");
61STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");
62
// NOTE(review): this listing is a doxygen extraction and dropped source line
// 63 here — per upstream LLVM that is the signature
// `bool llvm::isAllocaPromotable(const AllocaInst *AI) {`; confirm against
// the real source file.
64 // Only allow direct and non-volatile loads and stores...
65 for (const User *U : AI->users()) {
66 if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
67 // Note that atomic loads can be transformed; atomic semantics do
68 // not have any meaning for a local alloca.
69 if (LI->isVolatile() || LI->getType() != AI->getAllocatedType())
70 return false;
71 } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
72 if (SI->getValueOperand() == AI ||
73 SI->getValueOperand()->getType() != AI->getAllocatedType())
74 return false; // Don't allow a store OF the AI, only INTO the AI.
75 // Note that atomic stores can be transformed; atomic semantics do
76 // not have any meaning for a local alloca.
77 if (SI->isVolatile())
78 return false;
79 } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
// Lifetime markers and droppable intrinsics (e.g. llvm.assume) do not
// block promotion; removeIntrinsicUsers() strips them later.
80 if (!II->isLifetimeStartOrEnd() && !II->isDroppable())
81 return false;
82 } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
// NOTE(review): the guard condition on source line 83 is elided in this
// view; the bitcast is only acceptable when that condition holds.
84 return false;
85 } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
86 if (!GEPI->hasAllZeroIndices())
87 return false;
// NOTE(review): the condition guarding this return (source line 88) is
// elided in this view.
89 return false;
90 } else if (const AddrSpaceCastInst *ASCI = dyn_cast<AddrSpaceCastInst>(U)) {
// NOTE(review): the condition guarding this return (source line 91) is
// elided in this view.
92 return false;
93 } else {
// Any other kind of user makes the alloca non-promotable.
94 return false;
95 }
96 }
97
98 return true;
99}
100
101namespace {
102
103/// Helper for updating assignment tracking debug info when promoting allocas.
104class AssignmentTrackingInfo {
105 /// DbgAssignIntrinsics linked to the alloca with at most one per variable
106 /// fragment. (i.e. not be a comprehensive set if there are multiple
107 /// dbg.assigns for one variable fragment).
// NOTE(review): the declaration of the `DbgAssigns` container (source line
// 108) is elided in this view; the member is used throughout this class.
109
110public:
// Collect one dbg.assign per distinct variable into DbgAssigns.
111 void init(AllocaInst *AI) {
// NOTE(review): source lines 112-113 (the local `Vars` de-duplication set
// and the loop header over AI's assignment markers) are elided here.
114 if (Vars.insert(DebugVariable(DAI)).second)
115 DbgAssigns.push_back(DAI);
116 }
117 }
118
119 /// Update assignment tracking debug info given for the to-be-deleted store
120 /// \p ToDelete that stores to this alloca.
121 void updateForDeletedStore(
122 StoreInst *ToDelete, DIBuilder &DIB,
123 SmallSet<DbgAssignIntrinsic *, 8> *DbgAssignsToDelete) const {
124 // There's nothing to do if the alloca doesn't have any variables using
125 // assignment tracking.
126 if (DbgAssigns.empty())
127 return;
128
129 // Insert a dbg.value where the linked dbg.assign is and remember to delete
130 // the dbg.assign later. Demoting to dbg.value isn't necessary for
131 // correctness but does reduce compile time and memory usage by reducing
132 // unnecessary function-local metadata. Remember that we've seen a
133 // dbg.assign for each variable fragment for the untracked store handling
134 // (after this loop).
135 SmallSet<DebugVariableAggregate, 2> VarHasDbgAssignForStore;
136 for (DbgAssignIntrinsic *DAI : at::getAssignmentMarkers(ToDelete)) {
137 VarHasDbgAssignForStore.insert(DebugVariableAggregate(DAI));
138 DbgAssignsToDelete->insert(DAI);
139 DIB.insertDbgValueIntrinsic(DAI->getValue(), DAI->getVariable(),
140 DAI->getExpression(), DAI->getDebugLoc(),
141 DAI);
142 }
143
144 // It's possible for variables using assignment tracking to have no
145 // dbg.assign linked to this store. These are variables in DbgAssigns that
146 // are missing from VarHasDbgAssignForStore. Since there isn't a dbg.assign
147 // to mark the assignment - and the store is going to be deleted - insert a
148 // dbg.value to do that now. An untracked store may be either one that
149 // cannot be represented using assignment tracking (non-const offset or
150 // size) or one that is trackable but has had its DIAssignID attachment
151 // dropped accidentally.
152 for (auto *DAI : DbgAssigns) {
153 if (VarHasDbgAssignForStore.contains(DebugVariableAggregate(DAI)))
154 continue;
155 ConvertDebugDeclareToDebugValue(DAI, ToDelete, DIB);
156 }
157 }
158
159 /// Update assignment tracking debug info given for the newly inserted PHI \p
160 /// NewPhi.
161 void updateForNewPhi(PHINode *NewPhi, DIBuilder &DIB) const {
162 // Regardless of the position of dbg.assigns relative to stores, the
163 // incoming values into a new PHI should be the same for the (imaginary)
164 // debug-phi.
165 for (auto *DAI : DbgAssigns)
166 ConvertDebugDeclareToDebugValue(DAI, NewPhi, DIB);
167 }
168
// Reset / query the tracked dbg.assign set.
169 void clear() { DbgAssigns.clear(); }
170 bool empty() { return DbgAssigns.empty(); }
171};
172
// Per-alloca use summary, rebuilt from scratch by AnalyzeAlloca() for each
// candidate; drives the single-store and single-block fast paths in run().
173struct AllocaInfo {
// NOTE(review): source line 174 (the `DbgUserVec` type alias used below) is
// elided in this view.
175
176 SmallVector<BasicBlock *, 32> DefiningBlocks;
// NOTE(review): source line 177 (the `UsingBlocks` container declaration,
// cleared in clear() and filled in AnalyzeAlloca()) is elided in this view.
178
// The last store seen while scanning; meaningful for the single-store
// rewrite only when DefiningBlocks.size() == 1 (checked by the caller).
179 StoreInst *OnlyStore;
180 BasicBlock *OnlyBlock;
181 bool OnlyUsedInOneBlock;
182
183 /// Debug users of the alloca - does not include dbg.assign intrinsics.
184 DbgUserVec DbgUsers;
185 /// Helper to update assignment tracking debug info.
186 AssignmentTrackingInfo AssignmentTracking;
187
// Reset all fields to their "no uses seen yet" state.
188 void clear() {
189 DefiningBlocks.clear();
190 UsingBlocks.clear();
191 OnlyStore = nullptr;
192 OnlyBlock = nullptr;
193 OnlyUsedInOneBlock = true;
194 DbgUsers.clear();
195 AssignmentTracking.clear();
196 }
197
198 /// Scan the uses of the specified alloca, filling in the AllocaInfo used
199 /// by the rest of the pass to reason about the uses of this alloca.
200 void AnalyzeAlloca(AllocaInst *AI) {
201 clear();
202
203 // As we scan the uses of the alloca instruction, keep track of stores,
204 // and decide whether all of the loads and stores to the alloca are within
205 // the same basic block.
206 for (User *U : AI->users()) {
207 Instruction *User = cast<Instruction>(U);
208
209 if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
210 // Remember the basic blocks which define new values for the alloca
211 DefiningBlocks.push_back(SI->getParent());
212 OnlyStore = SI;
213 } else {
214 LoadInst *LI = cast<LoadInst>(User);
215 // Otherwise it must be a load instruction, keep track of variable
216 // reads.
217 UsingBlocks.push_back(LI->getParent());
218 }
219
220 if (OnlyUsedInOneBlock) {
221 if (!OnlyBlock)
222 OnlyBlock = User->getParent();
223 else if (OnlyBlock != User->getParent())
224 OnlyUsedInOneBlock = false;
225 }
226 }
// Keep only non-dbg.assign debug users; dbg.assigns are handled separately
// by AssignmentTrackingInfo below.
227 DbgUserVec AllDbgUsers;
228 findDbgUsers(AllDbgUsers, AI);
229 std::copy_if(AllDbgUsers.begin(), AllDbgUsers.end(),
230 std::back_inserter(DbgUsers), [](DbgVariableIntrinsic *DII) {
231 return !isa<DbgAssignIntrinsic>(DII);
232 });
233 AssignmentTracking.init(AI);
234 }
235};
236
237/// Data package used by RenamePass().
238struct RenamePassData {
239 using ValVector = std::vector<Value *>;
240 using LocationVector = std::vector<DebugLoc>;
241
242 RenamePassData(BasicBlock *B, BasicBlock *P, ValVector V, LocationVector L)
243 : BB(B), Pred(P), Values(std::move(V)), Locations(std::move(L)) {}
244
245 BasicBlock *BB;
246 BasicBlock *Pred;
247 ValVector Values;
248 LocationVector Locations;
249};
250
251/// This assigns and keeps a per-bb relative ordering of load/store
252/// instructions in the block that directly load or store an alloca.
253///
254/// This functionality is important because it avoids scanning large basic
255/// blocks multiple times when promoting many allocas in the same block.
256class LargeBlockInfo {
257 /// For each instruction that we track, keep the index of the
258 /// instruction.
259 ///
260 /// The index starts out as the number of the instruction from the start of
261 /// the block.
// NOTE(review): source lines 262-263 (the `InstNumbers` map declaration —
// keyed by `const Instruction *` with an unsigned index, per its uses below)
// are elided in this view.
263
264public:
265
266 /// This code only looks at accesses to allocas.
267 static bool isInterestingInstruction(const Instruction *I) {
268 return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
269 (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
270 }
271
272 /// Get or calculate the index of the specified instruction.
273 unsigned getInstructionIndex(const Instruction *I) {
274 assert(isInterestingInstruction(I) &&
275 "Not a load/store to/from an alloca?");
276
277 // If we already have this instruction number, return it.
// NOTE(review): source lines 278-279 (the declaration of iterator `It` via
// `InstNumbers.find(I)`) are elided in this view.
279 if (It != InstNumbers.end())
280 return It->second;
281
282 // Scan the whole block to get the instruction. This accumulates
283 // information for every interesting instruction in the block, in order to
284 // avoid gratuitus rescans.
// One full scan numbers every interesting instruction in the block, so
// subsequent queries for this block hit the cache above.
285 const BasicBlock *BB = I->getParent();
286 unsigned InstNo = 0;
287 for (const Instruction &BBI : *BB)
288 if (isInterestingInstruction(&BBI))
289 InstNumbers[&BBI] = InstNo++;
290 It = InstNumbers.find(I);
291
292 assert(It != InstNumbers.end() && "Didn't insert instruction?");
293 return It->second;
294 }
295
// Forget a deleted instruction so stale pointers are not looked up later.
296 void deleteValue(const Instruction *I) { InstNumbers.erase(I); }
297
298 void clear() { InstNumbers.clear(); }
299};
300
// Driver object for full (multi-block) alloca promotion; see run().
301struct PromoteMem2Reg {
302 /// The alloca instructions being promoted.
303 std::vector<AllocaInst *> Allocas;
304
305 DominatorTree &DT;
306 DIBuilder DIB;
307
308 /// A cache of @llvm.assume intrinsics used by SimplifyInstruction.
309 AssumptionCache *AC;
310
311 const SimplifyQuery SQ;
312
313 /// Reverse mapping of Allocas.
// NOTE(review): source lines 314-315 (the `AllocaLookup` map declaration,
// written in run() as AllocaLookup[Allocas[AllocaNum]] = AllocaNum) are
// elided in this view.
315
316 /// The PhiNodes we're adding.
317 ///
318 /// That map is used to simplify some Phi nodes as we iterate over it, so
319 /// it should have deterministic iterators. We could use a MapVector, but
320 /// since we already maintain a map from BasicBlock* to a stable numbering
321 /// (BBNumbers), the DenseMap is more efficient (also supports removal).
// NOTE(review): source lines 322-323 (the `NewPhiNodes` declaration — a
// DenseMap<std::pair<unsigned, unsigned>, PHINode *> judging by the iterator
// type spelled out in run()) are elided in this view.
323
324 /// For each PHI node, keep track of which entry in Allocas it corresponds
325 /// to.
326 DenseMap<PHINode *, unsigned> PhiToAllocaMap;
327
328 /// For each alloca, we keep track of the dbg.declare intrinsic that
329 /// describes it, if any, so that we can convert it to a dbg.value
330 /// intrinsic if the alloca gets promoted.
// NOTE(review): source lines 331-332 (the `AllocaDbgUsers` declaration,
// resized and indexed per-alloca in run()) are elided in this view.
332
333 /// For each alloca, keep an instance of a helper class that gives us an easy
334 /// way to update assignment tracking debug info if the alloca is promoted.
// NOTE(review): source line 335 (the `AllocaATInfo` declaration, resized and
// indexed per-alloca in run()) is elided in this view.
336 /// A set of dbg.assigns to delete because they've been demoted to
337 /// dbg.values. Call cleanUpDbgAssigns to delete them.
338 SmallSet<DbgAssignIntrinsic *, 8> DbgAssignsToDelete;
339
340 /// The set of basic blocks the renamer has already visited.
// NOTE(review): source lines 341-342 (the `Visited` set declaration, cleared
// after renaming in run()) are elided in this view.
342
343 /// Contains a stable numbering of basic blocks to avoid non-determinstic
344 /// behavior.
// NOTE(review): source lines 345-346 (the `BBNumbers` map declaration,
// lazily populated in run()) are elided in this view.
346
347 /// Lazily compute the number of predecessors a block has.
// NOTE(review): source lines 348-349 (the `BBNumPreds` map declaration used
// by getNumPreds() below) are elided in this view.
349
350public:
351 PromoteMem2Reg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
352 AssumptionCache *AC)
353 : Allocas(Allocas.begin(), Allocas.end()), DT(DT),
354 DIB(*DT.getRoot()->getParent()->getParent(), /*AllowUnresolved*/ false),
355 AC(AC), SQ(DT.getRoot()->getParent()->getParent()->getDataLayout(),
356 nullptr, &DT, AC) {}
357
358 void run();
359
360private:
// Swap-and-pop removal; decrements the index so the caller's loop revisits
// the element that was moved into this slot.
361 void RemoveFromAllocasList(unsigned &AllocaIdx) {
362 Allocas[AllocaIdx] = Allocas.back();
363 Allocas.pop_back();
364 --AllocaIdx;
365 }
366
// Cached predecessor count. A freshly default-constructed map entry is 0,
// which serves as the "not computed yet" sentinel; the stored value is
// therefore pred_size(BB) + 1 and callers get the real count back.
367 unsigned getNumPreds(const BasicBlock *BB) {
368 unsigned &NP = BBNumPreds[BB];
369 if (NP == 0)
370 NP = pred_size(BB) + 1;
371 return NP - 1;
372 }
373
374 void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
375 const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
376 SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
377 void RenamePass(BasicBlock *BB, BasicBlock *Pred,
378 RenamePassData::ValVector &IncVals,
379 RenamePassData::LocationVector &IncLocs,
380 std::vector<RenamePassData> &Worklist);
381 bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
382
383 /// Delete dbg.assigns that have been demoted to dbg.values.
384 void cleanUpDbgAssigns() {
385 for (auto *DAI : DbgAssignsToDelete)
386 DAI->eraseFromParent();
387 DbgAssignsToDelete.clear();
388 }
389};
390
391} // end anonymous namespace
392
393/// Given a LoadInst LI this adds assume(LI != null) after it.
// NOTE(review): source line 394 (the signature — per the call site in
// convertMetadataToAssumes it takes (AssumptionCache *AC, LoadInst *LI)) is
// elided in this view.
395 Function *AssumeIntrinsic =
396 Intrinsic::getDeclaration(LI->getModule(), Intrinsic::assume);
397 ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
// NOTE(review): source line 398 (the second ICmp operand — presumably a null
// constant of LI's type, matching the "LI != null" contract above) is elided
// in this view.
399 LoadNotNull->insertAfter(LI);
400 CallInst *CI = CallInst::Create(AssumeIntrinsic, {LoadNotNull});
401 CI->insertAfter(LoadNotNull);
// Register the new assume so later SimplifyQuery users can see it.
402 AC->registerAssumption(cast<AssumeInst>(CI));
403}
404
// NOTE(review): source line 405 (the first signature line — call sites pass
// (LI, ReplVal, DL, AC, &DT), so the leading parameters are the LoadInst *LI
// being erased and the replacement Value *Val) is elided in this view.
406 const DataLayout &DL, AssumptionCache *AC,
407 const DominatorTree *DT) {
408 // If the load was marked as nonnull we don't want to lose that information
409 // when we erase this Load. So we preserve it with an assume. As !nonnull
410 // returns poison while assume violations are immediate undefined behavior,
411 // we can only do this if the value is known non-poison.
412 if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
413 LI->getMetadata(LLVMContext::MD_noundef) &&
414 !isKnownNonZero(Val, DL, 0, AC, LI, DT))
415 addAssumeNonNull(AC, LI);
416}
417
// NOTE(review): source line 418 (the function signature — this is the helper
// that strips non-load/store users from a promotable alloca; it takes the
// AllocaInst being cleaned up) is elided in this view.
419 // Knowing that this alloca is promotable, we know that it's safe to kill all
420 // instructions except for load and store.
421
422 for (Use &U : llvm::make_early_inc_range(AI->uses())) {
423 Instruction *I = cast<Instruction>(U.getUser());
424 if (isa<LoadInst>(I) || isa<StoreInst>(I))
425 continue;
426
427 // Drop the use of AI in droppable instructions.
428 if (I->isDroppable()) {
429 I->dropDroppableUse(U);
430 continue;
431 }
432
// A non-void user at this point is a pointer-producing instruction
// (per isAllocaPromotable, only casts/GEPs reach here); erase its own
// users first so it becomes dead.
433 if (!I->getType()->isVoidTy()) {
434 // The only users of this bitcast/GEP instruction are lifetime intrinsics.
435 // Follow the use/def chain to erase them now instead of leaving it for
436 // dead code elimination later.
437 for (Use &UU : llvm::make_early_inc_range(I->uses())) {
438 Instruction *Inst = cast<Instruction>(UU.getUser());
439
440 // Drop the use of I in droppable instructions.
441 if (Inst->isDroppable()) {
442 Inst->dropDroppableUse(UU);
443 continue;
444 }
445 Inst->eraseFromParent();
446 }
447 }
448 I->eraseFromParent();
449 }
450}
451
452/// Rewrite as many loads as possible given a single store.
453///
454/// When there is only a single store, we can use the domtree to trivially
455/// replace all of the dominated loads with the stored value. Do so, and return
456/// true if this has successfully promoted the alloca entirely. If this returns
457/// false there were some loads which were not dominated by the single store
458/// and thus must be phi-ed with undef. We fall back to the standard alloca
459/// promotion algorithm in that case.
// NOTE(review): source lines 460 and 462 (the opening of the signature and
// the `DominatorTree &DT, AssumptionCache *AC,` parameters — inferred from
// the call in PromoteMem2Reg::run, which passes
// (AI, Info, LBI, SQ.DL, DT, AC, &DbgAssignsToDelete)) are elided here.
461 AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL,
463 SmallSet<DbgAssignIntrinsic *, 8> *DbgAssignsToDelete) {
464 StoreInst *OnlyStore = Info.OnlyStore;
// A non-instruction stored value (constant/argument/global) is available
// everywhere, so dominance checks can be skipped entirely.
465 bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
466 BasicBlock *StoreBB = OnlyStore->getParent();
// -1 means "store index not computed yet"; computed lazily below.
467 int StoreIndex = -1;
468
469 // Clear out UsingBlocks. We will reconstruct it here if needed.
470 Info.UsingBlocks.clear();
471
472 for (User *U : make_early_inc_range(AI->users())) {
473 Instruction *UserInst = cast<Instruction>(U);
474 if (UserInst == OnlyStore)
475 continue;
476 LoadInst *LI = cast<LoadInst>(UserInst);
477
478 // Okay, if we have a load from the alloca, we want to replace it with the
479 // only value stored to the alloca. We can do this if the value is
480 // dominated by the store. If not, we use the rest of the mem2reg machinery
481 // to insert the phi nodes as needed.
482 if (!StoringGlobalVal) { // Non-instructions are always dominated.
483 if (LI->getParent() == StoreBB) {
484 // If we have a use that is in the same block as the store, compare the
485 // indices of the two instructions to see which one came first. If the
486 // load came before the store, we can't handle it.
487 if (StoreIndex == -1)
488 StoreIndex = LBI.getInstructionIndex(OnlyStore);
489
490 if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
491 // Can't handle this load, bail out.
492 Info.UsingBlocks.push_back(StoreBB);
493 continue;
494 }
495 } else if (!DT.dominates(StoreBB, LI->getParent())) {
496 // If the load and store are in different blocks, use BB dominance to
497 // check their relationships. If the store doesn't dom the use, bail
498 // out.
499 Info.UsingBlocks.push_back(LI->getParent());
500 continue;
501 }
502 }
503
504 // Otherwise, we *can* safely rewrite this load.
505 Value *ReplVal = OnlyStore->getOperand(0);
506 // If the replacement value is the load, this must occur in unreachable
507 // code.
508 if (ReplVal == LI)
509 ReplVal = PoisonValue::get(LI->getType());
510
511 convertMetadataToAssumes(LI, ReplVal, DL, AC, &DT);
512 LI->replaceAllUsesWith(ReplVal);
513 LI->eraseFromParent();
514 LBI.deleteValue(LI);
515 }
516
517 // Finally, after the scan, check to see if the store is all that is left.
518 if (!Info.UsingBlocks.empty())
519 return false; // If not, we'll have to fall back for the remainder.
520
521 DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
522 // Update assignment tracking info for the store we're going to delete.
523 Info.AssignmentTracking.updateForDeletedStore(Info.OnlyStore, DIB,
524 DbgAssignsToDelete);
525
526 // Record debuginfo for the store and remove the declaration's
527 // debuginfo.
528 for (DbgVariableIntrinsic *DII : Info.DbgUsers) {
529 if (DII->isAddressOfVariable()) {
530 ConvertDebugDeclareToDebugValue(DII, Info.OnlyStore, DIB);
531 DII->eraseFromParent();
532 } else if (DII->getExpression()->startsWithDeref()) {
533 DII->eraseFromParent();
534 }
535 }
536
537 // Remove dbg.assigns linked to the alloca as these are now redundant.
// NOTE(review): source line 538 (the statement implementing the comment
// above) is elided in this view.
539
540 // Remove the (now dead) store and alloca.
541 Info.OnlyStore->eraseFromParent();
542 LBI.deleteValue(Info.OnlyStore);
543
544 AI->eraseFromParent();
545 return true;
546}
547
548/// Many allocas are only used within a single basic block. If this is the
549/// case, avoid traversing the CFG and inserting a lot of potentially useless
550/// PHI nodes by just performing a single linear pass over the basic block
551/// using the Alloca.
552///
553/// If we cannot promote this alloca (because it is read before it is written),
554/// return false. This is necessary in cases where, due to control flow, the
555/// alloca is undefined only on some control flow paths. e.g. code like
556/// this is correct in LLVM IR:
557/// // A is an alloca with no stores so far
558/// for (...) {
559/// int t = *A;
560/// if (!first_iteration)
561/// use(t);
562/// *A = 42;
563/// }
// NOTE(review): source lines 564 and 566 (the opening of the signature and
// the `const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC,`
// parameters — inferred from the call in PromoteMem2Reg::run) are elided.
565 AllocaInst *AI, const AllocaInfo &Info, LargeBlockInfo &LBI,
567 SmallSet<DbgAssignIntrinsic *, 8> *DbgAssignsToDelete) {
568 // The trickiest case to handle is when we have large blocks. Because of this,
569 // this code is optimized assuming that large blocks happen. This does not
570 // significantly pessimize the small block case. This uses LargeBlockInfo to
571 // make it efficient to get the index of various operations in the block.
572
573 // Walk the use-def list of the alloca, getting the locations of all stores.
574 using StoresByIndexTy = SmallVector<std::pair<unsigned, StoreInst *>, 64>;
575 StoresByIndexTy StoresByIndex;
576
577 for (User *U : AI->users())
578 if (StoreInst *SI = dyn_cast<StoreInst>(U))
579 StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
580
581 // Sort the stores by their index, making it efficient to do a lookup with a
582 // binary search.
583 llvm::sort(StoresByIndex, less_first());
584
585 // Walk all of the loads from this alloca, replacing them with the nearest
586 // store above them, if any.
587 for (User *U : make_early_inc_range(AI->users())) {
588 LoadInst *LI = dyn_cast<LoadInst>(U);
589 if (!LI)
590 continue;
591
592 unsigned LoadIdx = LBI.getInstructionIndex(LI);
593
594 // Find the nearest store that has a lower index than this load.
// A null StoreInst* as the pair's second element makes lower_bound compare
// by index only (less_first ignores the pointer).
595 StoresByIndexTy::iterator I = llvm::lower_bound(
596 StoresByIndex,
597 std::make_pair(LoadIdx, static_cast<StoreInst *>(nullptr)),
598 less_first());
599 Value *ReplVal;
600 if (I == StoresByIndex.begin()) {
601 if (StoresByIndex.empty())
602 // If there are no stores, the load takes the undef value.
603 ReplVal = UndefValue::get(LI->getType());
604 else
605 // There is no store before this load, bail out (load may be affected
606 // by the following stores - see main comment).
607 return false;
608 } else {
609 // Otherwise, there was a store before this load, the load takes its
610 // value.
611 ReplVal = std::prev(I)->second->getOperand(0);
612 }
613
614 convertMetadataToAssumes(LI, ReplVal, DL, AC, &DT);
615
616 // If the replacement value is the load, this must occur in unreachable
617 // code.
618 if (ReplVal == LI)
619 ReplVal = PoisonValue::get(LI->getType());
620
621 LI->replaceAllUsesWith(ReplVal);
622 LI->eraseFromParent();
623 LBI.deleteValue(LI);
624 }
625
626 // Remove the (now dead) stores and alloca.
627 DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
// Only stores can remain as users at this point (all loads erased above).
628 while (!AI->use_empty()) {
629 StoreInst *SI = cast<StoreInst>(AI->user_back());
630 // Update assignment tracking info for the store we're going to delete.
631 Info.AssignmentTracking.updateForDeletedStore(SI, DIB, DbgAssignsToDelete);
632 // Record debuginfo for the store before removing it.
633 for (DbgVariableIntrinsic *DII : Info.DbgUsers) {
634 if (DII->isAddressOfVariable()) {
// NOTE(review): source line 635 (the body of this if — the conversion of
// the debug declare to a dbg.value for store SI) is elided in this view.
636 }
637 }
638 SI->eraseFromParent();
639 LBI.deleteValue(SI);
640 }
641
642 // Remove dbg.assigns linked to the alloca as these are now redundant.
// NOTE(review): source line 643 (the statement implementing the comment
// above) is elided in this view.
644 AI->eraseFromParent();
645
646 // The alloca's debuginfo can be removed as well.
647 for (DbgVariableIntrinsic *DII : Info.DbgUsers)
648 if (DII->isAddressOfVariable() || DII->getExpression()->startsWithDeref())
649 DII->eraseFromParent();
650
651 ++NumLocalPromoted;
652 return true;
653}
654
// Main driver: try the cheap per-alloca fast paths (dead, single-store,
// single-block); for the remainder place PHIs via iterated dominance
// frontiers and run the SSA renamer, then clean up redundant PHIs and
// debug info.
655void PromoteMem2Reg::run() {
656 Function &F = *DT.getRoot()->getParent();
657
658 AllocaDbgUsers.resize(Allocas.size());
659 AllocaATInfo.resize(Allocas.size());
660
661 AllocaInfo Info;
662 LargeBlockInfo LBI;
663 ForwardIDFCalculator IDF(DT);
664
665 for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
666 AllocaInst *AI = Allocas[AllocaNum];
667
668 assert(isAllocaPromotable(AI) && "Cannot promote non-promotable alloca!");
669 assert(AI->getParent()->getParent() == &F &&
670 "All allocas should be in the same function, which is same as DF!");
671
// NOTE(review): source line 672 (a call that strips the alloca's
// non-load/store users — see the removeIntrinsicUsers helper above) is
// elided in this view.
673
674 if (AI->use_empty()) {
675 // If there are no uses of the alloca, just delete it now.
676 AI->eraseFromParent();
677
678 // Remove the alloca from the Allocas list, since it has been processed
679 RemoveFromAllocasList(AllocaNum);
680 ++NumDeadAlloca;
681 continue;
682 }
683
684 // Calculate the set of read and write-locations for each alloca. This is
685 // analogous to finding the 'uses' and 'definitions' of each variable.
686 Info.AnalyzeAlloca(AI);
687
688 // If there is only a single store to this value, replace any loads of
689 // it that are directly dominated by the definition with the value stored.
690 if (Info.DefiningBlocks.size() == 1) {
691 if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC,
692 &DbgAssignsToDelete)) {
693 // The alloca has been processed, move on.
694 RemoveFromAllocasList(AllocaNum);
695 ++NumSingleStore;
696 continue;
697 }
698 }
699
700 // If the alloca is only read and written in one basic block, just perform a
701 // linear sweep over the block to eliminate it.
702 if (Info.OnlyUsedInOneBlock &&
703 promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC,
704 &DbgAssignsToDelete)) {
705 // The alloca has been processed, move on.
706 RemoveFromAllocasList(AllocaNum);
707 continue;
708 }
709
710 // If we haven't computed a numbering for the BB's in the function, do so
711 // now.
712 if (BBNumbers.empty()) {
713 unsigned ID = 0;
714 for (auto &BB : F)
715 BBNumbers[&BB] = ID++;
716 }
717
718 // Remember the dbg.declare intrinsic describing this alloca, if any.
719 if (!Info.DbgUsers.empty())
720 AllocaDbgUsers[AllocaNum] = Info.DbgUsers;
721 if (!Info.AssignmentTracking.empty())
722 AllocaATInfo[AllocaNum] = Info.AssignmentTracking;
723
724 // Keep the reverse mapping of the 'Allocas' array for the rename pass.
725 AllocaLookup[Allocas[AllocaNum]] = AllocaNum;
726
727 // Unique the set of defining blocks for efficient lookup.
728 SmallPtrSet<BasicBlock *, 32> DefBlocks(Info.DefiningBlocks.begin(),
729 Info.DefiningBlocks.end());
730
731 // Determine which blocks the value is live in. These are blocks which lead
732 // to uses.
// NOTE(review): source line 733 (the declaration of the `LiveInBlocks` set
// passed by reference on the next line) is elided in this view.
734 ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);
735
736 // At this point, we're committed to promoting the alloca using IDF's, and
737 // the standard SSA construction algorithm. Determine which blocks need phi
738 // nodes and see if we can optimize out some work by avoiding insertion of
739 // dead phi nodes.
740 IDF.setLiveInBlocks(LiveInBlocks);
741 IDF.setDefiningBlocks(DefBlocks);
// NOTE(review): source line 742 (the declaration of the `PHIBlocks` vector
// filled by IDF.calculate below) is elided in this view.
743 IDF.calculate(PHIBlocks);
// Sort by the stable block numbering so PHI insertion order is
// deterministic across runs.
744 llvm::sort(PHIBlocks, [this](BasicBlock *A, BasicBlock *B) {
745 return BBNumbers.find(A)->second < BBNumbers.find(B)->second;
746 });
747
748 unsigned CurrentVersion = 0;
749 for (BasicBlock *BB : PHIBlocks)
750 QueuePhiNode(BB, AllocaNum, CurrentVersion);
751 }
752
753 if (Allocas.empty()) {
754 cleanUpDbgAssigns();
755 return; // All of the allocas must have been trivial!
756 }
757 LBI.clear();
758
759 // Set the incoming values for the basic block to be null values for all of
760 // the alloca's. We do this in case there is a load of a value that has not
761 // been stored yet. In this case, it will get this null value.
762 RenamePassData::ValVector Values(Allocas.size());
763 for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
764 Values[i] = UndefValue::get(Allocas[i]->getAllocatedType());
765
766 // When handling debug info, treat all incoming values as if they have unknown
767 // locations until proven otherwise.
768 RenamePassData::LocationVector Locations(Allocas.size());
769
770 // Walks all basic blocks in the function performing the SSA rename algorithm
771 // and inserting the phi nodes we marked as necessary
772 std::vector<RenamePassData> RenamePassWorkList;
773 RenamePassWorkList.emplace_back(&F.front(), nullptr, std::move(Values),
774 std::move(Locations));
775 do {
776 RenamePassData RPD = std::move(RenamePassWorkList.back());
777 RenamePassWorkList.pop_back();
778 // RenamePass may add new worklist entries.
779 RenamePass(RPD.BB, RPD.Pred, RPD.Values, RPD.Locations, RenamePassWorkList);
780 } while (!RenamePassWorkList.empty());
781
782 // The renamer uses the Visited set to avoid infinite loops. Clear it now.
783 Visited.clear();
784
785 // Remove the allocas themselves from the function.
786 for (Instruction *A : Allocas) {
787 // Remove dbg.assigns linked to the alloca as these are now redundant.
// NOTE(review): source line 788 (the statement implementing the comment
// above) is elided in this view.
789 // If there are any uses of the alloca instructions left, they must be in
790 // unreachable basic blocks that were not processed by walking the dominator
791 // tree. Just delete the users now.
792 if (!A->use_empty())
793 A->replaceAllUsesWith(PoisonValue::get(A->getType()));
794 A->eraseFromParent();
795 }
796
797 // Remove alloca's dbg.declare intrinsics from the function.
798 for (auto &DbgUsers : AllocaDbgUsers) {
799 for (auto *DII : DbgUsers)
800 if (DII->isAddressOfVariable() || DII->getExpression()->startsWithDeref())
801 DII->eraseFromParent();
802 }
803
804 // Loop over all of the PHI nodes and see if there are any that we can get
805 // rid of because they merge all of the same incoming values. This can
806 // happen due to undef values coming into the PHI nodes. This process is
807 // iterative, because eliminating one PHI node can cause others to be removed.
808 bool EliminatedAPHI = true;
809 while (EliminatedAPHI) {
810 EliminatedAPHI = false;
811
812 // Iterating over NewPhiNodes is deterministic, so it is safe to try to
813 // simplify and RAUW them as we go. If it was not, we could add uses to
814 // the values we replace with in a non-deterministic order, thus creating
815 // non-deterministic def->use chains.
816 for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
817 I = NewPhiNodes.begin(),
818 E = NewPhiNodes.end();
819 I != E;) {
820 PHINode *PN = I->second;
821
822 // If this PHI node merges one value and/or undefs, get the value.
823 if (Value *V = simplifyInstruction(PN, SQ)) {
824 PN->replaceAllUsesWith(V);
825 PN->eraseFromParent();
// Post-increment erase keeps the iterator valid across the removal.
826 NewPhiNodes.erase(I++);
827 EliminatedAPHI = true;
828 continue;
829 }
830 ++I;
831 }
832 }
833
834 // At this point, the renamer has added entries to PHI nodes for all reachable
835 // code. Unfortunately, there may be unreachable blocks which the renamer
836 // hasn't traversed. If this is the case, the PHI nodes may not
837 // have incoming values for all predecessors. Loop over all PHI nodes we have
838 // created, inserting poison values if they are missing any incoming values.
839 for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
840 I = NewPhiNodes.begin(),
841 E = NewPhiNodes.end();
842 I != E; ++I) {
843 // We want to do this once per basic block. As such, only process a block
844 // when we find the PHI that is the first entry in the block.
845 PHINode *SomePHI = I->second;
846 BasicBlock *BB = SomePHI->getParent();
847 if (&BB->front() != SomePHI)
848 continue;
849
850 // Only do work here if there the PHI nodes are missing incoming values. We
851 // know that all PHI nodes that were inserted in a block will have the same
852 // number of incoming values, so we can just check any of them.
853 if (SomePHI->getNumIncomingValues() == getNumPreds(BB))
854 continue;
855
856 // Get the preds for BB.
// NOTE(review): source lines 857-858 (the declaration of the `Preds`
// vector, sorted and pruned below) are elided in this view.
858
859 // Ok, now we know that all of the PHI nodes are missing entries for some
860 // basic blocks. Start by sorting the incoming predecessors for efficient
861 // access.
862 auto CompareBBNumbers = [this](BasicBlock *A, BasicBlock *B) {
863 return BBNumbers.find(A)->second < BBNumbers.find(B)->second;
864 };
865 llvm::sort(Preds, CompareBBNumbers);
866
867 // Now we loop through all BB's which have entries in SomePHI and remove
868 // them from the Preds list.
869 for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
870 // Do a log(n) search of the Preds list for the entry we want.
// NOTE(review): source line 871 (the declaration of `EntIt`, the result of
// a binary search over Preds — used in the assert below) is elided here.
872 Preds, SomePHI->getIncomingBlock(i), CompareBBNumbers);
873 assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
874 "PHI node has entry for a block which is not a predecessor!");
875
876 // Remove the entry
877 Preds.erase(EntIt);
878 }
879
880 // At this point, the blocks left in the preds list must have dummy
881 // entries inserted into every PHI nodes for the block. Update all the phi
882 // nodes in this block that we are inserting (there could be phis before
883 // mem2reg runs).
884 unsigned NumBadPreds = SomePHI->getNumIncomingValues();
885 BasicBlock::iterator BBI = BB->begin();
886 while ((SomePHI = dyn_cast<PHINode>(BBI++)) &&
887 SomePHI->getNumIncomingValues() == NumBadPreds) {
888 Value *PoisonVal = PoisonValue::get(SomePHI->getType());
889 for (BasicBlock *Pred : Preds)
890 SomePHI->addIncoming(PoisonVal, Pred);
891 }
892 }
893
894 NewPhiNodes.clear();
895 cleanUpDbgAssigns();
896}
897
898/// Determine which blocks the value is live in.
899///
900/// These are blocks which lead to uses. Knowing this allows us to avoid
901/// inserting PHI nodes into blocks which don't lead to uses (thus, the
902/// inserted phi nodes would be dead).
903void PromoteMem2Reg::ComputeLiveInBlocks(
904 AllocaInst *AI, AllocaInfo &Info,
905 const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
906 SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
907 // To determine liveness, we must iterate through the predecessors of blocks
908 // where the def is live. Blocks are added to the worklist if we need to
909 // check their predecessors. Start with all the using blocks.
910 SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
911 Info.UsingBlocks.end());
912
913 // If any of the using blocks is also a definition block, check to see if the
914 // definition occurs before or after the use. If it happens before the use,
915 // the value isn't really live-in.
916 for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
917 BasicBlock *BB = LiveInBlockWorklist[i];
918 if (!DefBlocks.count(BB))
919 continue;
920
921 // Okay, this is a block that both uses and defines the value. If the first
922 // reference to the alloca is a def (store), then we know it isn't live-in.
923 for (BasicBlock::iterator I = BB->begin();; ++I) {
924 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
925 if (SI->getOperand(1) != AI)
926 continue;
927
928 // We found a store to the alloca before a load. The alloca is not
929 // actually live-in here.
930 LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
931 LiveInBlockWorklist.pop_back();
932 --i;
933 --e;
934 break;
935 }
936
937 if (LoadInst *LI = dyn_cast<LoadInst>(I))
938 // Okay, we found a load before a store to the alloca. It is actually
939 // live into this block.
940 if (LI->getOperand(0) == AI)
941 break;
942 }
943 }
944
945 // Now that we have a set of blocks where the phi is live-in, recursively add
946 // their predecessors until we find the full region the value is live.
947 while (!LiveInBlockWorklist.empty()) {
948 BasicBlock *BB = LiveInBlockWorklist.pop_back_val();
949
950 // The block really is live in here, insert it into the set. If already in
951 // the set, then it has already been processed.
952 if (!LiveInBlocks.insert(BB).second)
953 continue;
954
955 // Since the value is live into BB, it is either defined in a predecessor or
956 // live into it to. Add the preds to the worklist unless they are a
957 // defining block.
958 for (BasicBlock *P : predecessors(BB)) {
959 // The value is not live into a predecessor if it defines the value.
960 if (DefBlocks.count(P))
961 continue;
962
963 // Otherwise it is, add to the worklist.
964 LiveInBlockWorklist.push_back(P);
965 }
966 }
967}
968
969/// Queue a phi-node to be added to a basic-block for a specific Alloca.
970///
971/// Returns true if there wasn't already a phi-node for that variable
972bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
973 unsigned &Version) {
974 // Look up the basic-block in question.
975 PHINode *&PN = NewPhiNodes[std::make_pair(BBNumbers[BB], AllocaNo)];
976
977 // If the BB already has a phi node added for the i'th alloca then we're done!
978 if (PN)
979 return false;
980
981 // Create a PhiNode using the dereferenced type... and add the phi-node to the
982 // BasicBlock.
983 PN = PHINode::Create(Allocas[AllocaNo]->getAllocatedType(), getNumPreds(BB),
984 Allocas[AllocaNo]->getName() + "." + Twine(Version++));
985 PN->insertBefore(BB->begin());
986 ++NumPHIInsert;
987 PhiToAllocaMap[PN] = AllocaNo;
988 return true;
989}
990
991/// Update the debug location of a phi. \p ApplyMergedLoc indicates whether to
992/// create a merged location incorporating \p DL, or to set \p DL directly.
994 bool ApplyMergedLoc) {
995 if (ApplyMergedLoc)
997 else
998 PN->setDebugLoc(DL);
999}
1000
/// Recursively traverse the CFG of the function, renaming loads and
/// stores to the allocas which we are promoting.
///
/// IncomingVals indicates what value each Alloca contains on exit from the
/// predecessor block Pred. IncomingLocs holds the debug location of the
/// store that produced each of those values. Worklist accumulates successor
/// blocks still to be visited; the first successor of each block is handled
/// iteratively (via the goto below) rather than recursively.
void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
                                RenamePassData::ValVector &IncomingVals,
                                RenamePassData::LocationVector &IncomingLocs,
                                std::vector<RenamePassData> &Worklist) {
NextIteration:
  // If we are inserting any phi nodes into this BB, they will already be in the
  // block.
  if (PHINode *APN = dyn_cast<PHINode>(BB->begin())) {
    // If we have PHI nodes to update, compute the number of edges from Pred to
    // BB.
    if (PhiToAllocaMap.count(APN)) {
      // We want to be able to distinguish between PHI nodes being inserted by
      // this invocation of mem2reg from those phi nodes that already existed in
      // the IR before mem2reg was run. We determine that APN is being inserted
      // because it is missing incoming edges. All other PHI nodes being
      // inserted by this pass of mem2reg will have the same number of incoming
      // operands so far. Remember this count.
      unsigned NewPHINumOperands = APN->getNumOperands();

      // There may be several edges from Pred to BB (e.g. multiple switch
      // cases targeting the same successor); every such edge needs its own
      // incoming entry on each phi node.
      unsigned NumEdges = llvm::count(successors(Pred), BB);
      assert(NumEdges && "Must be at least one edge from Pred to BB!");

      // Add entries for all the phis.
      BasicBlock::iterator PNI = BB->begin();
      do {
        unsigned AllocaNo = PhiToAllocaMap[APN];

        // Update the location of the phi node.
        updateForIncomingValueLocation(APN, IncomingLocs[AllocaNo],
                                       APN->getNumIncomingValues() > 0);

        // Add N incoming values to the PHI node.
        for (unsigned i = 0; i != NumEdges; ++i)
          APN->addIncoming(IncomingVals[AllocaNo], Pred);

        // The currently active variable for this block is now the PHI.
        IncomingVals[AllocaNo] = APN;
        AllocaATInfo[AllocaNo].updateForNewPhi(APN, DIB);
        // Keep any dbg.declare users of the alloca in sync with the new
        // SSA value by emitting dbg.value records for the phi.
        for (DbgVariableIntrinsic *DII : AllocaDbgUsers[AllocaNo])
          if (DII->isAddressOfVariable())
            ConvertDebugDeclareToDebugValue(DII, APN, DIB);

        // Get the next phi node.
        ++PNI;
        APN = dyn_cast<PHINode>(PNI);
        if (!APN)
          break;

        // Verify that it is missing entries. If not, it is not being inserted
        // by this mem2reg invocation so we want to ignore it.
      } while (APN->getNumOperands() == NewPHINumOperands);
    }
  }

  // Don't revisit blocks.
  if (!Visited.insert(BB).second)
    return;

  // Rewrite every promotable load/store in this block. Note that II is
  // advanced before I may be erased below, keeping the iterator valid.
  for (BasicBlock::iterator II = BB->begin(); !II->isTerminator();) {
    Instruction *I = &*II++; // get the instruction, increment iterator

    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
      if (!Src)
        continue;

      // Only rewrite loads of allocas that are actually being promoted.
      DenseMap<AllocaInst *, unsigned>::iterator AI = AllocaLookup.find(Src);
      if (AI == AllocaLookup.end())
        continue;

      Value *V = IncomingVals[AI->second];
      convertMetadataToAssumes(LI, V, SQ.DL, AC, &DT);

      // Anything using the load now uses the current value.
      LI->replaceAllUsesWith(V);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Delete this instruction and mark the name as the current holder of the
      // value
      AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
      if (!Dest)
        continue;

      DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
      if (ai == AllocaLookup.end())
        continue;

      // what value were we writing?
      unsigned AllocaNo = ai->second;
      IncomingVals[AllocaNo] = SI->getOperand(0);

      // Record debuginfo for the store before removing it.
      IncomingLocs[AllocaNo] = SI->getDebugLoc();
      AllocaATInfo[AllocaNo].updateForDeletedStore(SI, DIB,
                                                   &DbgAssignsToDelete);
      for (DbgVariableIntrinsic *DII : AllocaDbgUsers[ai->second])
        if (DII->isAddressOfVariable())
          ConvertDebugDeclareToDebugValue(DII, SI, DIB);
      SI->eraseFromParent();
    }
  }

  // 'Recurse' to our successors.
  succ_iterator I = succ_begin(BB), E = succ_end(BB);
  if (I == E)
    return;

  // Keep track of the successors so we don't visit the same successor twice
  SmallPtrSet<BasicBlock *, 8> VisitedSuccs;

  // Handle the first successor without using the worklist.
  VisitedSuccs.insert(*I);
  Pred = BB;
  BB = *I;
  ++I;

  for (; I != E; ++I)
    if (VisitedSuccs.insert(*I).second)
      Worklist.emplace_back(*I, Pred, IncomingVals, IncomingLocs);

  // Tail-"recurse" into the first successor without growing the call stack
  // or copying IncomingVals/IncomingLocs.
  goto NextIteration;
}
1128
1130 AssumptionCache *AC) {
1131 // If there is nothing to do, bail out...
1132 if (Allocas.empty())
1133 return;
1134
1135 PromoteMem2Reg(Allocas, DT, AC).run();
1136}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const Function * getParent(const Value *V)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static void clear(coro::Shape &Shape)
Definition: Coroutines.cpp:150
This file defines the DenseMap class.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Module.h This file contains the declarations for the Module class.
#define P(N)
static void convertMetadataToAssumes(LoadInst *LI, Value *Val, const DataLayout &DL, AssumptionCache *AC, const DominatorTree *DT)
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC, SmallSet< DbgAssignIntrinsic *, 8 > *DbgAssignsToDelete)
Rewrite as many loads as possible given a single store.
static void removeIntrinsicUsers(AllocaInst *AI)
static void updateForIncomingValueLocation(PHINode *PN, DebugLoc DL, bool ApplyMergedLoc)
Update the debug location of a phi.
static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI)
Given a LoadInst LI this adds assume(LI != null) after it.
static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC, SmallSet< DbgAssignIntrinsic *, 8 > *DbgAssignsToDelete)
Many allocas are only used within a single basic block.
static StringRef getName(Value *V)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static void ComputeLiveInBlocks(const SmallPtrSetImpl< BasicBlock * > &UsingBlocks, const SmallPtrSetImpl< BasicBlock * > &DefBlocks, SmallPtrSetImpl< BasicBlock * > &LiveInBlocks, PredIteratorCache &PredCache)
Given sets of UsingBlocks and DefBlocks, compute the set of LiveInBlocks.
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
Definition: Instructions.h:58
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:118
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:335
const Instruction & front() const
Definition: BasicBlock.h:347
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:112
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:87
const Instruction & back() const
Definition: BasicBlock.h:349
This class represents a no-op cast from one type to another.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:356
bool startsWithDeref() const
Return whether the first element a DW_OP_deref.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
This represents the llvm.dbg.assign instruction.
This is the common base class for debug info intrinsics for variables.
bool isAddressOfVariable() const
Does this describe the address of a local variable.
DIExpression * getExpression() const
A debug info location.
Definition: DebugLoc.h:33
Identifies a unique instance of a whole variable (discards/ignores fragment information).
Identifies a unique instance of a variable.
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
bool erase(const KeyT &Val)
Definition: DenseMap.h:329
bool empty() const
Definition: DenseMap.h:98
iterator begin()
Definition: DenseMap.h:75
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:151
iterator end()
Definition: DenseMap.h:84
NodeT * getRoot() const
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:940
This instruction compares its operands according to the predicate given to the constructor.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:89
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:392
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Definition: Instruction.cpp:71
const BasicBlock * getParent() const
Definition: Instruction.h:90
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:87
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:302
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:83
void applyMergedLocation(DILocation *LocA, DILocation *LocB)
Merge 2 debug locations and apply it to the Instruction.
Definition: DebugInfo.cpp:880
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:389
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction.
Definition: Instruction.cpp:95
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
An instruction for reading from memory.
Definition: Instructions.h:177
Value * getPointerOperand()
Definition: Instructions.h:264
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1743
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:345
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:384
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:366
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:451
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
bool contains(const T &V) const
Check if the SmallSet contains the given element.
Definition: SmallSet.h:236
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
typename SuperClass::iterator iterator
Definition: SmallVector.h:581
void resize(size_type N)
Definition: SmallVector.h:642
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
An instruction for storing to memory.
Definition: Instructions.h:301
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1724
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:169
bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition: User.cpp:115
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:535
iterator_range< user_iterator > users()
Definition: Value.h:421
static void dropDroppableUse(Use &U)
Remove the droppable use U.
Definition: Value.cpp:217
bool use_empty() const
Definition: Value.h:344
iterator_range< use_iterator > uses()
Definition: Value.h:376
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1422
AssignmentMarkerRange getAssignmentMarkers(DIAssignID *ID)
Return a range of dbg.assign intrinsics which use \ID as an operand.
Definition: DebugInfo.cpp:1737
void deleteAssignmentMarkers(const Instruction *Inst)
Delete the llvm.dbg.assign intrinsics linked to Inst.
Definition: DebugInfo.cpp:1751
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
constexpr double e
Definition: MathExtras.h:31
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:228
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:237
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
Interval::succ_iterator succ_end(Interval *I)
Definition: Interval.h:102
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
void PromoteMemToReg(ArrayRef< AllocaInst * > Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
auto successors(const MachineBasicBlock *BB)
bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V)
Return true if the only users of this pointer are lifetime markers or droppable instructions.
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:99
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:666
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V)
Finds the debug info intrinsics describing a value.
Definition: DebugInfo.cpp:103
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1652
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
===------------------------------------------------------------------—===// Dbg Intrinsic utilities
Definition: Local.cpp:1520
bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1946
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition: STLExtras.h:1919
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1854
auto predecessors(const MachineBasicBlock *BB)
unsigned pred_size(const MachineBasicBlock *BB)
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
const DataLayout & DL
Function object to check whether the first component of a container supported by std::get (like std::...
Definition: STLExtras.h:1455