1//===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The code below implements dead store elimination using MemorySSA. It uses
10// the following general approach: given a MemoryDef, walk upwards to find
11// clobbering MemoryDefs that may be killed by the starting def. Then check
12// that there are no uses that may read the location of the original MemoryDef
13// in between both MemoryDefs. A bit more concretely:
14//
15// For all MemoryDefs StartDef:
16// 1. Get the next dominating clobbering MemoryDef (MaybeDeadAccess) by walking
17// upwards.
18// 2. Check that there are no reads between MaybeDeadAccess and the StartDef by
19// checking all uses starting at MaybeDeadAccess and walking until we see
20// StartDef.
21// 3. For each found CurrentDef, check that:
22// 1. There are no barrier instructions between CurrentDef and StartDef (like
23// throws or stores with ordering constraints).
24// 2. StartDef is executed whenever CurrentDef is executed.
25// 3. StartDef completely overwrites CurrentDef.
26// 4. Erase CurrentDef from the function and MemorySSA.
27//
28//===----------------------------------------------------------------------===//
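
// For illustration only (not part of this file's logic): a minimal
// source-level pattern the walk above is meant to catch, assuming two plain
// stores to the same location with no intervening reads, throws, or ordering
// constraints. The first store plays the role of the clobbered CurrentDef and
// gets erased; the second is the killing StartDef:
//
//   void example(int *p) {
//     *p = 1; // CurrentDef: completely overwritten below and never read.
//     *p = 2; // StartDef (killing MemoryDef).
//   }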
29
30#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
31#include "llvm/ADT/APInt.h"
32#include "llvm/ADT/DenseMap.h"
33#include "llvm/ADT/MapVector.h"
35#include "llvm/ADT/SetVector.h"
38#include "llvm/ADT/Statistic.h"
39#include "llvm/ADT/StringRef.h"
54#include "llvm/IR/Argument.h"
55#include "llvm/IR/BasicBlock.h"
56#include "llvm/IR/Constant.h"
57#include "llvm/IR/Constants.h"
58#include "llvm/IR/DataLayout.h"
59#include "llvm/IR/DebugInfo.h"
60#include "llvm/IR/Dominators.h"
61#include "llvm/IR/Function.h"
62#include "llvm/IR/IRBuilder.h"
64#include "llvm/IR/InstrTypes.h"
65#include "llvm/IR/Instruction.h"
68#include "llvm/IR/Module.h"
69#include "llvm/IR/PassManager.h"
71#include "llvm/IR/Value.h"
74#include "llvm/Support/Debug.h"
81#include <algorithm>
82#include <cassert>
83#include <cstdint>
84#include <iterator>
85#include <map>
86#include <optional>
87#include <utility>
88
89using namespace llvm;
90using namespace PatternMatch;
91
92#define DEBUG_TYPE "dse"
93
94STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
95STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
96STATISTIC(NumFastStores, "Number of stores deleted");
97STATISTIC(NumFastOther, "Number of other instrs removed");
98STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
99STATISTIC(NumModifiedStores, "Number of stores modified");
100STATISTIC(NumCFGChecks, "Number of stores checked against the CFG");
101STATISTIC(NumCFGTries, "Number of attempted CFG-based store eliminations");
102STATISTIC(NumCFGSuccess, "Number of successful CFG-based store eliminations");
103STATISTIC(NumGetDomMemoryDefPassed,
104 "Number of times a valid candidate is returned from getDomMemoryDef");
105STATISTIC(NumDomMemDefChecks,
106 "Number iterations check for reads in getDomMemoryDef");
107
108DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
109 "Controls which MemoryDefs are eliminated.");
110
111static cl::opt<bool>
112EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
113 cl::init(true), cl::Hidden,
114 cl::desc("Enable partial-overwrite tracking in DSE"));
115
116static cl::opt<bool>
117EnablePartialStoreMerging("enable-dse-partial-store-merging",
118 cl::init(true), cl::Hidden,
119 cl::desc("Enable partial store merging in DSE"));
120
121static cl::opt<unsigned>
122 MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
123 cl::desc("The number of memory instructions to scan for "
124 "dead store elimination (default = 150)"));
126 "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
127 cl::desc("The maximum number of steps while walking upwards to find "
128 "MemoryDefs that may be killed (default = 90)"));
129
131 "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
132 cl::desc("The maximum number candidates that only partially overwrite the "
133 "killing MemoryDef to consider"
134 " (default = 5)"));
135
137 "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
138 cl::desc("The number of MemoryDefs we consider as candidates to eliminated "
139 "other stores per basic block (default = 5000)"));
140
142 "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
143 cl::desc(
144 "The cost of a step in the same basic block as the killing MemoryDef"
145 "(default = 1)"));
146
147static cl::opt<unsigned>
148 MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
149 cl::Hidden,
150 cl::desc("The cost of a step in a different basic "
151 "block than the killing MemoryDef"
152 " (default = 5)"));
153
155 "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
156 cl::desc("The maximum number of blocks to check when trying to prove that "
157 "all paths to an exit go through a killing block (default = 50)"));
158
159// This flag allows or disallows DSE to optimize MemorySSA during its
160// traversal. Note that DSE optimizing MemorySSA may impact other passes
161// downstream of the DSE invocation and can lead to issues not being
162// reproducible in isolation (i.e. when MemorySSA is built from scratch). In
163// those cases, the flag can be used to check if DSE's MemorySSA optimizations
164// impact follow-up passes.
165static cl::opt<bool>
166 OptimizeMemorySSA("dse-optimize-memoryssa", cl::init(true), cl::Hidden,
167 cl::desc("Allow DSE to optimize memory accesses."));
168
169//===----------------------------------------------------------------------===//
170// Helper functions
171//===----------------------------------------------------------------------===//
172using OverlapIntervalsTy = std::map<int64_t, int64_t>;
173using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
174
175/// Returns true if the end of this instruction can be safely shortened in
176/// length.
177static bool isShortenableAtTheEnd(Instruction *I) {
178 // Don't shorten stores for now
179 if (isa<StoreInst>(I))
180 return false;
181
182 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
183 switch (II->getIntrinsicID()) {
184 default: return false;
185 case Intrinsic::memset:
186 case Intrinsic::memcpy:
187 case Intrinsic::memcpy_element_unordered_atomic:
188 case Intrinsic::memset_element_unordered_atomic:
189 // Do shorten memory intrinsics.
190 // FIXME: Add memmove if it's also safe to transform.
191 return true;
192 }
193 }
194
195 // Don't shorten libcalls for now.
196
197 return false;
198}
199
200/// Returns true if the beginning of this instruction can be safely shortened
201/// in length.
202static bool isShortenableAtTheBeginning(Instruction *I) {
203 // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
204 // easily done by offsetting the source address.
205 return isa<AnyMemSetInst>(I);
206}
207
208static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
209 const TargetLibraryInfo &TLI,
210 const Function *F) {
211 uint64_t Size;
212 ObjectSizeOpts Opts;
213 Opts.NullIsUnknownSize = NullPointerIsDefined(F);
214
215 if (getObjectSize(V, Size, DL, &TLI, Opts))
216 return Size;
217 return MemoryLocation::UnknownSize;
218}
219
220namespace {
221
222enum OverwriteResult {
223 OW_Begin,
224 OW_Complete,
225 OW_End,
226 OW_PartialEarlierWithFullLater,
227 OW_MaybePartial,
228 OW_None,
229 OW_Unknown
230};
231
232} // end anonymous namespace
233
234/// Check if two instructions are masked stores that completely
235/// overwrite one another. More specifically, \p KillingI has to
236/// overwrite \p DeadI.
237static OverwriteResult isMaskedStoreOverwrite(const Instruction *KillingI,
238 const Instruction *DeadI,
239 BatchAAResults &AA) {
240 const auto *KillingII = dyn_cast<IntrinsicInst>(KillingI);
241 const auto *DeadII = dyn_cast<IntrinsicInst>(DeadI);
242 if (KillingII == nullptr || DeadII == nullptr)
243 return OW_Unknown;
244 if (KillingII->getIntrinsicID() != DeadII->getIntrinsicID())
245 return OW_Unknown;
246 if (KillingII->getIntrinsicID() == Intrinsic::masked_store) {
247 // Type size.
248 VectorType *KillingTy =
249 cast<VectorType>(KillingII->getArgOperand(0)->getType());
250 VectorType *DeadTy = cast<VectorType>(DeadII->getArgOperand(0)->getType());
251 if (KillingTy->getScalarSizeInBits() != DeadTy->getScalarSizeInBits())
252 return OW_Unknown;
253 // Element count.
254 if (KillingTy->getElementCount() != DeadTy->getElementCount())
255 return OW_Unknown;
256 // Pointers.
257 Value *KillingPtr = KillingII->getArgOperand(1)->stripPointerCasts();
258 Value *DeadPtr = DeadII->getArgOperand(1)->stripPointerCasts();
259 if (KillingPtr != DeadPtr && !AA.isMustAlias(KillingPtr, DeadPtr))
260 return OW_Unknown;
261 // Masks.
262 // TODO: check that KillingII's mask is a superset of the DeadII's mask.
263 if (KillingII->getArgOperand(3) != DeadII->getArgOperand(3))
264 return OW_Unknown;
265 return OW_Complete;
266 }
267 return OW_Unknown;
268}
269
270/// Return 'OW_Complete' if a store to the 'KillingLoc' location completely
271/// overwrites a store to the 'DeadLoc' location, 'OW_End' if the end of the
272/// 'DeadLoc' location is completely overwritten by 'KillingLoc', 'OW_Begin'
273/// if the beginning of the 'DeadLoc' location is overwritten by 'KillingLoc'.
274/// 'OW_PartialEarlierWithFullLater' means that a dead (big) store was
275/// overwritten by a killing (smaller) store which doesn't write outside the big
276/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
277/// NOTE: This function must only be called if both \p KillingLoc and \p
278/// DeadLoc belong to the same underlying object with valid \p KillingOff and
279/// \p DeadOff.
280static OverwriteResult isPartialOverwrite(const MemoryLocation &KillingLoc,
281 const MemoryLocation &DeadLoc,
282 int64_t KillingOff, int64_t DeadOff,
283 Instruction *DeadI,
284 InstOverlapIntervalsTy &IOL) {
285 const uint64_t KillingSize = KillingLoc.Size.getValue();
286 const uint64_t DeadSize = DeadLoc.Size.getValue();
287 // We may now overlap, although the overlap is not complete. There might also
288 // be other incomplete overlaps, and together, they might cover the complete
289 // dead store.
290 // Note: The correctness of this logic depends on the fact that this function
291 // is never even called for DepWrite when there are any intervening reads.
292 if (EnablePartialOverwriteTracking &&
293 KillingOff < int64_t(DeadOff + DeadSize) &&
294 int64_t(KillingOff + KillingSize) >= DeadOff) {
295
296 // Insert our part of the overlap into the map.
297 auto &IM = IOL[DeadI];
298 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: DeadLoc [" << DeadOff << ", "
299 << int64_t(DeadOff + DeadSize) << ") KillingLoc ["
300 << KillingOff << ", " << int64_t(KillingOff + KillingSize)
301 << ")\n");
302
303 // Make sure that we only insert non-overlapping intervals and combine
304 // adjacent intervals. The intervals are stored in the map with the ending
305 // offset as the key (in the half-open sense) and the starting offset as
306 // the value.
307 int64_t KillingIntStart = KillingOff;
308 int64_t KillingIntEnd = KillingOff + KillingSize;
309
310 // Find any intervals ending at, or after, KillingIntStart which start
311 // before KillingIntEnd.
312 auto ILI = IM.lower_bound(KillingIntStart);
313 if (ILI != IM.end() && ILI->second <= KillingIntEnd) {
314 // This existing interval is overlapped with the current store somewhere
315 // in [KillingIntStart, KillingIntEnd]. Merge them by erasing the existing
316 // intervals and adjusting our start and end.
317 KillingIntStart = std::min(KillingIntStart, ILI->second);
318 KillingIntEnd = std::max(KillingIntEnd, ILI->first);
319 ILI = IM.erase(ILI);
320
321 // Continue erasing and adjusting our end in case other previous
322 // intervals are also overlapped with the current store.
323 //
324 // |--- dead 1 ---| |--- dead 2 ---|
325 // |------- killing---------|
326 //
327 while (ILI != IM.end() && ILI->second <= KillingIntEnd) {
328 assert(ILI->second > KillingIntStart && "Unexpected interval");
329 KillingIntEnd = std::max(KillingIntEnd, ILI->first);
330 ILI = IM.erase(ILI);
331 }
332 }
333
334 IM[KillingIntEnd] = KillingIntStart;
335
336 ILI = IM.begin();
337 if (ILI->second <= DeadOff && ILI->first >= int64_t(DeadOff + DeadSize)) {
338 LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: DeadLoc ["
339 << DeadOff << ", " << int64_t(DeadOff + DeadSize)
340 << ") Composite KillingLoc [" << ILI->second << ", "
341 << ILI->first << ")\n");
342 ++NumCompletePartials;
343 return OW_Complete;
344 }
345 }
346
347 // Check for a dead store which writes to all the memory locations that
348 // the killing store writes to.
349 if (EnablePartialStoreMerging && KillingOff >= DeadOff &&
350 int64_t(DeadOff + DeadSize) > KillingOff &&
351 uint64_t(KillingOff - DeadOff) + KillingSize <= DeadSize) {
352 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite a dead store [" << DeadOff
353 << ", " << int64_t(DeadOff + DeadSize)
354 << ") by a killing store [" << KillingOff << ", "
355 << int64_t(KillingOff + KillingSize) << ")\n");
356 // TODO: Maybe come up with a better name?
357 return OW_PartialEarlierWithFullLater;
358 }
359
360 // Another interesting case is if the killing store overwrites the end of the
361 // dead store.
362 //
363 // |--dead--|
364 // |-- killing --|
365 //
366 // In this case we may want to trim the size of dead store to avoid
367 // generating stores to addresses which will definitely be overwritten by
368 // the killing store.
369 if (!EnablePartialOverwriteTracking &&
370 (KillingOff > DeadOff && KillingOff < int64_t(DeadOff + DeadSize) &&
371 int64_t(KillingOff + KillingSize) >= int64_t(DeadOff + DeadSize)))
372 return OW_End;
373
374 // Finally, we also need to check if the killing store overwrites the
375 // beginning of the dead store.
376 //
377 // |--dead--|
378 // |-- killing --|
379 //
380 // In this case we may want to move the destination address and trim the size
381 // of dead store to avoid generating stores to addresses which will definitely
382 // be overwritten by the killing store.
383 if (!EnablePartialOverwriteTracking &&
384 (KillingOff <= DeadOff && int64_t(KillingOff + KillingSize) > DeadOff)) {
385 assert(int64_t(KillingOff + KillingSize) < int64_t(DeadOff + DeadSize) &&
386 "Expect to be handled as OW_Complete");
387 return OW_Begin;
388 }
389 // Otherwise, they don't completely overlap.
390 return OW_Unknown;
391}
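
// A minimal standalone sketch (not part of the pass) of the interval
// bookkeeping used above, with invented offsets: a dead store covers [0, 16)
// and two killing stores cover [0, 8) and [8, 16). The map is keyed by the
// interval end offset with the start offset as the value, as described above.
//
//   #include <algorithm>
//   #include <cassert>
//   #include <cstdint>
//   #include <map>
//
//   int main() {
//     std::map<int64_t, int64_t> IM; // end offset -> start offset
//     IM[8] = 0;                     // first killing store [0, 8)
//     int64_t Start = 8, End = 16;   // second killing store [8, 16)
//     auto ILI = IM.lower_bound(Start);
//     if (ILI != IM.end() && ILI->second <= End) {
//       Start = std::min(Start, ILI->second);
//       End = std::max(End, ILI->first);
//       IM.erase(ILI);
//     }
//     IM[End] = Start;
//     // The merged interval [0, 16) now covers the whole dead store, which
//     // is what isPartialOverwrite reports as OW_Complete.
//     assert(IM.begin()->second == 0 && IM.begin()->first == 16);
//     return 0;
//   }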
392
393/// Returns true if the memory which is accessed by the second instruction is not
394/// modified between the first and the second instruction.
395/// Precondition: Second instruction must be dominated by the first
396/// instruction.
397static bool
398memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI,
399 BatchAAResults &AA, const DataLayout &DL,
400 DominatorTree *DT) {
401 // Do a backwards scan through the CFG from SecondI to FirstI. Look for
402 // instructions which can modify the memory location accessed by SecondI.
403 //
404 // While doing the walk keep track of the address to check. It might be
405 // different in different basic blocks due to PHI translation.
406 using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
407 SmallVector<BlockAddressPair, 16> WorkList;
408 // Keep track of the address we visited each block with. Bail out if we
409 // visit a block with different addresses.
410 DenseMap<BasicBlock *, Value *> Visited;
411
412 BasicBlock::iterator FirstBBI(FirstI);
413 ++FirstBBI;
414 BasicBlock::iterator SecondBBI(SecondI);
415 BasicBlock *FirstBB = FirstI->getParent();
416 BasicBlock *SecondBB = SecondI->getParent();
417 MemoryLocation MemLoc;
418 if (auto *MemSet = dyn_cast<MemSetInst>(SecondI))
419 MemLoc = MemoryLocation::getForDest(MemSet);
420 else
421 MemLoc = MemoryLocation::get(SecondI);
422
423 auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
424
425 // Start checking the SecondBB.
426 WorkList.push_back(
427 std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
428 bool isFirstBlock = true;
429
430 // Check all blocks going backward until we reach the FirstBB.
431 while (!WorkList.empty()) {
432 BlockAddressPair Current = WorkList.pop_back_val();
433 BasicBlock *B = Current.first;
434 PHITransAddr &Addr = Current.second;
435 Value *Ptr = Addr.getAddr();
436
437 // Ignore instructions before FirstI if this is the FirstBB.
438 BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
440 BasicBlock::iterator EI;
441 if (isFirstBlock) {
442 // Ignore instructions after SecondI if this is the first visit of SecondBB.
443 assert(B == SecondBB && "first block is not the store block");
444 EI = SecondBBI;
445 isFirstBlock = false;
446 } else {
447 // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
448 // In this case we also have to look at instructions after SecondI.
449 EI = B->end();
450 }
451 for (; BI != EI; ++BI) {
452 Instruction *I = &*BI;
453 if (I->mayWriteToMemory() && I != SecondI)
454 if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
455 return false;
456 }
457 if (B != FirstBB) {
458 assert(B != &FirstBB->getParent()->getEntryBlock() &&
459 "Should not hit the entry block because SI must be dominated by LI");
460 for (BasicBlock *Pred : predecessors(B)) {
461 PHITransAddr PredAddr = Addr;
462 if (PredAddr.needsPHITranslationFromBlock(B)) {
463 if (!PredAddr.isPotentiallyPHITranslatable())
464 return false;
465 if (!PredAddr.translateValue(B, Pred, DT, false))
466 return false;
467 }
468 Value *TranslatedPtr = PredAddr.getAddr();
469 auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr));
470 if (!Inserted.second) {
471 // We already visited this block before. If it was with a different
472 // address - bail out!
473 if (TranslatedPtr != Inserted.first->second)
474 return false;
475 // ... otherwise just skip it.
476 continue;
477 }
478 WorkList.push_back(std::make_pair(Pred, PredAddr));
479 }
480 }
481 }
482 return true;
483}
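
// A hedged source-level illustration of the backward scan above (function and
// variable names invented):
//
//   void example(int *a, int *b, bool c) {
//     *a = 1;          // FirstI
//     if (c)
//       *b = 2;        // potentially aliasing write on one path
//     *a = 3;          // SecondI
//   }
//
// Walking backwards from SecondI visits both predecessors of its block; the
// path containing '*b = 2' makes the function return false unless AA can
// prove that b cannot alias the location written by SecondI.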
484
485static void shortenAssignment(Instruction *Inst, Value *OriginalDest,
486 uint64_t OldOffsetInBits, uint64_t OldSizeInBits,
487 uint64_t NewSizeInBits, bool IsOverwriteEnd) {
488 const DataLayout &DL = Inst->getModule()->getDataLayout();
489 uint64_t DeadSliceSizeInBits = OldSizeInBits - NewSizeInBits;
490 uint64_t DeadSliceOffsetInBits =
491 OldOffsetInBits + (IsOverwriteEnd ? NewSizeInBits : 0);
492 auto SetDeadFragExpr = [](DbgAssignIntrinsic *DAI,
493 DIExpression::FragmentInfo DeadFragment) {
494 // createFragmentExpression expects an offset relative to the existing
495 // fragment offset if there is one.
496 uint64_t RelativeOffset = DeadFragment.OffsetInBits -
497 DAI->getExpression()
498 ->getFragmentInfo()
499 .value_or(DIExpression::FragmentInfo(0, 0))
500 .OffsetInBits;
501 if (auto NewExpr = DIExpression::createFragmentExpression(
502 DAI->getExpression(), RelativeOffset, DeadFragment.SizeInBits)) {
503 DAI->setExpression(*NewExpr);
504 return;
505 }
506 // Failed to create a fragment expression for this so discard the value,
507 // making this a kill location.
508 auto *Expr = *DIExpression::createFragmentExpression(
509 DIExpression::get(DAI->getContext(), std::nullopt),
510 DeadFragment.OffsetInBits, DeadFragment.SizeInBits);
511 DAI->setExpression(Expr);
512 DAI->setKillLocation();
513 };
514
515 // A DIAssignID to use so that the inserted dbg.assign intrinsics do not
516 // link to any instructions. Created in the loop below (once).
517 DIAssignID *LinkToNothing = nullptr;
518 LLVMContext &Ctx = Inst->getContext();
519 auto GetDeadLink = [&Ctx, &LinkToNothing]() {
520 if (!LinkToNothing)
521 LinkToNothing = DIAssignID::getDistinct(Ctx);
522 return LinkToNothing;
523 };
524
525 // Insert an unlinked dbg.assign intrinsic for the dead fragment after each
526 // overlapping dbg.assign intrinsic. The loop invalidates the iterators
527 // returned by getAssignmentMarkers so save a copy of the markers to iterate
528 // over.
529 auto LinkedRange = at::getAssignmentMarkers(Inst);
530 SmallVector<DbgAssignIntrinsic *> Linked(LinkedRange.begin(),
531 LinkedRange.end());
532 for (auto *DAI : Linked) {
533 std::optional<DIExpression::FragmentInfo> NewFragment;
534 if (!at::calculateFragmentIntersect(DL, OriginalDest, DeadSliceOffsetInBits,
535 DeadSliceSizeInBits, DAI,
536 NewFragment) ||
537 !NewFragment) {
538 // We couldn't calculate the intersecting fragment for some reason. Be
539 // cautious and unlink the whole assignment from the store.
540 DAI->setKillAddress();
541 DAI->setAssignId(GetDeadLink());
542 continue;
543 }
544 // No intersect.
545 if (NewFragment->SizeInBits == 0)
546 continue;
547
548 // Fragments overlap: insert a new dbg.assign for this dead part.
549 auto *NewAssign = cast<DbgAssignIntrinsic>(DAI->clone());
550 NewAssign->insertAfter(DAI);
551 NewAssign->setAssignId(GetDeadLink());
552 if (NewFragment)
553 SetDeadFragExpr(NewAssign, *NewFragment);
554 NewAssign->setKillAddress();
555 }
556}
557
558static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart,
559 uint64_t &DeadSize, int64_t KillingStart,
560 uint64_t KillingSize, bool IsOverwriteEnd) {
561 auto *DeadIntrinsic = cast<AnyMemIntrinsic>(DeadI);
562 Align PrefAlign = DeadIntrinsic->getDestAlign().valueOrOne();
563
564 // We assume that memset/memcpy operates in chunks of the "largest" native
565 // type size and aligned on the same value. That means optimal start and size
566 // of memset/memcpy should be modulo of preferred alignment of that type. That
567 // is, there is no sense in trying to reduce the store size any further
568 // since any "extra" stores come for free anyway.
569 // On the other hand, the maximum alignment we can achieve is limited by the
570 // alignment of the initial store.
571
572 // TODO: Limit maximum alignment by preferred (or abi?) alignment of the
573 // "largest" native type.
574 // Note: What is the proper way to get that value?
575 // Should TargetTransformInfo::getRegisterBitWidth be used or anything else?
576 // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign);
577
578 int64_t ToRemoveStart = 0;
579 uint64_t ToRemoveSize = 0;
580 // Compute start and size of the region to remove. Make sure 'PrefAlign' is
581 // maintained on the remaining store.
582 if (IsOverwriteEnd) {
583 // Calculate required adjustment for 'KillingStart' in order to keep
584 // remaining store size aligned on 'PrefAlign'.
585 uint64_t Off =
586 offsetToAlignment(uint64_t(KillingStart - DeadStart), PrefAlign);
587 ToRemoveStart = KillingStart + Off;
588 if (DeadSize <= uint64_t(ToRemoveStart - DeadStart))
589 return false;
590 ToRemoveSize = DeadSize - uint64_t(ToRemoveStart - DeadStart);
591 } else {
592 ToRemoveStart = DeadStart;
593 assert(KillingSize >= uint64_t(DeadStart - KillingStart) &&
594 "Not overlapping accesses?");
595 ToRemoveSize = KillingSize - uint64_t(DeadStart - KillingStart);
596 // Calculate required adjustment for 'ToRemoveSize' in order to keep
597 // start of the remaining store aligned on 'PrefAlign'.
598 uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign);
599 if (Off != 0) {
600 if (ToRemoveSize <= (PrefAlign.value() - Off))
601 return false;
602 ToRemoveSize -= PrefAlign.value() - Off;
603 }
604 assert(isAligned(PrefAlign, ToRemoveSize) &&
605 "Should preserve selected alignment");
606 }
607
608 assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
609 assert(DeadSize > ToRemoveSize && "Can't remove more than original size");
610
611 uint64_t NewSize = DeadSize - ToRemoveSize;
612 if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(DeadI)) {
613 // When shortening an atomic memory intrinsic, the newly shortened
614 // length must remain an integer multiple of the element size.
615 const uint32_t ElementSize = AMI->getElementSizeInBytes();
616 if (0 != NewSize % ElementSize)
617 return false;
618 }
619
620 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW "
621 << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *DeadI
622 << "\n KILLER [" << ToRemoveStart << ", "
623 << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");
624
625 Value *DeadWriteLength = DeadIntrinsic->getLength();
626 Value *TrimmedLength = ConstantInt::get(DeadWriteLength->getType(), NewSize);
627 DeadIntrinsic->setLength(TrimmedLength);
628 DeadIntrinsic->setDestAlignment(PrefAlign);
629
630 Value *OrigDest = DeadIntrinsic->getRawDest();
631 if (!IsOverwriteEnd) {
632 Type *Int8PtrTy =
633 Type::getInt8PtrTy(DeadIntrinsic->getContext(),
634 OrigDest->getType()->getPointerAddressSpace());
635 Value *Dest = OrigDest;
636 if (OrigDest->getType() != Int8PtrTy)
637 Dest = CastInst::CreatePointerCast(OrigDest, Int8PtrTy, "", DeadI);
638 Value *Indices[1] = {
639 ConstantInt::get(DeadWriteLength->getType(), ToRemoveSize)};
640 Instruction *NewDestGEP = GetElementPtrInst::CreateInBounds(
641 Type::getInt8Ty(DeadIntrinsic->getContext()), Dest, Indices, "", DeadI);
642 NewDestGEP->setDebugLoc(DeadIntrinsic->getDebugLoc());
643 if (NewDestGEP->getType() != OrigDest->getType())
644 NewDestGEP = CastInst::CreatePointerCast(NewDestGEP, OrigDest->getType(),
645 "", DeadI);
646 DeadIntrinsic->setDest(NewDestGEP);
647 }
648
649 // Update attached dbg.assign intrinsics. Assume 8-bit byte.
650 shortenAssignment(DeadI, OrigDest, DeadStart * 8, DeadSize * 8, NewSize * 8,
651 IsOverwriteEnd);
652
653 // Finally update start and size of dead access.
654 if (!IsOverwriteEnd)
655 DeadStart += ToRemoveSize;
656 DeadSize = NewSize;
657
658 return true;
659}
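
// A hedged arithmetic example for the IsOverwriteEnd case above (numbers are
// invented): with DeadStart = 0, DeadSize = 24, a killing store starting at
// KillingStart = 10 that covers the tail, and PrefAlign = 8,
// offsetToAlignment(10, 8) yields 6, so ToRemoveStart = 16 and
// ToRemoveSize = 8. The dead intrinsic is shrunk from 24 to 16 bytes, keeping
// the remaining length a multiple of the preferred alignment.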
660
661static bool tryToShortenEnd(Instruction *DeadI, OverlapIntervalsTy &IntervalMap,
662 int64_t &DeadStart, uint64_t &DeadSize) {
663 if (IntervalMap.empty() || !isShortenableAtTheEnd(DeadI))
664 return false;
665
666 OverlapIntervalsTy::iterator OII = --IntervalMap.end();
667 int64_t KillingStart = OII->second;
668 uint64_t KillingSize = OII->first - KillingStart;
669
670 assert(OII->first - KillingStart >= 0 && "Size expected to be positive");
671
672 if (KillingStart > DeadStart &&
673 // Note: "KillingStart - DeadStart" is known to be positive due to
674 // preceding check.
675 (uint64_t)(KillingStart - DeadStart) < DeadSize &&
676 // Note: "DeadSize - (uint64_t)(KillingStart - DeadStart)" is known to
677 // be non negative due to preceding checks.
678 KillingSize >= DeadSize - (uint64_t)(KillingStart - DeadStart)) {
679 if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
680 true)) {
681 IntervalMap.erase(OII);
682 return true;
683 }
684 }
685 return false;
686}
687
688static bool
689tryToShortenBegin(Instruction *DeadI, OverlapIntervalsTy &IntervalMap,
690 int64_t &DeadStart, uint64_t &DeadSize) {
691 if (IntervalMap.empty() || !isShortenableAtTheBeginning(DeadI))
692 return false;
693
694 OverlapIntervalsTy::iterator OII = IntervalMap.begin();
695 int64_t KillingStart = OII->second;
696 uint64_t KillingSize = OII->first - KillingStart;
697
698 assert(OII->first - KillingStart >= 0 && "Size expected to be positive");
699
700 if (KillingStart <= DeadStart &&
701 // Note: "DeadStart - KillingStart" is known to be non negative due to
702 // preceding check.
703 KillingSize > (uint64_t)(DeadStart - KillingStart)) {
704 // Note: "KillingSize - (uint64_t)(DeadStart - KillingStart)" is known to
705 // be positive due to preceding checks.
706 assert(KillingSize - (uint64_t)(DeadStart - KillingStart) < DeadSize &&
707 "Should have been handled as OW_Complete");
708 if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
709 false)) {
710 IntervalMap.erase(OII);
711 return true;
712 }
713 }
714 return false;
715}
716
717static Constant *
718tryToMergePartialOverlappingStores(StoreInst *KillingI, StoreInst *DeadI,
719 int64_t KillingOffset, int64_t DeadOffset,
720 const DataLayout &DL, BatchAAResults &AA,
721 DominatorTree *DT) {
722
723 if (DeadI && isa<ConstantInt>(DeadI->getValueOperand()) &&
724 DL.typeSizeEqualsStoreSize(DeadI->getValueOperand()->getType()) &&
725 KillingI && isa<ConstantInt>(KillingI->getValueOperand()) &&
726 DL.typeSizeEqualsStoreSize(KillingI->getValueOperand()->getType()) &&
727 memoryIsNotModifiedBetween(DeadI, KillingI, AA, DL, DT)) {
728 // If the store we find is:
729 // a) partially overwritten by the store to 'Loc'
730 // b) the killing store is fully contained in the dead one and
731 // c) they both have a constant value
732 // d) none of the two stores need padding
733 // Merge the two stores, replacing the dead store's value with a
734 // merge of both values.
735 // TODO: Deal with other constant types (vectors, etc), and probably
736 // some mem intrinsics (if needed)
737
738 APInt DeadValue = cast<ConstantInt>(DeadI->getValueOperand())->getValue();
739 APInt KillingValue =
740 cast<ConstantInt>(KillingI->getValueOperand())->getValue();
741 unsigned KillingBits = KillingValue.getBitWidth();
742 assert(DeadValue.getBitWidth() > KillingValue.getBitWidth());
743 KillingValue = KillingValue.zext(DeadValue.getBitWidth());
744
745 // Offset of the smaller store inside the larger store
746 unsigned BitOffsetDiff = (KillingOffset - DeadOffset) * 8;
747 unsigned LShiftAmount =
748 DL.isBigEndian() ? DeadValue.getBitWidth() - BitOffsetDiff - KillingBits
749 : BitOffsetDiff;
750 APInt Mask = APInt::getBitsSet(DeadValue.getBitWidth(), LShiftAmount,
751 LShiftAmount + KillingBits);
752 // Clear the bits we'll be replacing, then OR with the smaller
753 // store, shifted appropriately.
754 APInt Merged = (DeadValue & ~Mask) | (KillingValue << LShiftAmount);
755 LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Dead: " << *DeadI
756 << "\n Killing: " << *KillingI
757 << "\n Merged Value: " << Merged << '\n');
758 return ConstantInt::get(DeadI->getValueOperand()->getType(), Merged);
759 }
760 return nullptr;
761}
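
// A hedged worked example for the merge above (little-endian layout, values
// invented): a dead i32 store of 0xAABBCCDD at offset 0 and a killing i8
// store of 0x11 at offset 1 give BitOffsetDiff = 8 and LShiftAmount = 8, so
// Mask covers bits [8, 16) and
//   Merged = (0xAABBCCDD & ~0x0000FF00) | (0x11 << 8) = 0xAABB11DD,
// which becomes the new value of the (previously dead) wider store.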
762
763namespace {
764// Returns true if \p I is an intrinsic that does not read or write memory.
765bool isNoopIntrinsic(Instruction *I) {
766 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
767 switch (II->getIntrinsicID()) {
768 case Intrinsic::lifetime_start:
769 case Intrinsic::lifetime_end:
770 case Intrinsic::invariant_end:
771 case Intrinsic::launder_invariant_group:
772 case Intrinsic::assume:
773 return true;
774 case Intrinsic::dbg_declare:
775 case Intrinsic::dbg_label:
776 case Intrinsic::dbg_value:
777 llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
778 default:
779 return false;
780 }
781 }
782 return false;
783}
784
785// Check if we can ignore \p D for DSE.
786bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
787 Instruction *DI = D->getMemoryInst();
788 // Calls that only access inaccessible memory cannot read or write any memory
789 // locations we consider for elimination.
790 if (auto *CB = dyn_cast<CallBase>(DI))
791 if (CB->onlyAccessesInaccessibleMemory())
792 return true;
793
794 // We can eliminate stores to locations not visible to the caller across
795 // throwing instructions.
796 if (DI->mayThrow() && !DefVisibleToCaller)
797 return true;
798
799 // We can remove the dead stores, irrespective of the fence and its ordering
800 // (release/acquire/seq_cst). Fences only constrain the ordering of
801 // already visible stores; they do not make a store visible to other
802 // threads. So, skipping over a fence does not change a store from being
803 // dead.
804 if (isa<FenceInst>(DI))
805 return true;
806
807 // Skip intrinsics that do not really read or modify memory.
808 if (isNoopIntrinsic(DI))
809 return true;
810
811 return false;
812}
813
814struct DSEState {
815 Function &F;
816 AliasAnalysis &AA;
817 EarliestEscapeInfo EI;
818
819 /// The single BatchAA instance that is used to cache AA queries. It will
820 /// not be invalidated over the whole run. This is safe, because:
821 /// 1. Only memory writes are removed, so the alias cache for memory
822 /// locations remains valid.
823 /// 2. No new instructions are added (only instructions removed), so cached
824 /// information for a deleted value cannot be accessed by a re-used new
825 /// value pointer.
826 BatchAAResults BatchAA;
827
828 MemorySSA &MSSA;
829 DominatorTree &DT;
830 PostDominatorTree &PDT;
831 const TargetLibraryInfo &TLI;
832 const DataLayout &DL;
833 const LoopInfo &LI;
834
835 // Whether the function contains any irreducible control flow, needed to
836 // accurately detect loops.
837 bool ContainsIrreducibleLoops;
838
839 // All MemoryDefs that potentially could kill other MemDefs.
840 SmallVector<MemoryDef *, 64> MemDefs;
841 // Any MemoryDefs that should be skipped because they are already deleted.
842 SmallPtrSet<MemoryAccess *, 4> SkipStores;
843 // Keep track whether a given object is captured before return or not.
844 DenseMap<const Value *, bool> CapturedBeforeReturn;
845 // Keep track of all of the objects that are invisible to the caller after
846 // the function returns.
847 DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
848 // Keep track of blocks with throwing instructions not modeled in MemorySSA.
849 SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
850 // Post-order numbers for each basic block. Used to figure out if memory
851 // accesses are executed before another access.
852 DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
853 // Values that are only used with assumes. Used to refine pointer escape
854 // analysis.
855 SmallPtrSet<const Value *, 16> EphValues;
856
857 /// Keep track of instructions (partly) overlapping with killing MemoryDefs per
858 /// basic block.
859 MapVector<BasicBlock *, InstOverlapIntervalsTy> IOLs;
860 // Check if there are root nodes that are terminated by UnreachableInst.
861 // Those roots pessimize post-dominance queries. If there are such roots,
862 // fall back to CFG scan starting from all non-unreachable roots.
863 bool AnyUnreachableExit;
864
865 // Whether or not we should iterate on removing dead stores at the end of the
866 // function due to removing a store causing a previously captured pointer to
867 // no longer be captured.
868 bool ShouldIterateEndOfFunctionDSE;
869
870 // Class contains self-reference, make sure it's not copied/moved.
871 DSEState(const DSEState &) = delete;
872 DSEState &operator=(const DSEState &) = delete;
873
874 DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
875 PostDominatorTree &PDT, AssumptionCache &AC,
876 const TargetLibraryInfo &TLI, const LoopInfo &LI)
877 : F(F), AA(AA), EI(DT, LI, EphValues), BatchAA(AA, &EI), MSSA(MSSA),
878 DT(DT), PDT(PDT), TLI(TLI), DL(F.getParent()->getDataLayout()), LI(LI) {
879 // Collect blocks with throwing instructions not modeled in MemorySSA and
880 // alloc-like objects.
881 unsigned PO = 0;
882 for (BasicBlock *BB : post_order(&F)) {
883 PostOrderNumbers[BB] = PO++;
884 for (Instruction &I : *BB) {
885 MemoryAccess *MA = MSSA.getMemoryAccess(&I);
886 if (I.mayThrow() && !MA)
887 ThrowingBlocks.insert(I.getParent());
888
889 auto *MD = dyn_cast_or_null<MemoryDef>(MA);
890 if (MD && MemDefs.size() < MemorySSADefsPerBlockLimit &&
891 (getLocForWrite(&I) || isMemTerminatorInst(&I)))
892 MemDefs.push_back(MD);
893 }
894 }
895
896 // Treat byval or inalloca arguments the same as Allocas: stores to them are
897 // dead at the end of the function.
898 for (Argument &AI : F.args())
899 if (AI.hasPassPointeeByValueCopyAttr())
900 InvisibleToCallerAfterRet.insert({&AI, true});
901
902 // Collect whether there is any irreducible control flow in the function.
903 ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI);
904
905 AnyUnreachableExit = any_of(PDT.roots(), [](const BasicBlock *E) {
906 return isa<UnreachableInst>(E->getTerminator());
907 });
908
909 CodeMetrics::collectEphemeralValues(&F, &AC, EphValues);
910 }
911
912 LocationSize strengthenLocationSize(const Instruction *I,
913 LocationSize Size) const {
914 if (auto *CB = dyn_cast<CallBase>(I)) {
915 LibFunc F;
916 if (TLI.getLibFunc(*CB, F) && TLI.has(F) &&
917 (F == LibFunc_memset_chk || F == LibFunc_memcpy_chk)) {
918 // Use the precise location size specified by the 3rd argument
919 // for determining whether KillingI overwrites DeadLoc if it is a memset_chk
920 // instruction. memset_chk will either write the amount specified by the 3rd
921 // argument or immediately abort and exit the program.
922 // NOTE: AA may determine NoAlias if it can prove that the access size
923 // is larger than the allocation size due to that being UB. To avoid
924 // returning potentially invalid NoAlias results by AA, limit the use of
925 // the precise location size to isOverwrite.
926 if (const auto *Len = dyn_cast<ConstantInt>(CB->getArgOperand(2)))
927 return LocationSize::precise(Len->getZExtValue());
928 }
929 }
930 return Size;
931 }
932
933 /// Return 'OW_Complete' if a store to the 'KillingLoc' location (by \p
934 /// KillingI instruction) completely overwrites a store to the 'DeadLoc'
935 /// location (by \p DeadI instruction).
936 /// Return OW_MaybePartial if \p KillingI does not completely overwrite
937 /// \p DeadI, but they both write to the same underlying object. In that
938 /// case, use isPartialOverwrite to check if \p KillingI partially overwrites
939 /// \p DeadI. Returns 'OW_None' if \p KillingI is known to not overwrite
940 /// \p DeadI. Returns 'OW_Unknown' if nothing can be determined.
941 OverwriteResult isOverwrite(const Instruction *KillingI,
942 const Instruction *DeadI,
943 const MemoryLocation &KillingLoc,
944 const MemoryLocation &DeadLoc,
945 int64_t &KillingOff, int64_t &DeadOff) {
946 // AliasAnalysis does not always account for loops. Limit overwrite checks
947 // to dependencies for which we can guarantee they are independent of any
948 // loops they are in.
949 if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc))
950 return OW_Unknown;
951
952 LocationSize KillingLocSize =
953 strengthenLocationSize(KillingI, KillingLoc.Size);
954 const Value *DeadPtr = DeadLoc.Ptr->stripPointerCasts();
955 const Value *KillingPtr = KillingLoc.Ptr->stripPointerCasts();
956 const Value *DeadUndObj = getUnderlyingObject(DeadPtr);
957 const Value *KillingUndObj = getUnderlyingObject(KillingPtr);
958
959 // Check whether the killing store overwrites the whole object, in which
960 // case the size/offset of the dead store does not matter.
961 if (DeadUndObj == KillingUndObj && KillingLocSize.isPrecise()) {
962 uint64_t KillingUndObjSize = getPointerSize(KillingUndObj, DL, TLI, &F);
963 if (KillingUndObjSize != MemoryLocation::UnknownSize &&
964 KillingUndObjSize == KillingLocSize.getValue())
965 return OW_Complete;
966 }
967
968 // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
969 // get imprecise values here, though (except for unknown sizes).
970 if (!KillingLocSize.isPrecise() || !DeadLoc.Size.isPrecise()) {
971 // In case no constant size is known, try to use the IR values for the
972 // number of bytes written and check if they match.
973 const auto *KillingMemI = dyn_cast<MemIntrinsic>(KillingI);
974 const auto *DeadMemI = dyn_cast<MemIntrinsic>(DeadI);
975 if (KillingMemI && DeadMemI) {
976 const Value *KillingV = KillingMemI->getLength();
977 const Value *DeadV = DeadMemI->getLength();
978 if (KillingV == DeadV && BatchAA.isMustAlias(DeadLoc, KillingLoc))
979 return OW_Complete;
980 }
981
982 // Masked stores have imprecise locations, but we can reason about them
983 // to some extent.
984 return isMaskedStoreOverwrite(KillingI, DeadI, BatchAA);
985 }
986
987 const uint64_t KillingSize = KillingLocSize.getValue();
988 const uint64_t DeadSize = DeadLoc.Size.getValue();
989
990 // Query the alias information
991 AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc);
992
993 // If the start pointers are the same, we just have to compare sizes to see if
994 // the killing store was larger than the dead store.
995 if (AAR == AliasResult::MustAlias) {
996 // Make sure that the KillingSize size is >= the DeadSize size.
997 if (KillingSize >= DeadSize)
998 return OW_Complete;
999 }
1000
1001 // If we hit a partial alias we may have a full overwrite
1002 if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
1003 int32_t Off = AAR.getOffset();
1004 if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize)
1005 return OW_Complete;
1006 }
1007
1008 // If we can't resolve the same pointers to the same object, then we can't
1009 // analyze them at all.
1010 if (DeadUndObj != KillingUndObj) {
1011 // Non aliasing stores to different objects don't overlap. Note that
1012 // if the killing store is known to overwrite whole object (out of
1013 // bounds access overwrites whole object as well) then it is assumed to
1014 // completely overwrite any store to the same object even if they don't
1015 // actually alias (see next check).
1016 if (AAR == AliasResult::NoAlias)
1017 return OW_None;
1018 return OW_Unknown;
1019 }
1020
1021 // Okay, we have stores to two completely different pointers. Try to
1022 // decompose the pointer into a "base + constant_offset" form. If the base
1023 // pointers are equal, then we can reason about the two stores.
1024 DeadOff = 0;
1025 KillingOff = 0;
1026 const Value *DeadBasePtr =
1027 GetPointerBaseWithConstantOffset(DeadPtr, DeadOff, DL);
1028 const Value *KillingBasePtr =
1029 GetPointerBaseWithConstantOffset(KillingPtr, KillingOff, DL);
1030
1031 // If the base pointers still differ, we have two completely different
1032 // stores.
1033 if (DeadBasePtr != KillingBasePtr)
1034 return OW_Unknown;
1035
1036 // The killing access completely overlaps the dead store if and only if
1037 // both start and end of the dead one is "inside" the killing one:
1038 // |<->|--dead--|<->|
1039 // |-----killing------|
1040 // Accesses may overlap if and only if start of one of them is "inside"
1041 // another one:
1042 // |<->|--dead--|<-------->|
1043 // |-------killing--------|
1044 // OR
1045 // |-------dead-------|
1046 // |<->|---killing---|<----->|
1047 //
1048 // We have to be careful here as *Off is signed while *.Size is unsigned.
1049
1050 // Check if the dead access starts "not before" the killing one.
1051 if (DeadOff >= KillingOff) {
1052 // If the dead access ends "not after" the killing access then the
1053 // dead one is completely overwritten by the killing one.
1054 if (uint64_t(DeadOff - KillingOff) + DeadSize <= KillingSize)
1055 return OW_Complete;
1056 // If start of the dead access is "before" end of the killing access
1057 // then accesses overlap.
1058 else if ((uint64_t)(DeadOff - KillingOff) < KillingSize)
1059 return OW_MaybePartial;
1060 }
1061 // If start of the killing access is "before" end of the dead access then
1062 // accesses overlap.
1063 else if ((uint64_t)(KillingOff - DeadOff) < DeadSize) {
1064 return OW_MaybePartial;
1065 }
1066
1067 // Can reach here only if accesses are known not to overlap.
1068 return OW_None;
1069 }
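
// A hedged numeric example for the offset checks above (offsets invented):
// with DeadOff = 4, DeadSize = 8, KillingOff = 0 and KillingSize = 16, the
// dead access starts at or after the killing one and
// (DeadOff - KillingOff) + DeadSize = 12 <= 16, so the result is OW_Complete.
// With KillingSize = 8 instead, 12 <= 8 fails but (DeadOff - KillingOff) = 4
// is still < 8, so the accesses overlap and the result is OW_MaybePartial.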
1070
1071 bool isInvisibleToCallerAfterRet(const Value *V) {
1072 if (isa<AllocaInst>(V))
1073 return true;
1074 auto I = InvisibleToCallerAfterRet.insert({V, false});
1075 if (I.second) {
1076 if (!isInvisibleToCallerOnUnwind(V)) {
1077 I.first->second = false;
1078 } else if (isNoAliasCall(V)) {
1079 I.first->second = !PointerMayBeCaptured(V, true, false, EphValues);
1080 }
1081 }
1082 return I.first->second;
1083 }
1084
1085 bool isInvisibleToCallerOnUnwind(const Value *V) {
1086 bool RequiresNoCaptureBeforeUnwind;
1087 if (!isNotVisibleOnUnwind(V, RequiresNoCaptureBeforeUnwind))
1088 return false;
1089 if (!RequiresNoCaptureBeforeUnwind)
1090 return true;
1091
1092 auto I = CapturedBeforeReturn.insert({V, true});
1093 if (I.second)
1094 // NOTE: This could be made more precise by PointerMayBeCapturedBefore
1095 // with the killing MemoryDef. But we refrain from doing so for now to
1096 // limit compile-time and this does not cause any changes to the number
1097 // of stores removed on a large test set in practice.
1098 I.first->second = PointerMayBeCaptured(V, false, true, EphValues);
1099 return !I.first->second;
1100 }
1101
1102 std::optional<MemoryLocation> getLocForWrite(Instruction *I) const {
1103 if (!I->mayWriteToMemory())
1104 return std::nullopt;
1105
1106 if (auto *CB = dyn_cast<CallBase>(I))
1107 return MemoryLocation::getForDest(CB, TLI);
1108
1109 return MemoryLocation::getOrNone(I);
1110 }
1111
1112 /// Assuming this instruction has a dead analyzable write, can we delete
1113 /// this instruction?
1114 bool isRemovable(Instruction *I) {
1115 assert(getLocForWrite(I) && "Must have analyzable write");
1116
1117 // Don't remove volatile/atomic stores.
1118 if (StoreInst *SI = dyn_cast<StoreInst>(I))
1119 return SI->isUnordered();
1120
1121 if (auto *CB = dyn_cast<CallBase>(I)) {
1122 // Don't remove volatile memory intrinsics.
1123 if (auto *MI = dyn_cast<MemIntrinsic>(CB))
1124 return !MI->isVolatile();
1125
1126 // Never remove dead lifetime intrinsics, e.g. because they are followed
1127 // by a free.
1128 if (CB->isLifetimeStartOrEnd())
1129 return false;
1130
1131 return CB->use_empty() && CB->willReturn() && CB->doesNotThrow() &&
1132 !CB->isTerminator();
1133 }
1134
1135 return false;
1136 }
1137
1138 /// Returns true if \p UseInst completely overwrites \p DefLoc
1139 /// (stored by \p DefInst).
1140 bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst,
1141 Instruction *UseInst) {
1142 // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
1143 // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
1144 // MemoryDef.
1145 if (!UseInst->mayWriteToMemory())
1146 return false;
1147
1148 if (auto *CB = dyn_cast<CallBase>(UseInst))
1149 if (CB->onlyAccessesInaccessibleMemory())
1150 return false;
1151
1152 int64_t InstWriteOffset, DepWriteOffset;
1153 if (auto CC = getLocForWrite(UseInst))
1154 return isOverwrite(UseInst, DefInst, *CC, DefLoc, InstWriteOffset,
1155 DepWriteOffset) == OW_Complete;
1156 return false;
1157 }
1158
1159 /// Returns true if \p Def is not read before returning from the function.
1160 bool isWriteAtEndOfFunction(MemoryDef *Def) {
1161 LLVM_DEBUG(dbgs() << " Check if def " << *Def << " ("
1162 << *Def->getMemoryInst()
1163 << ") is at the end the function \n");
1164
1165 auto MaybeLoc = getLocForWrite(Def->getMemoryInst());
1166 if (!MaybeLoc) {
1167 LLVM_DEBUG(dbgs() << " ... could not get location for write.\n");
1168 return false;
1169 }
1170
1171 SmallVector<MemoryAccess *, 4> WorkList;
1172 SmallPtrSet<MemoryAccess *, 8> Visited;
1173 auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) {
1174 if (!Visited.insert(Acc).second)
1175 return;
1176 for (Use &U : Acc->uses())
1177 WorkList.push_back(cast<MemoryAccess>(U.getUser()));
1178 };
1179 PushMemUses(Def);
1180 for (unsigned I = 0; I < WorkList.size(); I++) {
1181 if (WorkList.size() >= MemorySSAScanLimit) {
1182 LLVM_DEBUG(dbgs() << " ... hit exploration limit.\n");
1183 return false;
1184 }
1185
1186 MemoryAccess *UseAccess = WorkList[I];
1187 if (isa<MemoryPhi>(UseAccess)) {
1188 // AliasAnalysis does not account for loops. Limit elimination to
1189 // candidates for which we can guarantee they always store to the same
1190 // memory location.
1191 if (!isGuaranteedLoopInvariant(MaybeLoc->Ptr))
1192 return false;
1193
1194 PushMemUses(cast<MemoryPhi>(UseAccess));
1195 continue;
1196 }
1197 // TODO: Checking for aliasing is expensive. Consider reducing the amount
1198 // of times this is called and/or caching it.
1199 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1200 if (isReadClobber(*MaybeLoc, UseInst)) {
1201 LLVM_DEBUG(dbgs() << " ... hit read clobber " << *UseInst << ".\n");
1202 return false;
1203 }
1204
1205 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
1206 PushMemUses(UseDef);
1207 }
1208 return true;
1209 }
1210
1211 /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1212 /// pair with the MemoryLocation terminated by \p I and a boolean flag
1213 /// indicating whether \p I is a free-like call.
1214 std::optional<std::pair<MemoryLocation, bool>>
1215 getLocForTerminator(Instruction *I) const {
1216 uint64_t Len;
1217 Value *Ptr;
1218 if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
1219 m_Value(Ptr))))
1220 return {std::make_pair(MemoryLocation(Ptr, Len), false)};
1221
1222 if (auto *CB = dyn_cast<CallBase>(I)) {
1223 if (Value *FreedOp = getFreedOperand(CB, &TLI))
1224 return {std::make_pair(MemoryLocation::getAfter(FreedOp), true)};
1225 }
1226
1227 return std::nullopt;
1228 }
1229
1230 /// Returns true if \p I is a memory terminator instruction like
1231 /// llvm.lifetime.end or free.
1232 bool isMemTerminatorInst(Instruction *I) const {
1233 auto *CB = dyn_cast<CallBase>(I);
1234 return CB && (CB->getIntrinsicID() == Intrinsic::lifetime_end ||
1235 getFreedOperand(CB, &TLI) != nullptr);
1236 }
1237
1238 /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
1239 /// instruction \p AccessI.
1240 bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
1241 Instruction *MaybeTerm) {
1242 std::optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1243 getLocForTerminator(MaybeTerm);
1244
1245 if (!MaybeTermLoc)
1246 return false;
1247
1248 // If the terminator is a free-like call, all accesses to the underlying
1249 // object can be considered terminated.
1250 if (getUnderlyingObject(Loc.Ptr) !=
1251 getUnderlyingObject(MaybeTermLoc->first.Ptr))
1252 return false;
1253
1254 auto TermLoc = MaybeTermLoc->first;
1255 if (MaybeTermLoc->second) {
1256 const Value *LocUO = getUnderlyingObject(Loc.Ptr);
1257 return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
1258 }
1259 int64_t InstWriteOffset = 0;
1260 int64_t DepWriteOffset = 0;
1261 return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, InstWriteOffset,
1262 DepWriteOffset) == OW_Complete;
1263 }
1264
1265 // Returns true if \p Use may read from \p DefLoc.
1266 bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
1267 if (isNoopIntrinsic(UseInst))
1268 return false;
1269
1270 // Monotonic or weaker atomic stores can be re-ordered and do not need to be
1271 // treated as read clobber.
1272 if (auto SI = dyn_cast<StoreInst>(UseInst))
1273 return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);
1274
1275 if (!UseInst->mayReadFromMemory())
1276 return false;
1277
1278 if (auto *CB = dyn_cast<CallBase>(UseInst))
1279 if (CB->onlyAccessesInaccessibleMemory())
1280 return false;
1281
1282 return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
1283 }
1284
1285 /// Returns true if a dependency between \p Current and \p KillingDef is
1286 /// guaranteed to be loop invariant for the loops that they are in. Either
1287 /// because they are known to be in the same block, in the same loop level or
1288 /// by guaranteeing that \p CurrentLoc only references a single MemoryLocation
1289 /// during execution of the containing function.
1290 bool isGuaranteedLoopIndependent(const Instruction *Current,
1291 const Instruction *KillingDef,
1292 const MemoryLocation &CurrentLoc) {
1293 // If the dependency is within the same block or loop level (being careful
1294 // of irreducible loops), we know that AA will return a valid result for the
1295 // memory dependency. (Both at the function level, outside of any loop,
1296 // would also be valid but we currently disable that to limit compile time).
1297 if (Current->getParent() == KillingDef->getParent())
1298 return true;
1299 const Loop *CurrentLI = LI.getLoopFor(Current->getParent());
1300 if (!ContainsIrreducibleLoops && CurrentLI &&
1301 CurrentLI == LI.getLoopFor(KillingDef->getParent()))
1302 return true;
1303 // Otherwise check the memory location is invariant to any loops.
1304 return isGuaranteedLoopInvariant(CurrentLoc.Ptr);
1305 }
1306
1307 /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1308 /// loop. In particular, this guarantees that it only references a single
1309 /// MemoryLocation during execution of the containing function.
1310 bool isGuaranteedLoopInvariant(const Value *Ptr) {
1311 Ptr = Ptr->stripPointerCasts();
1312 if (auto *GEP = dyn_cast<GEPOperator>(Ptr))
1313 if (GEP->hasAllConstantIndices())
1314 Ptr = GEP->getPointerOperand()->stripPointerCasts();
1315
1316 if (auto *I = dyn_cast<Instruction>(Ptr)) {
1317 return I->getParent()->isEntryBlock() ||
1318 (!ContainsIrreducibleLoops && !LI.getLoopFor(I->getParent()));
1319 }
1320 return true;
1321 }
1322
1323 // Find a MemoryDef writing to \p KillingLoc and dominating \p StartAccess,
1324 // with no read access between them or on any other path to a function exit
1325 // block if \p KillingLoc is not accessible after the function returns. If
1326 // there is no such MemoryDef, return std::nullopt. The returned value may not
1327 // (completely) overwrite \p KillingLoc. Currently we bail out when we
1328 // encounter an aliasing MemoryUse (read).
1329 std::optional<MemoryAccess *>
1330 getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
1331 const MemoryLocation &KillingLoc, const Value *KillingUndObj,
1332 unsigned &ScanLimit, unsigned &WalkerStepLimit,
1333 bool IsMemTerm, unsigned &PartialLimit) {
1334 if (ScanLimit == 0 || WalkerStepLimit == 0) {
1335 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1336 return std::nullopt;
1337 }
1338
1339 MemoryAccess *Current = StartAccess;
1340 Instruction *KillingI = KillingDef->getMemoryInst();
1341 LLVM_DEBUG(dbgs() << " trying to get dominating access\n");
1342
1343 // Only optimize defining access of KillingDef when directly starting at its
1344 // defining access. The defining access also must only access KillingLoc. At
1345 // the moment we only support instructions with a single write location, so
1346 // it should be sufficient to disable optimizations for instructions that
1347 // also read from memory.
1348 bool CanOptimize = OptimizeMemorySSA &&
1349 KillingDef->getDefiningAccess() == StartAccess &&
1350 !KillingI->mayReadFromMemory();
1351
1352 // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
1353 std::optional<MemoryLocation> CurrentLoc;
1354 for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) {
1355 LLVM_DEBUG({
1356 dbgs() << " visiting " << *Current;
1357 if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
1358 dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
1359 << ")";
1360 dbgs() << "\n";
1361 });
1362
1363 // Reached TOP.
1364 if (MSSA.isLiveOnEntryDef(Current)) {
1365 LLVM_DEBUG(dbgs() << " ... found LiveOnEntryDef\n");
1366 if (CanOptimize && Current != KillingDef->getDefiningAccess())
1367 // The first clobbering def is... none.
1368 KillingDef->setOptimized(Current);
1369 return std::nullopt;
1370 }
1371
1372 // Cost of a step. Accesses in the same block are more likely to be valid
1373 // candidates for elimination, hence consider them cheaper.
1374 unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
1375 ? MemorySSASameBBStepCost
1376 : MemorySSAOtherBBStepCost;
1377 if (WalkerStepLimit <= StepCost) {
1378 LLVM_DEBUG(dbgs() << " ... hit walker step limit\n");
1379 return std::nullopt;
1380 }
1381 WalkerStepLimit -= StepCost;
1382
1383 // Return for MemoryPhis. They cannot be eliminated directly and the
1384 // caller is responsible for traversing them.
1385 if (isa<MemoryPhi>(Current)) {
1386 LLVM_DEBUG(dbgs() << " ... found MemoryPhi\n");
1387 return Current;
1388 }
1389
1390 // Below, check if CurrentDef is a valid candidate to be eliminated by
1391 // KillingDef. If it is not, check the next candidate.
1392 MemoryDef *CurrentDef = cast<MemoryDef>(Current);
1393 Instruction *CurrentI = CurrentDef->getMemoryInst();
1394
1395 if (canSkipDef(CurrentDef, !isInvisibleToCallerOnUnwind(KillingUndObj))) {
1396 CanOptimize = false;
1397 continue;
1398 }
1399
1400 // Before we try to remove anything, check for any extra throwing
1401 // instructions that block us from DSEing
1402 if (mayThrowBetween(KillingI, CurrentI, KillingUndObj)) {
1403 LLVM_DEBUG(dbgs() << " ... skip, may throw!\n");
1404 return std::nullopt;
1405 }
1406
1407 // Check for anything that looks like it will be a barrier to further
1408 // removal
1409 if (isDSEBarrier(KillingUndObj, CurrentI)) {
1410 LLVM_DEBUG(dbgs() << " ... skip, barrier\n");
1411 return std::nullopt;
1412 }
1413
1414 // If Current is known to be on path that reads DefLoc or is a read
1415 // clobber, bail out, as the path is not profitable. We skip this check
1416 // for intrinsic calls, because the code knows how to handle memcpy
1417 // intrinsics.
1418 if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(KillingLoc, CurrentI))
1419 return std::nullopt;
1420
1421 // Quick check if there are direct uses that are read-clobbers.
1422 if (any_of(Current->uses(), [this, &KillingLoc, StartAccess](Use &U) {
1423 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
1424 return !MSSA.dominates(StartAccess, UseOrDef) &&
1425 isReadClobber(KillingLoc, UseOrDef->getMemoryInst());
1426 return false;
1427 })) {
1428 LLVM_DEBUG(dbgs() << " ... found a read clobber\n");
1429 return std::nullopt;
1430 }
1431
1432 // If Current does not have an analyzable write location or is not
1433 // removable, skip it.
1434 CurrentLoc = getLocForWrite(CurrentI);
1435 if (!CurrentLoc || !isRemovable(CurrentI)) {
1436 CanOptimize = false;
1437 continue;
1438 }
1439
1440 // AliasAnalysis does not account for loops. Limit elimination to
1441 // candidates for which we can guarantee they always store to the same
1442 // memory location and not located in different loops.
1443 if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) {
1444 LLVM_DEBUG(dbgs() << " ... not guaranteed loop independent\n");
1445 CanOptimize = false;
1446 continue;
1447 }
1448
1449 if (IsMemTerm) {
1450 // If the killing def is a memory terminator (e.g. lifetime.end), check
1451 // the next candidate if the current Current does not write the same
1452 // underlying object as the terminator.
1453 if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
1454 CanOptimize = false;
1455 continue;
1456 }
1457 } else {
1458 int64_t KillingOffset = 0;
1459 int64_t DeadOffset = 0;
1460 auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc,
1461 KillingOffset, DeadOffset);
1462 if (CanOptimize) {
1463 // CurrentDef is the earliest write clobber of KillingDef. Use it as
1464 // optimized access. Do not optimize if CurrentDef is already the
1465 // defining access of KillingDef.
1466 if (CurrentDef != KillingDef->getDefiningAccess() &&
1467 (OR == OW_Complete || OR == OW_MaybePartial))
1468 KillingDef->setOptimized(CurrentDef);
1469
1470 // Once a may-aliasing def is encountered do not set an optimized
1471 // access.
1472 if (OR != OW_None)
1473 CanOptimize = false;
1474 }
1475
1476 // If Current does not write to the same object as KillingDef, check
1477 // the next candidate.
1478 if (OR == OW_Unknown || OR == OW_None)
1479 continue;
1480 else if (OR == OW_MaybePartial) {
1481 // If KillingDef only partially overwrites Current, check the next
1482 // candidate if the partial step limit is exceeded. This aggressively
1483 // limits the number of candidates for partial store elimination,
1484 // which are less likely to be removable in the end.
1485 if (PartialLimit <= 1) {
1486 WalkerStepLimit -= 1;
1487 LLVM_DEBUG(dbgs() << " ... reached partial limit ... continue with next access\n");
1488 continue;
1489 }
1490 PartialLimit -= 1;
1491 }
1492 }
1493 break;
1494 };
1495
1496 // Accesses to objects accessible after the function returns can only be
1497 // eliminated if the access is dead along all paths to the exit. Collect
1498 // the blocks with killing (=completely overwriting) MemoryDefs and check if
1499 // they cover all paths from MaybeDeadAccess to any function exit.
1500 SmallPtrSet<Instruction *, 16> KillingDefs;
1501 KillingDefs.insert(KillingDef->getMemoryInst());
1502 MemoryAccess *MaybeDeadAccess = Current;
1503 MemoryLocation MaybeDeadLoc = *CurrentLoc;
1504 Instruction *MaybeDeadI = cast<MemoryDef>(MaybeDeadAccess)->getMemoryInst();
1505 LLVM_DEBUG(dbgs() << " Checking for reads of " << *MaybeDeadAccess << " ("
1506 << *MaybeDeadI << ")\n");
1507
1509 auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
1510 for (Use &U : Acc->uses())
1511 WorkList.insert(cast<MemoryAccess>(U.getUser()));
1512 };
1513 PushMemUses(MaybeDeadAccess);
1514
1515 // Check if DeadDef may be read.
1516 for (unsigned I = 0; I < WorkList.size(); I++) {
1517 MemoryAccess *UseAccess = WorkList[I];
1518
1519 LLVM_DEBUG(dbgs() << " " << *UseAccess);
1520 // Bail out if the number of accesses to check exceeds the scan limit.
1521 if (ScanLimit < (WorkList.size() - I)) {
1522 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1523 return std::nullopt;
1524 }
1525 --ScanLimit;
1526 NumDomMemDefChecks++;
1527
1528 if (isa<MemoryPhi>(UseAccess)) {
1529 if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
1530 return DT.properlyDominates(KI->getParent(),
1531 UseAccess->getBlock());
1532 })) {
1533 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
1534 continue;
1535 }
1536 LLVM_DEBUG(dbgs() << "\n ... adding PHI uses\n");
1537 PushMemUses(UseAccess);
1538 continue;
1539 }
1540
1541 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1542 LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
1543
1544 if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
1545 return DT.dominates(KI, UseInst);
1546 })) {
1547 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
1548 continue;
1549 }
1550
1551 // A memory terminator kills all preceding MemoryDefs and all succeeding
1552 // MemoryAccesses. We do not have to check its users.
1553 if (isMemTerminator(MaybeDeadLoc, MaybeDeadI, UseInst)) {
1554 LLVM_DEBUG(
1555 dbgs()
1556 << " ... skipping, memterminator invalidates following accesses\n");
1557 continue;
1558 }
1559
1560 if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
1561 LLVM_DEBUG(dbgs() << " ... adding uses of intrinsic\n");
1562 PushMemUses(UseAccess);
1563 continue;
1564 }
1565
1566 if (UseInst->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj)) {
1567 LLVM_DEBUG(dbgs() << " ... found throwing instruction\n");
1568 return std::nullopt;
1569 }
1570
1571 // Uses which may read the original MemoryDef mean we cannot eliminate the
1572 // original MD. Stop walk.
1573 if (isReadClobber(MaybeDeadLoc, UseInst)) {
1574 LLVM_DEBUG(dbgs() << " ... found read clobber\n");
1575 return std::nullopt;
1576 }
1577
1578 // If this worklist walks back to the original memory access (and the
1579 // pointer is not guaranteed loop invariant) then we cannot assume that a
1580 // store kills itself.
1581 if (MaybeDeadAccess == UseAccess &&
1582 !isGuaranteedLoopInvariant(MaybeDeadLoc.Ptr)) {
1583 LLVM_DEBUG(dbgs() << " ... found not loop invariant self access\n");
1584 return std::nullopt;
1585 }
1586 // Otherwise, for the KillingDef and MaybeDeadAccess we only have to check
1587 // whether they read the memory location (checked just above).
1588 // TODO: It would probably be better to check for self-reads before
1589 // calling the function.
1590 if (KillingDef == UseAccess || MaybeDeadAccess == UseAccess) {
1591 LLVM_DEBUG(dbgs() << " ... skipping killing def/dom access\n");
1592 continue;
1593 }
1594
1595 // Check all uses for MemoryDefs, except for defs completely overwriting
1596 // the original location. We have to check the uses of *all* other
1597 // MemoryDefs we discover, including non-aliasing ones; otherwise we might
1598 // miss cases like the following
1599 // 1 = Def(LoE) ; <----- DeadDef stores [0,1]
1600 // 2 = Def(1) ; (2, 1) = NoAlias, stores [2,3]
1601 // Use(2) ; MayAlias 2 *and* 1, loads [0, 3].
1602 // (The Use points to the *first* Def it may alias)
1603 // 3 = Def(1) ; <---- Current (3, 2) = NoAlias, (3,1) = MayAlias,
1604 // stores [0,1]
1605 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
1606 if (isCompleteOverwrite(MaybeDeadLoc, MaybeDeadI, UseInst)) {
1607 BasicBlock *MaybeKillingBlock = UseInst->getParent();
1608 if (PostOrderNumbers.find(MaybeKillingBlock)->second <
1609 PostOrderNumbers.find(MaybeDeadAccess->getBlock())->second) {
1610 if (!isInvisibleToCallerAfterRet(KillingUndObj)) {
1611 LLVM_DEBUG(dbgs()
1612 << " ... found killing def " << *UseInst << "\n");
1613 KillingDefs.insert(UseInst);
1614 }
1615 } else {
1616 LLVM_DEBUG(dbgs()
1617 << " ... found preceeding def " << *UseInst << "\n");
1618 return std::nullopt;
1619 }
1620 } else
1621 PushMemUses(UseDef);
1622 }
1623 }
1624
1625 // For accesses to locations visible after the function returns, make sure
1626 // that the location is dead (=overwritten) along all paths from
1627 // MaybeDeadAccess to the exit.
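// Illustrative sketch (an assumption, not from the original source):
//   entry:  store i32 0, ptr @G        ; MaybeDeadAccess
//           br i1 %c, label %bb1, label %ret2
//   bb1:    store i32 1, ptr @G        ; killing def
//           br label %ret1
//   ret1:   ret void
//   ret2:   ret void                   ; @G still holds 0 on this path
// The killing block bb1 does not lie on the entry->ret2 path, so the check
// below detects that @G is not overwritten on all paths to an exit and the
// first store must be kept.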
1628 if (!isInvisibleToCallerAfterRet(KillingUndObj)) {
1629 SmallPtrSet<BasicBlock *, 16> KillingBlocks;
1630 for (Instruction *KD : KillingDefs)
1631 KillingBlocks.insert(KD->getParent());
1632 assert(!KillingBlocks.empty() &&
1633 "Expected at least a single killing block");
1634
1635 // Find the common post-dominator of all killing blocks.
1636 BasicBlock *CommonPred = *KillingBlocks.begin();
1637 for (BasicBlock *BB : llvm::drop_begin(KillingBlocks)) {
1638 if (!CommonPred)
1639 break;
1640 CommonPred = PDT.findNearestCommonDominator(CommonPred, BB);
1641 }
1642
1643 // If the common post-dominator does not post-dominate MaybeDeadAccess,
1644 // there is a path from MaybeDeadAccess to an exit not going through a
1645 // killing block.
1646 if (!PDT.dominates(CommonPred, MaybeDeadAccess->getBlock())) {
1647 if (!AnyUnreachableExit)
1648 return std::nullopt;
1649
1650 // Fall back to CFG scan starting at all non-unreachable roots if not
1651 // all paths to the exit go through CommonPred.
1652 CommonPred = nullptr;
1653 }
1654
1655 // If CommonPred itself is in the set of killing blocks, we're done.
1656 if (KillingBlocks.count(CommonPred))
1657 return {MaybeDeadAccess};
1658
1659 SetVector<BasicBlock *> WorkList;
1660 // If CommonPred is null, there are multiple exits from the function.
1661 // They all have to be added to the worklist.
1662 if (CommonPred)
1663 WorkList.insert(CommonPred);
1664 else
1665 for (BasicBlock *R : PDT.roots()) {
1666 if (!isa<UnreachableInst>(R->getTerminator()))
1667 WorkList.insert(R);
1668 }
1669
1670 NumCFGTries++;
1671 // Check if all paths starting from an exit node go through one of the
1672 // killing blocks before reaching MaybeDeadAccess.
1673 for (unsigned I = 0; I < WorkList.size(); I++) {
1674 NumCFGChecks++;
1675 BasicBlock *Current = WorkList[I];
1676 if (KillingBlocks.count(Current))
1677 continue;
1678 if (Current == MaybeDeadAccess->getBlock())
1679 return std::nullopt;
1680
1681 // MaybeDeadAccess is reachable from the entry, so we don't have to
1682 // explore unreachable blocks further.
1683 if (!DT.isReachableFromEntry(Current))
1684 continue;
1685
1686 for (BasicBlock *Pred : predecessors(Current))
1687 WorkList.insert(Pred);
1688
1689 if (WorkList.size() >= MemorySSAPathCheckLimit)
1690 return std::nullopt;
1691 }
1692 NumCFGSuccess++;
1693 }
1694
1695 // No aliasing MemoryUses of MaybeDeadAccess found, MaybeDeadAccess is
1696 // potentially dead.
1697 return {MaybeDeadAccess};
1698 }
1699
1700 // Delete dead memory defs
1701 void deleteDeadInstruction(Instruction *SI) {
1702 MemorySSAUpdater Updater(&MSSA);
1703 SmallVector<Instruction *, 32> NowDeadInsts;
1704 NowDeadInsts.push_back(SI);
1705 --NumFastOther;
1706
1707 while (!NowDeadInsts.empty()) {
1708 Instruction *DeadInst = NowDeadInsts.pop_back_val();
1709 ++NumFastOther;
1710
1711 // Try to preserve debug information attached to the dead instruction.
1712 salvageDebugInfo(*DeadInst);
1713 salvageKnowledge(DeadInst);
1714
1715 // Remove the Instruction from MSSA.
1716 if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
1717 if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
1718 SkipStores.insert(MD);
1719 if (auto *SI = dyn_cast<StoreInst>(MD->getMemoryInst())) {
1720 if (SI->getValueOperand()->getType()->isPointerTy()) {
1721 const Value *UO = getUnderlyingObject(SI->getValueOperand());
1722 if (CapturedBeforeReturn.erase(UO))
1723 ShouldIterateEndOfFunctionDSE = true;
1724 InvisibleToCallerAfterRet.erase(UO);
1725 }
1726 }
1727 }
1728
1729 Updater.removeMemoryAccess(MA);
1730 }
1731
1732 auto I = IOLs.find(DeadInst->getParent());
1733 if (I != IOLs.end())
1734 I->second.erase(DeadInst);
1735 // Remove its operands
1736 for (Use &O : DeadInst->operands())
1737 if (Instruction *OpI = dyn_cast<Instruction>(O)) {
1738 O = nullptr;
1739 if (isInstructionTriviallyDead(OpI, &TLI))
1740 NowDeadInsts.push_back(OpI);
1741 }
1742
1743 EI.removeInstruction(DeadInst);
1744 DeadInst->eraseFromParent();
1745 }
1746 }
1747
1748 // Check for any extra throws between \p KillingI and \p DeadI that block
1749 // DSE. This only checks extra may-throws (those that aren't MemoryDefs).
1750 // MemoryDefs that may throw are handled during the walk from one def to the
1751 // next.
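// Illustrative sketch (an assumption, not from the original source):
//   store i32 0, ptr @G          ; dead-store candidate
//   call void @f() readonly      ; no MemoryDef of its own, but may unwind
//   store i32 1, ptr @G          ; killing store
// If @f unwinds, the caller can still observe @G == 0, so the first store
// has to stay unless the underlying object is invisible to the caller on
// unwind (e.g. a non-escaping alloca).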
1752 bool mayThrowBetween(Instruction *KillingI, Instruction *DeadI,
1753 const Value *KillingUndObj) {
1754 // First see if we can ignore it by using the fact that the object
1755 // underlying KillingI is an alloca/alloca-like object that is not visible
1756 // to the caller during execution of the function.
1757 if (KillingUndObj && isInvisibleToCallerOnUnwind(KillingUndObj))
1758 return false;
1759
1760 if (KillingI->getParent() == DeadI->getParent())
1761 return ThrowingBlocks.count(KillingI->getParent());
1762 return !ThrowingBlocks.empty();
1763 }
1764
1765 // Check if \p DeadI acts as a DSE barrier for \p KillingI. The following
1766 // instructions act as barriers:
1767 // * A memory instruction that may throw and \p KillingI accesses a non-stack
1768 // object.
1769 // * Atomic stores stronger than monotonic.
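// Illustrative sketch (an assumption, not from the original source): if the
// walk reaches
//   store atomic i32 1, ptr @Flag release, align 4
// that store is treated as a barrier; DSE neither removes it nor walks past
// it, since eliminating or reordering accesses around a release/acquire
// operation could break the ordering other threads rely on.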
1770 bool isDSEBarrier(const Value *KillingUndObj, Instruction *DeadI) {
1771 // If DeadI may throw it acts as a barrier, unless the access is to an
1772 // alloca/alloca-like object that does not escape.
1773 if (DeadI->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj))
1774 return true;
1775
1776 // If DeadI is an atomic load/store stronger than monotonic, do not try to
1777 // eliminate/reorder it.
1778 if (DeadI->isAtomic()) {
1779 if (auto *LI = dyn_cast<LoadInst>(DeadI))
1780 return isStrongerThanMonotonic(LI->getOrdering());
1781 if (auto *SI = dyn_cast<StoreInst>(DeadI))
1782 return isStrongerThanMonotonic(SI->getOrdering());
1783 if (auto *ARMW = dyn_cast<AtomicRMWInst>(DeadI))
1784 return isStrongerThanMonotonic(ARMW->getOrdering());
1785 if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(DeadI))
1786 return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
1787 isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
1788 llvm_unreachable("other instructions should be skipped in MemorySSA");
1789 }
1790 return false;
1791 }
1792
1793 /// Eliminate writes to objects that are not visible in the caller and are not
1794 /// accessed before returning from the function.
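// Illustrative sketch (an assumption, not from the original source):
//   define void @f() {
//     %a = alloca i32
//     store i32 42, ptr %a     ; %a does not escape and is never read again
//     ret void
//   }
// The store targets memory that is invisible to the caller after the return
// and is dead on every path to the exit, so it is removed here.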
1795 bool eliminateDeadWritesAtEndOfFunction() {
1796 bool MadeChange = false;
1797 LLVM_DEBUG(
1798 dbgs()
1799 << "Trying to eliminate MemoryDefs at the end of the function\n");
1800 do {
1801 ShouldIterateEndOfFunctionDSE = false;
1802 for (MemoryDef *Def : llvm::reverse(MemDefs)) {
1803 if (SkipStores.contains(Def))
1804 continue;
1805
1806 Instruction *DefI = Def->getMemoryInst();
1807 auto DefLoc = getLocForWrite(DefI);
1808 if (!DefLoc || !isRemovable(DefI))
1809 continue;
1810
1811 // NOTE: Currently eliminating writes at the end of a function is
1812 // limited to MemoryDefs with a single underlying object, to save
1813 // compile-time. In practice it appears the case with multiple
1814 // underlying objects is very uncommon. If it turns out to be important,
1815 // we can use getUnderlyingObjects here instead.
1816 const Value *UO = getUnderlyingObject(DefLoc->Ptr);
1817 if (!isInvisibleToCallerAfterRet(UO))
1818 continue;
1819
1820 if (isWriteAtEndOfFunction(Def)) {
1821 // See through pointer-to-pointer bitcasts
1822 LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end "
1823 "of the function\n");
1825 ++NumFastStores;
1826 MadeChange = true;
1827 }
1828 }
1829 } while (ShouldIterateEndOfFunctionDSE);
1830 return MadeChange;
1831 }
1832
1833 /// If we have a zero initializing memset following a call to malloc,
1834 /// try folding it into a call to calloc.
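// Illustrative sketch (an assumption, not from the original source):
//   %p = call ptr @malloc(i64 %n)
//   call void @llvm.memset.p0.i64(ptr %p, i8 0, i64 %n, i1 false)
// becomes
//   %p = call ptr @calloc(i64 1, i64 %n)
// provided the memset is removable, its length matches the malloc size,
// and nothing may modify the allocated memory between the two calls.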
1835 bool tryFoldIntoCalloc(MemoryDef *Def, const Value *DefUO) {
1836 Instruction *DefI = Def->getMemoryInst();
1837 MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
1838 if (!MemSet)
1839 // TODO: Could handle zero store to small allocation as well.
1840 return false;
1841 Constant *StoredConstant = dyn_cast<Constant>(MemSet->getValue());
1842 if (!StoredConstant || !StoredConstant->isNullValue())
1843 return false;
1844
1845 if (!isRemovable(DefI))
1846 // The memset might be volatile.
1847 return false;
1848
1849 if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
1850 F.hasFnAttribute(Attribute::SanitizeAddress) ||
1851 F.hasFnAttribute(Attribute::SanitizeHWAddress) ||
1852 F.getName() == "calloc")
1853 return false;
1854 auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUO));
1855 if (!Malloc)
1856 return false;
1857 auto *InnerCallee = Malloc->getCalledFunction();
1858 if (!InnerCallee)
1859 return false;
1860 LibFunc Func;
1861 if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) ||
1862 Func != LibFunc_malloc)
1863 return false;
1864
1865 auto shouldCreateCalloc = [](CallInst *Malloc, CallInst *Memset) {
1866 // Check for a "br (icmp eq ptr, null), truebb, falsebb" pattern at the
1867 // end of the malloc block.
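// Illustrative IR shape (an assumption, not from the original source):
//   malloc.bb:
//     %p = call ptr @malloc(i64 %n)
//     %is.null = icmp eq ptr %p, null
//     br i1 %is.null, label %fail.bb, label %memset.bb
//   memset.bb:
//     call void @llvm.memset.p0.i64(ptr %p, i8 0, i64 %n, i1 false)
// i.e. the memset only runs on the non-null edge, which is what the
// ICMP_EQ / FalseBB check below requires.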
1868 auto *MallocBB = Malloc->getParent(),
1869 *MemsetBB = Memset->getParent();
1870 if (MallocBB == MemsetBB)
1871 return true;
1872 auto *Ptr = Memset->getArgOperand(0);
1873 auto *TI = MallocBB->getTerminator();
1874 ICmpInst::Predicate Pred;
1875 BasicBlock *TrueBB, *FalseBB;
1876 if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Ptr), m_Zero()), TrueBB,
1877 FalseBB)))
1878 return false;
1879 if (Pred != ICmpInst::ICMP_EQ || MemsetBB != FalseBB)
1880 return false;
1881 return true;
1882 };
1883
1884 if (Malloc->getOperand(0) != MemSet->getLength())
1885 return false;
1886 if (!shouldCreateCalloc(Malloc, MemSet) ||
1887 !DT.dominates(Malloc, MemSet) ||
1888 !memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT))
1889 return false;
1890 IRBuilder<> IRB(Malloc);
1891 Type *SizeTTy = Malloc->getArgOperand(0)->getType();
1892 auto *Calloc = emitCalloc(ConstantInt::get(SizeTTy, 1),
1893 Malloc->getArgOperand(0), IRB, TLI);
1894 if (!Calloc)
1895 return false;
1896 MemorySSAUpdater Updater(&MSSA);
1897 auto *LastDef =
1898 cast<MemoryDef>(Updater.getMemorySSA()->getMemoryAccess(Malloc));
1899 auto *NewAccess =
1900 Updater.createMemoryAccessAfter(cast<Instruction>(Calloc), LastDef,
1901 LastDef);
1902 auto *NewAccessMD = cast<MemoryDef>(NewAccess);
1903 Updater.insertDef(NewAccessMD, /*RenameUses=*/true);
1904 Updater.removeMemoryAccess(Malloc);
1905 Malloc->replaceAllUsesWith(Calloc);
1906 Malloc->eraseFromParent();
1907 return true;
1908 }
1909
1910 /// \returns true if \p Def is a no-op store, either because it
1911 /// directly stores back a loaded value or stores zero to a calloced object.
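// Illustrative sketch (an assumption, not from the original source):
//   %v = load i32, ptr %p
//   ...                        ; nothing may clobber %p in between
//   store i32 %v, ptr %p       ; stores back the value just loaded -> no-op
// Similarly, storing zero to memory known to be zero-initialized by its
// allocation (e.g. a calloc'ed object) can be dropped when nothing may have
// modified that memory in between.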
1912 bool storeIsNoop(MemoryDef *Def, const Value *DefUO) {
1913 Instruction *DefI = Def->getMemoryInst();
1914 StoreInst *Store = dyn_cast<StoreInst>(DefI);
1915 MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
1916 Constant *StoredConstant = nullptr;
1917 if (Store)
1918 StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
1919 else if (MemSet)
1920 StoredConstant = dyn_cast<Constant>(MemSet->getValue());
1921 else
1922 return false;
1923
1924 if (!isRemovable(DefI))
1925 return false;
1926
1927 if (StoredConstant) {
1928 Constant *InitC =
1929 getInitialValueOfAllocation(DefUO, &TLI, StoredConstant->getType());
1930 // If the clobbering access is LiveOnEntry, no instructions between them
1931 // can modify the memory location.
1932 if (InitC && InitC == StoredConstant)
1933 return MSSA.isLiveOnEntryDef(
1934 MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def, BatchAA));
1935 }
1936
1937 if (!Store)
1938 return false;
1939
1940 if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
1941 if (LoadI->getPointerOperand() == Store->getOperand(1)) {
1942 // Get the defining access for the load.
1943 auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
1944 // Fast path: the defining accesses are the same.
1945 if (LoadAccess == Def->getDefiningAccess())
1946 return true;
1947
1948 // Look through phi accesses. Recursively scan all phi accesses by
1949 // adding them to a worklist. Bail when we run into a memory def that
1950 // does not match LoadAccess.
1951 SetVector<MemoryAccess *> ToCheck;
1952 MemoryAccess *Current =
1953 MSSA.getWalker()->getClobberingMemoryAccess(Def, BatchAA);
1954 // We don't want to bail when we run into the store memory def. But,
1955 // the phi access may point to it. So, pretend like we've already
1956 // checked it.
1957 ToCheck.insert(Def);
1958 ToCheck.insert(Current);
1959 // Start at current (1) to simulate already having checked Def.
1960 for (unsigned I = 1; I < ToCheck.size(); ++I) {
1961 Current = ToCheck[I];
1962 if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
1963 // Check all the operands.
1964 for (auto &Use : PhiAccess->incoming_values())
1965 ToCheck.insert(cast<MemoryAccess>(&Use));
1966 continue;
1967 }
1968
1969 // If we found a memory def, bail. This happens when we have an
1970 // unrelated write in between an otherwise noop store.
1971 assert(isa<MemoryDef>(Current) &&
1972 "Only MemoryDefs should reach here.");
1973 // TODO: Skip no alias MemoryDefs that have no aliasing reads.
1974 // We are searching for the definition of the store's destination.
1975 // So, if that is the same definition as the load, then this is a
1976 // noop. Otherwise, fail.
1977 if (LoadAccess != Current)
1978 return false;
1979 }
1980 return true;
1981 }
1982 }
1983
1984 return false;
1985 }
1986
1987 bool removePartiallyOverlappedStores(InstOverlapIntervalsTy &IOL) {
1988 bool Changed = false;
1989 for (auto OI : IOL) {
1990 Instruction *DeadI = OI.first;
1991 MemoryLocation Loc = *getLocForWrite(DeadI);
1992 assert(isRemovable(DeadI) && "Expect only removable instruction");
1993
1994 const Value *Ptr = Loc.Ptr->stripPointerCasts();
1995 int64_t DeadStart = 0;
1996 uint64_t DeadSize = Loc.Size.getValue();
1997 GetPointerBaseWithConstantOffset(Ptr, DeadStart, DL);
1998 OverlapIntervalsTy &IntervalMap = OI.second;
1999 Changed |= tryToShortenEnd(DeadI, IntervalMap, DeadStart, DeadSize);
2000 if (IntervalMap.empty())
2001 continue;
2002 Changed |= tryToShortenBegin(DeadI, IntervalMap, DeadStart, DeadSize);
2003 }
2004 return Changed;
2005 }
2006
2007 /// Eliminates writes to locations where the value that is being written
2008 /// is already stored at the same location.
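// Illustrative sketch (an assumption, not from the original source):
//   call void @llvm.memset.p0.i64(ptr %p, i8 0, i64 16, i1 false)
//   store i8 0, ptr %p         ; writes a byte the memset already wrote
// If the memset is the store's (optimized) defining access, the store is
// redundant and is deleted below.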
2009 bool eliminateRedundantStoresOfExistingValues() {
2010 bool MadeChange = false;
2011 LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs that write the "
2012 "already existing value\n");
2013 for (auto *Def : MemDefs) {
2014 if (SkipStores.contains(Def) || MSSA.isLiveOnEntryDef(Def))
2015 continue;
2016
2017 Instruction *DefInst = Def->getMemoryInst();
2018 auto MaybeDefLoc = getLocForWrite(DefInst);
2019 if (!MaybeDefLoc || !isRemovable(DefInst))
2020 continue;
2021
2022 MemoryDef *UpperDef;
2023 // To conserve compile-time, we avoid walking to the next clobbering def.
2024 // Instead, we just try to get the optimized access, if it exists. DSE
2025 // will try to optimize defs during the earlier traversal.
2026 if (Def->isOptimized())
2027 UpperDef = dyn_cast<MemoryDef>(Def->getOptimized());
2028 else
2029 UpperDef = dyn_cast<MemoryDef>(Def->getDefiningAccess());
2030 if (!UpperDef || MSSA.isLiveOnEntryDef(UpperDef))
2031 continue;
2032
2033 Instruction *UpperInst = UpperDef->getMemoryInst();
2034 auto IsRedundantStore = [&]() {
2035 if (DefInst->isIdenticalTo(UpperInst))
2036 return true;
2037 if (auto *MemSetI = dyn_cast<MemSetInst>(UpperInst)) {
2038 if (auto *SI = dyn_cast<StoreInst>(DefInst)) {
2039 // MemSetInst must have a write location.
2040 MemoryLocation UpperLoc = *getLocForWrite(UpperInst);
2041 int64_t InstWriteOffset = 0;
2042 int64_t DepWriteOffset = 0;
2043 auto OR = isOverwrite(UpperInst, DefInst, UpperLoc, *MaybeDefLoc,
2044 InstWriteOffset, DepWriteOffset);
2045 Value *StoredByte = isBytewiseValue(SI->getValueOperand(), DL);
2046 return StoredByte && StoredByte == MemSetI->getOperand(1) &&
2047 OR == OW_Complete;
2048 }
2049 }
2050 return false;
2051 };
2052
2053 if (!IsRedundantStore() || isReadClobber(*MaybeDefLoc, DefInst))
2054 continue;
2055 LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *DefInst
2056 << '\n');
2057 deleteDeadInstruction(DefInst);
2058 NumRedundantStores++;
2059 MadeChange = true;
2060 }
2061 return MadeChange;
2062 }
2063};
2064
2065static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
2066 DominatorTree &DT, PostDominatorTree &PDT,
2067 AssumptionCache &AC,
2068 const TargetLibraryInfo &TLI,
2069 const LoopInfo &LI) {
2070 bool MadeChange = false;
2071
2072 MSSA.ensureOptimizedUses();
2073 DSEState State(F, AA, MSSA, DT, PDT, AC, TLI, LI);
2074 // For each store:
2075 for (unsigned I = 0; I < State.MemDefs.size(); I++) {
2076 MemoryDef *KillingDef = State.MemDefs[I];
2077 if (State.SkipStores.count(KillingDef))
2078 continue;
2079 Instruction *KillingI = KillingDef->getMemoryInst();
2080
2081 std::optional<MemoryLocation> MaybeKillingLoc;
2082 if (State.isMemTerminatorInst(KillingI)) {
2083 if (auto KillingLoc = State.getLocForTerminator(KillingI))
2084 MaybeKillingLoc = KillingLoc->first;
2085 } else {
2086 MaybeKillingLoc = State.getLocForWrite(KillingI);
2087 }
2088
2089 if (!MaybeKillingLoc) {
2090 LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
2091 << *KillingI << "\n");
2092 continue;
2093 }
2094 MemoryLocation KillingLoc = *MaybeKillingLoc;
2095 assert(KillingLoc.Ptr && "KillingLoc should not be null");
2096 const Value *KillingUndObj = getUnderlyingObject(KillingLoc.Ptr);
2097 LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
2098 << *KillingDef << " (" << *KillingI << ")\n");
2099
2100 unsigned ScanLimit = MemorySSAScanLimit;
2101 unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
2102 unsigned PartialLimit = MemorySSAPartialStoreLimit;
2103 // Worklist of MemoryAccesses that may be killed by KillingDef.
2104 SmallSetVector<MemoryAccess *, 8> ToCheck;
2105 ToCheck.insert(KillingDef->getDefiningAccess());
2106
2107 bool Shortend = false;
2108 bool IsMemTerm = State.isMemTerminatorInst(KillingI);
2109 // Check if MemoryAccesses in the worklist are killed by KillingDef.
2110 for (unsigned I = 0; I < ToCheck.size(); I++) {
2111 MemoryAccess *Current = ToCheck[I];
2112 if (State.SkipStores.count(Current))
2113 continue;
2114
2115 std::optional<MemoryAccess *> MaybeDeadAccess = State.getDomMemoryDef(
2116 KillingDef, Current, KillingLoc, KillingUndObj, ScanLimit,
2117 WalkerStepLimit, IsMemTerm, PartialLimit);
2118
2119 if (!MaybeDeadAccess) {
2120 LLVM_DEBUG(dbgs() << " finished walk\n");
2121 continue;
2122 }
2123
2124 MemoryAccess *DeadAccess = *MaybeDeadAccess;
2125 LLVM_DEBUG(dbgs() << " Checking if we can kill " << *DeadAccess);
2126 if (isa<MemoryPhi>(DeadAccess)) {
2127 LLVM_DEBUG(dbgs() << "\n ... adding incoming values to worklist\n");
2128 for (Value *V : cast<MemoryPhi>(DeadAccess)->incoming_values()) {
2129 MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
2130 BasicBlock *IncomingBlock = IncomingAccess->getBlock();
2131 BasicBlock *PhiBlock = DeadAccess->getBlock();
2132
2133 // We only consider incoming MemoryAccesses that come before the
2134 // MemoryPhi. Otherwise we could discover candidates that do not
2135 // strictly dominate our starting def.
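// Illustrative sketch (an assumption, not from the original source): for a
// loop-header phi such as
//   %phi = MemoryPhi({preheader, %pre}, {latch, %inloop})
// the preheader has a higher post-order number than the header, so %pre is
// added to the worklist, while the back-edge value %inloop (lower post-order
// number) is skipped: it cannot strictly dominate the starting def.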
2136 if (State.PostOrderNumbers[IncomingBlock] >
2137 State.PostOrderNumbers[PhiBlock])
2138 ToCheck.insert(IncomingAccess);
2139 }
2140 continue;
2141 }
2142 auto *DeadDefAccess = cast<MemoryDef>(DeadAccess);
2143 Instruction *DeadI = DeadDefAccess->getMemoryInst();
2144 LLVM_DEBUG(dbgs() << " (" << *DeadI << ")\n");
2145 ToCheck.insert(DeadDefAccess->getDefiningAccess());
2146 NumGetDomMemoryDefPassed++;
2147
2148 if (!DebugCounter::shouldExecute(MemorySSACounter))
2149 continue;
2150
2151 MemoryLocation DeadLoc = *State.getLocForWrite(DeadI);
2152
2153 if (IsMemTerm) {
2154 const Value *DeadUndObj = getUnderlyingObject(DeadLoc.Ptr);
2155 if (KillingUndObj != DeadUndObj)
2156 continue;
2157 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DeadI
2158 << "\n KILLER: " << *KillingI << '\n');
2159 State.deleteDeadInstruction(DeadI);
2160 ++NumFastStores;
2161 MadeChange = true;
2162 } else {
2163 // Check if DeadI overwrites KillingI.
2164 int64_t KillingOffset = 0;
2165 int64_t DeadOffset = 0;
2166 OverwriteResult OR = State.isOverwrite(
2167 KillingI, DeadI, KillingLoc, DeadLoc, KillingOffset, DeadOffset);
2168 if (OR == OW_MaybePartial) {
2169 auto Iter = State.IOLs.insert(
2170 std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
2171 DeadI->getParent(), InstOverlapIntervalsTy()));
2172 auto &IOL = Iter.first->second;
2173 OR = isPartialOverwrite(KillingLoc, DeadLoc, KillingOffset,
2174 DeadOffset, DeadI, IOL);
2175 }
2176
2177 if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
2178 auto *DeadSI = dyn_cast<StoreInst>(DeadI);
2179 auto *KillingSI = dyn_cast<StoreInst>(KillingI);
2180 // We are re-using tryToMergePartialOverlappingStores, which requires
2181 // DeadSI to dominate KillingSI.
2182 // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
2183 if (DeadSI && KillingSI && DT.dominates(DeadSI, KillingSI)) {
2184 if (Constant *Merged = tryToMergePartialOverlappingStores(
2185 KillingSI, DeadSI, KillingOffset, DeadOffset, State.DL,
2186 State.BatchAA, &DT)) {
2187
2188 // Update stored value of earlier store to merged constant.
2189 DeadSI->setOperand(0, Merged);
2190 ++NumModifiedStores;
2191 MadeChange = true;
2192
2193 Shortend = true;
2194 // Remove killing store and remove any outstanding overlap
2195 // intervals for the updated store.
2196 State.deleteDeadInstruction(KillingSI);
2197 auto I = State.IOLs.find(DeadSI->getParent());
2198 if (I != State.IOLs.end())
2199 I->second.erase(DeadSI);
2200 break;
2201 }
2202 }
2203 }
2204
2205 if (OR == OW_Complete) {
2206 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DeadI
2207 << "\n KILLER: " << *KillingI << '\n');
2208 State.deleteDeadInstruction(DeadI);
2209 ++NumFastStores;
2210 MadeChange = true;
2211 }
2212 }
2213 }
2214
2215 // Check if the store is a no-op.
2216 if (!Shortend && State.storeIsNoop(KillingDef, KillingUndObj)) {
2217 LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *KillingI
2218 << '\n');
2219 State.deleteDeadInstruction(KillingI);
2220 NumRedundantStores++;
2221 MadeChange = true;
2222 continue;
2223 }
2224
2225 // Can we form a calloc from a memset/malloc pair?
2226 if (!Shortend && State.tryFoldIntoCalloc(KillingDef, KillingUndObj)) {
2227 LLVM_DEBUG(dbgs() << "DSE: Remove memset after forming calloc:\n"
2228 << " DEAD: " << *KillingI << '\n');
2229 State.deleteDeadInstruction(KillingI);
2230 MadeChange = true;
2231 continue;
2232 }
2233 }
2234
2236 for (auto &KV : State.IOLs)
2237 MadeChange |= State.removePartiallyOverlappedStores(KV.second);
2238
2239 MadeChange |= State.eliminateRedundantStoresOfExistingValues();
2240 MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
2241 return MadeChange;
2242}
2243} // end anonymous namespace
2244
2245//===----------------------------------------------------------------------===//
2246// DSE Pass
2247//===----------------------------------------------------------------------===//
2248 PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
2249 AliasAnalysis &AA = AM.getResult<AAManager>(F);
2250 const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
2251 DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
2252 MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2253 PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
2254 AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F);
2255 LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
2256
2257 bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, AC, TLI, LI);
2258
2259#ifdef LLVM_ENABLE_STATS
2260 if (AreStatisticsEnabled())
2261 for (auto &I : instructions(F))
2262 NumRemainingStores += isa<StoreInst>(&I);
2263#endif
2264
2265 if (!Changed)
2266 return PreservedAnalyses::all();
2267
2268 PreservedAnalyses PA;
2269 PA.preserveSet<CFGAnalyses>();
2270 PA.preserve<MemorySSAAnalysis>();
2271 PA.preserve<LoopAnalysis>();
2272 return PA;
2273}