// llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp (LLVM 18.0.0git)
//===- LoopLoadElimination.cpp - Loop Load Elimination Pass ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a loop-aware load elimination pass.
//
// It uses LoopAccessAnalysis to identify loop-carried dependences with a
// distance of one between stores and loads.  These form the candidates for the
// transformation.  The source value of each store is then propagated to the
// user of the corresponding load.  This makes the load dead.
//
// The pass can also version the loop and add memchecks in order to prove that
// may-aliasing stores can't change the value in memory before it's read by the
// load.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopLoadElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <forward_list>
#include <tuple>
#include <utility>

64using namespace llvm;
65
66#define LLE_OPTION "loop-load-elim"
67#define DEBUG_TYPE LLE_OPTION
68
70 "runtime-check-per-loop-load-elim", cl::Hidden,
71 cl::desc("Max number of memchecks allowed per eliminated load on average"),
72 cl::init(1));
73
75 "loop-load-elimination-scev-check-threshold", cl::init(8), cl::Hidden,
76 cl::desc("The maximum number of SCEV checks allowed for Loop "
77 "Load Elimination"));
78
79STATISTIC(NumLoopLoadEliminted, "Number of loads eliminated by LLE");
80
81namespace {
82
83/// Represent a store-to-forwarding candidate.
84struct StoreToLoadForwardingCandidate {
85 LoadInst *Load;
86 StoreInst *Store;
87
88 StoreToLoadForwardingCandidate(LoadInst *Load, StoreInst *Store)
89 : Load(Load), Store(Store) {}
90
91 /// Return true if the dependence from the store to the load has an
92 /// absolute distance of one.
93 /// E.g. A[i+1] = A[i] (or A[i-1] = A[i] for descending loop)
94 bool isDependenceDistanceOfOne(PredicatedScalarEvolution &PSE,
95 Loop *L) const {
96 Value *LoadPtr = Load->getPointerOperand();
97 Value *StorePtr = Store->getPointerOperand();
98 Type *LoadType = getLoadStoreType(Load);
99 auto &DL = Load->getParent()->getModule()->getDataLayout();
100
101 assert(LoadPtr->getType()->getPointerAddressSpace() ==
102 StorePtr->getType()->getPointerAddressSpace() &&
103 DL.getTypeSizeInBits(LoadType) ==
104 DL.getTypeSizeInBits(getLoadStoreType(Store)) &&
105 "Should be a known dependence");
106
107 int64_t StrideLoad = getPtrStride(PSE, LoadType, LoadPtr, L).value_or(0);
108 int64_t StrideStore = getPtrStride(PSE, LoadType, StorePtr, L).value_or(0);
109 if (!StrideLoad || !StrideStore || StrideLoad != StrideStore)
110 return false;
111
112 // TODO: This check for stride values other than 1 and -1 can be eliminated.
113 // However, doing so may cause the LoopAccessAnalysis to overcompensate,
114 // generating numerous non-wrap runtime checks that may undermine the
115 // benefits of load elimination. To safely implement support for non-unit
116 // strides, we would need to ensure either that the processed case does not
117 // require these additional checks, or improve the LAA to handle them more
118 // efficiently, or potentially both.
119 if (std::abs(StrideLoad) != 1)
120 return false;
121
122 unsigned TypeByteSize = DL.getTypeAllocSize(const_cast<Type *>(LoadType));
123
124 auto *LoadPtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(LoadPtr));
125 auto *StorePtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(StorePtr));
126
127 // We don't need to check non-wrapping here because forward/backward
128 // dependence wouldn't be valid if these weren't monotonic accesses.
129 auto *Dist = cast<SCEVConstant>(
130 PSE.getSE()->getMinusSCEV(StorePtrSCEV, LoadPtrSCEV));
131 const APInt &Val = Dist->getAPInt();
132 return Val == TypeByteSize * StrideLoad;
133 }
134
135 Value *getLoadPtr() const { return Load->getPointerOperand(); }
136
137#ifndef NDEBUG
139 const StoreToLoadForwardingCandidate &Cand) {
140 OS << *Cand.Store << " -->\n";
141 OS.indent(2) << *Cand.Load << "\n";
142 return OS;
143 }
144#endif
145};
146
147} // end anonymous namespace
148
149/// Check if the store dominates all latches, so as long as there is no
150/// intervening store this value will be loaded in the next iteration.
151static bool doesStoreDominatesAllLatches(BasicBlock *StoreBlock, Loop *L,
152 DominatorTree *DT) {
154 L->getLoopLatches(Latches);
155 return llvm::all_of(Latches, [&](const BasicBlock *Latch) {
156 return DT->dominates(StoreBlock, Latch);
157 });
158}
159
160/// Return true if the load is not executed on all paths in the loop.
161static bool isLoadConditional(LoadInst *Load, Loop *L) {
162 return Load->getParent() != L->getHeader();
163}
164
165namespace {
166
167/// The per-loop class that does most of the work.
168class LoadEliminationForLoop {
169public:
170 LoadEliminationForLoop(Loop *L, LoopInfo *LI, const LoopAccessInfo &LAI,
173 : L(L), LI(LI), LAI(LAI), DT(DT), BFI(BFI), PSI(PSI), PSE(LAI.getPSE()) {}
174
175 /// Look through the loop-carried and loop-independent dependences in
176 /// this loop and find store->load dependences.
177 ///
178 /// Note that no candidate is returned if LAA has failed to analyze the loop
179 /// (e.g. if it's not bottom-tested, contains volatile memops, etc.)
180 std::forward_list<StoreToLoadForwardingCandidate>
181 findStoreToLoadDependences(const LoopAccessInfo &LAI) {
182 std::forward_list<StoreToLoadForwardingCandidate> Candidates;
183
184 const auto *Deps = LAI.getDepChecker().getDependences();
185 if (!Deps)
186 return Candidates;
187
188 // Find store->load dependences (consequently true dep). Both lexically
189 // forward and backward dependences qualify. Disqualify loads that have
190 // other unknown dependences.
191
192 SmallPtrSet<Instruction *, 4> LoadsWithUnknownDepedence;
193
194 for (const auto &Dep : *Deps) {
195 Instruction *Source = Dep.getSource(LAI);
196 Instruction *Destination = Dep.getDestination(LAI);
197
199 if (isa<LoadInst>(Source))
200 LoadsWithUnknownDepedence.insert(Source);
201 if (isa<LoadInst>(Destination))
202 LoadsWithUnknownDepedence.insert(Destination);
203 continue;
204 }
205
206 if (Dep.isBackward())
207 // Note that the designations source and destination follow the program
208 // order, i.e. source is always first. (The direction is given by the
209 // DepType.)
210 std::swap(Source, Destination);
211 else
212 assert(Dep.isForward() && "Needs to be a forward dependence");
213
214 auto *Store = dyn_cast<StoreInst>(Source);
215 if (!Store)
216 continue;
217 auto *Load = dyn_cast<LoadInst>(Destination);
218 if (!Load)
219 continue;
220
221 // Only propagate if the stored values are bit/pointer castable.
224 Store->getParent()->getModule()->getDataLayout()))
225 continue;
226
227 Candidates.emplace_front(Load, Store);
228 }
229
230 if (!LoadsWithUnknownDepedence.empty())
231 Candidates.remove_if([&](const StoreToLoadForwardingCandidate &C) {
232 return LoadsWithUnknownDepedence.count(C.Load);
233 });
234
235 return Candidates;
236 }
237
238 /// Return the index of the instruction according to program order.
239 unsigned getInstrIndex(Instruction *Inst) {
240 auto I = InstOrder.find(Inst);
241 assert(I != InstOrder.end() && "No index for instruction");
242 return I->second;
243 }
244
245 /// If a load has multiple candidates associated (i.e. different
246 /// stores), it means that it could be forwarding from multiple stores
247 /// depending on control flow. Remove these candidates.
248 ///
249 /// Here, we rely on LAA to include the relevant loop-independent dependences.
250 /// LAA is known to omit these in the very simple case when the read and the
251 /// write within an alias set always takes place using the *same* pointer.
252 ///
253 /// However, we know that this is not the case here, i.e. we can rely on LAA
254 /// to provide us with loop-independent dependences for the cases we're
255 /// interested. Consider the case for example where a loop-independent
256 /// dependece S1->S2 invalidates the forwarding S3->S2.
257 ///
258 /// A[i] = ... (S1)
259 /// ... = A[i] (S2)
260 /// A[i+1] = ... (S3)
261 ///
262 /// LAA will perform dependence analysis here because there are two
263 /// *different* pointers involved in the same alias set (&A[i] and &A[i+1]).
264 void removeDependencesFromMultipleStores(
265 std::forward_list<StoreToLoadForwardingCandidate> &Candidates) {
266 // If Store is nullptr it means that we have multiple stores forwarding to
267 // this store.
268 using LoadToSingleCandT =
270 LoadToSingleCandT LoadToSingleCand;
271
272 for (const auto &Cand : Candidates) {
273 bool NewElt;
274 LoadToSingleCandT::iterator Iter;
275
276 std::tie(Iter, NewElt) =
277 LoadToSingleCand.insert(std::make_pair(Cand.Load, &Cand));
278 if (!NewElt) {
279 const StoreToLoadForwardingCandidate *&OtherCand = Iter->second;
280 // Already multiple stores forward to this load.
281 if (OtherCand == nullptr)
282 continue;
283
284 // Handle the very basic case when the two stores are in the same block
285 // so deciding which one forwards is easy. The later one forwards as
286 // long as they both have a dependence distance of one to the load.
287 if (Cand.Store->getParent() == OtherCand->Store->getParent() &&
288 Cand.isDependenceDistanceOfOne(PSE, L) &&
289 OtherCand->isDependenceDistanceOfOne(PSE, L)) {
290 // They are in the same block, the later one will forward to the load.
291 if (getInstrIndex(OtherCand->Store) < getInstrIndex(Cand.Store))
292 OtherCand = &Cand;
293 } else
294 OtherCand = nullptr;
295 }
296 }
297
298 Candidates.remove_if([&](const StoreToLoadForwardingCandidate &Cand) {
299 if (LoadToSingleCand[Cand.Load] != &Cand) {
300 LLVM_DEBUG(
301 dbgs() << "Removing from candidates: \n"
302 << Cand
303 << " The load may have multiple stores forwarding to "
304 << "it\n");
305 return true;
306 }
307 return false;
308 });
309 }
310
311 /// Given two pointers operations by their RuntimePointerChecking
312 /// indices, return true if they require an alias check.
313 ///
314 /// We need a check if one is a pointer for a candidate load and the other is
315 /// a pointer for a possibly intervening store.
316 bool needsChecking(unsigned PtrIdx1, unsigned PtrIdx2,
317 const SmallPtrSetImpl<Value *> &PtrsWrittenOnFwdingPath,
318 const SmallPtrSetImpl<Value *> &CandLoadPtrs) {
319 Value *Ptr1 =
321 Value *Ptr2 =
323 return ((PtrsWrittenOnFwdingPath.count(Ptr1) && CandLoadPtrs.count(Ptr2)) ||
324 (PtrsWrittenOnFwdingPath.count(Ptr2) && CandLoadPtrs.count(Ptr1)));
325 }
326
327 /// Return pointers that are possibly written to on the path from a
328 /// forwarding store to a load.
329 ///
330 /// These pointers need to be alias-checked against the forwarding candidates.
331 SmallPtrSet<Value *, 4> findPointersWrittenOnForwardingPath(
333 // From FirstStore to LastLoad neither of the elimination candidate loads
334 // should overlap with any of the stores.
335 //
336 // E.g.:
337 //
338 // st1 C[i]
339 // ld1 B[i] <-------,
340 // ld0 A[i] <----, | * LastLoad
341 // ... | |
342 // st2 E[i] | |
343 // st3 B[i+1] -- | -' * FirstStore
344 // st0 A[i+1] ---'
345 // st4 D[i]
346 //
347 // st0 forwards to ld0 if the accesses in st4 and st1 don't overlap with
348 // ld0.
349
350 LoadInst *LastLoad =
351 std::max_element(Candidates.begin(), Candidates.end(),
352 [&](const StoreToLoadForwardingCandidate &A,
353 const StoreToLoadForwardingCandidate &B) {
354 return getInstrIndex(A.Load) < getInstrIndex(B.Load);
355 })
356 ->Load;
357 StoreInst *FirstStore =
358 std::min_element(Candidates.begin(), Candidates.end(),
359 [&](const StoreToLoadForwardingCandidate &A,
360 const StoreToLoadForwardingCandidate &B) {
361 return getInstrIndex(A.Store) <
362 getInstrIndex(B.Store);
363 })
364 ->Store;
365
366 // We're looking for stores after the first forwarding store until the end
367 // of the loop, then from the beginning of the loop until the last
368 // forwarded-to load. Collect the pointer for the stores.
369 SmallPtrSet<Value *, 4> PtrsWrittenOnFwdingPath;
370
371 auto InsertStorePtr = [&](Instruction *I) {
372 if (auto *S = dyn_cast<StoreInst>(I))
373 PtrsWrittenOnFwdingPath.insert(S->getPointerOperand());
374 };
375 const auto &MemInstrs = LAI.getDepChecker().getMemoryInstructions();
376 std::for_each(MemInstrs.begin() + getInstrIndex(FirstStore) + 1,
377 MemInstrs.end(), InsertStorePtr);
378 std::for_each(MemInstrs.begin(), &MemInstrs[getInstrIndex(LastLoad)],
379 InsertStorePtr);
380
381 return PtrsWrittenOnFwdingPath;
382 }
383
384 /// Determine the pointer alias checks to prove that there are no
385 /// intervening stores.
388
389 SmallPtrSet<Value *, 4> PtrsWrittenOnFwdingPath =
390 findPointersWrittenOnForwardingPath(Candidates);
391
392 // Collect the pointers of the candidate loads.
393 SmallPtrSet<Value *, 4> CandLoadPtrs;
394 for (const auto &Candidate : Candidates)
395 CandLoadPtrs.insert(Candidate.getLoadPtr());
396
397 const auto &AllChecks = LAI.getRuntimePointerChecking()->getChecks();
399
400 copy_if(AllChecks, std::back_inserter(Checks),
401 [&](const RuntimePointerCheck &Check) {
402 for (auto PtrIdx1 : Check.first->Members)
403 for (auto PtrIdx2 : Check.second->Members)
404 if (needsChecking(PtrIdx1, PtrIdx2, PtrsWrittenOnFwdingPath,
405 CandLoadPtrs))
406 return true;
407 return false;
408 });
409
410 LLVM_DEBUG(dbgs() << "\nPointer Checks (count: " << Checks.size()
411 << "):\n");
413
414 return Checks;
415 }
416
417 /// Perform the transformation for a candidate.
418 void
419 propagateStoredValueToLoadUsers(const StoreToLoadForwardingCandidate &Cand,
420 SCEVExpander &SEE) {
421 // loop:
422 // %x = load %gep_i
423 // = ... %x
424 // store %y, %gep_i_plus_1
425 //
426 // =>
427 //
428 // ph:
429 // %x.initial = load %gep_0
430 // loop:
431 // %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
432 // %x = load %gep_i <---- now dead
433 // = ... %x.storeforward
434 // store %y, %gep_i_plus_1
435
436 Value *Ptr = Cand.Load->getPointerOperand();
437 auto *PtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(Ptr));
438 auto *PH = L->getLoopPreheader();
439 assert(PH && "Preheader should exist!");
440 Value *InitialPtr = SEE.expandCodeFor(PtrSCEV->getStart(), Ptr->getType(),
441 PH->getTerminator());
442 Value *Initial = new LoadInst(
443 Cand.Load->getType(), InitialPtr, "load_initial",
444 /* isVolatile */ false, Cand.Load->getAlign(), PH->getTerminator());
445
446 PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded");
447 PHI->insertBefore(L->getHeader()->begin());
448 PHI->addIncoming(Initial, PH);
449
450 Type *LoadType = Initial->getType();
451 Type *StoreType = Cand.Store->getValueOperand()->getType();
452 auto &DL = Cand.Load->getParent()->getModule()->getDataLayout();
453 (void)DL;
454
455 assert(DL.getTypeSizeInBits(LoadType) == DL.getTypeSizeInBits(StoreType) &&
456 "The type sizes should match!");
457
458 Value *StoreValue = Cand.Store->getValueOperand();
459 if (LoadType != StoreType)
461 StoreValue, LoadType, "store_forward_cast", Cand.Store);
462
463 PHI->addIncoming(StoreValue, L->getLoopLatch());
464
465 Cand.Load->replaceAllUsesWith(PHI);
466 }
467
468 /// Top-level driver for each loop: find store->load forwarding
469 /// candidates, add run-time checks and perform transformation.
470 bool processLoop() {
471 LLVM_DEBUG(dbgs() << "\nIn \"" << L->getHeader()->getParent()->getName()
472 << "\" checking " << *L << "\n");
473
474 // Look for store-to-load forwarding cases across the
475 // backedge. E.g.:
476 //
477 // loop:
478 // %x = load %gep_i
479 // = ... %x
480 // store %y, %gep_i_plus_1
481 //
482 // =>
483 //
484 // ph:
485 // %x.initial = load %gep_0
486 // loop:
487 // %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
488 // %x = load %gep_i <---- now dead
489 // = ... %x.storeforward
490 // store %y, %gep_i_plus_1
491
492 // First start with store->load dependences.
493 auto StoreToLoadDependences = findStoreToLoadDependences(LAI);
494 if (StoreToLoadDependences.empty())
495 return false;
496
497 // Generate an index for each load and store according to the original
498 // program order. This will be used later.
499 InstOrder = LAI.getDepChecker().generateInstructionOrderMap();
500
501 // To keep things simple for now, remove those where the load is potentially
502 // fed by multiple stores.
503 removeDependencesFromMultipleStores(StoreToLoadDependences);
504 if (StoreToLoadDependences.empty())
505 return false;
506
507 // Filter the candidates further.
509 for (const StoreToLoadForwardingCandidate &Cand : StoreToLoadDependences) {
510 LLVM_DEBUG(dbgs() << "Candidate " << Cand);
511
512 // Make sure that the stored values is available everywhere in the loop in
513 // the next iteration.
514 if (!doesStoreDominatesAllLatches(Cand.Store->getParent(), L, DT))
515 continue;
516
517 // If the load is conditional we can't hoist its 0-iteration instance to
518 // the preheader because that would make it unconditional. Thus we would
519 // access a memory location that the original loop did not access.
520 if (isLoadConditional(Cand.Load, L))
521 continue;
522
523 // Check whether the SCEV difference is the same as the induction step,
524 // thus we load the value in the next iteration.
525 if (!Cand.isDependenceDistanceOfOne(PSE, L))
526 continue;
527
528 assert(isa<SCEVAddRecExpr>(PSE.getSCEV(Cand.Load->getPointerOperand())) &&
529 "Loading from something other than indvar?");
530 assert(
531 isa<SCEVAddRecExpr>(PSE.getSCEV(Cand.Store->getPointerOperand())) &&
532 "Storing to something other than indvar?");
533
534 Candidates.push_back(Cand);
536 dbgs()
537 << Candidates.size()
538 << ". Valid store-to-load forwarding across the loop backedge\n");
539 }
540 if (Candidates.empty())
541 return false;
542
543 // Check intervening may-alias stores. These need runtime checks for alias
544 // disambiguation.
545 SmallVector<RuntimePointerCheck, 4> Checks = collectMemchecks(Candidates);
546
547 // Too many checks are likely to outweigh the benefits of forwarding.
548 if (Checks.size() > Candidates.size() * CheckPerElim) {
549 LLVM_DEBUG(dbgs() << "Too many run-time checks needed.\n");
550 return false;
551 }
552
553 if (LAI.getPSE().getPredicate().getComplexity() >
555 LLVM_DEBUG(dbgs() << "Too many SCEV run-time checks needed.\n");
556 return false;
557 }
558
559 if (!L->isLoopSimplifyForm()) {
560 LLVM_DEBUG(dbgs() << "Loop is not is loop-simplify form");
561 return false;
562 }
563
564 if (!Checks.empty() || !LAI.getPSE().getPredicate().isAlwaysTrue()) {
565 if (LAI.hasConvergentOp()) {
566 LLVM_DEBUG(dbgs() << "Versioning is needed but not allowed with "
567 "convergent calls\n");
568 return false;
569 }
570
571 auto *HeaderBB = L->getHeader();
572 auto *F = HeaderBB->getParent();
573 bool OptForSize = F->hasOptSize() ||
574 llvm::shouldOptimizeForSize(HeaderBB, PSI, BFI,
575 PGSOQueryType::IRPass);
576 if (OptForSize) {
578 dbgs() << "Versioning is needed but not allowed when optimizing "
579 "for size.\n");
580 return false;
581 }
582
583 // Point of no-return, start the transformation. First, version the loop
584 // if necessary.
585
586 LoopVersioning LV(LAI, Checks, L, LI, DT, PSE.getSE());
587 LV.versionLoop();
588
589 // After versioning, some of the candidates' pointers could stop being
590 // SCEVAddRecs. We need to filter them out.
591 auto NoLongerGoodCandidate = [this](
592 const StoreToLoadForwardingCandidate &Cand) {
593 return !isa<SCEVAddRecExpr>(
594 PSE.getSCEV(Cand.Load->getPointerOperand())) ||
595 !isa<SCEVAddRecExpr>(
596 PSE.getSCEV(Cand.Store->getPointerOperand()));
597 };
598 llvm::erase_if(Candidates, NoLongerGoodCandidate);
599 }
600
601 // Next, propagate the value stored by the store to the users of the load.
602 // Also for the first iteration, generate the initial value of the load.
603 SCEVExpander SEE(*PSE.getSE(), L->getHeader()->getModule()->getDataLayout(),
604 "storeforward");
605 for (const auto &Cand : Candidates)
606 propagateStoredValueToLoadUsers(Cand, SEE);
607 NumLoopLoadEliminted += Candidates.size();
608
609 return true;
610 }
611
612private:
613 Loop *L;
614
615 /// Maps the load/store instructions to their index according to
616 /// program order.
618
619 // Analyses used.
620 LoopInfo *LI;
621 const LoopAccessInfo &LAI;
622 DominatorTree *DT;
626};
627
628} // end anonymous namespace
629
631 DominatorTree &DT,
635 LoopAccessInfoManager &LAIs) {
636 // Build up a worklist of inner-loops to transform to avoid iterator
637 // invalidation.
638 // FIXME: This logic comes from other passes that actually change the loop
639 // nest structure. It isn't clear this is necessary (or useful) for a pass
640 // which merely optimizes the use of loads in a loop.
641 SmallVector<Loop *, 8> Worklist;
642
643 bool Changed = false;
644
645 for (Loop *TopLevelLoop : LI)
646 for (Loop *L : depth_first(TopLevelLoop)) {
647 Changed |= simplifyLoop(L, &DT, &LI, SE, AC, /*MSSAU*/ nullptr, false);
648 // We only handle inner-most loops.
649 if (L->isInnermost())
650 Worklist.push_back(L);
651 }
652
653 // Now walk the identified inner loops.
654 for (Loop *L : Worklist) {
655 // Match historical behavior
656 if (!L->isRotatedForm() || !L->getExitingBlock())
657 continue;
658 // The actual work is performed by LoadEliminationForLoop.
659 LoadEliminationForLoop LEL(L, &LI, LAIs.getInfo(*L), &DT, BFI, PSI);
660 Changed |= LEL.processLoop();
661 if (Changed)
662 LAIs.clear();
663 }
664 return Changed;
665}
666
669 auto &LI = AM.getResult<LoopAnalysis>(F);
670 // There are no loops in the function. Return before computing other expensive
671 // analyses.
672 if (LI.empty())
673 return PreservedAnalyses::all();
674 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
675 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
676 auto &AC = AM.getResult<AssumptionAnalysis>(F);
677 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
678 auto *PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
679 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
680 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
682
683 bool Changed = eliminateLoadsAcrossLoops(F, LI, DT, BFI, PSI, &SE, &AC, LAIs);
684
685 if (!Changed)
686 return PreservedAnalyses::all();
687
691 return PA;
692}
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
#define Check(C,...)
This is the interface for a simple mod/ref and alias analysis over globals.
This header provides classes for managing per-loop analyses.
static bool eliminateLoadsAcrossLoops(Function &F, LoopInfo &LI, DominatorTree &DT, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, ScalarEvolution *SE, AssumptionCache *AC, LoopAccessInfoManager &LAIs)
static cl::opt< unsigned > LoadElimSCEVCheckThreshold("loop-load-elimination-scev-check-threshold", cl::init(8), cl::Hidden, cl::desc("The maximum number of SCEV checks allowed for Loop " "Load Elimination"))
static bool isLoadConditional(LoadInst *Load, Loop *L)
Return true if the load is not executed on all paths in the loop.
static bool doesStoreDominatesAllLatches(BasicBlock *StoreBlock, Loop *L, DominatorTree *DT)
Check if the store dominates all latches, so as long as there is no intervening store this value will...
static cl::opt< unsigned > CheckPerElim("runtime-check-per-loop-load-elim", cl::Hidden, cl::desc("Max number of memchecks allowed per eliminated load on average"), cl::init(1))
This header defines the LoopLoadEliminationPass object.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Module.h This file contains the declarations for the Module class.
if(VerifyEach)
This header defines various interfaces for pass management in LLVM.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
This pass exposes codegen information to IR-level passes.
Class for arbitrary precision integers.
Definition: APInt.h:76
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:620
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:774
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
An instruction for reading from memory.
Definition: Instructions.h:177
This analysis provides dependence information for the memory accesses of a loop.
const LoopAccessInfo & getInfo(Loop &L)
Drive the analysis of memory accesses in the loop.
const MemoryDepChecker & getDepChecker() const
the Memory Dependence Checker which can determine the loop-independent and loop-carried dependences b...
const RuntimePointerChecking * getRuntimePointerChecking() const
const PredicatedScalarEvolution & getPSE() const
Used to add runtime SCEV checks.
bool hasConvergentOp() const
Return true if there is a convergent operation in the loop.
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:569
This class emits a version of the loop where run-time checks ensure that may-alias pointers can't ove...
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:47
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
const SmallVectorImpl< Instruction * > & getMemoryInstructions() const
The vector of memory access instructions.
const SmallVectorImpl< Dependence > * getDependences() const
Returns the memory dependences.
DenseMap< Instruction *, unsigned > generateInstructionOrderMap() const
Generate a mapping between the memory instructions and their indices according to program order.
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
Definition: PassManager.h:1058
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:152
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:158
void preserve()
Mark an analysis as preserved.
Definition: PassManager.h:173
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
const PointerInfo & getPointerInfo(unsigned PtrIdx) const
Return PointerInfo for pointer at index PtrIdx.
This class uses information about analyze scalars to rewrite expressions in canonical form.
virtual unsigned getComplexity() const
Returns the estimated complexity of this predicate.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:345
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:384
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:366
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:451
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
An instruction for storing to memory.
Definition: Instructions.h:301
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1727
std::pair< const RuntimeCheckingPtrGroup *, const RuntimeCheckingPtrGroup * > RuntimePointerCheck
A memcheck which made up of a pair of grouped pointers.
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P)
Provide wrappers to std::copy_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1773
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:292
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:2021
iterator_range< df_iterator< T > > depth_first(const T &G)
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define SEE(c)
Definition: regcomp.c:254
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
TrackingVH< Value > PointerValue
Holds the pointer value that we need to check.