// Doxygen listing chrome (not part of the original source file):
// LLVM 17.0.0git — LICM.cpp
// "Go to the documentation of this file."
//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe. This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// Hoisting operations out of loops is a canonicalization transform. It
// enables and simplifies subsequent optimizations in the middle-end.
// Rematerialization of hoisted instructions to reduce register pressure is the
// responsibility of the back-end, which has more accurate information about
// register pressure and also handles other optimizations than LICM that
// increase live-ranges.
//
// This pass uses alias analysis for two purposes:
//
// 1. Moving loop invariant loads and calls out of loops. If we can determine
//    that a load or call inside of a loop never aliases anything stored to,
//    we can hoist it or sink it like any other instruction.
// 2. Scalar Promotion of Memory - If there is a store instruction inside of
//    the loop, we try to move the store to happen AFTER the loop instead of
//    inside of the loop. This can only happen if a few conditions are true:
//    A. The pointer stored through is loop invariant
//    B. There are no stores or loads in the loop which _may_ alias the
//       pointer. There are no calls in the loop which mod/ref the pointer.
//    If these conditions are true, we can promote the loads and stores in the
//    loop of the pointer to use a temporary alloca'd variable. We then use
//    the SSAUpdater to construct the appropriate SSA form for the value.
//
//===----------------------------------------------------------------------===//
38
42#include "llvm/ADT/Statistic.h"
50#include "llvm/Analysis/Loads.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constants.h"
65#include "llvm/IR/DataLayout.h"
68#include "llvm/IR/Dominators.h"
71#include "llvm/IR/IRBuilder.h"
72#include "llvm/IR/LLVMContext.h"
73#include "llvm/IR/Metadata.h"
78#include "llvm/Support/Debug.h"
87#include <algorithm>
88#include <utility>
89using namespace llvm;
90
91namespace llvm {
92class LPMUpdater;
93} // namespace llvm
94
95#define DEBUG_TYPE "licm"
96
97STATISTIC(NumCreatedBlocks, "Number of blocks created");
98STATISTIC(NumClonedBranches, "Number of branches cloned");
99STATISTIC(NumSunk, "Number of instructions sunk out of loop");
100STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
101STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
102STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
103STATISTIC(NumPromotionCandidates, "Number of promotion candidates");
104STATISTIC(NumLoadPromoted, "Number of load-only promotions");
105STATISTIC(NumLoadStorePromoted, "Number of load and store promotions");
106STATISTIC(NumMinMaxHoisted,
107 "Number min/max expressions hoisted out of the loop");
108
109/// Memory promotion is enabled by default.
110static cl::opt<bool>
111 DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
112 cl::desc("Disable memory promotion in LICM pass"));
113
115 "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
116 cl::desc("Enable control flow (and PHI) hoisting in LICM"));
117
118static cl::opt<bool>
119 SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false),
120 cl::desc("Force thread model single in LICM pass"));
121
123 "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
124 cl::desc("Max num uses visited for identifying load "
125 "invariance in loop using invariant start (default = 8)"));
126
127// Experimental option to allow imprecision in LICM in pathological cases, in
128// exchange for faster compile. This is to be removed if MemorySSA starts to
129// address the same issue. LICM calls MemorySSAWalker's
130// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
131// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
132// which may not be precise, since optimizeUses is capped. The result is
133// correct, but we may not get as "far up" as possible to get which access is
134// clobbering the one queried.
136 "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
137 cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
138 "for faster compile. Caps the MemorySSA clobbering calls."));
139
140// Experimentally, memory promotion carries less importance than sinking and
141// hoisting. Limit when we do promotion when using MemorySSA, in order to save
142// compile time.
144 "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
145 cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
146 "effect. When MSSA in LICM is enabled, then this is the maximum "
147 "number of accesses allowed to be present in a loop in order to "
148 "enable memory promotion."));
149
150static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
151static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
152 const LoopSafetyInfo *SafetyInfo,
153 TargetTransformInfo *TTI, bool &FreeInLoop,
154 bool LoopNestMode);
155static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
156 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
159static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
160 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
163 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
164 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
165 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
166 AssumptionCache *AC, bool AllowSpeculation);
167static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
168 Loop *CurLoop, Instruction &I,
170 bool InvariantGroup);
171static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA,
172 MemoryUse &MU);
173/// Try to simplify things like (A < INV_1 AND icmp A < INV_2) into (A <
174/// min(INV_1, INV_2)), if INV_1 and INV_2 are both loop invariants and their
175/// minimun can be computed outside of loop, and X is not a loop-invariant.
176static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
177 MemorySSAUpdater &MSSAU);
179 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
180 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU);
181
182static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
183 MemorySSAUpdater &MSSAU);
184
186 ICFLoopSafetyInfo &SafetyInfo,
188
189static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
190 function_ref<void(Instruction *)> Fn);
192 std::pair<SmallSetVector<Value *, 8>, bool>;
195
196namespace {
197struct LoopInvariantCodeMotion {
198 bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
201 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);
202
203 LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
204 unsigned LicmMssaNoAccForPromotionCap,
205 bool LicmAllowSpeculation)
206 : LicmMssaOptCap(LicmMssaOptCap),
207 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
208 LicmAllowSpeculation(LicmAllowSpeculation) {}
209
210private:
211 unsigned LicmMssaOptCap;
212 unsigned LicmMssaNoAccForPromotionCap;
213 bool LicmAllowSpeculation;
214};
215
216struct LegacyLICMPass : public LoopPass {
217 static char ID; // Pass identification, replacement for typeid
218 LegacyLICMPass(
219 unsigned LicmMssaOptCap = SetLicmMssaOptCap,
220 unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap,
221 bool LicmAllowSpeculation = true)
222 : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
223 LicmAllowSpeculation) {
225 }
226
227 bool runOnLoop(Loop *L, LPPassManager &LPM) override {
228 if (skipLoop(L))
229 return false;
230
231 LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
232 << L->getHeader()->getNameOrAsOperand() << "\n");
233
234 Function *F = L->getHeader()->getParent();
235
236 auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
237 MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
238 // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
239 // pass. Function analyses need to be preserved across loop transformations
240 // but ORE cannot be preserved (see comment before the pass definition).
241 OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
242 return LICM.runOnLoop(
243 L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
244 &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
245 &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
246 &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F),
247 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(*F),
248 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*F),
249 SE ? &SE->getSE() : nullptr, MSSA, &ORE);
250 }
251
252 /// This transformation requires natural loop information & requires that
253 /// loop preheaders be inserted into the CFG...
254 ///
255 void getAnalysisUsage(AnalysisUsage &AU) const override {
267 }
268
269private:
270 LoopInvariantCodeMotion LICM;
271};
272} // namespace
273
276 if (!AR.MSSA)
277 report_fatal_error("LICM requires MemorySSA (loop-mssa)",
278 /*GenCrashDiag*/false);
279
280 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
281 // pass. Function analyses need to be preserved across loop transformations
282 // but ORE cannot be preserved (see comment before the pass definition).
283 OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
284
285 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
286 Opts.AllowSpeculation);
287 if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.AC, &AR.TLI, &AR.TTI,
288 &AR.SE, AR.MSSA, &ORE))
289 return PreservedAnalyses::all();
290
292 PA.preserve<MemorySSAAnalysis>();
293
294 return PA;
295}
296
298 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
299 static_cast<PassInfoMixin<LICMPass> *>(this)->printPipeline(
300 OS, MapClassName2PassName);
301
302 OS << '<';
303 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
304 OS << '>';
305}
306
309 LPMUpdater &) {
310 if (!AR.MSSA)
311 report_fatal_error("LNICM requires MemorySSA (loop-mssa)",
312 /*GenCrashDiag*/false);
313
314 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
315 // pass. Function analyses need to be preserved across loop transformations
316 // but ORE cannot be preserved (see comment before the pass definition).
318
319 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
320 Opts.AllowSpeculation);
321
322 Loop &OutermostLoop = LN.getOutermostLoop();
323 bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, &AR.AC,
324 &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);
325
326 if (!Changed)
327 return PreservedAnalyses::all();
328
330
331 PA.preserve<DominatorTreeAnalysis>();
332 PA.preserve<LoopAnalysis>();
333 PA.preserve<MemorySSAAnalysis>();
334
335 return PA;
336}
337
339 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
340 static_cast<PassInfoMixin<LNICMPass> *>(this)->printPipeline(
341 OS, MapClassName2PassName);
342
343 OS << '<';
344 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
345 OS << '>';
346}
347
348char LegacyLICMPass::ID = 0;
349INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
350 false, false)
356INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
357 false)
358
359Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
360Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
361 unsigned LicmMssaNoAccForPromotionCap,
362 bool LicmAllowSpeculation) {
363 return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
364 LicmAllowSpeculation);
365}
366
368 MemorySSA &MSSA)
370 IsSink, L, MSSA) {}
371
373 unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
374 Loop &L, MemorySSA &MSSA)
375 : LicmMssaOptCap(LicmMssaOptCap),
376 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
377 IsSink(IsSink) {
378 unsigned AccessCapCount = 0;
379 for (auto *BB : L.getBlocks())
380 if (const auto *Accesses = MSSA.getBlockAccesses(BB))
381 for (const auto &MA : *Accesses) {
382 (void)MA;
383 ++AccessCapCount;
384 if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
385 NoOfMemAccTooLarge = true;
386 return;
387 }
388 }
389}
390
391/// Hoist expressions out of the specified loop. Note, alias info for inner
392/// loop is not preserved so it is not a good idea to run LICM multiple
393/// times on one loop.
394bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI,
398 ScalarEvolution *SE, MemorySSA *MSSA,
400 bool LoopNestMode) {
401 bool Changed = false;
402
403 assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
404 MSSA->ensureOptimizedUses();
405
406 // If this loop has metadata indicating that LICM is not to be performed then
407 // just exit.
409 return false;
410 }
411
412 // Don't sink stores from loops with coroutine suspend instructions.
413 // LICM would sink instructions into the default destination of
414 // the coroutine switch. The default destination of the switch is to
415 // handle the case where the coroutine is suspended, by which point the
416 // coroutine frame may have been destroyed. No instruction can be sunk there.
417 // FIXME: This would unfortunately hurt the performance of coroutines, however
418 // there is currently no general solution for this. Similar issues could also
419 // potentially happen in other passes where instructions are being moved
420 // across that edge.
421 bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
422 return llvm::any_of(*BB, [](Instruction &I) {
423 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
424 return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
425 });
426 });
427
428 MemorySSAUpdater MSSAU(MSSA);
429 SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
430 /*IsSink=*/true, *L, *MSSA);
431
432 // Get the preheader block to move instructions into...
433 BasicBlock *Preheader = L->getLoopPreheader();
434
435 // Compute loop safety information.
436 ICFLoopSafetyInfo SafetyInfo;
437 SafetyInfo.computeLoopSafetyInfo(L);
438
439 // We want to visit all of the instructions in this loop... that are not parts
440 // of our subloops (they have already had their invariants hoisted out of
441 // their loop, into this loop, so there is no need to process the BODIES of
442 // the subloops).
443 //
444 // Traverse the body of the loop in depth first order on the dominator tree so
445 // that we are guaranteed to see definitions before we see uses. This allows
446 // us to sink instructions in one pass, without iteration. After sinking
447 // instructions, we perform another pass to hoist them out of the loop.
448 if (L->hasDedicatedExits())
449 Changed |=
450 LoopNestMode
451 ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI, DT,
452 TLI, TTI, L, MSSAU, &SafetyInfo, Flags, ORE)
453 : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
454 MSSAU, &SafetyInfo, Flags, ORE);
455 Flags.setIsSink(false);
456 if (Preheader)
457 Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, AC, TLI, L,
458 MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
459 LicmAllowSpeculation);
460
461 // Now that all loop invariants have been removed from the loop, promote any
462 // memory references to scalars that we can.
463 // Don't sink stores from loops without dedicated block exits. Exits
464 // containing indirect branches are not transformed by loop simplify,
465 // make sure we catch that. An additional load may be generated in the
466 // preheader for SSA updater, so also avoid sinking when no preheader
467 // is available.
468 if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
469 !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
470 // Figure out the loop exits and their insertion points
472 L->getUniqueExitBlocks(ExitBlocks);
473
474 // We can't insert into a catchswitch.
475 bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
476 return isa<CatchSwitchInst>(Exit->getTerminator());
477 });
478
479 if (!HasCatchSwitch) {
481 SmallVector<MemoryAccess *, 8> MSSAInsertPts;
482 InsertPts.reserve(ExitBlocks.size());
483 MSSAInsertPts.reserve(ExitBlocks.size());
484 for (BasicBlock *ExitBlock : ExitBlocks) {
485 InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
486 MSSAInsertPts.push_back(nullptr);
487 }
488
490
491 // Promoting one set of accesses may make the pointers for another set
492 // loop invariant, so run this in a loop.
493 bool Promoted = false;
494 bool LocalPromoted;
495 do {
496 LocalPromoted = false;
497 for (auto [PointerMustAliases, HasReadsOutsideSet] :
498 collectPromotionCandidates(MSSA, AA, L)) {
499 LocalPromoted |= promoteLoopAccessesToScalars(
500 PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
501 DT, AC, TLI, TTI, L, MSSAU, &SafetyInfo, ORE,
502 LicmAllowSpeculation, HasReadsOutsideSet);
503 }
504 Promoted |= LocalPromoted;
505 } while (LocalPromoted);
506
507 // Once we have promoted values across the loop body we have to
508 // recursively reform LCSSA as any nested loop may now have values defined
509 // within the loop used in the outer loop.
510 // FIXME: This is really heavy handed. It would be a bit better to use an
511 // SSAUpdater strategy during promotion that was LCSSA aware and reformed
512 // it as it went.
513 if (Promoted)
514 formLCSSARecursively(*L, *DT, LI, SE);
515
516 Changed |= Promoted;
517 }
518 }
519
520 // Check that neither this loop nor its parent have had LCSSA broken. LICM is
521 // specifically moving instructions across the loop boundary and so it is
522 // especially in need of basic functional correctness checking here.
523 assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
524 assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
525 "Parent loop not left in LCSSA form after LICM!");
526
527 if (VerifyMemorySSA)
528 MSSA->verifyMemorySSA();
529
530 if (Changed && SE)
532 return Changed;
533}
534
535/// Walk the specified region of the CFG (defined by all blocks dominated by
536/// the specified block, and that are in the current loop) in reverse depth
537/// first order w.r.t the DominatorTree. This allows us to visit uses before
538/// definitions, allowing us to sink a loop body in one pass without iteration.
539///
542 TargetTransformInfo *TTI, Loop *CurLoop,
543 MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
545 OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {
546
547 // Verify inputs.
548 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
549 CurLoop != nullptr && SafetyInfo != nullptr &&
550 "Unexpected input to sinkRegion.");
551
552 // We want to visit children before parents. We will enqueue all the parents
553 // before their children in the worklist and process the worklist in reverse
554 // order.
556
557 bool Changed = false;
558 for (DomTreeNode *DTN : reverse(Worklist)) {
559 BasicBlock *BB = DTN->getBlock();
560 // Only need to process the contents of this block if it is not part of a
561 // subloop (which would already have been processed).
562 if (inSubLoop(BB, CurLoop, LI))
563 continue;
564
565 for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
566 Instruction &I = *--II;
567
568 // The instruction is not used in the loop if it is dead. In this case,
569 // we just delete it instead of sinking it.
570 if (isInstructionTriviallyDead(&I, TLI)) {
571 LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
574 ++II;
575 eraseInstruction(I, *SafetyInfo, MSSAU);
576 Changed = true;
577 continue;
578 }
579
580 // Check to see if we can sink this instruction to the exit blocks
581 // of the loop. We can do this if the all users of the instruction are
582 // outside of the loop. In this case, it doesn't even matter if the
583 // operands of the instruction are loop invariant.
584 //
585 bool FreeInLoop = false;
586 bool LoopNestMode = OutermostLoop != nullptr;
587 if (!I.mayHaveSideEffects() &&
588 isNotUsedOrFreeInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
589 SafetyInfo, TTI, FreeInLoop, LoopNestMode) &&
590 canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE)) {
591 if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
592 if (!FreeInLoop) {
593 ++II;
595 eraseInstruction(I, *SafetyInfo, MSSAU);
596 }
597 Changed = true;
598 }
599 }
600 }
601 }
602 if (VerifyMemorySSA)
603 MSSAU.getMemorySSA()->verifyMemorySSA();
604 return Changed;
605}
606
609 TargetTransformInfo *TTI, Loop *CurLoop,
610 MemorySSAUpdater &MSSAU,
611 ICFLoopSafetyInfo *SafetyInfo,
614
615 bool Changed = false;
617 Worklist.insert(CurLoop);
618 appendLoopsToWorklist(*CurLoop, Worklist);
619 while (!Worklist.empty()) {
620 Loop *L = Worklist.pop_back_val();
621 Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
622 MSSAU, SafetyInfo, Flags, ORE, CurLoop);
623 }
624 return Changed;
625}
626
627namespace {
628// This is a helper class for hoistRegion to make it able to hoist control flow
629// in order to be able to hoist phis. The way this works is that we initially
630// start hoisting to the loop preheader, and when we see a loop invariant branch
631// we make note of this. When we then come to hoist an instruction that's
632// conditional on such a branch we duplicate the branch and the relevant control
633// flow, then hoist the instruction into the block corresponding to its original
634// block in the duplicated control flow.
635class ControlFlowHoister {
636private:
637 // Information about the loop we are hoisting from
638 LoopInfo *LI;
639 DominatorTree *DT;
640 Loop *CurLoop;
641 MemorySSAUpdater &MSSAU;
642
643 // A map of blocks in the loop to the block their instructions will be hoisted
644 // to.
645 DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;
646
647 // The branches that we can hoist, mapped to the block that marks a
648 // convergence point of their control flow.
649 DenseMap<BranchInst *, BasicBlock *> HoistableBranches;
650
651public:
652 ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
653 MemorySSAUpdater &MSSAU)
654 : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
655
656 void registerPossiblyHoistableBranch(BranchInst *BI) {
657 // We can only hoist conditional branches with loop invariant operands.
658 if (!ControlFlowHoisting || !BI->isConditional() ||
659 !CurLoop->hasLoopInvariantOperands(BI))
660 return;
661
662 // The branch destinations need to be in the loop, and we don't gain
663 // anything by duplicating conditional branches with duplicate successors,
664 // as it's essentially the same as an unconditional branch.
665 BasicBlock *TrueDest = BI->getSuccessor(0);
666 BasicBlock *FalseDest = BI->getSuccessor(1);
667 if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
668 TrueDest == FalseDest)
669 return;
670
671 // We can hoist BI if one branch destination is the successor of the other,
672 // or both have common successor which we check by seeing if the
673 // intersection of their successors is non-empty.
674 // TODO: This could be expanded to allowing branches where both ends
675 // eventually converge to a single block.
676 SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
677 TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
678 FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
679 BasicBlock *CommonSucc = nullptr;
680 if (TrueDestSucc.count(FalseDest)) {
681 CommonSucc = FalseDest;
682 } else if (FalseDestSucc.count(TrueDest)) {
683 CommonSucc = TrueDest;
684 } else {
685 set_intersect(TrueDestSucc, FalseDestSucc);
686 // If there's one common successor use that.
687 if (TrueDestSucc.size() == 1)
688 CommonSucc = *TrueDestSucc.begin();
689 // If there's more than one pick whichever appears first in the block list
690 // (we can't use the value returned by TrueDestSucc.begin() as it's
691 // unpredicatable which element gets returned).
692 else if (!TrueDestSucc.empty()) {
693 Function *F = TrueDest->getParent();
694 auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
695 auto It = llvm::find_if(*F, IsSucc);
696 assert(It != F->end() && "Could not find successor in function");
697 CommonSucc = &*It;
698 }
699 }
700 // The common successor has to be dominated by the branch, as otherwise
701 // there will be some other path to the successor that will not be
702 // controlled by this branch so any phi we hoist would be controlled by the
703 // wrong condition. This also takes care of avoiding hoisting of loop back
704 // edges.
705 // TODO: In some cases this could be relaxed if the successor is dominated
706 // by another block that's been hoisted and we can guarantee that the
707 // control flow has been replicated exactly.
708 if (CommonSucc && DT->dominates(BI, CommonSucc))
709 HoistableBranches[BI] = CommonSucc;
710 }
711
712 bool canHoistPHI(PHINode *PN) {
713 // The phi must have loop invariant operands.
714 if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
715 return false;
716 // We can hoist phis if the block they are in is the target of hoistable
717 // branches which cover all of the predecessors of the block.
718 SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
719 BasicBlock *BB = PN->getParent();
720 for (BasicBlock *PredBB : predecessors(BB))
721 PredecessorBlocks.insert(PredBB);
722 // If we have less predecessor blocks than predecessors then the phi will
723 // have more than one incoming value for the same block which we can't
724 // handle.
725 // TODO: This could be handled be erasing some of the duplicate incoming
726 // values.
727 if (PredecessorBlocks.size() != pred_size(BB))
728 return false;
729 for (auto &Pair : HoistableBranches) {
730 if (Pair.second == BB) {
731 // Which blocks are predecessors via this branch depends on if the
732 // branch is triangle-like or diamond-like.
733 if (Pair.first->getSuccessor(0) == BB) {
734 PredecessorBlocks.erase(Pair.first->getParent());
735 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
736 } else if (Pair.first->getSuccessor(1) == BB) {
737 PredecessorBlocks.erase(Pair.first->getParent());
738 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
739 } else {
740 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
741 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
742 }
743 }
744 }
745 // PredecessorBlocks will now be empty if for every predecessor of BB we
746 // found a hoistable branch source.
747 return PredecessorBlocks.empty();
748 }
749
750 BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
752 return CurLoop->getLoopPreheader();
753 // If BB has already been hoisted, return that
754 if (HoistDestinationMap.count(BB))
755 return HoistDestinationMap[BB];
756
757 // Check if this block is conditional based on a pending branch
758 auto HasBBAsSuccessor =
760 return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
761 Pair.first->getSuccessor(1) == BB);
762 };
763 auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);
764
765 // If not involved in a pending branch, hoist to preheader
766 BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
767 if (It == HoistableBranches.end()) {
768 LLVM_DEBUG(dbgs() << "LICM using "
769 << InitialPreheader->getNameOrAsOperand()
770 << " as hoist destination for "
771 << BB->getNameOrAsOperand() << "\n");
772 HoistDestinationMap[BB] = InitialPreheader;
773 return InitialPreheader;
774 }
775 BranchInst *BI = It->first;
776 assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
777 HoistableBranches.end() &&
778 "BB is expected to be the target of at most one branch");
779
780 LLVMContext &C = BB->getContext();
781 BasicBlock *TrueDest = BI->getSuccessor(0);
782 BasicBlock *FalseDest = BI->getSuccessor(1);
783 BasicBlock *CommonSucc = HoistableBranches[BI];
784 BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());
785
786 // Create hoisted versions of blocks that currently don't have them
787 auto CreateHoistedBlock = [&](BasicBlock *Orig) {
788 if (HoistDestinationMap.count(Orig))
789 return HoistDestinationMap[Orig];
790 BasicBlock *New =
791 BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
792 HoistDestinationMap[Orig] = New;
793 DT->addNewBlock(New, HoistTarget);
794 if (CurLoop->getParentLoop())
795 CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
796 ++NumCreatedBlocks;
797 LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
798 << " as hoist destination for " << Orig->getName()
799 << "\n");
800 return New;
801 };
802 BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
803 BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
804 BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);
805
806 // Link up these blocks with branches.
807 if (!HoistCommonSucc->getTerminator()) {
808 // The new common successor we've generated will branch to whatever that
809 // hoist target branched to.
810 BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
811 assert(TargetSucc && "Expected hoist target to have a single successor");
812 HoistCommonSucc->moveBefore(TargetSucc);
813 BranchInst::Create(TargetSucc, HoistCommonSucc);
814 }
815 if (!HoistTrueDest->getTerminator()) {
816 HoistTrueDest->moveBefore(HoistCommonSucc);
817 BranchInst::Create(HoistCommonSucc, HoistTrueDest);
818 }
819 if (!HoistFalseDest->getTerminator()) {
820 HoistFalseDest->moveBefore(HoistCommonSucc);
821 BranchInst::Create(HoistCommonSucc, HoistFalseDest);
822 }
823
824 // If BI is being cloned to what was originally the preheader then
825 // HoistCommonSucc will now be the new preheader.
826 if (HoistTarget == InitialPreheader) {
827 // Phis in the loop header now need to use the new preheader.
828 InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
830 HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
831 // The new preheader dominates the loop header.
832 DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
833 DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
834 DT->changeImmediateDominator(HeaderNode, PreheaderNode);
835 // The preheader hoist destination is now the new preheader, with the
836 // exception of the hoist destination of this branch.
837 for (auto &Pair : HoistDestinationMap)
838 if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
839 Pair.second = HoistCommonSucc;
840 }
841
842 // Now finally clone BI.
844 HoistTarget->getTerminator(),
845 BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
846 ++NumClonedBranches;
847
848 assert(CurLoop->getLoopPreheader() &&
849 "Hoisting blocks should not have destroyed preheader");
850 return HoistDestinationMap[BB];
851 }
852};
853} // namespace
854
/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
/// Returns true if anything changed: an instruction was constant-folded,
/// hoisted (possibly into a conditionally-executed hoist block created by
/// ControlFlowHoister), or an FDiv was rewritten as a reciprocal multiply.
                        TargetLibraryInfo *TLI, Loop *CurLoop,
                        ICFLoopSafetyInfo *SafetyInfo,
                        OptimizationRemarkEmitter *ORE, bool LoopNestMode,
                        bool AllowSpeculation) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");

  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);

  // Keep track of instructions that have been hoisted, as they may need to be
  // re-hoisted if they end up not dominating all of their uses.
  SmallVector<Instruction *, 16> HoistedInstructions;

  // For PHI hoisting to work we need to hoist blocks before their successors.
  // We can do this by iterating through the blocks in the loop in reverse
  // post-order.
  LoopBlocksRPO Worklist(CurLoop);
  Worklist.perform(LI);
  bool Changed = false;
  for (BasicBlock *BB : Worklist) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
      continue;

      // Try constant folding this instruction. If all the operands are
      // constants, it is technically hoistable, but it would be better to
      // just fold it.
              &I, I.getModule()->getDataLayout(), TLI)) {
        LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C
                          << '\n');
        // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
        I.replaceAllUsesWith(C);
        if (isInstructionTriviallyDead(&I, TLI))
          eraseInstruction(I, *SafetyInfo, MSSAU);
        Changed = true;
        continue;
      }

      // Try hoisting the instruction out to the preheader. We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction. We also check block frequency
      // to make sure instruction only gets hoisted into colder blocks.
      // TODO: It may be safe to hoist if we are hoisting to a conditional block
      // and we have accurately duplicated the control flow from the loop header
      // to that block.
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE) &&
              I, DT, TLI, CurLoop, SafetyInfo, ORE,
              CurLoop->getLoopPreheader()->getTerminator(), AC,
              AllowSpeculation)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      // Attempt to remove floating point division out of the loop by
      // converting it to a reciprocal multiplication.
      // (Only legal when the instruction carries the allow-reciprocal
      // fast-math flag and the divisor is loop invariant.)
      if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
          CurLoop->isLoopInvariant(I.getOperand(1))) {
        auto Divisor = I.getOperand(1);
        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
        ReciprocalDivisor->insertBefore(&I);

        auto Product =
            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
        Product->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(Product, I.getParent());
        Product->insertAfter(&I);
        I.replaceAllUsesWith(Product);
        eraseInstruction(I, *SafetyInfo, MSSAU);

        // The reciprocal (1.0/Divisor) is now loop invariant, so hoist it.
        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
              SafetyInfo, MSSAU, SE, ORE);
        HoistedInstructions.push_back(ReciprocalDivisor);
        Changed = true;
        continue;
      }

      auto IsInvariantStart = [&](Instruction &I) {
        using namespace PatternMatch;
        return I.use_empty() &&
               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
      };
      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
      };
      if ((IsInvariantStart(I) || isGuard(&I)) &&
          CurLoop->hasLoopInvariantOperands(&I) &&
          MustExecuteWithoutWritesBefore(I)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
        if (CFH.canHoistPHI(PN)) {
          // Redirect incoming blocks first to ensure that we create hoisted
          // versions of those blocks before we hoist the phi.
          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
                MSSAU, SE, ORE);
          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
          Changed = true;
          continue;
        }
      }

      // Optimize complex patterns, such as (x < INV1 && x < INV2), turning them
      // into (x < min(INV1, INV2)), and hoisting the invariant part of this
      // expression out of the loop.
      if (hoistMinMax(I, *CurLoop, *SafetyInfo, MSSAU)) {
        ++NumMinMaxHoisted;
        Changed = true;
        continue;
      }

      // Remember possibly hoistable branches so we can actually hoist them
      // later if needed.
      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
        CFH.registerPossiblyHoistableBranch(BI);
    }
  }

  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order
  // which ensures that when we rehoist an instruction we rehoist its operands,
  // and also keep track of where in the block we are rehoisting to to make sure
  // that we rehoist instructions before the instructions that use them.
  Instruction *HoistPoint = nullptr;
  if (ControlFlowHoisting) {
    for (Instruction *I : reverse(HoistedInstructions)) {
      if (!llvm::all_of(I->uses(),
                        [&](Use &U) { return DT->dominates(I, U); })) {
        BasicBlock *Dominator =
            DT->getNode(I->getParent())->getIDom()->getBlock();
        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
          if (HoistPoint)
            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                   "New hoist point expected to dominate old hoist point");
          HoistPoint = Dominator->getTerminator();
        }
        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
                          << HoistPoint->getParent()->getNameOrAsOperand()
                          << ": " << *I << "\n");
        moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
        HoistPoint = I;
        Changed = true;
      }
    }
  }
  // Optionally sanity-check the MemorySSA updates performed above.
  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();

  // Now that we've finished hoisting make sure that LI and DT are still
  // valid.
#ifdef EXPENSIVE_CHECKS
  if (Changed) {
    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
           "Dominator tree verification failed");
    LI->verify(*DT);
  }
#endif

  return Changed;
}
1047
// Return true if LI is invariant within scope of the loop. LI is invariant if
// CurLoop is dominated by an invariant.start representing the same memory
// location and size as the memory location LI loads from, and also the
// invariant.start has no uses.
                                  Loop *CurLoop) {
  // The pointer the load reads from (possibly behind bitcasts, peeled below).
  Value *Addr = LI->getOperand(0);
  const DataLayout &DL = LI->getModule()->getDataLayout();
  const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());

  // It is not currently possible for clang to generate an invariant.start
  // intrinsic with scalable vector types because we don't support thread local
  // sizeless types and we don't permit sizeless types in structs or classes.
  // Furthermore, even if support is added for this in future the intrinsic
  // itself is defined to have a size of -1 for variable sized objects. This
  // makes it impossible to verify if the intrinsic envelops our region of
  // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
  // types would have a -1 parameter, but the former is clearly double the size
  // of the latter.
  if (LocSizeInBits.isScalable())
    return false;

  // if the type is i8 addrspace(x)*, we know this is the type of
  // llvm.invariant.start operand
  auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
  unsigned BitcastsVisited = 0;
  // Look through bitcasts until we reach the i8* type (this is invariant.start
  // operand type).
  while (Addr->getType() != PtrInt8Ty) {
    auto *BC = dyn_cast<BitCastInst>(Addr);
    // Avoid traversing high number of bitcast uses.
    if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
      return false;
    Addr = BC->getOperand(0);
  }
  // If we've ended up at a global/constant, bail. We shouldn't be looking at
  // uselists for non-local Values in a loop pass.
  if (isa<Constant>(Addr))
    return false;

  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value, to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
  for (auto *U : Addr->users()) {
    // Avoid traversing for Load operand with high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      return false;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of invariant.start instruction, the load maybe
    // non-invariant.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
    // The intrinsic supports having a -1 argument for variable sized objects
    // so we should check for that here.
    if (InvariantSize->isNegative())
      continue;
    uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
    // Confirm the invariant.start location size contains the load operand size
    // in bits. Also, the invariant.start should dominate the load, and we
    // should not hoist the load out of a loop that contains this dominating
    // invariant.start.
    if (LocSizeInBits.getFixedValue() <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }

  return false;
}
1119
1120namespace {
1121/// Return true if-and-only-if we know how to (mechanically) both hoist and
1122/// sink a given instruction out of a loop. Does not address legality
1123/// concerns such as aliasing or speculation safety.
1124bool isHoistableAndSinkableInst(Instruction &I) {
1125 // Only these instructions are hoistable/sinkable.
1126 return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
1127 isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
1128 isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
1129 isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
1130 isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
1131 isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
1132 isa<InsertValueInst>(I) || isa<FreezeInst>(I));
1133}
1134/// Return true if MSSA knows there are no MemoryDefs in the loop.
1135bool isReadOnly(const MemorySSAUpdater &MSSAU, const Loop *L) {
1136 for (auto *BB : L->getBlocks())
1137 if (MSSAU.getMemorySSA()->getBlockDefs(BB))
1138 return false;
1139 return true;
1140}
1141
1142/// Return true if I is the only Instruction with a MemoryAccess in L.
1143bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
1144 const MemorySSAUpdater &MSSAU) {
1145 for (auto *BB : L->getBlocks())
1146 if (auto *Accs = MSSAU.getMemorySSA()->getBlockAccesses(BB)) {
1147 int NotAPhi = 0;
1148 for (const auto &Acc : *Accs) {
1149 if (isa<MemoryPhi>(&Acc))
1150 continue;
1151 const auto *MUD = cast<MemoryUseOrDef>(&Acc);
1152 if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
1153 return false;
1154 }
1155 }
1156 return true;
1157}
1158}
1159
                               Loop *CurLoop, MemorySSAUpdater &MSSAU,
                               bool TargetExecutesOncePerLoop,
                               SinkAndHoistLICMFlags &Flags,
  // If we don't understand the instruction, bail early.
  if (!isHoistableAndSinkableInst(I))
    return false;

  MemorySSA *MSSA = MSSAU.getMemorySSA();
  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (!isModSet(AA->getModRefInfoMask(LI->getOperand(0))))
      return true;
    // A load tagged !invariant.load is safe to move as well.
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

    if (LI->isAtomic() && !TargetExecutesOncePerLoop)
      return false; // Don't risk duplicating unordered loads

    // This checks for an invariant.start dominating the load.
    if (isLoadInvariantInLoop(LI, DT, CurLoop))
      return true;

    // Otherwise consult MemorySSA: the load is movable only if nothing in
    // the loop may clobber it.
    auto MU = cast<MemoryUse>(MSSA->getMemoryAccess(LI));

    bool InvariantGroup = LI->hasMetadata(LLVMContext::MD_invariant_group);

    bool Invalidated = pointerInvalidatedByLoop(
        MSSA, MU, CurLoop, I, Flags, InvariantGroup);
    // Check loop-invariant address because this may also be a sinkable load
    // whose address is not necessarily loop-invariant.
    if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
            DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
               << "failed to move load with loop-invariant address "
                  "because the loop may invalidate its value";
      });

    return !Invalidated;
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Don't sink calls which can throw.
    if (CI->mayThrow())
      return false;

    // Convergent attribute has been used on operations that involve
    // inter-thread communication which results are implicitly affected by the
    // enclosing control flows. It is not safe to hoist or sink such operations
    // across control flow.
    if (CI->isConvergent())
      return false;

    using namespace PatternMatch;
    if (match(CI, m_Intrinsic<Intrinsic::assume>()))
      // Assumes don't actually alias anything or throw
      return true;

    // Handle simple cases by querying alias analysis.
    MemoryEffects Behavior = AA->getMemoryEffects(CI);
    if (Behavior.doesNotAccessMemory())
      return true;
    if (Behavior.onlyReadsMemory()) {
      // A readonly argmemonly function only reads from memory pointed to by
      // it's arguments with arbitrary offsets. If we can prove there are no
      // writes to this memory in the loop, we can hoist or sink.
      if (Behavior.onlyAccessesArgPointees()) {
        // TODO: expand to writeable arguments
        // Every pointer argument must be unclobbered within the loop.
        for (Value *Op : CI->args())
          if (Op->getType()->isPointerTy() &&
                  MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
                  Flags, /*InvariantGroup=*/false))
            return false;
        return true;
      }

      // If this call only reads from memory and there are no writes to memory
      // in the loop, we can hoist or sink the call as appropriate.
      if (isReadOnly(MSSAU, CurLoop))
        return true;
    }

    // FIXME: This should use mod/ref information to see if we can hoist or
    // sink the call.

    return false;
  } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
    // Fences alias (most) everything to provide ordering. For the moment,
    // just give up if there are any other memory operations in the loop.
    return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
  } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
    if (!SI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic store!

    // We can only hoist a store that we can prove writes a value which is not
    // read or overwritten within the loop. For those cases, we fallback to
    // load store promotion instead. TODO: We can extend this to cases where
    // there is exactly one write to the location and that write dominates an
    // arbitrary number of reads in the loop.
    if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
      return true;
    // If there are more accesses than the Promotion cap or no "quota" to
    // check clobber, then give up as we're not walking a list that long.
    if (Flags.tooManyMemoryAccesses() || Flags.tooManyClobberingCalls())
      return false;
    // If there are interfering Uses (i.e. their defining access is in the
    // loop), or ordered loads (stored as Defs!), don't move this store.
    // Could do better here, but this is conservatively correct.
    // TODO: Cache set of Uses on the first walk in runOnLoop, update when
    // moving accesses. Can also extend to dominating uses.
    auto *SIMD = MSSA->getMemoryAccess(SI);
    for (auto *BB : CurLoop->getBlocks())
      if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
        for (const auto &MA : *Accesses)
          if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
            auto *MD = MU->getDefiningAccess();
            if (!MSSA->isLiveOnEntryDef(MD) &&
                CurLoop->contains(MD->getBlock()))
              return false;
            // Disable hoisting past potentially interfering loads. Optimized
            // Uses may point to an access outside the loop, as getClobbering
            // checks the previous iteration when walking the backedge.
            // FIXME: More precise: no Uses that alias SI.
            if (!Flags.getIsSink() && !MSSA->dominates(SIMD, MU))
              return false;
          } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
            if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
              (void)LI; // Silence warning.
              assert(!LI->isUnordered() && "Expected unordered load");
              return false;
            }
            // Any call, while it may not be clobbering SI, it may be a use.
            if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
              // Check if the call may read from the memory location written
              // to by SI. Check CI's attributes and arguments; the number of
              // such checks performed is limited above by NoOfMemAccTooLarge.
              if (isModOrRefSet(MRI))
                return false;
            }
          }
      }
    auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
    Flags.incrementClobberingCalls();
    // If there are no clobbering Defs in the loop, store is safe to hoist.
    return MSSA->isLiveOnEntryDef(Source) ||
           !CurLoop->contains(Source->getBlock());
  }

  assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");

  // We've established mechanical ability and aliasing, it's up to the caller
  // to check fault safety
  return true;
}
1325
1326/// Returns true if a PHINode is a trivially replaceable with an
1327/// Instruction.
1328/// This is true when all incoming values are that instruction.
1329/// This pattern occurs most often with LCSSA PHI nodes.
1330///
1331static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1332 for (const Value *IncValue : PN.incoming_values())
1333 if (IncValue != &I)
1334 return false;
1335
1336 return true;
1337}
1338
/// Return true if the instruction is free in the loop.
/// An instruction is free when its TTI-reported cost is
/// TargetTransformInfo::TCC_Free; for GEPs we additionally require that every
/// in-loop user is a load/store in the same block, i.e. a user that can fold
/// the GEP into its addressing mode.
static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
                         const TargetTransformInfo *TTI) {
  InstructionCost CostI =

  if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    if (CostI != TargetTransformInfo::TCC_Free)
      return false;
    // For a GEP, we cannot simply use getInstructionCost because currently
    // it optimistically assumes that a GEP will fold into addressing mode
    // regardless of its users.
    const BasicBlock *BB = GEP->getParent();
    for (const User *U : GEP->users()) {
      const Instruction *UI = cast<Instruction>(U);
      // An in-loop user that is in another block, or is not a load/store,
      // means the GEP is materialized and therefore not free.
      if (CurLoop->contains(UI) &&
          (BB != UI->getParent() ||
           (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
        return false;
    }
    return true;
  }

  return CostI == TargetTransformInfo::TCC_Free;
}
1364
1365/// Return true if the only users of this instruction are outside of
1366/// the loop. If this is true, we can sink the instruction to the exit
1367/// blocks of the loop.
1368///
1369/// We also return true if the instruction could be folded away in lowering.
1370/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
1371static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
1372 const LoopSafetyInfo *SafetyInfo,
1373 TargetTransformInfo *TTI, bool &FreeInLoop,
1374 bool LoopNestMode) {
1375 const auto &BlockColors = SafetyInfo->getBlockColors();
1376 bool IsFree = isFreeInLoop(I, CurLoop, TTI);
1377 for (const User *U : I.users()) {
1378 const Instruction *UI = cast<Instruction>(U);
1379 if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1380 const BasicBlock *BB = PN->getParent();
1381 // We cannot sink uses in catchswitches.
1382 if (isa<CatchSwitchInst>(BB->getTerminator()))
1383 return false;
1384
1385 // We need to sink a callsite to a unique funclet. Avoid sinking if the
1386 // phi use is too muddled.
1387 if (isa<CallInst>(I))
1388 if (!BlockColors.empty() &&
1389 BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1390 return false;
1391
1392 if (LoopNestMode) {
1393 while (isa<PHINode>(UI) && UI->hasOneUser() &&
1394 UI->getNumOperands() == 1) {
1395 if (!CurLoop->contains(UI))
1396 break;
1397 UI = cast<Instruction>(UI->user_back());
1398 }
1399 }
1400 }
1401
1402 if (CurLoop->contains(UI)) {
1403 if (IsFree) {
1404 FreeInLoop = true;
1405 continue;
1406 }
1407 return false;
1408 }
1409 }
1410 return true;
1411}
1412
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU) {
  Instruction *New;
  if (auto *CI = dyn_cast<CallInst>(&I)) {
    const auto &BlockColors = SafetyInfo->getBlockColors();

    // Sinking call-sites need to be handled differently from other
    // instructions. The cloned call-site needs a funclet bundle operand
    // appropriate for its location in the CFG.
    // Copy every operand bundle except funclet; the right funclet bundle for
    // the exit block is re-added from BlockColors below.
    for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
         BundleIdx != BundleEnd; ++BundleIdx) {
      OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
      if (Bundle.getTagID() == LLVMContext::OB_funclet)
        continue;

      OpBundles.emplace_back(Bundle);
    }

    if (!BlockColors.empty()) {
      const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
      assert(CV.size() == 1 && "non-unique color for exit block!");
      BasicBlock *BBColor = CV.front();
      Instruction *EHPad = BBColor->getFirstNonPHI();
      if (EHPad->isEHPad())
        OpBundles.emplace_back("funclet", EHPad);
    }

    New = CallInst::Create(CI, OpBundles);
  } else {
    New = I.clone();
  }

  // Place the clone at the start of the exit block and give it a ".le"
  // (loop-exit) suffixed name for readability.
  New->insertInto(&ExitBlock, ExitBlock.getFirstInsertionPt());
  if (!I.getName().empty())
    New->setName(I.getName() + ".le");

  if (MSSAU.getMemorySSA()->getMemoryAccess(&I)) {
    // Create a new MemoryAccess and let MemorySSA set its defining access.
    MemoryAccess *NewMemAcc = MSSAU.createMemoryAccessInBB(
        New, nullptr, New->getParent(), MemorySSA::Beginning);
    if (NewMemAcc) {
      if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
        MSSAU.insertDef(MemDef, /*RenameUses=*/true);
      else {
        auto *MemUse = cast<MemoryUse>(NewMemAcc);
        MSSAU.insertUse(MemUse, /*RenameUses=*/true);
      }
    }
  }

  // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
  // this is particularly cheap because we can rip off the PHI node that we're
  // replacing for the number and blocks of the predecessors.
  // OPT: If this shows up in a profile, we can instead finish sinking all
  // invariant instructions, and then walk their operands to re-establish
  // LCSSA. That will eliminate creating PHI nodes just to nuke them when
  // sinking bottom-up.
  for (Use &Op : New->operands())
    if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
      auto *OInst = cast<Instruction>(Op.get());
      PHINode *OpPN =
          PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
                          OInst->getName() + ".lcssa", &ExitBlock.front());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
      Op = OpPN;
    }
  return New;
}
1484
                             MemorySSAUpdater &MSSAU) {
  // Keep MemorySSA consistent: drop I's memory access before the IR
  // instruction itself disappears.
  MSSAU.removeMemoryAccess(&I);
  // Drop the loop-safety bookkeeping for I as well, then erase it.
  SafetyInfo.removeInstruction(&I);
  I.eraseFromParent();
}
1491
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater &MSSAU,
                                  ScalarEvolution *SE) {
  // Update the safety-info bookkeeping to reflect I's new home.
  SafetyInfo.removeInstruction(&I);
  SafetyInfo.insertInstructionTo(&I, Dest.getParent());
  I.moveBefore(&Dest);
  // Move I's MemorySSA access (if it has one) along with the instruction.
  if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
          MSSAU.getMemorySSA()->getMemoryAccess(&I)))
    MSSAU.moveToPlace(OldMemAcc, Dest.getParent(), MemorySSA::BeforeTerminator);
  // SCEV may have cached facts about I that no longer hold at the new
  // location; forget them.
  if (SE)
    SE->forgetValue(&I);
}
1505
    PHINode *TPN, Instruction *I, LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
    MemorySSAUpdater &MSSAU) {
         "Expect only trivially replaceable PHI");
  BasicBlock *ExitBlock = TPN->getParent();
  Instruction *New;
  // Memoize one clone of I per exit block in SunkCopies so multiple PHIs in
  // the same exit block share a single sunk copy.
  auto It = SunkCopies.find(ExitBlock);
  if (It != SunkCopies.end())
    New = It->second;
  else
    New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
        *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
  return New;
}
1523
1524static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1525 BasicBlock *BB = PN->getParent();
1526 if (!BB->canSplitPredecessors())
1527 return false;
1528 // It's not impossible to split EHPad blocks, but if BlockColors already exist
1529 // it require updating BlockColors for all offspring blocks accordingly. By
1530 // skipping such corner case, we can make updating BlockColors after splitting
1531 // predecessor fairly simple.
1532 if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
1533 return false;
1534 for (BasicBlock *BBPred : predecessors(BB)) {
1535 if (isa<IndirectBrInst>(BBPred->getTerminator()))
1536 return false;
1537 }
1538 return true;
1539}
1540
                                        LoopInfo *LI, const Loop *CurLoop,
                                        LoopSafetyInfo *SafetyInfo,
                                        MemorySSAUpdater *MSSAU) {
#ifndef NDEBUG
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif
  BasicBlock *ExitBB = PN->getParent();
  assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");

  // Split predecessors of the loop exit to make instructions in the loop are
  // exposed to exit blocks through trivially replaceable PHIs while keeping the
  // loop in the canonical form where each predecessor of each exit block should
  // be contained within the loop. For example, this will convert the loop below
  // from
  //
  // LB1:
  //   %v1 =
  //   br %LE, %LB2
  // LB2:
  //   %v2 =
  //   br %LE, %LB1
  // LE:
  //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
  //
  // to
  //
  // LB1:
  //   %v1 =
  //   br %LE.split, %LB2
  // LB2:
  //   %v2 =
  //   br %LE.split2, %LB1
  // LE.split:
  //   %p1 = phi [%v1, %LB1]  <-- trivially replaceable
  //   br %LE
  // LE.split2:
  //   %p2 = phi [%v2, %LB2]  <-- trivially replaceable
  //   br %LE
  // LE:
  //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
  //
  const auto &BlockColors = SafetyInfo->getBlockColors();
  // Process each predecessor of the exit block that actually feeds PN.
  SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
  while (!PredBBs.empty()) {
    BasicBlock *PredBB = *PredBBs.begin();
    assert(CurLoop->contains(PredBB) &&
           "Expect all predecessors are in the loop");
    if (PN->getBasicBlockIndex(PredBB) >= 0) {
          ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
      // Since we do not allow splitting EH-block with BlockColors in
      // canSplitPredecessors(), we can simply assign predecessor's color to
      // the new block.
      if (!BlockColors.empty())
        // Grab a reference to the ColorVector to be inserted before getting the
        // reference to the vector we are copying because inserting the new
        // element in BlockColors might cause the map to be reallocated.
        SafetyInfo->copyColors(NewPred, PredBB);
    }
    PredBBs.remove(PredBB);
  }
}
1607
/// When an instruction is found to only be used outside of the loop, this
/// function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
/// Returns true if the IR changed (users redirected, predecessors split, or
/// the instruction actually sunk).
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");

  // Iterate over users to be ready for actual sinking. Replace users via
  // unreachable blocks with undef and make all user PHIs trivially replaceable.
  SmallPtrSet<Instruction *, 8> VisitedUsers;
  for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
    auto *User = cast<Instruction>(*UI);
    Use &U = UI.getUse();
    ++UI;

    if (VisitedUsers.count(User) || CurLoop->contains(User))
      continue;

    if (!DT->isReachableFromEntry(User->getParent())) {
      U = PoisonValue::get(I.getType());
      Changed = true;
      continue;
    }

    // The user must be a PHI node.
    PHINode *PN = cast<PHINode>(User);

    // Surprisingly, instructions can be used outside of loops without any
    // exits. This can only happen in PHI nodes if the incoming block is
    // unreachable.
    BasicBlock *BB = PN->getIncomingBlock(U);
    if (!DT->isReachableFromEntry(BB)) {
      U = PoisonValue::get(I.getType());
      Changed = true;
      continue;
    }

    VisitedUsers.insert(PN);
    if (isTriviallyReplaceablePHI(*PN, I))
      continue;

    if (!canSplitPredecessors(PN, SafetyInfo))
      return Changed;

    // Split predecessors of the PHI so that we can make users trivially
    // replaceable.
    splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, &MSSAU);

    // Should rebuild the iterators, as they may be invalidated by
    // splitPredecessorsOfLoopExit().
    UI = I.user_begin();
    UE = I.user_end();
  }

  // No out-of-loop PHI users at all: nothing to sink.
  if (VisitedUsers.empty())
    return Changed;

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
           << "sinking " << ore::NV("Inst", &I);
  });
  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumSunk;

#ifndef NDEBUG
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif

  // Clones of this instruction. Don't create more than one per exit block!

  // If this instruction is only used outside of the loop, then all users are
  // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
  // the instruction.
  // First check if I is worth sinking for all uses. Sink only when it is worth
  // across all uses.
  // Take a snapshot of the users: the RAUW below mutates I's use list while
  // we iterate.
  SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
  for (auto *UI : Users) {
    auto *User = cast<Instruction>(UI);

    if (CurLoop->contains(User))
      continue;

    PHINode *PN = cast<PHINode>(User);
    assert(ExitBlockSet.count(PN->getParent()) &&
           "The LCSSA PHI is not in an exit block!");

    // The PHI must be trivially replaceable.
        PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
    PN->replaceAllUsesWith(New);
    eraseInstruction(*PN, *SafetyInfo, MSSAU);
    Changed = true;
  }
  return Changed;
}
1714
1715/// When an instruction is found to only use loop invariant operands that
1716/// is safe to hoist, this instruction is called to do the dirty work.
1717///
// NOTE(review): original lines 1720-1721 of the signature were dropped by the
// extraction. Per the hoist() index entry later in this file, the remaining
// parameters are: MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
// OptimizationRemarkEmitter *ORE.
1718static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1719 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
// Report the hoist for debugging and as an optimization remark.
1722 LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1723 << I << "\n");
1724 ORE->emit([&]() {
1725 return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1726 << ore::NV("Inst", &I);
1727 });
1728
1729 // Metadata can be dependent on conditions we are hoisting above.
1730 // Conservatively strip all metadata on the instruction unless we were
1731 // guaranteed to execute I if we entered the loop, in which case the metadata
1732 // is valid in the loop preheader.
1733 // Similarly, If I is a call and it is not guaranteed to execute in the loop,
1734 // then moving to the preheader means we should strip attributes on the call
1735 // that can cause UB since we may be hoisting above conditions that allowed
1736 // inferring those attributes. They may not be valid at the preheader.
1737 if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
1738 // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1739 // time in isGuaranteedToExecute if we don't actually have anything to
1740 // drop. It is a compile time optimization, not required for correctness.
1741 !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
1742 I.dropUBImplyingAttrsAndUnknownMetadata();
1743
1744 if (isa<PHINode>(I))
1745 // Move the new node to the end of the phi list in the destination block.
1746 moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
1747 else
1748 // Move the new node to the destination block, before its terminator.
1749 moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);
1750
// Keep the debug location sensible after the move.
1751 I.updateLocationAfterHoist();
1752
// Statistics bookkeeping.
1753 if (isa<LoadInst>(I))
1754 ++NumMovedLoads;
1755 else if (isa<CallInst>(I))
1756 ++NumMovedCalls;
1757 ++NumHoisted;
1758}
1759
1760/// Only sink or hoist an instruction if it is not a trapping instruction,
1761/// or if the instruction is known not to trap when moved to the preheader.
1762/// or if it is a trapping instruction and is guaranteed to execute.
// NOTE(review): the declaration line (original line 1763) was dropped by the
// extraction — presumably `static bool isSafeToExecuteUnconditionally(`; the
// caller in the promotion code below passes exactly this argument list.
// Confirm against upstream LICM.cpp.
1764 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
1765 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
1766 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
1767 AssumptionCache *AC, bool AllowSpeculation) {
// Speculation (if allowed) makes the instruction safe regardless of whether
// it executes on every loop iteration.
1768 if (AllowSpeculation &&
1769 isSafeToSpeculativelyExecute(&Inst, CtxI, AC, DT, TLI))
1770 return true;
1771
1772 bool GuaranteedToExecute =
1773 SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1774
// Emit a missed-optimization remark for conditionally executed loads from a
// loop-invariant address — a common case users care about.
1775 if (!GuaranteedToExecute) {
1776 auto *LI = dyn_cast<LoadInst>(&Inst);
1777 if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1778 ORE->emit([&]() {
// NOTE(review): original line 1779 was dropped here — presumably
// `return OptimizationRemarkMissed(`; the following lines are its arguments.
1780 DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1781 << "failed to hoist load with loop-invariant address "
1782 "because load is conditionally executed";
1783 });
1784 }
1785
1786 return GuaranteedToExecute;
1787}
1788
1789namespace {
// Helper for promoteLoopAccessesToScalars: drives LoadAndStorePromoter to
// rewrite in-loop loads/stores of a promoted location, inserting the sunk
// stores into the loop exit blocks and keeping MemorySSA up to date.
1790class LoopPromoter : public LoadAndStorePromoter {
1791 Value *SomePtr; // Designated pointer to store to.
1792 SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1793 SmallVectorImpl<Instruction *> &LoopInsertPts;
1794 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1795 PredIteratorCache &PredCache;
1796 MemorySSAUpdater &MSSAU;
1797 LoopInfo &LI;
1798 DebugLoc DL;
1799 Align Alignment;
1800 bool UnorderedAtomic;
1801 AAMDNodes AATags;
1802 ICFLoopSafetyInfo &SafetyInfo;
1803 bool CanInsertStoresInExitBlocks;
// NOTE(review): a member declaration (original line 1804) was dropped by the
// extraction — presumably the `Uses` member initialized from `Insts` in the
// constructor and consumed by mergeDIAssignID() below. Confirm upstream.
1805
1806 // We're about to add a use of V in a loop exit block. Insert an LCSSA phi
1807 // (if legal) if doing so would add an out-of-loop use to an instruction
1808 // defined in-loop.
1809 Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1810 if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
1811 return V;
1812
1813 Instruction *I = cast<Instruction>(V);
1814 // We need to create an LCSSA PHI node for the incoming value and
1815 // store that.
1816 PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1817 I->getName() + ".lcssa", &BB->front());
// The same in-loop definition is live-out on every predecessor edge.
1818 for (BasicBlock *Pred : PredCache.get(BB))
1819 PN->addIncoming(I, Pred);
1820 return PN;
1821 }
1822
1823public:
// NOTE(review): constructor parameter lines (original lines 1825-1827) were
// dropped by the extraction; the init-list below references LEB, LIP, MSSAIP
// and PIC, so those lines declared the exit-block/insert-point vectors and
// the PredIteratorCache. Confirm upstream.
1824 LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1828 MemorySSAUpdater &MSSAU, LoopInfo &li, DebugLoc dl,
1829 Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
1830 ICFLoopSafetyInfo &SafetyInfo, bool CanInsertStoresInExitBlocks)
1831 : LoadAndStorePromoter(Insts, S), SomePtr(SP), LoopExitBlocks(LEB),
1832 LoopInsertPts(LIP), MSSAInsertPts(MSSAIP), PredCache(PIC), MSSAU(MSSAU),
1833 LI(li), DL(std::move(dl)), Alignment(Alignment),
1834 UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1835 SafetyInfo(SafetyInfo),
1836 CanInsertStoresInExitBlocks(CanInsertStoresInExitBlocks), Uses(Insts) {}
1837
1838 void insertStoresInLoopExitBlocks() {
1839 // Insert stores after in the loop exit blocks. Each exit block gets a
1840 // store of the live-out values that feed them. Since we've already told
1841 // the SSA updater about the defs in the loop and the preheader
1842 // definition, it is all set and we can start using it.
1843 DIAssignID *NewID = nullptr;
1844 for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1845 BasicBlock *ExitBlock = LoopExitBlocks[i];
1846 Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
// Both the stored value and the pointer may need LCSSA phis in the exit.
1847 LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1848 Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1849 Instruction *InsertPos = LoopInsertPts[i];
1850 StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1851 if (UnorderedAtomic)
1852 NewSI->setOrdering(AtomicOrdering::Unordered);
1853 NewSI->setAlignment(Alignment);
1854 NewSI->setDebugLoc(DL);
1855 // Attach DIAssignID metadata to the new store, generating it on the
1856 // first loop iteration.
1857 if (i == 0) {
1858 // NewSI will have its DIAssignID set here if there are any stores in
1859 // Uses with a DIAssignID attachment. This merged ID will then be
1860 // attached to the other inserted stores (in the branch below).
1861 NewSI->mergeDIAssignID(Uses);
1862 NewID = cast_or_null<DIAssignID>(
1863 NewSI->getMetadata(LLVMContext::MD_DIAssignID));
1864 } else {
1865 // Attach the DIAssignID (or nullptr) merged from Uses in the branch
1866 // above.
1867 NewSI->setMetadata(LLVMContext::MD_DIAssignID, NewID);
1868 }
1869
1870 if (AATags)
1871 NewSI->setAAMetadata(AATags);
1872
// Thread the new store into MemorySSA, chaining after the previous store
// inserted in this exit block (if any).
1873 MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1874 MemoryAccess *NewMemAcc;
1875 if (!MSSAInsertPoint) {
1876 NewMemAcc = MSSAU.createMemoryAccessInBB(
1877 NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1878 } else {
1879 NewMemAcc =
1880 MSSAU.createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1881 }
1882 MSSAInsertPts[i] = NewMemAcc;
1883 MSSAU.insertDef(cast<MemoryDef>(NewMemAcc), true);
1884 // FIXME: true for safety, false may still be correct.
1885 }
1886 }
1887
1888 void doExtraRewritesBeforeFinalDeletion() override {
1889 if (CanInsertStoresInExitBlocks)
1890 insertStoresInLoopExitBlocks();
1891 }
1892
// Keep the safety info and MemorySSA consistent as the promoter deletes the
// original in-loop accesses.
1893 void instructionDeleted(Instruction *I) const override {
1894 SafetyInfo.removeInstruction(I);
1895 MSSAU.removeMemoryAccess(I);
1896 }
1897
// Original stores are only deleted when their replacements could actually be
// inserted in the exit blocks; loads are always deletable.
1898 bool shouldDelete(Instruction *I) const override {
1899 if (isa<StoreInst>(I))
1900 return CanInsertStoresInExitBlocks;
1901 return true;
1902 }
1903};
1904
1905bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
1906 DominatorTree *DT) {
1907 // We can perform the captured-before check against any instruction in the
1908 // loop header, as the loop header is reachable from any instruction inside
1909 // the loop.
1910 // TODO: ReturnCaptures=true shouldn't be necessary here.
1911 return !PointerMayBeCapturedBefore(V, /* ReturnCaptures */ true,
1912 /* StoreCaptures */ true,
1913 L->getHeader()->getTerminator(), DT);
1914}
1915
1916/// Return true if we can prove that a caller cannot inspect the object if an
1917/// unwind occurs inside the loop.
1918bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
1919 DominatorTree *DT) {
1920 bool RequiresNoCaptureBeforeUnwind;
1921 if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind))
1922 return false;
1923
1924 return !RequiresNoCaptureBeforeUnwind ||
1925 isNotCapturedBeforeOrInLoop(Object, L, DT);
1926}
1927
1928bool isWritableObject(const Value *Object) {
1929 // TODO: Alloca might not be writable after its lifetime ends.
1930 // See https://github.com/llvm/llvm-project/issues/51838.
1931 if (isa<AllocaInst>(Object))
1932 return true;
1933
1934 // TODO: Also handle sret.
1935 if (auto *A = dyn_cast<Argument>(Object))
1936 return A->hasByValAttr();
1937
1938 if (auto *G = dyn_cast<GlobalVariable>(Object))
1939 return !G->isConstant();
1940
1941 // TODO: Noalias has nothing to do with writability, this should check for
1942 // an allocator function.
1943 return isNoAliasCall(Object);
1944}
1945
// Return true if no other thread can observe this object inside the loop.
// NOTE(review): original lines 1947, 1950 and 1952 were dropped by the
// extraction. The caller passes a TargetTransformInfo* as the fourth
// argument, so line 1947 presumably declared that parameter and opened the
// body; lines 1950/1952 presumably formed the return expression around the
// two fragments below (the `SingleThread` cl::opt declared in this file
// likely participates). Confirm against upstream LICM.cpp.
1946bool isThreadLocalObject(const Value *Object, const Loop *L, DominatorTree *DT,
1948 // The object must be function-local to start with, and then not captured
1949 // before/in the loop.
1951 isNotCapturedBeforeOrInLoop(Object, L, DT)) ||
1953}
1954
1955} // namespace
1956
1957/// Try to promote memory values to scalars by sinking stores out of the
1958/// loop and moving loads to before the loop. We do this by looping over
1959/// the stores in the loop, looking for stores to Must pointers which are
1960/// loop invariant.
1961///
// NOTE(review): the declaration line (original line 1962) and several
// parameter lines (1964-1967) were dropped by the extraction. The body below
// references ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI, DT and AC, so
// those lines presumably declared the exit-block/insert-point vectors, the
// PredIteratorCache, LoopInfo, DominatorTree and AssumptionCache parameters.
// Confirm against upstream LICM.cpp.
1963 const SmallSetVector<Value *, 8> &PointerMustAliases,
1968 const TargetLibraryInfo *TLI, TargetTransformInfo *TTI, Loop *CurLoop,
1969 MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
1970 OptimizationRemarkEmitter *ORE, bool AllowSpeculation,
1971 bool HasReadsOutsideSet) {
1972 // Verify inputs.
1973 assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1974 SafetyInfo != nullptr &&
1975 "Unexpected Input to promoteLoopAccessesToScalars");
1976
1977 LLVM_DEBUG({
1978 dbgs() << "Trying to promote set of must-aliased pointers:\n";
1979 for (Value *Ptr : PointerMustAliases)
1980 dbgs() << " " << *Ptr << "\n";
1981 });
1982 ++NumPromotionCandidates;
1983
// Any pointer in the must-alias set can represent the location.
1984 Value *SomePtr = *PointerMustAliases.begin();
1985 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1986
1987 // It is not safe to promote a load/store from the loop if the load/store is
1988 // conditional. For example, turning:
1989 //
1990 // for () { if (c) *P += 1; }
1991 //
1992 // into:
1993 //
1994 // tmp = *P; for () { if (c) tmp +=1; } *P = tmp;
1995 //
1996 // is not safe, because *P may only be valid to access if 'c' is true.
1997 //
1998 // The safety property divides into two parts:
1999 // p1) The memory may not be dereferenceable on entry to the loop. In this
2000 // case, we can't insert the required load in the preheader.
2001 // p2) The memory model does not allow us to insert a store along any dynamic
2002 // path which did not originally have one.
2003 //
2004 // If at least one store is guaranteed to execute, both properties are
2005 // satisfied, and promotion is legal.
2006 //
2007 // This, however, is not a necessary condition. Even if no store/load is
2008 // guaranteed to execute, we can still establish these properties.
2009 // We can establish (p1) by proving that hoisting the load into the preheader
2010 // is safe (i.e. proving dereferenceability on all paths through the loop). We
2011 // can use any access within the alias set to prove dereferenceability,
2012 // since they're all must alias.
2013 //
2014 // There are two ways establish (p2):
2015 // a) Prove the location is thread-local. In this case the memory model
2016 // requirement does not apply, and stores are safe to insert.
2017 // b) Prove a store dominates every exit block. In this case, if an exit
2018 // blocks is reached, the original dynamic path would have taken us through
2019 // the store, so inserting a store into the exit block is safe. Note that this
2020 // is different from the store being guaranteed to execute. For instance,
2021 // if an exception is thrown on the first iteration of the loop, the original
2022 // store is never executed, but the exit blocks are not executed either.
2023
2024 bool DereferenceableInPH = false;
2025 bool StoreIsGuanteedToExecute = false;
2026 bool FoundLoadToPromote = false;
2027 // Goes from Unknown to either Safe or Unsafe, but can't switch between them.
2028 enum {
2029 StoreSafe,
2030 StoreUnsafe,
2031 StoreSafetyUnknown,
2032 } StoreSafety = StoreSafetyUnknown;
2033
// NOTE(review): original line 2034 was dropped by the extraction —
// presumably the declaration of the `LoopUses` vector collected below.
2035
2036 // We start with an alignment of one and try to find instructions that allow
2037 // us to prove better alignment.
2038 Align Alignment;
2039 // Keep track of which types of access we see
2040 bool SawUnorderedAtomic = false;
2041 bool SawNotAtomic = false;
2042 AAMDNodes AATags;
2043
2044 const DataLayout &MDL = Preheader->getModule()->getDataLayout();
2045
2046 // If there are reads outside the promoted set, then promoting stores is
2047 // definitely not safe.
2048 if (HasReadsOutsideSet)
2049 StoreSafety = StoreUnsafe;
2050
2051 if (StoreSafety == StoreSafetyUnknown && SafetyInfo->anyBlockMayThrow()) {
2052 // If a loop can throw, we have to insert a store along each unwind edge.
2053 // That said, we can't actually make the unwind edge explicit. Therefore,
2054 // we have to prove that the store is dead along the unwind edge. We do
2055 // this by proving that the caller can't have a reference to the object
2056 // after return and thus can't possibly load from the object.
2057 Value *Object = getUnderlyingObject(SomePtr);
2058 if (!isNotVisibleOnUnwindInLoop(Object, CurLoop, DT))
2059 StoreSafety = StoreUnsafe;
2060 }
2061
2062 // Check that all accesses to pointers in the alias set use the same type.
2063 // We cannot (yet) promote a memory location that is loaded and stored in
2064 // different sizes. While we are at it, collect alignment and AA info.
2065 Type *AccessTy = nullptr;
2066 for (Value *ASIV : PointerMustAliases) {
2067 for (Use &U : ASIV->uses()) {
2068 // Ignore instructions that are outside the loop.
2069 Instruction *UI = dyn_cast<Instruction>(U.getUser());
2070 if (!UI || !CurLoop->contains(UI))
2071 continue;
2072
2073 // If there is an non-load/store instruction in the loop, we can't promote
2074 // it.
2075 if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
2076 if (!Load->isUnordered())
2077 return false;
2078
2079 SawUnorderedAtomic |= Load->isAtomic();
2080 SawNotAtomic |= !Load->isAtomic();
2081 FoundLoadToPromote = true;
2082
2083 Align InstAlignment = Load->getAlign();
2084
2085 // Note that proving a load safe to speculate requires proving
2086 // sufficient alignment at the target location. Proving it guaranteed
2087 // to execute does as well. Thus we can increase our guaranteed
2088 // alignment as well.
2089 if (!DereferenceableInPH || (InstAlignment > Alignment))
// NOTE(review): original line 2090 was dropped by the extraction —
// presumably `if (isSafeToExecuteUnconditionally(`, whose argument list
// continues on the next two lines. Confirm against upstream LICM.cpp.
2091 *Load, DT, TLI, CurLoop, SafetyInfo, ORE,
2092 Preheader->getTerminator(), AC, AllowSpeculation)) {
2093 DereferenceableInPH = true;
2094 Alignment = std::max(Alignment, InstAlignment);
2095 }
2096 } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2097 // Stores *of* the pointer are not interesting, only stores *to* the
2098 // pointer.
2099 if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
2100 continue;
2101 if (!Store->isUnordered())
2102 return false;
2103
2104 SawUnorderedAtomic |= Store->isAtomic();
2105 SawNotAtomic |= !Store->isAtomic();
2106
2107 // If the store is guaranteed to execute, both properties are satisfied.
2108 // We may want to check if a store is guaranteed to execute even if we
2109 // already know that promotion is safe, since it may have higher
2110 // alignment than any other guaranteed stores, in which case we can
2111 // raise the alignment on the promoted store.
2112 Align InstAlignment = Store->getAlign();
2113 bool GuaranteedToExecute =
2114 SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
2115 StoreIsGuanteedToExecute |= GuaranteedToExecute;
2116 if (GuaranteedToExecute) {
2117 DereferenceableInPH = true;
2118 if (StoreSafety == StoreSafetyUnknown)
2119 StoreSafety = StoreSafe;
2120 Alignment = std::max(Alignment, InstAlignment);
2121 }
2122
2123 // If a store dominates all exit blocks, it is safe to sink.
2124 // As explained above, if an exit block was executed, a dominating
2125 // store must have been executed at least once, so we are not
2126 // introducing stores on paths that did not have them.
2127 // Note that this only looks at explicit exit blocks. If we ever
2128 // start sinking stores into unwind edges (see above), this will break.
2129 if (StoreSafety == StoreSafetyUnknown &&
2130 llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2131 return DT->dominates(Store->getParent(), Exit);
2132 }))
2133 StoreSafety = StoreSafe;
2134
2135 // If the store is not guaranteed to execute, we may still get
2136 // deref info through it.
2137 if (!DereferenceableInPH) {
2138 DereferenceableInPH = isDereferenceableAndAlignedPointer(
2139 Store->getPointerOperand(), Store->getValueOperand()->getType(),
2140 Store->getAlign(), MDL, Preheader->getTerminator(), AC, DT, TLI);
2141 }
2142 } else
2143 continue; // Not a load or store.
2144
// All promoted accesses must agree on the access type.
2145 if (!AccessTy)
2146 AccessTy = getLoadStoreType(UI);
2147 else if (AccessTy != getLoadStoreType(UI))
2148 return false;
2149
2150 // Merge the AA tags.
2151 if (LoopUses.empty()) {
2152 // On the first load/store, just take its AA tags.
2153 AATags = UI->getAAMetadata();
2154 } else if (AATags) {
2155 AATags = AATags.merge(UI->getAAMetadata());
2156 }
2157
2158 LoopUses.push_back(UI);
2159 }
2160 }
2161
2162 // If we found both an unordered atomic instruction and a non-atomic memory
2163 // access, bail. We can't blindly promote non-atomic to atomic since we
2164 // might not be able to lower the result. We can't downgrade since that
2165 // would violate memory model. Also, align 0 is an error for atomics.
2166 if (SawUnorderedAtomic && SawNotAtomic)
2167 return false;
2168
2169 // If we're inserting an atomic load in the preheader, we must be able to
2170 // lower it. We're only guaranteed to be able to lower naturally aligned
2171 // atomics.
2172 if (SawUnorderedAtomic && Alignment < MDL.getTypeStoreSize(AccessTy))
2173 return false;
2174
2175 // If we couldn't prove we can hoist the load, bail.
2176 if (!DereferenceableInPH) {
2177 LLVM_DEBUG(dbgs() << "Not promoting: Not dereferenceable in preheader\n");
2178 return false;
2179 }
2180
2181 // We know we can hoist the load, but don't have a guaranteed store.
2182 // Check whether the location is writable and thread-local. If it is, then we
2183 // can insert stores along paths which originally didn't have them without
2184 // violating the memory model.
2185 if (StoreSafety == StoreSafetyUnknown) {
2186 Value *Object = getUnderlyingObject(SomePtr);
2187 if (isWritableObject(Object) &&
2188 isThreadLocalObject(Object, CurLoop, DT, TTI))
2189 StoreSafety = StoreSafe;
2190 }
2191
2192 // If we've still failed to prove we can sink the store, hoist the load
2193 // only, if possible.
2194 if (StoreSafety != StoreSafe && !FoundLoadToPromote)
2195 // If we cannot hoist the load either, give up.
2196 return false;
2197
2198 // Lets do the promotion!
2199 if (StoreSafety == StoreSafe) {
2200 LLVM_DEBUG(dbgs() << "LICM: Promoting load/store of the value: " << *SomePtr
2201 << '\n');
2202 ++NumLoadStorePromoted;
2203 } else {
2204 LLVM_DEBUG(dbgs() << "LICM: Promoting load of the value: " << *SomePtr
2205 << '\n');
2206 ++NumLoadPromoted;
2207 }
2208
2209 ORE->emit([&]() {
2210 return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2211 LoopUses[0])
2212 << "Moving accesses to memory location out of the loop";
2213 });
2214
2215 // Look at all the loop uses, and try to merge their locations.
2216 std::vector<const DILocation *> LoopUsesLocs;
2217 for (auto *U : LoopUses)
2218 LoopUsesLocs.push_back(U->getDebugLoc().get());
2219 auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));
2220
2221 // We use the SSAUpdater interface to insert phi nodes as required.
// NOTE(review): original line 2222 was dropped by the extraction —
// presumably the declaration of the `NewPHIs` vector passed to SSAUpdater.
2223 SSAUpdater SSA(&NewPHIs);
2224 LoopPromoter Promoter(SomePtr, LoopUses, SSA, ExitBlocks, InsertPts,
2225 MSSAInsertPts, PIC, MSSAU, *LI, DL, Alignment,
2226 SawUnorderedAtomic, AATags, *SafetyInfo,
2227 StoreSafety == StoreSafe);
2228
2229 // Set up the preheader to have a definition of the value. It is the live-out
2230 // value from the preheader that uses in the loop will use.
2231 LoadInst *PreheaderLoad = nullptr;
2232 if (FoundLoadToPromote || !StoreIsGuanteedToExecute) {
2233 PreheaderLoad =
2234 new LoadInst(AccessTy, SomePtr, SomePtr->getName() + ".promoted",
2235 Preheader->getTerminator());
2236 if (SawUnorderedAtomic)
2237 PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2238 PreheaderLoad->setAlignment(Alignment);
2239 PreheaderLoad->setDebugLoc(DebugLoc());
2240 if (AATags)
2241 PreheaderLoad->setAAMetadata(AATags);
2242
2243 MemoryAccess *PreheaderLoadMemoryAccess = MSSAU.createMemoryAccessInBB(
2244 PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2245 MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2246 MSSAU.insertUse(NewMemUse, /*RenameUses=*/true);
2247 SSA.AddAvailableValue(Preheader, PreheaderLoad);
2248 } else {
// No load needed: every path that reads the location sees a stored value.
2249 SSA.AddAvailableValue(Preheader, PoisonValue::get(AccessTy));
2250 }
2251
2252 if (VerifyMemorySSA)
2253 MSSAU.getMemorySSA()->verifyMemorySSA();
2254 // Rewrite all the loads in the loop and remember all the definitions from
2255 // stores in the loop.
2256 Promoter.run(LoopUses);
2257
2258 if (VerifyMemorySSA)
2259 MSSAU.getMemorySSA()->verifyMemorySSA();
2260 // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2261 if (PreheaderLoad && PreheaderLoad->use_empty())
2262 eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);
2263
2264 return true;
2265}
2266
2267static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2268 function_ref<void(Instruction *)> Fn) {
2269 for (const BasicBlock *BB : L->blocks())
2270 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2271 for (const auto &Access : *Accesses)
2272 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2273 Fn(MUD->getMemoryInst());
2274}
2275
2276// The bool indicates whether there might be reads outside the set, in which
2277// case only loads may be promoted.
// NOTE(review): the declaration lines (original lines 2278-2279) were dropped
// by the extraction. Per this function's index entry later in this file the
// signature is `static SmallVector<PointersAndHasReadsOutsideSet, 0>
// collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L)`.
2280 BatchAAResults BatchAA(*AA);
2281 AliasSetTracker AST(BatchAA);
2282
// Only loads/stores through loop-invariant pointers are candidates.
2283 auto IsPotentiallyPromotable = [L](const Instruction *I) {
2284 if (const auto *SI = dyn_cast<StoreInst>(I))
2285 return L->isLoopInvariant(SI->getPointerOperand());
2286 if (const auto *LI = dyn_cast<LoadInst>(I))
2287 return L->isLoopInvariant(LI->getPointerOperand());
2288 return false;
2289 };
2290
2291 // Populate AST with potentially promotable accesses.
2292 SmallPtrSet<Value *, 16> AttemptingPromotion;
2293 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2294 if (IsPotentiallyPromotable(I)) {
2295 AttemptingPromotion.insert(I);
2296 AST.add(I);
2297 }
2298 });
2299
2300 // We're only interested in must-alias sets that contain a mod.
// NOTE(review): original line 2301 was dropped by the extraction —
// presumably the declaration of the `Sets` vector of (AliasSet*, bool) pairs
// populated below. Confirm against upstream LICM.cpp.
2302 for (AliasSet &AS : AST)
2303 if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
2304 Sets.push_back({&AS, false});
2305
2306 if (Sets.empty())
2307 return {}; // Nothing to promote...
2308
2309 // Discard any sets for which there is an aliasing non-promotable access.
2310 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2311 if (AttemptingPromotion.contains(I))
2312 return;
2313
// NOTE(review): original line 2314 was dropped by the extraction —
// presumably the opener of an erase-if over `Sets` whose lambda binds `Pair`
// (used below) and whose closing `});` appears before the outer one.
2315 ModRefInfo MR = Pair.getPointer()->aliasesUnknownInst(I, BatchAA);
2316 // Cannot promote if there are writes outside the set.
2317 if (isModSet(MR))
2318 return true;
2319 if (isRefSet(MR)) {
2320 // Remember reads outside the set.
2321 Pair.setInt(true);
2322 // If this is a mod-only set and there are reads outside the set,
2323 // we will not be able to promote, so bail out early.
2324 return !Pair.getPointer()->isRef();
2325 }
2326 return false;
2327 });
2328 });
2329
// NOTE(review): original line 2330 was dropped by the extraction —
// presumably the declaration of `Result`
// (SmallVector<PointersAndHasReadsOutsideSet, 0> per the return type).
2331 for (auto [Set, HasReadsOutsideSet] : Sets) {
2332 SmallSetVector<Value *, 8> PointerMustAliases;
2333 for (const auto &ASI : *Set)
2334 PointerMustAliases.insert(ASI.getValue());
2335 Result.emplace_back(std::move(PointerMustAliases), HasReadsOutsideSet);
2336 }
2337
2338 return Result;
2339}
2340
// Decide whether the location read by MemoryUse MU may be invalidated inside
// the loop, preventing hoisting (or, for the sink case, sinking) of I.
// NOTE(review): the first signature line (original line 2341) was dropped by
// the extraction. Per this function's index entry later in this file it is
// `static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,`.
2342 Loop *CurLoop, Instruction &I,
2343 SinkAndHoistLICMFlags &Flags,
2344 bool InvariantGroup) {
2345 // For hoisting, use the walker to determine safety
2346 if (!Flags.getIsSink()) {
2347 MemoryAccess *Source;
2348 // See declaration of SetLicmMssaOptCap for usage details.
2349 if (Flags.tooManyClobberingCalls())
2350 Source = MU->getDefiningAccess();
2351 else {
2352 Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
2353 Flags.incrementClobberingCalls();
2354 }
2355 // If hoisting an invariant group, we only need to check that there
2356 // is no store to the loaded pointer between the start of the loop,
2357 // and the load (since all values must be the same).
2358
2359 // This can be checked in two conditions:
2360 // 1) if the memoryaccess is outside the loop
2361 // 2) the earliest access is at the loop header,
2362 // if the memory loaded is the phi node
2363
2364 return !MSSA->isLiveOnEntryDef(Source) &&
2365 CurLoop->contains(Source->getBlock()) &&
2366 !(InvariantGroup && Source->getBlock() == CurLoop->getHeader() && isa<MemoryPhi>(Source));
2367 }
2368
2369 // For sinking, we'd need to check all Defs below this use. The getClobbering
2370 // call will look on the backedge of the loop, but will check aliasing with
2371 // the instructions on the previous iteration.
2372 // For example:
2373 // for (i ... )
2374 // load a[i] ( Use (LoE)
2375 // store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2376 // i++;
2377 // The load sees no clobbering inside the loop, as the backedge alias check
2378 // does phi translation, and will check aliasing against store a[i-1].
2379 // However sinking the load outside the loop, below the store is incorrect.
2380
2381 // For now, only sink if there are no Defs in the loop, and the existing ones
2382 // precede the use and are in the same block.
2383 // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2384 // needs PostDominatorTreeAnalysis.
2385 // FIXME: More precise: no Defs that alias this Use.
2386 if (Flags.tooManyMemoryAccesses())
2387 return true;
2388 for (auto *BB : CurLoop->getBlocks())
2389 if (pointerInvalidatedByBlock(*BB, *MSSA, *MU))
2390 return true;
2391 // When sinking, the source block may not be part of the loop so check it.
2392 if (!CurLoop->contains(&I))
2393 return pointerInvalidatedByBlock(*I.getParent(), *MSSA, *MU);
2394
2395 return false;
2396}
2397
// Return true if some MemoryDef in this block could clobber MU: any def in a
// different block, or a def in MU's block that does not locally dominate MU.
// NOTE(review): the declaration line (original line 2398) was dropped by the
// extraction — presumably `static bool pointerInvalidatedByBlock(BasicBlock
// &BB, MemorySSA &MSSA, MemoryUse &MU) {`, matching the call sites above.
2399 if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2400 for (const auto &MA : *Accesses)
2401 if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2402 if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2403 return true;
2404 return false;
2405}
2406
// Try to simplify (A < INV_1) op (A < INV_2) — where INV_1/INV_2 are loop
// invariant and `op` is and/or (bitwise or logical) — into a single compare
// against min/max(INV_1, INV_2) computed in the preheader. Returns true and
// rewrites I if the transform applied.
2407static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2408 MemorySSAUpdater &MSSAU) {
2409 bool Inverse = false;
2410 bool IsLogical = false;
2411 using namespace PatternMatch;
2412 Value *Cond1, *Cond2;
// Classify the combining operation; `or` forms are handled by inverting the
// predicates (De Morgan), `logical` forms additionally require a freeze.
2413 if (match(&I, m_Or(m_Value(Cond1), m_Value(Cond2))))
2414 Inverse = true;
2415 else if (match(&I, m_LogicalOr(m_Value(Cond1), m_Value(Cond2)))) {
2416 Inverse = true;
2417 IsLogical = true;
2418 } else if (match(&I, m_And(m_Value(Cond1), m_Value(Cond2)))) {
2419 // Do nothing
2420 } else if (match(&I, m_LogicalAnd(m_Value(Cond1), m_Value(Cond2))))
2421 IsLogical = true;
2422 else
2423 return false;
2424
// Match C as a one-use icmp of a variant LHS against an invariant RHS,
// canonicalizing operand order and predicate as needed.
2425 auto MatchICmpAgainstInvariant = [&](Value *C, ICmpInst::Predicate &P,
2426 Value *&LHS, Value *&RHS) {
2427 if (!match(C, m_OneUse(m_ICmp(P, m_Value(LHS), m_Value(RHS)))))
2428 return false;
2429 if (!LHS->getType()->isIntegerTy())
2430 return false;
// NOTE(review): original line 2431 was dropped by the extraction —
// presumably a check that rejects non-relational predicates before this
// `return false;`. Confirm against upstream LICM.cpp.
2432 return false;
2433 if (L.isLoopInvariant(LHS)) {
2434 std::swap(LHS, RHS);
// NOTE(review): original line 2435 was dropped — presumably swapping the
// predicate to match the swapped operands.
2436 }
2437 if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS))
2438 return false;
2439 if (Inverse)
// NOTE(review): original line 2440 was dropped — presumably inverting the
// predicate for the `or` forms.
2441 return true;
2442 };
2443 ICmpInst::Predicate P1, P2;
2444 Value *LHS1, *LHS2, *RHS1, *RHS2;
2445 if (!MatchICmpAgainstInvariant(Cond1, P1, LHS1, RHS1) ||
2446 !MatchICmpAgainstInvariant(Cond2, P2, LHS2, RHS2))
2447 return false;
// Both compares must use the same predicate and the same variant operand.
2448 if (P1 != P2 || LHS1 != LHS2)
2449 return false;
2450
2451 // Everything is fine, we can do the transform.
2452 bool UseMin = ICmpInst::isLT(P1) || ICmpInst::isLE(P1);
2453 assert(
2454 (UseMin || ICmpInst::isGT(P1) || ICmpInst::isGE(P1)) &&
2455 "Relational predicate is either less (or equal) or greater (or equal)!");
// NOTE(review): original line 2456 was dropped — presumably the declaration
// of the intrinsic id selected on ICmpInst::isSigned(P1), given the
// ternary continuation below and the use of `id` further down.
2457 ? (UseMin ? Intrinsic::smin : Intrinsic::smax)
2458 : (UseMin ? Intrinsic::umin : Intrinsic::umax);
2459 auto *Preheader = L.getLoopPreheader();
2460 assert(Preheader && "Loop is not in simplify form?");
2461 IRBuilder<> Builder(Preheader->getTerminator());
2462 // We are about to create a new guaranteed use for RHS2 which might not exist
2463 // before (if it was a non-taken input of logical and/or instruction). If it
2464 // was poison, we need to freeze it. Note that no new use for LHS and RHS1 are
2465 // introduced, so they don't need this.
2466 if (IsLogical)
2467 RHS2 = Builder.CreateFreeze(RHS2, RHS2->getName() + ".fr");
2468 Value *NewRHS = Builder.CreateBinaryIntrinsic(
2469 id, RHS1, RHS2, nullptr, StringRef("invariant.") +
2470 (ICmpInst::isSigned(P1) ? "s" : "u") +
2471 (UseMin ? "min" : "max"));
2472 Builder.SetInsertPoint(&I);
// NOTE(review): original lines 2473 and 2475 were dropped — presumably
// declaring the final predicate `P` from P1 and re-inverting it when
// `Inverse` is set, given the use of `P` in the CreateICmp below.
2474 if (Inverse)
2476 Value *NewCond = Builder.CreateICmp(P, LHS1, NewRHS);
2477 NewCond->takeName(&I);
2478 I.replaceAllUsesWith(NewCond);
// Drop the now-dead combined condition and both one-use compares.
2479 eraseInstruction(I, SafetyInfo, MSSAU);
2480 eraseInstruction(*cast<Instruction>(Cond1), SafetyInfo, MSSAU);
2481 eraseInstruction(*cast<Instruction>(Cond2), SafetyInfo, MSSAU);
2482 return true;
2483}
2484
2485/// Little predicate that returns true if the specified basic block is in
2486/// a subloop of the current one, not the current one itself.
2487///
2488static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2489 assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
2490 return LI->getLoopFor(BB) != CurLoop;
2491}
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
SmallPtrSet< MachineInstr *, 2 > Uses
assume Assume Builder
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Addr
#define DEBUG_TYPE
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
Definition: IVUsers.cpp:48
static cl::opt< bool > SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false), cl::desc("Force thread model single in LICM pass"))
static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT, LoopInfo *LI, const Loop *CurLoop, LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU)
Definition: LICM.cpp:1541
static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU)
Try to simplify things like (A < INV_1 AND icmp A < INV_2) into (A < min(INV_1, INV_2)),...
Definition: LICM.cpp:2407
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, TargetTransformInfo *TTI, bool &FreeInLoop, bool LoopNestMode)
Return true if the only users of this instruction are outside of the loop.
Definition: LICM.cpp:1371
static Instruction * cloneInstructionInExitBlock(Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI, const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU)
Definition: LICM.cpp:1413
static cl::opt< bool > ControlFlowHoisting("licm-control-flow-hoisting", cl::Hidden, cl::init(false), cl::desc("Enable control flow (and PHI) hoisting in LICM"))
static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU, Loop *CurLoop, Instruction &I, SinkAndHoistLICMFlags &Flags, bool InvariantGroup)
Definition: LICM.cpp:2341
static SmallVector< PointersAndHasReadsOutsideSet, 0 > collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L)
Definition: LICM.cpp:2279
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop, BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU, ScalarEvolution *SE, OptimizationRemarkEmitter *ORE)
When an instruction is found to only use loop invariant operands that is safe to hoist,...
Definition: LICM.cpp:1718
static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo)
Definition: LICM.cpp:1524
static void moveInstructionBefore(Instruction &I, Instruction &Dest, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, ScalarEvolution *SE)
Definition: LICM.cpp:1492
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT, const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE)
When an instruction is found to only be used outside of the loop, this function moves it to the exit ...
Definition: LICM.cpp:1613
static cl::opt< uint32_t > MaxNumUsesTraversed("licm-max-num-uses-traversed", cl::Hidden, cl::init(8), cl::desc("Max num uses visited for identifying load " "invariance in loop using invariant start (default = 8)"))
static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L, function_ref< void(Instruction *)> Fn)
Definition: LICM.cpp:2267
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT, Loop *CurLoop)
Definition: LICM.cpp:1052
static Instruction * sinkThroughTriviallyReplaceablePHI(PHINode *TPN, Instruction *I, LoopInfo *LI, SmallDenseMap< BasicBlock *, Instruction *, 32 > &SunkCopies, const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop, MemorySSAUpdater &MSSAU)
Definition: LICM.cpp:1506
licm
Definition: LICM.cpp:356
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI)
Little predicate that returns true if the specified basic block is in a subloop of the current one,...
Definition: LICM.cpp:2488
static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU)
Definition: LICM.cpp:1485
static bool isSafeToExecuteUnconditionally(Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE, const Instruction *CtxI, AssumptionCache *AC, bool AllowSpeculation)
Only sink or hoist an instruction if it is not a trapping instruction, or if the instruction is known...
Definition: LICM.cpp:1763
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I)
Returns true if a PHINode is a trivially replaceable with an Instruction.
Definition: LICM.cpp:1331
std::pair< SmallSetVector< Value *, 8 >, bool > PointersAndHasReadsOutsideSet
Definition: LICM.cpp:192
static cl::opt< bool > DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false), cl::desc("Disable memory promotion in LICM pass"))
Memory promotion is enabled by default.
static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop, const TargetTransformInfo *TTI)
Return true if the instruction is free in the loop.
Definition: LICM.cpp:1340
static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA, MemoryUse &MU)
Definition: LICM.cpp:2398
This file defines the interface for the loop nest analysis.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
Machine Loop Invariant Code Motion
Memory SSA
Definition: MemorySSA.cpp:71
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
This file contains the declarations for metadata subclasses.
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
#define P(N)
PassInstrumentationCallbacks PIC
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:55
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:59
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:52
This file provides a priority worklist.
@ SI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file defines generic set operations that may be used on set's of different types,...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
This pass exposes codegen information to IR-level passes.
@ Flags
Definition: TextStubV5.cpp:93
static cl::opt< bool > DisablePromotion("disable-type-promotion", cl::Hidden, cl::init(false), cl::desc("Disable type promotion pass"))
Value * RHS
Value * LHS
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
Check whether or not an instruction may read or write the optionally specified memory location.
ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, bool IgnoreLocals=false)
Returns a bitmask that should be unconditionally applied to the ModRef info of a memory location.
MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
void add(Value *Ptr, LocationSize Size, const AAMDNodes &AAInfo)
These methods are used to add different types of instructions to the alias sets.
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:620
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
void replaceSuccessorsPhiUsesWith(BasicBlock *Old, BasicBlock *New)
Update all phi nodes in this basic block's successors to refer to basic block New instead of basic bl...
Definition: BasicBlock.cpp:507
iterator end()
Definition: BasicBlock.h:325
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:323
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:254
void moveBefore(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it into the function that MovePos lives ...
Definition: BasicBlock.cpp:136
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:217
const Instruction & front() const
Definition: BasicBlock.h:335
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:105
const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
Definition: BasicBlock.cpp:323
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:112
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:87
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:35
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:127
bool canSplitPredecessors() const
Definition: BasicBlock.cpp:379
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:146
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1353
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:718
bool isSigned() const
Definition: InstrTypes.h:957
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:859
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:832
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
Definition: Constants.cpp:927
This is the shared class of boolean and integer constants.
Definition: Constants.h:78
bool isNegative() const
Definition: Constants.h:192
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition: Constants.h:151
This is an important base class in LLVM.
Definition: Constant.h:41
Assignment ID.
static const DILocation * getMergedLocations(ArrayRef< const DILocation * > Locs)
Try to combine the vector of locations passed as input in a single one.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: DataLayout.h:468
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:151
iterator end()
Definition: DenseMap.h:84
DomTreeNodeBase * getIDom() const
NodeT * getBlock() const
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:314
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:321
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
This implementation of LoopSafetyInfo use ImplicitControlFlowTracking to give precise answers on "may...
Definition: MustExecute.h:132
bool doesNotWriteMemoryBefore(const BasicBlock *BB, const Loop *CurLoop) const
Returns true if we could not execute a memory-modifying instruction before we enter BB under assumpti...
void removeInstruction(const Instruction *Inst)
Inform safety info that we are planning to remove the instruction Inst from its block.
Definition: MustExecute.cpp:99
bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const override
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
bool anyBlockMayThrow() const override
Returns true iff any block of the loop for which this info is contains an instruction that may throw ...
Definition: MustExecute.cpp:75
void computeLoopSafetyInfo(const Loop *CurLoop) override
Computes safety information for a loop checks loop body & header for the possibility of may throw exc...
Definition: MustExecute.cpp:79
void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB)
Inform the safety info that we are planning to insert a new instruction Inst into the basic block BB.
Definition: MustExecute.cpp:93
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2564
void mergeDIAssignID(ArrayRef< const Instruction * > SourceInstructions)
Merge the DIAssignID metadata from this instruction and those attached to instructions in SourceInstr...
Definition: DebugInfo.cpp:888
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:70
void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
Definition: Metadata.cpp:1513
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:684
const BasicBlock * getParent() const
Definition: Instruction.h:90
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:87
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:275
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1455
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
Definition: Metadata.cpp:1499
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:355
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:54
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition: LICM.cpp:297
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Definition: LICM.cpp:274
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
PreservedAnalyses run(LoopNest &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Definition: LICM.cpp:307
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition: LICM.cpp:338
This class provides an interface for updating the loop pass manager based on mutations to the loop ne...
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
This is an alternative analysis pass to BranchProbabilityInfoWrapperPass.
Helper class for promoting a collection of loads and stores into SSA Form using the SSAUpdater.
Definition: SSAUpdater.h:147
An instruction for reading from memory.
Definition: Instructions.h:177
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:270
void setAlignment(Align Align)
Definition: Instructions.h:224
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this load instruction.
Definition: Instructions.h:234
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:1268
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
Definition: LoopInfo.h:139
BlockT * getHeader() const
Definition: LoopInfo.h:105
void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase< BlockT, LoopT > &LI)
This method is used by other analyses to update loop information.
Definition: LoopInfoImpl.h:258
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
Definition: LoopInfoImpl.h:183
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
Definition: LoopInfo.h:188
void getUniqueExitBlocks(SmallVectorImpl< BlockT * > &ExitBlocks) const
Return all unique successor blocks of this loop.
Definition: LoopInfoImpl.h:142
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Definition: LoopInfo.h:114
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
void perform(LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
Definition: LoopIterator.h:180
void verify(const DominatorTreeBase< BlockT, false > &DomTree) const
Definition: LoopInfoImpl.h:699
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Definition: LoopInfo.h:992
The legacy pass manager's analysis pass to compute loop information.
Definition: LoopInfo.h:1293
bool wouldBeOutOfLoopUseRequiringLCSSA(const Value *V, const BasicBlock *ExitBB) const
Definition: LoopInfo.cpp:933
This class represents a loop nest and can be used to query its properties.
Function * getParent() const
Return the function to which the loop-nest belongs.
Loop & getOutermostLoop() const
Return the outermost loop in the loop nest.
Captures loop safety information.
Definition: MustExecute.h:60
void copyColors(BasicBlock *New, BasicBlock *Old)
Copy colors of block Old into the block New.
Definition: MustExecute.cpp:36
const DenseMap< BasicBlock *, ColorVector > & getBlockColors() const
Returns block colors map that is used to update funclet operand bundles.
Definition: MustExecute.cpp:32
virtual bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const =0
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:547
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition: LoopInfo.cpp:66
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition: LoopInfo.cpp:60
BasicBlock * getBlock() const
Definition: MemorySSA.h:164
Summary of how a function affects memory in the program.
Definition: ModRef.h:63
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition: ModRef.h:200
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition: ModRef.h:194
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition: ModRef.h:191
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
An analysis that produces MemorySSA for a function.
Definition: MemorySSA.h:936
MemorySSA * getMemorySSA() const
Get handle on MemorySSA.
void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point)
Create a MemoryAccess in MemorySSA at a specified point in a block, with a specified clobbering defin...
void insertUse(MemoryUse *Use, bool RenameUses=false)
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
MemoryUseOrDef * createMemoryAccessAfter(Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt)
void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, MemorySSA::InsertionPlace Where)
void wireOldPredecessorsToNewImmediatePredecessor(BasicBlock *Old, BasicBlock *New, ArrayRef< BasicBlock * > Preds, bool IdenticalEdgesWereMerged=true)
A new empty BasicBlock (New) now branches directly to Old.
MemoryAccess * getClobberingMemoryAccess(const Instruction *I, BatchAAResults &AA)
Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...
Definition: MemorySSA.h:1046
Legacy analysis pass which computes MemorySSA.
Definition: MemorySSA.h:986
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition: MemorySSA.h:700
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
Definition: MemorySSA.h:757
MemorySSAWalker * getSkipSelfWalker()
Definition: MemorySSA.cpp:1570
bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
Definition: MemorySSA.cpp:2115
void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
Definition: MemorySSA.cpp:1862
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition: MemorySSA.h:717
void ensureOptimizedUses()
By default, uses are not optimized during MemorySSA construction.
Definition: MemorySSA.cpp:2142
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
Definition: MemorySSA.h:765
bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in the same basic block, determine whether MemoryAccess A dominates MemoryA...
Definition: MemorySSA.cpp:2084
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Definition: MemorySSA.h:737
Class that has the common methods + fields of memory uses/defs.
Definition: MemorySSA.h:252
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition: MemorySSA.h:262
Represents read-only accesses to memory.
Definition: MemorySSA.h:312
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:398
The optimization diagnostic interface.
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
void setIncomingBlock(unsigned i, BasicBlock *BB)
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:91
PointerIntPair - This class implements a pair of a pointer and small integer.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1750
PredIteratorCache - This class is an extremely trivial cache for predecessor iterator queries.
size_t size(BasicBlock *BB) const
ArrayRef< BasicBlock * > get(BasicBlock *BB)
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:152
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:158
bool empty() const
Determine if the PriorityWorklist is empty or not.
bool insert(const T &X)
Insert a new element into the PriorityWorklist.
Helper class for SSA formation on a set of values defined in multiple blocks.
Definition: SSAUpdater.h:39
The main scalar evolution driver.
void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
bool remove(const value_type &X)
Remove an item from the set vector.
Definition: SetVector.h:168
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:152
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:83
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:93
Flags controlling how much is checked when sinking or hoisting instructions.
Definition: LoopUtils.h:114
SinkAndHoistLICMFlags(unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink, Loop &L, MemorySSA &MSSA)
Definition: LICM.cpp:372
unsigned LicmMssaNoAccForPromotionCap
Definition: LoopUtils.h:133
A version of PriorityWorklist that selects small size optimized data structures for the vector and ma...
size_type size() const
Definition: SmallPtrSet.h:93
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
Definition: SmallPtrSet.h:379
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:383
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
iterator begin() const
Definition: SmallPtrSet.h:403
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:389
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:450
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:312
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void reserve(size_type N)
Definition: SmallVector.h:667
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
An instruction for storing to memory.
Definition: Instructions.h:301
void setAlignment(Align Align)
Definition: Instructions.h:349
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
Definition: Instructions.h:360
static unsigned getPointerOperandIndex()
Definition: Instructions.h:395
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Provides information about what library functions are available for the current target.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCC_Free
Expected to fold away in lowering.
InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
EltTy front() const
unsigned size() const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static IntegerType * getInt8Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:231
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition: Value.cpp:156
std::string getNameOrAsOperand() const
Definition: Value.cpp:443
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:532
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:994
user_iterator_impl< User > user_iterator
Definition: Value.h:390
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:308
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:381
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:182
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:166
An efficient, type-erasing, non-owning reference to a callable.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:76
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
SmallVector< DomTreeNode *, 16 > collectChildrenInLoop(DomTreeNode *N, const Loop *CurLoop)
Does a BFS from a given node to all of its children inside a given loop.
Definition: LoopUtils.cpp:450
Interval::succ_iterator succ_end(Interval *I)
Definition: Interval.h:102
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1819
bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT, Loop *CurLoop, MemorySSAUpdater &MSSAU, bool TargetExecutesOncePerLoop, SinkAndHoistLICMFlags &LICMFlags, OptimizationRemarkEmitter *ORE=nullptr)
Returns true if it is legal to hoist or sink this instruction disregarding the possible introduction of ...
Definition: LICM.cpp:1160
void set_intersect(S1Ty &S1, const S2Ty &S2)
set_intersect(A, B) - Compute A := A ^ B Identical to set_intersection, except that it works on set<>...
Definition: SetOperations.h:40
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1367
void initializeLegacyLICMPassPass(PassRegistry &)
bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater or equal than requested.
Definition: Loads.cpp:201
bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition: LCSSA.cpp:410
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:99
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:748
bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
Pass * createLICMPass()
Definition: LICM.cpp:359
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:112
bool hoistRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, AssumptionCache *, TargetLibraryInfo *, Loop *, MemorySSAUpdater &, ScalarEvolution *, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, bool, bool AllowSpeculation)
Walk the specified region of the CFG (defined by all blocks dominated by the specified block,...
Definition: LICM.cpp:860
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1826
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:398
bool isGuard(const User *U)
Returns true iff U has semantics of a guard expressed in a form of call of llvm.experimental....
Definition: GuardUtils.cpp:18
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:511
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:48
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:109
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:145
bool isModOrRefSet(const ModRefInfo MRI)
Definition: ModRef.h:42
bool isNotVisibleOnUnwind(const Value *Object, bool &RequiresNoCaptureBeforeUnwind)
Return true if Object memory is not visible after an unwind, in the sense that program semantics cann...
void getLoopAnalysisUsage(AnalysisUsage &AU)
Helper to consistently add the set of standard passes to a loop pass's AnalysisUsage.
Definition: LoopUtils.cpp:141
BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition: ModRef.h:27
bool VerifyMemorySSA
Enables verification of MemorySSA.
Definition: MemorySSA.cpp:89
bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert it before I.
bool hasDisableLICMTransformsHint(const Loop *L)
Look for the loop attribute that disables the LICM transformation heuristics.
Definition: LoopUtils.cpp:348
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
void appendLoopsToWorklist(RangeT &&, SmallPriorityWorklist< Loop *, 4 > &)
Utility that implements appending of loops onto a worklist given a range.
Definition: LoopUtils.cpp:1537
bool promoteLoopAccessesToScalars(const SmallSetVector< Value *, 8 > &, SmallVectorImpl< BasicBlock * > &, SmallVectorImpl< Instruction * > &, SmallVectorImpl< MemoryAccess * > &, PredIteratorCache &, LoopInfo *, DominatorTree *, AssumptionCache *AC, const TargetLibraryInfo *, TargetTransformInfo *, Loop *, MemorySSAUpdater &, ICFLoopSafetyInfo *, OptimizationRemarkEmitter *, bool AllowSpeculation, bool HasReadsOutsideSet)
Try to promote memory values to scalars by sinking stores out of the loop and moving loads to before ...
Definition: LICM.cpp:1962
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not ...
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1946
PreservedAnalyses getLoopPassPreservedAnalyses()
Returns the minimum set of Analyses that all loop passes must preserve.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1846
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:2113
auto predecessors(const MachineBasicBlock *BB)
bool sinkRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, TargetTransformInfo *, Loop *CurLoop, MemorySSAUpdater &, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, Loop *OutermostLoop=nullptr)
Walk the specified region of the CFG (defined by all blocks dominated by the specified block,...
Definition: LICM.cpp:540
cl::opt< unsigned > SetLicmMssaNoAccForPromotionCap
unsigned pred_size(const MachineBasicBlock *BB)
cl::opt< unsigned > SetLicmMssaOptCap
bool sinkRegionForLoopNest(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, TargetTransformInfo *, Loop *, MemorySSAUpdater &, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *)
Call sinkRegion on loops contained within the specified loop in order from innermost to outermost.
Definition: LICM.cpp:607
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
Definition: BitVector.h:858
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:651
AAMDNodes merge(const AAMDNodes &Other) const
Given two sets of AAMDNodes applying to potentially different locations, determine the best AAMDNodes...
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
unsigned MssaOptCap
Definition: LICM.h:49
unsigned MssaNoAccForPromotionCap
Definition: LICM.h:50
bool AllowSpeculation
Definition: LICM.h:51
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
A lightweight accessor for an operand bundle meant to be passed around by value.
Definition: InstrTypes.h:1081
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1109
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:371