// Source listing of LICM.cpp, captured from the LLVM 22.0.0git generated
// documentation. Doxygen navigation text and embedded line numbers removed.
//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe. This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// Hoisting operations out of loops is a canonicalization transform. It
// enables and simplifies subsequent optimizations in the middle-end.
// Rematerialization of hoisted instructions to reduce register pressure is the
// responsibility of the back-end, which has more accurate information about
// register pressure and also handles other optimizations than LICM that
// increase live-ranges.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable. We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
//
//===----------------------------------------------------------------------===//
38
// NOTE(review): the scrape dropped most of the original include list. The
// existing includes are kept and the headers required by the code visible in
// this file are restored, ordered per LLVM convention (main header first,
// then alphabetical).
#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopNestAnalysis.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <utility>
88using namespace llvm;
89
90namespace llvm {
91class LPMUpdater;
92} // namespace llvm
93
94#define DEBUG_TYPE "licm"
95
96STATISTIC(NumCreatedBlocks, "Number of blocks created");
97STATISTIC(NumClonedBranches, "Number of branches cloned");
98STATISTIC(NumSunk, "Number of instructions sunk out of loop");
99STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
100STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
101STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
102STATISTIC(NumPromotionCandidates, "Number of promotion candidates");
103STATISTIC(NumLoadPromoted, "Number of load-only promotions");
104STATISTIC(NumLoadStorePromoted, "Number of load and store promotions");
105STATISTIC(NumMinMaxHoisted,
106 "Number of min/max expressions hoisted out of the loop");
107STATISTIC(NumGEPsHoisted,
108 "Number of geps reassociated and hoisted out of the loop");
109STATISTIC(NumAddSubHoisted, "Number of add/subtract expressions reassociated "
110 "and hoisted out of the loop");
111STATISTIC(NumFPAssociationsHoisted, "Number of invariant FP expressions "
112 "reassociated and hoisted out of the loop");
113STATISTIC(NumIntAssociationsHoisted,
114 "Number of invariant int expressions "
115 "reassociated and hoisted out of the loop");
116STATISTIC(NumBOAssociationsHoisted, "Number of invariant BinaryOp expressions "
117 "reassociated and hoisted out of the loop");
118
119/// Memory promotion is enabled by default.
120static cl::opt<bool>
121 DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
122 cl::desc("Disable memory promotion in LICM pass"));
123
125 "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
126 cl::desc("Enable control flow (and PHI) hoisting in LICM"));
127
128static cl::opt<bool>
129 SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false),
130 cl::desc("Force thread model single in LICM pass"));
131
133 "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
134 cl::desc("Max num uses visited for identifying load "
135 "invariance in loop using invariant start (default = 8)"));
136
138 "licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden,
139 cl::desc(
140 "Set upper limit for the number of transformations performed "
141 "during a single round of hoisting the reassociated expressions."));
142
144 "licm-max-num-int-reassociations", cl::init(5U), cl::Hidden,
145 cl::desc(
146 "Set upper limit for the number of transformations performed "
147 "during a single round of hoisting the reassociated expressions."));
148
149// Experimental option to allow imprecision in LICM in pathological cases, in
150// exchange for faster compile. This is to be removed if MemorySSA starts to
151// address the same issue. LICM calls MemorySSAWalker's
152// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
153// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
154// which may not be precise, since optimizeUses is capped. The result is
155// correct, but we may not get as "far up" as possible to get which access is
156// clobbering the one queried.
158 "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
159 cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
160 "for faster compile. Caps the MemorySSA clobbering calls."));
161
162// Experimentally, memory promotion carries less importance than sinking and
163// hoisting. Limit when we do promotion when using MemorySSA, in order to save
164// compile time.
166 "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
167 cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
168 "effect. When MSSA in LICM is enabled, then this is the maximum "
169 "number of accesses allowed to be present in a loop in order to "
170 "enable memory promotion."));
171
172namespace llvm {
174} // end namespace llvm
175
176static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
177static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
178 const LoopSafetyInfo *SafetyInfo,
180 bool &FoldableInLoop, bool LoopNestMode);
181static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
182 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
185static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
186 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
189 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
190 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
191 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
192 AssumptionCache *AC, bool AllowSpeculation);
194 AAResults *AA, Loop *CurLoop,
195 SinkAndHoistLICMFlags &Flags);
196static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
197 Loop *CurLoop, Instruction &I,
199 bool InvariantGroup);
200static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA,
201 MemoryUse &MU);
202/// Aggregates various functions for hoisting computations out of loop.
203static bool hoistArithmetics(Instruction &I, Loop &L,
204 ICFLoopSafetyInfo &SafetyInfo,
206 DominatorTree *DT);
208 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
209 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU);
210
211static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
212 MemorySSAUpdater &MSSAU);
213
214static void moveInstructionBefore(
218
220 Loop *L, AAResults *AA, ICFLoopSafetyInfo *SafetyInfo,
223
224static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
225 function_ref<void(Instruction *)> Fn);
227 std::pair<SmallSetVector<Value *, 8>, bool>;
230
231namespace {
232struct LoopInvariantCodeMotion {
233 bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
236 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);
237
238 LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
239 unsigned LicmMssaNoAccForPromotionCap,
240 bool LicmAllowSpeculation)
241 : LicmMssaOptCap(LicmMssaOptCap),
242 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
243 LicmAllowSpeculation(LicmAllowSpeculation) {}
244
245private:
246 unsigned LicmMssaOptCap;
247 unsigned LicmMssaNoAccForPromotionCap;
248 bool LicmAllowSpeculation;
249};
250
251struct LegacyLICMPass : public LoopPass {
252 static char ID; // Pass identification, replacement for typeid
253 LegacyLICMPass(
254 unsigned LicmMssaOptCap = SetLicmMssaOptCap,
255 unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap,
256 bool LicmAllowSpeculation = true)
257 : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
258 LicmAllowSpeculation) {
260 }
261
262 bool runOnLoop(Loop *L, LPPassManager &LPM) override {
263 if (skipLoop(L))
264 return false;
265
266 LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
267 << L->getHeader()->getNameOrAsOperand() << "\n");
268
269 Function *F = L->getHeader()->getParent();
270
271 auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
272 MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
273 // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
274 // pass. Function analyses need to be preserved across loop transformations
275 // but ORE cannot be preserved (see comment before the pass definition).
276 OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
277 return LICM.runOnLoop(
278 L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
279 &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
280 &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
281 &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F),
282 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(*F),
283 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*F),
284 SE ? &SE->getSE() : nullptr, MSSA, &ORE);
285 }
286
287 /// This transformation requires natural loop information & requires that
288 /// loop preheaders be inserted into the CFG...
289 ///
290 void getAnalysisUsage(AnalysisUsage &AU) const override {
291 AU.addPreserved<DominatorTreeWrapperPass>();
292 AU.addPreserved<LoopInfoWrapperPass>();
293 AU.addRequired<TargetLibraryInfoWrapperPass>();
294 AU.addRequired<MemorySSAWrapperPass>();
295 AU.addPreserved<MemorySSAWrapperPass>();
296 AU.addRequired<TargetTransformInfoWrapperPass>();
297 AU.addRequired<AssumptionCacheTracker>();
300 AU.addPreserved<LazyBlockFrequencyInfoPass>();
301 AU.addPreserved<LazyBranchProbabilityInfoPass>();
302 }
303
304private:
305 LoopInvariantCodeMotion LICM;
306};
307} // namespace
308
311 if (!AR.MSSA)
312 reportFatalUsageError("LICM requires MemorySSA (loop-mssa)");
313
314 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
315 // pass. Function analyses need to be preserved across loop transformations
316 // but ORE cannot be preserved (see comment before the pass definition).
317 OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
318
319 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
320 Opts.AllowSpeculation);
321 if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.AC, &AR.TLI, &AR.TTI,
322 &AR.SE, AR.MSSA, &ORE))
323 return PreservedAnalyses::all();
324
326 PA.preserve<MemorySSAAnalysis>();
327
328 return PA;
329}
330
332 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
333 static_cast<PassInfoMixin<LICMPass> *>(this)->printPipeline(
334 OS, MapClassName2PassName);
335
336 OS << '<';
337 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
338 OS << '>';
339}
340
343 LPMUpdater &) {
344 if (!AR.MSSA)
345 reportFatalUsageError("LNICM requires MemorySSA (loop-mssa)");
346
347 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
348 // pass. Function analyses need to be preserved across loop transformations
349 // but ORE cannot be preserved (see comment before the pass definition).
351
352 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
353 Opts.AllowSpeculation);
354
355 Loop &OutermostLoop = LN.getOutermostLoop();
356 bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, &AR.AC,
357 &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);
358
359 if (!Changed)
360 return PreservedAnalyses::all();
361
363
364 PA.preserve<DominatorTreeAnalysis>();
365 PA.preserve<LoopAnalysis>();
366 PA.preserve<MemorySSAAnalysis>();
367
368 return PA;
369}
370
372 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
373 static_cast<PassInfoMixin<LNICMPass> *>(this)->printPipeline(
374 OS, MapClassName2PassName);
375
376 OS << '<';
377 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
378 OS << '>';
379}
380
381char LegacyLICMPass::ID = 0;
382INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
383 false, false)
389INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
390 false)
391
392Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
393
398
400 unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
401 Loop &L, MemorySSA &MSSA)
404 IsSink(IsSink) {
405 unsigned AccessCapCount = 0;
406 for (auto *BB : L.getBlocks())
407 if (const auto *Accesses = MSSA.getBlockAccesses(BB))
408 for (const auto &MA : *Accesses) {
409 (void)MA;
410 ++AccessCapCount;
411 if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
412 NoOfMemAccTooLarge = true;
413 return;
414 }
415 }
416}
417
418/// Hoist expressions out of the specified loop. Note, alias info for inner
419/// loop is not preserved so it is not a good idea to run LICM multiple
420/// times on one loop.
421bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI,
425 ScalarEvolution *SE, MemorySSA *MSSA,
427 bool LoopNestMode) {
428 bool Changed = false;
429
430 assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
431
432 // If this loop has metadata indicating that LICM is not to be performed then
433 // just exit.
435 return false;
436 }
437
438 // Don't sink stores from loops with coroutine suspend instructions.
439 // LICM would sink instructions into the default destination of
440 // the coroutine switch. The default destination of the switch is to
441 // handle the case where the coroutine is suspended, by which point the
442 // coroutine frame may have been destroyed. No instruction can be sunk there.
443 // FIXME: This would unfortunately hurt the performance of coroutines, however
444 // there is currently no general solution for this. Similar issues could also
445 // potentially happen in other passes where instructions are being moved
446 // across that edge.
447 bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
448 using namespace PatternMatch;
449 return any_of(make_pointer_range(*BB),
450 match_fn(m_Intrinsic<Intrinsic::coro_suspend>()));
451 });
452
453 MemorySSAUpdater MSSAU(MSSA);
454 SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
455 /*IsSink=*/true, *L, *MSSA);
456
457 // Get the preheader block to move instructions into...
458 BasicBlock *Preheader = L->getLoopPreheader();
459
460 // Compute loop safety information.
461 ICFLoopSafetyInfo SafetyInfo;
462 SafetyInfo.computeLoopSafetyInfo(L);
463
464 // We want to visit all of the instructions in this loop... that are not parts
465 // of our subloops (they have already had their invariants hoisted out of
466 // their loop, into this loop, so there is no need to process the BODIES of
467 // the subloops).
468 //
469 // Traverse the body of the loop in depth first order on the dominator tree so
470 // that we are guaranteed to see definitions before we see uses. This allows
471 // us to sink instructions in one pass, without iteration. After sinking
472 // instructions, we perform another pass to hoist them out of the loop.
473 if (L->hasDedicatedExits())
474 Changed |=
475 LoopNestMode
476 ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI, DT,
477 TLI, TTI, L, MSSAU, &SafetyInfo, Flags, ORE)
478 : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
479 MSSAU, &SafetyInfo, Flags, ORE);
480
481 // sink pre-header defs that are unused in-loop into the unique exit to reduce
482 // pressure.
483 Changed |= sinkUnusedInvariantsFromPreheaderToExit(L, AA, &SafetyInfo, MSSAU,
484 SE, DT, Flags, ORE);
485
486 Flags.setIsSink(false);
487 if (Preheader)
488 Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, AC, TLI, L,
489 MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
490 LicmAllowSpeculation);
491
492 // Now that all loop invariants have been removed from the loop, promote any
493 // memory references to scalars that we can.
494 // Don't sink stores from loops without dedicated block exits. Exits
495 // containing indirect branches are not transformed by loop simplify,
496 // make sure we catch that. An additional load may be generated in the
497 // preheader for SSA updater, so also avoid sinking when no preheader
498 // is available.
499 if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
500 !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
501 // Figure out the loop exits and their insertion points
503 L->getUniqueExitBlocks(ExitBlocks);
504
505 // We can't insert into a catchswitch.
506 bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
507 return isa<CatchSwitchInst>(Exit->getTerminator());
508 });
509
510 if (!HasCatchSwitch) {
512 SmallVector<MemoryAccess *, 8> MSSAInsertPts;
513 InsertPts.reserve(ExitBlocks.size());
514 MSSAInsertPts.reserve(ExitBlocks.size());
515 for (BasicBlock *ExitBlock : ExitBlocks) {
516 InsertPts.push_back(ExitBlock->getFirstInsertionPt());
517 MSSAInsertPts.push_back(nullptr);
518 }
519
521
522 // Promoting one set of accesses may make the pointers for another set
523 // loop invariant, so run this in a loop.
524 bool Promoted = false;
525 bool LocalPromoted;
526 do {
527 LocalPromoted = false;
528 for (auto [PointerMustAliases, HasReadsOutsideSet] :
529 collectPromotionCandidates(MSSA, AA, L)) {
530 LocalPromoted |= promoteLoopAccessesToScalars(
531 PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
532 DT, AC, TLI, TTI, L, MSSAU, &SafetyInfo, ORE,
533 LicmAllowSpeculation, HasReadsOutsideSet);
534 }
535 Promoted |= LocalPromoted;
536 } while (LocalPromoted);
537
538 // Once we have promoted values across the loop body we have to
539 // recursively reform LCSSA as any nested loop may now have values defined
540 // within the loop used in the outer loop.
541 // FIXME: This is really heavy handed. It would be a bit better to use an
542 // SSAUpdater strategy during promotion that was LCSSA aware and reformed
543 // it as it went.
544 if (Promoted)
545 formLCSSARecursively(*L, *DT, LI, SE);
546
547 Changed |= Promoted;
548 }
549 }
550
551 // Check that neither this loop nor its parent have had LCSSA broken. LICM is
552 // specifically moving instructions across the loop boundary and so it is
553 // especially in need of basic functional correctness checking here.
554 assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
555 assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
556 "Parent loop not left in LCSSA form after LICM!");
557
558 if (VerifyMemorySSA)
559 MSSA->verifyMemorySSA();
560
561 if (Changed && SE)
563 return Changed;
564}
565
566/// Walk the specified region of the CFG (defined by all blocks dominated by
567/// the specified block, and that are in the current loop) in reverse depth
568/// first order w.r.t the DominatorTree. This allows us to visit uses before
569/// definitions, allowing us to sink a loop body in one pass without iteration.
570///
573 TargetTransformInfo *TTI, Loop *CurLoop,
574 MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
576 OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {
577
578 // Verify inputs.
579 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
580 CurLoop != nullptr && SafetyInfo != nullptr &&
581 "Unexpected input to sinkRegion.");
582
583 // We want to visit children before parents. We will enqueue all the parents
584 // before their children in the worklist and process the worklist in reverse
585 // order.
587 collectChildrenInLoop(DT, N, CurLoop);
588
589 bool Changed = false;
590 for (BasicBlock *BB : reverse(Worklist)) {
591 // subloop (which would already have been processed).
592 if (inSubLoop(BB, CurLoop, LI))
593 continue;
594
595 for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
596 Instruction &I = *--II;
597
598 // The instruction is not used in the loop if it is dead. In this case,
599 // we just delete it instead of sinking it.
600 if (isInstructionTriviallyDead(&I, TLI)) {
601 LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
604 ++II;
605 eraseInstruction(I, *SafetyInfo, MSSAU);
606 Changed = true;
607 continue;
608 }
609
610 // Check to see if we can sink this instruction to the exit blocks
611 // of the loop. We can do this if the all users of the instruction are
612 // outside of the loop. In this case, it doesn't even matter if the
613 // operands of the instruction are loop invariant.
614 //
615 bool FoldableInLoop = false;
616 bool LoopNestMode = OutermostLoop != nullptr;
617 if (!I.mayHaveSideEffects() &&
618 isNotUsedOrFoldableInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
619 SafetyInfo, TTI, FoldableInLoop,
620 LoopNestMode) &&
621 canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE)) {
622 if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
623 if (!FoldableInLoop) {
624 ++II;
626 eraseInstruction(I, *SafetyInfo, MSSAU);
627 }
628 Changed = true;
629 }
630 }
631 }
632 }
633 if (VerifyMemorySSA)
634 MSSAU.getMemorySSA()->verifyMemorySSA();
635 return Changed;
636}
637
640 TargetTransformInfo *TTI, Loop *CurLoop,
641 MemorySSAUpdater &MSSAU,
642 ICFLoopSafetyInfo *SafetyInfo,
645
646 bool Changed = false;
648 Worklist.insert(CurLoop);
649 appendLoopsToWorklist(*CurLoop, Worklist);
650 while (!Worklist.empty()) {
651 Loop *L = Worklist.pop_back_val();
652 Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
653 MSSAU, SafetyInfo, Flags, ORE, CurLoop);
654 }
655 return Changed;
656}
657
658namespace {
659// This is a helper class for hoistRegion to make it able to hoist control flow
660// in order to be able to hoist phis. The way this works is that we initially
661// start hoisting to the loop preheader, and when we see a loop invariant branch
662// we make note of this. When we then come to hoist an instruction that's
663// conditional on such a branch we duplicate the branch and the relevant control
664// flow, then hoist the instruction into the block corresponding to its original
665// block in the duplicated control flow.
666class ControlFlowHoister {
667private:
668 // Information about the loop we are hoisting from
669 LoopInfo *LI;
670 DominatorTree *DT;
671 Loop *CurLoop;
672 MemorySSAUpdater &MSSAU;
673
674 // A map of blocks in the loop to the block their instructions will be hoisted
675 // to.
676 DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;
677
678 // The branches that we can hoist, mapped to the block that marks a
679 // convergence point of their control flow.
680 DenseMap<BranchInst *, BasicBlock *> HoistableBranches;
681
682public:
683 ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
684 MemorySSAUpdater &MSSAU)
685 : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
686
687 void registerPossiblyHoistableBranch(BranchInst *BI) {
688 // We can only hoist conditional branches with loop invariant operands.
689 if (!ControlFlowHoisting || !BI->isConditional() ||
690 !CurLoop->hasLoopInvariantOperands(BI))
691 return;
692
693 // The branch destinations need to be in the loop, and we don't gain
694 // anything by duplicating conditional branches with duplicate successors,
695 // as it's essentially the same as an unconditional branch.
696 BasicBlock *TrueDest = BI->getSuccessor(0);
697 BasicBlock *FalseDest = BI->getSuccessor(1);
698 if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
699 TrueDest == FalseDest)
700 return;
701
702 // We can hoist BI if one branch destination is the successor of the other,
703 // or both have common successor which we check by seeing if the
704 // intersection of their successors is non-empty.
705 // TODO: This could be expanded to allowing branches where both ends
706 // eventually converge to a single block.
707 SmallPtrSet<BasicBlock *, 4> TrueDestSucc(llvm::from_range,
708 successors(TrueDest));
709 SmallPtrSet<BasicBlock *, 4> FalseDestSucc(llvm::from_range,
710 successors(FalseDest));
711 BasicBlock *CommonSucc = nullptr;
712 if (TrueDestSucc.count(FalseDest)) {
713 CommonSucc = FalseDest;
714 } else if (FalseDestSucc.count(TrueDest)) {
715 CommonSucc = TrueDest;
716 } else {
717 set_intersect(TrueDestSucc, FalseDestSucc);
718 // If there's one common successor use that.
719 if (TrueDestSucc.size() == 1)
720 CommonSucc = *TrueDestSucc.begin();
721 // If there's more than one pick whichever appears first in the block list
722 // (we can't use the value returned by TrueDestSucc.begin() as it's
723 // unpredicatable which element gets returned).
724 else if (!TrueDestSucc.empty()) {
725 Function *F = TrueDest->getParent();
726 auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
727 auto It = llvm::find_if(*F, IsSucc);
728 assert(It != F->end() && "Could not find successor in function");
729 CommonSucc = &*It;
730 }
731 }
732 // The common successor has to be dominated by the branch, as otherwise
733 // there will be some other path to the successor that will not be
734 // controlled by this branch so any phi we hoist would be controlled by the
735 // wrong condition. This also takes care of avoiding hoisting of loop back
736 // edges.
737 // TODO: In some cases this could be relaxed if the successor is dominated
738 // by another block that's been hoisted and we can guarantee that the
739 // control flow has been replicated exactly.
740 if (CommonSucc && DT->dominates(BI, CommonSucc))
741 HoistableBranches[BI] = CommonSucc;
742 }
743
744 bool canHoistPHI(PHINode *PN) {
745 // The phi must have loop invariant operands.
746 if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
747 return false;
748 // We can hoist phis if the block they are in is the target of hoistable
749 // branches which cover all of the predecessors of the block.
750 BasicBlock *BB = PN->getParent();
751 SmallPtrSet<BasicBlock *, 8> PredecessorBlocks(llvm::from_range,
752 predecessors(BB));
753 // If we have less predecessor blocks than predecessors then the phi will
754 // have more than one incoming value for the same block which we can't
755 // handle.
756 // TODO: This could be handled be erasing some of the duplicate incoming
757 // values.
758 if (PredecessorBlocks.size() != pred_size(BB))
759 return false;
760 for (auto &Pair : HoistableBranches) {
761 if (Pair.second == BB) {
762 // Which blocks are predecessors via this branch depends on if the
763 // branch is triangle-like or diamond-like.
764 if (Pair.first->getSuccessor(0) == BB) {
765 PredecessorBlocks.erase(Pair.first->getParent());
766 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
767 } else if (Pair.first->getSuccessor(1) == BB) {
768 PredecessorBlocks.erase(Pair.first->getParent());
769 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
770 } else {
771 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
772 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
773 }
774 }
775 }
776 // PredecessorBlocks will now be empty if for every predecessor of BB we
777 // found a hoistable branch source.
778 return PredecessorBlocks.empty();
779 }
780
781 BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
783 return CurLoop->getLoopPreheader();
784 // If BB has already been hoisted, return that
785 if (auto It = HoistDestinationMap.find(BB); It != HoistDestinationMap.end())
786 return It->second;
787
788 // Check if this block is conditional based on a pending branch
789 auto HasBBAsSuccessor =
790 [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
791 return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
792 Pair.first->getSuccessor(1) == BB);
793 };
794 auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);
795
796 // If not involved in a pending branch, hoist to preheader
797 BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
798 if (It == HoistableBranches.end()) {
799 LLVM_DEBUG(dbgs() << "LICM using "
800 << InitialPreheader->getNameOrAsOperand()
801 << " as hoist destination for "
802 << BB->getNameOrAsOperand() << "\n");
803 HoistDestinationMap[BB] = InitialPreheader;
804 return InitialPreheader;
805 }
806 BranchInst *BI = It->first;
807 assert(std::none_of(std::next(It), HoistableBranches.end(),
808 HasBBAsSuccessor) &&
809 "BB is expected to be the target of at most one branch");
810
811 LLVMContext &C = BB->getContext();
812 BasicBlock *TrueDest = BI->getSuccessor(0);
813 BasicBlock *FalseDest = BI->getSuccessor(1);
814 BasicBlock *CommonSucc = HoistableBranches[BI];
815 BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());
816
817 // Create hoisted versions of blocks that currently don't have them
818 auto CreateHoistedBlock = [&](BasicBlock *Orig) {
819 auto [It, Inserted] = HoistDestinationMap.try_emplace(Orig);
820 if (!Inserted)
821 return It->second;
822 BasicBlock *New =
823 BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
824 It->second = New;
825 DT->addNewBlock(New, HoistTarget);
826 if (CurLoop->getParentLoop())
827 CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
828 ++NumCreatedBlocks;
829 LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
830 << " as hoist destination for " << Orig->getName()
831 << "\n");
832 return New;
833 };
834 BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
835 BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
836 BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);
837
838 // Link up these blocks with branches.
839 if (!HoistCommonSucc->getTerminator()) {
840 // The new common successor we've generated will branch to whatever that
841 // hoist target branched to.
842 BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
843 assert(TargetSucc && "Expected hoist target to have a single successor");
844 HoistCommonSucc->moveBefore(TargetSucc);
845 BranchInst::Create(TargetSucc, HoistCommonSucc);
846 }
847 if (!HoistTrueDest->getTerminator()) {
848 HoistTrueDest->moveBefore(HoistCommonSucc);
849 BranchInst::Create(HoistCommonSucc, HoistTrueDest);
850 }
851 if (!HoistFalseDest->getTerminator()) {
852 HoistFalseDest->moveBefore(HoistCommonSucc);
853 BranchInst::Create(HoistCommonSucc, HoistFalseDest);
854 }
855
856 // If BI is being cloned to what was originally the preheader then
857 // HoistCommonSucc will now be the new preheader.
858 if (HoistTarget == InitialPreheader) {
859 // Phis in the loop header now need to use the new preheader.
860 InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
862 HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
863 // The new preheader dominates the loop header.
864 DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
865 DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
866 DT->changeImmediateDominator(HeaderNode, PreheaderNode);
867 // The preheader hoist destination is now the new preheader, with the
868 // exception of the hoist destination of this branch.
869 for (auto &Pair : HoistDestinationMap)
870 if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
871 Pair.second = HoistCommonSucc;
872 }
873
874 // Now finally clone BI.
875 auto *NewBI =
876 BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition(),
877 HoistTarget->getTerminator()->getIterator());
878 HoistTarget->getTerminator()->eraseFromParent();
879 // md_prof should also come from the original branch - since the
880 // condition was hoisted, the branch probabilities shouldn't change.
882 NewBI->copyMetadata(*BI, {LLVMContext::MD_prof});
883 // FIXME: Issue #152767: debug info should also be the same as the
884 // original branch, **if** the user explicitly indicated that.
885 NewBI->setDebugLoc(HoistTarget->getTerminator()->getDebugLoc());
886
887 ++NumClonedBranches;
888
889 assert(CurLoop->getLoopPreheader() &&
890 "Hoisting blocks should not have destroyed preheader");
891 return HoistDestinationMap[BB];
892 }
893};
894} // namespace
895
896/// Walk the specified region of the CFG (defined by all blocks dominated by
897/// the specified block, and that are in the current loop) in depth first
898/// order w.r.t the DominatorTree. This allows us to visit definitions before
899/// uses, allowing us to hoist a loop body in one pass without iteration.
900///
903 TargetLibraryInfo *TLI, Loop *CurLoop,
905 ICFLoopSafetyInfo *SafetyInfo,
907 OptimizationRemarkEmitter *ORE, bool LoopNestMode,
908 bool AllowSpeculation) {
909 // Verify inputs.
910 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
911 CurLoop != nullptr && SafetyInfo != nullptr &&
912 "Unexpected input to hoistRegion.");
913
914 ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);
915
916 // Keep track of instructions that have been hoisted, as they may need to be
917 // re-hoisted if they end up not dominating all of their uses.
918 SmallVector<Instruction *, 16> HoistedInstructions;
919
920 // For PHI hoisting to work we need to hoist blocks before their successors.
921 // We can do this by iterating through the blocks in the loop in reverse
922 // post-order.
923 LoopBlocksRPO Worklist(CurLoop);
924 Worklist.perform(LI);
925 bool Changed = false;
926 BasicBlock *Preheader = CurLoop->getLoopPreheader();
927 for (BasicBlock *BB : Worklist) {
928 // Only need to process the contents of this block if it is not part of a
929 // subloop (which would already have been processed).
930 if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
931 continue;
932
934 // Try hoisting the instruction out to the preheader. We can only do
935 // this if all of the operands of the instruction are loop invariant and
936 // if it is safe to hoist the instruction. We also check block frequency
937 // to make sure instruction only gets hoisted into colder blocks.
938 // TODO: It may be safe to hoist if we are hoisting to a conditional block
939 // and we have accurately duplicated the control flow from the loop header
940 // to that block.
941 if (CurLoop->hasLoopInvariantOperands(&I) &&
942 canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE) &&
943 isSafeToExecuteUnconditionally(I, DT, TLI, CurLoop, SafetyInfo, ORE,
944 Preheader->getTerminator(), AC,
945 AllowSpeculation)) {
946 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
947 MSSAU, SE, ORE);
948 HoistedInstructions.push_back(&I);
949 Changed = true;
950 continue;
951 }
952
953 // Attempt to remove floating point division out of the loop by
954 // converting it to a reciprocal multiplication.
955 if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
956 CurLoop->isLoopInvariant(I.getOperand(1))) {
957 auto Divisor = I.getOperand(1);
958 auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
959 auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
960 ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
961 SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
962 ReciprocalDivisor->insertBefore(I.getIterator());
963 ReciprocalDivisor->setDebugLoc(I.getDebugLoc());
964
965 auto Product =
966 BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
967 Product->setFastMathFlags(I.getFastMathFlags());
968 SafetyInfo->insertInstructionTo(Product, I.getParent());
969 Product->insertAfter(I.getIterator());
970 Product->setDebugLoc(I.getDebugLoc());
971 I.replaceAllUsesWith(Product);
972 eraseInstruction(I, *SafetyInfo, MSSAU);
973
974 hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
975 SafetyInfo, MSSAU, SE, ORE);
976 HoistedInstructions.push_back(ReciprocalDivisor);
977 Changed = true;
978 continue;
979 }
980
981 auto IsInvariantStart = [&](Instruction &I) {
982 using namespace PatternMatch;
983 return I.use_empty() &&
985 };
986 auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
987 return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
988 SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
989 };
990 if ((IsInvariantStart(I) || isGuard(&I)) &&
991 CurLoop->hasLoopInvariantOperands(&I) &&
992 MustExecuteWithoutWritesBefore(I)) {
993 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
994 MSSAU, SE, ORE);
995 HoistedInstructions.push_back(&I);
996 Changed = true;
997 continue;
998 }
999
1000 if (PHINode *PN = dyn_cast<PHINode>(&I)) {
1001 if (CFH.canHoistPHI(PN)) {
1002 // Redirect incoming blocks first to ensure that we create hoisted
1003 // versions of those blocks before we hoist the phi.
1004 for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
1005 PN->setIncomingBlock(
1006 i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
1007 hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
1008 MSSAU, SE, ORE);
1009 assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
1010 Changed = true;
1011 continue;
1012 }
1013 }
1014
1015 // Try to reassociate instructions so that part of computations can be
1016 // done out of loop.
1017 if (hoistArithmetics(I, *CurLoop, *SafetyInfo, MSSAU, AC, DT)) {
1018 Changed = true;
1019 continue;
1020 }
1021
1022 // Remember possibly hoistable branches so we can actually hoist them
1023 // later if needed.
1024 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
1025 CFH.registerPossiblyHoistableBranch(BI);
1026 }
1027 }
1028
1029 // If we hoisted instructions to a conditional block they may not dominate
1030 // their uses that weren't hoisted (such as phis where some operands are not
1031 // loop invariant). If so make them unconditional by moving them to their
1032 // immediate dominator. We iterate through the instructions in reverse order
1033 // which ensures that when we rehoist an instruction we rehoist its operands,
1034 // and also keep track of where in the block we are rehoisting to make sure
1035 // that we rehoist instructions before the instructions that use them.
1036 Instruction *HoistPoint = nullptr;
1037 if (ControlFlowHoisting) {
1038 for (Instruction *I : reverse(HoistedInstructions)) {
1039 if (!llvm::all_of(I->uses(),
1040 [&](Use &U) { return DT->dominates(I, U); })) {
1041 BasicBlock *Dominator =
1042 DT->getNode(I->getParent())->getIDom()->getBlock();
1043 if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
1044 if (HoistPoint)
1045 assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
1046 "New hoist point expected to dominate old hoist point");
1047 HoistPoint = Dominator->getTerminator();
1048 }
1049 LLVM_DEBUG(dbgs() << "LICM rehoisting to "
1050 << HoistPoint->getParent()->getNameOrAsOperand()
1051 << ": " << *I << "\n");
1052 moveInstructionBefore(*I, HoistPoint->getIterator(), *SafetyInfo, MSSAU,
1053 SE);
1054 HoistPoint = I;
1055 Changed = true;
1056 }
1057 }
1058 }
1059 if (VerifyMemorySSA)
1060 MSSAU.getMemorySSA()->verifyMemorySSA();
1061
1062 // Now that we've finished hoisting make sure that LI and DT are still
1063 // valid.
1064#ifdef EXPENSIVE_CHECKS
1065 if (Changed) {
1066 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
1067 "Dominator tree verification failed");
1068 LI->verify(*DT);
1069 }
1070#endif
1071
1072 return Changed;
1073}
1074
1075// Return true if LI is invariant within scope of the loop. LI is invariant if
1076// CurLoop is dominated by an invariant.start representing the same memory
1077// location and size as the memory location LI loads from, and also the
1078// invariant.start has no uses.
1080 Loop *CurLoop) {
1081 Value *Addr = LI->getPointerOperand();
1082 const DataLayout &DL = LI->getDataLayout();
1083 const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());
1084
1085 // It is not currently possible for clang to generate an invariant.start
1086 // intrinsic with scalable vector types because we don't support thread local
1087 // sizeless types and we don't permit sizeless types in structs or classes.
1088 // Furthermore, even if support is added for this in future the intrinsic
1089 // itself is defined to have a size of -1 for variable sized objects. This
1090 // makes it impossible to verify if the intrinsic envelops our region of
1091 // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
1092 // types would have a -1 parameter, but the former is clearly double the size
1093 // of the latter.
1094 if (LocSizeInBits.isScalable())
1095 return false;
1096
1097 // If we've ended up at a global/constant, bail. We shouldn't be looking at
1098 // uselists for non-local Values in a loop pass.
1099 if (isa<Constant>(Addr))
1100 return false;
1101
1102 unsigned UsesVisited = 0;
1103 // Traverse all uses of the load operand value, to see if invariant.start is
1104 // one of the uses, and whether it dominates the load instruction.
1105 for (auto *U : Addr->users()) {
1106 // Avoid traversing for Load operand with high number of users.
1107 if (++UsesVisited > MaxNumUsesTraversed)
1108 return false;
1110 // If there are escaping uses of invariant.start instruction, the load maybe
1111 // non-invariant.
1112 if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
1113 !II->use_empty())
1114 continue;
1115 ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
1116 // The intrinsic supports having a -1 argument for variable sized objects
1117 // so we should check for that here.
1118 if (InvariantSize->isNegative())
1119 continue;
1120 uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
1121 // Confirm the invariant.start location size contains the load operand size
1122 // in bits. Also, the invariant.start should dominate the load, and we
1123 // should not hoist the load out of a loop that contains this dominating
1124 // invariant.start.
1125 if (LocSizeInBits.getFixedValue() <= InvariantSizeInBits &&
1126 DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
1127 return true;
1128 }
1129
1130 return false;
1131}
1132
1133/// Return true if-and-only-if we know how to (mechanically) both hoist and
1134/// sink a given instruction out of a loop. Does not address legality
1135/// concerns such as aliasing or speculation safety.
1146
1147/// Return true if I is the only Instruction with a MemoryAccess in L.
1148static bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
1149 const MemorySSAUpdater &MSSAU) {
1150 for (auto *BB : L->getBlocks())
1151 if (auto *Accs = MSSAU.getMemorySSA()->getBlockAccesses(BB)) {
1152 int NotAPhi = 0;
1153 for (const auto &Acc : *Accs) {
1154 if (isa<MemoryPhi>(&Acc))
1155 continue;
1156 const auto *MUD = cast<MemoryUseOrDef>(&Acc);
1157 if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
1158 return false;
1159 }
1160 }
1161 return true;
1162}
1163
1165 BatchAAResults &BAA,
1166 SinkAndHoistLICMFlags &Flags,
1167 MemoryUseOrDef *MA) {
1168 // See declaration of SetLicmMssaOptCap for usage details.
1169 if (Flags.tooManyClobberingCalls())
1170 return MA->getDefiningAccess();
1171
1172 MemoryAccess *Source =
1174 Flags.incrementClobberingCalls();
1175 return Source;
1176}
1177
1179 Loop *CurLoop, MemorySSAUpdater &MSSAU,
1180 bool TargetExecutesOncePerLoop,
1181 SinkAndHoistLICMFlags &Flags,
1183 // If we don't understand the instruction, bail early.
1185 return false;
1186
1187 MemorySSA *MSSA = MSSAU.getMemorySSA();
1188 // Loads have extra constraints we have to verify before we can hoist them.
1189 if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
1190 if (!LI->isUnordered())
1191 return false; // Don't sink/hoist volatile or ordered atomic loads!
1192
1193 // Loads from constant memory are always safe to move, even if they end up
1194 // in the same alias set as something that ends up being modified.
1195 if (!isModSet(AA->getModRefInfoMask(LI->getOperand(0))))
1196 return true;
1197 if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1198 return true;
1199
1200 if (LI->isAtomic() && !TargetExecutesOncePerLoop)
1201 return false; // Don't risk duplicating unordered loads
1202
1203 // This checks for an invariant.start dominating the load.
1204 if (isLoadInvariantInLoop(LI, DT, CurLoop))
1205 return true;
1206
1207 auto MU = cast<MemoryUse>(MSSA->getMemoryAccess(LI));
1208
1209 bool InvariantGroup = LI->hasMetadata(LLVMContext::MD_invariant_group);
1210
1211 bool Invalidated = pointerInvalidatedByLoop(
1212 MSSA, MU, CurLoop, I, Flags, InvariantGroup);
1213 // Check loop-invariant address because this may also be a sinkable load
1214 // whose address is not necessarily loop-invariant.
1215 if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1216 ORE->emit([&]() {
1218 DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
1219 << "failed to move load with loop-invariant address "
1220 "because the loop may invalidate its value";
1221 });
1222
1223 return !Invalidated;
1224 } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
1225 // Don't sink calls which can throw.
1226 if (CI->mayThrow())
1227 return false;
1228
1229 // Convergent attribute has been used on operations that involve
1230 // inter-thread communication which results are implicitly affected by the
1231 // enclosing control flows. It is not safe to hoist or sink such operations
1232 // across control flow.
1233 if (CI->isConvergent())
1234 return false;
1235
1236 // FIXME: Current LLVM IR semantics don't work well with coroutines and
1237 // thread local globals. We currently treat getting the address of a thread
1238 // local global as not accessing memory, even though it may not be a
1239 // constant throughout a function with coroutines. Remove this check after
1240 // we better model semantics of thread local globals.
1241 if (CI->getFunction()->isPresplitCoroutine())
1242 return false;
1243
1244 using namespace PatternMatch;
1246 // Assumes don't actually alias anything or throw
1247 return true;
1248
1249 // Handle simple cases by querying alias analysis.
1250 MemoryEffects Behavior = AA->getMemoryEffects(CI);
1251
1252 if (Behavior.doesNotAccessMemory())
1253 return true;
1254 if (Behavior.onlyReadsMemory()) {
1255 // Might have stale MemoryDef for call that was later inferred to be
1256 // read-only.
1257 auto *MU = dyn_cast<MemoryUse>(MSSA->getMemoryAccess(CI));
1258 if (!MU)
1259 return false;
1260
1261 // If we can prove there are no writes to the memory read by the call, we
1262 // can hoist or sink.
1264 MSSA, MU, CurLoop, I, Flags, /*InvariantGroup=*/false);
1265 }
1266
1267 if (Behavior.onlyWritesMemory()) {
1268 // can hoist or sink if there are no conflicting read/writes to the
1269 // memory location written to by the call.
1270 return noConflictingReadWrites(CI, MSSA, AA, CurLoop, Flags);
1271 }
1272
1273 return false;
1274 } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1275 // Fences alias (most) everything to provide ordering. For the moment,
1276 // just give up if there are any other memory operations in the loop.
1277 return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1278 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
1279 if (!SI->isUnordered())
1280 return false; // Don't sink/hoist volatile or ordered atomic store!
1281
1282 // We can only hoist a store that we can prove writes a value which is not
1283 // read or overwritten within the loop. For those cases, we fallback to
1284 // load store promotion instead. TODO: We can extend this to cases where
1285 // there is exactly one write to the location and that write dominates an
1286 // arbitrary number of reads in the loop.
1287 if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
1288 return true;
1289 return noConflictingReadWrites(SI, MSSA, AA, CurLoop, Flags);
1290 }
1291
1292 assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");
1293
1294 // We've established mechanical ability and aliasing, it's up to the caller
1295 // to check fault safety
1296 return true;
1297}
1298
1299/// Returns true if a PHINode is a trivially replaceable with an
1300/// Instruction.
1301/// This is true when all incoming values are that instruction.
1302/// This pattern occurs most often with LCSSA PHI nodes.
1303///
1304static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1305 for (const Value *IncValue : PN.incoming_values())
1306 if (IncValue != &I)
1307 return false;
1308
1309 return true;
1310}
1311
1312/// Return true if the instruction is foldable in the loop.
1313static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop,
1314 const TargetTransformInfo *TTI) {
1315 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1316 InstructionCost CostI =
1317 TTI->getInstructionCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1318 if (CostI != TargetTransformInfo::TCC_Free)
1319 return false;
1320 // For a GEP, we cannot simply use getInstructionCost because currently
1321 // it optimistically assumes that a GEP will fold into addressing mode
1322 // regardless of its users.
1323 const BasicBlock *BB = GEP->getParent();
1324 for (const User *U : GEP->users()) {
1325 const Instruction *UI = cast<Instruction>(U);
1326 if (CurLoop->contains(UI) &&
1327 (BB != UI->getParent() ||
1328 (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
1329 return false;
1330 }
1331 return true;
1332 }
1333
1334 return false;
1335}
1336
1337/// Return true if the only users of this instruction are outside of
1338/// the loop. If this is true, we can sink the instruction to the exit
1339/// blocks of the loop.
1340///
1341/// We also return true if the instruction could be folded away in lowering.
1342/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
1343static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
1344 const LoopSafetyInfo *SafetyInfo,
1346 bool &FoldableInLoop, bool LoopNestMode) {
1347 const auto &BlockColors = SafetyInfo->getBlockColors();
1348 bool IsFoldable = isFoldableInLoop(I, CurLoop, TTI);
1349 for (const User *U : I.users()) {
1350 const Instruction *UI = cast<Instruction>(U);
1351 if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1352 const BasicBlock *BB = PN->getParent();
1353 // We cannot sink uses in catchswitches.
1355 return false;
1356
1357 // We need to sink a callsite to a unique funclet. Avoid sinking if the
1358 // phi use is too muddled.
1359 if (isa<CallInst>(I))
1360 if (!BlockColors.empty() &&
1361 BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1362 return false;
1363
1364 if (LoopNestMode) {
1365 while (isa<PHINode>(UI) && UI->hasOneUser() &&
1366 UI->getNumOperands() == 1) {
1367 if (!CurLoop->contains(UI))
1368 break;
1369 UI = cast<Instruction>(UI->user_back());
1370 }
1371 }
1372 }
1373
1374 if (CurLoop->contains(UI)) {
1375 if (IsFoldable) {
1376 FoldableInLoop = true;
1377 continue;
1378 }
1379 return false;
1380 }
1381 }
1382 return true;
1383}
1384
1386 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1387 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU) {
1388 Instruction *New;
1389 if (auto *CI = dyn_cast<CallInst>(&I)) {
1390 const auto &BlockColors = SafetyInfo->getBlockColors();
1391
1392 // Sinking call-sites need to be handled differently from other
1393 // instructions. The cloned call-site needs a funclet bundle operand
1394 // appropriate for its location in the CFG.
1396 for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1397 BundleIdx != BundleEnd; ++BundleIdx) {
1398 OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
1399 if (Bundle.getTagID() == LLVMContext::OB_funclet)
1400 continue;
1401
1402 OpBundles.emplace_back(Bundle);
1403 }
1404
1405 if (!BlockColors.empty()) {
1406 const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
1407 assert(CV.size() == 1 && "non-unique color for exit block!");
1408 BasicBlock *BBColor = CV.front();
1409 BasicBlock::iterator EHPad = BBColor->getFirstNonPHIIt();
1410 if (EHPad->isEHPad())
1411 OpBundles.emplace_back("funclet", &*EHPad);
1412 }
1413
1414 New = CallInst::Create(CI, OpBundles);
1415 New->copyMetadata(*CI);
1416 } else {
1417 New = I.clone();
1418 }
1419
1420 New->insertInto(&ExitBlock, ExitBlock.getFirstInsertionPt());
1421 if (!I.getName().empty())
1422 New->setName(I.getName() + ".le");
1423
1424 if (MSSAU.getMemorySSA()->getMemoryAccess(&I)) {
1425 // Create a new MemoryAccess and let MemorySSA set its defining access.
1426 // After running some passes, MemorySSA might be outdated, and the
1427 // instruction `I` may have become a non-memory touching instruction.
1428 MemoryAccess *NewMemAcc = MSSAU.createMemoryAccessInBB(
1429 New, nullptr, New->getParent(), MemorySSA::Beginning,
1430 /*CreationMustSucceed=*/false);
1431 if (NewMemAcc) {
1432 if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1433 MSSAU.insertDef(MemDef, /*RenameUses=*/true);
1434 else {
1435 auto *MemUse = cast<MemoryUse>(NewMemAcc);
1436 MSSAU.insertUse(MemUse, /*RenameUses=*/true);
1437 }
1438 }
1439 }
1440
1441 // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
1442 // this is particularly cheap because we can rip off the PHI node that we're
1443 // replacing for the number and blocks of the predecessors.
1444 // OPT: If this shows up in a profile, we can instead finish sinking all
1445 // invariant instructions, and then walk their operands to re-establish
1446 // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1447 // sinking bottom-up.
1448 for (Use &Op : New->operands())
1449 if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
1450 auto *OInst = cast<Instruction>(Op.get());
1451 PHINode *OpPN =
1452 PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1453 OInst->getName() + ".lcssa");
1454 OpPN->insertBefore(ExitBlock.begin());
1455 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1456 OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1457 Op = OpPN;
1458 }
1459 return New;
1460}
1461
1463 MemorySSAUpdater &MSSAU) {
1464 MSSAU.removeMemoryAccess(&I);
1465 SafetyInfo.removeInstruction(&I);
1466 I.eraseFromParent();
1467}
1468
1470 ICFLoopSafetyInfo &SafetyInfo,
1473 SafetyInfo.removeInstruction(&I);
1474 SafetyInfo.insertInstructionTo(&I, Dest->getParent());
1475 I.moveBefore(*Dest->getParent(), Dest);
1477 MSSAU.getMemorySSA()->getMemoryAccess(&I)))
1478 MSSAU.moveToPlace(OldMemAcc, Dest->getParent(), Point);
1479 if (SE)
1481}
1482
1483// If there's a single exit block, sink any loop-invariant values that were
1484// defined in the preheader but not used inside the loop into the exit block
1485// to reduce register pressure in the loop.
1487 Loop *L, AAResults *AA, ICFLoopSafetyInfo *SafetyInfo,
1490 BasicBlock *ExitBlock = L->getExitBlock();
1491 if (!ExitBlock)
1492 return false;
1493
1494 BasicBlock *Preheader = L->getLoopPreheader();
1495 if (!Preheader)
1496 return false;
1497
1498 bool MadeAnyChanges = false;
1499
1501
1502 // Skip terminator.
1503 if (Preheader->getTerminator() == &I)
1504 continue;
1505
1506 // New instructions were inserted at the end of the preheader.
1507 if (isa<PHINode>(I))
1508 break;
1509
1510 // Don't move instructions which might have side effects, since the side
1511 // effects need to complete before instructions inside the loop. Note that
1512 // it's okay if the instruction might have undefined behavior: LoopSimplify
1513 // guarantees that the preheader dominates the exit block.
1514 if (I.mayHaveSideEffects())
1515 continue;
1516
1517 if (!canSinkOrHoistInst(I, AA, DT, L, MSSAU, true, SinkFlags, nullptr))
1518 continue;
1519
1520 // Determine if there is a use in or before the loop (direct or
1521 // otherwise).
1522 bool UsedInLoopOrPreheader = false;
1523 for (Use &U : I.uses()) {
1524 auto *UserI = cast<Instruction>(U.getUser());
1525 BasicBlock *UseBB = UserI->getParent();
1526 if (auto *PN = dyn_cast<PHINode>(UserI)) {
1527 UseBB = PN->getIncomingBlock(U);
1528 }
1529 if (UseBB == Preheader || L->contains(UseBB)) {
1530 UsedInLoopOrPreheader = true;
1531 break;
1532 }
1533 }
1534 if (UsedInLoopOrPreheader)
1535 continue;
1536
1537 moveInstructionBefore(I, ExitBlock->getFirstInsertionPt(), *SafetyInfo,
1538 MSSAU, SE, MemorySSA::Beginning);
1539 MadeAnyChanges = true;
1540 }
1541
1542 return MadeAnyChanges;
1543}
1544
1546 PHINode *TPN, Instruction *I, LoopInfo *LI,
1548 const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1549 MemorySSAUpdater &MSSAU) {
1551 "Expect only trivially replaceable PHI");
1552 BasicBlock *ExitBlock = TPN->getParent();
1553 auto [It, Inserted] = SunkCopies.try_emplace(ExitBlock);
1554 if (Inserted)
1555 It->second = cloneInstructionInExitBlock(*I, *ExitBlock, *TPN, LI,
1556 SafetyInfo, MSSAU);
1557 return It->second;
1558}
1559
1560static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1561 BasicBlock *BB = PN->getParent();
1562 if (!BB->canSplitPredecessors())
1563 return false;
1564 // It's not impossible to split EHPad blocks, but if BlockColors already exist
1565 // it require updating BlockColors for all offspring blocks accordingly. By
1566 // skipping such corner case, we can make updating BlockColors after splitting
1567 // predecessor fairly simple.
1568 if (!SafetyInfo->getBlockColors().empty() &&
1569 BB->getFirstNonPHIIt()->isEHPad())
1570 return false;
1571 for (BasicBlock *BBPred : predecessors(BB)) {
1572 if (isa<IndirectBrInst>(BBPred->getTerminator()))
1573 return false;
1574 }
1575 return true;
1576}
1577
1579 LoopInfo *LI, const Loop *CurLoop,
1580 LoopSafetyInfo *SafetyInfo,
1581 MemorySSAUpdater *MSSAU) {
1582#ifndef NDEBUG
1584 CurLoop->getUniqueExitBlocks(ExitBlocks);
1585 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(llvm::from_range, ExitBlocks);
1586#endif
1587 BasicBlock *ExitBB = PN->getParent();
1588 assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1589
1590 // Split predecessors of the loop exit to make instructions in the loop are
1591 // exposed to exit blocks through trivially replaceable PHIs while keeping the
1592 // loop in the canonical form where each predecessor of each exit block should
1593 // be contained within the loop. For example, this will convert the loop below
1594 // from
1595 //
1596 // LB1:
1597 // %v1 =
1598 // br %LE, %LB2
1599 // LB2:
1600 // %v2 =
1601 // br %LE, %LB1
1602 // LE:
1603 // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1604 //
1605 // to
1606 //
1607 // LB1:
1608 // %v1 =
1609 // br %LE.split, %LB2
1610 // LB2:
1611 // %v2 =
1612 // br %LE.split2, %LB1
1613 // LE.split:
1614 // %p1 = phi [%v1, %LB1] <-- trivially replaceable
1615 // br %LE
1616 // LE.split2:
1617 // %p2 = phi [%v2, %LB2] <-- trivially replaceable
1618 // br %LE
1619 // LE:
1620 // %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1621 //
1622 const auto &BlockColors = SafetyInfo->getBlockColors();
1623 SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1624 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
1625 while (!PredBBs.empty()) {
1626 BasicBlock *PredBB = *PredBBs.begin();
1627 assert(CurLoop->contains(PredBB) &&
1628 "Expect all predecessors are in the loop");
1629 if (PN->getBasicBlockIndex(PredBB) >= 0) {
1631 ExitBB, PredBB, ".split.loop.exit", &DTU, LI, MSSAU, true);
1632 // Since we do not allow splitting EH-block with BlockColors in
1633 // canSplitPredecessors(), we can simply assign predecessor's color to
1634 // the new block.
1635 if (!BlockColors.empty())
1636 // Grab a reference to the ColorVector to be inserted before getting the
1637 // reference to the vector we are copying because inserting the new
1638 // element in BlockColors might cause the map to be reallocated.
1639 SafetyInfo->copyColors(NewPred, PredBB);
1640 }
1641 PredBBs.remove(PredBB);
1642 }
1643}
1644
1645/// When an instruction is found to only be used outside of the loop, this
1646/// function moves it to the exit blocks and patches up SSA form as needed.
1647/// This method is guaranteed to remove the original instruction from its
1648/// position, and may either delete it or move it to outside of the loop.
1649///
1650static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1651 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
1653 bool Changed = false;
1654 LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1655
1656 // Iterate over users to be ready for actual sinking. Replace users via
1657 // unreachable blocks with undef and make all user PHIs trivially replaceable.
1658 SmallPtrSet<Instruction *, 8> VisitedUsers;
1659 for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1660 auto *User = cast<Instruction>(*UI);
1661 Use &U = UI.getUse();
1662 ++UI;
1663
1664 if (VisitedUsers.count(User) || CurLoop->contains(User))
1665 continue;
1666
1667 if (!DT->isReachableFromEntry(User->getParent())) {
1668 U = PoisonValue::get(I.getType());
1669 Changed = true;
1670 continue;
1671 }
1672
1673 // The user must be a PHI node.
1674 PHINode *PN = cast<PHINode>(User);
1675
1676 // Surprisingly, instructions can be used outside of loops without any
1677 // exits. This can only happen in PHI nodes if the incoming block is
1678 // unreachable.
1679 BasicBlock *BB = PN->getIncomingBlock(U);
1680 if (!DT->isReachableFromEntry(BB)) {
1681 U = PoisonValue::get(I.getType());
1682 Changed = true;
1683 continue;
1684 }
1685
1686 VisitedUsers.insert(PN);
1687 if (isTriviallyReplaceablePHI(*PN, I))
1688 continue;
1689
1690 if (!canSplitPredecessors(PN, SafetyInfo))
1691 return Changed;
1692
1693 // Split predecessors of the PHI so that we can make users trivially
1694 // replaceable.
1695 splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, &MSSAU);
1696
1697 // Should rebuild the iterators, as they may be invalidated by
1698 // splitPredecessorsOfLoopExit().
1699 UI = I.user_begin();
1700 UE = I.user_end();
1701 }
1702
1703 if (VisitedUsers.empty())
1704 return Changed;
1705
1706 ORE->emit([&]() {
1707 return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
1708 << "sinking " << ore::NV("Inst", &I);
1709 });
1710 if (isa<LoadInst>(I))
1711 ++NumMovedLoads;
1712 else if (isa<CallInst>(I))
1713 ++NumMovedCalls;
1714 ++NumSunk;
1715
1716#ifndef NDEBUG
1718 CurLoop->getUniqueExitBlocks(ExitBlocks);
1719 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(llvm::from_range, ExitBlocks);
1720#endif
1721
1722 // Clones of this instruction. Don't create more than one per exit block!
1724
1725 // If this instruction is only used outside of the loop, then all users are
1726 // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1727 // the instruction.
1728 // First check if I is worth sinking for all uses. Sink only when it is worth
1729 // across all uses.
1730 SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1731 for (auto *UI : Users) {
1732 auto *User = cast<Instruction>(UI);
1733
1734 if (CurLoop->contains(User))
1735 continue;
1736
1737 PHINode *PN = cast<PHINode>(User);
1738 assert(ExitBlockSet.count(PN->getParent()) &&
1739 "The LCSSA PHI is not in an exit block!");
1740
1741 // The PHI must be trivially replaceable.
1743 PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1744 // As we sink the instruction out of the BB, drop its debug location.
1745 New->dropLocation();
1746 PN->replaceAllUsesWith(New);
1747 eraseInstruction(*PN, *SafetyInfo, MSSAU);
1748 Changed = true;
1749 }
1750 return Changed;
1751}
1752
1753/// When an instruction is found to use only loop-invariant operands and it
1754/// is safe to hoist, this routine is called to do the dirty work: move \p I
1755/// into \p Dest (a block dominating the loop body, typically the preheader).
// NOTE(review): the trailing parameters of this function (used below as MSSAU,
// SE and ORE) are declared on source lines absent from this extraction.
1756static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1757                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1760  LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1761                    << I << "\n");
// Emit the remark lazily; the lambda only runs if remarks are enabled.
1762  ORE->emit([&]() {
1763    return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1764                                                         << ore::NV("Inst", &I);
1765  });
1766
1767  // Metadata can be dependent on conditions we are hoisting above.
1768  // Conservatively strip all metadata on the instruction unless we were
1769  // guaranteed to execute I if we entered the loop, in which case the metadata
1770  // is valid in the loop preheader.
1771  // Similarly, If I is a call and it is not guaranteed to execute in the loop,
1772  // then moving to the preheader means we should strip attributes on the call
1773  // that can cause UB since we may be hoisting above conditions that allowed
1774  // inferring those attributes. They may not be valid at the preheader.
1775  if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
1776      // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1777      // time in isGuaranteedToExecute if we don't actually have anything to
1778      // drop.  It is a compile time optimization, not required for correctness.
1779      !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop)) {
1780    I.dropUBImplyingAttrsAndMetadata();
1781  }
1782
1783  if (isa<PHINode>(I))
1784    // Move the new node to the end of the phi list in the destination block.
1785    moveInstructionBefore(I, Dest->getFirstNonPHIIt(), *SafetyInfo, MSSAU, SE);
1786  else
1787    // Move the new node to the destination block, before its terminator.
1788    moveInstructionBefore(I, Dest->getTerminator()->getIterator(), *SafetyInfo,
1789                          MSSAU, SE);
1790
// Hoisting changes the instruction's position; give it a location that is
// valid at the new point rather than keeping the in-loop one.
1791  I.updateLocationAfterHoist();
1792
// Statistics bookkeeping for -stats output.
1793  if (isa<LoadInst>(I))
1794    ++NumMovedLoads;
1795  else if (isa<CallInst>(I))
1796    ++NumMovedCalls;
1797  ++NumHoisted;
1798}
1799
1800/// Only sink or hoist an instruction if it is not a trapping instruction,
1801/// or if the instruction is known not to trap when moved to the preheader,
1802/// or if it is a trapping instruction and is guaranteed to execute.
// NOTE(review): the line introducing this function's name
// (isSafeToExecuteUnconditionally) is absent from this extraction.
1804    Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
1805    const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
1806    OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
1807    AssumptionCache *AC, bool AllowSpeculation) {
// Cheapest win first: if speculation is allowed and the instruction cannot
// trap at CtxI, it may execute unconditionally regardless of loop guards.
1808  if (AllowSpeculation &&
1809      isSafeToSpeculativelyExecute(&Inst, CtxI, AC, DT, TLI))
1810    return true;
1811
1812  bool GuaranteedToExecute =
1813      SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1814
// Explain the failure to the user, but only for the interesting case of a
// load from a loop-invariant address that is merely conditionally executed.
1815  if (!GuaranteedToExecute) {
1816    auto *LI = dyn_cast<LoadInst>(&Inst);
1817    if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1818      ORE->emit([&]() {
1820            DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1821               << "failed to hoist load with loop-invariant address "
1822                  "because load is conditionally executed";
1823      });
1824  }
1825
1826  return GuaranteedToExecute;
1827}
1828
1829namespace {
// Helper used by promoteLoopAccessesToScalars: drives LoadAndStorePromoter to
// rewrite in-loop loads/stores of a promoted pointer into SSA values, and (if
// legal) materializes a store of the live-out value in each loop exit block.
1830class LoopPromoter : public LoadAndStorePromoter {
1831  Value *SomePtr; // Designated pointer to store to.
1832  SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1833  SmallVectorImpl<BasicBlock::iterator> &LoopInsertPts;
1834  SmallVectorImpl<MemoryAccess *> &MSSAInsertPts; // Parallel to LoopInsertPts.
1835  PredIteratorCache &PredCache;
1836  MemorySSAUpdater &MSSAU;
1837  LoopInfo &LI;
1838  DebugLoc DL;        // Merged location attached to the inserted exit stores.
1839  Align Alignment;    // Alignment to use for inserted stores.
1840  bool UnorderedAtomic; // If set, inserted stores are unordered atomic.
1841  AAMDNodes AATags;
1842  ICFLoopSafetyInfo &SafetyInfo;
1843  bool CanInsertStoresInExitBlocks;
// NOTE(review): a member holding the promoted instructions (initialized as
// Uses(Insts) in the constructor) is declared on a line missing from this
// extraction.
1845
1846  // We're about to add a use of V in a loop exit block.  Insert an LCSSA phi
1847  // (if legal) if doing so would add an out-of-loop use to an instruction
1848  // defined in-loop.
1849  Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1850    if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
1851      return V;
1852
1854    // We need to create an LCSSA PHI node for the incoming value and
1855    // store that.
1856    PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1857                                  I->getName() + ".lcssa");
1858    PN->insertBefore(BB->begin());
// Every predecessor of the exit block is inside the loop, so the same
// in-loop definition feeds each incoming edge.
1859    for (BasicBlock *Pred : PredCache.get(BB))
1860      PN->addIncoming(I, Pred);
1861    return PN;
1862  }
1863
1864public:
1865  LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1866               SmallVectorImpl<BasicBlock *> &LEB,
1867               SmallVectorImpl<BasicBlock::iterator> &LIP,
1868               SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1869               MemorySSAUpdater &MSSAU, LoopInfo &li, DebugLoc dl,
1870               Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
1871               ICFLoopSafetyInfo &SafetyInfo, bool CanInsertStoresInExitBlocks)
1872      : LoadAndStorePromoter(Insts, S), SomePtr(SP), LoopExitBlocks(LEB),
1873        LoopInsertPts(LIP), MSSAInsertPts(MSSAIP), PredCache(PIC), MSSAU(MSSAU),
1874        LI(li), DL(std::move(dl)), Alignment(Alignment),
1875        UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1876        SafetyInfo(SafetyInfo),
1877        CanInsertStoresInExitBlocks(CanInsertStoresInExitBlocks), Uses(Insts) {}
1878
// Materialize one store of the promoted value per loop exit block, wiring it
// into MemorySSA and propagating alignment / atomicity / AA / DIAssignID info.
1879  void insertStoresInLoopExitBlocks() {
1880    // Insert stores after in the loop exit blocks.  Each exit block gets a
1881    // store of the live-out values that feed them.  Since we've already told
1882    // the SSA updater about the defs in the loop and the preheader
1883    // definition, it is all set and we can start using it.
1884    DIAssignID *NewID = nullptr;
1885    for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1886      BasicBlock *ExitBlock = LoopExitBlocks[i];
1887      Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
// Both the stored value and the pointer may themselves need LCSSA phis.
1888      LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1889      Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1890      BasicBlock::iterator InsertPos = LoopInsertPts[i];
1891      StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1892      if (UnorderedAtomic)
1893        NewSI->setOrdering(AtomicOrdering::Unordered);
1894      NewSI->setAlignment(Alignment);
1895      NewSI->setDebugLoc(DL);
1896      // Attach DIAssignID metadata to the new store, generating it on the
1897      // first loop iteration.
1898      if (i == 0) {
1899        // NewSI will have its DIAssignID set here if there are any stores in
1900        // Uses with a DIAssignID attachment. This merged ID will then be
1901        // attached to the other inserted stores (in the branch below).
1902        NewSI->mergeDIAssignID(Uses);
1904            NewSI->getMetadata(LLVMContext::MD_DIAssignID));
1905      } else {
1906        // Attach the DIAssignID (or nullptr) merged from Uses in the branch
1907        // above.
1908        NewSI->setMetadata(LLVMContext::MD_DIAssignID, NewID);
1909      }
1910
1911      if (AATags)
1912        NewSI->setAAMetadata(AATags);
1913
// Keep MemorySSA in sync: chain this exit block's stores after each other so
// subsequent insertions for the same exit land after the previous one.
1914      MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1915      MemoryAccess *NewMemAcc;
1916      if (!MSSAInsertPoint) {
1917        NewMemAcc = MSSAU.createMemoryAccessInBB(
1918            NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1919      } else {
1920        NewMemAcc =
1921            MSSAU.createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1922      }
1923      MSSAInsertPts[i] = NewMemAcc;
1924      MSSAU.insertDef(cast<MemoryDef>(NewMemAcc), true);
1925      // FIXME: true for safety, false may still be correct.
1926    }
1927  }
1928
// LoadAndStorePromoter callback: runs after rewriting but before the original
// accesses are deleted, so the exit stores can still consult them.
1929  void doExtraRewritesBeforeFinalDeletion() override {
1930    if (CanInsertStoresInExitBlocks)
1931      insertStoresInLoopExitBlocks();
1932  }
1933
// Keep the safety info and MemorySSA consistent as promoted accesses die.
1934  void instructionDeleted(Instruction *I) const override {
1935    SafetyInfo.removeInstruction(I);
1936    MSSAU.removeMemoryAccess(I);
1937  }
1938
// Original in-loop stores may only be deleted when we are allowed to replace
// them with exit-block stores; loads are always deletable after rewriting.
1939  bool shouldDelete(Instruction *I) const override {
1940    if (isa<StoreInst>(I))
1941      return CanInsertStoresInExitBlocks;
1942    return true;
1943  }
1944};
1945
// Returns true when the pointer's provenance is provably not captured before
// or inside loop \p L.
1946bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
1947                                 DominatorTree *DT) {
1948  // We can perform the captured-before check against any instruction in the
1949  // loop header, as the loop header is reachable from any instruction inside
1950  // the loop.
1951  // TODO: ReturnCaptures=true shouldn't be necessary here.
// NOTE(review): the line opening the capture query call (presumably a
// PointerMayBeCapturedBefore-style helper) is missing from this extraction.
1953      V, /*ReturnCaptures=*/true, L->getHeader()->getTerminator(), DT,
1954      /*IncludeI=*/false, CaptureComponents::Provenance));
1955}
1956
1957/// Return true if we can prove that a caller cannot inspect the object if an
1958/// unwind occurs inside the loop.
1959bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
1960 DominatorTree *DT) {
1961 bool RequiresNoCaptureBeforeUnwind;
1962 if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind))
1963 return false;
1964
1965 return !RequiresNoCaptureBeforeUnwind ||
1966 isNotCapturedBeforeOrInLoop(Object, L, DT);
1967}
1968
// Returns true when stores to \p Object cannot be observed by another thread
// while loop \p L runs, either because the object is a function-local,
// uncaptured allocation, or because the program is single-threaded.
// NOTE(review): the final parameter line of this signature (the
// TargetTransformInfo pointer used as TTI below) is missing from this
// extraction; SingleThread is presumably a file-level command-line flag —
// confirm against the full file.
1969bool isThreadLocalObject(const Value *Object, const Loop *L, DominatorTree *DT,
1971  // The object must be function-local to start with, and then not captured
1972  // before/in the loop.
1973  return (isIdentifiedFunctionLocal(Object) &&
1974          isNotCapturedBeforeOrInLoop(Object, L, DT)) ||
1975         (TTI->isSingleThreaded() || SingleThread);
1976}
1977
1978} // namespace
1979
1980/// Try to promote memory values to scalars by sinking stores out of the
1981/// loop and moving loads to before the loop.  We do this by looping over
1982/// the stores in the loop, looking for stores to Must pointers which are
1983/// loop invariant.
1984///
1985/// Returns true if any promotion was performed.
// NOTE(review): the function-name line and several parameter lines (exit
// blocks, insert points, PredIteratorCache, LI, DT, AC) are missing from this
// extraction of the signature.
1986    const SmallSetVector<Value *, 8> &PointerMustAliases,
1991    const TargetLibraryInfo *TLI, TargetTransformInfo *TTI, Loop *CurLoop,
1992    MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
1993    OptimizationRemarkEmitter *ORE, bool AllowSpeculation,
1994    bool HasReadsOutsideSet) {
1995  // Verify inputs.
1996  assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1997         SafetyInfo != nullptr &&
1998         "Unexpected Input to promoteLoopAccessesToScalars");
1999
2000  LLVM_DEBUG({
2001    dbgs() << "Trying to promote set of must-aliased pointers:\n";
2002    for (Value *Ptr : PointerMustAliases)
2003      dbgs() << " " << *Ptr << "\n";
2004  });
2005  ++NumPromotionCandidates;
2006
// Any pointer in the set works as a representative, since they must-alias.
2007  Value *SomePtr = *PointerMustAliases.begin();
2008  BasicBlock *Preheader = CurLoop->getLoopPreheader();
2009
2010  // It is not safe to promote a load/store from the loop if the load/store is
2011  // conditional.  For example, turning:
2012  //
2013  //    for () { if (c) *P += 1; }
2014  //
2015  // into:
2016  //
2017  //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
2018  //
2019  // is not safe, because *P may only be valid to access if 'c' is true.
2020  //
2021  // The safety property divides into two parts:
2022  // p1) The memory may not be dereferenceable on entry to the loop.  In this
2023  //    case, we can't insert the required load in the preheader.
2024  // p2) The memory model does not allow us to insert a store along any dynamic
2025  //    path which did not originally have one.
2026  //
2027  // If at least one store is guaranteed to execute, both properties are
2028  // satisfied, and promotion is legal.
2029  //
2030  // This, however, is not a necessary condition. Even if no store/load is
2031  // guaranteed to execute, we can still establish these properties.
2032  // We can establish (p1) by proving that hoisting the load into the preheader
2033  // is safe (i.e. proving dereferenceability on all paths through the loop). We
2034  // can use any access within the alias set to prove dereferenceability,
2035  // since they're all must alias.
2036  //
2037  // There are two ways establish (p2):
2038  // a) Prove the location is thread-local. In this case the memory model
2039  // requirement does not apply, and stores are safe to insert.
2040  // b) Prove a store dominates every exit block. In this case, if an exit
2041  // blocks is reached, the original dynamic path would have taken us through
2042  // the store, so inserting a store into the exit block is safe. Note that this
2043  // is different from the store being guaranteed to execute. For instance,
2044  // if an exception is thrown on the first iteration of the loop, the original
2045  // store is never executed, but the exit blocks are not executed either.
2046
2047  bool DereferenceableInPH = false;
2048  bool StoreIsGuanteedToExecute = false;
2049  bool LoadIsGuaranteedToExecute = false;
2050  bool FoundLoadToPromote = false;
2051
2052  // Goes from Unknown to either Safe or Unsafe, but can't switch between them.
2053  enum {
2054    StoreSafe,
2055    StoreUnsafe,
2056    StoreSafetyUnknown,
2057  } StoreSafety = StoreSafetyUnknown;
2058
// NOTE(review): the declaration of the LoopUses container populated below is
// on a line missing from this extraction.
2060
2061  // We start with an alignment of one and try to find instructions that allow
2062  // us to prove better alignment.
2063  Align Alignment;
2064  // Keep track of which types of access we see
2065  bool SawUnorderedAtomic = false;
2066  bool SawNotAtomic = false;
2067  AAMDNodes AATags;
2068
2069  const DataLayout &MDL = Preheader->getDataLayout();
2070
2071  // If there are reads outside the promoted set, then promoting stores is
2072  // definitely not safe.
2073  if (HasReadsOutsideSet)
2074    StoreSafety = StoreUnsafe;
2075
2076  if (StoreSafety == StoreSafetyUnknown && SafetyInfo->anyBlockMayThrow()) {
2077    // If a loop can throw, we have to insert a store along each unwind edge.
2078    // That said, we can't actually make the unwind edge explicit. Therefore,
2079    // we have to prove that the store is dead along the unwind edge.  We do
2080    // this by proving that the caller can't have a reference to the object
2081    // after return and thus can't possibly load from the object.
2082    Value *Object = getUnderlyingObject(SomePtr);
2083    if (!isNotVisibleOnUnwindInLoop(Object, CurLoop, DT))
2084      StoreSafety = StoreUnsafe;
2085  }
2086
2087  // Check that all accesses to pointers in the alias set use the same type.
2088  // We cannot (yet) promote a memory location that is loaded and stored in
2089  // different sizes.  While we are at it, collect alignment and AA info.
2090  Type *AccessTy = nullptr;
2091  for (Value *ASIV : PointerMustAliases) {
2092    for (Use &U : ASIV->uses()) {
2093      // Ignore instructions that are outside the loop.
2094      Instruction *UI = dyn_cast<Instruction>(U.getUser());
2095      if (!UI || !CurLoop->contains(UI))
2096        continue;
2097
2098      // If there is an non-load/store instruction in the loop, we can't promote
2099      // it.
2100      if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
// Ordered (acquire or stronger) atomics cannot be promoted at all.
2101        if (!Load->isUnordered())
2102          return false;
2103
2104        SawUnorderedAtomic |= Load->isAtomic();
2105        SawNotAtomic |= !Load->isAtomic();
2106        FoundLoadToPromote = true;
2107
2108        Align InstAlignment = Load->getAlign();
2109
2110        if (!LoadIsGuaranteedToExecute)
2111          LoadIsGuaranteedToExecute =
2112              SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
2113
2114        // Note that proving a load safe to speculate requires proving
2115        // sufficient alignment at the target location.  Proving it guaranteed
2116        // to execute does as well.  Thus we can increase our guaranteed
2117        // alignment as well.
2118        if (!DereferenceableInPH || (InstAlignment > Alignment))
2120                  *Load, DT, TLI, CurLoop, SafetyInfo, ORE,
2121                  Preheader->getTerminator(), AC, AllowSpeculation)) {
2122            DereferenceableInPH = true;
2123            Alignment = std::max(Alignment, InstAlignment);
2124          }
2125      } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2126        // Stores *of* the pointer are not interesting, only stores *to* the
2127        // pointer.
2128        if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
2129          continue;
2130        if (!Store->isUnordered())
2131          return false;
2132
2133        SawUnorderedAtomic |= Store->isAtomic();
2134        SawNotAtomic |= !Store->isAtomic();
2135
2136        // If the store is guaranteed to execute, both properties are satisfied.
2137        // We may want to check if a store is guaranteed to execute even if we
2138        // already know that promotion is safe, since it may have higher
2139        // alignment than any other guaranteed stores, in which case we can
2140        // raise the alignment on the promoted store.
2141        Align InstAlignment = Store->getAlign();
2142        bool GuaranteedToExecute =
2143            SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
2144        StoreIsGuanteedToExecute |= GuaranteedToExecute;
2145        if (GuaranteedToExecute) {
2146          DereferenceableInPH = true;
2147          if (StoreSafety == StoreSafetyUnknown)
2148            StoreSafety = StoreSafe;
2149          Alignment = std::max(Alignment, InstAlignment);
2150        }
2151
2152        // If a store dominates all exit blocks, it is safe to sink.
2153        // As explained above, if an exit block was executed, a dominating
2154        // store must have been executed at least once, so we are not
2155        // introducing stores on paths that did not have them.
2156        // Note that this only looks at explicit exit blocks. If we ever
2157        // start sinking stores into unwind edges (see above), this will break.
2158        if (StoreSafety == StoreSafetyUnknown &&
2159            llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2160              return DT->dominates(Store->getParent(), Exit);
2161            }))
2162          StoreSafety = StoreSafe;
2163
2164        // If the store is not guaranteed to execute, we may still get
2165        // deref info through it.
2166        if (!DereferenceableInPH) {
2167          DereferenceableInPH = isDereferenceableAndAlignedPointer(
2168              Store->getPointerOperand(), Store->getValueOperand()->getType(),
2169              Store->getAlign(), MDL, Preheader->getTerminator(), AC, DT, TLI);
2170        }
2171      } else
2172        continue; // Not a load or store.
2173
// All promoted accesses must agree on the access type; mixed sizes bail out.
2174      if (!AccessTy)
2175        AccessTy = getLoadStoreType(UI);
2176      else if (AccessTy != getLoadStoreType(UI))
2177        return false;
2178
2179      // Merge the AA tags.
2180      if (LoopUses.empty()) {
2181        // On the first load/store, just take its AA tags.
2182        AATags = UI->getAAMetadata();
2183      } else if (AATags) {
2184        AATags = AATags.merge(UI->getAAMetadata());
2185      }
2186
2187      LoopUses.push_back(UI);
2188    }
2189  }
2190
2191  // If we found both an unordered atomic instruction and a non-atomic memory
2192  // access, bail.  We can't blindly promote non-atomic to atomic since we
2193  // might not be able to lower the result.  We can't downgrade since that
2194  // would violate memory model.  Also, align 0 is an error for atomics.
2195  if (SawUnorderedAtomic && SawNotAtomic)
2196    return false;
2197
2198  // If we're inserting an atomic load in the preheader, we must be able to
2199  // lower it.  We're only guaranteed to be able to lower naturally aligned
2200  // atomics.
2201  if (SawUnorderedAtomic && Alignment < MDL.getTypeStoreSize(AccessTy))
2202    return false;
2203
2204  // If we couldn't prove we can hoist the load, bail.
2205  if (!DereferenceableInPH) {
2206    LLVM_DEBUG(dbgs() << "Not promoting: Not dereferenceable in preheader\n");
2207    return false;
2208  }
2209
2210  // We know we can hoist the load, but don't have a guaranteed store.
2211  // Check whether the location is writable and thread-local. If it is, then we
2212  // can insert stores along paths which originally didn't have them without
2213  // violating the memory model.
2214  if (StoreSafety == StoreSafetyUnknown) {
2215    Value *Object = getUnderlyingObject(SomePtr);
2216    bool ExplicitlyDereferenceableOnly;
2217    if (isWritableObject(Object, ExplicitlyDereferenceableOnly) &&
2218        (!ExplicitlyDereferenceableOnly ||
2219         isDereferenceablePointer(SomePtr, AccessTy, MDL)) &&
2220        isThreadLocalObject(Object, CurLoop, DT, TTI))
2221      StoreSafety = StoreSafe;
2222  }
2223
2224  // If we've still failed to prove we can sink the store, hoist the load
2225  // only, if possible.
2226  if (StoreSafety != StoreSafe && !FoundLoadToPromote)
2227    // If we cannot hoist the load either, give up.
2228    return false;
2229
2230  // Lets do the promotion!
2231  if (StoreSafety == StoreSafe) {
2232    LLVM_DEBUG(dbgs() << "LICM: Promoting load/store of the value: " << *SomePtr
2233                      << '\n');
2234    ++NumLoadStorePromoted;
2235  } else {
2236    LLVM_DEBUG(dbgs() << "LICM: Promoting load of the value: " << *SomePtr
2237                      << '\n');
2238    ++NumLoadPromoted;
2239  }
2240
2241  ORE->emit([&]() {
2242    return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2243                              LoopUses[0])
2244           << "Moving accesses to memory location out of the loop";
2245  });
2246
2247  // Look at all the loop uses, and try to merge their locations.
2248  std::vector<DebugLoc> LoopUsesLocs;
2249  for (auto U : LoopUses)
2250    LoopUsesLocs.push_back(U->getDebugLoc());
2251  auto DL = DebugLoc::getMergedLocations(LoopUsesLocs);
2252
2253  // We use the SSAUpdater interface to insert phi nodes as required.
2255  SSAUpdater SSA(&NewPHIs);
2256  LoopPromoter Promoter(SomePtr, LoopUses, SSA, ExitBlocks, InsertPts,
2257                        MSSAInsertPts, PIC, MSSAU, *LI, DL, Alignment,
2258                        SawUnorderedAtomic,
// Only attach AA tags when a store was guaranteed to run; otherwise the tags
// may describe control-dependent facts that don't hold on the new paths.
2259                        StoreIsGuanteedToExecute ? AATags : AAMDNodes(),
2260                        *SafetyInfo, StoreSafety == StoreSafe);
2261
2262  // Set up the preheader to have a definition of the value.  It is the live-out
2263  // value from the preheader that uses in the loop will use.
2264  LoadInst *PreheaderLoad = nullptr;
2265  if (FoundLoadToPromote || !StoreIsGuanteedToExecute) {
2266    PreheaderLoad =
2267        new LoadInst(AccessTy, SomePtr, SomePtr->getName() + ".promoted",
2268                     Preheader->getTerminator()->getIterator());
2269    if (SawUnorderedAtomic)
2270      PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2271    PreheaderLoad->setAlignment(Alignment);
2272    PreheaderLoad->setDebugLoc(DebugLoc::getDropped());
2273    if (AATags && LoadIsGuaranteedToExecute)
2274      PreheaderLoad->setAAMetadata(AATags);
2275
2276    MemoryAccess *PreheaderLoadMemoryAccess = MSSAU.createMemoryAccessInBB(
2277        PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2278    MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2279    MSSAU.insertUse(NewMemUse, /*RenameUses=*/true);
2280    SSA.AddAvailableValue(Preheader, PreheaderLoad);
2281  } else {
// No load is needed: every path through the loop overwrites the location, so
// the incoming value is never observed.
2282    SSA.AddAvailableValue(Preheader, PoisonValue::get(AccessTy));
2283  }
2284
2285  if (VerifyMemorySSA)
2286    MSSAU.getMemorySSA()->verifyMemorySSA();
2287  // Rewrite all the loads in the loop and remember all the definitions from
2288  // stores in the loop.
2289  Promoter.run(LoopUses);
2290
2291  if (VerifyMemorySSA)
2292    MSSAU.getMemorySSA()->verifyMemorySSA();
2293  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2294  if (PreheaderLoad && PreheaderLoad->use_empty())
2295    eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);
2296
2297  return true;
2298}
2299
2300static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2301 function_ref<void(Instruction *)> Fn) {
2302 for (const BasicBlock *BB : L->blocks())
2303 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2304 for (const auto &Access : *Accesses)
2305 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2306 Fn(MUD->getMemoryInst());
2307}
2308
2309// The bool indicates whether there might be reads outside the set, in which
2310// case only loads may be promoted.
// Collects, per must-alias set, the group of loop-invariant pointers that are
// candidates for scalar promotion in loop L.
// NOTE(review): the function signature line(s) and two local container
// declarations (the Sets and Result vectors used below) fall on lines missing
// from this extraction.
2313  BatchAAResults BatchAA(*AA);
2314  AliasSetTracker AST(BatchAA);
2315
// An access is promotable only if its address is loop-invariant and not a
// ConstantData (which AliasSetTracker cannot usefully track).
2316  auto IsPotentiallyPromotable = [L](const Instruction *I) {
2317    if (const auto *SI = dyn_cast<StoreInst>(I)) {
2318      const Value *PtrOp = SI->getPointerOperand();
2319      return !isa<ConstantData>(PtrOp) && L->isLoopInvariant(PtrOp);
2320    }
2321    if (const auto *LI = dyn_cast<LoadInst>(I)) {
2322      const Value *PtrOp = LI->getPointerOperand();
2323      return !isa<ConstantData>(PtrOp) && L->isLoopInvariant(PtrOp);
2324    }
2325    return false;
2326  };
2327
2328  // Populate AST with potentially promotable accesses.
2329  SmallPtrSet<Value *, 16> AttemptingPromotion;
2330  foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2331    if (IsPotentiallyPromotable(I)) {
2332      AttemptingPromotion.insert(I);
2333      AST.add(I);
2334    }
2335  });
2336
2337  // We're only interested in must-alias sets that contain a mod.
2339  for (AliasSet &AS : AST)
2340    if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
2341      Sets.push_back({&AS, false});
2342
2343  if (Sets.empty())
2344    return {}; // Nothing to promote...
2345
2346  // Discard any sets for which there is an aliasing non-promotable access.
2347  foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2348    if (AttemptingPromotion.contains(I))
2349      return;
2350
// NOTE(review): the erase_if call over Sets opening this lambda is on a line
// missing from this extraction; the predicate body below decides which sets
// to drop (returning true removes the set).
2352      ModRefInfo MR = Pair.getPointer()->aliasesUnknownInst(I, BatchAA);
2353      // Cannot promote if there are writes outside the set.
2354      if (isModSet(MR))
2355        return true;
2356      if (isRefSet(MR)) {
2357        // Remember reads outside the set.
2358        Pair.setInt(true);
2359        // If this is a mod-only set and there are reads outside the set,
2360        // we will not be able to promote, so bail out early.
2361        return !Pair.getPointer()->isRef();
2362      }
2363      return false;
2364    });
2365  });
2366
2368  for (auto [Set, HasReadsOutsideSet] : Sets) {
2369    SmallSetVector<Value *, 8> PointerMustAliases;
2370    for (const auto &MemLoc : *Set)
2371      PointerMustAliases.insert(const_cast<Value *>(MemLoc.Ptr));
2372    Result.emplace_back(std::move(PointerMustAliases), HasReadsOutsideSet);
2373  }
2374
2375  return Result;
2376}
2377
2378// For a given store instruction or writeonly call instruction, this function
2379// checks that there are no read or writes that conflict with the memory
2380// access in the instruction
// Returns true when the access can be moved without changing observable
// memory behavior.
// NOTE(review): the function-name/signature opening line is missing from this
// extraction; the visible parameter list continues below.
2382                                    AAResults *AA, Loop *CurLoop,
2383                                    SinkAndHoistLICMFlags &Flags) {
2385  // If there are more accesses than the Promotion cap, then give up as we're
2386  // not walking a list that long.
2387  if (Flags.tooManyMemoryAccesses())
2388    return false;
2389
2390  auto *IMD = MSSA->getMemoryAccess(I);
2391  BatchAAResults BAA(*AA);
2392  auto *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, IMD);
2393  // Make sure there are no clobbers inside the loop.
2394  if (!MSSA->isLiveOnEntryDef(Source) && CurLoop->contains(Source->getBlock()))
2395    return false;
2396
2397  // If there are interfering Uses (i.e. their defining access is in the
2398  // loop), or ordered loads (stored as Defs!), don't move this store.
2399  // Could do better here, but this is conservatively correct.
2400  // TODO: Cache set of Uses on the first walk in runOnLoop, update when
2401  // moving accesses. Can also extend to dominating uses.
2402  for (auto *BB : CurLoop->getBlocks()) {
2403    auto *Accesses = MSSA->getBlockAccesses(BB);
2404    if (!Accesses)
2405      continue;
2406    for (const auto &MA : *Accesses)
2407      if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
2408        auto *MD = getClobberingMemoryAccess(*MSSA, BAA, Flags,
2409                                             const_cast<MemoryUse *>(MU));
2410        if (!MSSA->isLiveOnEntryDef(MD) && CurLoop->contains(MD->getBlock()))
2411          return false;
2412        // Disable hoisting past potentially interfering loads. Optimized
2413        // Uses may point to an access outside the loop, as getClobbering
2414        // checks the previous iteration when walking the backedge.
2415        // FIXME: More precise: no Uses that alias I.
2416        if (!Flags.getIsSink() && !MSSA->dominates(IMD, MU))
2417          return false;
2418      } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
// An ordered load appears as a MemoryDef in MemorySSA; its presence blocks
// the move outright.
2419        if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
2420          (void)LI; // Silence warning.
2421          assert(!LI->isUnordered() && "Expected unordered load");
2422          return false;
2423        }
2424        // Any call, while it may not be clobbering I, it may be a use.
2425        if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
2426          // Check if the call may read from the memory location written
2427          // to by I. Check CI's attributes and arguments; the number of
2428          // such checks performed is limited above by NoOfMemAccTooLarge.
2429          if (auto *SI = dyn_cast<StoreInst>(I)) {
// NOTE(review): the getModRefInfo query computing MRI for the store case is
// on a line missing from this extraction.
2431            if (isModOrRefSet(MRI))
2432              return false;
2433          } else {
2434            auto *SCI = cast<CallInst>(I);
2435            // If the instruction we are wanting to hoist is also a call
2436            // instruction then we need not check mod/ref info with itself
2437            if (SCI == CI)
2438              continue;
2439            ModRefInfo MRI = BAA.getModRefInfo(CI, SCI);
2440            if (isModOrRefSet(MRI))
2441              return false;
2442          }
2443        }
2444      }
2445  }
2446  return true;
2447}
2448
// Returns true if the memory pointed to by the MemoryUse may be written inside
// the loop, i.e. moving the corresponding load is unsafe.
// NOTE(review): the opening signature line (function name, MSSA and MU
// parameters) is missing from this extraction.
2450                                     Loop *CurLoop, Instruction &I,
2451                                     SinkAndHoistLICMFlags &Flags,
2452                                     bool InvariantGroup) {
2453  // For hoisting, use the walker to determine safety
2454  if (!Flags.getIsSink()) {
2455    // If hoisting an invariant group, we only need to check that there
2456    // is no store to the loaded pointer between the start of the loop,
2457    // and the load (since all values must be the same).
2458
2459    // This can be checked in two conditions:
2460    // 1) if the memoryaccess is outside the loop
2461    // 2) the earliest access is at the loop header,
2462    // if the memory loaded is the phi node
2463
2464    BatchAAResults BAA(MSSA->getAA());
2465    MemoryAccess *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, MU);
2466    return !MSSA->isLiveOnEntryDef(Source) &&
2467           CurLoop->contains(Source->getBlock()) &&
2468           !(InvariantGroup && Source->getBlock() == CurLoop->getHeader() && isa<MemoryPhi>(Source));
2469  }
2470
2471  // For sinking, we'd need to check all Defs below this use. The getClobbering
2472  // call will look on the backedge of the loop, but will check aliasing with
2473  // the instructions on the previous iteration.
2474  // For example:
2475  // for (i ... )
2476  //   load a[i] ( Use (LoE)
2477  //   store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2478  //   i++;
2479  // The load sees no clobbering inside the loop, as the backedge alias check
2480  // does phi translation, and will check aliasing against store a[i-1].
2481  // However sinking the load outside the loop, below the store is incorrect.
2482
2483  // For now, only sink if there are no Defs in the loop, and the existing ones
2484  // precede the use and are in the same block.
2485  // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2486  // needs PostDominatorTreeAnalysis.
2487  // FIXME: More precise: no Defs that alias this Use.
// Past the access cap we conservatively report invalidation rather than walk.
2488  if (Flags.tooManyMemoryAccesses())
2489    return true;
2490  for (auto *BB : CurLoop->getBlocks())
2491    if (pointerInvalidatedByBlock(*BB, *MSSA, *MU))
2492      return true;
2493  // When sinking, the source block may not be part of the loop so check it.
2494  if (!CurLoop->contains(&I))
2495    return pointerInvalidatedByBlock(*I.getParent(), *MSSA, *MU);
2496
2497  return false;
2498}
2499
// Returns true if basic block BB contains a MemoryDef that does not precede
// the given MemoryUse within the same block — i.e. a potential clobber for
// the purposes of sinking.
// NOTE(review): the signature line (taking BasicBlock &BB, MemorySSA &MSSA,
// MemoryUse &MU) is missing from this extraction.
2501  if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2502    for (const auto &MA : *Accesses)
2503      if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2504        if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2505          return true;
2506  return false;
2507}
2508
2509/// Try to simplify things like (A < INV_1 AND icmp A < INV_2) into (A <
2510/// min(INV_1, INV_2)), if INV_1 and INV_2 are both loop invariants and their
2511/// minimum can be computed outside of loop, and A is not a loop-invariant.
2512static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2513                        MemorySSAUpdater &MSSAU) {
// For OR the condition is treated through De Morgan: fold the inverted
// comparison, then invert the predicate back at the end.
2514  bool Inverse = false;
2515  using namespace PatternMatch;
2516  Value *Cond1, *Cond2;
2517  if (match(&I, m_LogicalOr(m_Value(Cond1), m_Value(Cond2)))) {
2518    Inverse = true;
2519  } else if (match(&I, m_LogicalAnd(m_Value(Cond1), m_Value(Cond2)))) {
2520    // Do nothing
2521  } else
2522    return false;
2523
// Match "icmp P LHS, RHS" with variant LHS and invariant RHS, canonicalizing
// operand order (and predicate) as needed.
// NOTE(review): several predicate-adjustment statements inside this lambda
// (swapping/inverting P) fall on lines missing from this extraction.
2524  auto MatchICmpAgainstInvariant = [&](Value *C, CmpPredicate &P, Value *&LHS,
2525                                       Value *&RHS) {
2526    if (!match(C, m_OneUse(m_ICmp(P, m_Value(LHS), m_Value(RHS)))))
2527      return false;
2528    if (!LHS->getType()->isIntegerTy())
2529      return false;
2531      return false;
2532    if (L.isLoopInvariant(LHS)) {
2533      std::swap(LHS, RHS);
2535    }
2536    if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS))
2537      return false;
2538    if (Inverse)
2540    return true;
2541  };
2542  CmpPredicate P1, P2;
2543  Value *LHS1, *LHS2, *RHS1, *RHS2;
2544  if (!MatchICmpAgainstInvariant(Cond1, P1, LHS1, RHS1) ||
2545      !MatchICmpAgainstInvariant(Cond2, P2, LHS2, RHS2))
2546    return false;
// Both compares must use the same predicate and the same variant operand.
2547  auto MatchingPred = CmpPredicate::getMatching(P1, P2);
2548  if (!MatchingPred || LHS1 != LHS2)
2549    return false;
2550
2551  // Everything is fine, we can do the transform.
2552  bool UseMin = ICmpInst::isLT(*MatchingPred) || ICmpInst::isLE(*MatchingPred);
2553  assert(
2554      (UseMin || ICmpInst::isGT(*MatchingPred) ||
2555       ICmpInst::isGE(*MatchingPred)) &&
2556      "Relational predicate is either less (or equal) or greater (or equal)!");
2557  Intrinsic::ID id = ICmpInst::isSigned(*MatchingPred)
2558                         ? (UseMin ? Intrinsic::smin : Intrinsic::smax)
2559                         : (UseMin ? Intrinsic::umin : Intrinsic::umax);
2560  auto *Preheader = L.getLoopPreheader();
2561  assert(Preheader && "Loop is not in simplify form?");
2562  IRBuilder<> Builder(Preheader->getTerminator());
2563  // We are about to create a new guaranteed use for RHS2 which might not exist
2564  // before (if it was a non-taken input of logical and/or instruction). If it
2565  // was poison, we need to freeze it. Note that no new use for LHS and RHS1 are
2566  // introduced, so they don't need this.
2567  if (isa<SelectInst>(I))
2568    RHS2 = Builder.CreateFreeze(RHS2, RHS2->getName() + ".fr");
// The min/max of the two invariants is computed once in the preheader.
2569  Value *NewRHS = Builder.CreateBinaryIntrinsic(
2570      id, RHS1, RHS2, nullptr,
2571      StringRef("invariant.") +
2572          (ICmpInst::isSigned(*MatchingPred) ? "s" : "u") +
2573          (UseMin ? "min" : "max"));
2574  Builder.SetInsertPoint(&I);
2575  ICmpInst::Predicate P = *MatchingPred;
2576  if (Inverse)
// NOTE(review): the statement inverting P for the OR case is on a line
// missing from this extraction.
2578  Value *NewCond = Builder.CreateICmp(P, LHS1, NewRHS);
2579  NewCond->takeName(&I);
2580  I.replaceAllUsesWith(NewCond);
2581  eraseInstruction(I, SafetyInfo, MSSAU);
// Both original one-use compares are now dead; salvage their debug info
// before deleting them.
2582  Instruction &CondI1 = *cast<Instruction>(Cond1);
2583  Instruction &CondI2 = *cast<Instruction>(Cond2);
2584  salvageDebugInfo(CondI1);
2585  salvageDebugInfo(CondI2);
2586  eraseInstruction(CondI1, SafetyInfo, MSSAU);
2587  eraseInstruction(CondI2, SafetyInfo, MSSAU);
2588  return true;
2589}
2590
2591/// Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if
2592/// this allows hoisting the inner GEP.
2593static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2595 DominatorTree *DT) {
2597 if (!GEP)
2598 return false;
2599
2600 // Do not try to hoist a constant GEP out of the loop via reassociation.
2601 // Constant GEPs can often be folded into addressing modes, and reassociating
2602 // them may inhibit CSE of a common base.
2603 if (GEP->hasAllConstantIndices())
2604 return false;
2605
2606 auto *Src = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand());
2607 if (!Src || !Src->hasOneUse() || !L.contains(Src))
2608 return false;
2609
2610 Value *SrcPtr = Src->getPointerOperand();
2611 auto LoopInvariant = [&](Value *V) { return L.isLoopInvariant(V); };
2612 if (!L.isLoopInvariant(SrcPtr) || !all_of(GEP->indices(), LoopInvariant))
2613 return false;
2614
2615 // This can only happen if !AllowSpeculation, otherwise this would already be
2616 // handled.
2617 // FIXME: Should we respect AllowSpeculation in these reassociation folds?
2618 // The flag exists to prevent metadata dropping, which is not relevant here.
2619 if (all_of(Src->indices(), LoopInvariant))
2620 return false;
2621
2622 // The swapped GEPs are inbounds if both original GEPs are inbounds
2623 // and the sign of the offsets is the same. For simplicity, only
2624 // handle both offsets being non-negative.
2625 const DataLayout &DL = GEP->getDataLayout();
2626 auto NonNegative = [&](Value *V) {
2627 return isKnownNonNegative(V, SimplifyQuery(DL, DT, AC, GEP));
2628 };
2629 bool IsInBounds = Src->isInBounds() && GEP->isInBounds() &&
2630 all_of(Src->indices(), NonNegative) &&
2631 all_of(GEP->indices(), NonNegative);
2632
2633 BasicBlock *Preheader = L.getLoopPreheader();
2634 IRBuilder<> Builder(Preheader->getTerminator());
2635 Value *NewSrc = Builder.CreateGEP(GEP->getSourceElementType(), SrcPtr,
2636 SmallVector<Value *>(GEP->indices()),
2637 "invariant.gep", IsInBounds);
2638 Builder.SetInsertPoint(GEP);
2639 Value *NewGEP = Builder.CreateGEP(Src->getSourceElementType(), NewSrc,
2640 SmallVector<Value *>(Src->indices()), "gep",
2641 IsInBounds);
2642 GEP->replaceAllUsesWith(NewGEP);
2643 eraseInstruction(*GEP, SafetyInfo, MSSAU);
2644 salvageDebugInfo(*Src);
2645 eraseInstruction(*Src, SafetyInfo, MSSAU);
2646 return true;
2647}
2648
2649/// Try to turn things like "LV + C1 < C2" into "LV < C2 - C1". Here
2650/// C1 and C2 are loop invariants and LV is a loop-variant.
2651static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS,
2652 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2653 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2654 AssumptionCache *AC, DominatorTree *DT) {
2655 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2656 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2657
2658 bool IsSigned = ICmpInst::isSigned(Pred);
2659
2660 // Try to represent VariantLHS as sum of invariant and variant operands.
2661 using namespace PatternMatch;
2662 Value *VariantOp, *InvariantOp;
2663 if (IsSigned &&
2664 !match(VariantLHS, m_NSWAdd(m_Value(VariantOp), m_Value(InvariantOp))))
2665 return false;
2666 if (!IsSigned &&
2667 !match(VariantLHS, m_NUWAdd(m_Value(VariantOp), m_Value(InvariantOp))))
2668 return false;
2669
2670 // LHS itself is a loop-variant, try to represent it in the form:
2671 // "VariantOp + InvariantOp". If it is possible, then we can reassociate.
2672 if (L.isLoopInvariant(VariantOp))
2673 std::swap(VariantOp, InvariantOp);
2674 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2675 return false;
2676
2677 // In order to turn "LV + C1 < C2" into "LV < C2 - C1", we need to be able to
2678 // freely move values from left side of inequality to right side (just as in
2679 // normal linear arithmetics). Overflows make things much more complicated, so
2680 // we want to avoid this.
2681 auto &DL = L.getHeader()->getDataLayout();
2682 SimplifyQuery SQ(DL, DT, AC, &ICmp);
2683 if (IsSigned && computeOverflowForSignedSub(InvariantRHS, InvariantOp, SQ) !=
2685 return false;
2686 if (!IsSigned &&
2687 computeOverflowForUnsignedSub(InvariantRHS, InvariantOp, SQ) !=
2689 return false;
2690 auto *Preheader = L.getLoopPreheader();
2691 assert(Preheader && "Loop is not in simplify form?");
2692 IRBuilder<> Builder(Preheader->getTerminator());
2693 Value *NewCmpOp =
2694 Builder.CreateSub(InvariantRHS, InvariantOp, "invariant.op",
2695 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned);
2696 ICmp.setPredicate(Pred);
2697 ICmp.setOperand(0, VariantOp);
2698 ICmp.setOperand(1, NewCmpOp);
2699
2700 Instruction &DeadI = cast<Instruction>(*VariantLHS);
2701 salvageDebugInfo(DeadI);
2702 eraseInstruction(DeadI, SafetyInfo, MSSAU);
2703 return true;
2704}
2705
2706/// Try to reassociate and hoist the following two patterns:
2707/// LV - C1 < C2 --> LV < C1 + C2,
2708/// C1 - LV < C2 --> LV > C1 - C2.
2709static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS,
2710 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2711 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2712 AssumptionCache *AC, DominatorTree *DT) {
2713 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2714 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2715
2716 bool IsSigned = ICmpInst::isSigned(Pred);
2717
2718 // Try to represent VariantLHS as sum of invariant and variant operands.
2719 using namespace PatternMatch;
2720 Value *VariantOp, *InvariantOp;
2721 if (IsSigned &&
2722 !match(VariantLHS, m_NSWSub(m_Value(VariantOp), m_Value(InvariantOp))))
2723 return false;
2724 if (!IsSigned &&
2725 !match(VariantLHS, m_NUWSub(m_Value(VariantOp), m_Value(InvariantOp))))
2726 return false;
2727
2728 bool VariantSubtracted = false;
2729 // LHS itself is a loop-variant, try to represent it in the form:
2730 // "VariantOp + InvariantOp". If it is possible, then we can reassociate. If
2731 // the variant operand goes with minus, we use a slightly different scheme.
2732 if (L.isLoopInvariant(VariantOp)) {
2733 std::swap(VariantOp, InvariantOp);
2734 VariantSubtracted = true;
2735 Pred = ICmpInst::getSwappedPredicate(Pred);
2736 }
2737 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2738 return false;
2739
2740 // In order to turn "LV - C1 < C2" into "LV < C2 + C1", we need to be able to
2741 // freely move values from left side of inequality to right side (just as in
2742 // normal linear arithmetics). Overflows make things much more complicated, so
2743 // we want to avoid this. Likewise, for "C1 - LV < C2" we need to prove that
2744 // "C1 - C2" does not overflow.
2745 auto &DL = L.getHeader()->getDataLayout();
2746 SimplifyQuery SQ(DL, DT, AC, &ICmp);
2747 if (VariantSubtracted && IsSigned) {
2748 // C1 - LV < C2 --> LV > C1 - C2
2749 if (computeOverflowForSignedSub(InvariantOp, InvariantRHS, SQ) !=
2751 return false;
2752 } else if (VariantSubtracted && !IsSigned) {
2753 // C1 - LV < C2 --> LV > C1 - C2
2754 if (computeOverflowForUnsignedSub(InvariantOp, InvariantRHS, SQ) !=
2756 return false;
2757 } else if (!VariantSubtracted && IsSigned) {
2758 // LV - C1 < C2 --> LV < C1 + C2
2759 if (computeOverflowForSignedAdd(InvariantOp, InvariantRHS, SQ) !=
2761 return false;
2762 } else { // !VariantSubtracted && !IsSigned
2763 // LV - C1 < C2 --> LV < C1 + C2
2764 if (computeOverflowForUnsignedAdd(InvariantOp, InvariantRHS, SQ) !=
2766 return false;
2767 }
2768 auto *Preheader = L.getLoopPreheader();
2769 assert(Preheader && "Loop is not in simplify form?");
2770 IRBuilder<> Builder(Preheader->getTerminator());
2771 Value *NewCmpOp =
2772 VariantSubtracted
2773 ? Builder.CreateSub(InvariantOp, InvariantRHS, "invariant.op",
2774 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned)
2775 : Builder.CreateAdd(InvariantOp, InvariantRHS, "invariant.op",
2776 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned);
2777 ICmp.setPredicate(Pred);
2778 ICmp.setOperand(0, VariantOp);
2779 ICmp.setOperand(1, NewCmpOp);
2780
2781 Instruction &DeadI = cast<Instruction>(*VariantLHS);
2782 salvageDebugInfo(DeadI);
2783 eraseInstruction(DeadI, SafetyInfo, MSSAU);
2784 return true;
2785}
2786
2787/// Reassociate and hoist add/sub expressions.
2788static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2790 DominatorTree *DT) {
2791 using namespace PatternMatch;
2792 CmpPredicate Pred;
2793 Value *LHS, *RHS;
2794 if (!match(&I, m_ICmp(Pred, m_Value(LHS), m_Value(RHS))))
2795 return false;
2796
2797 // Put variant operand to LHS position.
2798 if (L.isLoopInvariant(LHS)) {
2799 std::swap(LHS, RHS);
2800 Pred = ICmpInst::getSwappedPredicate(Pred);
2801 }
2802 // We want to delete the initial operation after reassociation, so only do it
2803 // if it has no other uses.
2804 if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS) || !LHS->hasOneUse())
2805 return false;
2806
2807 // TODO: We could go with smarter context, taking common dominator of all I's
2808 // users instead of I itself.
2809 if (hoistAdd(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
2810 return true;
2811
2812 if (hoistSub(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
2813 return true;
2814
2815 return false;
2816}
2817
2818static bool isReassociableOp(Instruction *I, unsigned IntOpcode,
2819 unsigned FPOpcode) {
2820 if (I->getOpcode() == IntOpcode)
2821 return true;
2822 if (I->getOpcode() == FPOpcode && I->hasAllowReassoc() &&
2823 I->hasNoSignedZeros())
2824 return true;
2825 return false;
2826}
2827
2828/// Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where
2829/// A1, A2, ... and C are loop invariants into expressions like
2830/// ((A1 * C * B1) + (A2 * C * B2) + ...) and hoist the (A1 * C), (A2 * C), ...
2831/// invariant expressions. This functions returns true only if any hoisting has
2832/// actually occured.
2834 ICFLoopSafetyInfo &SafetyInfo,
2836 DominatorTree *DT) {
2837 if (!isReassociableOp(&I, Instruction::Mul, Instruction::FMul))
2838 return false;
2839 Value *VariantOp = I.getOperand(0);
2840 Value *InvariantOp = I.getOperand(1);
2841 if (L.isLoopInvariant(VariantOp))
2842 std::swap(VariantOp, InvariantOp);
2843 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2844 return false;
2845 Value *Factor = InvariantOp;
2846
2847 // First, we need to make sure we should do the transformation.
2848 SmallVector<Use *> Changes;
2851 if (BinaryOperator *VariantBinOp = dyn_cast<BinaryOperator>(VariantOp))
2852 Worklist.push_back(VariantBinOp);
2853 while (!Worklist.empty()) {
2854 BinaryOperator *BO = Worklist.pop_back_val();
2855 if (!BO->hasOneUse())
2856 return false;
2857 if (isReassociableOp(BO, Instruction::Add, Instruction::FAdd) &&
2860 Worklist.push_back(cast<BinaryOperator>(BO->getOperand(0)));
2861 Worklist.push_back(cast<BinaryOperator>(BO->getOperand(1)));
2862 Adds.push_back(BO);
2863 continue;
2864 }
2865 if (!isReassociableOp(BO, Instruction::Mul, Instruction::FMul) ||
2866 L.isLoopInvariant(BO))
2867 return false;
2868 Use &U0 = BO->getOperandUse(0);
2869 Use &U1 = BO->getOperandUse(1);
2870 if (L.isLoopInvariant(U0))
2871 Changes.push_back(&U0);
2872 else if (L.isLoopInvariant(U1))
2873 Changes.push_back(&U1);
2874 else
2875 return false;
2876 unsigned Limit = I.getType()->isIntOrIntVectorTy()
2879 if (Changes.size() > Limit)
2880 return false;
2881 }
2882 if (Changes.empty())
2883 return false;
2884
2885 // Drop the poison flags for any adds we looked through.
2886 if (I.getType()->isIntOrIntVectorTy()) {
2887 for (auto *Add : Adds)
2888 Add->dropPoisonGeneratingFlags();
2889 }
2890
2891 // We know we should do it so let's do the transformation.
2892 auto *Preheader = L.getLoopPreheader();
2893 assert(Preheader && "Loop is not in simplify form?");
2894 IRBuilder<> Builder(Preheader->getTerminator());
2895 for (auto *U : Changes) {
2896 assert(L.isLoopInvariant(U->get()));
2897 auto *Ins = cast<BinaryOperator>(U->getUser());
2898 Value *Mul;
2899 if (I.getType()->isIntOrIntVectorTy()) {
2900 Mul = Builder.CreateMul(U->get(), Factor, "factor.op.mul");
2901 // Drop the poison flags on the original multiply.
2902 Ins->dropPoisonGeneratingFlags();
2903 } else
2904 Mul = Builder.CreateFMulFMF(U->get(), Factor, Ins, "factor.op.fmul");
2905
2906 // Rewrite the reassociable instruction.
2907 unsigned OpIdx = U->getOperandNo();
2908 auto *LHS = OpIdx == 0 ? Mul : Ins->getOperand(0);
2909 auto *RHS = OpIdx == 1 ? Mul : Ins->getOperand(1);
2910 auto *NewBO =
2911 BinaryOperator::Create(Ins->getOpcode(), LHS, RHS,
2912 Ins->getName() + ".reass", Ins->getIterator());
2913 NewBO->setDebugLoc(DebugLoc::getDropped());
2914 NewBO->copyIRFlags(Ins);
2915 if (VariantOp == Ins)
2916 VariantOp = NewBO;
2917 Ins->replaceAllUsesWith(NewBO);
2918 eraseInstruction(*Ins, SafetyInfo, MSSAU);
2919 }
2920
2921 I.replaceAllUsesWith(VariantOp);
2922 eraseInstruction(I, SafetyInfo, MSSAU);
2923 return true;
2924}
2925
2926/// Reassociate associative binary expressions of the form
2927///
2928/// 1. "(LV op C1) op C2" ==> "LV op (C1 op C2)"
2929/// 2. "(C1 op LV) op C2" ==> "LV op (C1 op C2)"
2930/// 3. "C2 op (C1 op LV)" ==> "LV op (C1 op C2)"
2931/// 4. "C2 op (LV op C1)" ==> "LV op (C1 op C2)"
2932///
2933/// where op is an associative BinOp, LV is a loop variant, and C1 and C2 are
2934/// loop invariants that we want to hoist, noting that associativity implies
2935/// commutativity.
2937 ICFLoopSafetyInfo &SafetyInfo,
2939 DominatorTree *DT) {
2940 auto *BO = dyn_cast<BinaryOperator>(&I);
2941 if (!BO || !BO->isAssociative())
2942 return false;
2943
2944 Instruction::BinaryOps Opcode = BO->getOpcode();
2945 bool LVInRHS = L.isLoopInvariant(BO->getOperand(0));
2946 auto *BO0 = dyn_cast<BinaryOperator>(BO->getOperand(LVInRHS));
2947 if (!BO0 || BO0->getOpcode() != Opcode || !BO0->isAssociative() ||
2948 BO0->hasNUsesOrMore(BO0->getType()->isIntegerTy() ? 2 : 3))
2949 return false;
2950
2951 Value *LV = BO0->getOperand(0);
2952 Value *C1 = BO0->getOperand(1);
2953 Value *C2 = BO->getOperand(!LVInRHS);
2954
2955 assert(BO->isCommutative() && BO0->isCommutative() &&
2956 "Associativity implies commutativity");
2957 if (L.isLoopInvariant(LV) && !L.isLoopInvariant(C1))
2958 std::swap(LV, C1);
2959 if (L.isLoopInvariant(LV) || !L.isLoopInvariant(C1) || !L.isLoopInvariant(C2))
2960 return false;
2961
2962 auto *Preheader = L.getLoopPreheader();
2963 assert(Preheader && "Loop is not in simplify form?");
2964
2965 IRBuilder<> Builder(Preheader->getTerminator());
2966 auto *Inv = Builder.CreateBinOp(Opcode, C1, C2, "invariant.op");
2967
2968 auto *NewBO = BinaryOperator::Create(
2969 Opcode, LV, Inv, BO->getName() + ".reass", BO->getIterator());
2970 NewBO->setDebugLoc(DebugLoc::getDropped());
2971
2972 if (Opcode == Instruction::FAdd || Opcode == Instruction::FMul) {
2973 // Intersect FMF flags for FADD and FMUL.
2974 FastMathFlags Intersect = BO->getFastMathFlags() & BO0->getFastMathFlags();
2975 if (auto *I = dyn_cast<Instruction>(Inv))
2976 I->setFastMathFlags(Intersect);
2977 NewBO->setFastMathFlags(Intersect);
2978 } else {
2979 OverflowTracking Flags;
2980 Flags.AllKnownNonNegative = false;
2981 Flags.AllKnownNonZero = false;
2982 Flags.mergeFlags(*BO);
2983 Flags.mergeFlags(*BO0);
2984 // If `Inv` was not constant-folded, a new Instruction has been created.
2985 if (auto *I = dyn_cast<Instruction>(Inv))
2986 Flags.applyFlags(*I);
2987 Flags.applyFlags(*NewBO);
2988 }
2989
2990 BO->replaceAllUsesWith(NewBO);
2991 eraseInstruction(*BO, SafetyInfo, MSSAU);
2992
2993 // (LV op C1) might not be erased if it has more uses than the one we just
2994 // replaced.
2995 if (BO0->use_empty()) {
2996 salvageDebugInfo(*BO0);
2997 eraseInstruction(*BO0, SafetyInfo, MSSAU);
2998 }
2999
3000 return true;
3001}
3002
3004 ICFLoopSafetyInfo &SafetyInfo,
3006 DominatorTree *DT) {
3007 // Optimize complex patterns, such as (x < INV1 && x < INV2), turning them
3008 // into (x < min(INV1, INV2)), and hoisting the invariant part of this
3009 // expression out of the loop.
3010 if (hoistMinMax(I, L, SafetyInfo, MSSAU)) {
3011 ++NumHoisted;
3012 ++NumMinMaxHoisted;
3013 return true;
3014 }
3015
3016 // Try to hoist GEPs by reassociation.
3017 if (hoistGEP(I, L, SafetyInfo, MSSAU, AC, DT)) {
3018 ++NumHoisted;
3019 ++NumGEPsHoisted;
3020 return true;
3021 }
3022
3023 // Try to hoist add/sub's by reassociation.
3024 if (hoistAddSub(I, L, SafetyInfo, MSSAU, AC, DT)) {
3025 ++NumHoisted;
3026 ++NumAddSubHoisted;
3027 return true;
3028 }
3029
3030 bool IsInt = I.getType()->isIntOrIntVectorTy();
3031 if (hoistMulAddAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
3032 ++NumHoisted;
3033 if (IsInt)
3034 ++NumIntAssociationsHoisted;
3035 else
3036 ++NumFPAssociationsHoisted;
3037 return true;
3038 }
3039
3040 if (hoistBOAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
3041 ++NumHoisted;
3042 ++NumBOAssociationsHoisted;
3043 return true;
3044 }
3045
3046 return false;
3047}
3048
3049/// Little predicate that returns true if the specified basic block is in
3050/// a subloop of the current one, not the current one itself.
3051///
3052static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
3053 assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
3054 return LI->getLoopFor(BB) != CurLoop;
3055}
unsigned const MachineRegisterInfo * MRI
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Forward Handle Accesses
DXIL Resource Access
early cse Early CSE w MemorySSA
#define DEBUG_TYPE
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
Definition IVUsers.cpp:48
static bool isReassociableOp(Instruction *I, unsigned IntOpcode, unsigned FPOpcode)
Definition LICM.cpp:2818
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, TargetTransformInfo *TTI, bool &FoldableInLoop, bool LoopNestMode)
Return true if the only users of this instruction are outside of the loop.
Definition LICM.cpp:1343
static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if this allows hoisting the inner ...
Definition LICM.cpp:2593
static cl::opt< bool > SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false), cl::desc("Force thread model single in LICM pass"))
static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT, LoopInfo *LI, const Loop *CurLoop, LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU)
Definition LICM.cpp:1578
static bool sinkUnusedInvariantsFromPreheaderToExit(Loop *L, AAResults *AA, ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU, ScalarEvolution *SE, DominatorTree *DT, SinkAndHoistLICMFlags &SinkFlags, OptimizationRemarkEmitter *ORE)
Definition LICM.cpp:1486
static cl::opt< unsigned > FPAssociationUpperLimit("licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden, cl::desc("Set upper limit for the number of transformations performed " "during a single round of hoisting the reassociated expressions."))
static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop, const TargetTransformInfo *TTI)
Return true if the instruction is foldable in the loop.
Definition LICM.cpp:1313
static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU)
Try to simplify things like (A < INV_1 AND icmp A < INV_2) into (A < min(INV_1, INV_2)),...
Definition LICM.cpp:2512
static Instruction * cloneInstructionInExitBlock(Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI, const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU)
Definition LICM.cpp:1385
static cl::opt< bool > ControlFlowHoisting("licm-control-flow-hoisting", cl::Hidden, cl::init(false), cl::desc("Enable control flow (and PHI) hoisting in LICM"))
static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU, Loop *CurLoop, Instruction &I, SinkAndHoistLICMFlags &Flags, bool InvariantGroup)
Definition LICM.cpp:2449
static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS, Value *InvariantRHS, ICmpInst &ICmp, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to turn things like "LV + C1 < C2" into "LV < C2 - C1".
Definition LICM.cpp:2651
static MemoryAccess * getClobberingMemoryAccess(MemorySSA &MSSA, BatchAAResults &BAA, SinkAndHoistLICMFlags &Flags, MemoryUseOrDef *MA)
Definition LICM.cpp:1164
static SmallVector< PointersAndHasReadsOutsideSet, 0 > collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L)
Definition LICM.cpp:2312
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop, BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU, ScalarEvolution *SE, OptimizationRemarkEmitter *ORE)
When an instruction is found to only use loop invariant operands that is safe to hoist,...
Definition LICM.cpp:1756
static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo)
Definition LICM.cpp:1560
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT, const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE)
When an instruction is found to only be used outside of the loop, this function moves it to the exit ...
Definition LICM.cpp:1650
static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate and hoist add/sub expressions.
Definition LICM.cpp:2788
static bool hoistMulAddAssociation(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where A1, A2,...
Definition LICM.cpp:2833
static cl::opt< uint32_t > MaxNumUsesTraversed("licm-max-num-uses-traversed", cl::Hidden, cl::init(8), cl::desc("Max num uses visited for identifying load " "invariance in loop using invariant start (default = 8)"))
static bool isOnlyMemoryAccess(const Instruction *I, const Loop *L, const MemorySSAUpdater &MSSAU)
Return true if I is the only Instruction with a MemoryAccess in L.
Definition LICM.cpp:1148
static cl::opt< unsigned > IntAssociationUpperLimit("licm-max-num-int-reassociations", cl::init(5U), cl::Hidden, cl::desc("Set upper limit for the number of transformations performed " "during a single round of hoisting the reassociated expressions."))
static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L, function_ref< void(Instruction *)> Fn)
Definition LICM.cpp:2300
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT, Loop *CurLoop)
Definition LICM.cpp:1079
static bool isHoistableAndSinkableInst(Instruction &I)
Return true if-and-only-if we know how to (mechanically) both hoist and sink a given instruction out ...
Definition LICM.cpp:1136
static Instruction * sinkThroughTriviallyReplaceablePHI(PHINode *TPN, Instruction *I, LoopInfo *LI, SmallDenseMap< BasicBlock *, Instruction *, 32 > &SunkCopies, const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop, MemorySSAUpdater &MSSAU)
Definition LICM.cpp:1545
static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, ScalarEvolution *SE, MemorySSA::InsertionPlace Point=MemorySSA::BeforeTerminator)
Definition LICM.cpp:1469
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI)
Little predicate that returns true if the specified basic block is in a subloop of the current one,...
Definition LICM.cpp:3052
static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS, Value *InvariantRHS, ICmpInst &ICmp, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to reassociate and hoist the following two patterns: LV - C1 < C2 --> LV < C1 + C2,...
Definition LICM.cpp:2709
static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU)
Definition LICM.cpp:1462
static bool isSafeToExecuteUnconditionally(Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE, const Instruction *CtxI, AssumptionCache *AC, bool AllowSpeculation)
Only sink or hoist an instruction if it is not a trapping instruction, or if the instruction is known...
Definition LICM.cpp:1803
static bool hoistArithmetics(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Aggregates various functions for hoisting computations out of loop.
Definition LICM.cpp:3003
static bool noConflictingReadWrites(Instruction *I, MemorySSA *MSSA, AAResults *AA, Loop *CurLoop, SinkAndHoistLICMFlags &Flags)
Definition LICM.cpp:2381
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I)
Returns true if a PHINode is a trivially replaceable with an Instruction.
Definition LICM.cpp:1304
std::pair< SmallSetVector< Value *, 8 >, bool > PointersAndHasReadsOutsideSet
Definition LICM.cpp:226
static cl::opt< bool > DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false), cl::desc("Disable memory promotion in LICM pass"))
Memory promotion is enabled by default.
static bool hoistBOAssociation(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate associative binary expressions of the form.
Definition LICM.cpp:2936
static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA, MemoryUse &MU)
Definition LICM.cpp:2500
This file defines the interface for the loop nest analysis.
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Memory SSA
Definition MemorySSA.cpp:72
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
This file contains the declarations for metadata subclasses.
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
if(PassOpts->AAPipeline)
PassInstrumentationCallbacks PIC
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
This file provides a priority worklist.
Remove Loads Into Fake Uses
This file defines generic set operations that may be used on set's of different types,...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
This pass exposes codegen information to IR-level passes.
static cl::opt< bool > DisablePromotion("disable-type-promotion", cl::Hidden, cl::init(false), cl::desc("Disable type promotion pass"))
Value * RHS
Value * LHS
BinaryOperator * Mul
LLVM_ABI void add(const MemoryLocation &Loc)
These methods are used to add different types of instructions to the alias sets.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI void replaceSuccessorsPhiUsesWith(BasicBlock *Old, BasicBlock *New)
Update all phi nodes in this basic block's successors to refer to basic block New instead of basic bl...
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
void moveBefore(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it into the function that MovePos lives ...
Definition BasicBlock.h:386
LLVM_ABI bool canSplitPredecessors() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition InstrTypes.h:768
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
bool isSigned() const
Definition InstrTypes.h:930
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
This is the shared class of boolean and integer constants.
Definition Constants.h:87
bool isNegative() const
Definition Constants.h:209
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition Constants.h:169
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition DataLayout.h:557
static LLVM_ABI DebugLoc getMergedLocations(ArrayRef< DebugLoc > Locs)
Try to combine the vector of locations passed as input in a single one.
Definition DebugLoc.cpp:170
static DebugLoc getDropped()
Definition DebugLoc.h:164
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:248
iterator end()
Definition DenseMap.h:81
DomTreeNodeBase * getIDom() const
NodeT * getBlock() const
Analysis pass which computes a DominatorTree.
Definition Dominators.h:284
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
This implementation of LoopSafetyInfo use ImplicitControlFlowTracking to give precise answers on "may...
bool doesNotWriteMemoryBefore(const BasicBlock *BB, const Loop *CurLoop) const
Returns true if we could not execute a memory-modifying instruction before we enter BB under assumpti...
void removeInstruction(const Instruction *Inst)
Inform safety info that we are planning to remove the instruction Inst from its block.
bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const override
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
bool anyBlockMayThrow() const override
Returns true iff any block of the loop for which this info is contains an instruction that may throw ...
void computeLoopSafetyInfo(const Loop *CurLoop) override
Computes safety information for a loop checks loop body & header for the possibility of may throw exc...
void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB)
Inform the safety info that we are planning to insert a new instruction Inst into the basic block BB.
This instruction compares its operands according to the predicate given to the constructor.
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2788
LLVM_ABI void mergeDIAssignID(ArrayRef< const Instruction * > SourceInstructions)
Merge the DIAssignID metadata from this instruction and those attached to instructions in SourceInstr...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition LICM.cpp:331
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Definition LICM.cpp:309
PreservedAnalyses run(LoopNest &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Definition LICM.cpp:341
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition LICM.cpp:371
This class provides an interface for updating the loop pass manager based on mutations to the loop ne...
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
Helper class for promoting a collection of loads and stores into SSA Form using the SSAUpdater.
Definition SSAUpdater.h:147
An instruction for reading from memory.
void setAlignment(Align Align)
Value * getPointerOperand()
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this load instruction.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getHeader() const
void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase< BlockT, LoopT > &LI)
This method is used by other analyses to update loop information.
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
void getUniqueExitBlocks(SmallVectorImpl< BlockT * > &ExitBlocks) const
Return all unique successor blocks of this loop.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void verify(const DominatorTreeBase< BlockT, false > &DomTree) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
LLVM_ABI bool wouldBeOutOfLoopUseRequiringLCSSA(const Value *V, const BasicBlock *ExitBB) const
Definition LoopInfo.cpp:943
This class represents a loop nest and can be used to query its properties.
Function * getParent() const
Return the function to which the loop-nest belongs.
Loop & getOutermostLoop() const
Return the outermost loop in the loop nest.
Captures loop safety information.
Definition MustExecute.h:60
LLVM_ABI void copyColors(BasicBlock *New, BasicBlock *Old)
Copy colors of block Old into the block New.
LLVM_ABI const DenseMap< BasicBlock *, ColorVector > & getBlockColors() const
Returns block colors map that is used to update funclet operand bundles.
virtual bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const =0
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition LoopInfo.cpp:67
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition LoopInfo.cpp:61
BasicBlock * getBlock() const
Definition MemorySSA.h:162
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition ModRef.h:221
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition ModRef.h:215
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition ModRef.h:218
static LLVM_ABI MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
An analysis that produces MemorySSA for a function.
Definition MemorySSA.h:936
MemorySSA * getMemorySSA() const
Get handle on MemorySSA.
LLVM_ABI void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
LLVM_ABI void insertUse(MemoryUse *Use, bool RenameUses=false)
LLVM_ABI MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point, bool CreationMustSucceed=true)
Create a MemoryAccess in MemorySSA at a specified point in a block.
LLVM_ABI void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
LLVM_ABI MemoryUseOrDef * createMemoryAccessAfter(Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt)
Create a MemoryAccess in MemorySSA after an existing MemoryAccess.
LLVM_ABI void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, MemorySSA::InsertionPlace Where)
LLVM_ABI void wireOldPredecessorsToNewImmediatePredecessor(BasicBlock *Old, BasicBlock *New, ArrayRef< BasicBlock * > Preds, bool IdenticalEdgesWereMerged=true)
A new empty BasicBlock (New) now branches directly to Old.
MemoryAccess * getClobberingMemoryAccess(const Instruction *I, BatchAAResults &AA)
Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...
Definition MemorySSA.h:1053
Legacy analysis pass which computes MemorySSA.
Definition MemorySSA.h:993
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition MemorySSA.h:702
AliasAnalysis & getAA()
Definition MemorySSA.h:802
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
Definition MemorySSA.h:760
LLVM_ABI MemorySSAWalker * getSkipSelfWalker()
LLVM_ABI bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
LLVM_ABI void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
InsertionPlace
Used in various insertion functions to specify whether we are talking about the beginning or end of a...
Definition MemorySSA.h:793
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition MemorySSA.h:720
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
Definition MemorySSA.h:768
LLVM_ABI bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in the same basic block, determine whether MemoryAccess A dominates MemoryA...
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Definition MemorySSA.h:740
Class that has the common methods + fields of memory uses/defs.
Definition MemorySSA.h:250
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition MemorySSA.h:260
Represents read-only accesses to memory.
Definition MemorySSA.h:310
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
void setIncomingBlock(unsigned i, BasicBlock *BB)
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
PointerIntPair - This class implements a pair of a pointer and small integer.
void setInt(IntType IntVal) &
PointerTy getPointer() const
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
PredIteratorCache - This class is an extremely trivial cache for predecessor iterator queries.
size_t size(BasicBlock *BB)
ArrayRef< BasicBlock * > get(BasicBlock *BB)
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
bool empty() const
Determine if the PriorityWorklist is empty or not.
bool insert(const T &X)
Insert a new element into the PriorityWorklist.
Helper class for SSA formation on a set of values defined in multiple blocks.
Definition SSAUpdater.h:39
The main scalar evolution driver.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
bool remove(const value_type &X)
Remove an item from the set vector.
Definition SetVector.h:180
bool empty() const
Determine if the SetVector is empty or not.
Definition SetVector.h:99
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition SetVector.h:105
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:150
Flags controlling how much is checked when sinking or hoisting instructions.
Definition LoopUtils.h:124
LLVM_ABI SinkAndHoistLICMFlags(unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink, Loop &L, MemorySSA &MSSA)
Definition LICM.cpp:399
unsigned LicmMssaNoAccForPromotionCap
Definition LoopUtils.h:143
A version of PriorityWorklist that selects small size optimized data structures for the vector and ma...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:338
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
void setAlignment(Align Align)
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
static unsigned getPointerOperandIndex()
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Provides information about what library functions are available for the current target.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCC_Free
Expected to fold away in lowering.
EltTy front() const
unsigned size() const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
const Use & getOperandUse(unsigned i) const
Definition User.h:245
void setOperand(unsigned i, Value *Val)
Definition User.h:237
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
LLVM_ABI std::string getNameOrAsOperand() const
Definition Value.cpp:457
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
iterator_range< user_iterator > users()
Definition Value.h:426
bool use_empty() const
Definition Value.h:346
iterator_range< use_iterator > uses()
Definition Value.h:380
user_iterator_impl< User > user_iterator
Definition Value.h:391
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:201
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
Changed
Abstract Attribute helper functions.
Definition Attributor.h:165
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
initializer< Ty > init(const Ty &Val)
DiagnosticInfoOptimizationBase::Argument NV
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
@ NeverOverflows
Never overflows.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
LLVM_ABI bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT, Loop *CurLoop, MemorySSAUpdater &MSSAU, bool TargetExecutesOncePerLoop, SinkAndHoistLICMFlags &LICMFlags, OptimizationRemarkEmitter *ORE=nullptr)
Returns true if it is legal to hoist or sink this instruction disregarding the possible introduction of ...
Definition LICM.cpp:1178
auto pred_end(const MachineBasicBlock *BB)
void set_intersect(S1Ty &S1, const S2Ty &S2)
set_intersect(A, B) - Compute A := A ^ B Identical to set_intersection, except that it works on set<>...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1724
auto successors(const MachineBasicBlock *BB)
LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater or equal than requested.
Definition Loads.cpp:229
constexpr from_range_t from_range
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
auto cast_or_null(const Y &Val)
Definition Casting.h:714
auto pred_size(const MachineBasicBlock *BB)
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:296
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
LLVM_ABI Pass * createLICMPass()
Definition LICM.cpp:392
LLVM_ABI SmallVector< BasicBlock *, 16 > collectChildrenInLoop(DominatorTree *DT, DomTreeNode *N, const Loop *CurLoop)
Does a BFS from a given node to all of its children inside a given loop.
DomTreeNodeBase< BasicBlock > DomTreeNode
Definition Dominators.h:95
AnalysisManager< Loop, LoopStandardAnalysisResults & > LoopAnalysisManager
The loop analysis manager.
LLVM_ABI bool hoistRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, AssumptionCache *, TargetLibraryInfo *, Loop *, MemorySSAUpdater &, ScalarEvolution *, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, bool, bool AllowSpeculation)
Walk the specified region of the CFG (defined by all blocks dominated by the specified block,...
Definition LICM.cpp:901
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:402
bool isGuard(const User *U)
Returns true iff U has semantics of a guard expressed in a form of call of llvm.experimental....
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
LLVM_ABI OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
LLVM_ABI void initializeLegacyLICMPassPass(PassRegistry &)
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_TEMPLATE_ABI void appendLoopsToWorklist(RangeT &&, SmallPriorityWorklist< Loop *, 4 > &)
Utility that implements appending of loops onto a worklist given a range.
bool isModOrRefSet(const ModRefInfo MRI)
Definition ModRef.h:43
LLVM_ABI bool isNotVisibleOnUnwind(const Value *Object, bool &RequiresNoCaptureBeforeUnwind)
Return true if Object memory is not visible after an unwind, in the sense that program semantics cann...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI void getLoopAnalysisUsage(AnalysisUsage &AU)
Helper to consistently add the set of standard passes to a loop pass's AnalysisUsage.
LLVM_ABI BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition ModRef.h:28
TargetTransformInfo TTI
LLVM_ABI bool VerifyMemorySSA
Enables verification of MemorySSA.
Definition MemorySSA.cpp:84
LLVM_ABI bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
LLVM_ABI bool hasDisableLICMTransformsHint(const Loop *L)
Look for the loop attribute that disables the LICM transformation heuristics.
LLVM_ABI OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
@ Add
Sum of integers.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if this is always a dereferenceable pointer.
Definition Loads.cpp:249
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1867
LLVM_ABI OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
TinyPtrVector< BasicBlock * > ColorVector
auto pred_begin(const MachineBasicBlock *BB)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI PreservedAnalyses getLoopPassPreservedAnalyses()
Returns the minimum set of Analyses that all loop passes must preserve.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1758
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2120
auto predecessors(const MachineBasicBlock *BB)
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
LLVM_ABI bool sinkRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, TargetTransformInfo *, Loop *CurLoop, MemorySSAUpdater &, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, Loop *OutermostLoop=nullptr)
Walk the specified region of the CFG (defined by all blocks dominated by the specified block,...
Definition LICM.cpp:571
cl::opt< bool > ProfcheckDisableMetadataFixes("profcheck-disable-metadata-fixes", cl::Hidden, cl::init(false), cl::desc("Disable metadata propagation fixes discovered through Issue #147390"))
LLVM_ABI OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
cl::opt< unsigned > SetLicmMssaNoAccForPromotionCap
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
bool capturesNothing(CaptureComponents CC)
Definition ModRef.h:315
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
LLVM_ABI bool promoteLoopAccessesToScalars(const SmallSetVector< Value *, 8 > &, SmallVectorImpl< BasicBlock * > &, SmallVectorImpl< BasicBlock::iterator > &, SmallVectorImpl< MemoryAccess * > &, PredIteratorCache &, LoopInfo *, DominatorTree *, AssumptionCache *AC, const TargetLibraryInfo *, TargetTransformInfo *, Loop *, MemorySSAUpdater &, ICFLoopSafetyInfo *, OptimizationRemarkEmitter *, bool AllowSpeculation, bool HasReadsOutsideSet)
Try to promote memory values to scalars by sinking stores out of the loop and moving loads to before ...
Definition LICM.cpp:1985
cl::opt< unsigned > SetLicmMssaOptCap
LLVM_ABI bool sinkRegionForLoopNest(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, TargetTransformInfo *, Loop *, MemorySSAUpdater &, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *)
Call sinkRegion on loops contained within the specified loop in order from innermost to outermost.
Definition LICM.cpp:638
bool isRefSet(const ModRefInfo MRI)
Definition ModRef.h:52
LLVM_ABI bool isWritableObject(const Value *Object, bool &ExplicitlyDereferenceableOnly)
Return true if the Object is writable, in the sense that any location based on this pointer that can ...
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:180
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:761
LLVM_ABI AAMDNodes merge(const AAMDNodes &Other) const
Given two sets of AAMDNodes applying to potentially different locations, determine the best AAMDNodes...
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
A lightweight accessor for an operand bundle meant to be passed around by value.
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70