LLVM 18.0.0git
LoopUnrollAndJam.cpp
Go to the documentation of this file.
1//===-- LoopUnrollAndJam.cpp - Loop unrolling utilities -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements loop unroll and jam as a routine, much like
10// LoopUnroll.cpp implements loop unroll.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/DenseMap.h"
16#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/Statistic.h"
20#include "llvm/ADT/StringRef.h"
21#include "llvm/ADT/Twine.h"
31#include "llvm/IR/BasicBlock.h"
33#include "llvm/IR/DebugLoc.h"
35#include "llvm/IR/Dominators.h"
36#include "llvm/IR/Function.h"
37#include "llvm/IR/Instruction.h"
40#include "llvm/IR/User.h"
41#include "llvm/IR/Value.h"
42#include "llvm/IR/ValueHandle.h"
43#include "llvm/IR/ValueMap.h"
45#include "llvm/Support/Debug.h"
54#include <assert.h>
55#include <memory>
56#include <type_traits>
57#include <vector>
58
59using namespace llvm;
60
61#define DEBUG_TYPE "loop-unroll-and-jam"
62
63STATISTIC(NumUnrolledAndJammed, "Number of loops unroll and jammed");
64STATISTIC(NumCompletelyUnrolledAndJammed, "Number of loops unroll and jammed");
65
67
68// Partition blocks in an outer/inner loop pair into blocks before and after
69// the loop
70static bool partitionLoopBlocks(Loop &L, BasicBlockSet &ForeBlocks,
71 BasicBlockSet &AftBlocks, DominatorTree &DT) {
72 Loop *SubLoop = L.getSubLoops()[0];
73 BasicBlock *SubLoopLatch = SubLoop->getLoopLatch();
74
75 for (BasicBlock *BB : L.blocks()) {
76 if (!SubLoop->contains(BB)) {
77 if (DT.dominates(SubLoopLatch, BB))
78 AftBlocks.insert(BB);
79 else
80 ForeBlocks.insert(BB);
81 }
82 }
83
84 // Check that all blocks in ForeBlocks together dominate the subloop
85 // TODO: This might ideally be done better with a dominator/postdominators.
86 BasicBlock *SubLoopPreHeader = SubLoop->getLoopPreheader();
87 for (BasicBlock *BB : ForeBlocks) {
88 if (BB == SubLoopPreHeader)
89 continue;
90 Instruction *TI = BB->getTerminator();
91 for (BasicBlock *Succ : successors(TI))
92 if (!ForeBlocks.count(Succ))
93 return false;
94 }
95
96 return true;
97}
98
99/// Partition blocks in a loop nest into blocks before and after each inner
100/// loop.
102 Loop &Root, Loop &JamLoop, BasicBlockSet &JamLoopBlocks,
103 DenseMap<Loop *, BasicBlockSet> &ForeBlocksMap,
105 JamLoopBlocks.insert(JamLoop.block_begin(), JamLoop.block_end());
106
107 for (Loop *L : Root.getLoopsInPreorder()) {
108 if (L == &JamLoop)
109 break;
110
111 if (!partitionLoopBlocks(*L, ForeBlocksMap[L], AftBlocksMap[L], DT))
112 return false;
113 }
114
115 return true;
116}
117
118// TODO Remove when UnrollAndJamLoop changed to support unroll and jamming more
119// than 2 levels loop.
120static bool partitionOuterLoopBlocks(Loop *L, Loop *SubLoop,
121 BasicBlockSet &ForeBlocks,
122 BasicBlockSet &SubLoopBlocks,
123 BasicBlockSet &AftBlocks,
124 DominatorTree *DT) {
125 SubLoopBlocks.insert(SubLoop->block_begin(), SubLoop->block_end());
126 return partitionLoopBlocks(*L, ForeBlocks, AftBlocks, *DT);
127}
128
129// Looks at the phi nodes in Header for values coming from Latch. For these
130// instructions and all their operands calls Visit on them, keeping going for
131// all the operands in AftBlocks. Returns false if Visit returns false,
132// otherwise returns true. This is used to process the instructions in the
133// Aft blocks that need to be moved before the subloop. It is used in two
134// places. One to check that the required set of instructions can be moved
135// before the loop. Then to collect the instructions to actually move in
136// moveHeaderPhiOperandsToForeBlocks.
137template <typename T>
139 BasicBlockSet &AftBlocks, T Visit) {
141
142 std::function<bool(Instruction * I)> ProcessInstr = [&](Instruction *I) {
143 if (VisitedInstr.count(I))
144 return true;
145
146 VisitedInstr.insert(I);
147
148 if (AftBlocks.count(I->getParent()))
149 for (auto &U : I->operands())
150 if (Instruction *II = dyn_cast<Instruction>(U))
151 if (!ProcessInstr(II))
152 return false;
153
154 return Visit(I);
155 };
156
157 for (auto &Phi : Header->phis()) {
158 Value *V = Phi.getIncomingValueForBlock(Latch);
159 if (Instruction *I = dyn_cast<Instruction>(V))
160 if (!ProcessInstr(I))
161 return false;
162 }
163
164 return true;
165}
166
167// Move the phi operands of Header from Latch out of AftBlocks to InsertLoc.
169 BasicBlock *Latch,
170 Instruction *InsertLoc,
171 BasicBlockSet &AftBlocks) {
172 // We need to ensure we move the instructions in the correct order,
173 // starting with the earliest required instruction and moving forward.
174 processHeaderPhiOperands(Header, Latch, AftBlocks,
175 [&AftBlocks, &InsertLoc](Instruction *I) {
176 if (AftBlocks.count(I->getParent()))
177 I->moveBefore(InsertLoc);
178 return true;
179 });
180}
181
182/*
183 This method performs Unroll and Jam. For a simple loop like:
184 for (i = ..)
185 Fore(i)
186 for (j = ..)
187 SubLoop(i, j)
188 Aft(i)
189
190 Instead of doing normal inner or outer unrolling, we do:
191 for (i = .., i+=2)
192 Fore(i)
193 Fore(i+1)
194 for (j = ..)
195 SubLoop(i, j)
196 SubLoop(i+1, j)
197 Aft(i)
198 Aft(i+1)
199
200 So the outer loop is essentially unrolled and then the inner loops are fused
201 ("jammed") together into a single loop. This can increase speed when there
202 are loads in SubLoop that are invariant to i, as they become shared between
203 the now jammed inner loops.
204
205 We do this by splitting the blocks in the loop into Fore, Subloop and Aft.
206 Fore blocks are those before the inner loop, Aft are those after. Normal
207 Unroll code is used to copy each of these sets of blocks and the results are
208 combined together into the final form above.
209
210 isSafeToUnrollAndJam should be used prior to calling this to make sure the
211 unrolling will be valid. Checking profitability is also advisable.
212
213 If EpilogueLoop is non-null, it receives the epilogue loop (if it was
214 necessary to create one and not fully unrolled).
215*/
217llvm::UnrollAndJamLoop(Loop *L, unsigned Count, unsigned TripCount,
218 unsigned TripMultiple, bool UnrollRemainder,
221 OptimizationRemarkEmitter *ORE, Loop **EpilogueLoop) {
222
223 // When we enter here we should have already checked that it is safe
224 BasicBlock *Header = L->getHeader();
225 assert(Header && "No header.");
226 assert(L->getSubLoops().size() == 1);
227 Loop *SubLoop = *L->begin();
228
229 // Don't enter the unroll code if there is nothing to do.
230 if (TripCount == 0 && Count < 2) {
231 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; almost nothing to do\n");
232 return LoopUnrollResult::Unmodified;
233 }
234
235 assert(Count > 0);
236 assert(TripMultiple > 0);
237 assert(TripCount == 0 || TripCount % TripMultiple == 0);
238
239 // Are we eliminating the loop control altogether?
240 bool CompletelyUnroll = (Count == TripCount);
241
242 // We use the runtime remainder in cases where we don't know trip multiple
243 if (TripMultiple % Count != 0) {
244 if (!UnrollRuntimeLoopRemainder(L, Count, /*AllowExpensiveTripCount*/ false,
245 /*UseEpilogRemainder*/ true,
246 UnrollRemainder, /*ForgetAllSCEV*/ false,
247 LI, SE, DT, AC, TTI, true, EpilogueLoop)) {
248 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; remainder loop could not be "
249 "generated when assuming runtime trip count\n");
250 return LoopUnrollResult::Unmodified;
251 }
252 }
253
254 // Notify ScalarEvolution that the loop will be substantially changed,
255 // if not outright eliminated.
256 if (SE) {
257 SE->forgetLoop(L);
259 }
260
261 using namespace ore;
262 // Report the unrolling decision.
263 if (CompletelyUnroll) {
264 LLVM_DEBUG(dbgs() << "COMPLETELY UNROLL AND JAMMING loop %"
265 << Header->getName() << " with trip count " << TripCount
266 << "!\n");
267 ORE->emit(OptimizationRemark(DEBUG_TYPE, "FullyUnrolled", L->getStartLoc(),
268 L->getHeader())
269 << "completely unroll and jammed loop with "
270 << NV("UnrollCount", TripCount) << " iterations");
271 } else {
272 auto DiagBuilder = [&]() {
273 OptimizationRemark Diag(DEBUG_TYPE, "PartialUnrolled", L->getStartLoc(),
274 L->getHeader());
275 return Diag << "unroll and jammed loop by a factor of "
276 << NV("UnrollCount", Count);
277 };
278
279 LLVM_DEBUG(dbgs() << "UNROLL AND JAMMING loop %" << Header->getName()
280 << " by " << Count);
281 if (TripMultiple != 1) {
282 LLVM_DEBUG(dbgs() << " with " << TripMultiple << " trips per branch");
283 ORE->emit([&]() {
284 return DiagBuilder() << " with " << NV("TripMultiple", TripMultiple)
285 << " trips per branch";
286 });
287 } else {
288 LLVM_DEBUG(dbgs() << " with run-time trip count");
289 ORE->emit([&]() { return DiagBuilder() << " with run-time trip count"; });
290 }
291 LLVM_DEBUG(dbgs() << "!\n");
292 }
293
294 BasicBlock *Preheader = L->getLoopPreheader();
295 BasicBlock *LatchBlock = L->getLoopLatch();
296 assert(Preheader && "No preheader");
297 assert(LatchBlock && "No latch block");
298 BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator());
299 assert(BI && !BI->isUnconditional());
300 bool ContinueOnTrue = L->contains(BI->getSuccessor(0));
301 BasicBlock *LoopExit = BI->getSuccessor(ContinueOnTrue);
302 bool SubLoopContinueOnTrue = SubLoop->contains(
303 SubLoop->getLoopLatch()->getTerminator()->getSuccessor(0));
304
305 // Partition blocks in an outer/inner loop pair into blocks before and after
306 // the loop
307 BasicBlockSet SubLoopBlocks;
308 BasicBlockSet ForeBlocks;
309 BasicBlockSet AftBlocks;
310 partitionOuterLoopBlocks(L, SubLoop, ForeBlocks, SubLoopBlocks, AftBlocks,
311 DT);
312
313 // We keep track of the entering/first and exiting/last block of each of
314 // Fore/SubLoop/Aft in each iteration. This helps make the stapling up of
315 // blocks easier.
316 std::vector<BasicBlock *> ForeBlocksFirst;
317 std::vector<BasicBlock *> ForeBlocksLast;
318 std::vector<BasicBlock *> SubLoopBlocksFirst;
319 std::vector<BasicBlock *> SubLoopBlocksLast;
320 std::vector<BasicBlock *> AftBlocksFirst;
321 std::vector<BasicBlock *> AftBlocksLast;
322 ForeBlocksFirst.push_back(Header);
323 ForeBlocksLast.push_back(SubLoop->getLoopPreheader());
324 SubLoopBlocksFirst.push_back(SubLoop->getHeader());
325 SubLoopBlocksLast.push_back(SubLoop->getExitingBlock());
326 AftBlocksFirst.push_back(SubLoop->getExitBlock());
327 AftBlocksLast.push_back(L->getExitingBlock());
328 // Maps Blocks[0] -> Blocks[It]
329 ValueToValueMapTy LastValueMap;
330
331 // Move any instructions from fore phi operands from AftBlocks into Fore.
333 Header, LatchBlock, ForeBlocksLast[0]->getTerminator(), AftBlocks);
334
335 // The current on-the-fly SSA update requires blocks to be processed in
336 // reverse postorder so that LastValueMap contains the correct value at each
337 // exit.
338 LoopBlocksDFS DFS(L);
339 DFS.perform(LI);
340 // Stash the DFS iterators before adding blocks to the loop.
341 LoopBlocksDFS::RPOIterator BlockBegin = DFS.beginRPO();
342 LoopBlocksDFS::RPOIterator BlockEnd = DFS.endRPO();
343
344 // When a FSDiscriminator is enabled, we don't need to add the multiply
345 // factors to the discriminators.
346 if (Header->getParent()->shouldEmitDebugInfoForProfiling() &&
348 for (BasicBlock *BB : L->getBlocks())
349 for (Instruction &I : *BB)
350 if (!I.isDebugOrPseudoInst())
351 if (const DILocation *DIL = I.getDebugLoc()) {
352 auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(Count);
353 if (NewDIL)
354 I.setDebugLoc(*NewDIL);
355 else
357 << "Failed to create new discriminator: "
358 << DIL->getFilename() << " Line: " << DIL->getLine());
359 }
360
361 // Copy all blocks
362 for (unsigned It = 1; It != Count; ++It) {
364 // Maps Blocks[It] -> Blocks[It-1]
365 DenseMap<Value *, Value *> PrevItValueMap;
367 NewLoops[L] = L;
368 NewLoops[SubLoop] = SubLoop;
369
370 for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
372 BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
373 Header->getParent()->insert(Header->getParent()->end(), New);
374
375 // Tell LI about New.
376 addClonedBlockToLoopInfo(*BB, New, LI, NewLoops);
377
378 if (ForeBlocks.count(*BB)) {
379 if (*BB == ForeBlocksFirst[0])
380 ForeBlocksFirst.push_back(New);
381 if (*BB == ForeBlocksLast[0])
382 ForeBlocksLast.push_back(New);
383 } else if (SubLoopBlocks.count(*BB)) {
384 if (*BB == SubLoopBlocksFirst[0])
385 SubLoopBlocksFirst.push_back(New);
386 if (*BB == SubLoopBlocksLast[0])
387 SubLoopBlocksLast.push_back(New);
388 } else if (AftBlocks.count(*BB)) {
389 if (*BB == AftBlocksFirst[0])
390 AftBlocksFirst.push_back(New);
391 if (*BB == AftBlocksLast[0])
392 AftBlocksLast.push_back(New);
393 } else {
394 llvm_unreachable("BB being cloned should be in Fore/Sub/Aft");
395 }
396
397 // Update our running maps of newest clones
398 PrevItValueMap[New] = (It == 1 ? *BB : LastValueMap[*BB]);
399 LastValueMap[*BB] = New;
400 for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
401 VI != VE; ++VI) {
402 PrevItValueMap[VI->second] =
403 const_cast<Value *>(It == 1 ? VI->first : LastValueMap[VI->first]);
404 LastValueMap[VI->first] = VI->second;
405 }
406
407 NewBlocks.push_back(New);
408
409 // Update DomTree:
410 if (*BB == ForeBlocksFirst[0])
411 DT->addNewBlock(New, ForeBlocksLast[It - 1]);
412 else if (*BB == SubLoopBlocksFirst[0])
413 DT->addNewBlock(New, SubLoopBlocksLast[It - 1]);
414 else if (*BB == AftBlocksFirst[0])
415 DT->addNewBlock(New, AftBlocksLast[It - 1]);
416 else {
417 // Each set of blocks (Fore/Sub/Aft) will have the same internal domtree
418 // structure.
419 auto BBDomNode = DT->getNode(*BB);
420 auto BBIDom = BBDomNode->getIDom();
421 BasicBlock *OriginalBBIDom = BBIDom->getBlock();
422 assert(OriginalBBIDom);
423 assert(LastValueMap[cast<Value>(OriginalBBIDom)]);
424 DT->addNewBlock(
425 New, cast<BasicBlock>(LastValueMap[cast<Value>(OriginalBBIDom)]));
426 }
427 }
428
429 // Remap all instructions in the most recent iteration
430 remapInstructionsInBlocks(NewBlocks, LastValueMap);
431 for (BasicBlock *NewBlock : NewBlocks) {
432 for (Instruction &I : *NewBlock) {
433 if (auto *II = dyn_cast<AssumeInst>(&I))
434 AC->registerAssumption(II);
435 }
436 }
437
438 // Alter the ForeBlocks phi's, pointing them at the latest version of the
439 // value from the previous iteration's phis
440 for (PHINode &Phi : ForeBlocksFirst[It]->phis()) {
441 Value *OldValue = Phi.getIncomingValueForBlock(AftBlocksLast[It]);
442 assert(OldValue && "should have incoming edge from Aft[It]");
443 Value *NewValue = OldValue;
444 if (Value *PrevValue = PrevItValueMap[OldValue])
445 NewValue = PrevValue;
446
447 assert(Phi.getNumOperands() == 2);
448 Phi.setIncomingBlock(0, ForeBlocksLast[It - 1]);
449 Phi.setIncomingValue(0, NewValue);
450 Phi.removeIncomingValue(1);
451 }
452 }
453
454 // Now that all the basic blocks for the unrolled iterations are in place,
455 // finish up connecting the blocks and phi nodes. At this point LastValueMap
456 // is the last unrolled iterations values.
457
458 // Update Phis in BB from OldBB to point to NewBB and use the latest value
459 // from LastValueMap
460 auto updatePHIBlocksAndValues = [](BasicBlock *BB, BasicBlock *OldBB,
461 BasicBlock *NewBB,
462 ValueToValueMapTy &LastValueMap) {
463 for (PHINode &Phi : BB->phis()) {
464 for (unsigned b = 0; b < Phi.getNumIncomingValues(); ++b) {
465 if (Phi.getIncomingBlock(b) == OldBB) {
466 Value *OldValue = Phi.getIncomingValue(b);
467 if (Value *LastValue = LastValueMap[OldValue])
468 Phi.setIncomingValue(b, LastValue);
469 Phi.setIncomingBlock(b, NewBB);
470 break;
471 }
472 }
473 }
474 };
475 // Move all the phis from Src into Dest
476 auto movePHIs = [](BasicBlock *Src, BasicBlock *Dest) {
477 Instruction *insertPoint = Dest->getFirstNonPHI();
478 while (PHINode *Phi = dyn_cast<PHINode>(Src->begin()))
479 Phi->moveBefore(insertPoint);
480 };
481
482 // Update the PHI values outside the loop to point to the last block
483 updatePHIBlocksAndValues(LoopExit, AftBlocksLast[0], AftBlocksLast.back(),
484 LastValueMap);
485
486 // Update ForeBlocks successors and phi nodes
487 BranchInst *ForeTerm =
488 cast<BranchInst>(ForeBlocksLast.back()->getTerminator());
489 assert(ForeTerm->getNumSuccessors() == 1 && "Expecting one successor");
490 ForeTerm->setSuccessor(0, SubLoopBlocksFirst[0]);
491
492 if (CompletelyUnroll) {
493 while (PHINode *Phi = dyn_cast<PHINode>(ForeBlocksFirst[0]->begin())) {
494 Phi->replaceAllUsesWith(Phi->getIncomingValueForBlock(Preheader));
495 Phi->eraseFromParent();
496 }
497 } else {
498 // Update the PHI values to point to the last aft block
499 updatePHIBlocksAndValues(ForeBlocksFirst[0], AftBlocksLast[0],
500 AftBlocksLast.back(), LastValueMap);
501 }
502
503 for (unsigned It = 1; It != Count; It++) {
504 // Remap ForeBlock successors from previous iteration to this
505 BranchInst *ForeTerm =
506 cast<BranchInst>(ForeBlocksLast[It - 1]->getTerminator());
507 assert(ForeTerm->getNumSuccessors() == 1 && "Expecting one successor");
508 ForeTerm->setSuccessor(0, ForeBlocksFirst[It]);
509 }
510
511 // Subloop successors and phis
512 BranchInst *SubTerm =
513 cast<BranchInst>(SubLoopBlocksLast.back()->getTerminator());
514 SubTerm->setSuccessor(!SubLoopContinueOnTrue, SubLoopBlocksFirst[0]);
515 SubTerm->setSuccessor(SubLoopContinueOnTrue, AftBlocksFirst[0]);
516 SubLoopBlocksFirst[0]->replacePhiUsesWith(ForeBlocksLast[0],
517 ForeBlocksLast.back());
518 SubLoopBlocksFirst[0]->replacePhiUsesWith(SubLoopBlocksLast[0],
519 SubLoopBlocksLast.back());
520
521 for (unsigned It = 1; It != Count; It++) {
522 // Replace the conditional branch of the previous iteration subloop with an
523 // unconditional one to this one
524 BranchInst *SubTerm =
525 cast<BranchInst>(SubLoopBlocksLast[It - 1]->getTerminator());
526 BranchInst::Create(SubLoopBlocksFirst[It], SubTerm);
527 SubTerm->eraseFromParent();
528
529 SubLoopBlocksFirst[It]->replacePhiUsesWith(ForeBlocksLast[It],
530 ForeBlocksLast.back());
531 SubLoopBlocksFirst[It]->replacePhiUsesWith(SubLoopBlocksLast[It],
532 SubLoopBlocksLast.back());
533 movePHIs(SubLoopBlocksFirst[It], SubLoopBlocksFirst[0]);
534 }
535
536 // Aft blocks successors and phis
537 BranchInst *AftTerm = cast<BranchInst>(AftBlocksLast.back()->getTerminator());
538 if (CompletelyUnroll) {
539 BranchInst::Create(LoopExit, AftTerm);
540 AftTerm->eraseFromParent();
541 } else {
542 AftTerm->setSuccessor(!ContinueOnTrue, ForeBlocksFirst[0]);
543 assert(AftTerm->getSuccessor(ContinueOnTrue) == LoopExit &&
544 "Expecting the ContinueOnTrue successor of AftTerm to be LoopExit");
545 }
546 AftBlocksFirst[0]->replacePhiUsesWith(SubLoopBlocksLast[0],
547 SubLoopBlocksLast.back());
548
549 for (unsigned It = 1; It != Count; It++) {
550 // Replace the conditional branch of the previous iteration subloop with an
551 // unconditional one to this one
552 BranchInst *AftTerm =
553 cast<BranchInst>(AftBlocksLast[It - 1]->getTerminator());
554 BranchInst::Create(AftBlocksFirst[It], AftTerm);
555 AftTerm->eraseFromParent();
556
557 AftBlocksFirst[It]->replacePhiUsesWith(SubLoopBlocksLast[It],
558 SubLoopBlocksLast.back());
559 movePHIs(AftBlocksFirst[It], AftBlocksFirst[0]);
560 }
561
562 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
563 // Dominator Tree. Remove the old links between Fore, Sub and Aft, adding the
564 // new ones required.
565 if (Count != 1) {
567 DTUpdates.emplace_back(DominatorTree::UpdateKind::Delete, ForeBlocksLast[0],
568 SubLoopBlocksFirst[0]);
569 DTUpdates.emplace_back(DominatorTree::UpdateKind::Delete,
570 SubLoopBlocksLast[0], AftBlocksFirst[0]);
571
572 DTUpdates.emplace_back(DominatorTree::UpdateKind::Insert,
573 ForeBlocksLast.back(), SubLoopBlocksFirst[0]);
574 DTUpdates.emplace_back(DominatorTree::UpdateKind::Insert,
575 SubLoopBlocksLast.back(), AftBlocksFirst[0]);
576 DTU.applyUpdatesPermissive(DTUpdates);
577 }
578
579 // Merge adjacent basic blocks, if possible.
581 MergeBlocks.insert(ForeBlocksLast.begin(), ForeBlocksLast.end());
582 MergeBlocks.insert(SubLoopBlocksLast.begin(), SubLoopBlocksLast.end());
583 MergeBlocks.insert(AftBlocksLast.begin(), AftBlocksLast.end());
584
585 MergeBlockSuccessorsIntoGivenBlocks(MergeBlocks, L, &DTU, LI);
586
587 // Apply updates to the DomTree.
588 DT = &DTU.getDomTree();
589
590 // At this point, the code is well formed. We now do a quick sweep over the
591 // inserted code, doing constant propagation and dead code elimination as we
592 // go.
593 simplifyLoopAfterUnroll(SubLoop, true, LI, SE, DT, AC, TTI);
594 simplifyLoopAfterUnroll(L, !CompletelyUnroll && Count > 1, LI, SE, DT, AC,
595 TTI);
596
597 NumCompletelyUnrolledAndJammed += CompletelyUnroll;
598 ++NumUnrolledAndJammed;
599
600 // Update LoopInfo if the loop is completely removed.
601 if (CompletelyUnroll)
602 LI->erase(L);
603
604#ifndef NDEBUG
605 // We shouldn't have done anything to break loop simplify form or LCSSA.
606 Loop *OutestLoop = SubLoop->getParentLoop()
607 ? SubLoop->getParentLoop()->getParentLoop()
608 ? SubLoop->getParentLoop()->getParentLoop()
609 : SubLoop->getParentLoop()
610 : SubLoop;
611 assert(DT->verify());
612 LI->verify(*DT);
613 assert(OutestLoop->isRecursivelyLCSSAForm(*DT, *LI));
614 if (!CompletelyUnroll)
615 assert(L->isLoopSimplifyForm());
616 assert(SubLoop->isLoopSimplifyForm());
617 SE->verify();
618#endif
619
620 return CompletelyUnroll ? LoopUnrollResult::FullyUnrolled
621 : LoopUnrollResult::PartiallyUnrolled;
622}
623
626 // Scan the BBs and collect legal loads and stores.
627 // Returns false if non-simple loads/stores are found.
628 for (BasicBlock *BB : Blocks) {
629 for (Instruction &I : *BB) {
630 if (auto *Ld = dyn_cast<LoadInst>(&I)) {
631 if (!Ld->isSimple())
632 return false;
633 MemInstr.push_back(&I);
634 } else if (auto *St = dyn_cast<StoreInst>(&I)) {
635 if (!St->isSimple())
636 return false;
637 MemInstr.push_back(&I);
638 } else if (I.mayReadOrWriteMemory()) {
639 return false;
640 }
641 }
642 }
643 return true;
644}
645
647 unsigned UnrollLevel, unsigned JamLevel,
648 bool Sequentialized, Dependence *D) {
649 // UnrollLevel might carry the dependency Src --> Dst
650 // Does a different loop after unrolling?
651 for (unsigned CurLoopDepth = UnrollLevel + 1; CurLoopDepth <= JamLevel;
652 ++CurLoopDepth) {
653 auto JammedDir = D->getDirection(CurLoopDepth);
654 if (JammedDir == Dependence::DVEntry::LT)
655 return true;
656
657 if (JammedDir & Dependence::DVEntry::GT)
658 return false;
659 }
660
661 return true;
662}
663
665 unsigned UnrollLevel, unsigned JamLevel,
666 bool Sequentialized, Dependence *D) {
667 // UnrollLevel might carry the dependency Dst --> Src
668 for (unsigned CurLoopDepth = UnrollLevel + 1; CurLoopDepth <= JamLevel;
669 ++CurLoopDepth) {
670 auto JammedDir = D->getDirection(CurLoopDepth);
671 if (JammedDir == Dependence::DVEntry::GT)
672 return true;
673
674 if (JammedDir & Dependence::DVEntry::LT)
675 return false;
676 }
677
678 // Backward dependencies are only preserved if not interleaved.
679 return Sequentialized;
680}
681
682// Check whether it is semantically safe Src and Dst considering any potential
683// dependency between them.
684//
685// @param UnrollLevel The level of the loop being unrolled
686// @param JamLevel The level of the loop being jammed; if Src and Dst are on
687// different levels, the outermost common loop counts as jammed level
688//
689// @return true if is safe and false if there is a dependency violation.
691 unsigned UnrollLevel, unsigned JamLevel,
692 bool Sequentialized, DependenceInfo &DI) {
693 assert(UnrollLevel <= JamLevel &&
694 "Expecting JamLevel to be at least UnrollLevel");
695
696 if (Src == Dst)
697 return true;
698 // Ignore Input dependencies.
699 if (isa<LoadInst>(Src) && isa<LoadInst>(Dst))
700 return true;
701
702 // Check whether unroll-and-jam may violate a dependency.
703 // By construction, every dependency will be lexicographically non-negative
704 // (if it was, it would violate the current execution order), such as
705 // (0,0,>,*,*)
706 // Unroll-and-jam changes the GT execution of two executions to the same
707 // iteration of the chosen unroll level. That is, a GT dependence becomes a GE
708 // dependence (or EQ, if we fully unrolled the loop) at the loop's position:
709 // (0,0,>=,*,*)
710 // Now, the dependency is not necessarily non-negative anymore, i.e.
711 // unroll-and-jam may violate correctness.
712 std::unique_ptr<Dependence> D = DI.depends(Src, Dst, true);
713 if (!D)
714 return true;
715 assert(D->isOrdered() && "Expected an output, flow or anti dep.");
716
717 if (D->isConfused()) {
718 LLVM_DEBUG(dbgs() << " Confused dependency between:\n"
719 << " " << *Src << "\n"
720 << " " << *Dst << "\n");
721 return false;
722 }
723
724 // If outer levels (levels enclosing the loop being unroll-and-jammed) have a
725 // non-equal direction, then the locations accessed in the inner levels cannot
726 // overlap in memory. We assumes the indexes never overlap into neighboring
727 // dimensions.
728 for (unsigned CurLoopDepth = 1; CurLoopDepth < UnrollLevel; ++CurLoopDepth)
729 if (!(D->getDirection(CurLoopDepth) & Dependence::DVEntry::EQ))
730 return true;
731
732 auto UnrollDirection = D->getDirection(UnrollLevel);
733
734 // If the distance carried by the unrolled loop is 0, then after unrolling
735 // that distance will become non-zero resulting in non-overlapping accesses in
736 // the inner loops.
737 if (UnrollDirection == Dependence::DVEntry::EQ)
738 return true;
739
740 if (UnrollDirection & Dependence::DVEntry::LT &&
741 !preservesForwardDependence(Src, Dst, UnrollLevel, JamLevel,
742 Sequentialized, D.get()))
743 return false;
744
745 if (UnrollDirection & Dependence::DVEntry::GT &&
746 !preservesBackwardDependence(Src, Dst, UnrollLevel, JamLevel,
747 Sequentialized, D.get()))
748 return false;
749
750 return true;
751}
752
753static bool
754checkDependencies(Loop &Root, const BasicBlockSet &SubLoopBlocks,
755 const DenseMap<Loop *, BasicBlockSet> &ForeBlocksMap,
756 const DenseMap<Loop *, BasicBlockSet> &AftBlocksMap,
757 DependenceInfo &DI, LoopInfo &LI) {
759 for (Loop *L : Root.getLoopsInPreorder())
760 if (ForeBlocksMap.contains(L))
761 AllBlocks.push_back(ForeBlocksMap.lookup(L));
762 AllBlocks.push_back(SubLoopBlocks);
763 for (Loop *L : Root.getLoopsInPreorder())
764 if (AftBlocksMap.contains(L))
765 AllBlocks.push_back(AftBlocksMap.lookup(L));
766
767 unsigned LoopDepth = Root.getLoopDepth();
768 SmallVector<Instruction *, 4> EarlierLoadsAndStores;
769 SmallVector<Instruction *, 4> CurrentLoadsAndStores;
770 for (BasicBlockSet &Blocks : AllBlocks) {
771 CurrentLoadsAndStores.clear();
772 if (!getLoadsAndStores(Blocks, CurrentLoadsAndStores))
773 return false;
774
775 Loop *CurLoop = LI.getLoopFor((*Blocks.begin())->front().getParent());
776 unsigned CurLoopDepth = CurLoop->getLoopDepth();
777
778 for (auto *Earlier : EarlierLoadsAndStores) {
779 Loop *EarlierLoop = LI.getLoopFor(Earlier->getParent());
780 unsigned EarlierDepth = EarlierLoop->getLoopDepth();
781 unsigned CommonLoopDepth = std::min(EarlierDepth, CurLoopDepth);
782 for (auto *Later : CurrentLoadsAndStores) {
783 if (!checkDependency(Earlier, Later, LoopDepth, CommonLoopDepth, false,
784 DI))
785 return false;
786 }
787 }
788
789 size_t NumInsts = CurrentLoadsAndStores.size();
790 for (size_t I = 0; I < NumInsts; ++I) {
791 for (size_t J = I; J < NumInsts; ++J) {
792 if (!checkDependency(CurrentLoadsAndStores[I], CurrentLoadsAndStores[J],
793 LoopDepth, CurLoopDepth, true, DI))
794 return false;
795 }
796 }
797
798 EarlierLoadsAndStores.append(CurrentLoadsAndStores.begin(),
799 CurrentLoadsAndStores.end());
800 }
801 return true;
802}
803
804static bool isEligibleLoopForm(const Loop &Root) {
805 // Root must have a child.
806 if (Root.getSubLoops().size() != 1)
807 return false;
808
809 const Loop *L = &Root;
810 do {
811 // All loops in Root need to be in simplify and rotated form.
812 if (!L->isLoopSimplifyForm())
813 return false;
814
815 if (!L->isRotatedForm())
816 return false;
817
818 if (L->getHeader()->hasAddressTaken()) {
819 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Address taken\n");
820 return false;
821 }
822
823 unsigned SubLoopsSize = L->getSubLoops().size();
824 if (SubLoopsSize == 0)
825 return true;
826
827 // Only one child is allowed.
828 if (SubLoopsSize != 1)
829 return false;
830
831 // Only loops with a single exit block can be unrolled and jammed.
832 // The function getExitBlock() is used for this check, rather than
833 // getUniqueExitBlock() to ensure loops with mulitple exit edges are
834 // disallowed.
835 if (!L->getExitBlock()) {
836 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; only loops with single exit "
837 "blocks can be unrolled and jammed.\n");
838 return false;
839 }
840
841 // Only loops with a single exiting block can be unrolled and jammed.
842 if (!L->getExitingBlock()) {
843 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; only loops with single "
844 "exiting blocks can be unrolled and jammed.\n");
845 return false;
846 }
847
848 L = L->getSubLoops()[0];
849 } while (L);
850
851 return true;
852}
853
855 while (!L->getSubLoops().empty())
856 L = L->getSubLoops()[0];
857 return L;
858}
859
861 DependenceInfo &DI, LoopInfo &LI) {
862 if (!isEligibleLoopForm(*L)) {
863 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Ineligible loop form\n");
864 return false;
865 }
866
867 /* We currently handle outer loops like this:
868 |
869 ForeFirst <------\ }
870 Blocks | } ForeBlocks of L
871 ForeLast | }
872 | |
873 ... |
874 | |
875 ForeFirst <----\ | }
876 Blocks | | } ForeBlocks of a inner loop of L
877 ForeLast | | }
878 | | |
879 JamLoopFirst <\ | | }
880 Blocks | | | } JamLoopBlocks of the innermost loop
881 JamLoopLast -/ | | }
882 | | |
883 AftFirst | | }
884 Blocks | | } AftBlocks of a inner loop of L
885 AftLast ------/ | }
886 | |
887 ... |
888 | |
889 AftFirst | }
890 Blocks | } AftBlocks of L
891 AftLast --------/ }
892 |
893
894 There are (theoretically) any number of blocks in ForeBlocks, SubLoopBlocks
895 and AftBlocks, providing that there is one edge from Fores to SubLoops,
896 one edge from SubLoops to Afts and a single outer loop exit (from Afts).
897 In practice we currently limit Aft blocks to a single block, and limit
898 things further in the profitability checks of the unroll and jam pass.
899
900 Because of the way we rearrange basic blocks, we also require that
901 the Fore blocks of L on all unrolled iterations are safe to move before the
902 blocks of the direct child of L of all iterations. So we require that the
903 phi node looping operands of ForeHeader can be moved to at least the end of
904 ForeEnd, so that we can arrange cloned Fore Blocks before the subloop and
905 match up Phi's correctly.
906
907 i.e. The old order of blocks used to be
908 (F1)1 (F2)1 J1_1 J1_2 (A2)1 (A1)1 (F1)2 (F2)2 J2_1 J2_2 (A2)2 (A1)2.
909 It needs to be safe to transform this to
910 (F1)1 (F1)2 (F2)1 (F2)2 J1_1 J1_2 J2_1 J2_2 (A2)1 (A2)2 (A1)1 (A1)2.
911
912 There are then a number of checks along the lines of no calls, no
913 exceptions, inner loop IV is consistent, etc. Note that for loops requiring
914 runtime unrolling, UnrollRuntimeLoopRemainder can also fail in
915 UnrollAndJamLoop if the trip count cannot be easily calculated.
916 */
917
918 // Split blocks into Fore/SubLoop/Aft based on dominators
919 Loop *JamLoop = getInnerMostLoop(L);
920 BasicBlockSet SubLoopBlocks;
923 if (!partitionOuterLoopBlocks(*L, *JamLoop, SubLoopBlocks, ForeBlocksMap,
924 AftBlocksMap, DT)) {
925 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Incompatible loop layout\n");
926 return false;
927 }
928
929 // Aft blocks may need to move instructions to fore blocks, which becomes more
930 // difficult if there are multiple (potentially conditionally executed)
931 // blocks. For now we just exclude loops with multiple aft blocks.
932 if (AftBlocksMap[L].size() != 1) {
933 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Can't currently handle "
934 "multiple blocks after the loop\n");
935 return false;
936 }
937
938 // Check inner loop backedge count is consistent on all iterations of the
939 // outer loop
940 if (any_of(L->getLoopsInPreorder(), [&SE](Loop *SubLoop) {
941 return !hasIterationCountInvariantInParent(SubLoop, SE);
942 })) {
943 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Inner loop iteration count is "
944 "not consistent on each iteration\n");
945 return false;
946 }
947
948 // Check the loop safety info for exceptions.
951 if (LSI.anyBlockMayThrow()) {
952 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Something may throw\n");
953 return false;
954 }
955
956 // We've ruled out the easy stuff and now need to check that there are no
957 // interdependencies which may prevent us from moving the:
958 // ForeBlocks before Subloop and AftBlocks.
959 // Subloop before AftBlocks.
960 // ForeBlock phi operands before the subloop
961
962 // Make sure we can move all instructions we need to before the subloop
963 BasicBlock *Header = L->getHeader();
964 BasicBlock *Latch = L->getLoopLatch();
965 BasicBlockSet AftBlocks = AftBlocksMap[L];
966 Loop *SubLoop = L->getSubLoops()[0];
968 Header, Latch, AftBlocks, [&AftBlocks, &SubLoop](Instruction *I) {
969 if (SubLoop->contains(I->getParent()))
970 return false;
971 if (AftBlocks.count(I->getParent())) {
972 // If we hit a phi node in afts we know we are done (probably
973 // LCSSA)
974 if (isa<PHINode>(I))
975 return false;
976 // Can't move instructions with side effects or memory
977 // reads/writes
978 if (I->mayHaveSideEffects() || I->mayReadOrWriteMemory())
979 return false;
980 }
981 // Keep going
982 return true;
983 })) {
984 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; can't move required "
985 "instructions after subloop to before it\n");
986 return false;
987 }
988
989 // Check for memory dependencies which prohibit the unrolling we are doing.
990 // Because of the way we are unrolling Fore/Sub/Aft blocks, we need to check
991 // there are no dependencies between Fore-Sub, Fore-Aft, Sub-Aft and Sub-Sub.
992 if (!checkDependencies(*L, SubLoopBlocks, ForeBlocksMap, AftBlocksMap, DI,
993 LI)) {
994 LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; failed dependency check\n");
995 return false;
996 }
997
998 return true;
999}
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines the DenseMap class.
DenseMap< Block *, BlockRelaxAux > Blocks
Definition: ELF_riscv.cpp:496
This file defines a set of templates that efficiently compute a dominator tree over a generic graph.
SmallPtrSet< BasicBlock *, 4 > BasicBlockSet
static bool partitionLoopBlocks(Loop &L, BasicBlockSet &ForeBlocks, BasicBlockSet &AftBlocks, DominatorTree &DT)
static Loop * getInnerMostLoop(Loop *L)
static void moveHeaderPhiOperandsToForeBlocks(BasicBlock *Header, BasicBlock *Latch, Instruction *InsertLoc, BasicBlockSet &AftBlocks)
static bool getLoadsAndStores(BasicBlockSet &Blocks, SmallVector< Instruction *, 4 > &MemInstr)
static bool preservesForwardDependence(Instruction *Src, Instruction *Dst, unsigned UnrollLevel, unsigned JamLevel, bool Sequentialized, Dependence *D)
static bool partitionOuterLoopBlocks(Loop &Root, Loop &JamLoop, BasicBlockSet &JamLoopBlocks, DenseMap< Loop *, BasicBlockSet > &ForeBlocksMap, DenseMap< Loop *, BasicBlockSet > &AftBlocksMap, DominatorTree &DT)
Partition blocks in a loop nest into blocks before and after each inner loop.
static bool isEligibleLoopForm(const Loop &Root)
static bool preservesBackwardDependence(Instruction *Src, Instruction *Dst, unsigned UnrollLevel, unsigned JamLevel, bool Sequentialized, Dependence *D)
static bool checkDependencies(Loop &Root, const BasicBlockSet &SubLoopBlocks, const DenseMap< Loop *, BasicBlockSet > &ForeBlocksMap, const DenseMap< Loop *, BasicBlockSet > &AftBlocksMap, DependenceInfo &DI, LoopInfo &LI)
#define DEBUG_TYPE
static bool processHeaderPhiOperands(BasicBlock *Header, BasicBlock *Latch, BasicBlockSet &AftBlocks, T Visit)
static bool checkDependency(Instruction *Src, Instruction *Dst, unsigned UnrollLevel, unsigned JamLevel, bool Sequentialized, DependenceInfo &DI)
#define I(x, y, z)
Definition: MD5.cpp:58
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
if(VerifyEach)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:127
const Instruction & back() const
Definition: BasicBlock.h:349
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
unsigned getNumSuccessors() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
Debug location.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:202
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition: DenseMap.h:145
DependenceInfo - This class is the main dependence-analysis driver.
std::unique_ptr< Dependence > depends(Instruction *Src, Instruction *Dst, bool PossiblyLoopIndependent)
depends - Tests for a dependence between the Src and Dst instructions.
Dependence - This class represents a dependence between two memory references in a function.
DomTreeNodeBase * getIDom() const
DominatorTree & getDomTree()
Flush DomTree updates and return DomTree.
void applyUpdatesPermissive(ArrayRef< DominatorTree::UpdateType > Updates)
Submit updates to all available trees.
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:83
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
SmallVector< const LoopT *, 4 > getLoopsInPreorder() const
Return all loops in the loop nest rooted by the loop in preorder, with siblings in forward program or...
const std::vector< LoopT * > & getSubLoops() const
Return the loops contained entirely within this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
block_iterator block_end() const
BlockT * getExitBlock() const
If getExitBlocks would return exactly one block, return that block.
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
BlockT * getExitingBlock() const
If getExitingBlocks would return exactly one block, return that block.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
block_iterator block_begin() const
Store the result of a depth first search within basic blocks contained by a single loop.
Definition: LoopIterator.h:97
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
Definition: LoopIterator.h:136
std::vector< BasicBlock * >::const_reverse_iterator RPOIterator
Definition: LoopIterator.h:101
void perform(LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
Definition: LoopInfo.cpp:1221
RPOIterator endRPO() const
Definition: LoopIterator.h:140
void verify(const DominatorTreeBase< BlockT, false > &DomTree) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
void erase(Loop *L)
Update LoopInfo after removing the last backedge from a loop.
Definition: LoopInfo.cpp:876
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:47
bool isLoopSimplifyForm() const
Return true if the Loop is in the form that the LoopSimplify form transforms loops to,...
Definition: LoopInfo.cpp:479
bool isRecursivelyLCSSAForm(const DominatorTree &DT, const LoopInfo &LI, bool IgnoreTokens=true) const
Return true if this Loop and all inner subloops are in LCSSA form.
Definition: LoopInfo.cpp:469
The optimization diagnostic interface.
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for applied optimization remarks.
The main scalar evolution driver.
void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
Simple and conservative implementation of LoopSafetyInfo that can give false-positive answers to its ...
Definition: MustExecute.h:110
void computeLoopSafetyInfo(const Loop *CurLoop) override
Computes safety information for a loop checks loop body & header for the possibility of may throw exc...
Definition: MustExecute.cpp:51
bool anyBlockMayThrow() const override
Returns true iff any block of the loop for which this info is contains an instruction that may throw ...
Definition: MustExecute.cpp:47
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:384
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:366
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:451
size_t size() const
Definition: SmallVector.h:91
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
iterator begin()
Definition: ValueMap.h:134
iterator end()
Definition: ValueMap.h:135
LLVM Value Representation.
Definition: Value.h:74
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
bool isSafeToUnrollAndJam(Loop *L, ScalarEvolution &SE, DominatorTree &DT, DependenceInfo &DI, LoopInfo &LI)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1685
auto successors(const MachineBasicBlock *BB)
cl::opt< bool > EnableFSDiscriminator
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1734
BasicBlock * CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap, const Twine &NameSuffix="", Function *F=nullptr, ClonedCodeInfo *CodeInfo=nullptr, DebugInfoFinder *DIFinder=nullptr)
Return a copy of the specified basic block, but without embedding the block into a particular functio...
bool MergeBlockSuccessorsIntoGivenBlocks(SmallPtrSetImpl< BasicBlock * > &MergeBlocks, Loop *L=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
Merge block(s) successors, if possible.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
LoopUnrollResult
Represents the result of a UnrollLoop invocation.
Definition: UnrollLoop.h:54
void remapInstructionsInBlocks(ArrayRef< BasicBlock * > Blocks, ValueToValueMapTy &VMap)
Remaps instructions in Blocks using the mapping in VMap.
const Loop * addClonedBlockToLoopInfo(BasicBlock *OriginalBB, BasicBlock *ClonedBB, LoopInfo *LI, NewLoopsMap &NewLoops)
Adds ClonedBB to LoopInfo, creates a new loop for ClonedBB if necessary and adds a mapping from the o...
Definition: LoopUnroll.cpp:148
LoopUnrollResult UnrollAndJamLoop(Loop *L, unsigned Count, unsigned TripCount, unsigned TripMultiple, bool UnrollRemainder, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, const TargetTransformInfo *TTI, OptimizationRemarkEmitter *ORE, Loop **EpilogueLoop=nullptr)
void simplifyLoopAfterUnroll(Loop *L, bool SimplifyIVs, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, const TargetTransformInfo *TTI)
Perform some cleanup and simplifications on loops after unrolling.
Definition: LoopUnroll.cpp:216
bool UnrollRuntimeLoopRemainder(Loop *L, unsigned Count, bool AllowExpensiveTripCount, bool UseEpilogRemainder, bool UnrollRemainder, bool ForgetAllSCEV, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, const TargetTransformInfo *TTI, bool PreserveLCSSA, Loop **ResultLoop=nullptr)
Insert code in the prolog/epilog code when unrolling a loop with a run-time trip-count.