//===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form. In cases where this kicks in, it can be a significant
// performance win.
//
// If compiling for code size we avoid idiom recognition if the resulting
// code could be larger than the code for the original loop. One way this could
// happen is if the loop is not removable after idiom recognition due to the
// presence of non-idiom instructions. The initial implementation of the
// heuristics applies to idioms in multi-block loops.
//
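// For illustration (not part of the original header comment), two loops this
// pass typically rewrites:
//
//   for (i = 0; i != n; ++i)        // becomes memset(p, 0, n)
//     p[i] = 0;
//   for (i = 0; i != n; ++i)        // becomes memcpy(a, b, n)
//     a[i] = b[i];
//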
//===----------------------------------------------------------------------===//
//
// TODO List:
//
// Future loop memory idioms to recognize: memcmp, etc.
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;
using namespace SCEVPatternMatch;

#define DEBUG_TYPE "loop-idiom"

STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
STATISTIC(NumMemMove, "Number of memmove's formed from loop load+stores");
STATISTIC(NumStrLen, "Number of strlen's and wcslen's formed from loop loads");
STATISTIC(
    NumShiftUntilBitTest,
    "Number of uncountable loops recognized as 'shift until bit test' idiom");
STATISTIC(NumShiftUntilZero,
          "Number of uncountable loops recognized as 'shift until zero' idiom");

namespace llvm {

bool DisableLIRP::All;
static cl::opt<bool, true>
    DisableLIRPAll("disable-" DEBUG_TYPE "-all",
                   cl::desc("Options to disable Loop Idiom Recognize Pass."),
                   cl::location(DisableLIRP::All), cl::init(false),
                   cl::ReallyHidden);

bool DisableLIRP::Memset;
static cl::opt<bool, true>
    DisableLIRPMemset("disable-" DEBUG_TYPE "-memset",
                      cl::desc("Proceed with loop idiom recognize pass, but do "
                               "not convert loop(s) to memset."),
                      cl::location(DisableLIRP::Memset), cl::init(false),
                      cl::ReallyHidden);

bool DisableLIRP::Memcpy;
static cl::opt<bool, true>
    DisableLIRPMemcpy("disable-" DEBUG_TYPE "-memcpy",
                      cl::desc("Proceed with loop idiom recognize pass, but do "
                               "not convert loop(s) to memcpy."),
                      cl::location(DisableLIRP::Memcpy), cl::init(false),
                      cl::ReallyHidden);

bool DisableLIRP::Strlen;
static cl::opt<bool, true>
    DisableLIRPStrlen("disable-loop-idiom-strlen",
                      cl::desc("Proceed with loop idiom recognize pass, but do "
                               "not convert loop(s) to strlen."),
                      cl::location(DisableLIRP::Strlen), cl::init(false),
                      cl::ReallyHidden);

bool DisableLIRP::Wcslen;
static cl::opt<bool, true>
    EnableLIRPWcslen("disable-loop-idiom-wcslen",
                     cl::desc("Proceed with loop idiom recognize pass, "
                              "enable conversion of loop(s) to wcslen."),
                     cl::location(DisableLIRP::Wcslen), cl::init(false),
                     cl::ReallyHidden);

bool DisableLIRP::HashRecognize;
static cl::opt<bool, true>
    DisableLIRPHashRecognize("disable-" DEBUG_TYPE "-hashrecognize",
                             cl::desc("Proceed with loop idiom recognize pass, "
                                      "but do not optimize CRC loops."),
                             cl::location(DisableLIRP::HashRecognize),
                             cl::init(false), cl::ReallyHidden);

static cl::opt<bool> UseLIRCodeSizeHeurs(
    "use-lir-code-size-heurs",
    cl::desc("Use loop idiom recognition code size heuristics when compiling "
             "with -Os/-Oz"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> ForceMemsetPatternIntrinsic(
    "loop-idiom-force-memset-pattern-intrinsic",
    cl::desc("Use memset.pattern intrinsic whenever possible"), cl::init(false),
    cl::Hidden);

} // namespace llvm

namespace {

class LoopIdiomRecognize {
  Loop *CurLoop = nullptr;
  AliasAnalysis *AA;
  DominatorTree *DT;
  LoopInfo *LI;
  ScalarEvolution *SE;
  TargetLibraryInfo *TLI;
  const TargetTransformInfo *TTI;
  const DataLayout *DL;
  OptimizationRemarkEmitter &ORE;
  bool ApplyCodeSizeHeuristics;
  std::unique_ptr<MemorySSAUpdater> MSSAU;

public:
  explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
                              LoopInfo *LI, ScalarEvolution *SE,
                              TargetLibraryInfo *TLI,
                              const TargetTransformInfo *TTI, MemorySSA *MSSA,
                              const DataLayout *DL,
                              OptimizationRemarkEmitter &ORE)
      : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL), ORE(ORE) {
    if (MSSA)
      MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
  }

  bool runOnLoop(Loop *L);

private:
  using StoreList = SmallVector<StoreInst *, 8>;
  using StoreListMap = MapVector<Value *, StoreList>;

  StoreListMap StoreRefsForMemset;
  StoreListMap StoreRefsForMemsetPattern;
  StoreList StoreRefsForMemcpy;
  bool HasMemset;
  bool HasMemsetPattern;
  bool HasMemcpy;

  /// Return code for isLegalStore()
  enum LegalStoreKind {
    None = 0,
    Memset,
    MemsetPattern,
    Memcpy,
    UnorderedAtomicMemcpy,
    DontUse // Dummy retval never to be used. Allows catching errors in retval
            // handling.
  };

  /// \name Countable Loop Idiom Handling
  /// @{

  bool runOnCountableLoop();
  bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                      SmallVectorImpl<BasicBlock *> &ExitBlocks);

  void collectStores(BasicBlock *BB);
  LegalStoreKind isLegalStore(StoreInst *SI);
  enum class ForMemset { No, Yes };
  bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
                         ForMemset For);

  template <typename MemInst>
  bool processLoopMemIntrinsic(
      BasicBlock *BB,
      bool (LoopIdiomRecognize::*Processor)(MemInst *, const SCEV *),
      const SCEV *BECount);
  bool processLoopMemCpy(MemCpyInst *MCI, const SCEV *BECount);
  bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

  bool processLoopStridedStore(Value *DestPtr, const SCEV *StoreSizeSCEV,
                               MaybeAlign StoreAlignment, Value *StoredVal,
                               Instruction *TheStore,
                               SmallPtrSetImpl<Instruction *> &Stores,
                               const SCEVAddRecExpr *Ev, const SCEV *BECount,
                               bool IsNegStride, bool IsLoopMemset = false);
  bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
  bool processLoopStoreOfLoopLoad(Value *DestPtr, Value *SourcePtr,
                                  const SCEV *StoreSize, MaybeAlign StoreAlign,
                                  MaybeAlign LoadAlign, Instruction *TheStore,
                                  Instruction *TheLoad,
                                  const SCEVAddRecExpr *StoreEv,
                                  const SCEVAddRecExpr *LoadEv,
                                  const SCEV *BECount);
  bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
                                 bool IsLoopMemset = false);
  bool optimizeCRCLoop(const PolynomialInfo &Info);

  /// @}
  /// \name Noncountable Loop Idiom Handling
  /// @{

  bool runOnNoncountableLoop();

  bool recognizePopcount();
  void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
                               PHINode *CntPhi, Value *Var);
  bool isProfitableToInsertFFS(Intrinsic::ID IntrinID, Value *InitX,
                               bool ZeroCheck, size_t CanonicalSize);
  bool insertFFSIfProfitable(Intrinsic::ID IntrinID, Value *InitX,
                             Instruction *DefX, PHINode *CntPhi,
                             Instruction *CntInst);
  bool recognizeAndInsertFFS(); /// Find First Set: ctlz or cttz
  bool recognizeShiftUntilLessThan();
  void transformLoopToCountable(Intrinsic::ID IntrinID, BasicBlock *PreCondBB,
                                Instruction *CntInst, PHINode *CntPhi,
                                Value *Var, Instruction *DefX,
                                const DebugLoc &DL, bool ZeroCheck,
                                bool IsCntPhiUsedOutsideLoop,
                                bool InsertSub = false);

  bool recognizeShiftUntilBitTest();
  bool recognizeShiftUntilZero();
  bool recognizeAndInsertStrLen();

  /// @}
};
} // end anonymous namespace

PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
                                              LoopStandardAnalysisResults &AR,
                                              LPMUpdater &) {
  if (DisableLIRP::All)
    return PreservedAnalyses::all();

  const auto *DL = &L.getHeader()->getDataLayout();

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI,
                         AR.MSSA, DL, ORE);
  if (!LIR.runOnLoop(&L))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

static void deleteDeadInstruction(Instruction *I) {
  I->replaceAllUsesWith(PoisonValue::get(I->getType()));
  I->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//
// Implementation of LoopIdiomRecognize
//
//===----------------------------------------------------------------------===//

bool LoopIdiomRecognize::runOnLoop(Loop *L) {
  CurLoop = L;
  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it; just give up.
  if (!L->getLoopPreheader())
    return false;

  // Disable loop idiom recognition if the function's name is a common idiom.
  StringRef Name = L->getHeader()->getParent()->getName();
  if (Name == "memset" || Name == "memcpy" || Name == "strlen" ||
      Name == "wcslen")
    return false;

  // Determine if code size heuristics need to be applied.
  ApplyCodeSizeHeuristics =
      L->getHeader()->getParent()->hasOptSize() && UseLIRCodeSizeHeurs;

  HasMemset = TLI->has(LibFunc_memset);
  // TODO: Unconditionally enable use of the memset pattern intrinsic (or at
  // least, opt-in via target hook) once we are confident it will never result
  // in worse codegen than without. For now, use it only when the target
  // supports the memset_pattern16 libcall (or when this is overridden by the
  // command-line option).
  HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
  HasMemcpy = TLI->has(LibFunc_memcpy);

  if (HasMemset || HasMemsetPattern || ForceMemsetPatternIntrinsic ||
      HasMemcpy || !DisableLIRP::HashRecognize)
    if (SE->hasLoopInvariantBackedgeTakenCount(L))
      return runOnCountableLoop();

  return runOnNoncountableLoop();
}

bool LoopIdiomRecognize::runOnCountableLoop() {
  const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
  assert(!isa<SCEVCouldNotCompute>(BECount) &&
         "runOnCountableLoop() called on a loop without a predictable "
         "backedge-taken count");

  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.
  if (BECount->isZero())
    return false;

  SmallVector<BasicBlock *, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F["
                    << CurLoop->getHeader()->getParent()->getName()
                    << "] Countable Loop %" << CurLoop->getHeader()->getName()
                    << "\n");

  // The following transforms hoist stores/memsets into the loop pre-header.
  // Give up if the loop has instructions that may throw.
  SimpleLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(CurLoop);
  if (SafetyInfo.anyBlockMayThrow())
    return false;

  bool MadeChange = false;

  // Scan all the blocks in the loop that are not in subloops.
  for (auto *BB : CurLoop->getBlocks()) {
    // Ignore blocks in subloops.
    if (LI->getLoopFor(BB) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
  }

  // Optimize a CRC loop if HashRecognize found one, provided we're not
  // optimizing for size.
  if (!DisableLIRP::HashRecognize && !ApplyCodeSizeHeuristics)
    if (auto Res = HashRecognize(*CurLoop, *SE).getResult())
      optimizeCRCLoop(*Res);

  return MadeChange;
}

static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
  const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
  return ConstStride->getAPInt();
}

/// getMemSetPatternValue - If a strided store of the specified value is safe to
/// turn into a memset.pattern intrinsic, return the Constant that should
/// be passed in. Otherwise, return null.
///
/// TODO: this function could allow more constants than it does today (e.g.
/// those over 16 bytes), now that it has transitioned to being used for the
/// memset.pattern intrinsic rather than directly for the memset_pattern16
/// libcall.
static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
  // FIXME: This could check for UndefValue because it can be merged into any
  // other valid pattern.

  // If the value isn't a constant, we can't promote it to being in a constant
  // array. We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (!C || isa<ConstantExpr>(C))
    return nullptr;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = DL->getTypeSizeInBits(V->getType());
  if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
    return nullptr;

  // Don't care enough about darwin/ppc to implement this.
  if (DL->isBigEndian())
    return nullptr;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If CI is larger than 16 bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16)
    return nullptr;

  // For now, don't handle types that aren't int, floats, or pointers.
  Type *CTy = C->getType();
  if (!CTy->isIntOrPtrTy() && !CTy->isFloatingPointTy())
    return nullptr;

  return C;
}
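
// Illustrative examples (not from the original source) of the rules above: an
// i32 constant like 0x01020304 (4 bytes, a power of two, little-endian) is a
// usable pattern, while an i24 constant (not a power of two bytes) and a
// 32-byte vector constant (over 16 bytes) are both rejected.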

LoopIdiomRecognize::LegalStoreKind
LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
  // Don't touch volatile stores.
  if (SI->isVolatile())
    return LegalStoreKind::None;
  // We only want simple or unordered-atomic stores.
  if (!SI->isUnordered())
    return LegalStoreKind::None;

  // Avoid merging nontemporal stores.
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return LegalStoreKind::None;

  Value *StoredVal = SI->getValueOperand();
  Value *StorePtr = SI->getPointerOperand();

  // Don't convert stores of non-integral pointer types to memsets (which
  // store integers).
  if (DL->isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
    return LegalStoreKind::None;

  // Reject stores that are so large that they overflow an unsigned.
  // When storing out scalable vectors we bail out for now, since the code
  // below currently only works for constant strides.
  TypeSize SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
  if (SizeInBits.isScalable() || (SizeInBits.getFixedValue() & 7) ||
      (SizeInBits.getFixedValue() >> 32) != 0)
    return LegalStoreKind::None;

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEV *StoreEv = SE->getSCEV(StorePtr);
  const SCEVConstant *Stride;
  if (!match(StoreEv, m_scev_AffineAddRec(m_SCEV(), m_SCEVConstant(Stride),
                                          m_SpecificLoop(CurLoop))))
    return LegalStoreKind::None;

  // See if the store can be turned into a memset.

  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored. A store of i32 0x01020304 can never be turned into a memset,
  // but it can be turned into memset_pattern if the target supports it.
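  //
  // For example (illustrative): an i16 store of 0x0101 splats to a memset
  // with byte value 1, while an i32 store of 0x01020304 needs a 4-byte
  // repeating pattern and hence memset_pattern support.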
  Value *SplatValue = isBytewiseValue(StoredVal, *DL);

  // Note: memset and memset_pattern on unordered-atomic stores are not yet
  // supported.
  bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();

  // If we're allowed to form a memset, and the stored value would be
  // acceptable for memset, use it.
  if (!UnorderedAtomic && HasMemset && SplatValue && !DisableLIRP::Memset &&
      // Verify that the stored value is loop invariant. If not, we can't
      // promote the memset.
      CurLoop->isLoopInvariant(SplatValue)) {
    // It looks like we can use SplatValue.
    return LegalStoreKind::Memset;
  }
  if (!UnorderedAtomic && (HasMemsetPattern || ForceMemsetPatternIntrinsic) &&
      !DisableLIRP::Memset &&
      // Don't create memset_pattern16s with address spaces.
      StorePtr->getType()->getPointerAddressSpace() == 0 &&
      getMemSetPatternValue(StoredVal, DL)) {
    // It looks like we can use PatternValue!
    return LegalStoreKind::MemsetPattern;
  }

  // Otherwise, see if the store can be turned into a memcpy.
  if (HasMemcpy && !DisableLIRP::Memcpy) {
    // Check to see if the stride matches the size of the store. If so, then we
    // know that every byte is touched in the loop.
    unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
    APInt StrideAP = Stride->getAPInt();
    if (StoreSize != StrideAP && StoreSize != -StrideAP)
      return LegalStoreKind::None;

    // The store must be feeding a non-volatile load.
    LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());

    // Only allow non-volatile loads.
    if (!LI || LI->isVolatile())
      return LegalStoreKind::None;
    // Only allow simple or unordered-atomic loads.
    if (!LI->isUnordered())
      return LegalStoreKind::None;

    // See if the pointer expression is an AddRec like {base,+,1} on the current
    // loop, which indicates a strided load. If we have something else, it's a
    // random load we can't handle.
    const SCEV *LoadEv = SE->getSCEV(LI->getPointerOperand());

    // The store and load must share the same stride.
    if (!match(LoadEv, m_scev_AffineAddRec(m_SCEV(), m_scev_Specific(Stride),
                                           m_SpecificLoop(CurLoop))))
      return LegalStoreKind::None;

    // Success. This store can be converted into a memcpy.
    UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
    return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
                           : LegalStoreKind::Memcpy;
  }
  // This store can't be transformed into a memset/memcpy.
  return LegalStoreKind::None;
}

void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
  StoreRefsForMemset.clear();
  StoreRefsForMemsetPattern.clear();
  StoreRefsForMemcpy.clear();
  for (Instruction &I : *BB) {
    StoreInst *SI = dyn_cast<StoreInst>(&I);
    if (!SI)
      continue;

    // Make sure this is a strided store with a constant stride.
    switch (isLegalStore(SI)) {
    case LegalStoreKind::None:
      // Nothing to do
      break;
    case LegalStoreKind::Memset: {
      // Find the base pointer.
      Value *Ptr = getUnderlyingObject(SI->getPointerOperand());
      StoreRefsForMemset[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::MemsetPattern: {
      // Find the base pointer.
      Value *Ptr = getUnderlyingObject(SI->getPointerOperand());
      StoreRefsForMemsetPattern[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::Memcpy:
    case LegalStoreKind::UnorderedAtomicMemcpy:
      StoreRefsForMemcpy.push_back(SI);
      break;
    default:
      assert(false && "unhandled return value");
      break;
    }
  }
}

/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count. This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(
    BasicBlock *BB, const SCEV *BECount,
    SmallVectorImpl<BasicBlock *> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop. For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop. Verify this now.
  for (BasicBlock *ExitBlock : ExitBlocks)
    if (!DT->dominates(BB, ExitBlock))
      return false;

  bool MadeChange = false;
  // Look for store instructions, which may be optimized to memset/memcpy.
  collectStores(BB);

  // Look for a single store or sets of stores with a common base, which can be
  // optimized into a memset (memset_pattern). The latter most commonly happens
  // with structs and hand-unrolled loops, as illustrated below.
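  //
  // For example (illustrative), a hand-unrolled loop such as
  //   for (i) { p[2*i] = 0; p[2*i + 1] = 0; }
  // yields two stores per iteration that form one consecutive chain and
  // become a single memset.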
  for (auto &SL : StoreRefsForMemset)
    MadeChange |= processLoopStores(SL.second, BECount, ForMemset::Yes);

  for (auto &SL : StoreRefsForMemsetPattern)
    MadeChange |= processLoopStores(SL.second, BECount, ForMemset::No);

  // Optimize the store into a memcpy if it feeds a similarly strided load.
  for (auto &SI : StoreRefsForMemcpy)
    MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);

  MadeChange |= processLoopMemIntrinsic<MemCpyInst>(
      BB, &LoopIdiomRecognize::processLoopMemCpy, BECount);
  MadeChange |= processLoopMemIntrinsic<MemSetInst>(
      BB, &LoopIdiomRecognize::processLoopMemSet, BECount);

  return MadeChange;
}

/// See if the given store(s) can be promoted to a memset.
bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
                                           const SCEV *BECount, ForMemset For) {
  // Try to find consecutive stores that can be transformed into memsets.
  SetVector<StoreInst *> Heads, Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  for (unsigned i = 0, e = SL.size(); i < e; ++i) {
    assert(SL[i]->isSimple() && "Expected only non-volatile stores.");

    Value *FirstStoredVal = SL[i]->getValueOperand();
    Value *FirstStorePtr = SL[i]->getPointerOperand();
    const SCEVAddRecExpr *FirstStoreEv =
        cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
    APInt FirstStride = getStoreStride(FirstStoreEv);
    unsigned FirstStoreSize =
        DL->getTypeStoreSize(SL[i]->getValueOperand()->getType());

    // See if we can optimize just this store in isolation.
    if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
      Heads.insert(SL[i]);
      continue;
    }

    Value *FirstSplatValue = nullptr;
    Constant *FirstPatternValue = nullptr;

    if (For == ForMemset::Yes)
      FirstSplatValue = isBytewiseValue(FirstStoredVal, *DL);
    else
      FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);

    assert((FirstSplatValue || FirstPatternValue) &&
           "Expected either splat value or pattern value.");

    IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the Stores
    // array according to the sequence: from i+1 to e, then from i-1 to 0.
    // This is because pairing with the immediately succeeding or preceding
    // candidate usually creates the best chance to find a memset opportunity.
    unsigned j = 0;
    for (j = i + 1; j < e; ++j)
      IndexQueue.push_back(j);
    for (j = i; j > 0; --j)
      IndexQueue.push_back(j - 1);

    for (auto &k : IndexQueue) {
      assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
      Value *SecondStorePtr = SL[k]->getPointerOperand();
      const SCEVAddRecExpr *SecondStoreEv =
          cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
      APInt SecondStride = getStoreStride(SecondStoreEv);

      if (FirstStride != SecondStride)
        continue;

      Value *SecondStoredVal = SL[k]->getValueOperand();
      Value *SecondSplatValue = nullptr;
      Constant *SecondPatternValue = nullptr;

      if (For == ForMemset::Yes)
        SecondSplatValue = isBytewiseValue(SecondStoredVal, *DL);
      else
        SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);

      assert((SecondSplatValue || SecondPatternValue) &&
             "Expected either splat value or pattern value.");

      if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
        if (For == ForMemset::Yes) {
          if (isa<UndefValue>(FirstSplatValue))
            FirstSplatValue = SecondSplatValue;
          if (FirstSplatValue != SecondSplatValue)
            continue;
        } else {
          if (isa<UndefValue>(FirstPatternValue))
            FirstPatternValue = SecondPatternValue;
          if (FirstPatternValue != SecondPatternValue)
            continue;
        }
        Tails.insert(SL[k]);
        Heads.insert(SL[i]);
        ConsecutiveChain[SL[i]] = SL[k];
        break;
      }
    }
  }

  // We may run into multiple chains that merge into a single chain. We mark the
  // stores that we transformed so that we don't visit the same store twice.
  SmallPtrSet<Value *, 16> TransformedStores;
  bool Changed = false;

  // For stores that start but don't end a link in the chain:
  for (StoreInst *I : Heads) {
    if (Tails.count(I))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and try
    // to transform it.
    SmallPtrSet<Instruction *, 8> AdjacentStores;
    StoreInst *HeadStore = I;
    unsigned StoreSize = 0;

    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (TransformedStores.count(I))
        break;
      AdjacentStores.insert(I);

      StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType());
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    Value *StoredVal = HeadStore->getValueOperand();
    Value *StorePtr = HeadStore->getPointerOperand();
    const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
    APInt Stride = getStoreStride(StoreEv);

    // Check to see if the stride matches the size of the stores. If so, then
    // we know that every byte is touched in the loop.
    if (StoreSize != Stride && StoreSize != -Stride)
      continue;

    bool IsNegStride = StoreSize == -Stride;

    Type *IntIdxTy = DL->getIndexType(StorePtr->getType());
    const SCEV *StoreSizeSCEV = SE->getConstant(IntIdxTy, StoreSize);
    if (processLoopStridedStore(StorePtr, StoreSizeSCEV,
                                MaybeAlign(HeadStore->getAlign()), StoredVal,
                                HeadStore, AdjacentStores, StoreEv, BECount,
                                IsNegStride)) {
      TransformedStores.insert_range(AdjacentStores);
      Changed = true;
    }
  }

  return Changed;
}

/// processLoopMemIntrinsic - Template function for calling different processor
/// functions based on mem intrinsic type.
template <typename MemInst>
bool LoopIdiomRecognize::processLoopMemIntrinsic(
    BasicBlock *BB,
    bool (LoopIdiomRecognize::*Processor)(MemInst *, const SCEV *),
    const SCEV *BECount) {
  bool MadeChange = false;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;
    // Look for memory instructions, which may be optimized to a larger one.
    if (MemInst *MI = dyn_cast<MemInst>(Inst)) {
      WeakTrackingVH InstPtr(&*I);
      if (!(this->*Processor)(MI, BECount))
        continue;
      MadeChange = true;

      // If processing the instruction invalidated our iterator, start over from
      // the top of the block.
      if (!InstPtr)
        I = BB->begin();
    }
  }
  return MadeChange;
}

/// processLoopMemCpy - See if this memcpy can be promoted to a large memcpy
bool LoopIdiomRecognize::processLoopMemCpy(MemCpyInst *MCI,
                                           const SCEV *BECount) {
  // We can only handle non-volatile memcpys with a constant size.
  if (MCI->isVolatile() || !isa<ConstantInt>(MCI->getLength()))
    return false;

  // If we're not allowed to hack on memcpy, we fail.
  if ((!HasMemcpy && !MCI->isForceInlined()) || DisableLIRP::Memcpy)
    return false;

  Value *Dest = MCI->getDest();
  Value *Source = MCI->getSource();
  if (!Dest || !Source)
    return false;

  // See if the load and store pointer expressions are AddRec like {base,+,1} on
  // the current loop, which indicates a strided load and store. If we have
  // something else, it's a random load or store we can't handle.
  const SCEV *StoreEv = SE->getSCEV(Dest);
  const SCEV *LoadEv = SE->getSCEV(Source);
  const APInt *StoreStrideValue, *LoadStrideValue;
  if (!match(StoreEv,
             m_scev_AffineAddRec(m_SCEV(), m_scev_APInt(StoreStrideValue),
                                 m_SpecificLoop(CurLoop))) ||
      !match(LoadEv,
             m_scev_AffineAddRec(m_SCEV(), m_scev_APInt(LoadStrideValue),
                                 m_SpecificLoop(CurLoop))))
    return false;

  // Reject memcpys that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MCI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Huge stride value - give up
  if (StoreStrideValue->getBitWidth() > 64 ||
      LoadStrideValue->getBitWidth() > 64)
    return false;

  if (SizeInBytes != *StoreStrideValue && SizeInBytes != -*StoreStrideValue) {
    ORE.emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "SizeStrideUnequal", MCI)
             << ore::NV("Inst", "memcpy") << " in "
             << ore::NV("Function", MCI->getFunction())
             << " function will not be hoisted: "
             << ore::NV("Reason", "memcpy size is not equal to stride");
    });
    return false;
  }

  int64_t StoreStrideInt = StoreStrideValue->getSExtValue();
  int64_t LoadStrideInt = LoadStrideValue->getSExtValue();
  // Check if the load stride matches the store stride.
  if (StoreStrideInt != LoadStrideInt)
    return false;

  return processLoopStoreOfLoopLoad(
      Dest, Source, SE->getConstant(Dest->getType(), SizeInBytes),
      MCI->getDestAlign(), MCI->getSourceAlign(), MCI, MCI,
      cast<SCEVAddRecExpr>(StoreEv), cast<SCEVAddRecExpr>(LoadEv), BECount);
}

/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
                                           const SCEV *BECount) {
  // We can only handle non-volatile memsets.
  if (MSI->isVolatile())
    return false;

  // If we're not allowed to hack on memset, we fail.
  if (!HasMemset || DisableLIRP::Memset)
    return false;

  Value *Pointer = MSI->getDest();

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEV *Ev = SE->getSCEV(Pointer);
  const SCEV *PointerStrideSCEV;
  if (!match(Ev, m_scev_AffineAddRec(m_SCEV(), m_SCEV(PointerStrideSCEV),
                                     m_SpecificLoop(CurLoop)))) {
    LLVM_DEBUG(dbgs() << "  Pointer is not affine, abort\n");
    return false;
  }

  const SCEV *MemsetSizeSCEV = SE->getSCEV(MSI->getLength());

  bool IsNegStride = false;
  const bool IsConstantSize = isa<ConstantInt>(MSI->getLength());

  if (IsConstantSize) {
    // Memset size is constant.
    // Check if the pointer stride matches the memset size. If so, then
    // we know that every byte is touched in the loop.
    LLVM_DEBUG(dbgs() << "  memset size is constant\n");
    uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    const APInt *Stride;
    if (!match(PointerStrideSCEV, m_scev_APInt(Stride)))
      return false;

    if (SizeInBytes != *Stride && SizeInBytes != -*Stride)
      return false;

    IsNegStride = SizeInBytes == -*Stride;
  } else {
    // Memset size is non-constant.
    // Check if the pointer stride matches the memset size.
    // To be conservative, the pass does not promote pointers that aren't in
    // address space zero. Also, the pass only handles memset lengths and
    // strides that are invariant for the top level loop.
    LLVM_DEBUG(dbgs() << "  memset size is non-constant\n");
    if (Pointer->getType()->getPointerAddressSpace() != 0) {
      LLVM_DEBUG(dbgs() << "  pointer is not in address space zero, "
                        << "abort\n");
      return false;
    }
    if (!SE->isLoopInvariant(MemsetSizeSCEV, CurLoop)) {
      LLVM_DEBUG(dbgs() << "  memset size is not loop-invariant, "
                        << "abort\n");
      return false;
    }

    // Compare positive direction PointerStrideSCEV with MemsetSizeSCEV.
    IsNegStride = PointerStrideSCEV->isNonConstantNegative();
    const SCEV *PositiveStrideSCEV =
        IsNegStride ? SE->getNegativeSCEV(PointerStrideSCEV)
                    : PointerStrideSCEV;
    LLVM_DEBUG(dbgs() << "  MemsetSizeSCEV: " << *MemsetSizeSCEV << "\n"
                      << "  PositiveStrideSCEV: " << *PositiveStrideSCEV
                      << "\n");

    if (PositiveStrideSCEV != MemsetSizeSCEV) {
      // If an expression is covered by the loop guard, compare again and
      // proceed with optimization if equal.
      const SCEV *FoldedPositiveStride =
          SE->applyLoopGuards(PositiveStrideSCEV, CurLoop);
      const SCEV *FoldedMemsetSize =
          SE->applyLoopGuards(MemsetSizeSCEV, CurLoop);

      LLVM_DEBUG(dbgs() << "  Try to fold SCEV based on loop guard\n"
                        << "    FoldedMemsetSize: " << *FoldedMemsetSize << "\n"
                        << "    FoldedPositiveStride: " << *FoldedPositiveStride
                        << "\n");

      if (FoldedPositiveStride != FoldedMemsetSize) {
        LLVM_DEBUG(dbgs() << "  SCEVs don't match, abort\n");
        return false;
      }
    }
  }
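
  // For example (illustrative): a loop that does memset(p + i * n, 0, n) with
  // a loop-invariant n has a stride and a size that are both the SCEV "n", so
  // the checks above succeed (possibly only after the loop-guard fold).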

  // Verify that the memset value is loop invariant. If not, we can't promote
  // the memset.
  Value *SplatValue = MSI->getValue();
  if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
    return false;

  SmallPtrSet<Instruction *, 1> MSIs;
  MSIs.insert(MSI);
  return processLoopStridedStore(Pointer, SE->getSCEV(MSI->getLength()),
                                 MSI->getDestAlign(), SplatValue, MSI, MSIs,
                                 cast<SCEVAddRecExpr>(Ev), BECount, IsNegStride,
                                 /*IsLoopMemset=*/true);
}

/// mayLoopAccessLocation - Return true if the specified loop might access the
/// specified pointer location, which is a loop-strided access. The 'Access'
/// argument specifies what the verboten forms of access are (read or write).
static bool
mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
                      const SCEV *BECount, const SCEV *StoreSizeSCEV,
                      AliasAnalysis &AA,
                      SmallPtrSetImpl<Instruction *> &IgnoredInsts) {
  // Get the location that may be stored across the loop. Since the access is
  // strided positively through memory, we say that the modified location starts
  // at the pointer and has infinite size.
  LocationSize AccessSize = LocationSize::afterPointer();

  // If the loop iterates a fixed number of times, we can refine the access size
  // to be exactly the size of the memset, which is (BECount+1)*StoreSize.
  const APInt *BECst, *ConstSize;
  if (match(BECount, m_scev_APInt(BECst)) &&
      match(StoreSizeSCEV, m_scev_APInt(ConstSize))) {
    std::optional<uint64_t> BEInt = BECst->tryZExtValue();
    std::optional<uint64_t> SizeInt = ConstSize->tryZExtValue();
    // FIXME: Should this check for overflow?
    if (BEInt && SizeInt)
      AccessSize = LocationSize::precise((*BEInt + 1) * *SizeInt);
  }
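
  // For example (illustrative): with BECount = 9 and a 4-byte store size, the
  // loop touches bytes [Ptr, Ptr + 40), so AccessSize becomes precise(40).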

  // TODO: For this to be really effective, we have to dive into the pointer
  // operand in the store. A store to &A[i] of 100 will always return MayAlias
  // with a store to &A[100]; we need StoreLoc to be "A" with size 100, which
  // will then no-alias a store to &A[100].
  MemoryLocation StoreLoc(Ptr, AccessSize);

  for (BasicBlock *B : L->blocks())
    for (Instruction &I : *B)
      if (!IgnoredInsts.contains(&I) &&
          isModOrRefSet(AA.getModRefInfo(&I, StoreLoc) & Access))
        return true;
  return false;
}

// If we have a negative stride, Start refers to the end of the memory location
// we're trying to memset. Therefore, we need to recompute the base pointer,
// which is just Start - BECount*Size.
static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
                                        Type *IntPtr, const SCEV *StoreSizeSCEV,
                                        ScalarEvolution *SE) {
  const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
  if (!StoreSizeSCEV->isOne()) {
    // index = back edge count * store size
    Index = SE->getMulExpr(Index,
                           SE->getTruncateOrZeroExtend(StoreSizeSCEV, IntPtr),
                           SCEV::FlagNUW);
  }
  // base pointer = start - index * store size
  return SE->getMinusSCEV(Start, Index);
}
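
// Illustrative example (not from the original source): a loop that stores to
// p[n-1], p[n-2], ..., p[0] has Start == &p[n-1] and BECount == n-1, so with
// a one-byte store size the recomputed base pointer is &p[n-1] - (n-1), i.e.
// &p[0].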

/// Compute the number of bytes as a SCEV from the backedge taken count.
///
/// This also maps the SCEV into the provided type and tries to handle the
/// computation in a way that will fold cleanly.
static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
                               const SCEV *StoreSizeSCEV, Loop *CurLoop,
                               const DataLayout *DL, ScalarEvolution *SE) {
  const SCEV *TripCountSCEV =
      SE->getTripCountFromExitCount(BECount, IntPtr, CurLoop);
  return SE->getMulExpr(TripCountSCEV,
                        SE->getTruncateOrZeroExtend(StoreSizeSCEV, IntPtr),
                        SCEV::FlagNUW);
}
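
// For example (illustrative): BECount = n-1 yields a trip count of n, so with
// a 4-byte store size the returned SCEV is 4*n bytes.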

/// processLoopStridedStore - We see a strided store of some value. If we can
/// transform this into a memset or memset_pattern in the loop preheader, do so.
bool LoopIdiomRecognize::processLoopStridedStore(
    Value *DestPtr, const SCEV *StoreSizeSCEV, MaybeAlign StoreAlignment,
    Value *StoredVal, Instruction *TheStore,
    SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
    const SCEV *BECount, bool IsNegStride, bool IsLoopMemset) {
  Module *M = TheStore->getModule();

  // The trip count of the loop and the base pointer of the addrec SCEV is
  // guaranteed to be loop invariant, which means that it should dominate the
  // header. This allows us to insert code for it in the preheader.
  unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, "loop-idiom");
  SCEVExpanderCleaner ExpCleaner(Expander);

  Type *DestInt8PtrTy = Builder.getPtrTy(DestAS);
  Type *IntIdxTy = DL->getIndexType(DestPtr->getType());

  bool Changed = false;
  const SCEV *Start = Ev->getStart();
  // Handle negative strided loops.
  if (IsNegStride)
    Start = getStartForNegStride(Start, BECount, IntIdxTy, StoreSizeSCEV, SE);

  // TODO: ideally we should still be able to generate memset if SCEV expander
  // is taught to generate the dependencies at the latest point.
  if (!Expander.isSafeToExpand(Start))
    return Changed;

  // Okay, we have a strided store "p[i]" of a splattable value. We can turn
  // this into a memset in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location. Check for any overlap by generating the
  // base pointer and checking the region.
  Value *BasePtr =
      Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());

  // From here on out, conservatively report to the pass manager that we've
  // changed the IR, even if we later clean up these added instructions. There
  // may be structural differences e.g. in the order of use lists not accounted
  // for in just a textual dump of the IR. This is written as a variable, even
  // though statically all the places this dominates could be replaced with
  // 'true', with the hope that anyone trying to be clever / "more precise" with
  // the return value will read this comment, and leave them alone.
  Changed = true;

  if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount,
                            StoreSizeSCEV, *AA, Stores))
    return Changed;

  if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
    return Changed;

  // Okay, everything looks good, insert the memset.
  Value *SplatValue = isBytewiseValue(StoredVal, *DL);
  Constant *PatternValue = nullptr;
  if (!SplatValue)
    PatternValue = getMemSetPatternValue(StoredVal, DL);

  // MemsetArg is the number of bytes for the memset libcall, and the number
  // of pattern repetitions if the memset.pattern intrinsic is being used.
  Value *MemsetArg;
  std::optional<int64_t> BytesWritten;

  if (PatternValue && (HasMemsetPattern || ForceMemsetPatternIntrinsic)) {
    const SCEV *TripCountS =
        SE->getTripCountFromExitCount(BECount, IntIdxTy, CurLoop);
    if (!Expander.isSafeToExpand(TripCountS))
      return Changed;
    const SCEVConstant *ConstStoreSize = dyn_cast<SCEVConstant>(StoreSizeSCEV);
    if (!ConstStoreSize)
      return Changed;
    Value *TripCount = Expander.expandCodeFor(TripCountS, IntIdxTy,
                                              Preheader->getTerminator());
    uint64_t PatternRepsPerTrip =
        (ConstStoreSize->getValue()->getZExtValue() * 8) /
        DL->getTypeSizeInBits(PatternValue->getType());
    // If ConstStoreSize is not equal to the width of PatternValue, then
    // MemsetArg is TripCount * (ConstStoreSize/PatternValueWidth). Else
    // MemsetArg is just TripCount.
    MemsetArg =
        PatternRepsPerTrip == 1
            ? TripCount
            : Builder.CreateMul(TripCount,
                                Builder.getIntN(IntIdxTy->getIntegerBitWidth(),
                                                PatternRepsPerTrip));
    if (auto *CI = dyn_cast<ConstantInt>(TripCount))
      BytesWritten =
          CI->getZExtValue() * ConstStoreSize->getValue()->getZExtValue();

  } else {
    const SCEV *NumBytesS =
        getNumBytes(BECount, IntIdxTy, StoreSizeSCEV, CurLoop, DL, SE);

    // TODO: ideally we should still be able to generate memset if SCEV expander
    // is taught to generate the dependencies at the latest point.
    if (!Expander.isSafeToExpand(NumBytesS))
      return Changed;
    MemsetArg =
        Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());
    if (auto *CI = dyn_cast<ConstantInt>(MemsetArg))
      BytesWritten = CI->getZExtValue();
  }
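
  // Worked example (illustrative): an i64 strided store of a repeating i32
  // pattern has ConstStoreSize == 8 bytes and a 32-bit PatternValue, so
  // PatternRepsPerTrip == 2 and MemsetArg becomes TripCount * 2 repetitions.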
  assert(MemsetArg && "MemsetArg should have been set");

  AAMDNodes AATags = TheStore->getAAMetadata();
  for (Instruction *Store : Stores)
    AATags = AATags.merge(Store->getAAMetadata());
  if (BytesWritten)
    AATags = AATags.extendTo(BytesWritten.value());
  else
    AATags = AATags.extendTo(-1);

  CallInst *NewCall;
  if (SplatValue) {
    NewCall = Builder.CreateMemSet(BasePtr, SplatValue, MemsetArg,
                                   MaybeAlign(StoreAlignment),
                                   /*isVolatile=*/false, AATags);
  } else if (ForceMemsetPatternIntrinsic ||
             isLibFuncEmittable(M, TLI, LibFunc_memset_pattern16)) {
    assert(isa<SCEVConstant>(StoreSizeSCEV) && "Expected constant store size");

    NewCall = Builder.CreateIntrinsic(
        Intrinsic::experimental_memset_pattern,
        {DestInt8PtrTy, PatternValue->getType(), IntIdxTy},
        {BasePtr, PatternValue, MemsetArg,
         ConstantInt::getFalse(M->getContext())});
    if (StoreAlignment)
      cast<MemSetPatternInst>(NewCall)->setDestAlignment(*StoreAlignment);
    NewCall->setAAMetadata(AATags);
  } else {
    // Neither a memset, nor memset_pattern16
    return Changed;
  }

  NewCall->setDebugLoc(TheStore->getDebugLoc());

  if (MSSAU) {
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        NewCall, nullptr, NewCall->getParent(), MemorySSA::BeforeTerminator);
    MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
  }

  LLVM_DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
                    << "    from store to: " << *Ev << " at: " << *TheStore
                    << "\n");

  ORE.emit([&]() {
    OptimizationRemark R(DEBUG_TYPE, "ProcessLoopStridedStore",
                         NewCall->getDebugLoc(), Preheader);
    R << "Transformed loop-strided store in "
      << ore::NV("Function", TheStore->getFunction())
      << " function into a call to "
      << ore::NV("NewFunction", NewCall->getCalledFunction())
      << "() intrinsic";
    if (!Stores.empty())
      R << ore::setExtraArgs();
    for (auto *I : Stores) {
      R << ore::NV("FromBlock", I->getParent()->getName())
        << ore::NV("ToBlock", Preheader->getName());
    }
    return R;
  });

  // Okay, the memset has been formed. Zap the original store and anything that
  // feeds into it.
  for (auto *I : Stores) {
    if (MSSAU)
      MSSAU->removeMemoryAccess(I, true);
    deleteDeadInstruction(I);
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  ++NumMemSet;
  ExpCleaner.markResultUsed();
  return true;
}

/// If the stored value is a strided load in the same loop with the same stride,
/// this may be transformable into a memcpy. This kicks in for stuff like
///   for (i) A[i] = B[i];
bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
                                                    const SCEV *BECount) {
  assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");

  Value *StorePtr = SI->getPointerOperand();
  const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());

  // The store must be feeding a non-volatile load.
  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
  assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided load. If we have something else, it's a
  // random load we can't handle.
  Value *LoadPtr = LI->getPointerOperand();
  const SCEVAddRecExpr *LoadEv = cast<SCEVAddRecExpr>(SE->getSCEV(LoadPtr));

  const SCEV *StoreSizeSCEV = SE->getConstant(StorePtr->getType(), StoreSize);
  return processLoopStoreOfLoopLoad(StorePtr, LoadPtr, StoreSizeSCEV,
                                    SI->getAlign(), LI->getAlign(), SI, LI,
                                    StoreEv, LoadEv, BECount);
}

namespace {
class MemmoveVerifier {
public:
  explicit MemmoveVerifier(const Value &LoadBasePtr, const Value &StoreBasePtr,
                           const DataLayout &DL)
      : DL(DL), BP1(llvm::GetPointerBaseWithConstantOffset(
                    LoadBasePtr.stripPointerCasts(), LoadOff, DL)),
        BP2(llvm::GetPointerBaseWithConstantOffset(
            StoreBasePtr.stripPointerCasts(), StoreOff, DL)),
        IsSameObject(BP1 == BP2) {}

  bool loadAndStoreMayFormMemmove(unsigned StoreSize, bool IsNegStride,
                                  const Instruction &TheLoad,
                                  bool IsMemCpy) const {
    if (IsMemCpy) {
      // Ensure that LoadBasePtr is after StoreBasePtr or before StoreBasePtr
      // for negative stride.
      if ((!IsNegStride && LoadOff <= StoreOff) ||
          (IsNegStride && LoadOff >= StoreOff))
        return false;
    } else {
      // Ensure that LoadBasePtr is after StoreBasePtr or before StoreBasePtr
      // for negative stride. LoadBasePtr shouldn't overlap with StoreBasePtr.
      int64_t LoadSize =
          DL.getTypeSizeInBits(TheLoad.getType()).getFixedValue() / 8;
      if (BP1 != BP2 || LoadSize != int64_t(StoreSize))
        return false;
      if ((!IsNegStride && LoadOff < StoreOff + int64_t(StoreSize)) ||
          (IsNegStride && LoadOff + LoadSize > StoreOff))
        return false;
    }
    return true;
  }

private:
  const DataLayout &DL;
  int64_t LoadOff = 0;
  int64_t StoreOff = 0;
  const Value *BP1;
  const Value *BP2;

public:
  const bool IsSameObject;
};
} // namespace
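
// Illustrative example (not from the original source): a loop doing
// p[i] = p[i + 1] over one object gives LoadOff == StoreOff + element size,
// so for a positive stride loadAndStoreMayFormMemmove returns true and the
// loop can still be rewritten even though source and destination overlap.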

bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
    Value *DestPtr, Value *SourcePtr, const SCEV *StoreSizeSCEV,
    MaybeAlign StoreAlign, MaybeAlign LoadAlign, Instruction *TheStore,
    Instruction *TheLoad, const SCEVAddRecExpr *StoreEv,
    const SCEVAddRecExpr *LoadEv, const SCEV *BECount) {

  // FIXME: until llvm.memcpy.inline supports dynamic sizes, we need to
  // conservatively bail here, since otherwise we may have to transform
  // llvm.memcpy.inline into llvm.memcpy which is illegal.
  if (auto *MCI = dyn_cast<MemCpyInst>(TheStore); MCI && MCI->isForceInlined())
    return false;

  // The trip count of the loop and the base pointer of the addrec SCEV is
  // guaranteed to be loop invariant, which means that it should dominate the
  // header. This allows us to insert code for it in the preheader.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, "loop-idiom");

  SCEVExpanderCleaner ExpCleaner(Expander);

  bool Changed = false;
  const SCEV *StrStart = StoreEv->getStart();
  unsigned StrAS = DestPtr->getType()->getPointerAddressSpace();
  Type *IntIdxTy = Builder.getIntNTy(DL->getIndexSizeInBits(StrAS));

  APInt Stride = getStoreStride(StoreEv);
  const SCEVConstant *ConstStoreSize = dyn_cast<SCEVConstant>(StoreSizeSCEV);

  // TODO: Deal with non-constant size; Currently expect constant store size
  assert(ConstStoreSize && "store size is expected to be a constant");

  int64_t StoreSize = ConstStoreSize->getValue()->getZExtValue();
  bool IsNegStride = StoreSize == -Stride;

  // Handle negative strided loops.
  if (IsNegStride)
    StrStart =
        getStartForNegStride(StrStart, BECount, IntIdxTy, StoreSizeSCEV, SE);

  // Okay, we have a strided store "p[i]" of a loaded value. We can turn
  // this into a memcpy in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write the memory region we're storing to. This includes the load that
  // feeds the stores. Check for an alias by generating the base address and
  // checking everything.
  Value *StoreBasePtr = Expander.expandCodeFor(
      StrStart, Builder.getPtrTy(StrAS), Preheader->getTerminator());

  // From here on out, conservatively report to the pass manager that we've
  // changed the IR, even if we later clean up these added instructions. There
  // may be structural differences e.g. in the order of use lists not accounted
  // for in just a textual dump of the IR. This is written as a variable, even
  // though statically all the places this dominates could be replaced with
  // 'true', with the hope that anyone trying to be clever / "more precise" with
  // the return value will read this comment, and leave them alone.
  Changed = true;

  SmallPtrSet<Instruction *, 2> IgnoredInsts;
  IgnoredInsts.insert(TheStore);

  bool IsMemCpy = isa<MemCpyInst>(TheStore);
  const StringRef InstRemark = IsMemCpy ? "memcpy" : "load and store";

  bool LoopAccessStore =
      mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
                            StoreSizeSCEV, *AA, IgnoredInsts);
  if (LoopAccessStore) {
    // For memmove case it's not enough to guarantee that loop doesn't access
    // TheStore and TheLoad. Additionally we need to make sure that TheStore is
    // the only user of TheLoad.
    if (!TheLoad->hasOneUse())
      return Changed;
    IgnoredInsts.insert(TheLoad);
    if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop,
                              BECount, StoreSizeSCEV, *AA, IgnoredInsts)) {
      ORE.emit([&]() {
        return OptimizationRemarkMissed(DEBUG_TYPE, "LoopMayAccessStore",
                                        TheStore)
               << ore::NV("Inst", InstRemark) << " in "
               << ore::NV("Function", TheStore->getFunction())
               << " function will not be hoisted: "
               << ore::NV("Reason", "The loop may access store location");
      });
      return Changed;
    }
    IgnoredInsts.erase(TheLoad);
  }

  const SCEV *LdStart = LoadEv->getStart();
  unsigned LdAS = SourcePtr->getType()->getPointerAddressSpace();

  // Handle negative strided loops.
  if (IsNegStride)
    LdStart =
        getStartForNegStride(LdStart, BECount, IntIdxTy, StoreSizeSCEV, SE);

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  Value *LoadBasePtr = Expander.expandCodeFor(LdStart, Builder.getPtrTy(LdAS),
                                              Preheader->getTerminator());

  // If the store is a memcpy instruction, we must check if it will write to
  // the load memory locations. So remove it from the ignored stores.
  MemmoveVerifier Verifier(*LoadBasePtr, *StoreBasePtr, *DL);
  if (IsMemCpy && !Verifier.IsSameObject)
    IgnoredInsts.erase(TheStore);
  if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
                            StoreSizeSCEV, *AA, IgnoredInsts)) {
    ORE.emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "LoopMayAccessLoad", TheLoad)
             << ore::NV("Inst", InstRemark) << " in "
             << ore::NV("Function", TheStore->getFunction())
             << " function will not be hoisted: "
             << ore::NV("Reason", "The loop may access load location");
    });
    return Changed;
  }

  bool IsAtomic = TheStore->isAtomic() || TheLoad->isAtomic();
  bool UseMemMove = IsMemCpy ? Verifier.IsSameObject : LoopAccessStore;

  if (IsAtomic) {
    // For now don't support unordered atomic memmove.
    if (UseMemMove)
      return Changed;

    // We cannot allow unaligned ops for unordered load/store, so reject
    // anything where the alignment isn't at least the element size.
    assert((StoreAlign && LoadAlign) &&
           "Expect unordered load/store to have align.");
    if (*StoreAlign < StoreSize || *LoadAlign < StoreSize)
      return Changed;

    // If the element.atomic memcpy is not lowered into explicit
    // loads/stores later, then it will be lowered into an element-size
    // specific lib call. If the lib call doesn't exist for our store size, then
    // we shouldn't generate the memcpy.
    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
      return Changed;
  }

  if (UseMemMove)
    if (!Verifier.loadAndStoreMayFormMemmove(StoreSize, IsNegStride, *TheLoad,
                                             IsMemCpy))
      return Changed;

  if (avoidLIRForMultiBlockLoop())
    return Changed;

  // Okay, everything is safe, we can transform this!

  const SCEV *NumBytesS =
      getNumBytes(BECount, IntIdxTy, StoreSizeSCEV, CurLoop, DL, SE);

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());

  AAMDNodes AATags = TheLoad->getAAMetadata();
  AAMDNodes StoreAATags = TheStore->getAAMetadata();
  AATags = AATags.merge(StoreAATags);
  if (auto CI = dyn_cast<ConstantInt>(NumBytes))
    AATags = AATags.extendTo(CI->getZExtValue());
  else
    AATags = AATags.extendTo(-1);

  CallInst *NewCall = nullptr;
  // Check whether to generate an unordered atomic memcpy:
  //  If the load or store are atomic, then they must necessarily be unordered
  //  by previous checks.
  if (!IsAtomic) {
    if (UseMemMove)
      NewCall = Builder.CreateMemMove(StoreBasePtr, StoreAlign, LoadBasePtr,
                                      LoadAlign, NumBytes,
                                      /*isVolatile=*/false, AATags);
    else
      NewCall =
          Builder.CreateMemCpy(StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign,
                               NumBytes, /*isVolatile=*/false, AATags);
  } else {
    // Create the call.
    // Note that unordered atomic loads/stores are *required* by the spec to
    // have an alignment but non-atomic loads/stores may not.
    NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
        StoreBasePtr, *StoreAlign, LoadBasePtr, *LoadAlign, NumBytes, StoreSize,
        AATags);
  }
  NewCall->setDebugLoc(TheStore->getDebugLoc());

  if (MSSAU) {
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        NewCall, nullptr, NewCall->getParent(), MemorySSA::BeforeTerminator);
    MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
  }

  LLVM_DEBUG(dbgs() << "  Formed new call: " << *NewCall << "\n"
                    << "    from load ptr=" << *LoadEv << " at: " << *TheLoad
                    << "\n"
                    << "    from store ptr=" << *StoreEv << " at: " << *TheStore
                    << "\n");

  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStoreOfLoopLoad",
                              NewCall->getDebugLoc(), Preheader)
           << "Formed a call to "
           << ore::NV("NewFunction", NewCall->getCalledFunction())
           << "() intrinsic from " << ore::NV("Inst", InstRemark)
           << " instruction in " << ore::NV("Function", TheStore->getFunction())
           << " function"
           << ore::setExtraArgs()
           << ore::NV("FromBlock", TheStore->getParent()->getName())
           << ore::NV("ToBlock", Preheader->getName());
  });

  // Okay, a new call to memcpy/memmove has been formed. Zap the original store
  // and anything that feeds into it.
  if (MSSAU)
    MSSAU->removeMemoryAccess(TheStore, true);
  deleteDeadInstruction(TheStore);
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  if (UseMemMove)
    ++NumMemMove;
  else
    ++NumMemCpy;
  ExpCleaner.markResultUsed();
  return true;
}

// When compiling for code size we avoid idiom recognition for a multi-block
// loop unless it is a loop_memset idiom or a memset/memcpy idiom in a nested
// loop.
bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
                                                   bool IsLoopMemset) {
  if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
    if (CurLoop->isOutermost() && (!IsMemset || !IsLoopMemset)) {
      LLVM_DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName()
                        << " : LIR " << (IsMemset ? "Memset" : "Memcpy")
                        << " avoided: multi-block top-level loop\n");
      return true;
    }
  }

  return false;
}

1541bool LoopIdiomRecognize::optimizeCRCLoop(const PolynomialInfo &Info) {
1542 // FIXME: Hexagon has a special HexagonLoopIdiom that optimizes CRC using
1543 // carry-less multiplication instructions, which is more efficient than our
1544 // Sarwate table-lookup optimization. Hence, until we're able to emit
1545 // target-specific instructions for Hexagon, subsuming HexagonLoopIdiom,
1546 // disable the optimization for Hexagon.
1547 Module &M = *CurLoop->getHeader()->getModule();
1548 Triple TT(M.getTargetTriple());
1549 if (TT.getArch() == Triple::hexagon)
1550 return false;
1551
1552 // First, create a new GlobalVariable corresponding to the
1553 // Sarwate-lookup-table.
1554 Type *CRCTy = Info.LHS->getType();
1555 unsigned CRCBW = CRCTy->getIntegerBitWidth();
1556 std::array<Constant *, 256> CRCConstants;
1557 transform(HashRecognize::genSarwateTable(Info.RHS, Info.ByteOrderSwapped),
1558 CRCConstants.begin(),
1559 [CRCTy](const APInt &E) { return ConstantInt::get(CRCTy, E); });
1560 Constant *ConstArray =
1561 ConstantArray::get(ArrayType::get(CRCTy, 256), CRCConstants);
1562 GlobalVariable *GV =
1563 new GlobalVariable(M, ConstArray->getType(), true,
1564 GlobalValue::PrivateLinkage, ConstArray, ".crctable");
1565
1566 PHINode *IV = CurLoop->getCanonicalInductionVariable();
1567 SmallVector<PHINode *, 2> Cleanup;
1568
1569 // Next, mark all PHIs for removal except IV.
1570 {
1571 for (PHINode &PN : CurLoop->getHeader()->phis()) {
1572 if (&PN == IV)
1573 continue;
1574 PN.replaceAllUsesWith(PoisonValue::get(PN.getType()));
1575 Cleanup.push_back(&PN);
1576 }
1577 }
1578
1579 // Next, fix up the trip count.
1580 {
1581 unsigned NewBTC = (Info.TripCount / 8) - 1;
1582 BasicBlock *LoopBlk = CurLoop->getLoopLatch();
1583 BranchInst *BrInst = cast<BranchInst>(LoopBlk->getTerminator());
1584 CmpPredicate ExitPred = BrInst->getSuccessor(0) == LoopBlk
1585 ? ICmpInst::Predicate::ICMP_NE
1586 : ICmpInst::Predicate::ICMP_EQ;
1587 Instruction *ExitCond = CurLoop->getLatchCmpInst();
1588 Value *ExitLimit = ConstantInt::get(IV->getType(), NewBTC);
1589 IRBuilder<> Builder(ExitCond);
1590 Value *NewExitCond =
1591 Builder.CreateICmp(ExitPred, IV, ExitLimit, "exit.cond");
1592 ExitCond->replaceAllUsesWith(NewExitCond);
1593 deleteDeadInstruction(ExitCond);
1594 }
1595
1596 // Finally, fill the loop with the Sarwate-table-lookup logic, and replace all
1597 // uses of ComputedValue.
1598 //
1599 // Little-endian:
1600 // crc = (crc >> 8) ^ tbl[(iv'th byte of data) ^ (bottom byte of crc)]
1601 // Big-Endian:
1602 // crc = (crc << 8) ^ tbl[(iv'th byte of data) ^ (top byte of crc)]
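//
// In C, the little-endian (reflected) form is the classic byte-at-a-time
// table-driven loop; a hypothetical sketch, not code from this file:
//
//   for (size_t i = 0; i < len; ++i)
//     crc = (crc >> 8) ^ tbl[(uint8_t)crc ^ data[i]];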
1603 {
1604 auto LoByte = [](IRBuilderBase &Builder, Value *Op, const Twine &Name) {
1605 return Builder.CreateZExtOrTrunc(
1606 Op, IntegerType::getInt8Ty(Op->getContext()), Name);
1607 };
1608 auto HiIdx = [LoByte, CRCBW](IRBuilderBase &Builder, Value *Op,
1609 const Twine &Name) {
1610 Type *OpTy = Op->getType();
1611
1612 // When the bitwidth of the CRC mismatches the Op's bitwidth, we need to
1613 // use the CRC's bitwidth as the reference for shifting right.
1614 return LoByte(Builder,
1615 CRCBW > 8 ? Builder.CreateLShr(
1616 Op, ConstantInt::get(OpTy, CRCBW - 8), Name)
1617 : Op,
1618 Name + ".lo.byte");
1619 };
1620
1621 IRBuilder<> Builder(CurLoop->getHeader(),
1622 CurLoop->getHeader()->getFirstNonPHIIt());
1623
1624 // Create the CRC PHI, and initialize its incoming value to the initial
1625 // value of CRC.
1626 PHINode *CRCPhi = Builder.CreatePHI(CRCTy, 2, "crc");
1627 CRCPhi->addIncoming(Info.LHS, CurLoop->getLoopPreheader());
1628
1629 // CRC is now an evolving variable, initialized to the PHI.
1630 Value *CRC = CRCPhi;
1631
1632 // TableIndexer = ((top|bottom) byte of CRC). It is XOR'ed with (iv'th byte
1633 // of LHSAux), if LHSAux is non-nullptr.
1634 Value *Indexer = CRC;
1635 if (Value *Data = Info.LHSAux) {
1636 Type *DataTy = Data->getType();
1637
1638 // To index into the (iv'th byte of LHSAux), we multiply iv by 8, then
1639 // shift right by that amount and take the lo-byte (in the little-endian
1640 // case), or shift left by that amount and take the hi-idx (in the
1641 // big-endian case).
1642 Value *IVBits = Builder.CreateZExtOrTrunc(
1643 Builder.CreateShl(IV, 3, "iv.bits"), DataTy, "iv.indexer");
1644 Value *DataIndexer =
1645 Info.ByteOrderSwapped
1646 ? Builder.CreateShl(Data, IVBits, "data.indexer")
1647 : Builder.CreateLShr(Data, IVBits, "data.indexer");
1648 Indexer = Builder.CreateXor(
1649 DataIndexer,
1650 Builder.CreateZExtOrTrunc(Indexer, DataTy, "crc.indexer.cast"),
1651 "crc.data.indexer");
1652 }
1653
1654 Indexer = Info.ByteOrderSwapped ? HiIdx(Builder, Indexer, "indexer.hi")
1655 : LoByte(Builder, Indexer, "indexer.lo");
1656
1657 // Always index into a GEP using the index type.
1658 Indexer = Builder.CreateZExt(
1659 Indexer, SE->getDataLayout().getIndexType(GV->getType()),
1660 "indexer.ext");
1661
1662 // CRCTableLd = CRCTable[(iv'th byte of data) ^ (top|bottom) byte of CRC].
1663 Value *CRCTableGEP =
1664 Builder.CreateInBoundsGEP(CRCTy, GV, Indexer, "tbl.ptradd");
1665 Value *CRCTableLd = Builder.CreateLoad(CRCTy, CRCTableGEP, "tbl.ld");
1666
1667 // CRCNext = (CRC (<<|>>) 8) ^ CRCTableLd, or simply CRCTableLd in case of
1668 // CRC-8.
1669 Value *CRCNext = CRCTableLd;
1670 if (CRCBW > 8) {
1671 Value *CRCShift = Info.ByteOrderSwapped
1672 ? Builder.CreateShl(CRC, 8, "crc.be.shift")
1673 : Builder.CreateLShr(CRC, 8, "crc.le.shift");
1674 CRCNext = Builder.CreateXor(CRCShift, CRCTableLd, "crc.next");
1675 }
1676
1677 // Connect the back-edge for the loop, and RAUW the ComputedValue.
1678 CRCPhi->addIncoming(CRCNext, CurLoop->getLoopLatch());
1679 Info.ComputedValue->replaceUsesOutsideBlock(CRCNext,
1680 CurLoop->getLoopLatch());
1681 }
1682
1683 // Cleanup.
1684 {
1685 for (PHINode *PN : Cleanup)
1686 RecursivelyDeleteDeadPHINode(PN);
1687 SE->forgetLoop(CurLoop);
1688 }
1689 return true;
1690}
1691
1692bool LoopIdiomRecognize::runOnNoncountableLoop() {
1693 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F["
1694 << CurLoop->getHeader()->getParent()->getName()
1695 << "] Noncountable Loop %"
1696 << CurLoop->getHeader()->getName() << "\n");
1697
1698 return recognizePopcount() || recognizeAndInsertFFS() ||
1699 recognizeShiftUntilBitTest() || recognizeShiftUntilZero() ||
1700 recognizeShiftUntilLessThan() || recognizeAndInsertStrLen();
1701}
1702
1703 /// Check if the given conditional branch is based on a comparison between
1704 /// a variable and zero, and if the variable is non-zero (or zero, when
1705 /// JmpOnZero is true), the control yields to the loop entry. If the branch
1706 /// matches this behavior, the variable involved in the comparison is
1707 /// returned. This function is called to check whether the precondition and
1708 /// postcondition of the loop are in desirable form.
1709 static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry,
1710 bool JmpOnZero = false) {
1711 if (!BI || !BI->isConditional())
1712 return nullptr;
1713
1714 ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
1715 if (!Cond)
1716 return nullptr;
1717
1718 auto *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
1719 if (!CmpZero || !CmpZero->isZero())
1720 return nullptr;
1721
1722 BasicBlock *TrueSucc = BI->getSuccessor(0);
1723 BasicBlock *FalseSucc = BI->getSuccessor(1);
1724 if (JmpOnZero)
1725 std::swap(TrueSucc, FalseSucc);
1726
1727 ICmpInst::Predicate Pred = Cond->getPredicate();
1728 if ((Pred == ICmpInst::ICMP_NE && TrueSucc == LoopEntry) ||
1729 (Pred == ICmpInst::ICMP_EQ && FalseSucc == LoopEntry))
1730 return Cond->getOperand(0);
1731
1732 return nullptr;
1733}
1734
1735namespace {
1736
1737class StrlenVerifier {
1738public:
1739 explicit StrlenVerifier(const Loop *CurLoop, ScalarEvolution *SE,
1740 const TargetLibraryInfo *TLI)
1741 : CurLoop(CurLoop), SE(SE), TLI(TLI) {}
1742
1743 bool isValidStrlenIdiom() {
1744 // Give up if the loop has multiple blocks, multiple backedges, or
1745 // multiple exit blocks
1746 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1 ||
1747 !CurLoop->getUniqueExitBlock())
1748 return false;
1749
1750 // It should have a preheader and a branch instruction.
1751 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1752 if (!Preheader)
1753 return false;
1754
1755 BranchInst *EntryBI = dyn_cast<BranchInst>(Preheader->getTerminator());
1756 if (!EntryBI)
1757 return false;
1758
1759 // The loop exit must be conditioned on an icmp against 0, the null
1760 // terminator. The icmp operand has to be a load from some SSA pointer that
1761 // increments by one element in the loop.
1762 BasicBlock *LoopBody = *CurLoop->block_begin();
1763
1764 // Skip if the body is too big as it most likely is not a strlen idiom.
1765 if (!LoopBody || LoopBody->size() >= 15)
1766 return false;
1767
1768 BranchInst *LoopTerm = dyn_cast<BranchInst>(LoopBody->getTerminator());
1769 Value *LoopCond = matchCondition(LoopTerm, LoopBody);
1770 if (!LoopCond)
1771 return false;
1772
1773 LoadInst *LoopLoad = dyn_cast<LoadInst>(LoopCond);
1774 if (!LoopLoad || LoopLoad->getPointerAddressSpace() != 0)
1775 return false;
1776
1777 OperandType = LoopLoad->getType();
1778 if (!OperandType || !OperandType->isIntegerTy())
1779 return false;
1780
1781 // See if the pointer expression is an AddRec with constant step a of form
1782 // ({n,+,a}) where a is the width of the char type.
1783 Value *IncPtr = LoopLoad->getPointerOperand();
1784 const SCEV *LoadEv = SE->getSCEV(IncPtr);
1785 const APInt *Step;
1786 if (!match(LoadEv,
1787 m_scev_AffineAddRec(m_SCEV(LoadBaseEv), m_scev_APInt(Step))))
1788 return false;
1789
1790 LLVM_DEBUG(dbgs() << "pointer load scev: " << *LoadEv << "\n");
1791
1792 unsigned StepSize = Step->getZExtValue();
1793
1794 // Verify that StepSize is consistent with platform char width.
1795 OpWidth = OperandType->getIntegerBitWidth();
1796 unsigned WcharSize = TLI->getWCharSize(*LoopLoad->getModule());
1797 if (OpWidth != StepSize * 8)
1798 return false;
1799 if (OpWidth != 8 && OpWidth != 16 && OpWidth != 32)
1800 return false;
1801 if (OpWidth >= 16)
1802 if (OpWidth != WcharSize * 8)
1803 return false;
1804
1805 // Scan every instruction in the loop to ensure there are no side effects.
1806 for (Instruction &I : *LoopBody)
1807 if (I.mayHaveSideEffects())
1808 return false;
1809
1810 BasicBlock *LoopExitBB = CurLoop->getExitBlock();
1811 if (!LoopExitBB)
1812 return false;
1813
1814 for (PHINode &PN : LoopExitBB->phis()) {
1815 if (!SE->isSCEVable(PN.getType()))
1816 return false;
1817
1818 const SCEV *Ev = SE->getSCEV(&PN);
1819 if (!Ev)
1820 return false;
1821
1822 LLVM_DEBUG(dbgs() << "loop exit phi scev: " << *Ev << "\n");
1823
1824 // Since we verified that the loop trip count will be a valid strlen
1825 // idiom, we can expand all lcssa phis with {n,+,1} as (n + strlen) and use
1826 // SCEVExpander to materialize the loop output.
1827 const SCEVAddRecExpr *AddRecEv = dyn_cast<SCEVAddRecExpr>(Ev);
1828 if (!AddRecEv || !AddRecEv->isAffine())
1829 return false;
1830
1831 // We only want an AddRecExpr with a constant recurrence step. This is
1832 // good enough for all the idioms we want to recognize. Later we expand
1833 // and materialize the recurrence as {base,+,a} -> (base + a * strlen).
1834 if (!isa<SCEVConstant>(AddRecEv->getStepRecurrence(*SE)))
1835 return false;
1836 }
1837
1838 return true;
1839 }
1840
1841public:
1842 const Loop *CurLoop;
1843 ScalarEvolution *SE;
1844 const TargetLibraryInfo *TLI;
1845
1846 unsigned OpWidth;
1847 ConstantInt *StepSizeCI;
1848 const SCEV *LoadBaseEv;
1849 Type *OperandType;
1850};
1851
1852} // namespace
1853
1854/// The Strlen Idiom we are trying to detect has the following structure
1855///
1856/// preheader:
1857/// ...
1858/// br label %body, ...
1859///
1860/// body:
1861/// ... ; %0 is incremented by a gep
1862/// %1 = load i8, ptr %0, align 1
1863/// %2 = icmp eq i8 %1, 0
1864/// br i1 %2, label %exit, label %body
1865///
1866/// exit:
1867/// %lcssa = phi [%0, %body], ...
1868///
1869 /// We expect the strlen idiom to have a load of a character type that
1870 /// is compared against '\0', and the load's pointer operand must have a SCEV
1871 /// expression of the form {%str,+,c}, where c is a ConstantInt of the
1872 /// appropriate character width for the idiom and %str is the base of the
1873 /// string. All lcssa phis must have the form {...,+,n} where n is a constant.
1874///
1875 /// When transforming the output of the strlen idiom, the lcssa phis are
1876/// expanded using SCEVExpander as {base scev,+,a} -> (base scev + a * strlen)
1877/// and all subsequent uses are replaced. For example,
1878///
1879/// \code{.c}
1880/// const char* base = str;
1881/// while (*str != '\0')
1882/// ++str;
1883/// size_t result = str - base;
1884/// \endcode
1885///
1886/// will be transformed as follows: The idiom will be replaced by a strlen
1887/// computation to compute the address of the null terminator of the string.
1888///
1889/// \code{.c}
1890/// const char* base = str;
1891/// const char* end = base + strlen(str);
1892/// size_t result = end - base;
1893/// \endcode
1894///
1895/// In the case we index by an induction variable, as long as the induction
1896/// variable has a constant int increment, we can replace all such indvars
1897 /// with the closed-form computation of strlen.
1898///
1899/// \code{.c}
1900/// size_t i = 0;
1901/// while (str[i] != '\0')
1902/// ++i;
1903/// size_t result = i;
1904/// \endcode
1905///
1906/// Will be replaced by
1907///
1908/// \code{.c}
1909/// size_t i = 0 + strlen(str);
1910/// size_t result = i;
1911/// \endcode
1912///
1913bool LoopIdiomRecognize::recognizeAndInsertStrLen() {
1914 if (DisableLIRP::All)
1915 return false;
1916
1917 StrlenVerifier Verifier(CurLoop, SE, TLI);
1918
1919 if (!Verifier.isValidStrlenIdiom())
1920 return false;
1921
1922 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1923 BasicBlock *LoopBody = *CurLoop->block_begin();
1924 BasicBlock *LoopExitBB = CurLoop->getExitBlock();
1925 BranchInst *LoopTerm = dyn_cast<BranchInst>(LoopBody->getTerminator());
1926 assert(Preheader && LoopBody && LoopExitBB && LoopTerm &&
1927 "Should be verified to be valid by StrlenVerifier");
1928
1929 if (Verifier.OpWidth == 8) {
1930 if (DisableLIRP::Strlen)
1931 return false;
1932 if (!isLibFuncEmittable(Preheader->getModule(), TLI, LibFunc_strlen))
1933 return false;
1934 } else {
1935 if (DisableLIRP::Wcslen)
1936 return false;
1937 if (!isLibFuncEmittable(Preheader->getModule(), TLI, LibFunc_wcslen))
1938 return false;
1939 }
1940
1941 IRBuilder<> Builder(Preheader->getTerminator());
1942 Builder.SetCurrentDebugLocation(CurLoop->getStartLoc());
1943 SCEVExpander Expander(*SE, "strlen_idiom");
1944 Value *MaterialzedBase = Expander.expandCodeFor(
1945 Verifier.LoadBaseEv, Verifier.LoadBaseEv->getType(),
1946 Builder.GetInsertPoint());
1947
1948 Value *StrLenFunc = nullptr;
1949 if (Verifier.OpWidth == 8) {
1950 StrLenFunc = emitStrLen(MaterialzedBase, Builder, *DL, TLI);
1951 } else {
1952 StrLenFunc = emitWcsLen(MaterialzedBase, Builder, *DL, TLI);
1953 }
1954 assert(StrLenFunc && "Failed to emit strlen function.");
1955
1956 const SCEV *StrlenEv = SE->getSCEV(StrLenFunc);
1957 SmallVector<PHINode *, 4> Cleanup;
1958 for (PHINode &PN : LoopExitBB->phis()) {
1959 // We can now materialize the loop output as all phi have scev {base,+,a}.
1960 // We expand the phi as:
1961 // %strlen = call i64 @strlen(%str)
1962 // %phi.new = base expression + step * %strlen
1963 const SCEV *Ev = SE->getSCEV(&PN);
1964 const SCEVAddRecExpr *AddRecEv = dyn_cast<SCEVAddRecExpr>(Ev);
1965 const SCEVConstant *Step =
1966 dyn_cast<SCEVConstant>(AddRecEv->getStepRecurrence(*SE));
1967 const SCEV *Base = AddRecEv->getStart();
1968
1969 // It is safe to truncate to the base type since, if the base is narrower
1970 // than size_t, the equivalent user code will have to truncate anyway.
1971 const SCEV *NewEv = SE->getAddExpr(
1972 Base, SE->getMulExpr(Step, SE->getTruncateOrSignExtend(
1973 StrlenEv, Base->getType())));
1974
1975 Value *MaterializedPHI = Expander.expandCodeFor(NewEv, NewEv->getType(),
1976 Builder.GetInsertPoint());
1977 Expander.clear();
1978 PN.replaceAllUsesWith(MaterializedPHI);
1979 Cleanup.push_back(&PN);
1980 }
1981
1982 // All LCSSA loop phis are dead; the leftover dead loop body can be cleaned
1983 // up by later passes.
1984 for (PHINode *PN : Cleanup)
1985 RecursivelyDeleteDeadPHINode(PN);
1986
1987 // LoopDeletion only deletes invariant loops with a known trip count. We can
1988 // update the condition so it will reliably delete the invariant loop.
1989 assert(LoopTerm->getNumSuccessors() == 2 &&
1990 (LoopTerm->getSuccessor(0) == LoopBody ||
1991 LoopTerm->getSuccessor(1) == LoopBody) &&
1992 "loop body must have a successor that is itself");
1993 ConstantInt *NewLoopCond = LoopTerm->getSuccessor(0) == LoopBody
1994 ? Builder.getFalse()
1995 : Builder.getTrue();
1996 LoopTerm->setCondition(NewLoopCond);
1997 SE->forgetLoop(CurLoop);
1998
1999 ++NumStrLen;
2000 LLVM_DEBUG(dbgs() << " Formed strlen idiom: " << *StrLenFunc << "\n");
2001 ORE.emit([&]() {
2002 return OptimizationRemark(DEBUG_TYPE, "recognizeAndInsertStrLen",
2003 CurLoop->getStartLoc(), Preheader)
2004 << "Transformed " << StrLenFunc->getName() << " loop idiom";
2005 });
2006
2007 return true;
2008}
2009
2010/// Check if the given conditional branch is based on an unsigned less-than
2011/// comparison between a variable and a constant, and if the comparison is false
2012 /// the control yields to the loop entry. If the branch matches this behavior,
2013 /// the variable involved in the comparison is returned.
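/// For example (illustrative): `br i1 (icmp ult i32 %x, 4), label %exit,
/// label %loop-entry` matches, returning %x with \p Threshold set to 4.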
2014 static Value *matchShiftULTCondition(BranchInst *BI, BasicBlock *LoopEntry,
2015 APInt &Threshold) {
2016 if (!BI || !BI->isConditional())
2017 return nullptr;
2018
2019 ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
2020 if (!Cond)
2021 return nullptr;
2022
2023 ConstantInt *CmpConst = dyn_cast<ConstantInt>(Cond->getOperand(1));
2024 if (!CmpConst)
2025 return nullptr;
2026
2027 BasicBlock *FalseSucc = BI->getSuccessor(1);
2028 ICmpInst::Predicate Pred = Cond->getPredicate();
2029
2030 if (Pred == ICmpInst::ICMP_ULT && FalseSucc == LoopEntry) {
2031 Threshold = CmpConst->getValue();
2032 return Cond->getOperand(0);
2033 }
2034
2035 return nullptr;
2036}
2037
2038// Check if the recurrence variable `VarX` is in the right form to create
2039// the idiom. Returns the value coerced to a PHINode if so.
2040 static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX,
2041 BasicBlock *LoopEntry) {
2042 auto *PhiX = dyn_cast<PHINode>(VarX);
2043 if (PhiX && PhiX->getParent() == LoopEntry &&
2044 (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX))
2045 return PhiX;
2046 return nullptr;
2047}
2048
2049/// Return true if the idiom is detected in the loop.
2050///
2051/// Additionally:
2052/// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ)
2053/// or nullptr if there is no such.
2054/// 2) \p CntPhi is set to the corresponding phi node
2055/// or nullptr if there is no such.
2056/// 3) \p InitX is set to the value whose CTLZ could be used.
2057/// 4) \p DefX is set to the instruction calculating Loop exit condition.
2058/// 5) \p Threshold is set to the constant involved in the unsigned less-than
2059/// comparison.
2060///
2061/// The core idiom we are trying to detect is:
2062/// \code
2063/// if (x0 < 2)
2064/// goto loop-exit // the precondition of the loop
2065/// cnt0 = init-val
2066/// do {
2067/// x = phi (x0, x.next); //PhiX
2068/// cnt = phi (cnt0, cnt.next)
2069///
2070/// cnt.next = cnt + 1;
2071/// ...
2072/// x.next = x >> 1; // DefX
2073/// } while (x >= 4)
2074/// loop-exit:
2075/// \endcode
2076 static bool detectShiftUntilLessThanIdiom(Loop *CurLoop, const DataLayout &DL,
2077 Intrinsic::ID &IntrinID,
2078 Value *&InitX, Instruction *&CntInst,
2079 PHINode *&CntPhi, Instruction *&DefX,
2080 APInt &Threshold) {
2081 BasicBlock *LoopEntry;
2082
2083 DefX = nullptr;
2084 CntInst = nullptr;
2085 CntPhi = nullptr;
2086 LoopEntry = *(CurLoop->block_begin());
2087
2088 // step 1: Check if the loop-back branch is in desirable form.
2089 if (Value *T = matchShiftULTCondition(
2090 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry,
2091 Threshold))
2092 DefX = dyn_cast<Instruction>(T);
2093 else
2094 return false;
2095
2096 // step 2: Check the recurrence of variable X
2097 if (!DefX || !isa<PHINode>(DefX))
2098 return false;
2099
2100 PHINode *VarPhi = cast<PHINode>(DefX);
2101 int Idx = VarPhi->getBasicBlockIndex(LoopEntry);
2102 if (Idx == -1)
2103 return false;
2104
2105 DefX = dyn_cast<Instruction>(VarPhi->getIncomingValue(Idx));
2106 if (!DefX || DefX->getNumOperands() == 0 || DefX->getOperand(0) != VarPhi)
2107 return false;
2108
2109 // step 3: detect instructions corresponding to "x.next = x >> 1"
2110 if (DefX->getOpcode() != Instruction::LShr)
2111 return false;
2112
2113 IntrinID = Intrinsic::ctlz;
2114 ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
2115 if (!Shft || !Shft->isOne())
2116 return false;
2117
2118 InitX = VarPhi->getIncomingValueForBlock(CurLoop->getLoopPreheader());
2119
2120 // step 4: Find the instruction which count the CTLZ: cnt.next = cnt + 1
2121 // or cnt.next = cnt + -1.
2122 // TODO: We can skip the step. If loop trip count is known (CTLZ),
2123 // then all uses of "cnt.next" could be optimized to the trip count
2124 // plus "cnt0". Currently it is not optimized.
2125 // This step could be used to detect POPCNT instruction:
2126 // cnt.next = cnt + (x.next & 1)
2127 for (Instruction &Inst :
2128 llvm::make_range(LoopEntry->getFirstNonPHIIt(), LoopEntry->end())) {
2129 if (Inst.getOpcode() != Instruction::Add)
2130 continue;
2131
2132 ConstantInt *Inc = dyn_cast<ConstantInt>(Inst.getOperand(1));
2133 if (!Inc || (!Inc->isOne() && !Inc->isMinusOne()))
2134 continue;
2135
2136 PHINode *Phi = getRecurrenceVar(Inst.getOperand(0), &Inst, LoopEntry);
2137 if (!Phi)
2138 continue;
2139
2140 CntInst = &Inst;
2141 CntPhi = Phi;
2142 break;
2143 }
2144 if (!CntInst)
2145 return false;
2146
2147 return true;
2148}
2149
2150/// Return true iff the idiom is detected in the loop.
2151///
2152/// Additionally:
2153/// 1) \p CntInst is set to the instruction counting the population bit.
2154/// 2) \p CntPhi is set to the corresponding phi node.
2155/// 3) \p Var is set to the value whose population bits are being counted.
2156///
2157/// The core idiom we are trying to detect is:
2158/// \code
2159/// if (x0 != 0)
2160/// goto loop-exit // the precondition of the loop
2161/// cnt0 = init-val;
2162/// do {
2163/// x1 = phi (x0, x2);
2164/// cnt1 = phi(cnt0, cnt2);
2165///
2166/// cnt2 = cnt1 + 1;
2167/// ...
2168/// x2 = x1 & (x1 - 1);
2169/// ...
2170/// } while(x != 0);
2171///
2172/// loop-exit:
2173/// \endcode
2174static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
2175 Instruction *&CntInst, PHINode *&CntPhi,
2176 Value *&Var) {
2177 // step 1: Check to see if the loop-back branch matches this pattern:
2178 // "if (a!=0) goto loop-entry".
2179 BasicBlock *LoopEntry;
2180 Instruction *DefX2, *CountInst;
2181 Value *VarX1, *VarX0;
2182 PHINode *PhiX, *CountPhi;
2183
2184 DefX2 = CountInst = nullptr;
2185 VarX1 = VarX0 = nullptr;
2186 PhiX = CountPhi = nullptr;
2187 LoopEntry = *(CurLoop->block_begin());
2188
2189 // step 1: Check if the loop-back branch is in desirable form.
2190 {
2191 if (Value *T = matchCondition(
2192 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
2193 DefX2 = dyn_cast<Instruction>(T);
2194 else
2195 return false;
2196 }
2197
2198 // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
2199 {
2200 if (!DefX2 || DefX2->getOpcode() != Instruction::And)
2201 return false;
2202
2203 BinaryOperator *SubOneOp;
2204
2205 if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
2206 VarX1 = DefX2->getOperand(1);
2207 else {
2208 VarX1 = DefX2->getOperand(0);
2209 SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
2210 }
2211 if (!SubOneOp || SubOneOp->getOperand(0) != VarX1)
2212 return false;
2213
2214 ConstantInt *Dec = dyn_cast<ConstantInt>(SubOneOp->getOperand(1));
2215 if (!Dec ||
2216 !((SubOneOp->getOpcode() == Instruction::Sub && Dec->isOne()) ||
2217 (SubOneOp->getOpcode() == Instruction::Add &&
2218 Dec->isMinusOne()))) {
2219 return false;
2220 }
2221 }
2222
2223 // step 3: Check the recurrence of variable X
2224 PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry);
2225 if (!PhiX)
2226 return false;
2227
2228 // step 4: Find the instruction which count the population: cnt2 = cnt1 + 1
2229 {
2230 CountInst = nullptr;
2231 for (Instruction &Inst :
2232 llvm::make_range(LoopEntry->getFirstNonPHIIt(), LoopEntry->end())) {
2233 if (Inst.getOpcode() != Instruction::Add)
2234 continue;
2235
2236 ConstantInt *Inc = dyn_cast<ConstantInt>(Inst.getOperand(1));
2237 if (!Inc || !Inc->isOne())
2238 continue;
2239
2240 PHINode *Phi = getRecurrenceVar(Inst.getOperand(0), &Inst, LoopEntry);
2241 if (!Phi)
2242 continue;
2243
2244 // Check if the result of the instruction is live out of the loop.
2245 bool LiveOutLoop = false;
2246 for (User *U : Inst.users()) {
2247 if ((cast<Instruction>(U))->getParent() != LoopEntry) {
2248 LiveOutLoop = true;
2249 break;
2250 }
2251 }
2252
2253 if (LiveOutLoop) {
2254 CountInst = &Inst;
2255 CountPhi = Phi;
2256 break;
2257 }
2258 }
2259
2260 if (!CountInst)
2261 return false;
2262 }
2263
2264 // step 5: check if the precondition is in this form:
2265 // "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
2266 {
2267 auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
2268 Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
2269 if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
2270 return false;
2271
2272 CntInst = CountInst;
2273 CntPhi = CountPhi;
2274 Var = T;
2275 }
2276
2277 return true;
2278}
2279
2280/// Return true if the idiom is detected in the loop.
2281///
2282/// Additionally:
2283/// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ)
2284/// or nullptr if there is no such.
2285/// 2) \p CntPhi is set to the corresponding phi node
2286/// or nullptr if there is no such.
2287/// 3) \p Var is set to the value whose CTLZ could be used.
2288/// 4) \p DefX is set to the instruction calculating Loop exit condition.
2289///
2290/// The core idiom we are trying to detect is:
2291/// \code
2292/// if (x0 == 0)
2293/// goto loop-exit // the precondition of the loop
2294/// cnt0 = init-val;
2295/// do {
2296/// x = phi (x0, x.next); //PhiX
2297/// cnt = phi(cnt0, cnt.next);
2298///
2299/// cnt.next = cnt + 1;
2300/// ...
2301/// x.next = x >> 1; // DefX
2302/// ...
2303/// } while(x.next != 0);
2304///
2305/// loop-exit:
2306/// \endcode
2307static bool detectShiftUntilZeroIdiom(Loop *CurLoop, const DataLayout &DL,
2308 Intrinsic::ID &IntrinID, Value *&InitX,
2309 Instruction *&CntInst, PHINode *&CntPhi,
2310 Instruction *&DefX) {
2311 BasicBlock *LoopEntry;
2312 Value *VarX = nullptr;
2313
2314 DefX = nullptr;
2315 CntInst = nullptr;
2316 CntPhi = nullptr;
2317 LoopEntry = *(CurLoop->block_begin());
2318
2319 // step 1: Check if the loop-back branch is in desirable form.
2320 if (Value *T = matchCondition(
2321 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
2322 DefX = dyn_cast<Instruction>(T);
2323 else
2324 return false;
2325
2326 // step 2: detect instructions corresponding to "x.next = x >> 1 or x << 1"
2327 if (!DefX || !DefX->isShift())
2328 return false;
2329 IntrinID = DefX->getOpcode() == Instruction::Shl ? Intrinsic::cttz :
2330 Intrinsic::ctlz;
2331 ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
2332 if (!Shft || !Shft->isOne())
2333 return false;
2334 VarX = DefX->getOperand(0);
2335
2336 // step 3: Check the recurrence of variable X
2337 PHINode *PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
2338 if (!PhiX)
2339 return false;
2340
2341 InitX = PhiX->getIncomingValueForBlock(CurLoop->getLoopPreheader());
2342
2343 // Make sure the initial value can't be negative otherwise the ashr in the
2344 // loop might never reach zero which would make the loop infinite.
2345 if (DefX->getOpcode() == Instruction::AShr && !isKnownNonNegative(InitX, DL))
2346 return false;
2347
2348 // step 4: Find the instruction which count the CTLZ: cnt.next = cnt + 1
2349 // or cnt.next = cnt + -1.
2350 // TODO: We can skip the step. If loop trip count is known (CTLZ),
2351 // then all uses of "cnt.next" could be optimized to the trip count
2352 // plus "cnt0". Currently it is not optimized.
2353 // This step could be used to detect POPCNT instruction:
2354 // cnt.next = cnt + (x.next & 1)
2355 for (Instruction &Inst :
2356 llvm::make_range(LoopEntry->getFirstNonPHIIt(), LoopEntry->end())) {
2357 if (Inst.getOpcode() != Instruction::Add)
2358 continue;
2359
2360 ConstantInt *Inc = dyn_cast<ConstantInt>(Inst.getOperand(1));
2361 if (!Inc || (!Inc->isOne() && !Inc->isMinusOne()))
2362 continue;
2363
2364 PHINode *Phi = getRecurrenceVar(Inst.getOperand(0), &Inst, LoopEntry);
2365 if (!Phi)
2366 continue;
2367
2368 CntInst = &Inst;
2369 CntPhi = Phi;
2370 break;
2371 }
2372 if (!CntInst)
2373 return false;
2374
2375 return true;
2376}
2377
2378// Check if CTLZ / CTTZ intrinsic is profitable. Assume it is always
2379// profitable if we delete the loop.
2380bool LoopIdiomRecognize::isProfitableToInsertFFS(Intrinsic::ID IntrinID,
2381 Value *InitX, bool ZeroCheck,
2382 size_t CanonicalSize) {
2383 const Value *Args[] = {InitX,
2384 ConstantInt::getBool(InitX->getContext(), ZeroCheck)};
2385
2386 // @llvm.dbg doesn't count as they have no semantic effect.
2387 auto InstWithoutDebugIt = CurLoop->getHeader()->instructionsWithoutDebug();
2388 uint32_t HeaderSize =
2389 std::distance(InstWithoutDebugIt.begin(), InstWithoutDebugIt.end());
2390
2391 IntrinsicCostAttributes Attrs(IntrinID, InitX->getType(), Args);
2392 InstructionCost Cost = TTI->getIntrinsicInstrCost(
2393 Attrs, TargetTransformInfo::TCK_SizeAndLatency);
2394 if (HeaderSize != CanonicalSize && Cost > TargetTransformInfo::TCC_Basic)
2395 return false;
2396
2397 return true;
2398}
2399
2400/// Convert CTLZ / CTTZ idiom loop into countable loop.
2401/// If CTLZ / CTTZ inserted as a new trip count returns true; otherwise,
2402/// returns false.
2403bool LoopIdiomRecognize::insertFFSIfProfitable(Intrinsic::ID IntrinID,
2404 Value *InitX, Instruction *DefX,
2405 PHINode *CntPhi,
2406 Instruction *CntInst) {
2407 bool IsCntPhiUsedOutsideLoop = false;
2408 for (User *U : CntPhi->users())
2409 if (!CurLoop->contains(cast<Instruction>(U))) {
2410 IsCntPhiUsedOutsideLoop = true;
2411 break;
2412 }
2413 bool IsCntInstUsedOutsideLoop = false;
2414 for (User *U : CntInst->users())
2415 if (!CurLoop->contains(cast<Instruction>(U))) {
2416 IsCntInstUsedOutsideLoop = true;
2417 break;
2418 }
2419 // If both CntInst and CntPhi are used outside the loop the profitability
2420 // is questionable.
2421 if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
2422 return false;
2423
2424 // For some CPUs the result of the CTLZ(X) intrinsic is undefined when X
2425 // is 0. If we cannot guarantee X != 0, we need to check for this case when
2426 // expanding the intrinsic.
2427 bool ZeroCheck = false;
2428 // It is safe to assume the Preheader exists, as it was checked in the
2429 // parent function runOnLoop.
2430 BasicBlock *PH = CurLoop->getLoopPreheader();
2431
2432 // If we are using the count instruction outside the loop, make sure we
2433 // have a zero check as a precondition. Without the check the loop would run
2434 // one iteration before any check of the input value, so inputs 0 and 1 would
2435 // behave identically there; the precondition lets the transform tell them apart.
2436 if (!IsCntPhiUsedOutsideLoop) {
2437 auto *PreCondBB = PH->getSinglePredecessor();
2438 if (!PreCondBB)
2439 return false;
2440 auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
2441 if (!PreCondBI)
2442 return false;
2443 if (matchCondition(PreCondBI, PH) != InitX)
2444 return false;
2445 ZeroCheck = true;
2446 }
2447
2448 // FFS idiom loop has only 6 instructions:
2449 // %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
2450 // %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
2451 // %shr = ashr %n.addr.0, 1
2452 // %tobool = icmp eq %shr, 0
2453 // %inc = add nsw %i.0, 1
2454 // br i1 %tobool
2455 size_t IdiomCanonicalSize = 6;
2456 if (!isProfitableToInsertFFS(IntrinID, InitX, ZeroCheck, IdiomCanonicalSize))
2457 return false;
2458
2459 transformLoopToCountable(IntrinID, PH, CntInst, CntPhi, InitX, DefX,
2460 DefX->getDebugLoc(), ZeroCheck,
2461 IsCntPhiUsedOutsideLoop);
2462 return true;
2463}
2464
2465/// Recognize CTLZ or CTTZ idiom in a non-countable loop and convert the loop
2466/// to countable (with CTLZ / CTTZ trip count). If CTLZ / CTTZ inserted as a new
2467/// trip count returns true; otherwise, returns false.
2468bool LoopIdiomRecognize::recognizeAndInsertFFS() {
2469 // Give up if the loop has multiple blocks or multiple backedges.
2470 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
2471 return false;
2472
2473 Intrinsic::ID IntrinID;
2474 Value *InitX;
2475 Instruction *DefX = nullptr;
2476 PHINode *CntPhi = nullptr;
2477 Instruction *CntInst = nullptr;
2478
2479 if (!detectShiftUntilZeroIdiom(CurLoop, *DL, IntrinID, InitX, CntInst, CntPhi,
2480 DefX))
2481 return false;
2482
2483 return insertFFSIfProfitable(IntrinID, InitX, DefX, CntPhi, CntInst);
2484}
2485
2486bool LoopIdiomRecognize::recognizeShiftUntilLessThan() {
2487 // Give up if the loop has multiple blocks or multiple backedges.
2488 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
2489 return false;
2490
2491 Intrinsic::ID IntrinID;
2492 Value *InitX;
2493 Instruction *DefX = nullptr;
2494 PHINode *CntPhi = nullptr;
2495 Instruction *CntInst = nullptr;
2496
2497 APInt LoopThreshold;
2498 if (!detectShiftUntilLessThanIdiom(CurLoop, *DL, IntrinID, InitX, CntInst,
2499 CntPhi, DefX, LoopThreshold))
2500 return false;
2501
2502 if (LoopThreshold == 2) {
2503 // Treat as regular FFS.
2504 return insertFFSIfProfitable(IntrinID, InitX, DefX, CntPhi, CntInst);
2505 }
2506
2507 // Look for Floor Log2 Idiom.
2508 if (LoopThreshold != 4)
2509 return false;
2510
2511 // Abort if CntPhi is used outside of the loop.
2512 for (User *U : CntPhi->users())
2513 if (!CurLoop->contains(cast<Instruction>(U)))
2514 return false;
2515
2516 // It is safe to assume the Preheader exists, as it was checked in the
2517 // parent function runOnLoop.
2518 BasicBlock *PH = CurLoop->getLoopPreheader();
2519 auto *PreCondBB = PH->getSinglePredecessor();
2520 if (!PreCondBB)
2521 return false;
2522 auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
2523 if (!PreCondBI)
2524 return false;
2525
2526 APInt PreLoopThreshold;
2527 if (matchShiftULTCondition(PreCondBI, PH, PreLoopThreshold) != InitX ||
2528 PreLoopThreshold != 2)
2529 return false;
2530
2531 bool ZeroCheck = true;
2532
2533 // The loop has only 6 instructions:
2534 // %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
2535 // %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
2536 // %shr = ashr %n.addr.0, 1
2537 // %tobool = icmp ult %n.addr.0, C
2538 // %inc = add nsw %i.0, 1
2539 // br i1 %tobool
2540 size_t IdiomCanonicalSize = 6;
2541 if (!isProfitableToInsertFFS(IntrinID, InitX, ZeroCheck, IdiomCanonicalSize))
2542 return false;
2543
2544 // log2(x) = w − 1 − clz(x)
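// For example, with w = 32 and x = 13 (0b1101): clz(13) = 28, so
// log2(13) = 32 - 1 - 28 = 3 = floor(log2(13)).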
2545 transformLoopToCountable(IntrinID, PH, CntInst, CntPhi, InitX, DefX,
2546 DefX->getDebugLoc(), ZeroCheck,
2547 /*IsCntPhiUsedOutsideLoop=*/false,
2548 /*InsertSub=*/true);
2549 return true;
2550}
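
// For illustration, the floor-log2 idiom recognized above corresponds to a
// source loop of roughly this shape (an equivalent hypothetical C sketch; the
// matched IR compares the pre-shift value against 4):
//
//   unsigned cnt = 0;
//   if (x >= 2)
//     do
//       ++cnt;
//     while ((x >>= 1) >= 2);
//
// After the transform, cnt is computed directly as w - 1 - clz(x), with no
// loop.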
2551
2552/// Recognizes a population count idiom in a non-countable loop.
2553///
2554/// If detected, transforms the relevant code to issue the popcount intrinsic
2555/// function call, and returns true; otherwise, returns false.
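///
/// For example (an illustrative C sketch), a loop such as
/// \code{.c}
///   int cnt = 0;
///   while (x) {
///     x &= x - 1;
///     ++cnt;
///   }
/// \endcode
/// is rewritten so that cnt is computed with a ctpop intrinsic, conceptually
/// cnt = __builtin_popcount(x).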
2556bool LoopIdiomRecognize::recognizePopcount() {
2557 if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
2558 return false;
2559
2560 // Population counts are usually computed with a few arithmetic
2561 // instructions. Such instructions can easily be "absorbed" by vacant slots
2562 // in a non-compact loop. Therefore, recognizing the popcount idiom only
2563 // makes sense in a compact loop.
2564
2565 // Give up if the loop has multiple blocks or multiple backedges.
2566 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
2567 return false;
2568
2569 BasicBlock *LoopBody = *(CurLoop->block_begin());
2570 if (LoopBody->size() >= 20) {
2571 // The loop is too big, bail out.
2572 return false;
2573 }
2574
2575 // It should have a preheader containing nothing but an unconditional branch.
2576 BasicBlock *PH = CurLoop->getLoopPreheader();
2577 if (!PH || &PH->front() != PH->getTerminator())
2578 return false;
2579 auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
2580 if (!EntryBI || EntryBI->isConditional())
2581 return false;
2582
2583 // It should have a precondition block where the generated popcount intrinsic
2584 // function can be inserted.
2585 auto *PreCondBB = PH->getSinglePredecessor();
2586 if (!PreCondBB)
2587 return false;
2588 auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
2589 if (!PreCondBI || PreCondBI->isUnconditional())
2590 return false;
2591
2592 Instruction *CntInst;
2593 PHINode *CntPhi;
2594 Value *Val;
2595 if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
2596 return false;
2597
2598 transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
2599 return true;
2600}
2601
2602 static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
2603 const DebugLoc &DL) {
2604 Value *Ops[] = {Val};
2605 Type *Tys[] = {Val->getType()};
2606
2607 CallInst *CI = IRBuilder.CreateIntrinsic(Intrinsic::ctpop, Tys, Ops);
2608 CI->setDebugLoc(DL);
2609
2610 return CI;
2611}
2612
2613 static CallInst *createFFSIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
2614 const DebugLoc &DL, bool ZeroCheck,
2615 Intrinsic::ID IID) {
2616 Value *Ops[] = {Val, IRBuilder.getInt1(ZeroCheck)};
2617 Type *Tys[] = {Val->getType()};
2618
2619 CallInst *CI = IRBuilder.CreateIntrinsic(IID, Tys, Ops);
2620 CI->setDebugLoc(DL);
2621
2622 return CI;
2623}
2624
2625/// Transform the following loop (Using CTLZ, CTTZ is similar):
2626/// loop:
2627/// CntPhi = PHI [Cnt0, CntInst]
2628/// PhiX = PHI [InitX, DefX]
2629/// CntInst = CntPhi + 1
2630/// DefX = PhiX >> 1
2631/// LOOP_BODY
2632/// Br: loop if (DefX != 0)
2633/// Use(CntPhi) or Use(CntInst)
2634///
2635/// Into:
2636/// If CntPhi used outside the loop:
2637/// CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
2638/// Count = CountPrev + 1
2639/// else
2640/// Count = BitWidth(InitX) - CTLZ(InitX)
2641/// loop:
2642/// CntPhi = PHI [Cnt0, CntInst]
2643/// PhiX = PHI [InitX, DefX]
2644/// PhiCount = PHI [Count, Dec]
2645/// CntInst = CntPhi + 1
2646/// DefX = PhiX >> 1
2647/// Dec = PhiCount - 1
2648/// LOOP_BODY
2649/// Br: loop if (Dec != 0)
2650/// Use(CountPrev + Cnt0) // Use(CntPhi)
2651/// or
2652/// Use(Count + Cnt0) // Use(CntInst)
2653///
2654/// If LOOP_BODY is empty the loop will be deleted.
2655/// If CntInst and DefX are not used in LOOP_BODY they will be removed.
2656void LoopIdiomRecognize::transformLoopToCountable(
2657 Intrinsic::ID IntrinID, BasicBlock *Preheader, Instruction *CntInst,
2658 PHINode *CntPhi, Value *InitX, Instruction *DefX, const DebugLoc &DL,
2659 bool ZeroCheck, bool IsCntPhiUsedOutsideLoop, bool InsertSub) {
2660 BranchInst *PreheaderBr = cast<BranchInst>(Preheader->getTerminator());
2661
2662 // Step 1: Insert the CTLZ/CTTZ instruction at the end of the preheader block
2663 IRBuilder<> Builder(PreheaderBr);
2664 Builder.SetCurrentDebugLocation(DL);
2665
2666 // If there are no uses of CntPhi, create:
2667 // Count = BitWidth - CTLZ(InitX);
2668 // NewCount = Count;
2669 // If there are uses of CntPhi, create:
2670 // NewCount = BitWidth - CTLZ(InitX >> 1);
2671 // Count = NewCount + 1;
2672 Value *InitXNext;
2673 if (IsCntPhiUsedOutsideLoop) {
2674 if (DefX->getOpcode() == Instruction::AShr)
2675 InitXNext = Builder.CreateAShr(InitX, 1);
2676 else if (DefX->getOpcode() == Instruction::LShr)
2677 InitXNext = Builder.CreateLShr(InitX, 1);
2678 else if (DefX->getOpcode() == Instruction::Shl) // cttz
2679 InitXNext = Builder.CreateShl(InitX, 1);
2680 else
2681 llvm_unreachable("Unexpected opcode!");
2682 } else
2683 InitXNext = InitX;
2684 Value *Count =
2685 createFFSIntrinsic(Builder, InitXNext, DL, ZeroCheck, IntrinID);
2686 Type *CountTy = Count->getType();
2687 Count = Builder.CreateSub(
2688 ConstantInt::get(CountTy, CountTy->getIntegerBitWidth()), Count);
2689 if (InsertSub)
2690 Count = Builder.CreateSub(Count, ConstantInt::get(CountTy, 1));
2691 Value *NewCount = Count;
2692 if (IsCntPhiUsedOutsideLoop)
2693 Count = Builder.CreateAdd(Count, ConstantInt::get(CountTy, 1));
2694
2695 NewCount = Builder.CreateZExtOrTrunc(NewCount, CntInst->getType());
2696
2697 Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
2698 if (cast<ConstantInt>(CntInst->getOperand(1))->isOne()) {
2699 // If the counter was being incremented in the loop, add NewCount to the
2700 // counter's initial value, but only if the initial value is not zero.
2701 ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
2702 if (!InitConst || !InitConst->isZero())
2703 NewCount = Builder.CreateAdd(NewCount, CntInitVal);
2704 } else {
2705 // If the count was being decremented in the loop, subtract NewCount from
2706 // the counter's initial value.
2707 NewCount = Builder.CreateSub(CntInitVal, NewCount);
2708 }
2709
2710 // Step 2: Insert new IV and loop condition:
2711 // loop:
2712 // ...
2713 // PhiCount = PHI [Count, Dec]
2714 // ...
2715 // Dec = PhiCount - 1
2716 // ...
2717 // Br: loop if (Dec != 0)
2718 BasicBlock *Body = *(CurLoop->block_begin());
2719 auto *LbBr = cast<BranchInst>(Body->getTerminator());
2720 ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
2721
2722 PHINode *TcPhi = PHINode::Create(CountTy, 2, "tcphi");
2723 TcPhi->insertBefore(Body->begin());
2724
2725 Builder.SetInsertPoint(LbCond);
2726 Instruction *TcDec = cast<Instruction>(Builder.CreateSub(
2727 TcPhi, ConstantInt::get(CountTy, 1), "tcdec", false, true));
2728
2729 TcPhi->addIncoming(Count, Preheader);
2730 TcPhi->addIncoming(TcDec, Body);
2731
2732 CmpInst::Predicate Pred =
2733 (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
2734 LbCond->setPredicate(Pred);
2735 LbCond->setOperand(0, TcDec);
2736 LbCond->setOperand(1, ConstantInt::get(CountTy, 0));
2737
2738 // Step 3: All the references to the original counter outside
2739 // the loop are replaced with the NewCount
2740 if (IsCntPhiUsedOutsideLoop)
2741 CntPhi->replaceUsesOutsideBlock(NewCount, Body);
2742 else
2743 CntInst->replaceUsesOutsideBlock(NewCount, Body);
2744
2745 // step 4: Forget the "non-computable" trip-count SCEV associated with the
2746 // loop. The loop would otherwise not be deleted even if it becomes empty.
2747 SE->forgetLoop(CurLoop);
2748}
2749
2750void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
2751 Instruction *CntInst,
2752 PHINode *CntPhi, Value *Var) {
2753 BasicBlock *PreHead = CurLoop->getLoopPreheader();
2754 auto *PreCondBr = cast<BranchInst>(PreCondBB->getTerminator());
2755 const DebugLoc &DL = CntInst->getDebugLoc();
2756
2757 // Assuming before transformation, the loop is following:
2758 // if (x) // the precondition
2759 // do { cnt++; x &= x - 1; } while(x);
2760
2761 // Step 1: Insert the ctpop instruction at the end of the precondition block
2762 IRBuilder<> Builder(PreCondBr);
2763 Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
2764 {
2765 PopCnt = createPopcntIntrinsic(Builder, Var, DL);
2766 NewCount = PopCntZext =
2767 Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));
2768
2769 if (NewCount != PopCnt)
2770 (cast<Instruction>(NewCount))->setDebugLoc(DL);
2771
2772 // TripCnt is exactly the number of iterations the loop has
2773 TripCnt = NewCount;
2774
2775 // If the population counter's initial value is not zero, insert Add Inst.
2776 Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
2777 ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
2778 if (!InitConst || !InitConst->isZero()) {
2779 NewCount = Builder.CreateAdd(NewCount, CntInitVal);
2780 (cast<Instruction>(NewCount))->setDebugLoc(DL);
2781 }
2782 }
2783
2784 // Step 2: Replace the precondition from "if (x == 0) goto loop-exit" to
2785 // "if (NewCount == 0) loop-exit". Without this change, the intrinsic
2786 // function would be partial dead code, and downstream passes will drag
2787 // it back from the precondition block to the preheader.
2788 {
2789 ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());
2790
2791 Value *Opnd0 = PopCntZext;
2792 Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
2793 if (PreCond->getOperand(0) != Var)
2794 std::swap(Opnd0, Opnd1);
2795
2796 ICmpInst *NewPreCond = cast<ICmpInst>(
2797 Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
2798 PreCondBr->setCondition(NewPreCond);
2799
2799
2800 RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
2801 }
2802
2803 // Step 3: Note that the population count is exactly the trip count of the
2804 // loop in question, which enables us to convert the loop from a noncountable
2805 // loop into a countable one. The benefit is twofold:
2806 //
2807 // - If the loop only counts population, the entire loop becomes dead after
2808 // the transformation. It is a lot easier to prove a countable loop dead
2809 // than to prove a noncountable one. (In some C dialects, an infinite loop
2810 // isn't dead even if it computes nothing useful. In general, DCE needs
2811 // to prove a noncountable loop finite before safely deleting it.)
2812 //
2813 // - If the loop also performs something else, it remains alive.
2814 // Since it is transformed to countable form, it can be aggressively
2815 // optimized by some optimizations which are in general not applicable
2816 // to a noncountable loop.
2817 //
2818 // After this step, this loop (conceptually) would look like following:
2819 // newcnt = __builtin_ctpop(x);
2820 // t = newcnt;
2821 // if (x)
2822 // do { cnt++; x &= x-1; t--; } while (t > 0);
2823 BasicBlock *Body = *(CurLoop->block_begin());
2824 {
2825 auto *LbBr = cast<BranchInst>(Body->getTerminator());
2826 ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
2827 Type *Ty = TripCnt->getType();
2828
2829 PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi");
2830 TcPhi->insertBefore(Body->begin());
2831
2832 Builder.SetInsertPoint(LbCond);
2833 Instruction *TcDec = cast<Instruction>(
2834 Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
2835 "tcdec", false, true));
2836
2837 TcPhi->addIncoming(TripCnt, PreHead);
2838 TcPhi->addIncoming(TcDec, Body);
2839
2840 CmpInst::Predicate Pred =
2841 (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
2842 LbCond->setPredicate(Pred);
2843 LbCond->setOperand(0, TcDec);
2844 LbCond->setOperand(1, ConstantInt::get(Ty, 0));
2845 }
2846
2847 // Step 4: All the references to the original population counter outside
2848 // the loop are replaced with the NewCount -- the value returned from
2849 // __builtin_ctpop().
2850 CntInst->replaceUsesOutsideBlock(NewCount, Body);
2851
2852 // step 5: Forget the "non-computable" trip-count SCEV associated with the
2853 // loop. The loop would otherwise not be deleted even if it becomes empty.
2854 SE->forgetLoop(CurLoop);
2855}
2856
2857/// Match loop-invariant value.
2858template <typename SubPattern_t> struct match_LoopInvariant {
2859 SubPattern_t SubPattern;
2860 const Loop *L;
2861
2862 match_LoopInvariant(const SubPattern_t &SP, const Loop *L)
2863 : SubPattern(SP), L(L) {}
2864
2865 template <typename ITy> bool match(ITy *V) const {
2866 return L->isLoopInvariant(V) && SubPattern.match(V);
2867 }
2868};
2869
2870/// Matches if the value is loop-invariant.
2871template <typename Ty>
2872inline match_LoopInvariant<Ty> m_LoopInvariant(const Ty &M, const Loop *L) {
2873 return match_LoopInvariant<Ty>(M, L);
2874}
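
// For example (usage sketch), the bit-test detection below writes
//   m_LoopInvariant(m_Shl(m_One(), m_Value(BitPos)), CurLoop)
// to match `1 << %bitpos` only when that whole expression is invariant in
// CurLoop.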
2875
2876/// Return true if the idiom is detected in the loop.
2877///
2878/// The core idiom we are trying to detect is:
2879/// \code
2880/// entry:
2881/// <...>
2882/// %bitmask = shl i32 1, %bitpos
2883/// br label %loop
2884///
2885/// loop:
2886/// %x.curr = phi i32 [ %x, %entry ], [ %x.next, %loop ]
2887/// %x.curr.bitmasked = and i32 %x.curr, %bitmask
2888/// %x.curr.isbitunset = icmp eq i32 %x.curr.bitmasked, 0
2889/// %x.next = shl i32 %x.curr, 1
2890/// <...>
2891/// br i1 %x.curr.isbitunset, label %loop, label %end
2892///
2893/// end:
2894/// %x.curr.res = phi i32 [ %x.curr, %loop ] <...>
2895/// %x.next.res = phi i32 [ %x.next, %loop ] <...>
2896/// <...>
2897/// \endcode
2898static bool detectShiftUntilBitTestIdiom(Loop *CurLoop, Value *&BaseX,
2899 Value *&BitMask, Value *&BitPos,
2900 Value *&CurrX, Instruction *&NextX) {
2902 " Performing shift-until-bittest idiom detection.\n");
2903
2904 // Give up if the loop has multiple blocks or multiple backedges.
2905 if (CurLoop->getNumBlocks() != 1 || CurLoop->getNumBackEdges() != 1) {
2906 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad block/backedge count.\n");
2907 return false;
2908 }
2909
2910 BasicBlock *LoopHeaderBB = CurLoop->getHeader();
2911 BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader();
2912 assert(LoopPreheaderBB && "There is always a loop preheader.");
2913
2914 using namespace PatternMatch;
2915
2916 // Step 1: Check if the loop backedge is in desirable form.
2917
2918 CmpPredicate Pred;
2919 Value *CmpLHS, *CmpRHS;
2920 BasicBlock *TrueBB, *FalseBB;
2921 if (!match(LoopHeaderBB->getTerminator(),
2922 m_Br(m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS)),
2923 m_BasicBlock(TrueBB), m_BasicBlock(FalseBB)))) {
2924 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge structure.\n");
2925 return false;
2926 }
2927
2928 // Step 2: Check if the backedge's condition is in desirable form.
2929
2930 auto MatchVariableBitMask = [&]() {
2931 return ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero()) &&
2932 match(CmpLHS,
2933 m_c_And(m_Value(CurrX),
2934 m_CombineAnd(
2935 m_Value(BitMask),
2936 m_LoopInvariant(m_Shl(m_One(), m_Value(BitPos)),
2937 CurLoop))));
2938 };
2939
2940 auto MatchDecomposableConstantBitMask = [&]() {
2941 auto Res = llvm::decomposeBitTestICmp(
2942 CmpLHS, CmpRHS, Pred, /*LookThroughTrunc=*/true,
2943 /*AllowNonZeroC=*/false, /*DecomposeAnd=*/true);
2944 if (Res && Res->Mask.isPowerOf2()) {
2945 assert(ICmpInst::isEquality(Res->Pred));
2946 Pred = Res->Pred;
2947 CurrX = Res->X;
2948 BitMask = ConstantInt::get(CurrX->getType(), Res->Mask);
2949 BitPos = ConstantInt::get(CurrX->getType(), Res->Mask.logBase2());
2950 return true;
2951 }
2952 return false;
2953 };
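// For example (illustrative), `icmp eq (and %x, 8), 0` decomposes to
// Mask = 8, so BitMask becomes 8 and BitPos becomes 3 (= log2(8)).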
2954
2955 if (!MatchVariableBitMask() && !MatchDecomposableConstantBitMask()) {
2956 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge comparison.\n");
2957 return false;
2958 }
2959
2960 // Step 3: Check if the recurrence is in desirable form.
2961 auto *CurrXPN = dyn_cast<PHINode>(CurrX);
2962 if (!CurrXPN || CurrXPN->getParent() != LoopHeaderBB) {
2963 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Not an expected PHI node.\n");
2964 return false;
2965 }
2966
2967 BaseX = CurrXPN->getIncomingValueForBlock(LoopPreheaderBB);
2968 NextX =
2969 dyn_cast<Instruction>(CurrXPN->getIncomingValueForBlock(LoopHeaderBB));
2970
2971 assert(CurLoop->isLoopInvariant(BaseX) &&
2972 "Expected BaseX to be available in the preheader!");
2973
2974 if (!NextX || !match(NextX, m_Shl(m_Specific(CurrX), m_One()))) {
2975 // FIXME: support right-shift?
2976 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad recurrence.\n");
2977 return false;
2978 }
2979
2980 // Step 4: Check if the backedge's destinations are in desirable form.
2981
2983 "Should only get equality predicates here.");
2984
2985 // cmp-br is commutative, so canonicalize to a single variant.
2986 if (Pred != ICmpInst::Predicate::ICMP_EQ) {
2987 Pred = ICmpInst::getInversePredicate(Pred);
2988 std::swap(TrueBB, FalseBB);
2989 }
2990
2991 // We expect to exit loop when comparison yields false,
2992 // so when it yields true we should branch back to loop header.
2993 if (TrueBB != LoopHeaderBB) {
2994 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge flow.\n");
2995 return false;
2996 }
2997
2998 // Okay, idiom checks out.
2999 return true;
3000}
3001
3002/// Look for the following loop:
3003/// \code
3004/// entry:
3005/// <...>
3006/// %bitmask = shl i32 1, %bitpos
3007/// br label %loop
3008///
3009/// loop:
3010/// %x.curr = phi i32 [ %x, %entry ], [ %x.next, %loop ]
3011/// %x.curr.bitmasked = and i32 %x.curr, %bitmask
3012/// %x.curr.isbitunset = icmp eq i32 %x.curr.bitmasked, 0
3013/// %x.next = shl i32 %x.curr, 1
3014/// <...>
3015/// br i1 %x.curr.isbitunset, label %loop, label %end
3016///
3017/// end:
3018/// %x.curr.res = phi i32 [ %x.curr, %loop ] <...>
3019/// %x.next.res = phi i32 [ %x.next, %loop ] <...>
3020/// <...>
3021/// \endcode
3022///
3023/// And transform it into:
3024/// \code
3025/// entry:
3026/// %bitmask = shl i32 1, %bitpos
3027/// %lowbitmask = add i32 %bitmask, -1
3028/// %mask = or i32 %lowbitmask, %bitmask
3029/// %x.masked = and i32 %x, %mask
3030/// %x.masked.numleadingzeros = call i32 @llvm.ctlz.i32(i32 %x.masked,
3031/// i1 true)
3032/// %x.masked.numactivebits = sub i32 32, %x.masked.numleadingzeros
3033/// %x.masked.leadingonepos = add i32 %x.masked.numactivebits, -1
3034/// %backedgetakencount = sub i32 %bitpos, %x.masked.leadingonepos
3035/// %tripcount = add i32 %backedgetakencount, 1
3036/// %x.curr = shl i32 %x, %backedgetakencount
3037/// %x.next = shl i32 %x, %tripcount
3038/// br label %loop
3039///
3040/// loop:
3041/// %loop.iv = phi i32 [ 0, %entry ], [ %loop.iv.next, %loop ]
3042/// %loop.iv.next = add nuw i32 %loop.iv, 1
3043/// %loop.ivcheck = icmp eq i32 %loop.iv.next, %tripcount
3044/// <...>
3045/// br i1 %loop.ivcheck, label %end, label %loop
3046///
3047/// end:
3048/// %x.curr.res = phi i32 [ %x.curr, %loop ] <...>
3049/// %x.next.res = phi i32 [ %x.next, %loop ] <...>
3050/// <...>
3051/// \endcode
3052bool LoopIdiomRecognize::recognizeShiftUntilBitTest() {
3053 bool MadeChange = false;
3054
3055 Value *X, *BitMask, *BitPos, *XCurr;
3056 Instruction *XNext;
3057 if (!detectShiftUntilBitTestIdiom(CurLoop, X, BitMask, BitPos, XCurr,
3058 XNext)) {
3060 " shift-until-bittest idiom detection failed.\n");
3061 return MadeChange;
3062 }
3063 LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-bittest idiom detected!\n");
3064
3065 // Ok, it is the idiom we were looking for, we *could* transform this loop,
3066 // but is it profitable to transform?
3067
3068 BasicBlock *LoopHeaderBB = CurLoop->getHeader();
3069 BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader();
3070 assert(LoopPreheaderBB && "There is always a loop preheader.");
3071
3072 BasicBlock *SuccessorBB = CurLoop->getExitBlock();
3073 assert(SuccessorBB && "There is only a single successor.");
3074
3075 IRBuilder<> Builder(LoopPreheaderBB->getTerminator());
3076 Builder.SetCurrentDebugLocation(cast<Instruction>(XCurr)->getDebugLoc());
3077
3078 Intrinsic::ID IntrID = Intrinsic::ctlz;
3079 Type *Ty = X->getType();
3080 unsigned Bitwidth = Ty->getScalarSizeInBits();
3081
3081
3082 TargetTransformInfo::TargetCostKind CostKind =
3083 TargetTransformInfo::TCK_SizeAndLatency;
3084
3085 // The rewrite is considered to be unprofitable if and only if the
3086 // intrinsic/shift we'll use are not cheap. Note that we are okay with *just*
3087 // making the loop countable, even if nothing else changes.
3088 IntrinsicCostAttributes Attrs(
3089 IntrID, Ty, {PoisonValue::get(Ty), /*is_zero_poison=*/Builder.getTrue()});
3090 InstructionCost Cost = TTI->getIntrinsicInstrCost(Attrs, CostKind);
3093 " Intrinsic is too costly, not beneficial\n");
3094 return MadeChange;
3095 }
3096 if (TTI->getArithmeticInstrCost(Instruction::Shl, Ty, CostKind) >
3097 TargetTransformInfo::TCC_Basic) {
3098 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Shift is too costly, not beneficial\n");
3099 return MadeChange;
3100 }
3101
3102 // Ok, transform appears worthwhile.
3103 MadeChange = true;
3104
3105 if (!isGuaranteedNotToBeUndefOrPoison(BitPos)) {
3106 // BitMask may be computed from BitPos; freeze BitPos so we can increase
3107 // its use count.
3108 std::optional<BasicBlock::iterator> InsertPt = std::nullopt;
3109 if (auto *BitPosI = dyn_cast<Instruction>(BitPos))
3110 InsertPt = BitPosI->getInsertionPointAfterDef();
3111 else
3112 InsertPt = DT->getRoot()->getFirstNonPHIOrDbgOrAlloca();
3113 if (!InsertPt)
3114 return false;
3115 FreezeInst *BitPosFrozen =
3116 new FreezeInst(BitPos, BitPos->getName() + ".fr", *InsertPt);
3117 BitPos->replaceUsesWithIf(BitPosFrozen, [BitPosFrozen](Use &U) {
3118 return U.getUser() != BitPosFrozen;
3119 });
3120 BitPos = BitPosFrozen;
3121 }
3122
3123 // Step 1: Compute the loop trip count.
3124
3125 Value *LowBitMask = Builder.CreateAdd(BitMask, Constant::getAllOnesValue(Ty),
3126 BitPos->getName() + ".lowbitmask");
3127 Value *Mask =
3128 Builder.CreateOr(LowBitMask, BitMask, BitPos->getName() + ".mask");
3129 Value *XMasked = Builder.CreateAnd(X, Mask, X->getName() + ".masked");
3130 CallInst *XMaskedNumLeadingZeros = Builder.CreateIntrinsic(
3131 IntrID, Ty, {XMasked, /*is_zero_poison=*/Builder.getTrue()},
3132 /*FMFSource=*/nullptr, XMasked->getName() + ".numleadingzeros");
3133 Value *XMaskedNumActiveBits = Builder.CreateSub(
3134 ConstantInt::get(Ty, Ty->getScalarSizeInBits()), XMaskedNumLeadingZeros,
3135 XMasked->getName() + ".numactivebits", /*HasNUW=*/true,
3136 /*HasNSW=*/Bitwidth != 2);
3137 Value *XMaskedLeadingOnePos =
3138 Builder.CreateAdd(XMaskedNumActiveBits, Constant::getAllOnesValue(Ty),
3139 XMasked->getName() + ".leadingonepos", /*HasNUW=*/false,
3140 /*HasNSW=*/Bitwidth > 2);
3141
3142 Value *LoopBackedgeTakenCount = Builder.CreateSub(
3143 BitPos, XMaskedLeadingOnePos, CurLoop->getName() + ".backedgetakencount",
3144 /*HasNUW=*/true, /*HasNSW=*/true);
3145 // We know the loop's backedge-taken count, but what's the loop's trip count?
3146 // Note that NUW is always safe, while NSW only holds for bitwidths != 2.
3147 Value *LoopTripCount =
3148 Builder.CreateAdd(LoopBackedgeTakenCount, ConstantInt::get(Ty, 1),
3149 CurLoop->getName() + ".tripcount", /*HasNUW=*/true,
3150 /*HasNSW=*/Bitwidth != 2);
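// Worked example (illustrative): for i32 X = 1 and BitPos = 5, Mask is
// 0b111111, X.masked = 1, NumLeadingZeros = 31, NumActiveBits = 1, and
// LeadingOnePos = 0; so the backedge-taken count is 5 - 0 = 5 and the trip
// count is 6, matching the six values 1, 2, 4, 8, 16, 32 taken by %x.curr.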
3151
3152 // Step 2: Compute the recurrence's final value without a loop.
3153
3154 // NewX is always safe to compute, because `LoopBackedgeTakenCount`
3155 // will always be smaller than `bitwidth(X)`, i.e. we never get poison.
3156 Value *NewX = Builder.CreateShl(X, LoopBackedgeTakenCount);
3157 NewX->takeName(XCurr);
3158 if (auto *I = dyn_cast<Instruction>(NewX))
3159 I->copyIRFlags(XNext, /*IncludeWrapFlags=*/true);
3160
3161 Value *NewXNext;
3162 // Rewriting XNext is more complicated, however, because `X << LoopTripCount`
3163 // will be poison iff `LoopTripCount == bitwidth(X)` (which will happen
3164 // iff `BitPos` is `bitwidth(x) - 1` and `X` is `1`). So unless we know
3165 // that isn't the case, we'll need to emit an alternative, safe IR.
3166 if (XNext->hasNoSignedWrap() || XNext->hasNoUnsignedWrap() ||
3167 match(BitPos,
3168 m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_NE,
3169 APInt(Ty->getScalarSizeInBits(),
3170 Ty->getScalarSizeInBits() - 1))))
3171 NewXNext = Builder.CreateShl(X, LoopTripCount);
3172 else {
3173 // Otherwise, just additionally shift by one. It's the smallest solution;
3174 // alternatively, we could check that NewX is INT_MIN (or that BitPos is
3175 // bitwidth(X) - 1) and select 0 instead.
3176 NewXNext = Builder.CreateShl(NewX, ConstantInt::get(Ty, 1));
3177 }
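// E.g. (illustrative) for i8 with BitPos = 7 and X = 1: LoopTripCount is 8,
// so "X << 8" would be poison, but (X << 7) << 1 wraps to 0, matching the
// value the loop's own flagless shl would have produced on its last
// iteration.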
3178
3179 NewXNext->takeName(XNext);
3180 if (auto *I = dyn_cast<Instruction>(NewXNext))
3181 I->copyIRFlags(XNext, /*IncludeWrapFlags=*/true);
3182
3183 // Step 3: Adjust the successor basic block to receive the computed
3184 // recurrence's final value instead of the recurrence itself.
3185
3186 XCurr->replaceUsesOutsideBlock(NewX, LoopHeaderBB);
3187 XNext->replaceUsesOutsideBlock(NewXNext, LoopHeaderBB);
3188
3189 // Step 4: Rewrite the loop into a countable form, with canonical IV.
3190
3191 // The new canonical induction variable.
3192 Builder.SetInsertPoint(LoopHeaderBB, LoopHeaderBB->begin());
3193 auto *IV = Builder.CreatePHI(Ty, 2, CurLoop->getName() + ".iv");
3194
3195 // The induction itself.
3196 // Note that while NUW is always safe, NSW is only safe for bitwidths != 2.
3197 Builder.SetInsertPoint(LoopHeaderBB->getTerminator());
3198 auto *IVNext =
3199 Builder.CreateAdd(IV, ConstantInt::get(Ty, 1), IV->getName() + ".next",
3200 /*HasNUW=*/true, /*HasNSW=*/Bitwidth != 2);
3201
3202 // The loop trip count check.
3203 auto *IVCheck = Builder.CreateICmpEQ(IVNext, LoopTripCount,
3204 CurLoop->getName() + ".ivcheck");
3205 SmallVector<uint32_t> BranchWeights;
3206 const bool HasBranchWeights =
3207 !ProfcheckDisableMetadataFixes &&
3208 extractBranchWeights(*LoopHeaderBB->getTerminator(), BranchWeights);
3209
3210 auto *BI = Builder.CreateCondBr(IVCheck, SuccessorBB, LoopHeaderBB);
3211 if (HasBranchWeights) {
3212 if (SuccessorBB == LoopHeaderBB->getTerminator()->getSuccessor(1))
3213 std::swap(BranchWeights[0], BranchWeights[1]);
3214 // We're not changing the loop profile, so we can reuse the original loop's
3215 // profile.
3216 setBranchWeights(*BI, BranchWeights,
3217 /*IsExpected=*/false);
3218 }
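// E.g. (illustrative) if the original terminator was
// "br i1 %c, label %loop, label %exit" with weights {99, 1}, the exit block
// moves from position 1 to position 0 in the new branch, so the weights are
// swapped to {1, 99} to preserve the same exit probability.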
3219
3220 LoopHeaderBB->getTerminator()->eraseFromParent();
3221
3222 // Populate the IV PHI.
3223 IV->addIncoming(ConstantInt::get(Ty, 0), LoopPreheaderBB);
3224 IV->addIncoming(IVNext, LoopHeaderBB);
3225
3226 // Step 5: Forget the "non-computable" trip-count SCEV associated with the
3227 // loop. The loop would otherwise not be deleted even if it becomes empty.
3228
3229 SE->forgetLoop(CurLoop);
3230
3231 // Other passes will take care of actually deleting the loop if possible.
3232
3233 LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-bittest idiom optimized!\n");
3234
3235 ++NumShiftUntilBitTest;
3236 return MadeChange;
3237}
3238
3239/// Return true if the idiom is detected in the loop.
3240///
3241/// The core idiom we are trying to detect is:
3242/// \code
3243/// entry:
3244/// <...>
3245/// %start = <...>
3246/// %extraoffset = <...>
3247/// <...>
3248/// br label %loop
3249///
3250/// loop:
3251/// %iv = phi i8 [ %start, %entry ], [ %iv.next, %loop ]
3252/// %nbits = add nsw i8 %iv, %extraoffset
3253/// %val.shifted = {{l,a}shr,shl} i8 %val, %nbits
3254/// %val.shifted.iszero = icmp eq i8 %val.shifted, 0
3255/// %iv.next = add i8 %iv, 1
3256/// <...>
3257/// br i1 %val.shifted.iszero, label %end, label %loop
3258///
3259/// end:
3260/// %iv.res = phi i8 [ %iv, %loop ] <...>
3261/// %nbits.res = phi i8 [ %nbits, %loop ] <...>
3262/// %val.shifted.res = phi i8 [ %val.shifted, %loop ] <...>
3263/// %val.shifted.iszero.res = phi i1 [ %val.shifted.iszero, %loop ] <...>
3264/// %iv.next.res = phi i8 [ %iv.next, %loop ] <...>
3265/// <...>
3266/// \endcode
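///
/// In C terms, this corresponds roughly to (illustrative only):
/// \code
///   int iv = start;
///   while ((val >> (iv + extraoffset)) != 0) // or <<, or arithmetic >>
///     ++iv;
/// \endcode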
3267static bool detectShiftUntilZeroIdiom(Loop *CurLoop, ScalarEvolution *SE,
3268 Instruction *&ValShiftedIsZero,
3269 Intrinsic::ID &IntrinID, Instruction *&IV,
3270 Value *&Start, Value *&Val,
3271 const SCEV *&ExtraOffsetExpr,
3272 bool &InvertedCond) {
3273 LLVM_DEBUG(dbgs() << DEBUG_TYPE
3274 " Performing shift-until-zero idiom detection.\n");
3275
3276 // Give up if the loop has multiple blocks or multiple backedges.
3277 if (CurLoop->getNumBlocks() != 1 || CurLoop->getNumBackEdges() != 1) {
3278 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad block/backedge count.\n");
3279 return false;
3280 }
3281
3282 Instruction *ValShifted, *NBits, *IVNext;
3283 Value *ExtraOffset;
3284
3285 BasicBlock *LoopHeaderBB = CurLoop->getHeader();
3286 BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader();
3287 assert(LoopPreheaderBB && "There is always a loop preheader.");
3288
3289 using namespace PatternMatch;
3290
3291 // Step 1: Check if the loop backedge's condition is in desirable form.
3292
3293 CmpPredicate Pred;
3294 BasicBlock *TrueBB, *FalseBB;
3295 if (!match(LoopHeaderBB->getTerminator(),
3296 m_Br(m_Instruction(ValShiftedIsZero), m_BasicBlock(TrueBB),
3297 m_BasicBlock(FalseBB))) ||
3298 !match(ValShiftedIsZero,
3299 m_ICmp(Pred, m_Instruction(ValShifted), m_Zero())) ||
3300 !ICmpInst::isEquality(Pred)) {
3301 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge structure.\n");
3302 return false;
3303 }
3304
3305 // Step 2: Check if the comparison's operand is in desirable form.
3306 // FIXME: Val could be a one-input PHI node, which we should look past.
3307 if (!match(ValShifted, m_Shift(m_LoopInvariant(m_Value(Val), CurLoop),
3308 m_Instruction(NBits)))) {
3309 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad comparison value computation.\n");
3310 return false;
3311 }
3312 IntrinID = ValShifted->getOpcode() == Instruction::Shl ? Intrinsic::cttz
3313 : Intrinsic::ctlz;
3314
3315 // Step 3: Check if the shift amount is in desirable form.
3316
3317 if (match(NBits, m_c_Add(m_Instruction(IV),
3318 m_LoopInvariant(m_Value(ExtraOffset), CurLoop))) &&
3319 (NBits->hasNoSignedWrap() || NBits->hasNoUnsignedWrap()))
3320 ExtraOffsetExpr = SE->getNegativeSCEV(SE->getSCEV(ExtraOffset));
3321 else if (match(NBits,
3322 m_Sub(m_Instruction(IV),
3323 m_LoopInvariant(m_Value(ExtraOffset), CurLoop))) &&
3324 NBits->hasNoSignedWrap())
3325 ExtraOffsetExpr = SE->getSCEV(ExtraOffset);
3326 else {
3327 IV = NBits;
3328 ExtraOffsetExpr = SE->getZero(NBits->getType());
3329 }
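// In summary (illustrative), the three accepted forms and their offsets:
//   %nbits = add nsw/nuw %iv, %extraoffset  ->  ExtraOffsetExpr = -%extraoffset
//   %nbits = sub nsw %iv, %extraoffset      ->  ExtraOffsetExpr = +%extraoffset
//   %nbits = %iv                            ->  ExtraOffsetExpr = 0
// chosen so the rewrite can later compute the final IV as
//   max(activebits(%val) + ExtraOffsetExpr, %start).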
3330
3331 // Step 4: Check if the recurrence is in desirable form.
3332 auto *IVPN = dyn_cast<PHINode>(IV);
3333 if (!IVPN || IVPN->getParent() != LoopHeaderBB) {
3334 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Not an expected PHI node.\n");
3335 return false;
3336 }
3337
3338 Start = IVPN->getIncomingValueForBlock(LoopPreheaderBB);
3339 IVNext = dyn_cast<Instruction>(IVPN->getIncomingValueForBlock(LoopHeaderBB));
3340
3341 if (!IVNext || !match(IVNext, m_Add(m_Specific(IVPN), m_One()))) {
3342 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad recurrence.\n");
3343 return false;
3344 }
3345
3346 // Step 5: Check if the backedge's destinations are in desirable form.
3347
3348 assert(ICmpInst::isEquality(Pred) &&
3349 "Should only get equality predicates here.");
3350
3351 // cmp-br is commutative, so canonicalize to a single variant.
3352 InvertedCond = Pred != ICmpInst::Predicate::ICMP_EQ;
3353 if (InvertedCond) {
3354 Pred = ICmpInst::getInversePredicate(Pred);
3355 std::swap(TrueBB, FalseBB);
3356 }
3357
3358 // We expect to exit the loop when the comparison yields true,
3359 // so when it yields false we should branch back to the loop header.
3360 if (FalseBB != LoopHeaderBB) {
3361 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge flow.\n");
3362 return false;
3363 }
3364
3365 // The new, countable loop will certainly only run a known number of
3366 // iterations; it won't be infinite. But the old loop might be infinite
3367 // under certain conditions. For logical shifts, the value will become zero
3368 // after at most bitwidth(%Val) loop iterations. However, for an arithmetic
3369 // right-shift, if the sign bit was set, the value will never become zero,
3370 // and the loop may never finish.
3371 if (ValShifted->getOpcode() == Instruction::AShr &&
3372 !isMustProgress(CurLoop) && !SE->isKnownNonNegative(SE->getSCEV(Val))) {
3373 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Cannot prove the loop is finite.\n");
3374 return false;
3375 }
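// E.g. (illustrative) "while ((val >> nbits) != 0) ++nbits;" with an
// arithmetic shift and val = -1 never terminates: ashr replicates the sign
// bit, so the shifted value stays -1 for every shift amount.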
3376
3377 // Okay, idiom checks out.
3378 return true;
3379}
3380
3381/// Look for the following loop:
3382/// \code
3383/// entry:
3384/// <...>
3385/// %start = <...>
3386/// %extraoffset = <...>
3387/// <...>
3388/// br label %loop
3389///
3390/// loop:
3391/// %iv = phi i8 [ %start, %entry ], [ %iv.next, %loop ]
3392/// %nbits = add nsw i8 %iv, %extraoffset
3393/// %val.shifted = {{l,a}shr,shl} i8 %val, %nbits
3394/// %val.shifted.iszero = icmp eq i8 %val.shifted, 0
3395/// %iv.next = add i8 %iv, 1
3396/// <...>
3397/// br i1 %val.shifted.iszero, label %end, label %loop
3398///
3399/// end:
3400/// %iv.res = phi i8 [ %iv, %loop ] <...>
3401/// %nbits.res = phi i8 [ %nbits, %loop ] <...>
3402/// %val.shifted.res = phi i8 [ %val.shifted, %loop ] <...>
3403/// %val.shifted.iszero.res = phi i1 [ %val.shifted.iszero, %loop ] <...>
3404/// %iv.next.res = phi i8 [ %iv.next, %loop ] <...>
3405/// <...>
3406/// \endcode
3407///
3408/// And transform it into:
3409/// \code
3410/// entry:
3411/// <...>
3412/// %start = <...>
3413/// %extraoffset = <...>
3414/// <...>
3415/// %val.numleadingzeros = call i8 @llvm.ct{l,t}z.i8(i8 %val, i1 0)
3416/// %val.numactivebits = sub i8 8, %val.numleadingzeros
3417/// %extraoffset.neg = sub i8 0, %extraoffset
3418/// %tmp = add i8 %val.numactivebits, %extraoffset.neg
3419/// %iv.final = call i8 @llvm.smax.i8(i8 %tmp, i8 %start)
3420/// %loop.tripcount = sub i8 %iv.final, %start
3421/// br label %loop
3422///
3423/// loop:
3424/// %loop.iv = phi i8 [ 0, %entry ], [ %loop.iv.next, %loop ]
3425/// %loop.iv.next = add i8 %loop.iv, 1
3426/// %loop.ivcheck = icmp eq i8 %loop.iv.next, %loop.tripcount
3427/// %iv = add i8 %loop.iv, %start
3428/// <...>
3429/// br i1 %loop.ivcheck, label %end, label %loop
3430///
3431/// end:
3432/// %iv.res = phi i8 [ %iv.final, %loop ] <...>
3433/// <...>
3434/// \endcode
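///
/// Conceptually (illustrative only): the loop merely counts how many
/// increments of %iv it takes for "%val >> (%iv + %extraoffset)" to reach
/// zero, so its final IV value is max(activebits(%val) - %extraoffset,
/// %start), which the rewritten preheader computes directly via ct{l,t}z.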
3435bool LoopIdiomRecognize::recognizeShiftUntilZero() {
3436 bool MadeChange = false;
3437
3438 Instruction *ValShiftedIsZero;
3439 Intrinsic::ID IntrID;
3440 Instruction *IV;
3441 Value *Start, *Val;
3442 const SCEV *ExtraOffsetExpr;
3443 bool InvertedCond;
3444 if (!detectShiftUntilZeroIdiom(CurLoop, SE, ValShiftedIsZero, IntrID, IV,
3445 Start, Val, ExtraOffsetExpr, InvertedCond)) {
3446 LLVM_DEBUG(dbgs() << DEBUG_TYPE
3447 " shift-until-zero idiom detection failed.\n");
3448 return MadeChange;
3449 }
3450 LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-zero idiom detected!\n");
3451
3452 // Ok, it is the idiom we were looking for; we *could* transform this loop,
3453 // but is it profitable to transform?
3454
3455 BasicBlock *LoopHeaderBB = CurLoop->getHeader();
3456 BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader();
3457 assert(LoopPreheaderBB && "There is always a loop preheader.");
3458
3459 BasicBlock *SuccessorBB = CurLoop->getExitBlock();
3460 assert(SuccessorBB && "There is only a single successor.");
3461
3462 IRBuilder<> Builder(LoopPreheaderBB->getTerminator());
3463 Builder.SetCurrentDebugLocation(IV->getDebugLoc());
3464
3465 Type *Ty = Val->getType();
3466 unsigned Bitwidth = Ty->getScalarSizeInBits();
3467
3468 TargetTransformInfo::TargetCostKind CostKind =
3469 TargetTransformInfo::TCK_SizeAndLatency;
3470
3471 // The rewrite is considered to be unprofitable if and only if the
3472 // intrinsic we'll use is not cheap. Note that we are okay with *just*
3473 // making the loop countable, even if nothing else changes.
3474 IntrinsicCostAttributes Attrs(
3475 IntrID, Ty, {PoisonValue::get(Ty), /*is_zero_poison=*/Builder.getFalse()});
3476 InstructionCost Cost = TTI->getIntrinsicInstrCost(Attrs, CostKind);
3477 if (Cost > TargetTransformInfo::TCC_Basic) {
3478 LLVM_DEBUG(dbgs() << DEBUG_TYPE
3479 " Intrinsic is too costly, not beneficial\n");
3480 return MadeChange;
3481 }
3482
3483 // Ok, transform appears worthwhile.
3484 MadeChange = true;
3485
3486 bool OffsetIsZero = ExtraOffsetExpr->isZero();
3487
3488 // Step 1: Compute the loop's final IV value / trip count.
3489
3490 CallInst *ValNumLeadingZeros = Builder.CreateIntrinsic(
3491 IntrID, Ty, {Val, /*is_zero_poison=*/Builder.getFalse()},
3492 /*FMFSource=*/nullptr, Val->getName() + ".numleadingzeros");
3493 Value *ValNumActiveBits = Builder.CreateSub(
3494 ConstantInt::get(Ty, Ty->getScalarSizeInBits()), ValNumLeadingZeros,
3495 Val->getName() + ".numactivebits", /*HasNUW=*/true,
3496 /*HasNSW=*/Bitwidth != 2);
3497
3498 SCEVExpander Expander(*SE, "loop-idiom");
3499 Expander.setInsertPoint(&*Builder.GetInsertPoint());
3500 Value *ExtraOffset = Expander.expandCodeFor(ExtraOffsetExpr);
3501
3502 Value *ValNumActiveBitsOffset = Builder.CreateAdd(
3503 ValNumActiveBits, ExtraOffset, ValNumActiveBits->getName() + ".offset",
3504 /*HasNUW=*/OffsetIsZero, /*HasNSW=*/true);
3505 Value *IVFinal = Builder.CreateIntrinsic(Intrinsic::smax, {Ty},
3506 {ValNumActiveBitsOffset, Start},
3507 /*FMFSource=*/nullptr, "iv.final");
3508
3509 auto *LoopBackedgeTakenCount = cast<Instruction>(Builder.CreateSub(
3510 IVFinal, Start, CurLoop->getName() + ".backedgetakencount",
3511 /*HasNUW=*/OffsetIsZero, /*HasNSW=*/true));
3512 // FIXME: or when the offset was `add nuw`
3513
3514 // We know the loop's backedge-taken count, but what's the loop's trip count?
3515 Value *LoopTripCount =
3516 Builder.CreateAdd(LoopBackedgeTakenCount, ConstantInt::get(Ty, 1),
3517 CurLoop->getName() + ".tripcount", /*HasNUW=*/true,
3518 /*HasNSW=*/Bitwidth != 2);
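// Worked example (illustrative values): for i8 Val = 22 (0b00010110),
// ctlz(Val) = 3, so ValNumActiveBits = 5. With ExtraOffsetExpr = -2 and
// Start = 0: IVFinal = max(5 - 2, 0) = 3, BackedgeTakenCount = 3, and
// LoopTripCount = 4, matching the original loop, which runs iv = 0, 1, 2, 3
// before "val >> (iv + 2)" first becomes zero.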
3519
3520 // Step 2: Adjust the successor basic block to receive the original
3521 // induction variable's final value instead of the orig. IV itself.
3522
3523 IV->replaceUsesOutsideBlock(IVFinal, LoopHeaderBB);
3524
3525 // Step 3: Rewrite the loop into a countable form, with canonical IV.
3526
3527 // The new canonical induction variable.
3528 Builder.SetInsertPoint(LoopHeaderBB, LoopHeaderBB->begin());
3529 auto *CIV = Builder.CreatePHI(Ty, 2, CurLoop->getName() + ".iv");
3530
3531 // The induction itself.
3532 Builder.SetInsertPoint(LoopHeaderBB, LoopHeaderBB->getFirstNonPHIIt());
3533 auto *CIVNext =
3534 Builder.CreateAdd(CIV, ConstantInt::get(Ty, 1), CIV->getName() + ".next",
3535 /*HasNUW=*/true, /*HasNSW=*/Bitwidth != 2);
3536
3537 // The loop trip count check.
3538 auto *CIVCheck = Builder.CreateICmpEQ(CIVNext, LoopTripCount,
3539 CurLoop->getName() + ".ivcheck");
3540 auto *NewIVCheck = CIVCheck;
3541 if (InvertedCond) {
3542 NewIVCheck = Builder.CreateNot(CIVCheck);
3543 NewIVCheck->takeName(ValShiftedIsZero);
3544 }
3545
3546 // The original IV, but rebased to be an offset to the CIV.
3547 auto *IVDePHId = Builder.CreateAdd(CIV, Start, "", /*HasNUW=*/false,
3548 /*HasNSW=*/true); // FIXME: what about NUW?
3549 IVDePHId->takeName(IV);
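// E.g. (illustrative) with Start = 5: the new counter CIV runs 0, 1, 2, ...
// while in-loop users of the original IV observe 5, 6, 7, ... via this add.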
3550
3551 // The loop terminator.
3552 Builder.SetInsertPoint(LoopHeaderBB->getTerminator());
3553 SmallVector<uint32_t> BranchWeights;
3554 const bool HasBranchWeights =
3555 !ProfcheckDisableMetadataFixes &&
3556 extractBranchWeights(*LoopHeaderBB->getTerminator(), BranchWeights);
3557
3558 auto *BI = Builder.CreateCondBr(CIVCheck, SuccessorBB, LoopHeaderBB);
3559 if (HasBranchWeights) {
3560 if (InvertedCond)
3561 std::swap(BranchWeights[0], BranchWeights[1]);
3562 // We're not changing the loop profile, so we can reuse the original loop's
3563 // profile.
3564 setBranchWeights(*BI, BranchWeights, /*IsExpected=*/false);
3565 }
3566 LoopHeaderBB->getTerminator()->eraseFromParent();
3567
3568 // Populate the IV PHI.
3569 CIV->addIncoming(ConstantInt::get(Ty, 0), LoopPreheaderBB);
3570 CIV->addIncoming(CIVNext, LoopHeaderBB);
3571
3572 // Step 4: Forget the "non-computable" trip-count SCEV associated with the
3573 // loop. The loop would otherwise not be deleted even if it becomes empty.
3574
3575 SE->forgetLoop(CurLoop);
3576
3577 // Step 5: Try to clean up the loop's body somewhat.
3578 IV->replaceAllUsesWith(IVDePHId);
3579 IV->eraseFromParent();
3580
3581 ValShiftedIsZero->replaceAllUsesWith(NewIVCheck);
3582 ValShiftedIsZero->eraseFromParent();
3583
3584 // Other passes will take care of actually deleting the loop if possible.
3585
3586 LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-zero idiom optimized!\n");
3587
3588 ++NumShiftUntilZero;
3589 return MadeChange;
3590}