MergeICmps.cpp
//===- MergeICmps.cpp - Optimize chains of integer comparisons ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass turns chains of integer comparisons into memcmp (the memcmp is
// later typically inlined as a chain of efficient hardware comparisons). This
// typically benefits C++ member or nonmember operator==().
//
// The basic idea is to replace a longer chain of integer comparisons loaded
// from contiguous memory locations with a shorter chain of larger integer
// comparisons. The benefits are twofold:
//  - There are fewer jumps, and therefore fewer opportunities for
//    mispredictions and I-cache misses.
//  - Code size is smaller, both because jumps are removed and because the
//    encoding of a 2*n-byte compare is smaller than that of two n-byte
//    compares.
//
// Example:
//
//  struct S {
//    int a;
//    char b;
//    char c;
//    uint16_t d;
//    bool operator==(const S& o) const {
//      return a == o.a && b == o.b && c == o.c && d == o.d;
//    }
//  };
//
// is optimized as:
//
//  bool S::operator==(const S& o) const {
//    return memcmp(this, &o, 8) == 0;
//  }
//
// which will later be expanded (by ExpandMemCmp) into a single 8-byte icmp.
//
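// As an illustrative sketch only (the value names and the i32/i64 widths below
// are assumptions, not output from an actual compilation), the merged IR for
// S::operator==() ends up looking roughly like:
//
//   %res = call i32 @memcmp(ptr %this, ptr %o, i64 8)
//   %eq  = icmp eq i32 %res, 0
//   ret i1 %eq
//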
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MergeICmps.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include <algorithm>
#include <numeric>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

#define DEBUG_TYPE "mergeicmps"

// A BCE atom "Binary Compare Expression Atom" represents an integer load
// that is a constant offset from a base value, e.g. `a` or `o.c` in the example
// at the top.
struct BCEAtom {
  BCEAtom() = default;
  BCEAtom(GetElementPtrInst *GEP, LoadInst *LoadI, int BaseId, APInt Offset)
      : GEP(GEP), LoadI(LoadI), BaseId(BaseId), Offset(Offset) {}

  BCEAtom(const BCEAtom &) = delete;
  BCEAtom &operator=(const BCEAtom &) = delete;

  BCEAtom(BCEAtom &&that) = default;
  BCEAtom &operator=(BCEAtom &&that) {
    if (this == &that)
      return *this;
    GEP = that.GEP;
    LoadI = that.LoadI;
    BaseId = that.BaseId;
    Offset = std::move(that.Offset);
    return *this;
  }

  // We want to order BCEAtoms by (Base, Offset). However we cannot use
  // the pointer values for Base because these are non-deterministic.
  // To make sure that the sort order is stable, we first assign to each atom
  // base value an index based on its order of appearance in the chain of
  // comparisons. We call this index `BaseOrdering`. For example, for:
  //    b[3] == c[2] && a[1] == d[1] && b[4] == c[3]
  //    |  block 1  |   |  block 2  |   |  block 3  |
  // b gets assigned index 0 and a index 1, because b appears as LHS in block 1,
  // which is before block 2.
  // We then sort by (BaseOrdering[LHS.Base()], LHS.Offset), which is stable.
  bool operator<(const BCEAtom &O) const {
    return BaseId != O.BaseId ? BaseId < O.BaseId : Offset.slt(O.Offset);
  }

  GetElementPtrInst *GEP = nullptr;
  LoadInst *LoadI = nullptr;
  unsigned BaseId = 0;
  APInt Offset;
};

// A class that assigns increasing ids to values in the order in which they are
// seen. See comment in `BCEAtom::operator<()`.
class BaseIdentifier {
public:
  // Returns the id for value `Base`, after assigning one if `Base` has not been
  // seen before.
  int getBaseId(const Value *Base) {
    assert(Base && "invalid base");
    const auto Insertion = BaseToIndex.try_emplace(Base, Order);
    if (Insertion.second)
      ++Order;
    return Insertion.first->second;
  }

private:
  unsigned Order = 1;
  DenseMap<const Value *, int> BaseToIndex;
};

// If this value is a load from a constant offset w.r.t. a base address, and
// there are no other users of the load or address, returns the base address and
// the offset.
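// For example (an illustrative IR snippet, not taken from a real test), for
//   %c = getelementptr inbounds %struct.S, ptr %o, i32 0, i32 2
//   %v = load i8, ptr %c
// the atom's base is `%o` and its offset is 5 (the byte offset of field `c`
// in the struct S from the file header).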
BCEAtom visitICmpLoadOperand(Value *const Val, BaseIdentifier &BaseId) {
  auto *const LoadI = dyn_cast<LoadInst>(Val);
  if (!LoadI)
    return {};
  LLVM_DEBUG(dbgs() << "load\n");
  if (LoadI->isUsedOutsideOfBlock(LoadI->getParent())) {
    LLVM_DEBUG(dbgs() << "used outside of block\n");
    return {};
  }
  // Do not optimize atomic loads to non-atomic memcmp
  if (!LoadI->isSimple()) {
    LLVM_DEBUG(dbgs() << "volatile or atomic\n");
    return {};
  }
  Value *Addr = LoadI->getOperand(0);
  if (Addr->getType()->getPointerAddressSpace() != 0) {
    LLVM_DEBUG(dbgs() << "from non-zero AddressSpace\n");
    return {};
  }
  const auto &DL = LoadI->getModule()->getDataLayout();
  if (!isDereferenceablePointer(Addr, LoadI->getType(), DL)) {
    LLVM_DEBUG(dbgs() << "not dereferenceable\n");
    // We need to make sure that we can do comparison in any order, so we
    // require memory to be unconditionally dereferenceable.
    return {};
  }

  APInt Offset = APInt(DL.getIndexTypeSizeInBits(Addr->getType()), 0);
  Value *Base = Addr;
  auto *GEP = dyn_cast<GetElementPtrInst>(Addr);
  if (GEP) {
    LLVM_DEBUG(dbgs() << "GEP\n");
    if (GEP->isUsedOutsideOfBlock(LoadI->getParent())) {
      LLVM_DEBUG(dbgs() << "used outside of block\n");
      return {};
    }
    if (!GEP->accumulateConstantOffset(DL, Offset))
      return {};
    Base = GEP->getPointerOperand();
  }
  return BCEAtom(GEP, LoadI, BaseId.getBaseId(Base), Offset);
}

// A comparison between two BCE atoms, e.g. `a == o.a` in the example at the
// top.
// Note: the terminology is misleading: the comparison is symmetric, so there
// is no real {l/r}hs. What we want though is to have the same base on the
// left (resp. right), so that we can detect consecutive loads. To ensure this
// we put the smallest atom on the left.
struct BCECmp {
  BCEAtom Lhs;
  BCEAtom Rhs;
  int SizeBits;
  const ICmpInst *CmpI;

  BCECmp(BCEAtom L, BCEAtom R, int SizeBits, const ICmpInst *CmpI)
      : Lhs(std::move(L)), Rhs(std::move(R)), SizeBits(SizeBits), CmpI(CmpI) {
    if (Rhs < Lhs) std::swap(Rhs, Lhs);
  }
};

// A basic block with a comparison between two BCE atoms.
// The block might do extra work besides the atom comparison, in which case
// doesOtherWork() returns true. Under some conditions, the block can be
// split into the atom comparison part and the "other work" part
// (see canSplit()).
class BCECmpBlock {
public:
  typedef SmallDenseSet<const Instruction *, 8> InstructionSet;

  BCECmpBlock(BCECmp Cmp, BasicBlock *BB, InstructionSet BlockInsts)
      : BB(BB), BlockInsts(std::move(BlockInsts)), Cmp(std::move(Cmp)) {}

  const BCEAtom &Lhs() const { return Cmp.Lhs; }
  const BCEAtom &Rhs() const { return Cmp.Rhs; }
  int SizeBits() const { return Cmp.SizeBits; }

  // Returns true if the block does other work besides the comparison.
  bool doesOtherWork() const;

  // Returns true if the non-BCE-cmp instructions can be separated from BCE-cmp
  // instructions in the block.
  bool canSplit(AliasAnalysis &AA) const;

  // Returns true if all the relevant instructions in the BCE-cmp-block can
  // be sunk below this instruction. By doing this, we know we can separate the
  // BCE-cmp-block instructions from the non-BCE-cmp-block instructions in the
  // block.
  bool canSinkBCECmpInst(const Instruction *, AliasAnalysis &AA) const;

  // We can separate the BCE-cmp-block instructions and the non-BCE-cmp-block
  // instructions. Split the old block and move all non-BCE-cmp-insts into the
  // new parent block.
  void split(BasicBlock *NewParent, AliasAnalysis &AA) const;

  // The basic block where this comparison happens.
  BasicBlock *BB;
  // Instructions relating to the BCECmp and branch.
  InstructionSet BlockInsts;
  // The block requires splitting.
  bool RequireSplit = false;
  // Original order of this block in the chain.
  unsigned OrigOrder = 0;

private:
  BCECmp Cmp;
};

bool BCECmpBlock::canSinkBCECmpInst(const Instruction *Inst,
                                    AliasAnalysis &AA) const {
  // If this instruction may clobber the loads and is in the middle of the BCE
  // cmp block instructions, then bail for now.
  if (Inst->mayWriteToMemory()) {
    auto MayClobber = [&](LoadInst *LI) {
      // If a potentially clobbering instruction comes before the load,
      // we can still safely sink the load.
      return (Inst->getParent() != LI->getParent() || !Inst->comesBefore(LI)) &&
             isModSet(AA.getModRefInfo(Inst, MemoryLocation::get(LI)));
    };
    if (MayClobber(Cmp.Lhs.LoadI) || MayClobber(Cmp.Rhs.LoadI))
      return false;
  }
  // Make sure this instruction does not use any of the BCE cmp block
  // instructions as operand.
  return llvm::none_of(Inst->operands(), [&](const Value *Op) {
    const Instruction *OpI = dyn_cast<Instruction>(Op);
    return OpI && BlockInsts.contains(OpI);
  });
}

void BCECmpBlock::split(BasicBlock *NewParent, AliasAnalysis &AA) const {
  llvm::SmallVector<Instruction *, 4> OtherInsts;
  for (Instruction &Inst : *BB) {
    if (BlockInsts.count(&Inst))
      continue;
    assert(canSinkBCECmpInst(&Inst, AA) && "Split unsplittable block");
    // This is a non-BCE-cmp-block instruction. And it can be separated
    // from the BCE-cmp-block instructions.
    OtherInsts.push_back(&Inst);
  }

  // Do the actual splitting.
  for (Instruction *Inst : reverse(OtherInsts))
    Inst->moveBefore(*NewParent, NewParent->begin());
}

bool BCECmpBlock::canSplit(AliasAnalysis &AA) const {
  for (Instruction &Inst : *BB) {
    if (!BlockInsts.count(&Inst)) {
      if (!canSinkBCECmpInst(&Inst, AA))
        return false;
    }
  }
  return true;
}

bool BCECmpBlock::doesOtherWork() const {
  // TODO(courbet): Can we allow some other things? This is very conservative.
  // We might be able to get away with anything that does not have any side
  // effects outside of the basic block.
  // Note: The GEPs and/or loads are not necessarily in the same block.
  for (const Instruction &Inst : *BB) {
    if (!BlockInsts.count(&Inst))
      return true;
  }
  return false;
}

// Visit the given comparison. If this is a comparison between two valid
// BCE atoms, returns the comparison.
std::optional<BCECmp> visitICmp(const ICmpInst *const CmpI,
                                const ICmpInst::Predicate ExpectedPredicate,
                                BaseIdentifier &BaseId) {
  // The comparison can only be used once:
  //  - For intermediate blocks, as a branch condition.
  //  - For the final block, as an incoming value for the Phi.
  // If there are any other uses of the comparison, we cannot merge it with
  // other comparisons as we would create an orphan use of the value.
  if (!CmpI->hasOneUse()) {
    LLVM_DEBUG(dbgs() << "cmp has several uses\n");
    return std::nullopt;
  }
  if (CmpI->getPredicate() != ExpectedPredicate)
    return std::nullopt;
  LLVM_DEBUG(dbgs() << "cmp "
                    << (ExpectedPredicate == ICmpInst::ICMP_EQ ? "eq" : "ne")
                    << "\n");
  auto Lhs = visitICmpLoadOperand(CmpI->getOperand(0), BaseId);
  if (!Lhs.BaseId)
    return std::nullopt;
  auto Rhs = visitICmpLoadOperand(CmpI->getOperand(1), BaseId);
  if (!Rhs.BaseId)
    return std::nullopt;
  const auto &DL = CmpI->getModule()->getDataLayout();
  return BCECmp(std::move(Lhs), std::move(Rhs),
                DL.getTypeSizeInBits(CmpI->getOperand(0)->getType()), CmpI);
}

// Visit the given comparison block. If this is a comparison between two valid
// BCE atoms, returns the comparison.
std::optional<BCECmpBlock> visitCmpBlock(Value *const Val,
                                         BasicBlock *const Block,
                                         const BasicBlock *const PhiBlock,
                                         BaseIdentifier &BaseId) {
  if (Block->empty())
    return std::nullopt;
  auto *const BranchI = dyn_cast<BranchInst>(Block->getTerminator());
  if (!BranchI)
    return std::nullopt;
  LLVM_DEBUG(dbgs() << "branch\n");
  Value *Cond;
  ICmpInst::Predicate ExpectedPredicate;
  if (BranchI->isUnconditional()) {
    // In this case, we expect an incoming value which is the result of the
    // comparison. This is the last link in the chain of comparisons (note
    // that this does not mean that this is the last incoming value, blocks
    // can be reordered).
    Cond = Val;
    ExpectedPredicate = ICmpInst::ICMP_EQ;
  } else {
    // In this case, we expect a constant incoming value (the comparison is
    // chained).
    const auto *const Const = cast<ConstantInt>(Val);
    LLVM_DEBUG(dbgs() << "const\n");
    if (!Const->isZero())
      return std::nullopt;
    LLVM_DEBUG(dbgs() << "false\n");
    assert(BranchI->getNumSuccessors() == 2 && "expecting a cond branch");
    BasicBlock *const FalseBlock = BranchI->getSuccessor(1);
    Cond = BranchI->getCondition();
    ExpectedPredicate =
        FalseBlock == PhiBlock ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
  }

  auto *CmpI = dyn_cast<ICmpInst>(Cond);
  if (!CmpI)
    return std::nullopt;
  LLVM_DEBUG(dbgs() << "icmp\n");

  std::optional<BCECmp> Result = visitICmp(CmpI, ExpectedPredicate, BaseId);
  if (!Result)
    return std::nullopt;

  BCECmpBlock::InstructionSet BlockInsts(
      {Result->Lhs.LoadI, Result->Rhs.LoadI, Result->CmpI, BranchI});
  if (Result->Lhs.GEP)
    BlockInsts.insert(Result->Lhs.GEP);
  if (Result->Rhs.GEP)
    BlockInsts.insert(Result->Rhs.GEP);
  return BCECmpBlock(std::move(*Result), Block, BlockInsts);
}

static inline void enqueueBlock(std::vector<BCECmpBlock> &Comparisons,
                                BCECmpBlock &&Comparison) {
  LLVM_DEBUG(dbgs() << "Block '" << Comparison.BB->getName()
                    << "': Found cmp of " << Comparison.SizeBits()
                    << " bits between " << Comparison.Lhs().BaseId << " + "
                    << Comparison.Lhs().Offset << " and "
                    << Comparison.Rhs().BaseId << " + "
                    << Comparison.Rhs().Offset << "\n");
  LLVM_DEBUG(dbgs() << "\n");
  Comparison.OrigOrder = Comparisons.size();
  Comparisons.push_back(std::move(Comparison));
}

// A chain of comparisons.
class BCECmpChain {
public:
  using ContiguousBlocks = std::vector<BCECmpBlock>;

  BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi,
              AliasAnalysis &AA);

  bool simplify(const TargetLibraryInfo &TLI, AliasAnalysis &AA,
                DomTreeUpdater &DTU);

  bool atLeastOneMerged() const {
    return any_of(MergedBlocks_,
                  [](const auto &Blocks) { return Blocks.size() > 1; });
  }

private:
  PHINode &Phi_;
  // The list of all blocks in the chain, grouped by contiguity.
  std::vector<ContiguousBlocks> MergedBlocks_;
  // The original entry block (before sorting).
  BasicBlock *EntryBlock_;
};

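// Two blocks are "contiguous" when, on both the lhs and the rhs side, the
// second block's load starts exactly where the first block's load ends.
// Worked example (illustrative, reusing struct S from the file header): a
// 32-bit compare of `a` at offset 0 followed by an 8-bit compare of `b` at
// offset 4 is contiguous, since 0 + 32 / 8 == 4 on both sides.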
static bool areContiguous(const BCECmpBlock &First, const BCECmpBlock &Second) {
  return First.Lhs().BaseId == Second.Lhs().BaseId &&
         First.Rhs().BaseId == Second.Rhs().BaseId &&
         First.Lhs().Offset + First.SizeBits() / 8 == Second.Lhs().Offset &&
         First.Rhs().Offset + First.SizeBits() / 8 == Second.Rhs().Offset;
}

static unsigned getMinOrigOrder(const BCECmpChain::ContiguousBlocks &Blocks) {
  unsigned MinOrigOrder = std::numeric_limits<unsigned>::max();
  for (const BCECmpBlock &Block : Blocks)
    MinOrigOrder = std::min(MinOrigOrder, Block.OrigOrder);
  return MinOrigOrder;
}

/// Given a chain of comparison blocks, groups the blocks into contiguous
/// ranges that can be merged together into a single comparison.
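/// Illustrative sketch (assuming all four comparisons of the struct S example
/// from the file header were accepted): they cover bytes [0,4), [4,5), [5,6)
/// and [6,8) on both sides, so after sorting they are pairwise contiguous and
/// form a single group, which later becomes one 8-byte memcmp.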
static std::vector<BCECmpChain::ContiguousBlocks>
mergeBlocks(std::vector<BCECmpBlock> &&Blocks) {
  std::vector<BCECmpChain::ContiguousBlocks> MergedBlocks;

  // Sort to detect continuous offsets.
  llvm::sort(Blocks,
             [](const BCECmpBlock &LhsBlock, const BCECmpBlock &RhsBlock) {
               return std::tie(LhsBlock.Lhs(), LhsBlock.Rhs()) <
                      std::tie(RhsBlock.Lhs(), RhsBlock.Rhs());
             });

  BCECmpChain::ContiguousBlocks *LastMergedBlock = nullptr;
  for (BCECmpBlock &Block : Blocks) {
    if (!LastMergedBlock || !areContiguous(LastMergedBlock->back(), Block)) {
      MergedBlocks.emplace_back();
      LastMergedBlock = &MergedBlocks.back();
    } else {
      LLVM_DEBUG(dbgs() << "Merging block " << Block.BB->getName() << " into "
                        << LastMergedBlock->back().BB->getName() << "\n");
    }
    LastMergedBlock->push_back(std::move(Block));
  }

  // While we allow reordering for merging, do not reorder unmerged comparisons.
  // Doing so may introduce branch on poison.
  llvm::sort(MergedBlocks, [](const BCECmpChain::ContiguousBlocks &LhsBlocks,
                              const BCECmpChain::ContiguousBlocks &RhsBlocks) {
    return getMinOrigOrder(LhsBlocks) < getMinOrigOrder(RhsBlocks);
  });

  return MergedBlocks;
}

BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi,
                         AliasAnalysis &AA)
    : Phi_(Phi) {
  assert(!Blocks.empty() && "a chain should have at least one block");
  // Now look inside blocks to check for BCE comparisons.
  std::vector<BCECmpBlock> Comparisons;
  BaseIdentifier BaseId;
  for (BasicBlock *const Block : Blocks) {
    assert(Block && "invalid block");
    std::optional<BCECmpBlock> Comparison = visitCmpBlock(
        Phi.getIncomingValueForBlock(Block), Block, Phi.getParent(), BaseId);
    if (!Comparison) {
      LLVM_DEBUG(dbgs() << "chain with invalid BCECmpBlock, no merge.\n");
      return;
    }
    if (Comparison->doesOtherWork()) {
      LLVM_DEBUG(dbgs() << "block '" << Comparison->BB->getName()
                        << "' does extra work besides compare\n");
      if (Comparisons.empty()) {
        // This is the initial block in the chain; in case this block does
        // other work, we can try to split the block and move the irrelevant
        // instructions to the predecessor.
        //
        // If this is not the initial block in the chain, splitting it won't
        // work: once split, there will still be instructions before the BCE
        // cmp instructions that do other work in program order, i.e. within
        // the chain before sorting, unless we can abort the chain at this
        // point and start anew.
        //
        // NOTE: we only handle blocks with a single predecessor for now.
        if (Comparison->canSplit(AA)) {
          LLVM_DEBUG(dbgs()
                     << "Split initial block '" << Comparison->BB->getName()
                     << "' that does extra work besides compare\n");
          Comparison->RequireSplit = true;
          enqueueBlock(Comparisons, std::move(*Comparison));
        } else {
          LLVM_DEBUG(dbgs()
                     << "ignoring initial block '" << Comparison->BB->getName()
                     << "' that does extra work besides compare\n");
        }
        continue;
      }
      // TODO(courbet): Right now we abort the whole chain. We could be
      // merging only the blocks that don't do other work and resume the
      // chain from there. For example:
      //  if (a[0] == b[0]) {  // bb1
      //    if (a[1] == b[1]) {  // bb2
      //      some_value = 3;  // bb3
      //      if (a[2] == b[2]) {  // bb3
      //        do a ton of stuff  // bb4
      //      }
      //    }
      //  }
      //
      // This is:
      //
      //   bb1 --eq--> bb2 --eq--> bb3* -eq--> bb4 --+
      //    \           \           \               \
      //     ne          ne          ne              \
      //      \           \           \               v
      //       +-----------+-----------+-------------> bb_phi
      //
      // We can only merge the first two comparisons, because bb3* does
      // "other work" (setting some_value to 3).
      // We could still merge bb1 and bb2 though.
      return;
    }
    enqueueBlock(Comparisons, std::move(*Comparison));
  }

  // It is possible we have no suitable comparison to merge.
  if (Comparisons.empty()) {
    LLVM_DEBUG(dbgs() << "chain with no BCE basic blocks, no merge\n");
    return;
  }
  EntryBlock_ = Comparisons[0].BB;
  MergedBlocks_ = mergeBlocks(std::move(Comparisons));
}

namespace {

// A class to compute the name of a set of merged basic blocks.
// This is optimized for the common case of no block names.
class MergedBlockName {
  // Storage for the uncommon case of several named blocks.
  SmallString<16> Scratch;

public:
  explicit MergedBlockName(ArrayRef<BCECmpBlock> Comparisons)
      : Name(makeName(Comparisons)) {}
  const StringRef Name;

private:
  StringRef makeName(ArrayRef<BCECmpBlock> Comparisons) {
    assert(!Comparisons.empty() && "no basic block");
    // Fast path: only one block, or no names at all.
    if (Comparisons.size() == 1)
      return Comparisons[0].BB->getName();
    const int size = std::accumulate(Comparisons.begin(), Comparisons.end(), 0,
                                     [](int i, const BCECmpBlock &Cmp) {
                                       return i + Cmp.BB->getName().size();
                                     });
    if (size == 0)
      return StringRef("", 0);

    // Slow path: at least two blocks, at least one block with a name.
    Scratch.clear();
    // We'll have `size` bytes for name and `Comparisons.size() - 1` bytes for
    // separators.
    Scratch.reserve(size + Comparisons.size() - 1);
    const auto append = [this](StringRef str) {
      Scratch.append(str.begin(), str.end());
    };
    append(Comparisons[0].BB->getName());
    for (int I = 1, E = Comparisons.size(); I < E; ++I) {
      const BasicBlock *const BB = Comparisons[I].BB;
      if (!BB->getName().empty()) {
        append("+");
        append(BB->getName());
      }
    }
    return Scratch.str();
  }
};
} // namespace

// Merges the given contiguous comparison blocks into one memcmp block.
static BasicBlock *mergeComparisons(ArrayRef<BCECmpBlock> Comparisons,
                                    BasicBlock *const InsertBefore,
                                    BasicBlock *const NextCmpBlock,
                                    PHINode &Phi, const TargetLibraryInfo &TLI,
                                    AliasAnalysis &AA, DomTreeUpdater &DTU) {
  assert(!Comparisons.empty() && "merging zero comparisons");
  LLVMContext &Context = NextCmpBlock->getContext();
  const BCECmpBlock &FirstCmp = Comparisons[0];

  // Create a new cmp block before next cmp block.
  BasicBlock *const BB =
      BasicBlock::Create(Context, MergedBlockName(Comparisons).Name,
                         NextCmpBlock->getParent(), InsertBefore);
  IRBuilder<> Builder(BB);
  // Add the GEPs from the first BCECmpBlock.
  Value *Lhs, *Rhs;
  if (FirstCmp.Lhs().GEP)
    Lhs = Builder.Insert(FirstCmp.Lhs().GEP->clone());
  else
    Lhs = FirstCmp.Lhs().LoadI->getPointerOperand();
  if (FirstCmp.Rhs().GEP)
    Rhs = Builder.Insert(FirstCmp.Rhs().GEP->clone());
  else
    Rhs = FirstCmp.Rhs().LoadI->getPointerOperand();

  Value *IsEqual = nullptr;
  LLVM_DEBUG(dbgs() << "Merging " << Comparisons.size() << " comparisons -> "
                    << BB->getName() << "\n");

  // If there is one block that requires splitting, we do it now, i.e.
  // just before we know we will collapse the chain. The instructions
  // can be executed before any of the instructions in the chain.
  const auto ToSplit = llvm::find_if(
      Comparisons, [](const BCECmpBlock &B) { return B.RequireSplit; });
  if (ToSplit != Comparisons.end()) {
    LLVM_DEBUG(dbgs() << "Splitting non_BCE work to header\n");
    ToSplit->split(BB, AA);
  }

  if (Comparisons.size() == 1) {
    LLVM_DEBUG(dbgs() << "Only one comparison, updating branches\n");
    // Use clone to keep the metadata.
    Instruction *const LhsLoad = Builder.Insert(FirstCmp.Lhs().LoadI->clone());
    Instruction *const RhsLoad = Builder.Insert(FirstCmp.Rhs().LoadI->clone());
    LhsLoad->replaceUsesOfWith(LhsLoad->getOperand(0), Lhs);
    RhsLoad->replaceUsesOfWith(RhsLoad->getOperand(0), Rhs);
    // There are no blocks to merge, just do the comparison.
    IsEqual = Builder.CreateICmpEQ(LhsLoad, RhsLoad);
  } else {
    const unsigned TotalSizeBits = std::accumulate(
        Comparisons.begin(), Comparisons.end(), 0u,
        [](int Size, const BCECmpBlock &C) { return Size + C.SizeBits(); });

    // memcmp expects a 'size_t' argument and returns 'int'.
    unsigned SizeTBits = TLI.getSizeTSize(*Phi.getModule());
    unsigned IntBits = TLI.getIntSize();

    // Create memcmp() == 0.
    const auto &DL = Phi.getModule()->getDataLayout();
    Value *const MemCmpCall = emitMemCmp(
        Lhs, Rhs,
        ConstantInt::get(Builder.getIntNTy(SizeTBits), TotalSizeBits / 8),
        Builder, DL, &TLI);
    IsEqual = Builder.CreateICmpEQ(
        MemCmpCall, ConstantInt::get(Builder.getIntNTy(IntBits), 0));
  }
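  // At this point BB computes, roughly (an illustrative sketch; the actual
  // integer widths come from the target's size_t/int and the value names are
  // made up):
  //   %cmp     = call i32 @memcmp(ptr %lhs, ptr %rhs, i64 <total bytes>)
  //   %isequal = icmp eq i32 %cmp, 0
  // or, in the single-comparison case, two cloned loads followed by an icmp eq.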

  BasicBlock *const PhiBB = Phi.getParent();
  // Add a branch to the next basic block in the chain.
  if (NextCmpBlock == PhiBB) {
    // Continue to phi, passing it the comparison result.
    Builder.CreateBr(PhiBB);
    Phi.addIncoming(IsEqual, BB);
    DTU.applyUpdates({{DominatorTree::Insert, BB, PhiBB}});
  } else {
    // Continue to next block if equal, exit to phi else.
    Builder.CreateCondBr(IsEqual, NextCmpBlock, PhiBB);
    Phi.addIncoming(ConstantInt::getFalse(Context), BB);
    DTU.applyUpdates({{DominatorTree::Insert, BB, NextCmpBlock},
                      {DominatorTree::Insert, BB, PhiBB}});
  }
  return BB;
}

bool BCECmpChain::simplify(const TargetLibraryInfo &TLI, AliasAnalysis &AA,
                           DomTreeUpdater &DTU) {
  assert(atLeastOneMerged() && "simplifying trivial BCECmpChain");
  LLVM_DEBUG(dbgs() << "Simplifying comparison chain starting at block "
                    << EntryBlock_->getName() << "\n");

  // Effectively merge blocks. We go in the reverse direction from the phi block
  // so that the next block is always available to branch to.
  BasicBlock *InsertBefore = EntryBlock_;
  BasicBlock *NextCmpBlock = Phi_.getParent();
  for (const auto &Blocks : reverse(MergedBlocks_)) {
    InsertBefore = NextCmpBlock = mergeComparisons(
        Blocks, InsertBefore, NextCmpBlock, Phi_, TLI, AA, DTU);
  }

  // Replace the original cmp chain with the new cmp chain by pointing all
  // predecessors of EntryBlock_ to NextCmpBlock instead. This makes all cmp
  // blocks in the old chain unreachable.
  while (!pred_empty(EntryBlock_)) {
    BasicBlock *const Pred = *pred_begin(EntryBlock_);
    LLVM_DEBUG(dbgs() << "Updating jump into old chain from " << Pred->getName()
                      << "\n");
    Pred->getTerminator()->replaceUsesOfWith(EntryBlock_, NextCmpBlock);
    DTU.applyUpdates({{DominatorTree::Delete, Pred, EntryBlock_},
                      {DominatorTree::Insert, Pred, NextCmpBlock}});
  }

  // If the old cmp chain was the function entry, we need to update the function
  // entry.
  const bool ChainEntryIsFnEntry = EntryBlock_->isEntryBlock();
  if (ChainEntryIsFnEntry && DTU.hasDomTree()) {
    LLVM_DEBUG(dbgs() << "Changing function entry from "
                      << EntryBlock_->getName() << " to "
                      << NextCmpBlock->getName() << "\n");
    DTU.getDomTree().setNewRoot(NextCmpBlock);
    DTU.applyUpdates({{DominatorTree::Delete, NextCmpBlock, EntryBlock_}});
  }
  EntryBlock_ = nullptr;

  // Delete merged blocks. This also removes incoming values in phi.
  SmallVector<BasicBlock *, 16> DeadBlocks;
  for (const auto &Blocks : MergedBlocks_) {
    for (const BCECmpBlock &Block : Blocks) {
      LLVM_DEBUG(dbgs() << "Deleting merged block " << Block.BB->getName()
                        << "\n");
      DeadBlocks.push_back(Block.BB);
    }
  }
  DeleteDeadBlocks(DeadBlocks, &DTU);

  MergedBlocks_.clear();
  return true;
}

std::vector<BasicBlock *> getOrderedBlocks(PHINode &Phi,
                                           BasicBlock *const LastBlock,
                                           int NumBlocks) {
  // Walk up from the last block to find other blocks.
  std::vector<BasicBlock *> Blocks(NumBlocks);
  assert(LastBlock && "invalid last block");
  BasicBlock *CurBlock = LastBlock;
  for (int BlockIndex = NumBlocks - 1; BlockIndex > 0; --BlockIndex) {
    if (CurBlock->hasAddressTaken()) {
      // Somebody is jumping to the block through an address, all bets are
      // off.
      LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex
                        << " has its address taken\n");
      return {};
    }
    Blocks[BlockIndex] = CurBlock;
    auto *SinglePredecessor = CurBlock->getSinglePredecessor();
    if (!SinglePredecessor) {
      // The block has two or more predecessors.
      LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex
                        << " has two or more predecessors\n");
      return {};
    }
    if (Phi.getBasicBlockIndex(SinglePredecessor) < 0) {
      // The block does not link back to the phi.
      LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex
                        << " does not link back to the phi\n");
      return {};
    }
    CurBlock = SinglePredecessor;
  }
  Blocks[0] = CurBlock;
  return Blocks;
}

bool processPhi(PHINode &Phi, const TargetLibraryInfo &TLI, AliasAnalysis &AA,
                DomTreeUpdater &DTU) {
  LLVM_DEBUG(dbgs() << "processPhi()\n");
  if (Phi.getNumIncomingValues() <= 1) {
    LLVM_DEBUG(dbgs() << "skip: only one incoming value in phi\n");
    return false;
  }
  // We are looking for something that has the following structure:
  //   bb1 --eq--> bb2 --eq--> bb3 --eq--> bb4 --+
  //    \           \           \               \
  //     ne          ne          ne              \
  //      \           \           \               v
  //       +-----------+-----------+-------------> bb_phi
  //
  //  - The last basic block (bb4 here) must branch unconditionally to bb_phi.
  //    It's the only block that contributes a non-constant value to the Phi.
  //  - All other blocks (bb1, bb2, bb3) must have exactly two successors, one
  //    of them being the phi block.
  //  - All intermediate blocks (bb2, bb3) must have only one predecessor.
  //  - Blocks cannot do other work besides the comparison, see doesOtherWork().

  // The blocks are not necessarily ordered in the phi, so we start from the
  // last block and reconstruct the order.
  BasicBlock *LastBlock = nullptr;
  for (unsigned I = 0; I < Phi.getNumIncomingValues(); ++I) {
    if (isa<ConstantInt>(Phi.getIncomingValue(I))) continue;
    if (LastBlock) {
      // There are several non-constant values.
      LLVM_DEBUG(dbgs() << "skip: several non-constant values\n");
      return false;
    }
    if (!isa<ICmpInst>(Phi.getIncomingValue(I)) ||
        cast<ICmpInst>(Phi.getIncomingValue(I))->getParent() !=
            Phi.getIncomingBlock(I)) {
      // Non-constant incoming value is not from a cmp instruction or not
      // produced by the last block. We could end up processing the value
      // producing block more than once.
      //
      // This is an uncommon case, so we bail.
      LLVM_DEBUG(
          dbgs()
          << "skip: non-constant value not from cmp or not from last block.\n");
      return false;
    }
    LastBlock = Phi.getIncomingBlock(I);
  }
  if (!LastBlock) {
    // There is no non-constant block.
    LLVM_DEBUG(dbgs() << "skip: no non-constant block\n");
    return false;
  }
  if (LastBlock->getSingleSuccessor() != Phi.getParent()) {
    LLVM_DEBUG(dbgs() << "skip: last block non-phi successor\n");
    return false;
  }

  const auto Blocks =
      getOrderedBlocks(Phi, LastBlock, Phi.getNumIncomingValues());
  if (Blocks.empty()) return false;
  BCECmpChain CmpChain(Blocks, Phi, AA);

  if (!CmpChain.atLeastOneMerged()) {
    LLVM_DEBUG(dbgs() << "skip: nothing merged\n");
    return false;
  }

  return CmpChain.simplify(TLI, AA, DTU);
}

static bool runImpl(Function &F, const TargetLibraryInfo &TLI,
                    const TargetTransformInfo &TTI, AliasAnalysis &AA,
                    DominatorTree *DT) {
  LLVM_DEBUG(dbgs() << "MergeICmpsLegacyPass: " << F.getName() << "\n");

  // We only try merging comparisons if the target wants to expand memcmp later.
  // The rationale is to avoid turning small chains into memcmp calls.
  if (!TTI.enableMemCmpExpansion(F.hasOptSize(), true))
    return false;

  // If we don't have memcmp available we can't emit calls to it.
  if (!TLI.has(LibFunc_memcmp))
    return false;

  DomTreeUpdater DTU(DT, /*PostDominatorTree*/ nullptr,
                     DomTreeUpdater::UpdateStrategy::Eager);

  bool MadeChange = false;

  for (BasicBlock &BB : llvm::drop_begin(F)) {
    // A Phi operation is always first in a basic block.
    if (auto *const Phi = dyn_cast<PHINode>(&*BB.begin()))
      MadeChange |= processPhi(*Phi, TLI, AA, DTU);
  }

  return MadeChange;
}

class MergeICmpsLegacyPass : public FunctionPass {
public:
  static char ID;

  MergeICmpsLegacyPass() : FunctionPass(ID) {
    initializeMergeICmpsLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F)) return false;
    const auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    const auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    // MergeICmps does not need the DominatorTree, but we update it if it's
    // already available.
    auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
    auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    return runImpl(F, TLI, TTI, AA, DTWP ? &DTWP->getDomTree() : nullptr);
  }

private:
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }
};

} // namespace

char MergeICmpsLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(MergeICmpsLegacyPass, "mergeicmps",
                      "Merge contiguous icmps into a memcmp", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MergeICmpsLegacyPass, "mergeicmps",
                    "Merge contiguous icmps into a memcmp", false, false)

Pass *llvm::createMergeICmpsLegacyPass() { return new MergeICmpsLegacyPass(); }

PreservedAnalyses MergeICmpsPass::run(Function &F,
                                      FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto *DT = AM.getCachedResult<DominatorTreeAnalysis>(F);
  const bool MadeChanges = runImpl(F, TLI, TTI, AA, DT);
  if (!MadeChanges)
    return PreservedAnalyses::all();
  PreservedAnalyses PA = PreservedAnalyses::none();
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}
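
// A usage sketch (the `-passes=mergeicmps` spelling for the new pass manager
// is assumed to match the "mergeicmps" name registered above):
//   opt -passes=mergeicmps -S input.ll -o merged.ll
// The emitted memcmp is typically turned back into wide integer compares by
// ExpandMemCmp later in the codegen pipeline.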