LLVM 10.0.0svn
EarlyCSE.cpp
1 //===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass performs a simple dominator tree walk that eliminates trivially
10 // redundant instructions.
11 //
12 //===----------------------------------------------------------------------===//
13 
15 #include "llvm/ADT/DenseMapInfo.h"
16 #include "llvm/ADT/Hashing.h"
17 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SetVector.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/Statistic.h"
32 #include "llvm/IR/BasicBlock.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/Dominators.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/IR/InstrTypes.h"
38 #include "llvm/IR/Instruction.h"
39 #include "llvm/IR/Instructions.h"
40 #include "llvm/IR/IntrinsicInst.h"
41 #include "llvm/IR/Intrinsics.h"
42 #include "llvm/IR/LLVMContext.h"
43 #include "llvm/IR/PassManager.h"
44 #include "llvm/IR/PatternMatch.h"
45 #include "llvm/IR/Type.h"
46 #include "llvm/IR/Use.h"
47 #include "llvm/IR/Value.h"
48 #include "llvm/Pass.h"
49 #include "llvm/Support/Allocator.h"
51 #include "llvm/Support/Casting.h"
52 #include "llvm/Support/Debug.h"
56 #include "llvm/Transforms/Scalar.h"
58 #include <cassert>
59 #include <deque>
60 #include <memory>
61 #include <utility>
62 
63 using namespace llvm;
64 using namespace llvm::PatternMatch;
65 
66 #define DEBUG_TYPE "early-cse"
67 
68 STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
69 STATISTIC(NumCSE, "Number of instructions CSE'd");
70 STATISTIC(NumCSECVP, "Number of compare instructions CVP'd");
71 STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
72 STATISTIC(NumCSECall, "Number of call instructions CSE'd");
73 STATISTIC(NumDSE, "Number of trivial dead stores removed");
74 
75 DEBUG_COUNTER(CSECounter, "early-cse",
76  "Controls which instructions are removed");
77 
78 static cl::opt<unsigned> EarlyCSEMssaOptCap(
79  "earlycse-mssa-optimization-cap", cl::init(500), cl::Hidden,
80  cl::desc("Enable imprecision in EarlyCSE in pathological cases, in exchange "
81  "for faster compile. Caps the MemorySSA clobbering calls."));
82 
83 static cl::opt<bool> EarlyCSEDebugHash(
84  "earlycse-debug-hash", cl::init(false), cl::Hidden,
85  cl::desc("Perform extra assertion checking to verify that SimpleValue's hash "
86  "function is well-behaved w.r.t. its isEqual predicate"));
87 
88 //===----------------------------------------------------------------------===//
89 // SimpleValue
90 //===----------------------------------------------------------------------===//
91 
92 namespace {
93 
94 /// Struct representing the available values in the scoped hash table.
95 struct SimpleValue {
96  Instruction *Inst;
97 
98  SimpleValue(Instruction *I) : Inst(I) {
99  assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
100  }
101 
102  bool isSentinel() const {
103  return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
104  Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
105  }
106 
107  static bool canHandle(Instruction *Inst) {
108  // This can only handle non-void readnone functions.
109  if (CallInst *CI = dyn_cast<CallInst>(Inst))
110  return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
111  return isa<CastInst>(Inst) || isa<UnaryOperator>(Inst) ||
112  isa<BinaryOperator>(Inst) || isa<GetElementPtrInst>(Inst) ||
113  isa<CmpInst>(Inst) || isa<SelectInst>(Inst) ||
114  isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
115  isa<ShuffleVectorInst>(Inst) || isa<ExtractValueInst>(Inst) ||
116  isa<InsertValueInst>(Inst);
117  }
118 };
119 
120 } // end anonymous namespace
121 
122 namespace llvm {
123 
124 template <> struct DenseMapInfo<SimpleValue> {
125  static inline SimpleValue getEmptyKey() {
126  return SimpleValue(DenseMapInfo<Instruction *>::getEmptyKey());
127  }
128 
129  static inline SimpleValue getTombstoneKey() {
130  return SimpleValue(DenseMapInfo<Instruction *>::getTombstoneKey());
131  }
132 
133  static unsigned getHashValue(SimpleValue Val);
134  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
135 };
136 
137 } // end namespace llvm
138 
139 /// Match a 'select' including an optional 'not' of the condition.
140 static bool matchSelectWithOptionalNotCond(Value *V, Value *&Cond, Value *&A,
141  Value *&B,
142  SelectPatternFlavor &Flavor) {
143  // Return false if V is not even a select.
144  if (!match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))))
145  return false;
146 
147  // Look through a 'not' of the condition operand by swapping A/B.
148  Value *CondNot;
149  if (match(Cond, m_Not(m_Value(CondNot)))) {
150  Cond = CondNot;
151  std::swap(A, B);
152  }
153 
154  // Set flavor if we find a match, or set it to unknown otherwise; in
155  // either case, return true to indicate that this is a select we can
156  // process.
157  if (auto *CmpI = dyn_cast<ICmpInst>(Cond))
158  Flavor = matchDecomposedSelectPattern(CmpI, A, B, A, B).Flavor;
159  else
160  Flavor = SPF_UNKNOWN;
161 
162  return true;
163 }
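// For example, given "%notc = xor i1 %c, true" and
// "%sel = select i1 %notc, i32 %a, i32 %b", the matcher above reports
// Cond == %c with A and B swapped, so %sel is hashed and compared below
// exactly as "select i1 %c, i32 %b, i32 %a" would be.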
164 
165 static unsigned getHashValueImpl(SimpleValue Val) {
166  Instruction *Inst = Val.Inst;
167  // Hash in all of the operands as pointers.
168  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
169  Value *LHS = BinOp->getOperand(0);
170  Value *RHS = BinOp->getOperand(1);
171  if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
172  std::swap(LHS, RHS);
173 
174  return hash_combine(BinOp->getOpcode(), LHS, RHS);
175  }
176 
177  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
178  // Compares can be commuted by swapping the comparands and
179  // updating the predicate. Choose the form that has the
180  // comparands in sorted order, or in the case of a tie, the
181  // one with the lower predicate.
182  Value *LHS = CI->getOperand(0);
183  Value *RHS = CI->getOperand(1);
184  CmpInst::Predicate Pred = CI->getPredicate();
185  CmpInst::Predicate SwappedPred = CI->getSwappedPredicate();
186  if (std::tie(LHS, Pred) > std::tie(RHS, SwappedPred)) {
187  std::swap(LHS, RHS);
188  Pred = SwappedPred;
189  }
190  return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
191  }
192 
193  // Hash general selects to allow matching commuted true/false operands.
194  SelectPatternFlavor SPF;
195  Value *Cond, *A, *B;
196  if (matchSelectWithOptionalNotCond(Inst, Cond, A, B, SPF)) {
197  // Hash min/max/abs (cmp + select) to allow for commuted operands.
198  // Min/max may also have a non-canonical compare predicate (e.g., the compare for
199  // smin may use 'sgt' rather than 'slt'), and non-canonical operands in the
200  // compare.
201  // TODO: We should also detect FP min/max.
202  if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
203  SPF == SPF_UMIN || SPF == SPF_UMAX) {
204  if (A > B)
205  std::swap(A, B);
206  return hash_combine(Inst->getOpcode(), SPF, A, B);
207  }
208  if (SPF == SPF_ABS || SPF == SPF_NABS) {
209  // ABS/NABS always puts the input in A and its negation in B.
210  return hash_combine(Inst->getOpcode(), SPF, A, B);
211  }
212 
213  // Hash general selects to allow matching commuted true/false operands.
214 
215  // If we do not have a compare as the condition, just hash in the condition.
216  CmpInst::Predicate Pred;
217  Value *X, *Y;
218  if (!match(Cond, m_Cmp(Pred, m_Value(X), m_Value(Y))))
219  return hash_combine(Inst->getOpcode(), Cond, A, B);
220 
221  // Similar to cmp normalization (above) - canonicalize the predicate value:
222  // select (icmp Pred, X, Y), A, B --> select (icmp InvPred, X, Y), B, A
223  if (CmpInst::getInversePredicate(Pred) < Pred) {
224  Pred = CmpInst::getInversePredicate(Pred);
225  std::swap(A, B);
226  }
227  return hash_combine(Inst->getOpcode(), Pred, X, Y, A, B);
228  }
229 
230  if (CastInst *CI = dyn_cast<CastInst>(Inst))
231  return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));
232 
233  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
234  return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
235  hash_combine_range(EVI->idx_begin(), EVI->idx_end()));
236 
237  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
238  return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
239  IVI->getOperand(1),
240  hash_combine_range(IVI->idx_begin(), IVI->idx_end()));
241 
242  assert((isa<CallInst>(Inst) || isa<GetElementPtrInst>(Inst) ||
243  isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
244  isa<ShuffleVectorInst>(Inst) || isa<UnaryOperator>(Inst)) &&
245  "Invalid/unknown instruction");
246 
247  // Mix in the opcode.
248  return hash_combine(
249  Inst->getOpcode(),
250  hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
251 }
252 
253 unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
254 #ifndef NDEBUG
255  // If -earlycse-debug-hash was specified, return a constant -- this
256  // will force all hashing to collide, so we'll exhaustively search
257  // the table for a match, and the assertion in isEqual will fire if
258  // there's a bug causing equal keys to hash differently.
259  if (EarlyCSEDebugHash)
260  return 0;
261 #endif
262  return getHashValueImpl(Val);
263 }
264 
265 static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS) {
266  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
267 
268  if (LHS.isSentinel() || RHS.isSentinel())
269  return LHSI == RHSI;
270 
271  if (LHSI->getOpcode() != RHSI->getOpcode())
272  return false;
273  if (LHSI->isIdenticalToWhenDefined(RHSI))
274  return true;
275 
276  // If we're not strictly identical, we still might be a commutable instruction
277  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
278  if (!LHSBinOp->isCommutative())
279  return false;
280 
281  assert(isa<BinaryOperator>(RHSI) &&
282  "same opcode, but different instruction type?");
283  BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);
284 
285  // Commuted equality
286  return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
287  LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
288  }
289  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
290  assert(isa<CmpInst>(RHSI) &&
291  "same opcode, but different instruction type?");
292  CmpInst *RHSCmp = cast<CmpInst>(RHSI);
293  // Commuted equality
294  return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
295  LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
296  LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
297  }
298 
299  // Min/max/abs can occur with commuted operands, non-canonical predicates,
300  // and/or non-canonical operands.
301  // Selects can be non-trivially equivalent via inverted conditions and swaps.
302  SelectPatternFlavor LSPF, RSPF;
303  Value *CondL, *CondR, *LHSA, *RHSA, *LHSB, *RHSB;
304  if (matchSelectWithOptionalNotCond(LHSI, CondL, LHSA, LHSB, LSPF) &&
305  matchSelectWithOptionalNotCond(RHSI, CondR, RHSA, RHSB, RSPF)) {
306  if (LSPF == RSPF) {
307  // TODO: We should also detect FP min/max.
308  if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
309  LSPF == SPF_UMIN || LSPF == SPF_UMAX)
310  return ((LHSA == RHSA && LHSB == RHSB) ||
311  (LHSA == RHSB && LHSB == RHSA));
312 
313  if (LSPF == SPF_ABS || LSPF == SPF_NABS) {
314  // Abs results are placed in a defined order by matchSelectPattern.
315  return LHSA == RHSA && LHSB == RHSB;
316  }
317 
318  // select Cond, A, B <--> select not(Cond), B, A
319  if (CondL == CondR && LHSA == RHSA && LHSB == RHSB)
320  return true;
321  }
322 
323  // If the true/false operands are swapped and the conditions are compares
324  // with inverted predicates, the selects are equal:
325  // select (icmp Pred, X, Y), A, B <--> select (icmp InvPred, X, Y), B, A
326  //
327  // This also handles patterns with a double-negation in the sense of not +
328  // inverse, because we looked through a 'not' in the matching function and
329  // swapped A/B:
330  // select (cmp Pred, X, Y), A, B <--> select (not (cmp InvPred, X, Y)), B, A
331  //
332  // This intentionally does NOT handle patterns with a double-negation in
333  // the sense of not + not, because doing so could result in values
334  // comparing
335  // as equal that hash differently in the min/max/abs cases like:
336  // select (cmp slt, X, Y), X, Y <--> select (not (not (cmp slt, X, Y))), X, Y
337  // ^ hashes as min ^ would not hash as min
338  // In the context of the EarlyCSE pass, however, such cases never reach
339  // this code, as we simplify the double-negation before hashing the second
340  // select (and so still succeed at CSEing them).
341  if (LHSA == RHSB && LHSB == RHSA) {
342  CmpInst::Predicate PredL, PredR;
343  Value *X, *Y;
344  if (match(CondL, m_Cmp(PredL, m_Value(X), m_Value(Y))) &&
345  match(CondR, m_Cmp(PredR, m_Specific(X), m_Specific(Y))) &&
346  CmpInst::getInversePredicate(PredL) == PredR)
347  return true;
348  }
349  }
350 
351  return false;
352 }
353 
354 bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
355  // These comparisons are nontrivial, so assert that equality implies
356  // hash equality (DenseMap demands this as an invariant).
357  bool Result = isEqualImpl(LHS, RHS);
358  assert(!Result || (LHS.isSentinel() && LHS.Inst == RHS.Inst) ||
359  getHashValueImpl(LHS) == getHashValueImpl(RHS));
360  return Result;
361 }
362 
363 //===----------------------------------------------------------------------===//
364 // CallValue
365 //===----------------------------------------------------------------------===//
366 
367 namespace {
368 
369 /// Struct representing the available call values in the scoped hash
370 /// table.
371 struct CallValue {
372  Instruction *Inst;
373 
374  CallValue(Instruction *I) : Inst(I) {
375  assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
376  }
377 
378  bool isSentinel() const {
379  return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
380  Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
381  }
382 
383  static bool canHandle(Instruction *Inst) {
384  // Don't value number anything that returns void.
385  if (Inst->getType()->isVoidTy())
386  return false;
387 
388  CallInst *CI = dyn_cast<CallInst>(Inst);
389  if (!CI || !CI->onlyReadsMemory())
390  return false;
391  return true;
392  }
393 };
394 
395 } // end anonymous namespace
396 
397 namespace llvm {
398 
399 template <> struct DenseMapInfo<CallValue> {
400  static inline CallValue getEmptyKey() {
401  return CallValue(DenseMapInfo<Instruction *>::getEmptyKey());
402  }
403 
404  static inline CallValue getTombstoneKey() {
405  return CallValue(DenseMapInfo<Instruction *>::getTombstoneKey());
406  }
407 
408  static unsigned getHashValue(CallValue Val);
409  static bool isEqual(CallValue LHS, CallValue RHS);
410 };
411 
412 } // end namespace llvm
413 
414 unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
415  Instruction *Inst = Val.Inst;
416  // Hash all of the operands as pointers and mix in the opcode.
417  return hash_combine(
418  Inst->getOpcode(),
419  hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
420 }
421 
422 bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
423  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
424  if (LHS.isSentinel() || RHS.isSentinel())
425  return LHSI == RHSI;
426  return LHSI->isIdenticalTo(RHSI);
427 }
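// For example, two identical calls "%x = call i32 @f(i32 %a)" and
// "%y = call i32 @f(i32 %a)" to a function marked 'readonly' compare equal
// here; the second can be replaced by the first provided processNode (below)
// proves no intervening write changed the memory generation.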
428 
429 //===----------------------------------------------------------------------===//
430 // EarlyCSE implementation
431 //===----------------------------------------------------------------------===//
432 
433 namespace {
434 
435 /// A simple and fast domtree-based CSE pass.
436 ///
437 /// This pass does a simple depth-first walk over the dominator tree,
438 /// eliminating trivially redundant instructions and using instsimplify to
439 /// canonicalize things as it goes. It is intended to be fast and catch obvious
440 /// cases so that instcombine and other passes are more effective. It is
441 /// expected that a later pass of GVN will catch the interesting/hard cases.
442 class EarlyCSE {
443 public:
444  const TargetLibraryInfo &TLI;
445  const TargetTransformInfo &TTI;
446  DominatorTree &DT;
447  AssumptionCache &AC;
448  const SimplifyQuery SQ;
449  MemorySSA *MSSA;
450  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;
451 
452  using AllocatorTy =
453  RecyclingAllocator<BumpPtrAllocator,
454  ScopedHashTableVal<SimpleValue, Value *>>;
455  using ScopedHTType =
456  ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
457  AllocatorTy>;
458 
459  /// A scoped hash table of the current values of all of our simple
460  /// scalar expressions.
461  ///
462  /// As we walk down the domtree, we look to see if instructions are in this:
463  /// if so, we replace them with what we find, otherwise we insert them so
464  /// that dominated values can succeed in their lookup.
465  ScopedHTType AvailableValues;
466 
467  /// A scoped hash table of the current values of previously encountered
468  /// memory locations.
469  ///
470  /// This allows us to get efficient access to dominating loads or stores when
471  /// we have a fully redundant load. In addition to the most recent load, we
472  /// keep track of a generation count of the read, which is compared against
473  /// the current generation count. The current generation count is incremented
474  /// after every possibly writing memory operation, which ensures that we only
475  /// CSE loads with other loads that have no intervening store. Ordering
476  /// events (such as fences or atomic instructions) increment the generation
477  /// count as well; essentially, we model these as writes to all possible
478  /// locations. Note that atomic and/or volatile loads and stores can be
479  /// present in the table; it is the responsibility of the consumer to inspect
480  /// the atomicity/volatility if needed.
481  struct LoadValue {
482  Instruction *DefInst = nullptr;
483  unsigned Generation = 0;
484  int MatchingId = -1;
485  bool IsAtomic = false;
486 
487  LoadValue() = default;
488  LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
489  bool IsAtomic)
490  : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
491  IsAtomic(IsAtomic) {}
492  };
493 
494  using LoadMapAllocator =
495  RecyclingAllocator<BumpPtrAllocator,
496  ScopedHashTableVal<Value *, LoadValue>>;
497  using LoadHTType =
498  ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
499  LoadMapAllocator>;
500 
501  LoadHTType AvailableLoads;
502 
503  // A scoped hash table mapping memory locations (represented as typed
504  // addresses) to generation numbers at which that memory location became
505  // (henceforth indefinitely) invariant.
506  using InvariantMapAllocator =
507  RecyclingAllocator<BumpPtrAllocator,
508  ScopedHashTableVal<MemoryLocation, unsigned>>;
509  using InvariantHTType =
510  ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
511  InvariantMapAllocator>;
512  InvariantHTType AvailableInvariants;
513 
514  /// A scoped hash table of the current values of read-only call
515  /// values.
516  ///
517  /// It uses the same generation count as loads.
518  using CallHTType =
519  ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
520  CallHTType AvailableCalls;
521 
522  /// This is the current generation of the memory value.
523  unsigned CurrentGeneration = 0;
524 
525  /// Set up the EarlyCSE runner for a particular function.
526  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
527  const TargetTransformInfo &TTI, DominatorTree &DT,
528  AssumptionCache &AC, MemorySSA *MSSA)
529  : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
530  MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {}
531 
532  bool run();
533 
534 private:
535  unsigned ClobberCounter = 0;
536  // Almost a POD, but needs to call the constructors for the scoped hash
537  // tables so that a new scope gets pushed on. These are RAII so that the
538  // scope gets popped when the NodeScope is destroyed.
539  class NodeScope {
540  public:
541  NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
542  InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls)
543  : Scope(AvailableValues), LoadScope(AvailableLoads),
544  InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {}
545  NodeScope(const NodeScope &) = delete;
546  NodeScope &operator=(const NodeScope &) = delete;
547 
548  private:
549  ScopedHTType::ScopeTy Scope;
550  LoadHTType::ScopeTy LoadScope;
551  InvariantHTType::ScopeTy InvariantScope;
552  CallHTType::ScopeTy CallScope;
553  };
554 
555  // Contains all the needed information to create a stack for doing a depth
556  // first traversal of the tree. This includes scopes for values, loads, and
557  // calls as well as the generation. There is a child iterator so that the
558  // children do not need to be stored separately.
559  class StackNode {
560  public:
561  StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
562  InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
563  unsigned cg, DomTreeNode *n, DomTreeNode::iterator child,
564  DomTreeNode::iterator end)
565  : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
566  EndIter(end),
567  Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
568  AvailableCalls)
569  {}
570  StackNode(const StackNode &) = delete;
571  StackNode &operator=(const StackNode &) = delete;
572 
573  // Accessors.
574  unsigned currentGeneration() { return CurrentGeneration; }
575  unsigned childGeneration() { return ChildGeneration; }
576  void childGeneration(unsigned generation) { ChildGeneration = generation; }
577  DomTreeNode *node() { return Node; }
578  DomTreeNode::iterator childIter() { return ChildIter; }
579 
580  DomTreeNode *nextChild() {
581  DomTreeNode *child = *ChildIter;
582  ++ChildIter;
583  return child;
584  }
585 
586  DomTreeNode::iterator end() { return EndIter; }
587  bool isProcessed() { return Processed; }
588  void process() { Processed = true; }
589 
590  private:
591  unsigned CurrentGeneration;
592  unsigned ChildGeneration;
593  DomTreeNode *Node;
594  DomTreeNode::iterator ChildIter;
595  DomTreeNode::iterator EndIter;
596  NodeScope Scopes;
597  bool Processed = false;
598  };
599 
600  /// Wrapper class to handle memory instructions, including loads,
601  /// stores and intrinsic loads and stores defined by the target.
602  class ParseMemoryInst {
603  public:
604  ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
605  : Inst(Inst) {
606  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
607  if (TTI.getTgtMemIntrinsic(II, Info))
608  IsTargetMemInst = true;
609  }
610 
611  bool isLoad() const {
612  if (IsTargetMemInst) return Info.ReadMem;
613  return isa<LoadInst>(Inst);
614  }
615 
616  bool isStore() const {
617  if (IsTargetMemInst) return Info.WriteMem;
618  return isa<StoreInst>(Inst);
619  }
620 
621  bool isAtomic() const {
622  if (IsTargetMemInst)
623  return Info.Ordering != AtomicOrdering::NotAtomic;
624  return Inst->isAtomic();
625  }
626 
627  bool isUnordered() const {
628  if (IsTargetMemInst)
629  return Info.isUnordered();
630 
631  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
632  return LI->isUnordered();
633  } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
634  return SI->isUnordered();
635  }
636  // Conservative answer
637  return !Inst->isAtomic();
638  }
639 
640  bool isVolatile() const {
641  if (IsTargetMemInst)
642  return Info.IsVolatile;
643 
644  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
645  return LI->isVolatile();
646  } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
647  return SI->isVolatile();
648  }
649  // Conservative answer
650  return true;
651  }
652 
653  bool isInvariantLoad() const {
654  if (auto *LI = dyn_cast<LoadInst>(Inst))
655  return LI->hasMetadata(LLVMContext::MD_invariant_load);
656  return false;
657  }
658 
659  bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
660  return (getPointerOperand() == Inst.getPointerOperand() &&
661  getMatchingId() == Inst.getMatchingId());
662  }
663 
664  bool isValid() const { return getPointerOperand() != nullptr; }
665 
666  // For regular (non-intrinsic) loads/stores, this is set to -1. For
667  // intrinsic loads/stores, the id is retrieved from the corresponding
668  // field in the MemIntrinsicInfo structure. That field contains
669  // non-negative values only.
670  int getMatchingId() const {
671  if (IsTargetMemInst) return Info.MatchingId;
672  return -1;
673  }
674 
675  Value *getPointerOperand() const {
676  if (IsTargetMemInst) return Info.PtrVal;
677  return getLoadStorePointerOperand(Inst);
678  }
679 
680  bool mayReadFromMemory() const {
681  if (IsTargetMemInst) return Info.ReadMem;
682  return Inst->mayReadFromMemory();
683  }
684 
685  bool mayWriteToMemory() const {
686  if (IsTargetMemInst) return Info.WriteMem;
687  return Inst->mayWriteToMemory();
688  }
689 
690  private:
691  bool IsTargetMemInst = false;
692  MemIntrinsicInfo Info;
693  Instruction *Inst;
694  };
695 
696  bool processNode(DomTreeNode *Node);
697 
698  bool handleBranchCondition(Instruction *CondInst, const BranchInst *BI,
699  const BasicBlock *BB, const BasicBlock *Pred);
700 
701  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
702  if (auto *LI = dyn_cast<LoadInst>(Inst))
703  return LI;
704  if (auto *SI = dyn_cast<StoreInst>(Inst))
705  return SI->getValueOperand();
706  assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
707  return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
708  ExpectedType);
709  }
710 
711  /// Return true if the instruction is known to only operate on memory
712  /// provably invariant in the given "generation".
713  bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);
714 
715  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
716  Instruction *EarlierInst, Instruction *LaterInst);
717 
718  void removeMSSA(Instruction *Inst) {
719  if (!MSSA)
720  return;
721  if (VerifyMemorySSA)
722  MSSA->verifyMemorySSA();
723  // Removing a store here can leave MemorySSA in an unoptimized state by
724  // creating MemoryPhis that have identical arguments and by creating
725  // MemoryUses whose defining access is not an actual clobber. The phi case
726  // is handled by MemorySSA when passing OptimizePhis = true to
727  // removeMemoryAccess. The non-optimized MemoryUse case is lazily updated
728  // by MemorySSA's getClobberingMemoryAccess.
729  MSSAUpdater->removeMemoryAccess(Inst, true);
730  }
731 };
732 
733 } // end anonymous namespace
734 
735 /// Determine if the memory referenced by LaterInst is from the same heap
736 /// version as EarlierInst.
737 /// This is currently called in two scenarios:
738 ///
739 /// load p
740 /// ...
741 /// load p
742 ///
743 /// and
744 ///
745 /// x = load p
746 /// ...
747 /// store x, p
748 ///
749 /// in both cases we want to verify that there are no possible writes to the
750 /// memory referenced by p between the earlier and later instruction.
751 bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
752  unsigned LaterGeneration,
753  Instruction *EarlierInst,
754  Instruction *LaterInst) {
755  // Check the simple memory generation tracking first.
756  if (EarlierGeneration == LaterGeneration)
757  return true;
758 
759  if (!MSSA)
760  return false;
761 
762  // If MemorySSA has determined that one of EarlierInst or LaterInst does not
763  // read/write memory, then we can safely return true here.
764  // FIXME: We could be more aggressive when checking doesNotAccessMemory(),
765  // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass
766  // by also checking the MemorySSA MemoryAccess on the instruction. Initial
767  // experiments suggest this isn't worthwhile, at least for C/C++ code compiled
768  // with the default optimization pipeline.
769  auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
770  if (!EarlierMA)
771  return true;
772  auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
773  if (!LaterMA)
774  return true;
775 
776  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
777  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
778  // EarlierInst and LaterInst and neither can any other write that potentially
779  // clobbers LaterInst.
780  MemoryAccess *LaterDef;
781  if (ClobberCounter < EarlyCSEMssaOptCap) {
782  LaterDef = MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
783  ClobberCounter++;
784  } else
785  LaterDef = LaterMA->getDefiningAccess();
786 
787  return MSSA->dominates(LaterDef, EarlierMA);
788 }
789 
790 bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
791  // A location loaded from with an invariant_load is assumed to *never* change
792  // within the visible scope of the compilation.
793  if (auto *LI = dyn_cast<LoadInst>(I))
794  if (LI->hasMetadata(LLVMContext::MD_invariant_load))
795  return true;
796 
797  auto MemLocOpt = MemoryLocation::getOrNone(I);
798  if (!MemLocOpt)
799  // "target" intrinsic forms of loads aren't currently known to
800  // MemoryLocation::get. TODO
801  return false;
802  MemoryLocation MemLoc = *MemLocOpt;
803  if (!AvailableInvariants.count(MemLoc))
804  return false;
805 
806  // Is the generation at which this became invariant older than the
807  // current one?
808  return AvailableInvariants.lookup(MemLoc) <= GenAt;
809 }
810 
811 bool EarlyCSE::handleBranchCondition(Instruction *CondInst,
812  const BranchInst *BI, const BasicBlock *BB,
813  const BasicBlock *Pred) {
814  assert(BI->isConditional() && "Should be a conditional branch!");
815  assert(BI->getCondition() == CondInst && "Wrong condition?");
816  assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
817  auto *TorF = (BI->getSuccessor(0) == BB)
818  ? ConstantInt::getTrue(BB->getContext())
819  : ConstantInt::getFalse(BB->getContext());
820  auto MatchBinOp = [](Instruction *I, unsigned Opcode) {
821  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(I))
822  return BOp->getOpcode() == Opcode;
823  return false;
824  };
825  // If the condition is AND operation, we can propagate its operands into the
826  // true branch. If it is OR operation, we can propagate them into the false
827  // branch.
828  unsigned PropagateOpcode =
829  (BI->getSuccessor(0) == BB) ? Instruction::And : Instruction::Or;
830 
831  bool MadeChanges = false;
832  SmallVector<Instruction *, 4> WorkList;
833  SmallPtrSet<Instruction *, 4> Visited;
834  WorkList.push_back(CondInst);
835  while (!WorkList.empty()) {
836  Instruction *Curr = WorkList.pop_back_val();
837 
838  AvailableValues.insert(Curr, TorF);
839  LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
840  << Curr->getName() << "' as " << *TorF << " in "
841  << BB->getName() << "\n");
842  if (!DebugCounter::shouldExecute(CSECounter)) {
843  LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
844  } else {
845  // Replace all dominated uses with the known value.
846  if (unsigned Count = replaceDominatedUsesWith(Curr, TorF, DT,
847  BasicBlockEdge(Pred, BB))) {
848  NumCSECVP += Count;
849  MadeChanges = true;
850  }
851  }
852 
853  if (MatchBinOp(Curr, PropagateOpcode))
854  for (auto &Op : cast<BinaryOperator>(Curr)->operands())
855  if (Instruction *OPI = dyn_cast<Instruction>(Op))
856  if (SimpleValue::canHandle(OPI) && Visited.insert(OPI).second)
857  WorkList.push_back(OPI);
858  }
859 
860  return MadeChanges;
861 }
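// For example, with "%cond = and i1 %a, %b" and "br i1 %cond, label %taken, label %other",
// entering %taken through this edge records %cond, %a and %b as 'true' and
// rewrites their dominated uses; for an 'or' condition the same propagation
// happens along the false edge instead.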
862 
863 bool EarlyCSE::processNode(DomTreeNode *Node) {
864  bool Changed = false;
865  BasicBlock *BB = Node->getBlock();
866 
867  // If this block has a single predecessor, then the predecessor is the parent
868  // of the domtree node and all of the live out memory values are still current
869  // in this block. If this block has multiple predecessors, then they could
870  // have invalidated the live-out memory values of our parent value. For now,
871  // just be conservative and invalidate memory if this block has multiple
872  // predecessors.
873  if (!BB->getSinglePredecessor())
874  ++CurrentGeneration;
875 
876  // If this node has a single predecessor which ends in a conditional branch,
877  // we can infer the value of the branch condition given that we took this
878  // path. We need the single predecessor to ensure there's not another path
879  // which reaches this block where the condition might hold a different
880  // value. Since we're adding this to the scoped hash table (like any other
881  // def), it will have been popped if we encounter a future merge block.
882  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
883  auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
884  if (BI && BI->isConditional()) {
885  auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
886  if (CondInst && SimpleValue::canHandle(CondInst))
887  Changed |= handleBranchCondition(CondInst, BI, BB, Pred);
888  }
889  }
890 
891  /// LastStore - Keep track of the last non-volatile store that we saw... for
892  /// as long as there is no instruction that reads memory. If we see a store
893  /// to the same location, we delete the dead store. This zaps trivial dead
894  /// stores which can occur in bitfield code among other things.
895  Instruction *LastStore = nullptr;
896 
897  // See if any instructions in the block can be eliminated. If so, do it. If
898  // not, add them to AvailableValues.
899  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
900  Instruction *Inst = &*I++;
901 
902  // Dead instructions should just be removed.
903  if (isInstructionTriviallyDead(Inst, &TLI)) {
904  LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
905  if (!DebugCounter::shouldExecute(CSECounter)) {
906  LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
907  continue;
908  }
909  if (!salvageDebugInfo(*Inst))
910  replaceDbgUsesWithUndef(Inst);
911  removeMSSA(Inst);
912  Inst->eraseFromParent();
913  Changed = true;
914  ++NumSimplify;
915  continue;
916  }
917 
918  // Skip assume intrinsics, they don't really have side effects (although
919  // they're marked as such to ensure preservation of control dependencies),
920  // and this pass will not bother with its removal. However, we should mark
921  // its condition as true for all dominated blocks.
922  if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
923  auto *CondI =
924  dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0));
925  if (CondI && SimpleValue::canHandle(CondI)) {
926  LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst
927  << '\n');
928  AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
929  } else
930  LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
931  continue;
932  }
933 
934  // Skip sideeffect intrinsics, for the same reason as assume intrinsics.
935  if (match(Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
936  LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n');
937  continue;
938  }
939 
940  // We can skip all invariant.start intrinsics since they only read memory,
941  // and we can forward values across it. For invariant starts without
942  // invariant ends, we can use the fact that the invariantness never ends to
943  // start a scope in the current generation which is true for all future
944  // generations. Also, we don't need to consume the last store since the
945  // semantics of invariant.start allow us to perform DSE of the last
946  // store, if there was a store following invariant.start. Consider:
947  //
948  // store 30, i8* p
949  // invariant.start(p)
950  // store 40, i8* p
951  // We can DSE the store to 30, since the store 40 to invariant location p
952  // causes undefined behaviour.
953  if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
954  // If there are any uses, the scope might end.
955  if (!Inst->use_empty())
956  continue;
957  auto *CI = cast<CallInst>(Inst);
958  MemoryLocation MemLoc = MemoryLocation::getForArgument(CI, 1, TLI);
959  // Don't start a scope if we already have a better one pushed
960  if (!AvailableInvariants.count(MemLoc))
961  AvailableInvariants.insert(MemLoc, CurrentGeneration);
962  continue;
963  }
964 
965  if (isGuard(Inst)) {
966  if (auto *CondI =
967  dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
968  if (SimpleValue::canHandle(CondI)) {
969  // Do we already know the actual value of this condition?
970  if (auto *KnownCond = AvailableValues.lookup(CondI)) {
971  // Is the condition known to be true?
972  if (isa<ConstantInt>(KnownCond) &&
973  cast<ConstantInt>(KnownCond)->isOne()) {
974  LLVM_DEBUG(dbgs()
975  << "EarlyCSE removing guard: " << *Inst << '\n');
976  removeMSSA(Inst);
977  Inst->eraseFromParent();
978  Changed = true;
979  continue;
980  } else
981  // Use the known value if it wasn't true.
982  cast<CallInst>(Inst)->setArgOperand(0, KnownCond);
983  }
984  // The condition we're on guarding here is true for all dominated
985  // locations.
986  AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
987  }
988  }
989 
990  // Guard intrinsics read all memory, but don't write any memory.
991  // Accordingly, don't update the generation but consume the last store (to
992  // avoid an incorrect DSE).
993  LastStore = nullptr;
994  continue;
995  }
996 
997  // If the instruction can be simplified (e.g. X+0 = X) then replace it with
998  // its simpler value.
999  if (Value *V = SimplifyInstruction(Inst, SQ)) {
1000  LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V
1001  << '\n');
1002  if (!DebugCounter::shouldExecute(CSECounter)) {
1003  LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
1004  } else {
1005  bool Killed = false;
1006  if (!Inst->use_empty()) {
1007  Inst->replaceAllUsesWith(V);
1008  Changed = true;
1009  }
1010  if (isInstructionTriviallyDead(Inst, &TLI)) {
1011  removeMSSA(Inst);
1012  Inst->eraseFromParent();
1013  Changed = true;
1014  Killed = true;
1015  }
1016  if (Changed)
1017  ++NumSimplify;
1018  if (Killed)
1019  continue;
1020  }
1021  }
1022 
1023  // If this is a simple instruction that we can value number, process it.
1024  if (SimpleValue::canHandle(Inst)) {
1025  // See if the instruction has an available value. If so, use it.
1026  if (Value *V = AvailableValues.lookup(Inst)) {
1027  LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V
1028  << '\n');
1029  if (!DebugCounter::shouldExecute(CSECounter)) {
1030  LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
1031  continue;
1032  }
1033  if (auto *I = dyn_cast<Instruction>(V))
1034  I->andIRFlags(Inst);
1035  Inst->replaceAllUsesWith(V);
1036  removeMSSA(Inst);
1037  Inst->eraseFromParent();
1038  Changed = true;
1039  ++NumCSE;
1040  continue;
1041  }
1042 
1043  // Otherwise, just remember that this value is available.
1044  AvailableValues.insert(Inst, Inst);
1045  continue;
1046  }
1047 
1048  ParseMemoryInst MemInst(Inst, TTI);
1049  // If this is a non-volatile load, process it.
1050  if (MemInst.isValid() && MemInst.isLoad()) {
1051  // (conservatively) we can't peek past the ordering implied by this
1052  // operation, but we can add this load to our set of available values
1053  if (MemInst.isVolatile() || !MemInst.isUnordered()) {
1054  LastStore = nullptr;
1055  ++CurrentGeneration;
1056  }
1057 
1058  if (MemInst.isInvariantLoad()) {
1059  // If we pass an invariant load, we know that memory location is
1060  // indefinitely constant from the moment of first dereferenceability.
1061  // We conservatively treat the invariant_load as that moment. If we
1062  // pass an invariant load after already establishing a scope, don't
1063  // restart it since we want to preserve the earliest point seen.
1064  auto MemLoc = MemoryLocation::get(Inst);
1065  if (!AvailableInvariants.count(MemLoc))
1066  AvailableInvariants.insert(MemLoc, CurrentGeneration);
1067  }
1068 
1069  // If we have an available version of this load, and if it is the right
1070  // generation or the load is known to be from an invariant location,
1071  // replace this instruction.
1072  //
1073  // If either the dominating load or the current load are invariant, then
1074  // we can assume the current load loads the same value as the dominating
1075  // load.
1076  LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
1077  if (InVal.DefInst != nullptr &&
1078  InVal.MatchingId == MemInst.getMatchingId() &&
1079  // We don't yet handle removing loads with ordering of any kind.
1080  !MemInst.isVolatile() && MemInst.isUnordered() &&
1081  // We can't replace an atomic load with one which isn't also atomic.
1082  InVal.IsAtomic >= MemInst.isAtomic() &&
1083  (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
1084  isSameMemGeneration(InVal.Generation, CurrentGeneration,
1085  InVal.DefInst, Inst))) {
1086  Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
1087  if (Op != nullptr) {
1088  LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
1089  << " to: " << *InVal.DefInst << '\n');
1090  if (!DebugCounter::shouldExecute(CSECounter)) {
1091  LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
1092  continue;
1093  }
1094  if (!Inst->use_empty())
1095  Inst->replaceAllUsesWith(Op);
1096  removeMSSA(Inst);
1097  Inst->eraseFromParent();
1098  Changed = true;
1099  ++NumCSELoad;
1100  continue;
1101  }
1102  }
1103 
1104  // Otherwise, remember that we have this instruction.
1105  AvailableLoads.insert(
1106  MemInst.getPointerOperand(),
1107  LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
1108  MemInst.isAtomic()));
1109  LastStore = nullptr;
1110  continue;
1111  }
1112 
1113  // If this instruction may read from memory or throw (and potentially read
1114  // from memory in the exception handler), forget LastStore. Load/store
1115  // intrinsics will indicate both a read and a write to memory. The target
1116  // may override this (e.g. so that a store intrinsic does not read from
1117  // memory, and thus will be treated the same as a regular store for
1118  // commoning purposes).
1119  if ((Inst->mayReadFromMemory() || Inst->mayThrow()) &&
1120  !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
1121  LastStore = nullptr;
1122 
1123  // If this is a read-only call, process it.
1124  if (CallValue::canHandle(Inst)) {
1125  // If we have an available version of this call, and if it is the right
1126  // generation, replace this instruction.
1127  std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
1128  if (InVal.first != nullptr &&
1129  isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
1130  Inst)) {
1131  LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
1132  << " to: " << *InVal.first << '\n');
1133  if (!DebugCounter::shouldExecute(CSECounter)) {
1134  LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
1135  continue;
1136  }
1137  if (!Inst->use_empty())
1138  Inst->replaceAllUsesWith(InVal.first);
1139  removeMSSA(Inst);
1140  Inst->eraseFromParent();
1141  Changed = true;
1142  ++NumCSECall;
1143  continue;
1144  }
1145 
1146  // Otherwise, remember that we have this instruction.
1147  AvailableCalls.insert(
1148  Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
1149  continue;
1150  }
1151 
1152  // A release fence requires that all stores complete before it, but does
1153  // not prevent the reordering of following loads 'before' the fence. As a
1154  // result, we don't need to consider it as writing to memory and don't need
1155  // to advance the generation. We do need to prevent DSE across the fence,
1156  // but that's handled above.
1157  if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
1158  if (FI->getOrdering() == AtomicOrdering::Release) {
1159  assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
1160  continue;
1161  }
1162 
1163  // write back DSE - If we write back the same value we just loaded from
1164  // the same location and haven't passed any intervening writes or ordering
1165  // operations, we can remove the write. The primary benefit is in allowing
1166  // the available load table to remain valid and value forward past where
1167  // the store originally was.
1168  if (MemInst.isValid() && MemInst.isStore()) {
1169  LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
1170  if (InVal.DefInst &&
1171  InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) &&
1172  InVal.MatchingId == MemInst.getMatchingId() &&
1173  // We don't yet handle removing stores with ordering of any kind.
1174  !MemInst.isVolatile() && MemInst.isUnordered() &&
1175  (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
1176  isSameMemGeneration(InVal.Generation, CurrentGeneration,
1177  InVal.DefInst, Inst))) {
1178  // It is okay to have a LastStore to a different pointer here if MemorySSA
1179  // tells us that the load and store are from the same memory generation.
1180  // In that case, LastStore should keep its present value since we're
1181  // removing the current store.
1182  assert((!LastStore ||
1183  ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
1184  MemInst.getPointerOperand() ||
1185  MSSA) &&
1186  "can't have an intervening store if not using MemorySSA!");
1187  LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
1188  if (!DebugCounter::shouldExecute(CSECounter)) {
1189  LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
1190  continue;
1191  }
1192  removeMSSA(Inst);
1193  Inst->eraseFromParent();
1194  Changed = true;
1195  ++NumDSE;
1196  // We can avoid incrementing the generation count since we were able
1197  // to eliminate this store.
1198  continue;
1199  }
1200  }
1201 
1202  // Okay, this isn't something we can CSE at all. Check to see if it is
1203  // something that could modify memory. If so, our available memory values
1204  // cannot be used so bump the generation count.
1205  if (Inst->mayWriteToMemory()) {
1206  ++CurrentGeneration;
1207 
1208  if (MemInst.isValid() && MemInst.isStore()) {
1209  // We do a trivial form of DSE if there are two stores to the same
1210  // location with no intervening loads. Delete the earlier store.
1211  // At the moment, we don't remove ordered stores, but do remove
1212  // unordered atomic stores. There's no special requirement (for
1213  // unordered atomics) about removing atomic stores only in favor of
1214  // other atomic stores since we were going to execute the non-atomic
1215  // one anyway and the atomic one might never have become visible.
1216  if (LastStore) {
1217  ParseMemoryInst LastStoreMemInst(LastStore, TTI);
1218  assert(LastStoreMemInst.isUnordered() &&
1219  !LastStoreMemInst.isVolatile() &&
1220  "Violated invariant");
1221  if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
1222  LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
1223  << " due to: " << *Inst << '\n');
1224  if (!DebugCounter::shouldExecute(CSECounter)) {
1225  LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
1226  } else {
1227  removeMSSA(LastStore);
1228  LastStore->eraseFromParent();
1229  Changed = true;
1230  ++NumDSE;
1231  LastStore = nullptr;
1232  }
1233  }
1234  // fallthrough - we can exploit information about this store
1235  }
1236 
1237  // Okay, we just invalidated anything we knew about loaded values. Try
1238  // to salvage *something* by remembering that the stored value is a live
1239  // version of the pointer. It is safe to forward from volatile stores
1240  // to non-volatile loads, so we don't have to check for volatility of
1241  // the store.
1242  AvailableLoads.insert(
1243  MemInst.getPointerOperand(),
1244  LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
1245  MemInst.isAtomic()));
1246 
1247  // Remember that this was the last unordered store we saw for DSE. We
1248  // don't yet handle DSE on ordered or volatile stores since we don't
1249  // have a good way to model the ordering requirement for following
1250  // passes once the store is removed. We could insert a fence, but
1251  // since fences are slightly stronger than stores in their ordering,
1252  // it's not clear this is a profitable transform. Another option would
1253  // be to merge the ordering with that of the post dominating store.
1254  if (MemInst.isUnordered() && !MemInst.isVolatile())
1255  LastStore = Inst;
1256  else
1257  LastStore = nullptr;
1258  }
1259  }
1260  }
1261 
1262  return Changed;
1263 }
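// For example, within a single block processNode rewrites
//   %x = add i32 %a, %b
//   %y = add i32 %a, %b        ; replaced by %x (simple-value CSE)
//   %v = load i32, i32* %p
//   store i32 %v, i32* %p      ; redundant write-back, removed (trivial DSE)
// leaving only the first add and the load.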
1264 
1265 bool EarlyCSE::run() {
1266  // Note, deque is being used here because there is significant performance
1267  // gains over vector when the container becomes very large due to the
1268  // specific access patterns. For more information see the mailing list
1269  // discussion on this:
1270  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
1271  std::deque<StackNode *> nodesToProcess;
1272 
1273  bool Changed = false;
1274 
1275  // Process the root node.
1276  nodesToProcess.push_back(new StackNode(
1277  AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
1278  CurrentGeneration, DT.getRootNode(),
1279  DT.getRootNode()->begin(), DT.getRootNode()->end()));
1280 
1281  assert(!CurrentGeneration && "Create a new EarlyCSE instance to rerun it.");
1282 
1283  // Process the stack.
1284  while (!nodesToProcess.empty()) {
1285  // Grab the first item off the stack. Set the current generation, remove
1286  // the node from the stack, and process it.
1287  StackNode *NodeToProcess = nodesToProcess.back();
1288 
1289  // Initialize class members.
1290  CurrentGeneration = NodeToProcess->currentGeneration();
1291 
1292  // Check if the node needs to be processed.
1293  if (!NodeToProcess->isProcessed()) {
1294  // Process the node.
1295  Changed |= processNode(NodeToProcess->node());
1296  NodeToProcess->childGeneration(CurrentGeneration);
1297  NodeToProcess->process();
1298  } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
1299  // Push the next child onto the stack.
1300  DomTreeNode *child = NodeToProcess->nextChild();
1301  nodesToProcess.push_back(
1302  new StackNode(AvailableValues, AvailableLoads, AvailableInvariants,
1303  AvailableCalls, NodeToProcess->childGeneration(),
1304  child, child->begin(), child->end()));
1305  } else {
1306  // It has been processed, and there are no more children to process,
1307  // so delete it and pop it off the stack.
1308  delete NodeToProcess;
1309  nodesToProcess.pop_back();
1310  }
1311  } // while (!nodes...)
1312 
1313  return Changed;
1314 }
1315 
1316 PreservedAnalyses EarlyCSEPass::run(Function &F,
1317  FunctionAnalysisManager &AM) {
1318  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1319  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
1320  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1321  auto &AC = AM.getResult<AssumptionAnalysis>(F);
1322  auto *MSSA =
1323  UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;
1324 
1325  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);
1326 
1327  if (!CSE.run())
1328  return PreservedAnalyses::all();
1329 
1330  PreservedAnalyses PA;
1331  PA.preserveSet<CFGAnalyses>();
1332  PA.preserve<GlobalsAA>();
1333  if (UseMemorySSA)
1334  PA.preserve<MemorySSAAnalysis>();
1335  return PA;
1336 }
1337 
1338 namespace {
1339 
1340 /// A simple and fast domtree-based CSE pass.
1341 ///
1342 /// This pass does a simple depth-first walk over the dominator tree,
1343 /// eliminating trivially redundant instructions and using instsimplify to
1344 /// canonicalize things as it goes. It is intended to be fast and catch obvious
1345 /// cases so that instcombine and other passes are more effective. It is
1346 /// expected that a later pass of GVN will catch the interesting/hard cases.
1347 template<bool UseMemorySSA>
1348 class EarlyCSELegacyCommonPass : public FunctionPass {
1349 public:
1350  static char ID;
1351 
1352  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
1353  if (UseMemorySSA)
1354  initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
1355  else
1356  initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
1357  }
1358 
1359  bool runOnFunction(Function &F) override {
1360  if (skipFunction(F))
1361  return false;
1362 
1363  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1364  auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1365  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1366  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1367  auto *MSSA =
1368  UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;
1369 
1370  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);
1371 
1372  return CSE.run();
1373  }
1374 
1375  void getAnalysisUsage(AnalysisUsage &AU) const override {
1376  AU.addRequired<AssumptionCacheTracker>();
1377  AU.addRequired<DominatorTreeWrapperPass>();
1378  AU.addRequired<TargetLibraryInfoWrapperPass>();
1379  AU.addRequired<TargetTransformInfoWrapperPass>();
1380  if (UseMemorySSA) {
1381  AU.addRequired<MemorySSAWrapperPass>();
1382  AU.addPreserved<MemorySSAWrapperPass>();
1383  }
1384  AU.addPreserved<GlobalsAAWrapperPass>();
1385  AU.setPreservesCFG();
1386  }
1387 };
1388 
1389 } // end anonymous namespace
1390 
1391 using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;
1392 
1393 template<>
1394 char EarlyCSELegacyPass::ID = 0;
1395 
1396 INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
1397  false)
1398 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1399 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1400 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1401 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
1402 INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)
1403 
1404 using EarlyCSEMemSSALegacyPass =
1405  EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;
1406 
1407 template<>
1408 char EarlyCSEMemSSALegacyPass::ID = 0;
1409 
1410 FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
1411  if (UseMemorySSA)
1412  return new EarlyCSEMemSSALegacyPass();
1413  else
1414  return new EarlyCSELegacyPass();
1415 }
1416 
1417 INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
1418  "Early CSE w/ MemorySSA", false, false)
1419 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1420 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1421 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1422 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
1423 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
1424 INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
1425  "Early CSE w/ MemorySSA", false, false)
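A minimal usage sketch (an illustration, not part of this file), assuming the standard legacy pass-manager API; the createEarlyCSEPass factory it uses is the one defined above:

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h" // declares llvm::createEarlyCSEPass

// Run EarlyCSE (backed by MemorySSA) over a module with the legacy pass manager.
void runEarlyCSE(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createEarlyCSEPass(/*UseMemorySSA=*/true));
  PM.run(M);
}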
Legacy wrapper pass to provide the GlobalsAAResult object.
void initializeEarlyCSELegacyPassPass(PassRegistry &)
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks &#39;this&#39; from the containing basic block and deletes it.
Definition: Instruction.cpp:67
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:233
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:616
static SimpleValue getTombstoneKey()
Definition: EarlyCSE.cpp:129
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:70
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:722
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
Definition: PatternMatch.h:78
This instruction extracts a struct member or array element value from an aggregate value...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
Unsigned minimum.
Atomic ordering constants.
bool VerifyMemorySSA
Enables verification of MemorySSA.
Definition: MemorySSA.cpp:83
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:776
This class represents lattice values for constants.
Definition: AllocatorList.h:23
bool isAtomic() const
Return true if this instruction has an AtomicOrdering of unordered or higher.
This is the interface for a simple mod/ref and alias analysis over globals.
An instruction for ordering other memory operations.
Definition: Instructions.h:454
value_op_iterator value_op_begin()
Definition: User.h:255
This class represents a function call, abstracting a target machine&#39;s calling convention.
An immutable pass that tracks lazily created AssumptionCache objects.
bool mayWriteToMemory() const
Return true if this instruction may modify memory.
static unsigned getHashValueImpl(SimpleValue Val)
Definition: EarlyCSE.cpp:165
A cache of @llvm.assume calls within a function.
Analysis pass providing the TargetTransformInfo.
bool salvageDebugInfo(Instruction &I)
Assuming the instruction I is going to be deleted, attempt to salvage debug users of I by writing the...
Definition: Local.cpp:1605
static CallValue getTombstoneKey()
Definition: EarlyCSE.cpp:404
bool replaceDbgUsesWithUndef(Instruction *I)
Replace all the uses of an SSA value in .dbg intrinsics with undef.
Definition: Local.cpp:489
value_op_iterator value_op_end()
Definition: User.h:258
BasicBlock * getSuccessor(unsigned i) const
STATISTIC(NumFunctions, "Total number of functions")
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:230
F(f)
block Block Frequency true
An instruction for reading from memory.
Definition: Instructions.h:167
Value * getCondition() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:144
This defines the Use class.
static Optional< MemoryLocation > getOrNone(const Instruction *Inst)
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of &#39;From&#39; with &#39;To&#39; if that use is dominated by the given edge.
Definition: Local.cpp:2502
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:32
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Run the pass over the function.
Definition: EarlyCSE.cpp:1316
This file defines the MallocAllocator and BumpPtrAllocator interfaces.
Signed maximum.
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:273
bool isIdenticalTo(const Instruction *I) const
Return true if the specified instruction is exactly identical to the current one. ...
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:47
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
AnalysisUsage & addRequired()
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:50
Legacy analysis pass which computes MemorySSA.
Definition: MemorySSA.h:965
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, OGT -> OLE, OLT -> UGE, etc.
Definition: InstrTypes.h:831
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:369
Absolute value.
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:439
separate-const-offset-from-gep: Split GEPs to a variadic base and a constant offset for better CSE
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition: MemorySSA.h:703
static bool isLoad(int Opcode)
static CallValue getEmptyKey()
Definition: EarlyCSE.cpp:400
RecyclingAllocator - This class wraps an Allocator, adding the functionality of recycling deleted obj...
static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx, const TargetLibraryInfo *TLI)
Return a location representing a particular argument of a call.
This file provides an implementation of debug counters.
static void cse(BasicBlock *BB)
Perform cse of induction variable instructions.
static bool matchSelectWithOptionalNotCond(Value *V, Value *&Cond, Value *&A, Value *&B, SelectPatternFlavor &Flavor)
Match a 'select' including an optional 'not' of the condition.
Definition: EarlyCSE.cpp:140
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
static bool isEqual(const Function &Caller, const Function &Callee)
This file provides the interface for a simple, fast CSE pass.
early-cse-memssa
Definition: EarlyCSE.cpp:1424
static cl::opt< bool > EarlyCSEDebugHash("earlycse-debug-hash", cl::init(false), cl::Hidden, cl::desc("Perform extra assertion checking to verify that SimpleValue's hash " "function is well-behaved w.r.t. its isEqual predicate"))
void andIRFlags(const Value *V)
Logical 'and' of any supported wrapping, exact, and fast-math flags of V and this instruction...
static bool isStore(int Opcode)
static cl::opt< unsigned > EarlyCSEMssaOptCap("earlycse-mssa-optimization-cap", cl::init(500), cl::Hidden, cl::desc("Enable imprecision in EarlyCSE in pathological cases, in exchange " "for faster compile. Caps the MemorySSA clobbering calls."))
SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:125
An instruction for storing to memory.
Definition: Instructions.h:320
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:429
Optimize for code generation
INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false) using EarlyCSEMemSSALegacyPass
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:144
Value * getOperand(unsigned i) const
Definition: User.h:169
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
BumpPtrAllocatorImpl BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition: Allocator.h:434
NodeT * getBlock() const
static bool runOnFunction(Function &F, bool PostInlining)
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Wrapper pass for TargetTransformInfo.
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:153
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:240
bool isIdenticalToWhenDefined(const Instruction *I) const
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags, which may specify conditions under which the instruction's result is undefined.
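A rough sketch of how isIdenticalToWhenDefined supports this kind of value numbering; the helper is illustrative only:

#include "llvm/IR/Instruction.h"
using namespace llvm;

// Sketch: a conservative "do these compute the same value?" check.
// isIdenticalToWhenDefined compares opcode, type and operands but
// ignores flags like nsw/nuw that only affect poison behavior.
static bool computesSameValue(const Instruction *A, const Instruction *B) {
  return A == B ||
         (A->getOpcode() == B->getOpcode() && A->isIdenticalToWhenDefined(B));
}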
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
Conditional or Unconditional Branch instruction.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static SimpleValue getEmptyKey()
Definition: EarlyCSE.cpp:125
This file contains the declarations for the subclasses of Constant, which represent the different fla...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:370
bool mayThrow() const
Return true if this instruction may throw an exception.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:576
Represent the analysis usage information of a pass.
Analysis pass providing a never-invalidated alias analysis result.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:732
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:284
static bool shouldExecute(unsigned CounterName)
Definition: DebugCounter.h:73
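DEBUG_COUNTER and DebugCounter::shouldExecute work as a pair for bisecting which transformations fire; a minimal sketch with an illustrative counter and pass name:

#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "my-pass"

// Declares a named counter that can be driven from the command line,
// e.g. -debug-counter=my-counter-skip=10,my-counter-count=1.
DEBUG_COUNTER(MyCounter, "my-counter", "Controls which candidates are transformed");

static bool maybeTransform() {
  // With counters enabled, only the selected window of calls returns
  // true, which helps bisect a miscompile down to one transformation.
  if (!DebugCounter::shouldExecute(MyCounter)) {
    LLVM_DEBUG(dbgs() << "Skipping, debug counter says no\n");
    return false;
  }
  // ... perform the transformation here ...
  return true;
}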
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:159
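The PreservedAnalyses entries in this index combine in a standard way at the end of a new-pass-manager function pass; a sketch of the usual pattern (MyPass and the elided body are illustrative):

#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/IR/PassManager.h"
using namespace llvm;

struct MyPass : PassInfoMixin<MyPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    bool Changed = false;
    // ... transform F, setting Changed as appropriate ...
    if (!Changed)
      return PreservedAnalyses::all(); // nothing was invalidated
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>(); // the CFG was not modified
    PA.preserve<GlobalsAA>();      // globals alias info is still valid
    return PA;
  }
};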
static bool isAtomic(Instruction *I)
INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE, "Assign register bank of generic virtual registers", false, false) RegBankSelect
Floating point maxnum.
Representation for a specific memory location.
A function analysis which provides an AssumptionCache.
Iterator for intrusive lists based on ilist_node.
SelectPatternFlavor Flavor
void verifyMemorySSA() const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
Definition: MemorySSA.cpp:1869
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
iterator end()
Definition: BasicBlock.h:275
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
SelectPatternFlavor
Specific patterns of select instructions we can match.
Provides information about what library functions are available for the current target.
An analysis that produces MemorySSA for a function.
Definition: MemorySSA.h:926
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:374
bool isConditional() const
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:301
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:609
bool isGuard(const User *U)
Returns true iff U has semantics of a guard expressed in a form of call of llvm.experimental.guard intrinsic.
Definition: GuardUtils.cpp:17
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:940
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition: Hashing.h:600
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition: Hashing.h:478
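hash_combine and hash_combine_range are the usual way to fold several fields into one DenseMap hash; a small sketch in the spirit of the getHashValueImpl entry above:

#include "llvm/ADT/Hashing.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Sketch: hash an instruction by opcode, type and operand values.
static unsigned hashInstruction(Instruction *I) {
  hash_code Operands =
      hash_combine_range(I->value_op_begin(), I->value_op_end());
  return hash_combine(I->getOpcode(), I->getType(), Operands);
}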
Represents analyses that only rely on functions' control flow.
Definition: PassManager.h:114
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:807
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
void preserveSet()
Mark an analysis set as preserved.
Definition: PassManager.h:189
StringRef getName() const
Return a constant reference to the value&#39;s name.
Definition: Value.cpp:214
Value * getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType) const
bool onlyReadsMemory(unsigned OpNo) const
Definition: InstrTypes.h:1557
#define I(x, y, z)
Definition: MD5.cpp:58
bool mayReadFromMemory() const
Return true if this instruction may read memory.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:332
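dyn_cast is the canonical way such type tests are written, returning null on a mismatch so the test and the downcast are one step; a tiny sketch:

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Sketch: fetch the value being written if (and only if) I is a store.
static Value *getStoredValueOrNull(Instruction *I) {
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getValueOperand();
  return nullptr; // not a StoreInst
}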
static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS)
Definition: EarlyCSE.cpp:265
void preserve()
Mark an analysis as preserved.
Definition: PassManager.h:174
DEBUG_COUNTER(CSECounter, "early-cse", "Controls which instructions are removed")
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
Signed minimum.
EarlyCSELegacyCommonPass< false > EarlyCSELegacyPass
Definition: EarlyCSE.cpp:1391
Analysis pass providing the TargetLibraryInfo.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isSentinel(const DWARFDebugNames::AttributeEncoding &AE)
bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:575
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction has no side ef...
Definition: Local.cpp:359
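A common cleanup loop built on isInstructionTriviallyDead, sketched assuming a TargetLibraryInfo pointer is available (the helper name is illustrative):

#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// Sketch: erase instructions whose results are unused and which have no
// side effects. A single forward pass may need to be repeated to catch
// operands that only become dead once their users are gone.
static bool removeDeadInsts(BasicBlock &BB, const TargetLibraryInfo *TLI) {
  bool Changed = false;
  for (auto It = BB.begin(), End = BB.end(); It != End;) {
    Instruction *I = &*It++; // advance first so erasing I is safe
    if (isInstructionTriviallyDead(I, TLI)) {
      I->eraseFromParent();
      Changed = true;
    }
  }
  return Changed;
}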
LLVM Value Representation.
Definition: Value.h:73
typename std::vector< DomTreeNodeBase *>::iterator iterator
void initializeEarlyCSEMemSSALegacyPassPass(PassRegistry &)
bool isEqual(const GCNRPTracker::LiveRegSet &S1, const GCNRPTracker::LiveRegSet &S2)
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
Definition: EarlyCSE.cpp:1410
A container for analyses that lazily runs them and caches their results.
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:259
This pass exposes codegen information to IR-level passes.
static bool isVolatile(Instruction *Inst)
This header defines various interfaces for pass management in LLVM.
#define LLVM_DEBUG(X)
Definition: Debug.h:122
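LLVM_DEBUG and dbgs() compile away in builds without assertions; a minimal sketch (the DEBUG_TYPE string here is just an example):

#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "my-pass"

static void noteSkipped(unsigned N) {
  // Printed only in +Asserts builds, and only when -debug or
  // -debug-only=my-pass is passed to the tool.
  LLVM_DEBUG(dbgs() << "skipped " << N << " candidates\n");
}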
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
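SimplifyInstruction is typically paired with replaceAllUsesWith and erasure when it succeeds; a sketch assuming a SimplifyQuery has already been built from the usual analyses:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Sketch: fold I to an existing value if instruction simplification can
// prove them equal, then drop the now-dead instruction.
static bool foldIfSimplifiable(Instruction &I, const SimplifyQuery &SQ) {
  Value *V = SimplifyInstruction(&I, SQ);
  if (!V)
    return false;
  I.replaceAllUsesWith(V);
  I.eraseFromParent();
  return true;
}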
Information about a load/store intrinsic defined by the target.
bool use_empty() const
Definition: Value.h:342
BinaryOp_match< ValTy, cst_pred_ty< is_all_ones >, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:43
This instruction inserts a struct field of array element value into an aggregate value.