LLVM 15.0.0git
InstCombineAddSub.cpp
//===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for add, fadd, sub, and fsub.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <cassert>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

namespace {

/// Class representing the coefficient of a floating-point addend.
/// This class needs to be highly efficient; this is especially true of
/// the constructor. As of this writing, the cost of the default
/// constructor is merely a 4-byte zero store (assuming the compiler is
/// able to perform write-merging).
///
class FAddendCoef {
public:
  // The constructor has to initialize an APFloat, which is unnecessary for
  // most addends, whose coefficient is either 1 or -1. So the constructor
  // is expensive. In order to avoid that cost, we reuse instances whenever
  // possible. The pre-created instances FAddCombine::Add[0-5] embody this
  // idea.
  FAddendCoef() = default;
  ~FAddendCoef();

  // If possible, don't define operator+/operator- etc., because these
  // operators inevitably call FAddendCoef's constructor, which is not cheap.
  void operator=(const FAddendCoef &A);
  void operator+=(const FAddendCoef &A);
  void operator*=(const FAddendCoef &S);

  void set(short C) {
    assert(!insaneIntVal(C) && "Insane coefficient");
    IsFp = false; IntVal = C;
  }

  void set(const APFloat &C);

  void negate();

  bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
  Value *getValue(Type *) const;

  bool isOne() const { return isInt() && IntVal == 1; }
  bool isTwo() const { return isInt() && IntVal == 2; }
  bool isMinusOne() const { return isInt() && IntVal == -1; }
  bool isMinusTwo() const { return isInt() && IntVal == -2; }

private:
  bool insaneIntVal(int V) { return V > 4 || V < -4; }

  APFloat *getFpValPtr() { return reinterpret_cast<APFloat *>(&FpValBuf); }

  const APFloat *getFpValPtr() const {
    return reinterpret_cast<const APFloat *>(&FpValBuf);
  }

  const APFloat &getFpVal() const {
    assert(IsFp && BufHasFpVal && "Incorrect state");
    return *getFpValPtr();
  }

  APFloat &getFpVal() {
    assert(IsFp && BufHasFpVal && "Incorrect state");
    return *getFpValPtr();
  }

  bool isInt() const { return !IsFp; }

  // If the coefficient is represented by an integer, promote it to a
  // floating point.
  void convertToFpType(const fltSemantics &Sem);

  // Construct an APFloat from a signed integer.
  // TODO: We should get rid of this function when APFloat can be constructed
  // from a *SIGNED* integer.
  APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);

  bool IsFp = false;

  // True iff FpValBuf contains an instance of APFloat.
  bool BufHasFpVal = false;

  // The integer coefficient of an individual addend is either 1 or -1,
  // and we try to simplify at most 4 addends from at most two neighboring
  // instructions. So the range of <IntVal> falls in [-4, 4]. APInt is
  // overkill for this purpose.
  short IntVal = 0;

  AlignedCharArrayUnion<APFloat> FpValBuf;
};

/// FAddend is used to represent a floating-point addend. An addend is
/// represented as <C, V>, where V is a symbolic value, and C is a
/// constant coefficient. A constant addend is represented as <C, 0>.
class FAddend {
public:
  FAddend() = default;

  void operator+=(const FAddend &T) {
    assert((Val == T.Val) && "Symbolic-values disagree");
    Coeff += T.Coeff;
  }

  Value *getSymVal() const { return Val; }
  const FAddendCoef &getCoef() const { return Coeff; }

  bool isConstant() const { return Val == nullptr; }
  bool isZero() const { return Coeff.isZero(); }

  void set(short Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }
  void set(const APFloat &Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }
  void set(const ConstantFP *Coefficient, Value *V) {
    Coeff.set(Coefficient->getValueAPF());
    Val = V;
  }

  void negate() { Coeff.negate(); }

  /// Drill down the U-D chain one step to find the definition of V, and
  /// try to break the definition into one or two addends.
  static unsigned drillValueDownOneStep(Value *V, FAddend &A0, FAddend &A1);

  /// Similar to FAddend::drillDownOneStep() except that the value being
  /// split is the addend itself.
  unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;

private:
  void Scale(const FAddendCoef &ScaleAmt) { Coeff *= ScaleAmt; }

  // This addend has the value of "Coeff * Val".
  Value *Val = nullptr;
  FAddendCoef Coeff;
};
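
// For illustration (editor's note, not in the original source): under this
// representation, "%t = fmul float %x, 2.0" contributes the addend <2.0, %x>,
// and a bare constant operand 3.0 contributes <3.0, nullptr>, so the sum
// "%x * 2.0 + 3.0" is the addend list { <2.0, %x>, <3.0, nullptr> }.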

/// FAddCombine is the class for optimizing an unsafe fadd/fsub along
/// with at most two of its neighboring instructions.
///
class FAddCombine {
public:
  FAddCombine(InstCombiner::BuilderTy &B) : Builder(B) {}
  Value *simplify(Instruction *FAdd);

private:
  using AddendVect = SmallVector<const FAddend *, 4>;

  Value *simplifyFAdd(AddendVect &V, unsigned InstrQuota);

  /// Convert given addend to a Value
  Value *createAddendVal(const FAddend &A, bool &NeedNeg);

  /// Return the number of instructions needed to emit the N-ary addition.
  unsigned calcInstrNumber(const AddendVect &Vect);

  Value *createFSub(Value *Opnd0, Value *Opnd1);
  Value *createFAdd(Value *Opnd0, Value *Opnd1);
  Value *createFMul(Value *Opnd0, Value *Opnd1);
  Value *createFNeg(Value *V);
  Value *createNaryFAdd(const AddendVect &Opnds, unsigned InstrQuota);
  void createInstPostProc(Instruction *NewInst, bool NoNumber = false);

  // Debugging support is clustered here.
#ifndef NDEBUG
  unsigned CreateInstrNum;
  void initCreateInstNum() { CreateInstrNum = 0; }
  void incCreateInstNum() { CreateInstrNum++; }
#else
  void initCreateInstNum() {}
  void incCreateInstNum() {}
#endif

  InstCombiner::BuilderTy &Builder;
  Instruction *Instr = nullptr;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//
// Implementation of
//    {FAddendCoef, FAddend, FAddCombine}.
//
//===----------------------------------------------------------------------===//
FAddendCoef::~FAddendCoef() {
  if (BufHasFpVal)
    getFpValPtr()->~APFloat();
}

void FAddendCoef::set(const APFloat &C) {
  APFloat *P = getFpValPtr();

  if (isInt()) {
    // As the buffer is a meaningless byte stream, we cannot call
    // APFloat::operator=().
    new (P) APFloat(C);
  } else
    *P = C;

  IsFp = BufHasFpVal = true;
}

void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
  if (!isInt())
    return;

  APFloat *P = getFpValPtr();
  if (IntVal > 0)
    new (P) APFloat(Sem, IntVal);
  else {
    new (P) APFloat(Sem, 0 - IntVal);
    P->changeSign();
  }
  IsFp = BufHasFpVal = true;
}

APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
  if (Val >= 0)
    return APFloat(Sem, Val);

  APFloat T(Sem, 0 - Val);
  T.changeSign();

  return T;
}

void FAddendCoef::operator=(const FAddendCoef &That) {
  if (That.isInt())
    set(That.IntVal);
  else
    set(That.getFpVal());
}

void FAddendCoef::operator+=(const FAddendCoef &That) {
  APFloat::roundingMode RndMode = APFloat::rmNearestTiesToEven;
  if (isInt() == That.isInt()) {
    if (isInt())
      IntVal += That.IntVal;
    else
      getFpVal().add(That.getFpVal(), RndMode);
    return;
  }

  if (isInt()) {
    const APFloat &T = That.getFpVal();
    convertToFpType(T.getSemantics());
    getFpVal().add(T, RndMode);
    return;
  }

  APFloat &T = getFpVal();
  T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
}

void FAddendCoef::operator*=(const FAddendCoef &That) {
  if (That.isOne())
    return;

  if (That.isMinusOne()) {
    negate();
    return;
  }

  if (isInt() && That.isInt()) {
    int Res = IntVal * (int)That.IntVal;
    assert(!insaneIntVal(Res) && "Insane int value");
    IntVal = Res;
    return;
  }

  const fltSemantics &Semantic =
      isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();

  if (isInt())
    convertToFpType(Semantic);
  APFloat &F0 = getFpVal();

  if (That.isInt())
    F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
                APFloat::rmNearestTiesToEven);
  else
    F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
}

void FAddendCoef::negate() {
  if (isInt())
    IntVal = 0 - IntVal;
  else
    getFpVal().changeSign();
}

Value *FAddendCoef::getValue(Type *Ty) const {
  return isInt() ?
    ConstantFP::get(Ty, float(IntVal)) :
    ConstantFP::get(Ty->getContext(), getFpVal());
}

// The definition of <Val>        Addends
// =========================================
//  A + B                         <1, A>, <1, B>
//  A - B                         <1, A>, <-1, B>
//  0 - B                         <-1, B>
//  C * A                         <C, A>
//  A + C                         <1, A>, <C, NULL>
//  0 +/- 0                       <0, NULL> (corner case)
//
// Legend: A and B are not constant, C is constant
unsigned FAddend::drillValueDownOneStep
  (Value *Val, FAddend &Addend0, FAddend &Addend1) {
  Instruction *I = nullptr;
  if (!Val || !(I = dyn_cast<Instruction>(Val)))
    return 0;

  unsigned Opcode = I->getOpcode();

  if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
    ConstantFP *C0, *C1;
    Value *Opnd0 = I->getOperand(0);
    Value *Opnd1 = I->getOperand(1);
    if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
      Opnd0 = nullptr;

    if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
      Opnd1 = nullptr;

    if (Opnd0) {
      if (!C0)
        Addend0.set(1, Opnd0);
      else
        Addend0.set(C0, nullptr);
    }

    if (Opnd1) {
      FAddend &Addend = Opnd0 ? Addend1 : Addend0;
      if (!C1)
        Addend.set(1, Opnd1);
      else
        Addend.set(C1, nullptr);
      if (Opcode == Instruction::FSub)
        Addend.negate();
    }

    if (Opnd0 || Opnd1)
      return Opnd0 && Opnd1 ? 2 : 1;

    // Both operands are zero. Weird!
    Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
    return 1;
  }

  if (I->getOpcode() == Instruction::FMul) {
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
      Addend0.set(C, V1);
      return 1;
    }

    if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
      Addend0.set(C, V0);
      return 1;
    }
  }

  return 0;
}
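
// Editor's illustration (not in the original source): for
// "%t = fsub float %a, %b" this returns 2 with Addend0 = <1, %a> and
// Addend1 = <-1, %b>; for "%t = fmul float %a, 4.0" it returns 1 with
// Addend0 = <4.0, %a>.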

// Try to break *this* addend into two addends. e.g., suppose the addend is
// <2.3, V> and V = X + Y; by calling this function we obtain two addends,
// namely <2.3, X> and <2.3, Y>.
unsigned FAddend::drillAddendDownOneStep
  (FAddend &Addend0, FAddend &Addend1) const {
  if (isConstant())
    return 0;

  unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
  if (!BreakNum || Coeff.isOne())
    return BreakNum;

  Addend0.Scale(Coeff);

  if (BreakNum == 2)
    Addend1.Scale(Coeff);

  return BreakNum;
}

Value *FAddCombine::simplify(Instruction *I) {
  assert(I->hasAllowReassoc() && I->hasNoSignedZeros() &&
         "Expected 'reassoc'+'nsz' instruction");

  // Currently we are not able to handle vector types.
  if (I->getType()->isVectorTy())
    return nullptr;

  assert((I->getOpcode() == Instruction::FAdd ||
          I->getOpcode() == Instruction::FSub) && "Expect add/sub");

  // Save the instruction before calling other member functions.
  Instr = I;

  FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;

  unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);

  // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1.
  unsigned Opnd0_ExpNum = 0;
  unsigned Opnd1_ExpNum = 0;

  if (!Opnd0.isConstant())
    Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);

  // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1.
  if (OpndNum == 2 && !Opnd1.isConstant())
    Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);

  // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1
  if (Opnd0_ExpNum && Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0_0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    // Compute the instruction quota. We should save at least one instruction.
    unsigned InstQuota = 0;

    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
                 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;

    if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
      return R;
  }

  if (OpndNum != 2) {
    // The input instruction is "I = 0.0 +/- V". If "V" could have been
    // split into two addends, say "V = X - Y", the instruction would have
    // been optimized into "I = Y - X" in the previous steps.
    //
    const FAddendCoef &CE = Opnd0.getCoef();
    return CE.isOne() ? Opnd0.getSymVal() : nullptr;
  }

  // Step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
  if (Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1]
  if (Opnd0_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd1);
    AllOpnds.push_back(&Opnd0_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  return nullptr;
}

Value *FAddCombine::simplifyFAdd(AddendVect &Addends, unsigned InstrQuota) {
  unsigned AddendNum = Addends.size();
  assert(AddendNum <= 4 && "Too many addends");

  // For saving intermediate results.
  unsigned NextTmpIdx = 0;
  FAddend TmpResult[3];

  // Simplified addends are placed in <SimpVect>.
  AddendVect SimpVect;

  // The outer loop works on one symbolic value at a time. Suppose the input
  // addends are: <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
  // The symbolic values will be processed in this order: x, y, z.
  for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {

    const FAddend *ThisAddend = Addends[SymIdx];
    if (!ThisAddend) {
      // This addend was processed before.
      continue;
    }

    Value *Val = ThisAddend->getSymVal();

    // If the resulting expression has a constant addend, it is desirable for
    // that constant to reside at the top of the resulting expression tree.
    // Placing constants close to super-expr(s) will potentially reveal some
    // optimization opportunities in super-expr(s). We intentionally do not
    // implement that logic here; we instead rely on the later call to
    // SimplifyAssociativeOrCommutative.

    unsigned StartIdx = SimpVect.size();
    SimpVect.push_back(ThisAddend);

    // The inner loop collects addends sharing the same symbolic value, and
    // these addends will later be folded into a single addend. Following the
    // example above, if the symbolic value "y" is being processed, the inner
    // loop will collect the two addends "<b1,y>" and "<b2,y>", which will
    // later be folded into "<b1+b2, y>".
    for (unsigned SameSymIdx = SymIdx + 1;
         SameSymIdx < AddendNum; SameSymIdx++) {
      const FAddend *T = Addends[SameSymIdx];
      if (T && T->getSymVal() == Val) {
        // Set null such that the next iteration of the outer loop will not
        // process this addend again.
        Addends[SameSymIdx] = nullptr;
        SimpVect.push_back(T);
      }
    }

    // If multiple addends share the same symbolic value, fold them together.
    if (StartIdx + 1 != SimpVect.size()) {
      FAddend &R = TmpResult[NextTmpIdx++];
      R = *SimpVect[StartIdx];
      for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
        R += *SimpVect[Idx];

      // Pop all addends being folded and push the resulting folded addend.
      SimpVect.resize(StartIdx);
      if (!R.isZero()) {
        SimpVect.push_back(&R);
      }
    }
  }

  assert((NextTmpIdx <= array_lengthof(TmpResult) + 1) &&
         "out-of-bound access");

  Value *Result;
  if (!SimpVect.empty())
    Result = createNaryFAdd(SimpVect, InstrQuota);
  else {
    // The addition is folded to 0.0.
    Result = ConstantFP::get(Instr->getType(), 0.0);
  }

  return Result;
}
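
// Editor's illustration (not in the original source): given the addends
// { <1.0, %x>, <2.0, %y>, <-1.0, %x> }, the loops above fold the two %x
// entries into <0.0, %x>, which is dropped because it is zero; the surviving
// list { <2.0, %y> } is then handed to createNaryFAdd, which would emit the
// equivalent of "fmul float %y, 2.0".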

Value *FAddCombine::createNaryFAdd
  (const AddendVect &Opnds, unsigned InstrQuota) {
  assert(!Opnds.empty() && "Expect at least one addend");

  // Step 1: Check if the # of instructions needed exceeds the quota.

  unsigned InstrNeeded = calcInstrNumber(Opnds);
  if (InstrNeeded > InstrQuota)
    return nullptr;

  initCreateInstNum();

  // Step 2: Emit the N-ary addition.
  // Note that at most three instructions are involved in Fadd-InstCombine: the
  // addition in question, and at most two neighboring instructions.
  // The resulting optimized addition should have at least one fewer instruction
  // than the original addition expression tree. This implies that the resulting
  // N-ary addition has at most two instructions, and we don't need to worry
  // about tree-height when constructing the N-ary addition.

  Value *LastVal = nullptr;
  bool LastValNeedNeg = false;

  // Iterate the addends, creating fadd/fsub using two adjacent addends.
  for (const FAddend *Opnd : Opnds) {
    bool NeedNeg;
    Value *V = createAddendVal(*Opnd, NeedNeg);
    if (!LastVal) {
      LastVal = V;
      LastValNeedNeg = NeedNeg;
      continue;
    }

    if (LastValNeedNeg == NeedNeg) {
      LastVal = createFAdd(LastVal, V);
      continue;
    }

    if (LastValNeedNeg)
      LastVal = createFSub(V, LastVal);
    else
      LastVal = createFSub(LastVal, V);

    LastValNeedNeg = false;
  }

  if (LastValNeedNeg) {
    LastVal = createFNeg(LastVal);
  }

#ifndef NDEBUG
  assert(CreateInstrNum == InstrNeeded &&
         "Inconsistent instruction count");
#endif

  return LastVal;
}

Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFSub(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFNeg(Value *V) {
  Value *NewV = Builder.CreateFNeg(V);
  if (Instruction *I = dyn_cast<Instruction>(NewV))
    createInstPostProc(I, true); // fnegs don't receive instruction numbers.
  return NewV;
}

Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFAdd(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFMul(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
  NewInstr->setDebugLoc(Instr->getDebugLoc());

  // Keep track of the number of instructions created.
  if (!NoNumber)
    incCreateInstNum();

  // Propagate fast-math flags
  NewInstr->setFastMathFlags(Instr->getFastMathFlags());
}

// Return the number of instructions needed to emit the N-ary addition.
// NOTE: Keep this function in sync with createAddendVal().
unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
  unsigned OpndNum = Opnds.size();
  unsigned InstrNeeded = OpndNum - 1;

  // Adjust the number of instructions needed to emit the N-ary add.
  for (const FAddend *Opnd : Opnds) {
    if (Opnd->isConstant())
      continue;

    // The constant check above is really for a few special constant
    // coefficients.
    if (isa<UndefValue>(Opnd->getSymVal()))
      continue;

    const FAddendCoef &CE = Opnd->getCoef();
    // Let the addend be "c * x". If "c == +/-1", the value of the addend
    // is immediately available; otherwise, it needs exactly one instruction
    // to evaluate the value.
    if (!CE.isMinusOne() && !CE.isOne())
      InstrNeeded++;
  }
  return InstrNeeded;
}
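
// Editor's worked example (not in the original source): for the addend list
// { <1, %a>, <-1, %b>, <2.5, %c> } the base cost is 3 - 1 = 2 fadd/fsub
// instructions, plus one more to materialize "fmul float %c, 2.5" (its
// coefficient is neither +1 nor -1), so InstrNeeded == 3.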

// Input Addend         Value           NeedNeg(output)
// ================================================================
//  Constant C           C              false
//  <+/-1, V>            V              coefficient is -1
//  <2/-2, V>            "fadd V, V"    coefficient is -2
//  <C, V>               "fmul V, C"    false
//
// NOTE: Keep this function in sync with FAddCombine::calcInstrNumber.
Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
  const FAddendCoef &Coeff = Opnd.getCoef();

  if (Opnd.isConstant()) {
    NeedNeg = false;
    return Coeff.getValue(Instr->getType());
  }

  Value *OpndVal = Opnd.getSymVal();

  if (Coeff.isMinusOne() || Coeff.isOne()) {
    NeedNeg = Coeff.isMinusOne();
    return OpndVal;
  }

  if (Coeff.isTwo() || Coeff.isMinusTwo()) {
    NeedNeg = Coeff.isMinusTwo();
    return createFAdd(OpndVal, OpndVal);
  }

  NeedNeg = false;
  return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}

// Checks if any operand is negative and we can convert add to sub.
// This function checks for the following negative patterns
//   ADD(XOR(OR(Z, NOT(C)), C), 1) == NEG(AND(Z, C))
//   ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
//   XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
static Value *checkForNegativeOperand(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  // This function creates 2 instructions to replace ADD; we need at least one
  // of LHS or RHS to have one use to ensure a benefit from the transform.
  if (!LHS->hasOneUse() && !RHS->hasOneUse())
    return nullptr;

  Value *X = nullptr, *Y = nullptr, *Z = nullptr;
  const APInt *C1 = nullptr, *C2 = nullptr;

  // If ONE is on the other side, swap.
  if (match(RHS, m_Add(m_Value(X), m_One())))
    std::swap(LHS, RHS);

  if (match(LHS, m_Add(m_Value(X), m_One()))) {
    // If XOR is on the other side, swap.
    if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
      std::swap(X, RHS);

    if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
      // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
      // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
      if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
        Value *NewAnd = Builder.CreateAnd(Z, *C1);
        return Builder.CreateSub(RHS, NewAnd, "sub");
      } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
        // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
        // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
        Value *NewOr = Builder.CreateOr(Z, ~(*C1));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
    }
  }

  // Restore LHS and RHS.
  LHS = I.getOperand(0);
  RHS = I.getOperand(1);

  // If XOR is on the other side, swap.
  if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
    std::swap(LHS, RHS);

  // C1 is ODD (so C2 == C1 - 1 is EVEN)
  // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
  // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
  if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
    if (C1->countTrailingZeros() == 0)
      if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
        Value *NewOr = Builder.CreateOr(Z, ~(*C2));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
  return nullptr;
}
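
// Editor's worked example (not in the original source), for i8 with
// C1 = 0x0F (so C2 = NOT(C1) = 0xF0):
//   %y   = or i8 %z, -16         ; 0xF0
//   %x   = xor i8 %y, 15         ; %x == NOT(AND(%z, 0x0F))
//   %lhs = add i8 %x, 1          ; %lhs == NEG(AND(%z, 0x0F))
//   %r   = add i8 %lhs, %rhs
// becomes
//   %and = and i8 %z, 15
//   %r   = sub i8 %rhs, %and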

/// Wrapping flags may allow combining constants separated by an extend.
static Instruction *foldNoWrapAdd(BinaryOperator &Add,
                                  InstCombiner::BuilderTy &Builder) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  Type *Ty = Add.getType();
  Constant *Op1C;
  if (!match(Op1, m_Constant(Op1C)))
    return nullptr;

  // Try this match first because it results in an add in the narrow type.
  // (zext (X +nuw C2)) + C1 --> zext (X + (C2 + trunc(C1)))
  Value *X;
  const APInt *C1, *C2;
  if (match(Op1, m_APInt(C1)) &&
      match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C2))))) &&
      C1->isNegative() && C1->sge(-C2->sext(C1->getBitWidth()))) {
    Constant *NewC =
        ConstantInt::get(X->getType(), *C2 + C1->trunc(C2->getBitWidth()));
    return new ZExtInst(Builder.CreateNUWAdd(X, NewC), Ty);
  }

  // More general combining of constants in the wide type.
  // (sext (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C)
  Constant *NarrowC;
  if (match(Op0, m_OneUse(m_SExt(m_NSWAdd(m_Value(X), m_Constant(NarrowC)))))) {
    Constant *WideC = ConstantExpr::getSExt(NarrowC, Ty);
    Constant *NewC = ConstantExpr::getAdd(WideC, Op1C);
    Value *WideX = Builder.CreateSExt(X, Ty);
    return BinaryOperator::CreateAdd(WideX, NewC);
  }
  // (zext (X +nuw NarrowC)) + C --> (zext X) + (zext(NarrowC) + C)
  if (match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_Constant(NarrowC)))))) {
    Constant *WideC = ConstantExpr::getZExt(NarrowC, Ty);
    Constant *NewC = ConstantExpr::getAdd(WideC, Op1C);
    Value *WideX = Builder.CreateZExt(X, Ty);
    return BinaryOperator::CreateAdd(WideX, NewC);
  }

  return nullptr;
}
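
// Editor's worked example (not in the original source) for the first match:
//   %a = add nuw i8 %x, 10
//   %z = zext i8 %a to i32
//   %r = add i32 %z, -5
// becomes
//   %a = add nuw i8 %x, 5        ; 10 + trunc(-5) == 5, and nuw still holds
//   %r = zext i8 %a to i32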

Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  Constant *Op1C;
  if (!match(Op1, m_ImmConstant(Op1C)))
    return nullptr;

  if (Instruction *NV = foldBinOpIntoSelectOrPhi(Add))
    return NV;

  Value *X;
  Constant *Op00C;

  // add (sub C1, X), C2 --> sub (add C1, C2), X
  if (match(Op0, m_Sub(m_Constant(Op00C), m_Value(X))))
    return BinaryOperator::CreateSub(ConstantExpr::getAdd(Op00C, Op1C), X);

  Value *Y;

  // add (sub X, Y), -1 --> add (not Y), X
  if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y)))) &&
      match(Op1, m_AllOnes()))
    return BinaryOperator::CreateAdd(Builder.CreateNot(Y), X);

  // zext(bool) + C -> bool ? C + 1 : C
  if (match(Op0, m_ZExt(m_Value(X))) &&
      X->getType()->getScalarSizeInBits() == 1)
    return SelectInst::Create(X, InstCombiner::AddOne(Op1C), Op1);
  // sext(bool) + C -> bool ? C - 1 : C
  if (match(Op0, m_SExt(m_Value(X))) &&
      X->getType()->getScalarSizeInBits() == 1)
    return SelectInst::Create(X, InstCombiner::SubOne(Op1C), Op1);

  // ~X + C --> (C-1) - X
  if (match(Op0, m_Not(m_Value(X))))
    return BinaryOperator::CreateSub(InstCombiner::SubOne(Op1C), X);

  const APInt *C;
  if (!match(Op1, m_APInt(C)))
    return nullptr;

  // (X | Op01C) + Op1C --> X + (Op01C + Op1C) iff the `or` is actually an `add`
  Constant *Op01C;
  if (match(Op0, m_Or(m_Value(X), m_ImmConstant(Op01C))) &&
      haveNoCommonBitsSet(X, Op01C, DL, &AC, &Add, &DT))
    return BinaryOperator::CreateAdd(X, ConstantExpr::getAdd(Op01C, Op1C));

  // (X | C2) + C --> (X | C2) ^ C2 iff (C2 == -C)
  const APInt *C2;
  if (match(Op0, m_Or(m_Value(), m_APInt(C2))) && *C2 == -*C)
    return BinaryOperator::CreateXor(Op0, ConstantInt::get(Add.getType(), *C2));

  if (C->isSignMask()) {
    // If wrapping is not allowed, then the addition must set the sign bit:
    // X + (signmask) --> X | signmask
    if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap())
      return BinaryOperator::CreateOr(Op0, Op1);

    // If wrapping is allowed, then the addition flips the sign bit of LHS:
    // X + (signmask) --> X ^ signmask
    return BinaryOperator::CreateXor(Op0, Op1);
  }

  // Is this add the last step in a convoluted sext?
  // add(zext(xor i16 X, -32768), -32768) --> sext X
  Type *Ty = Add.getType();
  if (match(Op0, m_ZExt(m_Xor(m_Value(X), m_APInt(C2)))) &&
      C2->isMinSignedValue() && C2->sext(Ty->getScalarSizeInBits()) == *C)
    return CastInst::Create(Instruction::SExt, X, Ty);

  if (match(Op0, m_Xor(m_Value(X), m_APInt(C2)))) {
    // (X ^ signmask) + C --> (X + (signmask ^ C))
    if (C2->isSignMask())
      return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C2 ^ *C));

    // If X has no high-bits set above an xor mask:
    // add (xor X, LowMaskC), C --> sub (LowMaskC + C), X
    if (C2->isMask()) {
      KnownBits LHSKnown = computeKnownBits(X, 0, &Add);
      if ((*C2 | LHSKnown.Zero).isAllOnes())
        return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C2 + *C), X);
    }

    // Look for a math+logic pattern that corresponds to sext-in-register of a
    // value with cleared high bits. Convert that into a pair of shifts:
    // add (xor X, 0x80), 0xF..F80 --> (X << ShAmtC) >>s ShAmtC
    // add (xor X, 0xF..F80), 0x80 --> (X << ShAmtC) >>s ShAmtC
    if (Op0->hasOneUse() && *C2 == -(*C)) {
      unsigned BitWidth = Ty->getScalarSizeInBits();
      unsigned ShAmt = 0;
      if (C->isPowerOf2())
        ShAmt = BitWidth - C->logBase2() - 1;
      else if (C2->isPowerOf2())
        ShAmt = BitWidth - C2->logBase2() - 1;
      if (ShAmt && MaskedValueIsZero(X, APInt::getHighBitsSet(BitWidth, ShAmt),
                                     0, &Add)) {
        Constant *ShAmtC = ConstantInt::get(Ty, ShAmt);
        Value *NewShl = Builder.CreateShl(X, ShAmtC, "sext");
        return BinaryOperator::CreateAShr(NewShl, ShAmtC);
      }
    }
  }

  if (C->isOne() && Op0->hasOneUse()) {
    // add (sext i1 X), 1 --> zext (not X)
    // TODO: The smallest IR representation is (select X, 0, 1), and that would
    // not require the one-use check. But we need to remove a transform in
    // visitSelect and make sure that IR value tracking for select is equal or
    // better than for these ops.
    if (match(Op0, m_SExt(m_Value(X))) &&
        X->getType()->getScalarSizeInBits() == 1)
      return new ZExtInst(Builder.CreateNot(X), Ty);

    // Shifts and add used to flip and mask off the low bit:
    // add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1
    const APInt *C3;
    if (match(Op0, m_AShr(m_Shl(m_Value(X), m_APInt(C2)), m_APInt(C3))) &&
        C2 == C3 && *C2 == Ty->getScalarSizeInBits() - 1) {
      Value *NotX = Builder.CreateNot(X);
      return BinaryOperator::CreateAnd(NotX, ConstantInt::get(Ty, 1));
    }
  }

  // If all bits affected by the add are included in a high-bit-mask, do the
  // add before the mask op:
  // (X & 0xFF00) + xx00 --> (X + xx00) & 0xFF00
  if (match(Op0, m_OneUse(m_And(m_Value(X), m_APInt(C2)))) &&
      C2->isNegative() && C2->isShiftedMask() && *C == (*C & *C2)) {
    Value *NewAdd = Builder.CreateAdd(X, ConstantInt::get(Ty, *C));
    return BinaryOperator::CreateAnd(NewAdd, ConstantInt::get(Ty, *C2));
  }

  return nullptr;
}
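
// Editor's worked example (not in the original source) for the shift-pair
// rewrite above, with i32, C2 == 0x80 and C == 0xFFFFFF80 (so C2 == -C and
// ShAmt == 32 - log2(0x80) - 1 == 24):
//   %x32  = zext i8 %x to i32      ; high 24 bits of %x32 are known zero
//   %xor  = xor i32 %x32, 128
//   %r    = add i32 %xor, -128
// becomes
//   %sext = shl i32 %x32, 24
//   %r    = ashr i32 %sext, 24     ; sign-extend the low byte within i32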

// Matches multiplication expression Op * C where C is a constant. Returns the
// constant value in C and the other operand in Op. Returns true if such a
// match is found.
static bool MatchMul(Value *E, Value *&Op, APInt &C) {
  const APInt *AI;
  if (match(E, m_Mul(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (match(E, m_Shl(m_Value(Op), m_APInt(AI)))) {
    C = APInt(AI->getBitWidth(), 1);
    C <<= *AI;
    return true;
  }
  return false;
}

// Matches remainder expression Op % C where C is a constant. Returns the
// constant value in C and the other operand in Op. Returns the signedness of
// the remainder operation in IsSigned. Returns true if such a match is
// found.
static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned) {
  const APInt *AI;
  IsSigned = false;
  if (match(E, m_SRem(m_Value(Op), m_APInt(AI)))) {
    IsSigned = true;
    C = *AI;
    return true;
  }
  if (match(E, m_URem(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (match(E, m_And(m_Value(Op), m_APInt(AI))) && (*AI + 1).isPowerOf2()) {
    C = *AI + 1;
    return true;
  }
  return false;
}

// Matches division expression Op / C with the given signedness as indicated
// by IsSigned, where C is a constant. Returns the constant value in C and the
// other operand in Op. Returns true if such a match is found.
static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned) {
  const APInt *AI;
  if (IsSigned && match(E, m_SDiv(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (!IsSigned) {
    if (match(E, m_UDiv(m_Value(Op), m_APInt(AI)))) {
      C = *AI;
      return true;
    }
    if (match(E, m_LShr(m_Value(Op), m_APInt(AI)))) {
      C = APInt(AI->getBitWidth(), 1);
      C <<= *AI;
      return true;
    }
  }
  return false;
}
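
// Editor's note (not in the original source): these matchers also accept the
// strength-reduced forms of mul/rem/div by a power of two, e.g. for i32:
//   "shl %x, 3"   matches as %x * 8     (MatchMul)
//   "and %x, 7"   matches as %x u% 8    (MatchRem, unsigned)
//   "lshr %x, 3"  matches as %x u/ 8    (MatchDiv, unsigned)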

// Returns whether C0 * C1 with the given signedness overflows.
static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned) {
  bool overflow;
  if (IsSigned)
    (void)C0.smul_ov(C1, overflow);
  else
    (void)C0.umul_ov(C1, overflow);
  return overflow;
}

// Simplifies X % C0 + (( X / C0 ) % C1) * C0 to X % (C0 * C1), where (C0 * C1)
// does not overflow.
Value *InstCombinerImpl::SimplifyAddWithRemainder(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *X, *MulOpV;
  APInt C0, MulOpC;
  bool IsSigned;
  // Match I = X % C0 + MulOpV * C0
  if (((MatchRem(LHS, X, C0, IsSigned) && MatchMul(RHS, MulOpV, MulOpC)) ||
       (MatchRem(RHS, X, C0, IsSigned) && MatchMul(LHS, MulOpV, MulOpC))) &&
      C0 == MulOpC) {
    Value *RemOpV;
    APInt C1;
    bool Rem2IsSigned;
    // Match MulOpC = RemOpV % C1
    if (MatchRem(MulOpV, RemOpV, C1, Rem2IsSigned) &&
        IsSigned == Rem2IsSigned) {
      Value *DivOpV;
      APInt DivOpC;
      // Match RemOpV = X / C0
      if (MatchDiv(RemOpV, DivOpV, DivOpC, IsSigned) && X == DivOpV &&
          C0 == DivOpC && !MulWillOverflow(C0, C1, IsSigned)) {
        Value *NewDivisor = ConstantInt::get(X->getType(), C0 * C1);
        return IsSigned ? Builder.CreateSRem(X, NewDivisor, "srem")
                        : Builder.CreateURem(X, NewDivisor, "urem");
      }
    }
  }

  return nullptr;
}
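
// Editor's worked example (not in the original source), unsigned with
// C0 == 10 and C1 == 6 (10 * 6 == 60 does not overflow i32):
//   %rem1 = urem i32 %x, 10
//   %div  = udiv i32 %x, 10
//   %rem2 = urem i32 %div, 6
//   %mul  = mul i32 %rem2, 10
//   %r    = add i32 %rem1, %mul
// becomes
//   %r = urem i32 %x, 60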

/// Fold
///   (1 << NBits) - 1
/// Into:
///   ~(-(1 << NBits))
/// Because a 'not' is better for bit-tracking analysis and other transforms
/// than an 'add'. The new shl is always nsw, and is nuw if the old `add` was.
static Instruction *canonicalizeLowbitMask(BinaryOperator &I,
                                           InstCombiner::BuilderTy &Builder) {
  Value *NBits;
  if (!match(&I, m_Add(m_OneUse(m_Shl(m_One(), m_Value(NBits))), m_AllOnes())))
    return nullptr;

  Constant *MinusOne = Constant::getAllOnesValue(NBits->getType());
  Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask");
  // Be wary of constant folding.
  if (auto *BOp = dyn_cast<BinaryOperator>(NotMask)) {
    // Always NSW. But NUW propagates from `add`.
    BOp->setHasNoSignedWrap();
    BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
  }

  return BinaryOperator::CreateNot(NotMask, I.getName());
}
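
// Editor's worked example (not in the original source):
//   %shl  = shl i32 1, %n
//   %mask = add i32 %shl, -1           ; (1 << %n) - 1
// becomes
//   %notmask = shl nsw i32 -1, %n      ; -(1 << %n)
//   %mask    = xor i32 %notmask, -1    ; ~(-(1 << %n)), i.e. a 'not'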

static Instruction *foldToUnsignedSaturatedAdd(BinaryOperator &I) {
  assert(I.getOpcode() == Instruction::Add && "Expecting add instruction");
  Type *Ty = I.getType();
  auto getUAddSat = [&]() {
    return Intrinsic::getDeclaration(I.getModule(), Intrinsic::uadd_sat, Ty);
  };

  // add (umin X, ~Y), Y --> uaddsat X, Y
  Value *X, *Y;
  if (match(&I, m_c_Add(m_c_UMin(m_Value(X), m_Not(m_Value(Y))),
                        m_Deferred(Y))))
    return CallInst::Create(getUAddSat(), { X, Y });

  // add (umin X, ~C), C --> uaddsat X, C
  const APInt *C, *NotC;
  if (match(&I, m_Add(m_UMin(m_Value(X), m_APInt(NotC)), m_APInt(C))) &&
      *C == ~*NotC)
    return CallInst::Create(getUAddSat(), { X, ConstantInt::get(Ty, *C) });

  return nullptr;
}
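
// Editor's note (not in the original source): "umin X, ~Y" clamps X to at
// most (unsigned max - Y), so the following add can never wrap; that is
// exactly the semantics of a saturating add. E.g. for i8:
//   %clamp = call i8 @llvm.umin.i8(i8 %x, i8 200)   ; 200 == ~55
//   %r     = add i8 %clamp, 55
// becomes
//   %r = call i8 @llvm.uadd.sat.i8(i8 %x, i8 55)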

Instruction *InstCombinerImpl::
    canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
        BinaryOperator &I) {
  assert((I.getOpcode() == Instruction::Add ||
          I.getOpcode() == Instruction::Or ||
          I.getOpcode() == Instruction::Sub) &&
         "Expecting add/or/sub instruction");

  // We have a subtraction/addition between a (potentially truncated) *logical*
  // right-shift of X and a "select".
  Value *X, *Select;
  Instruction *LowBitsToSkip, *Extract;
  if (!match(&I, m_c_BinOp(m_TruncOrSelf(m_CombineAnd(
                               m_LShr(m_Value(X), m_Instruction(LowBitsToSkip)),
                               m_Instruction(Extract))),
                           m_Value(Select))))
    return nullptr;

  // `add`/`or` is commutative; but for `sub`, "select" *must* be on RHS.
  if (I.getOpcode() == Instruction::Sub && I.getOperand(1) != Select)
    return nullptr;

  Type *XTy = X->getType();
  bool HadTrunc = I.getType() != XTy;

  // If there was a truncation of the extracted value, then we'll need to
  // produce one extra instruction, so we need to ensure one instruction will
  // go away.
  if (HadTrunc && !match(&I, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
    return nullptr;

  // Extraction should extract high NBits bits, with shift amount calculated as:
  //   low bits to skip = shift bitwidth - high bits to extract
  // The shift amount itself may be extended, and we need to look past zero-ext
  // when matching NBits; that will matter for matching later.
  Constant *C;
  Value *NBits;
  if (!match(
          LowBitsToSkip,
          m_ZExtOrSelf(m_Sub(m_Constant(C), m_ZExtOrSelf(m_Value(NBits))))) ||
      !match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
                                   APInt(C->getType()->getScalarSizeInBits(),
                                         X->getType()->getScalarSizeInBits()))))
    return nullptr;

  // Sign-extending value can be zero-extended if we `sub`tract it,
  // or sign-extended otherwise.
  auto SkipExtInMagic = [&I](Value *&V) {
    if (I.getOpcode() == Instruction::Sub)
      match(V, m_ZExtOrSelf(m_Value(V)));
    else
      match(V, m_SExtOrSelf(m_Value(V)));
  };

  // Now, finally validate the sign-extending magic.
  // `select` itself may be appropriately extended, look past that.
  SkipExtInMagic(Select);

  ICmpInst::Predicate Pred;
  const APInt *Thr;
  Value *SignExtendingValue, *Zero;
  bool ShouldSignext;
  // It must be a select between two values we will later establish to be a
  // sign-extending value and a zero constant. The condition guarding the
  // sign-extension must be based on a sign bit of the same X we had in `lshr`.
  if (!match(Select, m_Select(m_ICmp(Pred, m_Specific(X), m_APInt(Thr)),
                              m_Value(SignExtendingValue), m_Value(Zero))) ||
      !isSignBitCheck(Pred, *Thr, ShouldSignext))
    return nullptr;

  // icmp-select pair is commutative.
  if (!ShouldSignext)
    std::swap(SignExtendingValue, Zero);

  // If we should not perform sign-extension then we must add/or/subtract zero.
  if (!match(Zero, m_Zero()))
    return nullptr;
  // Otherwise, it should be some constant, left-shifted by the same NBits we
  // had in `lshr`. Said left-shift can also be appropriately extended.
  // Again, we must look past zero-ext when looking for NBits.
  SkipExtInMagic(SignExtendingValue);
  Constant *SignExtendingValueBaseConstant;
  if (!match(SignExtendingValue,
             m_Shl(m_Constant(SignExtendingValueBaseConstant),
                   m_ZExtOrSelf(m_Specific(NBits)))))
    return nullptr;
  // If we `sub`, then the constant should be one, else it should be all-ones.
  if (I.getOpcode() == Instruction::Sub
          ? !match(SignExtendingValueBaseConstant, m_One())
          : !match(SignExtendingValueBaseConstant, m_AllOnes()))
    return nullptr;

  auto *NewAShr = BinaryOperator::CreateAShr(X, LowBitsToSkip,
                                             Extract->getName() + ".sext");
  NewAShr->copyIRFlags(Extract); // Preserve `exact`-ness.
  if (!HadTrunc)
    return NewAShr;

  Builder.Insert(NewAShr);
  return TruncInst::CreateTruncOrBitCast(NewAShr, I.getType());
}

/// This is a specialization of a more general transform from
/// SimplifyUsingDistributiveLaws. If that code can be made to work optimally
/// for multi-use cases or propagating nsw/nuw, then we would not need this.
static Instruction *factorizeMathWithShlOps(BinaryOperator &I,
                                            InstCombiner::BuilderTy &Builder) {
  // TODO: Also handle mul by doubling the shift amount?
  assert((I.getOpcode() == Instruction::Add ||
          I.getOpcode() == Instruction::Sub) &&
         "Expected add/sub");
  auto *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
  auto *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
  if (!Op0 || !Op1 || !(Op0->hasOneUse() || Op1->hasOneUse()))
    return nullptr;

  Value *X, *Y, *ShAmt;
  if (!match(Op0, m_Shl(m_Value(X), m_Value(ShAmt))) ||
      !match(Op1, m_Shl(m_Value(Y), m_Specific(ShAmt))))
    return nullptr;

  // No-wrap propagates only when all ops have no-wrap.
  bool HasNSW = I.hasNoSignedWrap() && Op0->hasNoSignedWrap() &&
                Op1->hasNoSignedWrap();
  bool HasNUW = I.hasNoUnsignedWrap() && Op0->hasNoUnsignedWrap() &&
                Op1->hasNoUnsignedWrap();

  // add/sub (X << ShAmt), (Y << ShAmt) --> (add/sub X, Y) << ShAmt
  Value *NewMath = Builder.CreateBinOp(I.getOpcode(), X, Y);
  if (auto *NewI = dyn_cast<BinaryOperator>(NewMath)) {
    NewI->setHasNoSignedWrap(HasNSW);
    NewI->setHasNoUnsignedWrap(HasNUW);
  }
  auto *NewShl = BinaryOperator::CreateShl(NewMath, ShAmt);
  NewShl->setHasNoSignedWrap(HasNSW);
  NewShl->setHasNoUnsignedWrap(HasNUW);
  return NewShl;
}
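
// Editor's worked example (not in the original source): this is the
// distributive law (X * 2^s) + (Y * 2^s) == (X + Y) * 2^s in shift form:
//   %a = shl nsw i32 %x, 4
//   %b = shl nsw i32 %y, 4
//   %r = add nsw i32 %a, %b
// becomes
//   %t = add nsw i32 %x, %y
//   %r = shl nsw i32 %t, 4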

Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
  if (Value *V = SimplifyAddInst(I.getOperand(0), I.getOperand(1),
                                 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  // (A*B)+(A*C) -> A*(B+C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  if (Instruction *R = factorizeMathWithShlOps(I, Builder))
    return R;

  if (Instruction *X = foldAddWithConstant(I))
    return X;

  if (Instruction *X = foldNoWrapAdd(I, Builder))
    return X;

  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Type *Ty = I.getType();
  if (Ty->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateXor(LHS, RHS);

  // X + X --> X << 1
  if (LHS == RHS) {
    auto *Shl = BinaryOperator::CreateShl(LHS, ConstantInt::get(Ty, 1));
    Shl->setHasNoSignedWrap(I.hasNoSignedWrap());
    Shl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    return Shl;
  }

  Value *A, *B;
  if (match(LHS, m_Neg(m_Value(A)))) {
    // -A + -B --> -(A + B)
    if (match(RHS, m_Neg(m_Value(B))))
      return BinaryOperator::CreateNeg(Builder.CreateAdd(A, B));

    // -A + B --> B - A
    return BinaryOperator::CreateSub(RHS, A);
  }

  // A + -B --> A - B
  if (match(RHS, m_Neg(m_Value(B))))
    return BinaryOperator::CreateSub(LHS, B);

  if (Value *V = checkForNegativeOperand(I, Builder))
    return replaceInstUsesWith(I, V);

  // (A + 1) + ~B --> A - B
  // ~B + (A + 1) --> A - B
  // (~B + A) + 1 --> A - B
  // (A + ~B) + 1 --> A - B
  if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_One()), m_Not(m_Value(B)))) ||
      match(&I, m_BinOp(m_c_Add(m_Not(m_Value(B)), m_Value(A)), m_One())))
    return BinaryOperator::CreateSub(A, B);

  // (A + RHS) + RHS --> A + (RHS << 1)
  if (match(LHS, m_OneUse(m_c_Add(m_Value(A), m_Specific(RHS)))))
    return BinaryOperator::CreateAdd(A, Builder.CreateShl(RHS, 1, "reass.add"));

  // LHS + (A + LHS) --> A + (LHS << 1)
  if (match(RHS, m_OneUse(m_c_Add(m_Value(A), m_Specific(LHS)))))
    return BinaryOperator::CreateAdd(A, Builder.CreateShl(LHS, 1, "reass.add"));

  {
    // (A + C1) + (C2 - B) --> (A - B) + (C1 + C2)
    Constant *C1, *C2;
    if (match(&I, m_c_Add(m_Add(m_Value(A), m_ImmConstant(C1)),
                          m_Sub(m_ImmConstant(C2), m_Value(B)))) &&
        (LHS->hasOneUse() || RHS->hasOneUse())) {
      Value *Sub = Builder.CreateSub(A, B);
      return BinaryOperator::CreateAdd(Sub, ConstantExpr::getAdd(C1, C2));
    }
  }

  // X % C0 + (( X / C0 ) % C1) * C0 => X % (C0 * C1)
  if (Value *V = SimplifyAddWithRemainder(I)) return replaceInstUsesWith(I, V);

  // ((X s/ C1) << C2) + X => X s% -C1 where -C1 is 1 << C2
  const APInt *C1, *C2;
  if (match(LHS, m_Shl(m_SDiv(m_Specific(RHS), m_APInt(C1)), m_APInt(C2)))) {
    APInt one(C2->getBitWidth(), 1);
    APInt minusC1 = -(*C1);
    if (minusC1 == (one << *C2)) {
      Constant *NewRHS = ConstantInt::get(RHS->getType(), minusC1);
      return BinaryOperator::CreateSRem(RHS, NewRHS);
    }
  }

  // (A & 2^C1) + A => A & (2^C1 - 1) iff bit C1 in A is a sign bit
  if (match(&I, m_c_Add(m_And(m_Value(A), m_APInt(C1)), m_Deferred(A))) &&
      C1->isPowerOf2() && (ComputeNumSignBits(A) > C1->countLeadingZeros())) {
    Constant *NewMask = ConstantInt::get(RHS->getType(), *C1 - 1);
    return BinaryOperator::CreateAnd(A, NewMask);
  }

  // A+B --> A|B iff A and B have no bits set in common.
  if (haveNoCommonBitsSet(LHS, RHS, DL, &AC, &I, &DT))
    return BinaryOperator::CreateOr(LHS, RHS);

  // add (select X 0 (sub n A)) A --> select X A n
  {
    SelectInst *SI = dyn_cast<SelectInst>(LHS);
    Value *A = RHS;
    if (!SI) {
      SI = dyn_cast<SelectInst>(RHS);
      A = LHS;
    }
    if (SI && SI->hasOneUse()) {
      Value *TV = SI->getTrueValue();
      Value *FV = SI->getFalseValue();
      Value *N;

      // Can we fold the add into the argument of the select?
      // We check both true and false select arguments for a matching subtract.
      if (match(FV, m_Zero()) && match(TV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the true select value.
        return SelectInst::Create(SI->getCondition(), N, A);

      if (match(TV, m_Zero()) && match(FV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the false select value.
        return SelectInst::Create(SI->getCondition(), A, N);
    }
  }

  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  // (add (xor A, B) (and A, B)) --> (or A, B)
  // (add (and A, B) (xor A, B)) --> (or A, B)
  if (match(&I, m_c_BinOp(m_Xor(m_Value(A), m_Value(B)),
                          m_c_And(m_Deferred(A), m_Deferred(B)))))
    return BinaryOperator::CreateOr(A, B);

  // (add (or A, B) (and A, B)) --> (add A, B)
  // (add (and A, B) (or A, B)) --> (add A, B)
  if (match(&I, m_c_BinOp(m_Or(m_Value(A), m_Value(B)),
                          m_c_And(m_Deferred(A), m_Deferred(B))))) {
    // Replacing operands in-place to preserve nuw/nsw flags.
    replaceOperand(I, 0, A);
    replaceOperand(I, 1, B);
    return &I;
  }

  // TODO(jingyue): Consider willNotOverflowSignedAdd and
  // willNotOverflowUnsignedAdd to reduce the number of invocations of
  // computeKnownBits.
  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  if (Instruction *V = canonicalizeLowbitMask(I, Builder))
    return V;

  if (Instruction *V =
          canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
    return V;

  if (Instruction *SatAdd = foldToUnsignedSaturatedAdd(I))
    return SatAdd;

  // usub.sat(A, B) + B => umax(A, B)
  if (match(&I, m_c_BinOp(
          m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Value(A), m_Value(B))),
          m_Deferred(B)))) {
    return replaceInstUsesWith(I,
        Builder.CreateIntrinsic(Intrinsic::umax, {I.getType()}, {A, B}));
  }

  // ctpop(A) + ctpop(B) => ctpop(A | B) if A and B have no bits set in common.
  if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(A)))) &&
      match(RHS, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(B)))) &&
      haveNoCommonBitsSet(A, B, DL, &AC, &I, &DT))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
                                   {Builder.CreateOr(A, B)}));

  return Changed ? &I : nullptr;
}

/// Eliminate an op from a linear interpolation (lerp) pattern.
static Instruction *factorizeLerp(BinaryOperator &I,
                                  InstCombiner::BuilderTy &Builder) {
  Value *X, *Y, *Z;
  if (!match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_Value(Y),
                                            m_OneUse(m_FSub(m_FPOne(),
                                                            m_Value(Z))))),
                          m_OneUse(m_c_FMul(m_Value(X), m_Specific(Z))))))
    return nullptr;

  // (Y * (1.0 - Z)) + (X * Z) --> Y + Z * (X - Y) [8 commuted variants]
  Value *XY = Builder.CreateFSubFMF(X, Y, &I);
  Value *MulZ = Builder.CreateFMulFMF(Z, XY, &I);
  return BinaryOperator::CreateFAddFMF(Y, MulZ, &I);
}
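
// Editor's note (not in the original source): algebraically,
//   Y*(1 - Z) + X*Z == Y - Y*Z + X*Z == Y + Z*(X - Y),
// which trades two multiplies for one. This is valid under the reassoc/nsz
// fast-math flags that the caller requires.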

/// Factor a common operand out of fadd/fsub of fmul/fdiv.
static Instruction *factorizeFAddFSub(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  assert((I.getOpcode() == Instruction::FAdd ||
          I.getOpcode() == Instruction::FSub) && "Expecting fadd/fsub");
  assert(I.hasAllowReassoc() && I.hasNoSignedZeros() &&
         "FP factorization requires FMF");

  if (Instruction *Lerp = factorizeLerp(I, Builder))
    return Lerp;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (!Op0->hasOneUse() || !Op1->hasOneUse())
    return nullptr;

  Value *X, *Y, *Z;
  bool IsFMul;
  if ((match(Op0, m_FMul(m_Value(X), m_Value(Z))) &&
       match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))) ||
      (match(Op0, m_FMul(m_Value(Z), m_Value(X))) &&
       match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))))
    IsFMul = true;
  else if (match(Op0, m_FDiv(m_Value(X), m_Value(Z))) &&
           match(Op1, m_FDiv(m_Value(Y), m_Specific(Z))))
    IsFMul = false;
  else
    return nullptr;

  // (X * Z) + (Y * Z) --> (X + Y) * Z
  // (X * Z) - (Y * Z) --> (X - Y) * Z
  // (X / Z) + (Y / Z) --> (X + Y) / Z
  // (X / Z) - (Y / Z) --> (X - Y) / Z
  bool IsFAdd = I.getOpcode() == Instruction::FAdd;
  Value *XY = IsFAdd ? Builder.CreateFAddFMF(X, Y, &I)
                     : Builder.CreateFSubFMF(X, Y, &I);

  // Bail out if we just created a denormal constant.
  // TODO: This is copied from a previous implementation. Is it necessary?
  const APFloat *C;
  if (match(XY, m_APFloat(C)) && !C->isNormal())
    return nullptr;

  return IsFMul ? BinaryOperator::CreateFMulFMF(XY, Z, &I)
                : BinaryOperator::CreateFDivFMF(XY, Z, &I);
}
1530 
1532  if (Value *V = SimplifyFAddInst(I.getOperand(0), I.getOperand(1),
1533  I.getFastMathFlags(),
1534  SQ.getWithInstruction(&I)))
1535  return replaceInstUsesWith(I, V);
1536 
1537  if (SimplifyAssociativeOrCommutative(I))
1538  return &I;
1539 
1540  if (Instruction *X = foldVectorBinop(I))
1541  return X;
1542 
1543  if (Instruction *Phi = foldBinopWithPhiOperands(I))
1544  return Phi;
1545 
1546  if (Instruction *FoldedFAdd = foldBinOpIntoSelectOrPhi(I))
1547  return FoldedFAdd;
1548 
1549  // (-X) + Y --> Y - X
1550  Value *X, *Y;
1551  if (match(&I, m_c_FAdd(m_FNeg(m_Value(X)), m_Value(Y))))
1552  return BinaryOperator::CreateFSubFMF(Y, X, &I);
1553 
1554  // Similar to above, but look through fmul/fdiv for the negated term.
1555  // (-X * Y) + Z --> Z - (X * Y) [4 commuted variants]
1556  Value *Z;
1558  m_Value(Z)))) {
1559  Value *XY = Builder.CreateFMulFMF(X, Y, &I);
1560  return BinaryOperator::CreateFSubFMF(Z, XY, &I);
1561  }
1562  // (-X / Y) + Z --> Z - (X / Y) [2 commuted variants]
1563  // (X / -Y) + Z --> Z - (X / Y) [2 commuted variants]
1565  m_Value(Z))) ||
1567  m_Value(Z)))) {
1568  Value *XY = Builder.CreateFDivFMF(X, Y, &I);
1569  return BinaryOperator::CreateFSubFMF(Z, XY, &I);
1570  }
1571 
1572  // Check for (fadd double (sitofp x), y), see if we can merge this into an
1573  // integer add followed by a promotion.
1574  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1575  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
1576  Value *LHSIntVal = LHSConv->getOperand(0);
1577  Type *FPType = LHSConv->getType();
1578 
1579  // TODO: This check is overly conservative. In many cases known bits
1580  // analysis can tell us that the result of the addition has less significant
1581  // bits than the integer type can hold.
1582  auto IsValidPromotion = [](Type *FTy, Type *ITy) {
1583  Type *FScalarTy = FTy->getScalarType();
1584  Type *IScalarTy = ITy->getScalarType();
1585 
1586  // Do we have enough bits in the significand to represent the result of
1587  // the integer addition?
1588  unsigned MaxRepresentableBits =
1590  return IScalarTy->getIntegerBitWidth() <= MaxRepresentableBits;
1591  };
1592 
1593  // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
1594  // ... if the constant fits in the integer value. This is useful for things
1595  // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
1596  // requires a constant pool load, and generally allows the add to be better
1597  // instcombined.
1598  if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
1599  if (IsValidPromotion(FPType, LHSIntVal->getType())) {
1600  Constant *CI =
1601  ConstantExpr::getFPToSI(CFP, LHSIntVal->getType());
1602  if (LHSConv->hasOneUse() &&
1603  ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
1604  willNotOverflowSignedAdd(LHSIntVal, CI, I)) {
1605  // Insert the new integer add.
1606  Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, CI, "addconv");
1607  return new SIToFPInst(NewAdd, I.getType());
1608  }
1609  }
1610 
1611  // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
1612  if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
1613  Value *RHSIntVal = RHSConv->getOperand(0);
1614  // It's enough to check LHS types only because we require int types to
1615  // be the same for this transform.
1616  if (IsValidPromotion(FPType, LHSIntVal->getType())) {
1617  // Only do this if x/y have the same type, if at least one of them has a
1618  // single use (so we don't increase the number of int->fp conversions),
1619  // and if the integer add will not overflow.
1620  if (LHSIntVal->getType() == RHSIntVal->getType() &&
1621  (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
1622  willNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) {
1623  // Insert the new integer add.
1624  Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, RHSIntVal, "addconv");
1625  return new SIToFPInst(NewAdd, I.getType());
1626  }
1627  }
1628  }
1629  }
1630 
1631  // Handle specials cases for FAdd with selects feeding the operation
1632  if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS))
1633  return replaceInstUsesWith(I, V);
1634 
1635  if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
1637  return F;
1638 
1639  // Try to fold fadd into start value of reduction intrinsic.
1640  if (match(&I, m_c_FAdd(m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
1641  m_AnyZeroFP(), m_Value(X))),
1642  m_Value(Y)))) {
1643  // fadd (rdx 0.0, X), Y --> rdx Y, X
1644  return replaceInstUsesWith(
1645  I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
1646  {X->getType()}, {Y, X}, &I));
1647  }
1648  const APFloat *StartC, *C;
1649  if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
1650  m_APFloat(StartC), m_Value(X)))) &&
1651  match(RHS, m_APFloat(C))) {
1652  // fadd (rdx StartC, X), C --> rdx (C + StartC), X
1653  Constant *NewStartC = ConstantFP::get(I.getType(), *C + *StartC);
1654  return replaceInstUsesWith(
1655  I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
1656  {X->getType()}, {NewStartC, X}, &I));
1657  }
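  // For instance, a hypothetical
  //   %r = call fast float @llvm.vector.reduce.fadd.v4f32(float 1.0, <4 x float> %v)
  //   %s = fadd fast float %r, 2.0
  // becomes a single reduction with start value 3.0 when %r has one use.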
1658 
1659  // (X * MulC) + X --> X * (MulC + 1.0)
1660  Constant *MulC;
1661  if (match(&I, m_c_FAdd(m_FMul(m_Value(X), m_ImmConstant(MulC)),
1662  m_Deferred(X)))) {
1663  MulC = ConstantExpr::getFAdd(MulC, ConstantFP::get(I.getType(), 1.0));
1664  return BinaryOperator::CreateFMulFMF(X, MulC, &I);
1665  }
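  // e.g. with reassoc and nsz: (%x * 3.0) + %x --> %x * 4.0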
1666 
1667  if (Value *V = FAddCombine(Builder).simplify(&I))
1668  return replaceInstUsesWith(I, V);
1669  }
1670 
1671  return nullptr;
1672 }
1673 
1674 /// Optimize the difference of two pointers into the same array into a size.
1675 /// Consider &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the
1676 /// pointer operands to the ptrtoint instructions for the LHS/RHS of the subtract.
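/// For example (illustrative IR with i32 elements, so index 10 is byte
/// offset 40):
///   %gep = getelementptr inbounds i32, i32* %a, i64 10
///   %pi  = ptrtoint i32* %gep to i64
///   %ai  = ptrtoint i32* %a to i64
///   %d   = sub i64 %pi, %ai   ; folds to the constant 40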
1677 Value *InstCombinerImpl::OptimizePointerDifference(Value *LHS, Value *RHS,
1678  Type *Ty, bool IsNUW) {
1679  // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
1680  // this.
1681  bool Swapped = false;
1682  GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;
1683  if (!isa<GEPOperator>(LHS) && isa<GEPOperator>(RHS)) {
1684  std::swap(LHS, RHS);
1685  Swapped = true;
1686  }
1687 
1688  // Require at least one GEP with a common base pointer on both sides.
1689  if (auto *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
1690  // (gep X, ...) - X
1691  if (LHSGEP->getOperand(0) == RHS) {
1692  GEP1 = LHSGEP;
1693  } else if (auto *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
1694  // (gep X, ...) - (gep X, ...)
1695  if (LHSGEP->getOperand(0)->stripPointerCasts() ==
1696  RHSGEP->getOperand(0)->stripPointerCasts()) {
1697  GEP1 = LHSGEP;
1698  GEP2 = RHSGEP;
1699  }
1700  }
1701  }
1702 
1703  if (!GEP1)
1704  return nullptr;
1705 
1706  if (GEP2) {
1707  // (gep X, ...) - (gep X, ...)
1708  //
1709  // Avoid duplicating the arithmetic if there is more than one non-constant
1710  // index between the two GEPs and either GEP has both a non-constant index
1711  // and multiple users. With zero non-constant indices, the result is a
1712  // constant and there is no duplication. With one non-constant index, the
1713  // result is an add or sub with a constant, which is no larger than the
1714  // original code, and there is no duplicated arithmetic even if either GEP
1715  // has multiple users. With more than one non-constant index combined, as
1716  // long as each GEP with a non-constant index has a single user, there is
1717  // no duplication.
1718  unsigned NumNonConstantIndices1 = GEP1->countNonConstantIndices();
1719  unsigned NumNonConstantIndices2 = GEP2->countNonConstantIndices();
1720  if (NumNonConstantIndices1 + NumNonConstantIndices2 > 1 &&
1721  ((NumNonConstantIndices1 > 0 && !GEP1->hasOneUse()) ||
1722  (NumNonConstantIndices2 > 0 && !GEP2->hasOneUse()))) {
1723  return nullptr;
1724  }
1725  }
1726 
1727  // Emit the offset of the GEP as an intptr_t.
1728  Value *Result = EmitGEPOffset(GEP1);
1729 
1730  // If this is a single inbounds GEP and the original sub was nuw,
1731  // then the final multiplication is also nuw.
1732  if (auto *I = dyn_cast<Instruction>(Result))
1733  if (IsNUW && !GEP2 && !Swapped && GEP1->isInBounds() &&
1734  I->getOpcode() == Instruction::Mul)
1735  I->setHasNoUnsignedWrap();
1736 
1737  // If we have a 2nd GEP of the same base pointer, subtract the offsets.
1738  // If both GEPs are inbounds, then the subtract does not have signed overflow.
1739  if (GEP2) {
1740  Value *Offset = EmitGEPOffset(GEP2);
1741  Result = Builder.CreateSub(Result, Offset, "gepdiff", /* NUW */ false,
1742  GEP1->isInBounds() && GEP2->isInBounds());
1743  }
1744 
1745  // If we have p - gep(p, ...) then we have to negate the result.
1746  if (Swapped)
1747  Result = Builder.CreateNeg(Result, "diff.neg");
1748 
1749  return Builder.CreateIntCast(Result, Ty, true);
1750 }
1751 
1752 Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
1753  if (Value *V = SimplifySubInst(I.getOperand(0), I.getOperand(1),
1754  I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
1755  SQ.getWithInstruction(&I)))
1756  return replaceInstUsesWith(I, V);
1757 
1758  if (Instruction *X = foldVectorBinop(I))
1759  return X;
1760 
1761  if (Instruction *Phi = foldBinopWithPhiOperands(I))
1762  return Phi;
1763 
1764  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1765 
1766  // If this is a 'B = x-(-A)', change to B = x+A.
1767  // We deal with this without involving Negator to preserve NSW flag.
1768  if (Value *V = dyn_castNegVal(Op1)) {
1769  BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);
1770 
1771  if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
1772  assert(BO->getOpcode() == Instruction::Sub &&
1773  "Expected a subtraction operator!");
1774  if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
1775  Res->setHasNoSignedWrap(true);
1776  } else {
1777  if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
1778  Res->setHasNoSignedWrap(true);
1779  }
1780 
1781  return Res;
1782  }
1783 
1784  // Try this before Negator to preserve NSW flag.
1785  if (Instruction *R = factorizeMathWithShlOps(I, Builder))
1786  return R;
1787 
1788  Constant *C;
1789  if (match(Op0, m_ImmConstant(C))) {
1790  Value *X;
1791  Constant *C2;
1792 
1793  // C-(X+C2) --> (C-C2)-X
1794  if (match(Op1, m_Add(m_Value(X), m_ImmConstant(C2))))
1795  return BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);
1796  }
1797 
1798  auto TryToNarrowDeduceFlags = [this, &I, &Op0, &Op1]() -> Instruction * {
1799  if (Instruction *Ext = narrowMathIfNoOverflow(I))
1800  return Ext;
1801 
1802  bool Changed = false;
1803  if (!I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1, I)) {
1804  Changed = true;
1805  I.setHasNoSignedWrap(true);
1806  }
1807  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1, I)) {
1808  Changed = true;
1809  I.setHasNoUnsignedWrap(true);
1810  }
1811 
1812  return Changed ? &I : nullptr;
1813  };
1814 
1815  // First, let's try to interpret `sub a, b` as `add a, (sub 0, b)`,
1816  // and let's try to sink `(sub 0, b)` into `b` itself. But only if this isn't
1817  // a pure negation used by a select that looks like abs/nabs.
1818  bool IsNegation = match(Op0, m_ZeroInt());
1819  if (!IsNegation || none_of(I.users(), [&I, Op1](const User *U) {
1820  const Instruction *UI = dyn_cast<Instruction>(U);
1821  if (!UI)
1822  return false;
1823  return match(UI,
1824  m_Select(m_Value(), m_Specific(Op1), m_Specific(&I))) ||
1825  match(UI, m_Select(m_Value(), m_Specific(&I), m_Specific(Op1)));
1826  })) {
1827  if (Value *NegOp1 = Negator::Negate(IsNegation, Op1, *this))
1828  return BinaryOperator::CreateAdd(NegOp1, Op0);
1829  }
1830  if (IsNegation)
1831  return TryToNarrowDeduceFlags(); // Should have been handled in Negator!
1832 
1833  // (A*B)-(A*C) -> A*(B-C) etc
1834  if (Value *V = SimplifyUsingDistributiveLaws(I))
1835  return replaceInstUsesWith(I, V);
1836 
1837  if (I.getType()->isIntOrIntVectorTy(1))
1838  return BinaryOperator::CreateXor(Op0, Op1);
1839 
1840  // Replace (-1 - A) with (~A).
1841  if (match(Op0, m_AllOnes()))
1842  return BinaryOperator::CreateNot(Op1);
1843 
1844  // (X + -1) - Y --> ~Y + X
1845  Value *X, *Y;
1846  if (match(Op0, m_OneUse(m_Add(m_Value(X), m_AllOnes()))))
1847  return BinaryOperator::CreateAdd(Builder.CreateNot(Op1), X);
1848 
1849  // Reassociate sub/add sequences to create more add instructions and
1850  // reduce dependency chains:
1851  // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
1852  Value *Z;
1853  if (match(Op0, m_OneUse(m_c_Add(m_OneUse(m_Sub(m_Value(X), m_Value(Y))),
1854  m_Value(Z))))) {
1855  Value *XZ = Builder.CreateAdd(X, Z);
1856  Value *YW = Builder.CreateAdd(Y, Op1);
1857  return BinaryOperator::CreateSub(XZ, YW);
1858  }
1859 
1860  // ((X - Y) - Op1) --> X - (Y + Op1)
1861  if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y))))) {
1862  Value *Add = Builder.CreateAdd(Y, Op1);
1863  return BinaryOperator::CreateSub(X, Add);
1864  }
1865 
1866  // (~X) - (~Y) --> Y - X
1867  // This is placed after the other reassociations and explicitly excludes a
1868  // sub-of-sub pattern to avoid infinite looping.
1869  if (isFreeToInvert(Op0, Op0->hasOneUse()) &&
1870  isFreeToInvert(Op1, Op1->hasOneUse()) &&
1871  !match(Op0, m_Sub(m_ImmConstant(), m_Value()))) {
1872  Value *NotOp0 = Builder.CreateNot(Op0);
1873  Value *NotOp1 = Builder.CreateNot(Op1);
1874  return BinaryOperator::CreateSub(NotOp1, NotOp0);
1875  }
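  // The fold above is justified by the two's complement identity
  // ~V == -V - 1: (~X) - (~Y) == (-X - 1) - (-Y - 1) == Y - X.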
1876 
1877  auto m_AddRdx = [](Value *&Vec) {
1878  return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_add>(m_Value(Vec)));
1879  };
1880  Value *V0, *V1;
1881  if (match(Op0, m_AddRdx(V0)) && match(Op1, m_AddRdx(V1)) &&
1882  V0->getType() == V1->getType()) {
1883  // Difference of sums is sum of differences:
1884  // add_rdx(V0) - add_rdx(V1) --> add_rdx(V0 - V1)
1885  Value *Sub = Builder.CreateSub(V0, V1);
1886  Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_add,
1887  {Sub->getType()}, {Sub});
1888  return replaceInstUsesWith(I, Rdx);
1889  }
1890 
1891  if (Constant *C = dyn_cast<Constant>(Op0)) {
1892  Value *X;
1893  if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
1894  // C - (zext bool) --> bool ? C - 1 : C
1895  return SelectInst::Create(X, InstCombiner::SubOne(C), C);
1896  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
1897  // C - (sext bool) --> bool ? C + 1 : C
1898  return SelectInst::Create(X, InstCombiner::AddOne(C), C);
1899 
1900  // C - ~X == X + (1+C)
1901  if (match(Op1, m_Not(m_Value(X))))
1902  return BinaryOperator::CreateAdd(X, InstCombiner::AddOne(C));
1903 
1904  // Try to fold constant sub into select arguments.
1905  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
1906  if (Instruction *R = FoldOpIntoSelect(I, SI))
1907  return R;
1908 
1909  // Try to fold constant sub into PHI values.
1910  if (PHINode *PN = dyn_cast<PHINode>(Op1))
1911  if (Instruction *R = foldOpIntoPhi(I, PN))
1912  return R;
1913 
1914  Constant *C2;
1915 
1916  // C-(C2-X) --> X+(C-C2)
1917  if (match(Op1, m_Sub(m_ImmConstant(C2), m_Value(X))))
1918  return BinaryOperator::CreateAdd(X, ConstantExpr::getSub(C, C2));
1919  }
1920 
1921  const APInt *Op0C;
1922  if (match(Op0, m_APInt(Op0C)) && Op0C->isMask()) {
1923  // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
1924  // zero.
1925  KnownBits RHSKnown = computeKnownBits(Op1, 0, &I);
1926  if ((*Op0C | RHSKnown.Zero).isAllOnes())
1927  return BinaryOperator::CreateXor(Op1, Op0);
1928  }
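  // e.g. for i8: if the top 4 bits of %x are known zero, 15 - %x cannot
  // borrow, so it is the same as 15 ^ %x.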
1929 
1930  {
1931  Value *Y;
1932  // X-(X+Y) == -Y and X-(Y+X) == -Y
1933  if (match(Op1, m_c_Add(m_Specific(Op0), m_Value(Y))))
1934  return BinaryOperator::CreateNeg(Y);
1935 
1936  // (X-Y)-X == -Y
1937  if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
1938  return BinaryOperator::CreateNeg(Y);
1939  }
1940 
1941  // (sub (or A, B) (and A, B)) --> (xor A, B)
1942  {
1943  Value *A, *B;
1944  if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
1945  match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
1946  return BinaryOperator::CreateXor(A, B);
1947  }
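  // This fold and the or/and/add variants below follow from the identity
  // A + B == (A | B) + (A & B) == (A ^ B) + 2 * (A & B).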
1948 
1949  // (sub (add A, B) (or A, B)) --> (and A, B)
1950  {
1951  Value *A, *B;
1952  if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
1953  match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
1954  return BinaryOperator::CreateAnd(A, B);
1955  }
1956 
1957  // (sub (add A, B) (and A, B)) --> (or A, B)
1958  {
1959  Value *A, *B;
1960  if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
1961  match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
1962  return BinaryOperator::CreateOr(A, B);
1963  }
1964 
1965  // (sub (and A, B) (or A, B)) --> neg (xor A, B)
1966  {
1967  Value *A, *B;
1968  if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1969  match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
1970  (Op0->hasOneUse() || Op1->hasOneUse()))
1971  return BinaryOperator::CreateNeg(Builder.CreateXor(A, B));
1972  }
1973 
1974  // (sub (or A, B), (xor A, B)) --> (and A, B)
1975  {
1976  Value *A, *B;
1977  if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
1978  match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
1979  return BinaryOperator::CreateAnd(A, B);
1980  }
1981 
1982  // (sub (xor A, B) (or A, B)) --> neg (and A, B)
1983  {
1984  Value *A, *B;
1985  if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
1986  match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
1987  (Op0->hasOneUse() || Op1->hasOneUse()))
1988  return BinaryOperator::CreateNeg(Builder.CreateAnd(A, B));
1989  }
1990 
1991  {
1992  Value *Y;
1993  // ((X | Y) - X) --> (~X & Y)
1994  if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
1995  return BinaryOperator::CreateAnd(
1996  Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));
1997  }
1998 
1999  {
2000  // (sub (and Op1, (neg X)), Op1) --> neg (and Op1, (add X, -1))
2001  Value *X;
2002  if (match(Op0, m_OneUse(m_c_And(m_Specific(Op1),
2003  m_OneUse(m_Neg(m_Value(X))))))) {
2004  return BinaryOperator::CreateNeg(Builder.CreateAnd(
2005  Op1, Builder.CreateAdd(X, Constant::getAllOnesValue(I.getType()))));
2006  }
2007  }
2008 
2009  {
2010  // (sub (and Op1, C), Op1) --> neg (and Op1, ~C)
2011  Constant *C;
2012  if (match(Op0, m_OneUse(m_And(m_Specific(Op1), m_Constant(C))))) {
2013  return BinaryOperator::CreateNeg(
2014  Builder.CreateAnd(Op1, Builder.CreateNot(C)));
2015  }
2016  }
2017 
2018  if (auto *II = dyn_cast<MinMaxIntrinsic>(Op1)) {
2019  {
2020  // sub(add(X,Y), s/umin(X,Y)) --> s/umax(X,Y)
2021  // sub(add(X,Y), s/umax(X,Y)) --> s/umin(X,Y)
2022  Value *X = II->getLHS();
2023  Value *Y = II->getRHS();
2024  if (match(Op0, m_c_Add(m_Specific(X), m_Specific(Y))) &&
2025  (Op0->hasOneUse() || Op1->hasOneUse())) {
2026  Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
2027  Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y);
2028  return replaceInstUsesWith(I, InvMaxMin);
2029  }
2030  }
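  // The fold above uses the identity X + Y == min(X, Y) + max(X, Y) (for
  // both signed and unsigned variants): subtracting one leaves the other.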
2031 
2032  {
2033  // sub(add(X,Y),umin(Y,Z)) --> add(X,usub.sat(Y,Z))
2034  // sub(add(X,Z),umin(Y,Z)) --> add(X,usub.sat(Z,Y))
2035  Value *X, *Y, *Z;
2036  if (match(Op1, m_OneUse(m_UMin(m_Value(Y), m_Value(Z))))) {
2037  if (match(Op0, m_OneUse(m_c_Add(m_Specific(Y), m_Value(X)))))
2038  return BinaryOperator::CreateAdd(
2039  X, Builder.CreateIntrinsic(Intrinsic::usub_sat, I.getType(),
2040  {Y, Z}));
2041  if (match(Op0, m_OneUse(m_c_Add(m_Specific(Z), m_Value(X)))))
2042  return BinaryOperator::CreateAdd(
2043  X, Builder.CreateIntrinsic(Intrinsic::usub_sat, I.getType(),
2044  {Z, Y}));
2045  }
2046  }
2047  }
2048 
2049  {
2050  // If we have a subtraction between some value and a select between
2051  // said value and something else, sink subtraction into select hands, i.e.:
2052  // sub (select %Cond, %TrueVal, %FalseVal), %Op1
2053  // ->
2054  // select %Cond, (sub %TrueVal, %Op1), (sub %FalseVal, %Op1)
2055  // or
2056  // sub %Op0, (select %Cond, %TrueVal, %FalseVal)
2057  // ->
2058  // select %Cond, (sub %Op0, %TrueVal), (sub %Op0, %FalseVal)
2059  // This will result in a select between the new subtraction and 0.
2060  auto SinkSubIntoSelect =
2061  [Ty = I.getType()](Value *Select, Value *OtherHandOfSub,
2062  auto SubBuilder) -> Instruction * {
2063  Value *Cond, *TrueVal, *FalseVal;
2064  if (!match(Select, m_OneUse(m_Select(m_Value(Cond), m_Value(TrueVal),
2065  m_Value(FalseVal)))))
2066  return nullptr;
2067  if (OtherHandOfSub != TrueVal && OtherHandOfSub != FalseVal)
2068  return nullptr;
2069  // While it is really tempting to just create two subtractions and let
2070  // InstCombine fold one of those to 0, it isn't possible to do so
2071  // because of worklist visitation order. So ugly it is.
2072  bool OtherHandOfSubIsTrueVal = OtherHandOfSub == TrueVal;
2073  Value *NewSub = SubBuilder(OtherHandOfSubIsTrueVal ? FalseVal : TrueVal);
2074  Constant *Zero = Constant::getNullValue(Ty);
2075  SelectInst *NewSel =
2076  SelectInst::Create(Cond, OtherHandOfSubIsTrueVal ? Zero : NewSub,
2077  OtherHandOfSubIsTrueVal ? NewSub : Zero);
2078  // Preserve prof metadata if any.
2079  NewSel->copyMetadata(cast<Instruction>(*Select));
2080  return NewSel;
2081  };
2082  if (Instruction *NewSel = SinkSubIntoSelect(
2083  /*Select=*/Op0, /*OtherHandOfSub=*/Op1,
2084  [Builder = &Builder, Op1](Value *OtherHandOfSelect) {
2085  return Builder->CreateSub(OtherHandOfSelect,
2086  /*OtherHandOfSub=*/Op1);
2087  }))
2088  return NewSel;
2089  if (Instruction *NewSel = SinkSubIntoSelect(
2090  /*Select=*/Op1, /*OtherHandOfSub=*/Op0,
2091  [Builder = &Builder, Op0](Value *OtherHandOfSelect) {
2092  return Builder->CreateSub(/*OtherHandOfSub=*/Op0,
2093  OtherHandOfSelect);
2094  }))
2095  return NewSel;
2096  }
2097 
2098  // (X - (X & Y)) --> (X & ~Y)
2099  if (match(Op1, m_c_And(m_Specific(Op0), m_Value(Y))) &&
2100  (Op1->hasOneUse() || isa<Constant>(Y)))
2101  return BinaryOperator::CreateAnd(
2102  Op0, Builder.CreateNot(Y, Y->getName() + ".not"));
2103 
2104  // ~X - Min/Max(~X, Y) -> ~Min/Max(X, ~Y) - X
2105  // ~X - Min/Max(Y, ~X) -> ~Min/Max(X, ~Y) - X
2106  // Min/Max(~X, Y) - ~X -> X - ~Min/Max(X, ~Y)
2107  // Min/Max(Y, ~X) - ~X -> X - ~Min/Max(X, ~Y)
2108  // As long as Y is freely invertible, this will be neutral or a win.
2109  // Note: We don't generate the inverse max/min, just create the 'not' of
2110  // it and let other folds do the rest.
2111  if (match(Op0, m_Not(m_Value(X))) &&
2112  match(Op1, m_c_MaxOrMin(m_Specific(Op0), m_Value(Y))) &&
2113  !Op0->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
2114  Value *Not = Builder.CreateNot(Op1);
2115  return BinaryOperator::CreateSub(Not, X);
2116  }
2117  if (match(Op1, m_Not(m_Value(X))) &&
2118  match(Op0, m_c_MaxOrMin(m_Specific(Op1), m_Value(Y))) &&
2119  !Op1->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
2120  Value *Not = Builder.CreateNot(Op0);
2121  return BinaryOperator::CreateSub(X, Not);
2122  }
2123 
2124  // Optimize the difference of two pointers into the same array into a
2125  // size. Consider &A[10] - &A[0]: we should compile this to "10".
2126  Value *LHSOp, *RHSOp;
2127  if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
2128  match(Op1, m_PtrToInt(m_Value(RHSOp))))
2129  if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
2130  I.hasNoUnsignedWrap()))
2131  return replaceInstUsesWith(I, Res);
2132 
2133  // trunc(p)-trunc(q) -> trunc(p-q)
2134  if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
2135  match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
2136  if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
2137  /* IsNUW */ false))
2138  return replaceInstUsesWith(I, Res);
2139 
2140  // Canonicalize a shifty way to code absolute value to the common pattern.
2141  // There are 2 potential commuted variants.
2142  // We're relying on the fact that we only do this transform when the shift has
2143  // exactly 2 uses and the xor has exactly 1 use (otherwise, we might increase
2144  // instructions).
2145  Value *A;
2146  const APInt *ShAmt;
2147  Type *Ty = I.getType();
2148  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
2149  Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
2150  match(Op0, m_OneUse(m_c_Xor(m_Specific(A), m_Specific(Op1))))) {
2151  // B = ashr i32 A, 31 ; smear the sign bit
2152  // sub (xor A, B), B ; flip bits if negative and subtract -1 (add 1)
2153  // --> (A < 0) ? -A : A
2154  Value *IsNeg = Builder.CreateIsNeg(A);
2155  // Copy the nuw/nsw flags from the sub to the negate.
2156  Value *NegA = Builder.CreateNeg(A, "", I.hasNoUnsignedWrap(),
2157  I.hasNoSignedWrap());
2158  return SelectInst::Create(IsNeg, NegA, A);
2159  }
2160 
2161  // If we are subtracting a low-bit masked subset of some value from an add
2162  // of that same value with no low bits changed, that is clearing some low bits
2163  // of the sum:
2164  // sub (X + AddC), (X & AndC) --> and (X + AddC), ~AndC
2165  const APInt *AddC, *AndC;
2166  if (match(Op0, m_Add(m_Value(X), m_APInt(AddC))) &&
2167  match(Op1, m_And(m_Specific(X), m_APInt(AndC)))) {
2168  unsigned BitWidth = Ty->getScalarSizeInBits();
2169  unsigned Cttz = AddC->countTrailingZeros();
2170  APInt HighMask(APInt::getHighBitsSet(BitWidth, BitWidth - Cttz));
2171  if ((HighMask & *AndC).isZero())
2172  return BinaryOperator::CreateAnd(Op0, ConstantInt::get(Ty, ~(*AndC)));
2173  }
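  // e.g. (X + 16) - (X & 15): the low 4 bits of X pass through the add
  // unchanged, so the result is (X + 16) & ~15.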
2174 
2175  if (Instruction *V =
2176  canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
2177  return V;
2178 
2179  // X - usub.sat(X, Y) => umin(X, Y)
2180  if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Specific(Op0),
2181  m_Value(Y)))))
2182  return replaceInstUsesWith(
2183  I, Builder.CreateIntrinsic(Intrinsic::umin, {I.getType()}, {Op0, Y}));
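  // If X >= Y then usub.sat(X, Y) == X - Y, giving Y; otherwise it is 0,
  // giving X. Either way the result is umin(X, Y).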
2184 
2185  // umax(X, Op1) - Op1 --> usub.sat(X, Op1)
2186  // TODO: The one-use restriction is not strictly necessary, but it may
2187  // require improving other pattern matching and/or codegen.
2188  if (match(Op0, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op1)))))
2189  return replaceInstUsesWith(
2190  I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op1}));
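  // umax(X, Op1) >= Op1, so the sub cannot wrap; when X <= Op1 the result
  // is 0, which matches usub.sat(X, Op1).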
2191 
2192  // Op0 - umin(X, Op0) --> usub.sat(Op0, X)
2193  if (match(Op1, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op0)))))
2194  return replaceInstUsesWith(
2195  I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op0, X}));
2196 
2197  // Op0 - umax(X, Op0) --> 0 - usub.sat(X, Op0)
2198  if (match(Op1, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op0))))) {
2199  Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op0});
2200  return BinaryOperator::CreateNeg(USub);
2201  }
2202 
2203  // umin(X, Op1) - Op1 --> 0 - usub.sat(Op1, X)
2204  if (match(Op0, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op1))))) {
2205  Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op1, X});
2206  return BinaryOperator::CreateNeg(USub);
2207  }
2208 
2209  // C - ctpop(X) => ctpop(~X) if C is bitwidth
2210  if (match(Op0, m_SpecificInt(Ty->getScalarSizeInBits())) &&
2211  match(Op1, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(X)))))
2212  return replaceInstUsesWith(
2213  I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
2214  {Builder.CreateNot(X)}));
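  // This holds because ctpop(X) + ctpop(~X) == bitwidth for any X.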
2215 
2216  return TryToNarrowDeduceFlags();
2217 }
2218 
2219 /// This eliminates floating-point negation in either 'fneg(X)' or
2220 /// 'fsub(-0.0, X)' form by combining into a constant operand.
2221 static Instruction *foldFNegIntoConstant(Instruction &I) {
2222  // This is limited with one-use because fneg is assumed better for
2223  // reassociation and cheaper in codegen than fmul/fdiv.
2224  // TODO: Should the m_OneUse restriction be removed?
2225  Instruction *FNegOp;
2226  if (!match(&I, m_FNeg(m_OneUse(m_Instruction(FNegOp)))))
2227  return nullptr;
2228 
2229  Value *X;
2230  Constant *C;
2231 
2232  // Fold negation into constant operand.
2233  // -(X * C) --> X * (-C)
2234  if (match(FNegOp, m_FMul(m_Value(X), m_Constant(C))))
2235  return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);
2236  // -(X / C) --> X / (-C)
2237  if (match(FNegOp, m_FDiv(m_Value(X), m_Constant(C))))
2238  return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);
2239  // -(C / X) --> (-C) / X
2240  if (match(FNegOp, m_FDiv(m_Constant(C), m_Value(X)))) {
2241  Instruction *FDiv =
2242  BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);
2243 
2244  // Intersect 'nsz' and 'ninf' because those special value exceptions may not
2245  // apply to the fdiv. Everything else propagates from the fneg.
2246  // TODO: We could propagate nsz/ninf from fdiv alone?
2247  FastMathFlags FMF = I.getFastMathFlags();
2248  FastMathFlags OpFMF = FNegOp->getFastMathFlags();
2249  FDiv->setHasNoSignedZeros(FMF.noSignedZeros() && OpFMF.noSignedZeros());
2250  FDiv->setHasNoInfs(FMF.noInfs() && OpFMF.noInfs());
2251  return FDiv;
2252  }
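  // e.g. -(2.0 / %x) --> -2.0 / %x, with the flags adjusted as above.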
2253  // With NSZ [ counter-example with -0.0: -(-0.0 + 0.0) != 0.0 + -0.0 ]:
2254  // -(X + C) --> -X + -C --> -C - X
2255  if (I.hasNoSignedZeros() && match(FNegOp, m_FAdd(m_Value(X), m_Constant(C))))
2256  return BinaryOperator::CreateFSubFMF(ConstantExpr::getFNeg(C), X, &I);
2257 
2258  return nullptr;
2259 }
2260 
2261 static Instruction *hoistFNegAboveFMulFDiv(Instruction &I,
2262  InstCombiner::BuilderTy &Builder) {
2263  Value *FNeg;
2264  if (!match(&I, m_FNeg(m_Value(FNeg))))
2265  return nullptr;
2266 
2267  Value *X, *Y;
2268  if (match(FNeg, m_OneUse(m_FMul(m_Value(X), m_Value(Y)))))
2269  return BinaryOperator::CreateFMulFMF(Builder.CreateFNegFMF(X, &I), Y, &I);
2270 
2271  if (match(FNeg, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))))
2272  return BinaryOperator::CreateFDivFMF(Builder.CreateFNegFMF(X, &I), Y, &I);
2273 
2274  return nullptr;
2275 }
2276 
2277 Instruction *InstCombinerImpl::visitFNeg(UnaryOperator &I) {
2278  Value *Op = I.getOperand(0);
2279 
2280  if (Value *V = SimplifyFNegInst(Op, I.getFastMathFlags(),
2281  getSimplifyQuery().getWithInstruction(&I)))
2282  return replaceInstUsesWith(I, V);
2283 
2284  if (Instruction *X = foldFNegIntoConstant(I))
2285  return X;
2286 
2287  Value *X, *Y;
2288 
2289  // If we can ignore the sign of zeros: -(X - Y) --> (Y - X)
2290  if (I.hasNoSignedZeros() &&
2291  match(Op, m_OneUse(m_FSub(m_Value(X), m_Value(Y)))))
2292  return BinaryOperator::CreateFSubFMF(Y, X, &I);
2293 
2294  if (Instruction *R = hoistFNegAboveFMulFDiv(I, Builder))
2295  return R;
2296 
2297  // Try to eliminate fneg if at least 1 arm of the select is negated.
2298  Value *Cond;
2299  if (match(Op, m_OneUse(m_Select(m_Value(Cond), m_Value(X), m_Value(Y))))) {
2300  // Unlike most transforms, this one is not safe to propagate nsz unless
2301  // it is present on the original select. (We are conservatively intersecting
2302  // the nsz flags from the select and root fneg instruction.)
2303  auto propagateSelectFMF = [&](SelectInst *S, bool CommonOperand) {
2304  S->copyFastMathFlags(&I);
2305  if (auto *OldSel = dyn_cast<SelectInst>(Op))
2306  if (!OldSel->hasNoSignedZeros() && !CommonOperand &&
2307  !isGuaranteedNotToBeUndefOrPoison(OldSel->getCondition()))
2308  S->setHasNoSignedZeros(false);
2309  };
2310  // -(Cond ? -P : Y) --> Cond ? P : -Y
2311  Value *P;
2312  if (match(X, m_FNeg(m_Value(P)))) {
2313  Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");
2314  SelectInst *NewSel = SelectInst::Create(Cond, P, NegY);
2315  propagateSelectFMF(NewSel, P == Y);
2316  return NewSel;
2317  }
2318  // -(Cond ? X : -P) --> Cond ? -X : P
2319  if (match(Y, m_FNeg(m_Value(P)))) {
2320  Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");
2321  SelectInst *NewSel = SelectInst::Create(Cond, NegX, P);
2322  propagateSelectFMF(NewSel, P == X);
2323  return NewSel;
2324  }
2325  }
2326 
2327  return nullptr;
2328 }
2329 
2330 Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) {
2331  if (Value *V = SimplifyFSubInst(I.getOperand(0), I.getOperand(1),
2332  I.getFastMathFlags(),
2333  getSimplifyQuery().getWithInstruction(&I)))
2334  return replaceInstUsesWith(I, V);
2335 
2336  if (Instruction *X = foldVectorBinop(I))
2337  return X;
2338 
2339  if (Instruction *Phi = foldBinopWithPhiOperands(I))
2340  return Phi;
2341 
2342  // Subtraction from -0.0 is the canonical form of fneg.
2343  // fsub -0.0, X ==> fneg X
2344  // fsub nsz 0.0, X ==> fneg nsz X
2345  //
2346  // FIXME This matcher does not respect FTZ or DAZ yet:
2347  // fsub -0.0, Denorm ==> +-0
2348  // fneg Denorm ==> -Denorm
2349  Value *Op;
2350  if (match(&I, m_FNeg(m_Value(Op))))
2351  return UnaryOperator::CreateFNegFMF(Op, &I);
2352 
2353  if (Instruction *X = foldFNegIntoConstant(I))
2354  return X;
2355 
2356  if (Instruction *R = hoistFNegAboveFMulFDiv(I, Builder))
2357  return R;
2358 
2359  Value *X, *Y;
2360  Constant *C;
2361 
2362  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2363  // If Op0 is not -0.0 or we can ignore -0.0: Z - (X - Y) --> Z + (Y - X)
2364  // Canonicalize to fadd to make analysis easier.
2365  // This can also help codegen because fadd is commutative.
2366  // Note that if this fsub was really an fneg, the fadd with -0.0 will get
2367  // killed later. We still limit that particular transform with 'hasOneUse'
2368  // because an fneg is assumed better/cheaper than a generic fsub.
2369  if (I.hasNoSignedZeros() || CannotBeNegativeZero(Op0, SQ.TLI)) {
2370  if (match(Op1, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
2371  Value *NewSub = Builder.CreateFSubFMF(Y, X, &I);
2372  return BinaryOperator::CreateFAddFMF(Op0, NewSub, &I);
2373  }
2374  }
2375 
2376  // (-X) - Op1 --> -(X + Op1)
2377  if (I.hasNoSignedZeros() && !isa<ConstantExpr>(Op0) &&
2378  match(Op0, m_OneUse(m_FNeg(m_Value(X))))) {
2379  Value *FAdd = Builder.CreateFAddFMF(X, Op1, &I);
2380  return UnaryOperator::CreateFNegFMF(FAdd, &I);
2381  }
2382 
2383  if (isa<Constant>(Op0))
2384  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
2385  if (Instruction *NV = FoldOpIntoSelect(I, SI))
2386  return NV;
2387 
2388  // X - C --> X + (-C)
2389  // But don't transform constant expressions because there's an inverse fold
2390  // for X + (-Y) --> X - Y.
2391  if (match(Op1, m_ImmConstant(C)))
2392  return BinaryOperator::CreateFAddFMF(Op0, ConstantExpr::getFNeg(C), &I);
2393 
2394  // X - (-Y) --> X + Y
2395  if (match(Op1, m_FNeg(m_Value(Y))))
2396  return BinaryOperator::CreateFAddFMF(Op0, Y, &I);
2397 
2398  // Similar to above, but look through a cast of the negated value:
2399  // X - (fptrunc(-Y)) --> X + fptrunc(Y)
2400  Type *Ty = I.getType();
2401  if (match(Op1, m_OneUse(m_FPTrunc(m_FNeg(m_Value(Y))))))
2402  return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPTrunc(Y, Ty), &I);
2403 
2404  // X - (fpext(-Y)) --> X + fpext(Y)
2405  if (match(Op1, m_OneUse(m_FPExt(m_FNeg(m_Value(Y))))))
2406  return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPExt(Y, Ty), &I);
2407 
2408  // Similar to above, but look through fmul/fdiv of the negated value:
2409  // Op0 - (-X * Y) --> Op0 + (X * Y)
2410  // Op0 - (Y * -X) --> Op0 + (X * Y)
2411  if (match(Op1, m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))))) {
2412  Value *FMul = Builder.CreateFMulFMF(X, Y, &I);
2413  return BinaryOperator::CreateFAddFMF(Op0, FMul, &I);
2414  }
2415  // Op0 - (-X / Y) --> Op0 + (X / Y)
2416  // Op0 - (X / -Y) --> Op0 + (X / Y)
2417  if (match(Op1, m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y)))) ||
2418  match(Op1, m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))))) {
2419  Value *FDiv = Builder.CreateFDivFMF(X, Y, &I);
2420  return BinaryOperator::CreateFAddFMF(Op0, FDiv, &I);
2421  }
2422 
2423  // Handle special cases for FSub with selects feeding the operation
2424  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
2425  return replaceInstUsesWith(I, V);
2426 
2427  if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
2428  // (Y - X) - Y --> -X
2429  if (match(Op0, m_FSub(m_Specific(Op1), m_Value(X))))
2430  return UnaryOperator::CreateFNegFMF(X, &I);
2431 
2432  // Y - (X + Y) --> -X
2433  // Y - (Y + X) --> -X
2434  if (match(Op1, m_c_FAdd(m_Specific(Op0), m_Value(X))))
2435  return UnaryOperator::CreateFNegFMF(X, &I);
2436 
2437  // (X * C) - X --> X * (C - 1.0)
2438  if (match(Op0, m_FMul(m_Specific(Op1), m_Constant(C)))) {
2439  Constant *CSubOne = ConstantExpr::getFSub(C, ConstantFP::get(Ty, 1.0));
2440  return BinaryOperator::CreateFMulFMF(Op1, CSubOne, &I);
2441  }
2442  // X - (X * C) --> X * (1.0 - C)
2443  if (match(Op1, m_FMul(m_Specific(Op0), m_Constant(C)))) {
2444  Constant *OneSubC = ConstantExpr::getFSub(ConstantFP::get(Ty, 1.0), C);
2445  return BinaryOperator::CreateFMulFMF(Op0, OneSubC, &I);
2446  }
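  // e.g. with reassoc and nsz: %x - (%x * 0.25) --> %x * 0.75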
2447 
2448  // Reassociate fsub/fadd sequences to create more fadd instructions and
2449  // reduce dependency chains:
2450  // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
2451  Value *Z;
2452  if (match(Op0, m_OneUse(m_c_FAdd(m_OneUse(m_FSub(m_Value(X), m_Value(Y))),
2453  m_Value(Z))))) {
2454  Value *XZ = Builder.CreateFAddFMF(X, Z, &I);
2455  Value *YW = Builder.CreateFAddFMF(Y, Op1, &I);
2456  return BinaryOperator::CreateFSubFMF(XZ, YW, &I);
2457  }
2458 
2459  auto m_FaddRdx = [](Value *&Sum, Value *&Vec) {
2460  return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(m_Value(Sum),
2461  m_Value(Vec)));
2462  };
2463  Value *A0, *A1, *V0, *V1;
2464  if (match(Op0, m_FaddRdx(A0, V0)) && match(Op1, m_FaddRdx(A1, V1)) &&
2465  V0->getType() == V1->getType()) {
2466  // Difference of sums is sum of differences:
2467  // add_rdx(A0, V0) - add_rdx(A1, V1) --> add_rdx(A0, V0 - V1) - A1
2468  Value *Sub = Builder.CreateFSubFMF(V0, V1, &I);
2469  Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
2470  {Sub->getType()}, {A0, Sub}, &I);
2471  return BinaryOperator::CreateFSubFMF(Rdx, A1, &I);
2472  }
2473 
2474  if (Instruction *F = factorizeFAddFSub(I, Builder))
2475  return F;
2476 
2477  // TODO: This performs reassociative folds for FP ops. Some fraction of the
2478  // functionality has been subsumed by simple pattern matching here and in
2479  // InstSimplify. We should let a dedicated reassociation pass handle more
2480  // complex pattern matching and remove this from InstCombine.
2481  if (Value *V = FAddCombine(Builder).simplify(&I))
2482  return replaceInstUsesWith(I, V);
2483 
2484  // (X - Y) - Op1 --> X - (Y + Op1)
2485  if (match(Op0, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
2486  Value *FAdd = Builder.CreateFAddFMF(Y, Op1, &I);
2487  return BinaryOperator::CreateFSubFMF(X, FAdd, &I);
2488  }
2489  }
2490 
2491  return nullptr;
2492 }
llvm::lltok::APFloat
@ APFloat
Definition: LLToken.h:425
set
We currently generate a but we really shouldn eax ecx xorl edx divl ecx eax divl ecx movl eax ret A similar code sequence works for division We currently compile i32 v2 eax eax jo LBB1_2 atomic and others It is also currently not done for read modify write instructions It is also current not done if the OF or CF flags are needed The shift operators have the complication that when the shift count is EFLAGS is not set
Definition: README.txt:1277
foldToUnsignedSaturatedAdd
static Instruction * foldToUnsignedSaturatedAdd(BinaryOperator &I)
Definition: InstCombineAddSub.cpp:1113
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
hoistFNegAboveFMulFDiv
static Instruction * hoistFNegAboveFMulFDiv(Instruction &I, InstCombiner::BuilderTy &Builder)
Definition: InstCombineAddSub.cpp:2261
llvm::InstCombinerImpl::visitFAdd
Instruction * visitFAdd(BinaryOperator &I)
Definition: InstCombineAddSub.cpp:1531
llvm::haveNoCommonBitsSet
bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if LHS and RHS have no common bits set.
Definition: ValueTracking.cpp:267
llvm::PatternMatch::m_TruncOrSelf
match_combine_or< CastClass_match< OpTy, Instruction::Trunc >, OpTy > m_TruncOrSelf(const OpTy &Op)
Definition: PatternMatch.h:1627
llvm::none_of
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1619
llvm::SimplifySubInst
Value * SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
Definition: InstructionSimplify.cpp:865
llvm::MaskedValueIsZero
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if 'V & Mask' is known to be zero.
Definition: ValueTracking.cpp:391
llvm::ConstantExpr::getSIToFP
static Constant * getSIToFP(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2197
llvm::Value::hasOneUse
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
InstCombiner.h
llvm::RecurKind::FMul
@ FMul
Product of floats.
llvm::Intrinsic::getDeclaration
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1410
MatchDiv
static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned)
Definition: InstCombineAddSub.cpp:1027
llvm::CmpInst::Predicate
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:719
simplify
hexagon bit simplify
Definition: HexagonBitSimplify.cpp:289
llvm::ConstantExpr::getZExt
static Constant * getZExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2148
T
llvm::APInt::isMask
bool isMask(unsigned numBits) const
Definition: APInt.h:469
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
llvm::BinaryOperator::CreateNot
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", Instruction *InsertBefore=nullptr)
Definition: Instructions.cpp:2834
llvm::PatternMatch::m_LShr
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1127
llvm::APInt::isPowerOf2
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:425
llvm::KnownBits::Zero
APInt Zero
Definition: KnownBits.h:24
C1
instcombine should handle this C2 when C1
Definition: README.txt:263
llvm::PatternMatch::m_FPOne
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
Definition: PatternMatch.h:826
llvm::isGuaranteedNotToBeUndefOrPoison
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Definition: ValueTracking.cpp:5246
llvm::Type::getScalarType
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:309
llvm::ConstantExpr::getSExt
static Constant * getSExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2134
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1185
llvm::BinaryOperator::CreateFDivFMF
static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, Instruction *FMFSource, const Twine &Name="")
Definition: InstrTypes.h:272
llvm::PatternMatch::m_Add
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
Definition: PatternMatch.h:988
llvm::Type::getFltSemantics
const fltSemantics & getFltSemantics() const
Definition: Type.cpp:67
llvm::IRBuilder< TargetFolder, IRBuilderCallbackInserter >
llvm::CastInst::Create
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Definition: Instructions.cpp:3180
llvm::SimplifyFSubInst
Value * SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, fold the result or return null.
Definition: InstructionSimplify.cpp:5322
ValueTracking.h
llvm::PatternMatch::m_APFloat
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
Definition: PatternMatch.h:287
APInt.h
llvm::ConstantFP::isZero
bool isZero() const
Return true if the value is positive or negative zero.
Definition: Constants.h:301
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
llvm::APInt::getBitWidth
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1423
llvm::ConstantExpr::getFPToSI
static Constant * getFPToSI(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2219
llvm::SimplifyFNegInst
Value * SimplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q)
Given operand for an FNeg, fold the result or return null.
Definition: InstructionSimplify.cpp:5090
llvm::FastMathFlags::noSignedZeros
bool noSignedZeros() const
Definition: FMF.h:69
llvm::ConstantFP::getValueAPF
const APFloat & getValueAPF() const
Definition: Constants.h:297
llvm::PatternMatch::m_BinOp
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:84
llvm::ore::NV
DiagnosticInfoOptimizationBase::Argument NV
Definition: OptimizationRemarkEmitter.h:136
llvm::PatternMatch::m_AShr
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1133
llvm::ComputeNumSignBits
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
Definition: ValueTracking.cpp:415
llvm::tgtok::FalseVal
@ FalseVal
Definition: TGLexer.h:61
Operator.h
llvm::Instruction::copyMetadata
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
Definition: Instruction.cpp:841
AlignOf.h
STLExtras.h
RHS
Value * RHS
Definition: X86PartialReduction.cpp:76
llvm::PatternMatch::m_c_And
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
Definition: PatternMatch.h:2257
llvm::BinaryOperator::CreateNeg
NUW NUW NUW NUW Exact static Exact BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", Instruction *InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
Definition: Instructions.cpp:2794
llvm::UnaryOperator
Definition: InstrTypes.h:101
factorizeLerp
static Instruction * factorizeLerp(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Eliminate an op from a linear interpolation (lerp) pattern.
Definition: InstCombineAddSub.cpp:1470
llvm::PatternMatch::m_Not
BinaryOp_match< ValTy, cst_pred_ty< is_all_ones >, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
Definition: PatternMatch.h:2295
llvm::FastMathFlags
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:21
llvm::isInt
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition: MathExtras.h:363
llvm::InstCombinerImpl::foldAddWithConstant
Instruction * foldAddWithConstant(BinaryOperator &Add)
Definition: InstCombineAddSub.cpp:850
llvm::APFloat::getSemantics
const fltSemantics & getSemantics() const
Definition: APFloat.h:1222
llvm::AlignedCharArrayUnion
A suitably aligned and sized character array member which can hold elements of any type.
Definition: AlignOf.h:27
llvm::EmitGEPOffset
Value * EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
Definition: Local.h:29
llvm::PatternMatch::m_Deferred
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:798
llvm::APIntOps::umin
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition: APInt.h:2149
llvm::PatternMatch::m_FDiv
BinaryOp_match< LHS, RHS, Instruction::FDiv > m_FDiv(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1079
llvm::PatternMatch::m_c_MaxOrMin
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
Definition: PatternMatch.h:2355
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::RISCVFenceField::R
@ R
Definition: RISCVBaseInfo.h:240
llvm::PatternMatch::m_FAdd
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
Definition: PatternMatch.h:994
KnownBits.h
llvm::PatternMatch::m_FSub
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1006
factorizeFAddFSub
static Instruction * factorizeFAddFSub(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Factor a common operand out of fadd/fsub of fmul/fdiv.
Definition: InstCombineAddSub.cpp:1486
llvm::APInt::isSignMask
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
Definition: APInt.h:447
llvm::PatternMatch::m_OneUse
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
llvm::ConstantExpr::getSub
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2726
llvm::APInt::isShiftedMask
bool isShiftedMask() const
Return true if this APInt value contains a non-empty sequence of ones with the remainder zero.
Definition: APInt.h:491
Instruction.h
llvm::PatternMatch::m_APInt
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:270
llvm::APInt::umul_ov
APInt umul_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1987
llvm::PatternMatch::m_c_BinOp
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
Definition: PatternMatch.h:2221
LHS
Value * LHS
Definition: X86PartialReduction.cpp:75
llvm::APInt::isNegative
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:312
llvm::SelectInst::Create
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
Definition: Instructions.h:1768
InstCombineInternal.h
llvm::PatternMatch::m_Select
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
Definition: PatternMatch.h:1472
Constants.h
llvm::PatternMatch::match
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
isZero
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition: Lint.cpp:518
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::Instruction::setHasNoSignedWrap
void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
Definition: Instruction.cpp:127
llvm::User
Definition: User.h:44
llvm::RoundingMode
RoundingMode
Rounding mode.
Definition: FloatingPointMode.h:36
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
InstrTypes.h
int
Clang compiles this i1 i64 store i64 i64 store i64 i64 store i64 i64 store i64 align Which gets codegen d xmm0 movaps rbp movaps rbp movaps rbp movaps rbp rbp rbp rbp rbp It would be better to have movq s of instead of the movaps s LLVM produces ret int
Definition: README.txt:536
llvm::operator+=
std::string & operator+=(std::string &buffer, StringRef string)
Definition: StringRef.h:960
Y
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
llvm::CallInst::Create
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:1517
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
llvm::PatternMatch::m_Instruction
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
Definition: PatternMatch.h:726
foldFNegIntoConstant
static Instruction * foldFNegIntoConstant(Instruction &I)
This eliminates floating-point negation in either 'fneg(X)' or 'fsub(-0.0, X)' form by combining into...
Definition: InstCombineAddSub.cpp:2221
llvm::PatternMatch::m_ZExt
CastClass_match< OpTy, Instruction::ZExt > m_ZExt(const OpTy &Op)
Matches ZExt.
Definition: PatternMatch.h:1639
llvm::PatternMatch::m_c_Add
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
Definition: PatternMatch.h:2243
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
checkForNegativeOperand
static Value * checkForNegativeOperand(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Definition: InstCombineAddSub.cpp:753
llvm::PatternMatch::m_FNeg
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
Definition: PatternMatch.h:1043
llvm::Constant::getAllOnesValue
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:395
llvm::PatternMatch::m_SDiv
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1073
llvm::Instruction
Definition: Instruction.h:42
llvm::Type::getScalarSizeInBits
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition: Type.cpp:189
llvm::PatternMatch::m_UMin
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1881
llvm::PatternMatch::m_c_Or
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
Definition: PatternMatch.h:2264
llvm::ConstantFP
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:257
llvm::APInt::getHighBitsSet
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:279
APFloat.h
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:919
canonicalizeLowbitMask
static Instruction * canonicalizeLowbitMask(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Fold (1 << NBits) - 1 Into: ~(-(1 << NBits)) Because a 'not' is better for bit-tracking analysis and ...
Definition: InstCombineAddSub.cpp:1095
llvm::PatternMatch::m_URem
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1085
PatternMatch.h
llvm::APInt::countTrailingZeros
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
Definition: APInt.h:1555
llvm::CastInst::CreateTruncOrBitCast
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
Definition: Instructions.cpp:3256
llvm::SimplifyFAddInst
Value * SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, fold the result or return null.
Definition: InstructionSimplify.cpp:5314
llvm::array_lengthof
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
Definition: STLArrayExtras.h:29
llvm::Type::getIntegerBitWidth
unsigned getIntegerBitWidth() const
Definition: DerivedTypes.h:97
llvm::InstCombiner::SubOne
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
Definition: InstCombiner.h:207
llvm::PatternMatch::m_c_UMax
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true > m_c_UMax(const LHS &L, const RHS &R)
Matches a UMax with LHS and RHS in either order.
Definition: PatternMatch.h:2345
Type.h
llvm::ConstantExpr::getFNeg
static Constant * getFNeg(Constant *C)
Definition: Constants.cpp:2703
X
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
llvm::PatternMatch::m_One
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
Definition: PatternMatch.h:513
one
the resulting code requires compare and branches when and if the revised code is with conditional branches instead of More there is a byte word extend before each where there should be only one
Definition: README.txt:401
llvm::PatternMatch::m_NSWAdd
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1166
llvm::APFloat::multiply
opStatus multiply(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:987
llvm::PatternMatch::m_Xor
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1115
llvm::InstCombinerImpl::visitFNeg
Instruction * visitFNeg(UnaryOperator &I)
Definition: InstCombineAddSub.cpp:2277
llvm::PatternMatch::m_FPTrunc
CastClass_match< OpTy, Instruction::FPTrunc > m_FPTrunc(const OpTy &Op)
Definition: PatternMatch.h:1692
llvm::PatternMatch::m_ZExtOrSelf
match_combine_or< CastClass_match< OpTy, Instruction::ZExt >, OpTy > m_ZExtOrSelf(const OpTy &Op)
Definition: PatternMatch.h:1645
llvm::APFloat
Definition: APFloat.h:700
llvm::PatternMatch::m_Zero
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:535
llvm::PatternMatch::m_NUWAdd
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1199
llvm::MipsISD::Ext
@ Ext
Definition: MipsISelLowering.h:159
llvm::Constant
This is an important base class in LLVM.
Definition: Constant.h:41
llvm::InstCombinerImpl::visitAdd
Instruction * visitAdd(BinaryOperator &I)
Definition: InstCombineAddSub.cpp:1273
llvm::FastMathFlags::noInfs
bool noInfs() const
Definition: FMF.h:68
llvm::UnaryOperator::CreateFNegFMF
static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", Instruction *InsertBefore=nullptr)
Definition: InstrTypes.h:164
llvm::PatternMatch::m_ImmConstant
match_combine_and< class_match< Constant >, match_unless< class_match< ConstantExpr > > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
Definition: PatternMatch.h:759
llvm::InstCombiner::AddOne
static Constant * AddOne(Constant *C)
Add one to a Constant.
Definition: InstCombiner.h:202
llvm::BinaryOperator::CreateFMulFMF
static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, Instruction *FMFSource, const Twine &Name="")
Definition: InstrTypes.h:267
llvm::InstCombinerImpl::canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract
Instruction * canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(BinaryOperator &I)
Definition: InstCombineAddSub.cpp:1136
llvm::GEPOperator::countNonConstantIndices
unsigned countNonConstantIndices() const
Definition: Operator.h:471
llvm::APInt::logBase2
unsigned logBase2() const
Definition: APInt.h:1664
llvm::PatternMatch::m_Or
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1109
llvm::PatternMatch::m_AllOnes
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
Definition: PatternMatch.h:445
isConstant
static bool isConstant(const MachineInstr &MI)
Definition: AMDGPUInstructionSelector.cpp:2304
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::ConstantExpr::getFSub
static Constant * getFSub(Constant *C1, Constant *C2)
Definition: Constants.cpp:2733
llvm::PatternMatch::m_And
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1103
llvm::Instruction::setDebugLoc
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:364
llvm::PatternMatch::m_SRem
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1091
llvm::InstCombinerImpl::SimplifyAddWithRemainder
Value * SimplifyAddWithRemainder(BinaryOperator &I)
Tries to simplify add operations using the definition of remainder.
Definition: InstCombineAddSub.cpp:1059
llvm::computeKnownBits
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, OptimizationRemarkEmitter *ORE=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
Definition: ValueTracking.cpp:222
llvm::InstCombinerImpl::visitFSub
Instruction * visitFSub(BinaryOperator &I)
Definition: InstCombineAddSub.cpp:2330
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::PatternMatch::m_Sub
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1000
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:853
llvm::Negator::Negate
static LLVM_NODISCARD Value * Negate(bool LHSIsZero, Value *Root, InstCombinerImpl &IC)
Attempt to negate Root.
Definition: InstCombineNegator.cpp:517
SI
StandardInstrumentations SI(Debug, VerifyEach)
llvm::SelectInst
This class represents the LLVM 'select' instruction.
Definition: Instructions.h:1737
llvm::WinEH::EncodingType::CE
@ CE
Windows NT (Windows on ARM)
llvm::PatternMatch::m_Constant
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:142
llvm::GEPOperator
Definition: Operator.h:375
Builder
assume Assume Builder
Definition: AssumeBundleBuilder.cpp:651
llvm::PatternMatch::m_Value
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:76
llvm::ZExtInst
This class represents zero extension of integer types.
Definition: Instructions.h:4819
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:75
llvm::PatternMatch::m_SExt
CastClass_match< OpTy, Instruction::SExt > m_SExt(const OpTy &Op)
Matches SExt.
Definition: PatternMatch.h:1633
llvm::PatternMatch::m_SpecificInt
specific_intval< false > m_SpecificInt(APInt V)
Match a specific integer value or vector with all elements equal to the value.
Definition: PatternMatch.h:863
llvm::PatternMatch::m_CombineAnd
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
Definition: PatternMatch.h:216
llvm::Instruction::getFastMathFlags
FastMathFlags getFastMathFlags() const
Convenience function for getting all the fast-math flags, which must be an operator which supports these flags.
Definition: Instruction.cpp:289
llvm::BinaryOperator
Definition: InstrTypes.h:188
Mul
BinaryOperator * Mul
Definition: X86PartialReduction.cpp:70
llvm::PatternMatch::m_SpecificInt_ICMP
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
Definition: PatternMatch.h:604
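Usage sketch (illustrative only; V and BitWidth are hypothetical, and BitWidth is assumed to match V's scalar type):
  // Succeeds only if V is a constant integer (or constant vector) in which
  // every element is unsigned-greater-than 7.
  if (match(V, m_SpecificInt_ICMP(ICmpInst::ICMP_UGT, APInt(BitWidth, 7)))) {
    // ...
  }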
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:178
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
foldNoWrapAdd
static Instruction * foldNoWrapAdd(BinaryOperator &Add, InstCombiner::BuilderTy &Builder)
Wrapping flags may allow combining constants separated by an extend.
Definition: InstCombineAddSub.cpp:810
MatchMul
static bool MatchMul(Value *E, Value *&Op, APInt &C)
Definition: InstCombineAddSub.cpp:987
factorizeMathWithShlOps
static Instruction * factorizeMathWithShlOps(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
This is a specialization of a more general transform from SimplifyUsingDistributiveLaws.
Definition: InstCombineAddSub.cpp:1239
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:305
llvm::InstCombinerImpl::OptimizePointerDifference
Value * OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty, bool isNUW)
Optimize the difference of two pointers into the same array into a size.
Definition: InstCombineAddSub.cpp:1677
llvm::MCID::Select
@ Select
Definition: MCInstrDesc.h:164
llvm::PatternMatch::m_FPExt
CastClass_match< OpTy, Instruction::FPExt > m_FPExt(const OpTy &Op)
Definition: PatternMatch.h:1697
llvm::tgtok::IntVal
@ IntVal
Definition: TGLexer.h:64
llvm::APIntOps::umax
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
Definition: APInt.h:2154
llvm::Instruction::setFastMathFlags
void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an operator which supports these flags.
Definition: Instruction.cpp:239
Constant.h
llvm::PatternMatch::m_SExtOrSelf
match_combine_or< CastClass_match< OpTy, Instruction::SExt >, OpTy > m_SExtOrSelf(const OpTy &Op)
Definition: PatternMatch.h:1651
llvm::Constant::getNullValue
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:350
llvm::KnownBits
Definition: KnownBits.h:23
llvm::PatternMatch::m_UDiv
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1067
llvm::APInt::isMinSignedValue
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition: APInt.h:408
llvm::BinaryOperator::CreateFAddFMF
static BinaryOperator * CreateFAddFMF(Value *V1, Value *V2, Instruction *FMFSource, const Twine &Name="")
Definition: InstrTypes.h:257
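Usage sketch (illustrative only; A, B, and I are hypothetical operands and a hypothetical flag-carrying instruction):
  // Creates 'fadd A, B' and copies I's fast-math flags onto it.
  BinaryOperator *NewFAdd = BinaryOperator::CreateFAddFMF(A, B, &I);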
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:341
llvm::GEPOperator::isInBounds
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
Definition: Operator.h:392
llvm::ConstantExpr::getAdd
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2715
llvm::Type::isIntOrIntVectorTy
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:197
llvm::ConstantFP::get
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in the specified type.
Definition: Constants.cpp:966
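Usage sketch (illustrative only; Ty is a hypothetical floating-point or FP-vector Type*):
  // For a vector Ty this yields a splat of 1.0; for a scalar Ty, plain 1.0.
  Constant *One = ConstantFP::get(Ty, 1.0);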
Casting.h
llvm::fltSemantics
Definition: APFloat.cpp:54
llvm::BitWidth
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:147
llvm::APInt::smul_ov
APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1976
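Usage sketch (illustrative only; C0 and C1 are hypothetical same-width APInts, as in the MulWillOverflow helper below):
  bool Overflow = false;
  APInt Product = C0.smul_ov(C1, Overflow);
  if (!Overflow) {
    // Product is exact in the original bit width; safe to fold.
  }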
MulWillOverflow
static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned)
Definition: InstCombineAddSub.cpp:1048
llvm::PatternMatch::m_ZeroInt
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
Definition: PatternMatch.h:522
llvm::getInverseMinMaxIntrinsic
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
Definition: ValueTracking.cpp:6270
llvm::PatternMatch::m_PtrToInt
CastClass_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
Definition: PatternMatch.h:1609
llvm::MCID::Add
@ Add
Definition: MCInstrDesc.h:185
llvm::InstCombinerImpl::visitSub
Instruction * visitSub(BinaryOperator &I)
Definition: InstCombineAddSub.cpp:1752
llvm::PatternMatch::m_c_Xor
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
Definition: PatternMatch.h:2271
llvm::APInt::sext
APInt sext(unsigned width) const
Sign extend to a new width.
Definition: APInt.cpp:946
llvm::PatternMatch::m_AnyZeroFP
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
Definition: PatternMatch.h:674
llvm::CannotBeNegativeZero
bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, unsigned Depth=0)
Return true if we can prove that the specified FP value is never equal to -0.0.
Definition: ValueTracking.cpp:3415
llvm::APFloatBase::rmNearestTiesToEven
static constexpr roundingMode rmNearestTiesToEven
Definition: APFloat.h:189
llvm::RecurKind::FAdd
@ FAdd
Sum of floats.
llvm::PatternMatch::m_c_FMul
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
Definition: PatternMatch.h:2370
Instructions.h
llvm::PatternMatch::m_c_FAdd
BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)
Matches FAdd with LHS and RHS in either order.
Definition: PatternMatch.h:2363
llvm::Instruction::setHasNoInfs
void setHasNoInfs(bool B)
Set or clear the no-infs flag on this instruction, which must be an operator which supports this flag.
Definition: Instruction.cpp:214
SmallVector.h
llvm::PatternMatch::m_Specific
specificval_ty m_Specific(const Value *V)
Match only the specific value given; no operand is bound.
Definition: PatternMatch.h:780
llvm::Instruction::setHasNoSignedZeros
void setHasNoSignedZeros(bool B)
Set or clear the no-signed-zeros flag on this instruction, which must be an operator which supports this flag.
Definition: Instruction.cpp:219
llvm::SIToFPInst
This class represents a cast from signed integer to floating point.
Definition: Instructions.h:5014
llvm::PatternMatch::m_ICmp
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
Definition: PatternMatch.h:1404
N
#define N
MatchRem
static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned)
Definition: InstCombineAddSub.cpp:1005
InstructionSimplify.h
llvm::APFloatBase::semanticsPrecision
static unsigned int semanticsPrecision(const fltSemantics &)
Definition: APFloat.cpp:211
llvm::PHINode
Definition: Instructions.h:2664
llvm::PatternMatch::m_Neg
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
Definition: PatternMatch.h:2279
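Usage sketch (illustrative only; V is a hypothetical Value*):
  Value *X;
  if (match(V, m_Neg(m_Value(X)))) {
    // V is the canonical negation 'sub 0, X', i.e. V computes -X.
  }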
CreateAdd
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, Instruction *InsertBefore, Value *FlagsOp)
Definition: Reassociate.cpp:231
llvm::PatternMatch::m_Trunc
CastClass_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
Definition: PatternMatch.h:1621
llvm::ConstantExpr::getFAdd
static Constant * getFAdd(Constant *C1, Constant *C2)
Definition: Constants.cpp:2722
llvm::SimplifyAddInst
Value * SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Definition: InstructionSimplify.cpp:683
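Usage sketch (illustrative only; LHS, RHS, and the SimplifyQuery Q are hypothetical):
  // A non-null result means the add folds away without creating an
  // instruction; null means no simplification was found.
  if (Value *Folded = SimplifyAddInst(LHS, RHS, /*isNSW=*/false,
                                      /*isNUW=*/false, Q)) {
    // Use Folded in place of 'add LHS, RHS'.
  }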
llvm::PatternMatch::m_FMul
BinaryOp_match< LHS, RHS, Instruction::FMul > m_FMul(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1061
llvm::tgtok::TrueVal
@ TrueVal
Definition: TGLexer.h:61
Value.h
llvm::RoundingMode::NearestTiesToEven
@ NearestTiesToEven
roundTiesToEven.
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::PatternMatch::m_Shl
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1121
llvm::BinaryOperator::CreateFSubFMF
static BinaryOperator * CreateFSubFMF(Value *V1, Value *V2, Instruction *FMFSource, const Twine &Name="")
Definition: InstrTypes.h:262
llvm::PatternMatch::m_c_UMin
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > m_c_UMin(const LHS &L, const RHS &R)
Matches a UMin with LHS and RHS in either order.
Definition: PatternMatch.h:2339
llvm::PatternMatch::m_Mul
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
Definition: PatternMatch.h:1055
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:37