1//===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the visit functions for add, fadd, sub, and fsub.
10//
11//===----------------------------------------------------------------------===//
12
13#include "InstCombineInternal.h"
14#include "llvm/ADT/APFloat.h"
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/Analysis/InstructionSimplify.h"
19#include "llvm/Analysis/ValueTracking.h"
20#include "llvm/IR/Constant.h"
21#include "llvm/IR/Constants.h"
22#include "llvm/IR/InstrTypes.h"
23#include "llvm/IR/Instruction.h"
24#include "llvm/IR/Instructions.h"
25#include "llvm/IR/Operator.h"
26#include "llvm/IR/PatternMatch.h"
27#include "llvm/IR/Type.h"
28#include "llvm/IR/Value.h"
29#include "llvm/Support/AlignOf.h"
30#include "llvm/Support/Casting.h"
31#include "llvm/Support/KnownBits.h"
32#include "llvm/Transforms/InstCombine/InstCombiner.h"
33#include <cassert>
34#include <utility>
35
36using namespace llvm;
37using namespace PatternMatch;
38
39#define DEBUG_TYPE "instcombine"
40
41namespace {
42
43 /// Class representing the coefficient of a floating-point addend.
44 /// This class needs to be highly efficient, which is especially true for
45 /// the constructor. As of this writing, the cost of the default
46 /// constructor is merely a 4-byte store of zero (assuming the compiler is
47 /// able to perform write-merging).
48 ///
49 class FAddendCoef {
50 public:
51 // The constructor has to initialize an APFloat, which is unnecessary for
52 // most addends, whose coefficient is either 1 or -1. So, the constructor
53 // is expensive. In order to avoid the cost of the constructor, we should
54 // reuse some instances whenever possible. The pre-created instances
55 // FAddCombine::Add[0-5] embodies this idea.
56 FAddendCoef() = default;
57 ~FAddendCoef();
58
59 // If possible, don't define operator+/operator-, etc., because these
60 // operators inevitably call FAddendCoef's constructor, which is not cheap.
61 void operator=(const FAddendCoef &A);
62 void operator+=(const FAddendCoef &A);
63 void operator*=(const FAddendCoef &S);
64
65 void set(short C) {
66 assert(!insaneIntVal(C) && "Insane coefficient");
67 IsFp = false; IntVal = C;
68 }
69
70 void set(const APFloat& C);
71
72 void negate();
73
74 bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
75 Value *getValue(Type *) const;
76
77 bool isOne() const { return isInt() && IntVal == 1; }
78 bool isTwo() const { return isInt() && IntVal == 2; }
79 bool isMinusOne() const { return isInt() && IntVal == -1; }
80 bool isMinusTwo() const { return isInt() && IntVal == -2; }
81
82 private:
83 bool insaneIntVal(int V) { return V > 4 || V < -4; }
84
85 APFloat *getFpValPtr() { return reinterpret_cast<APFloat *>(&FpValBuf); }
86
87 const APFloat *getFpValPtr() const {
88 return reinterpret_cast<const APFloat *>(&FpValBuf);
89 }
90
91 const APFloat &getFpVal() const {
92 assert(IsFp && BufHasFpVal && "Incorrect state");
93 return *getFpValPtr();
94 }
95
96 APFloat &getFpVal() {
97 assert(IsFp && BufHasFpVal && "Incorrect state");
98 return *getFpValPtr();
99 }
100
101 bool isInt() const { return !IsFp; }
102
103 // If the coefficient is represented by an integer, promote it to a
104 // floating-point value.
105 void convertToFpType(const fltSemantics &Sem);
106
107 // Construct an APFloat from a signed integer.
108 // TODO: We should get rid of this function when APFloat can be constructed
109 // from a *SIGNED* integer.
110 APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);
111
112 bool IsFp = false;
113
114 // True iff FpValBuf contains an instance of APFloat.
115 bool BufHasFpVal = false;
116
117 // The integer coefficient of an individual addend is either 1 or -1,
118 // and we try to simplify at most 4 addends from at most two neighboring
119 // instructions, so the range of <IntVal> falls in [-4, 4]. APInt
120 // would be overkill for this purpose.
121 short IntVal = 0;
122
123 AlignedCharArrayUnion<APFloat> FpValBuf;
124 };
125
126 /// FAddend is used to represent a floating-point addend. An addend is
127 /// represented as <C, V>, where V is a symbolic value and C is a
128 /// constant coefficient. A constant addend is represented as <C, 0>.
129 class FAddend {
130 public:
131 FAddend() = default;
132
133 void operator+=(const FAddend &T) {
134 assert((Val == T.Val) && "Symbolic-values disagree");
135 Coeff += T.Coeff;
136 }
137
138 Value *getSymVal() const { return Val; }
139 const FAddendCoef &getCoef() const { return Coeff; }
140
141 bool isConstant() const { return Val == nullptr; }
142 bool isZero() const { return Coeff.isZero(); }
143
144 void set(short Coefficient, Value *V) {
145 Coeff.set(Coefficient);
146 Val = V;
147 }
148 void set(const APFloat &Coefficient, Value *V) {
149 Coeff.set(Coefficient);
150 Val = V;
151 }
152 void set(const ConstantFP *Coefficient, Value *V) {
153 Coeff.set(Coefficient->getValueAPF());
154 Val = V;
155 }
156
157 void negate() { Coeff.negate(); }
158
159 /// Drill down the U-D chain one step to find the definition of V, and
160 /// try to break the definition into one or two addends.
161 static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);
162
163 /// Similar to FAddend::drillDownOneStep() except that the value being
164 /// split is the addend itself.
165 unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;
166
167 private:
168 void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }
169
170 // This addend has the value of "Coeff * Val".
171 Value *Val = nullptr;
172 FAddendCoef Coeff;
173 };
174
175 /// FAddCombine is the class for optimizing an unsafe fadd/fsub along
176 /// with at most two of its neighboring instructions.
177 ///
178 class FAddCombine {
179 public:
180 FAddCombine(InstCombiner::BuilderTy &B) : Builder(B) {}
181
182 Value *simplify(Instruction *FAdd);
183
184 private:
185 using AddendVect = SmallVector<const FAddend *, 4>;
186
187 Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);
188
189 /// Convert the given addend to a Value.
190 Value *createAddendVal(const FAddend &A, bool& NeedNeg);
191
192 /// Return the number of instructions needed to emit the N-ary addition.
193 unsigned calcInstrNumber(const AddendVect& Vect);
194
195 Value *createFSub(Value *Opnd0, Value *Opnd1);
196 Value *createFAdd(Value *Opnd0, Value *Opnd1);
197 Value *createFMul(Value *Opnd0, Value *Opnd1);
198 Value *createFNeg(Value *V);
199 Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
200 void createInstPostProc(Instruction *NewInst, bool NoNumber = false);
201
202 // Debugging stuff is clustered here.
203 #ifndef NDEBUG
204 unsigned CreateInstrNum;
205 void initCreateInstNum() { CreateInstrNum = 0; }
206 void incCreateInstNum() { CreateInstrNum++; }
207 #else
208 void initCreateInstNum() {}
209 void incCreateInstNum() {}
210 #endif
211
212 InstCombiner::BuilderTy &Builder;
213 Instruction *Instr = nullptr;
214 };
215
216} // end anonymous namespace
217
218//===----------------------------------------------------------------------===//
219//
220// Implementation of
221// {FAddendCoef, FAddend, FAddCombine}.
222//
223//===----------------------------------------------------------------------===//
224FAddendCoef::~FAddendCoef() {
225 if (BufHasFpVal)
226 getFpValPtr()->~APFloat();
227}
228
229void FAddendCoef::set(const APFloat& C) {
230 APFloat *P = getFpValPtr();
231
232 if (isInt()) {
233 // As the buffer is a meaningless byte stream, we cannot call
234 // APFloat::operator=().
235 new(P) APFloat(C);
236 } else
237 *P = C;
238
239 IsFp = BufHasFpVal = true;
240}
241
242void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
243 if (!isInt())
244 return;
245
246 APFloat *P = getFpValPtr();
247 if (IntVal > 0)
248 new(P) APFloat(Sem, IntVal);
249 else {
250 new(P) APFloat(Sem, 0 - IntVal);
251 P->changeSign();
252 }
253 IsFp = BufHasFpVal = true;
254}
255
256APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
257 if (Val >= 0)
258 return APFloat(Sem, Val);
259
260 APFloat T(Sem, 0 - Val);
261 T.changeSign();
262
263 return T;
264}
265
266void FAddendCoef::operator=(const FAddendCoef &That) {
267 if (That.isInt())
268 set(That.IntVal);
269 else
270 set(That.getFpVal());
271}
272
273void FAddendCoef::operator+=(const FAddendCoef &That) {
274 RoundingMode RndMode = RoundingMode::NearestTiesToEven;
275 if (isInt() == That.isInt()) {
276 if (isInt())
277 IntVal += That.IntVal;
278 else
279 getFpVal().add(That.getFpVal(), RndMode);
280 return;
281 }
282
283 if (isInt()) {
284 const APFloat &T = That.getFpVal();
285 convertToFpType(T.getSemantics());
286 getFpVal().add(T, RndMode);
287 return;
288 }
289
290 APFloat &T = getFpVal();
291 T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
292}
293
294void FAddendCoef::operator*=(const FAddendCoef &That) {
295 if (That.isOne())
296 return;
297
298 if (That.isMinusOne()) {
299 negate();
300 return;
301 }
302
303 if (isInt() && That.isInt()) {
304 int Res = IntVal * (int)That.IntVal;
305 assert(!insaneIntVal(Res) && "Insane int value");
306 IntVal = Res;
307 return;
308 }
309
310 const fltSemantics &Semantic =
311 isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();
312
313 if (isInt())
314 convertToFpType(Semantic);
315 APFloat &F0 = getFpVal();
316
317 if (That.isInt())
318 F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
319 APFloat::rmNearestTiesToEven);
320 else
321 F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
322}
323
324void FAddendCoef::negate() {
325 if (isInt())
326 IntVal = 0 - IntVal;
327 else
328 getFpVal().changeSign();
329}
330
331Value *FAddendCoef::getValue(Type *Ty) const {
332 return isInt() ?
333 ConstantFP::get(Ty, float(IntVal)) :
334 ConstantFP::get(Ty->getContext(), getFpVal());
335}
336
337// The definition of <Val> Addends
338// =========================================
339// A + B <1, A>, <1, B>
340// A - B <1, A>, <-1, B>
341// 0 - B <-1, B>
342// C * A, <C, A>
343// A + C <1, A> <C, NULL>
344// 0 +/- 0 <0, NULL> (corner case)
345//
346// Legend: A and B are not constant, C is constant
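//
// Illustrative example (assumed IR, not taken from this file): for
//   %t = fsub float 0.000000e+00, %b
// drillValueDownOneStep() produces the single addend <-1, %b>, per the
// "0 - B" row above.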
347unsigned FAddend::drillValueDownOneStep
348 (Value *Val, FAddend &Addend0, FAddend &Addend1) {
349 Instruction *I = nullptr;
350 if (!Val || !(I = dyn_cast<Instruction>(Val)))
351 return 0;
352
353 unsigned Opcode = I->getOpcode();
354
355 if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
356 ConstantFP *C0, *C1;
357 Value *Opnd0 = I->getOperand(0);
358 Value *Opnd1 = I->getOperand(1);
359 if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
360 Opnd0 = nullptr;
361
362 if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
363 Opnd1 = nullptr;
364
365 if (Opnd0) {
366 if (!C0)
367 Addend0.set(1, Opnd0);
368 else
369 Addend0.set(C0, nullptr);
370 }
371
372 if (Opnd1) {
373 FAddend &Addend = Opnd0 ? Addend1 : Addend0;
374 if (!C1)
375 Addend.set(1, Opnd1);
376 else
377 Addend.set(C1, nullptr);
378 if (Opcode == Instruction::FSub)
379 Addend.negate();
380 }
381
382 if (Opnd0 || Opnd1)
383 return Opnd0 && Opnd1 ? 2 : 1;
384
385 // Both operands are zero. Weird!
386 Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
387 return 1;
388 }
389
390 if (I->getOpcode() == Instruction::FMul) {
391 Value *V0 = I->getOperand(0);
392 Value *V1 = I->getOperand(1);
393 if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
394 Addend0.set(C, V1);
395 return 1;
396 }
397
398 if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
399 Addend0.set(C, V0);
400 return 1;
401 }
402 }
403
404 return 0;
405}
406
407// Try to break *this* addend into two addends. E.g., suppose this addend is
408// <2.3, V> and V = X + Y; by calling this function, we obtain two addends,
409// i.e., <2.3, X> and <2.3, Y>.
410unsigned FAddend::drillAddendDownOneStep
411 (FAddend &Addend0, FAddend &Addend1) const {
412 if (isConstant())
413 return 0;
414
415 unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
416 if (!BreakNum || Coeff.isOne())
417 return BreakNum;
418
419 Addend0.Scale(Coeff);
420
421 if (BreakNum == 2)
422 Addend1.Scale(Coeff);
423
424 return BreakNum;
425}
426
427Value *FAddCombine::simplify(Instruction *I) {
428 assert(I->hasAllowReassoc() && I->hasNoSignedZeros() &&
429 "Expected 'reassoc'+'nsz' instruction");
430
431 // Currently we are not able to handle vector types.
432 if (I->getType()->isVectorTy())
433 return nullptr;
434
435 assert((I->getOpcode() == Instruction::FAdd ||
436 I->getOpcode() == Instruction::FSub) && "Expect add/sub");
437
438 // Save the instruction before calling other member-functions.
439 Instr = I;
440
441 FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;
442
443 unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);
444
445 // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1.
446 unsigned Opnd0_ExpNum = 0;
447 unsigned Opnd1_ExpNum = 0;
448
449 if (!Opnd0.isConstant())
450 Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);
451
452 // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1.
453 if (OpndNum == 2 && !Opnd1.isConstant())
454 Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);
455
456 // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1
457 if (Opnd0_ExpNum && Opnd1_ExpNum) {
458 AddendVect AllOpnds;
459 AllOpnds.push_back(&Opnd0_0);
460 AllOpnds.push_back(&Opnd1_0);
461 if (Opnd0_ExpNum == 2)
462 AllOpnds.push_back(&Opnd0_1);
463 if (Opnd1_ExpNum == 2)
464 AllOpnds.push_back(&Opnd1_1);
465
466 // Compute instruction quota. We should save at least one instruction.
467 unsigned InstQuota = 0;
468
469 Value *V0 = I->getOperand(0);
470 Value *V1 = I->getOperand(1);
471 InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
472 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;
473
474 if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
475 return R;
476 }
477
478 if (OpndNum != 2) {
479 // The input instruction is: "I = 0.0 +/- V". If "V" could be
480 // split into two addends, say "V = X - Y", the instruction would have
481 // been optimized into "I = Y - X" in the previous steps.
482 //
483 const FAddendCoef &CE = Opnd0.getCoef();
484 return CE.isOne() ? Opnd0.getSymVal() : nullptr;
485 }
486
487 // Step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
488 if (Opnd1_ExpNum) {
489 AddendVect AllOpnds;
490 AllOpnds.push_back(&Opnd0);
491 AllOpnds.push_back(&Opnd1_0);
492 if (Opnd1_ExpNum == 2)
493 AllOpnds.push_back(&Opnd1_1);
494
495 if (Value *R = simplifyFAdd(AllOpnds, 1))
496 return R;
497 }
498
499 // Step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1]
500 if (Opnd0_ExpNum) {
501 AddendVect AllOpnds;
502 AllOpnds.push_back(&Opnd1);
503 AllOpnds.push_back(&Opnd0_0);
504 if (Opnd0_ExpNum == 2)
505 AllOpnds.push_back(&Opnd0_1);
506
507 if (Value *R = simplifyFAdd(AllOpnds, 1))
508 return R;
509 }
510
511 return nullptr;
512}
513
514Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
515 unsigned AddendNum = Addends.size();
516 assert(AddendNum <= 4 && "Too many addends");
517
518 // For saving intermediate results.
519 unsigned NextTmpIdx = 0;
520 FAddend TmpResult[3];
521
522 // Simplified addends are placed in <SimpVect>.
523 AddendVect SimpVect;
524
525 // The outer loop works on one symbolic-value at a time. Suppose the input
526 // addends are : <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
527 // The symbolic-values will be processed in this order: x, y, z.
528 for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {
529
530 const FAddend *ThisAddend = Addends[SymIdx];
531 if (!ThisAddend) {
532 // This addend was processed before.
533 continue;
534 }
535
536 Value *Val = ThisAddend->getSymVal();
537
538 // If the resulting expression has a constant addend, it is desirable for
539 // that constant to reside at the top of the resulting expression tree.
540 // Placing constants close to super-expr(s) will potentially reveal some
541 // optimization opportunities in super-expr(s). We intentionally do not
542 // implement this logic here and rely on the later
543 // SimplifyAssociativeOrCommutative call.
544
545 unsigned StartIdx = SimpVect.size();
546 SimpVect.push_back(ThisAddend);
547
548 // The inner loop collects addends sharing same symbolic-value, and these
549 // addends will be later on folded into a single addend. Following above
550 // example, if the symbolic value "y" is being processed, the inner loop
551 // will collect two addends "<b1,y>" and "<b2,y>". These two addends will
552 // be later on folded into "<b1+b2, y>".
553 for (unsigned SameSymIdx = SymIdx + 1;
554 SameSymIdx < AddendNum; SameSymIdx++) {
555 const FAddend *T = Addends[SameSymIdx];
556 if (T && T->getSymVal() == Val) {
557 // Set null such that next iteration of the outer loop will not process
558 // this addend again.
559 Addends[SameSymIdx] = nullptr;
560 SimpVect.push_back(T);
561 }
562 }
563
564 // If multiple addends share same symbolic value, fold them together.
565 if (StartIdx + 1 != SimpVect.size()) {
566 FAddend &R = TmpResult[NextTmpIdx ++];
567 R = *SimpVect[StartIdx];
568 for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
569 R += *SimpVect[Idx];
570
571 // Pop all addends being folded and push the resulting folded addend.
572 SimpVect.resize(StartIdx);
573 if (!R.isZero()) {
574 SimpVect.push_back(&R);
575 }
576 }
577 }
578
579 assert((NextTmpIdx <= std::size(TmpResult) + 1) && "out-of-bound access");
580
581 Value *Result;
582 if (!SimpVect.empty())
583 Result = createNaryFAdd(SimpVect, InstrQuota);
584 else {
585 // The addition is folded to 0.0.
586 Result = ConstantFP::get(Instr->getType(), 0.0);
587 }
588
589 return Result;
590}
591
592Value *FAddCombine::createNaryFAdd
593 (const AddendVect &Opnds, unsigned InstrQuota) {
594 assert(!Opnds.empty() && "Expect at least one addend");
595
596 // Step 1: Check if the # of instructions needed exceeds the quota.
597
598 unsigned InstrNeeded = calcInstrNumber(Opnds);
599 if (InstrNeeded > InstrQuota)
600 return nullptr;
601
602 initCreateInstNum();
603
604 // Step 2: Emit the N-ary addition.
605 // Note that at most three instructions are involved in Fadd-InstCombine: the
606 // addition in question, and at most two neighboring instructions.
607 // The resulting optimized addition should have at least one less instruction
608 // than the original addition expression tree. This implies that the resulting
609 // N-ary addition has at most two instructions, and we don't need to worry
610 // about tree-height when constructing the N-ary addition.
611
612 Value *LastVal = nullptr;
613 bool LastValNeedNeg = false;
614
615 // Iterate the addends, creating fadd/fsub using adjacent two addends.
616 for (const FAddend *Opnd : Opnds) {
617 bool NeedNeg;
618 Value *V = createAddendVal(*Opnd, NeedNeg);
619 if (!LastVal) {
620 LastVal = V;
621 LastValNeedNeg = NeedNeg;
622 continue;
623 }
624
625 if (LastValNeedNeg == NeedNeg) {
626 LastVal = createFAdd(LastVal, V);
627 continue;
628 }
629
630 if (LastValNeedNeg)
631 LastVal = createFSub(V, LastVal);
632 else
633 LastVal = createFSub(LastVal, V);
634
635 LastValNeedNeg = false;
636 }
637
638 if (LastValNeedNeg) {
639 LastVal = createFNeg(LastVal);
640 }
641
642#ifndef NDEBUG
643 assert(CreateInstrNum == InstrNeeded &&
644 "Inconsistent instruction numbers");
645#endif
646
647 return LastVal;
648}
649
650Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
651 Value *V = Builder.CreateFSub(Opnd0, Opnd1);
652 if (Instruction *I = dyn_cast<Instruction>(V))
653 createInstPostProc(I);
654 return V;
655}
656
657Value *FAddCombine::createFNeg(Value *V) {
658 Value *NewV = Builder.CreateFNeg(V);
659 if (Instruction *I = dyn_cast<Instruction>(NewV))
660 createInstPostProc(I, true); // fneg's don't receive instruction numbers.
661 return NewV;
662}
663
664Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
665 Value *V = Builder.CreateFAdd(Opnd0, Opnd1);
666 if (Instruction *I = dyn_cast<Instruction>(V))
667 createInstPostProc(I);
668 return V;
669}
670
671Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
672 Value *V = Builder.CreateFMul(Opnd0, Opnd1);
673 if (Instruction *I = dyn_cast<Instruction>(V))
674 createInstPostProc(I);
675 return V;
676}
677
678void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
679 NewInstr->setDebugLoc(Instr->getDebugLoc());
680
681 // Keep track of the number of instructions created.
682 if (!NoNumber)
683 incCreateInstNum();
684
685 // Propagate fast-math flags
686 NewInstr->setFastMathFlags(Instr->getFastMathFlags());
687}
688
689// Return the number of instructions needed to emit the N-ary addition.
690// NOTE: Keep this function in sync with createAddendVal().
691unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
692 unsigned OpndNum = Opnds.size();
693 unsigned InstrNeeded = OpndNum - 1;
694
695 // Adjust the number of instructions needed to emit the N-ary add.
696 for (const FAddend *Opnd : Opnds) {
697 if (Opnd->isConstant())
698 continue;
699
700 // The constant check above is really for a few special constant
701 // coefficients.
702 if (isa<UndefValue>(Opnd->getSymVal()))
703 continue;
704
705 const FAddendCoef &CE = Opnd->getCoef();
706 // Let the addend be "c * x". If "c == +/-1", the value of the addend
707 // is immediately available; otherwise, it needs exactly one instruction
708 // to evaluate the value.
709 if (!CE.isMinusOne() && !CE.isOne())
710 InstrNeeded++;
711 }
712 return InstrNeeded;
713}
714
715// Input Addend Value NeedNeg(output)
716// ================================================================
717// Constant C C false
718// <+/-1, V> V coefficient is -1
719// <2/-2, V> "fadd V, V" coefficient is -2
720// <C, V> "fmul V, C" false
721//
722// NOTE: Keep this function in sync with FAddCombine::calcInstrNumber.
723Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
724 const FAddendCoef &Coeff = Opnd.getCoef();
725
726 if (Opnd.isConstant()) {
727 NeedNeg = false;
728 return Coeff.getValue(Instr->getType());
729 }
730
731 Value *OpndVal = Opnd.getSymVal();
732
733 if (Coeff.isMinusOne() || Coeff.isOne()) {
734 NeedNeg = Coeff.isMinusOne();
735 return OpndVal;
736 }
737
738 if (Coeff.isTwo() || Coeff.isMinusTwo()) {
739 NeedNeg = Coeff.isMinusTwo();
740 return createFAdd(OpndVal, OpndVal);
741 }
742
743 NeedNeg = false;
744 return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
745}
746
747// Checks if any operand is negative and we can convert add to sub.
748// This function checks for the following negative patterns:
749// ADD(XOR(OR(Z, NOT(C)), C), 1) == NEG(AND(Z, C))
750// ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
751// XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
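//
// Worked example (illustrative, assuming 8-bit values): with Z = 0xA3 and
// C = 0x0F, XOR(AND(Z, C), C) = 0x0C, and adding 1 gives 0x0D, which equals
// NEG(OR(Z, ~C)) = -(0xA3 | 0xF0) = -0xF3 = 0x0D (mod 256).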
752static Value *checkForNegativeOperand(BinaryOperator &I,
753 InstCombiner::BuilderTy &Builder) {
754 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
755
756 // This function creates 2 instructions to replace ADD, so we need at least
757 // one of LHS or RHS to have one use to ensure a benefit from the transform.
758 if (!LHS->hasOneUse() && !RHS->hasOneUse())
759 return nullptr;
760
761 Value *X = nullptr, *Y = nullptr, *Z = nullptr;
762 const APInt *C1 = nullptr, *C2 = nullptr;
763
764 // if ONE is on the other side, swap
765 if (match(RHS, m_Add(m_Value(X), m_One())))
766 std::swap(LHS, RHS);
767
768 if (match(LHS, m_Add(m_Value(X), m_One()))) {
769 // if XOR is on the other side, swap
770 if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
771 std::swap(X, RHS);
772
773 if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
774 // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
775 // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
776 if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
777 Value *NewAnd = Builder.CreateAnd(Z, *C1);
778 return Builder.CreateSub(RHS, NewAnd, "sub");
779 } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
780 // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
781 // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
782 Value *NewOr = Builder.CreateOr(Z, ~(*C1));
783 return Builder.CreateSub(RHS, NewOr, "sub");
784 }
785 }
786 }
787
788 // Restore LHS and RHS
789 LHS = I.getOperand(0);
790 RHS = I.getOperand(1);
791
792 // if XOR is on the other side, swap
793 if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
794 std::swap(LHS, RHS);
795
796 // C1 is ODD (so C2 is even)
797 // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
798 // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
799 if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
800 if (C1->countr_zero() == 0)
801 if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
802 Value *NewOr = Builder.CreateOr(Z, ~(*C2));
803 return Builder.CreateSub(RHS, NewOr, "sub");
804 }
805 return nullptr;
806}
807
808/// Wrapping flags may allow combining constants separated by an extend.
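///
/// Illustrative IR (assumed, with the zext having one use):
///   %a = add nuw i8 %x, 10
///   %z = zext i8 %a to i32
///   %r = add i32 %z, -5
/// folds to %r = zext (add nuw i8 %x, 5), combining the two constants.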
809static Instruction *foldNoWrapAdd(BinaryOperator &Add,
810 InstCombiner::BuilderTy &Builder) {
811 Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
812 Type *Ty = Add.getType();
813 Constant *Op1C;
814 if (!match(Op1, m_Constant(Op1C)))
815 return nullptr;
816
817 // Try this match first because it results in an add in the narrow type.
818 // (zext (X +nuw C2)) + C1 --> zext (X + (C2 + trunc(C1)))
819 Value *X;
820 const APInt *C1, *C2;
821 if (match(Op1, m_APInt(C1)) &&
822 match(Op0, m_ZExt(m_NUWAddLike(m_Value(X), m_APInt(C2)))) &&
823 C1->isNegative() && C1->sge(-C2->sext(C1->getBitWidth()))) {
824 APInt NewC = *C2 + C1->trunc(C2->getBitWidth());
825 // If the smaller add will fold to zero, we don't need to check one use.
826 if (NewC.isZero())
827 return new ZExtInst(X, Ty);
828 // Otherwise only do this if the existing zero extend will be removed.
829 if (Op0->hasOneUse())
830 return new ZExtInst(
831 Builder.CreateNUWAdd(X, ConstantInt::get(X->getType(), NewC)), Ty);
832 }
833
834 // More general combining of constants in the wide type.
835 // (sext (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C)
836 // or (zext nneg (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C)
837 Constant *NarrowC;
838 if (match(Op0, m_OneUse(m_SExtLike(
839 m_NSWAddLike(m_Value(X), m_Constant(NarrowC)))))) {
840 Value *WideC = Builder.CreateSExt(NarrowC, Ty);
841 Value *NewC = Builder.CreateAdd(WideC, Op1C);
842 Value *WideX = Builder.CreateSExt(X, Ty);
843 return BinaryOperator::CreateAdd(WideX, NewC);
844 }
845 // (zext (X +nuw NarrowC)) + C --> (zext X) + (zext(NarrowC) + C)
846 if (match(Op0,
847 m_OneUse(m_ZExt(m_NUWAddLike(m_Value(X), m_Constant(NarrowC)))))) {
848 Value *WideC = Builder.CreateZExt(NarrowC, Ty);
849 Value *NewC = Builder.CreateAdd(WideC, Op1C);
850 Value *WideX = Builder.CreateZExt(X, Ty);
851 return BinaryOperator::CreateAdd(WideX, NewC);
852 }
853 return nullptr;
854}
855
857 Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
858 Type *Ty = Add.getType();
859 Constant *Op1C;
860 if (!match(Op1, m_ImmConstant(Op1C)))
861 return nullptr;
862
863 if (Instruction *NV = foldBinOpIntoSelectOrPhi(Add))
864 return NV;
865
866 if (Instruction *FoldedLogic = foldBinOpSelectBinOp(Add))
867 return FoldedLogic;
868
869 Value *X;
870 Constant *Op00C;
871
872 // add (sub C1, X), C2 --> sub (add C1, C2), X
873 if (match(Op0, m_Sub(m_Constant(Op00C), m_Value(X))))
874 return BinaryOperator::CreateSub(ConstantExpr::getAdd(Op00C, Op1C), X);
875
876 Value *Y;
877
878 // add (sub X, Y), -1 --> add (not Y), X
879 if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y)))) &&
880 match(Op1, m_AllOnes()))
881 return BinaryOperator::CreateAdd(Builder.CreateNot(Y), X);
882
883 // zext(bool) + C -> bool ? C + 1 : C
884 if (match(Op0, m_ZExt(m_Value(X))) &&
885 X->getType()->getScalarSizeInBits() == 1)
886 return createSelectInstWithUnknownProfile(X, InstCombiner::AddOne(Op1C),
887 Op1);
888 // sext(bool) + C -> bool ? C - 1 : C
889 if (match(Op0, m_SExt(m_Value(X))) &&
890 X->getType()->getScalarSizeInBits() == 1)
891 return createSelectInstWithUnknownProfile(X, InstCombiner::SubOne(Op1C),
892 Op1);
893
894 // ~X + C --> (C-1) - X
895 if (match(Op0, m_Not(m_Value(X)))) {
896 // ~X + C has NSW and (C-1) won't overflow => (C-1)-X can have NSW
897 auto *COne = ConstantInt::get(Op1C->getType(), 1);
898 bool WillNotSOV = willNotOverflowSignedSub(Op1C, COne, Add);
899 BinaryOperator *Res =
900 BinaryOperator::CreateSub(ConstantExpr::getSub(Op1C, COne), X);
901 Res->setHasNoSignedWrap(Add.hasNoSignedWrap() && WillNotSOV);
902 return Res;
903 }
904
905 // (iN X s>> (N - 1)) + 1 --> zext (X > -1)
906 const APInt *C;
907 unsigned BitWidth = Ty->getScalarSizeInBits();
908 if (match(Op0, m_OneUse(m_AShr(m_Value(X),
909 m_SpecificIntAllowPoison(BitWidth - 1)))) &&
910 match(Op1, m_One()))
911 return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty);
912
913 if (!match(Op1, m_APInt(C)))
914 return nullptr;
915
916 // (X | Op01C) + Op1C --> X + (Op01C + Op1C) iff the `or` is actually an `add`
917 Constant *Op01C;
918 if (match(Op0, m_DisjointOr(m_Value(X), m_ImmConstant(Op01C)))) {
919 BinaryOperator *NewAdd =
920 BinaryOperator::CreateAdd(X, ConstantExpr::getAdd(Op01C, Op1C));
921 NewAdd->setHasNoSignedWrap(Add.hasNoSignedWrap() &&
922 willNotOverflowSignedAdd(Op01C, Op1C, Add));
923 NewAdd->setHasNoUnsignedWrap(Add.hasNoUnsignedWrap());
924 return NewAdd;
925 }
926
927 // (X | C2) + C --> (X | C2) ^ C2 iff (C2 == -C)
928 const APInt *C2;
929 if (match(Op0, m_Or(m_Value(), m_APInt(C2))) && *C2 == -*C)
930 return BinaryOperator::CreateXor(Op0, ConstantInt::get(Add.getType(), *C2));
931
932 if (C->isSignMask()) {
933 // If wrapping is not allowed, then the addition must set the sign bit:
934 // X + (signmask) --> X | signmask
935 if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap())
936 return BinaryOperator::CreateDisjointOr(Op0, Op1);
937
938 // If wrapping is allowed, then the addition flips the sign bit of LHS:
939 // X + (signmask) --> X ^ signmask
940 return BinaryOperator::CreateXor(Op0, Op1);
941 }
942
943 // Is this add the last step in a convoluted sext?
944 // add(zext(xor i16 X, -32768), -32768) --> sext X
945 if (match(Op0, m_ZExt(m_Xor(m_Value(X), m_APInt(C2)))) &&
946 C2->isMinSignedValue() && C2->sext(Ty->getScalarSizeInBits()) == *C)
947 return CastInst::Create(Instruction::SExt, X, Ty);
948
949 if (match(Op0, m_Xor(m_Value(X), m_APInt(C2)))) {
950 // (X ^ signmask) + C --> (X + (signmask ^ C))
951 if (C2->isSignMask())
952 return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C2 ^ *C));
953
954 // If X has no bits set other than an xor mask,
955 // xor is equivalent to sub with no borrow between bits:
956 // add (xor X, C2), C --> sub (C2 + C), X
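 // E.g. (illustrative, i8): if X is known to fit in the low nibble and
 // C2 = 0x0F, then (X ^ 0x0F) == 0x0F - X, so (X ^ 0x0F) + C == (0x0F + C) - X.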
957 KnownBits LHSKnown = computeKnownBits(X, &Add);
958 if ((*C2 | LHSKnown.Zero).isAllOnes())
959 return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C2 + *C), X);
960
961 // Look for a math+logic pattern that corresponds to sext-in-register of a
962 // value with cleared high bits. Convert that into a pair of shifts:
963 // add (xor X, 0x80), 0xF..F80 --> (X << ShAmtC) >>s ShAmtC
964 // add (xor X, 0xF..F80), 0x80 --> (X << ShAmtC) >>s ShAmtC
965 if (Op0->hasOneUse() && *C2 == -(*C)) {
966 unsigned BitWidth = Ty->getScalarSizeInBits();
967 unsigned ShAmt = 0;
968 if (C->isPowerOf2())
969 ShAmt = BitWidth - C->logBase2() - 1;
970 else if (C2->isPowerOf2())
971 ShAmt = BitWidth - C2->logBase2() - 1;
972 if (ShAmt &&
973 MaskedValueIsZero(X, APInt::getHighBitsSet(BitWidth, ShAmt), &Add)) {
974 Constant *ShAmtC = ConstantInt::get(Ty, ShAmt);
975 Value *NewShl = Builder.CreateShl(X, ShAmtC, "sext");
976 return BinaryOperator::CreateAShr(NewShl, ShAmtC);
977 }
978 }
979 }
980
981 if (C->isOne() && Op0->hasOneUse()) {
982 // add (sext i1 X), 1 --> zext (not X)
983 // TODO: The smallest IR representation is (select X, 0, 1), and that would
984 // not require the one-use check. But we need to remove a transform in
985 // visitSelect and make sure that IR value tracking for select is equal or
986 // better than for these ops.
987 if (match(Op0, m_SExt(m_Value(X))) &&
988 X->getType()->getScalarSizeInBits() == 1)
989 return new ZExtInst(Builder.CreateNot(X), Ty);
990
991 // Shifts and add used to flip and mask off the low bit:
992 // add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1
993 const APInt *C3;
994 if (match(Op0, m_AShr(m_Shl(m_Value(X), m_APInt(C2)), m_APInt(C3))) &&
995 C2 == C3 && *C2 == Ty->getScalarSizeInBits() - 1) {
996 Value *NotX = Builder.CreateNot(X);
997 return BinaryOperator::CreateAnd(NotX, ConstantInt::get(Ty, 1));
998 }
999 }
1000
1001 // umax(X, C) + -C --> usub.sat(X, C)
1002 if (match(Op0, m_OneUse(m_UMax(m_Value(X), m_SpecificInt(-*C)))))
1003 return replaceInstUsesWith(
1004 Add, Builder.CreateBinaryIntrinsic(
1005 Intrinsic::usub_sat, X, ConstantInt::get(Add.getType(), -*C)));
1006
1007 // Fold (add (zext (add X, -C)), C) -> (zext X) if X u>= C.
1008 // Truncate C to the narrow type to avoid mismatched width comparisons.
1009 {
1010 const APInt *InnerC;
1011 if (match(Op0, m_ZExt(m_Add(m_Value(X), m_APIntAllowPoison(InnerC))))) {
1012 unsigned NarrowBW = InnerC->getBitWidth();
1013 if (C->isIntN(NarrowBW)) {
1014 APInt NarrowC = C->trunc(NarrowBW);
1015 const SimplifyQuery Q = SQ.getWithInstruction(&Add);
1016 if (*InnerC == -NarrowC &&
1017 (NarrowC.isOne()
1018 ? isKnownNonZero(X, Q)
1019 : computeKnownBits(X, &Add).getMinValue().uge(NarrowC)))
1020 return new ZExtInst(X, Ty);
1021 }
1022 }
1023 }
1024
1025 return nullptr;
1026}
1027
1028// Match variations of a^2 + 2*a*b + b^2.
1029//
1030// To reuse the code between the FP and Int versions, the instruction OpCodes
1031// and constant types have been turned into template parameters.
1032//
1033// Mul2Rhs: The constant to perform the multiplicative equivalent of X*2 with;
1034// should be `m_SpecificFP(2.0)` for FP and `m_SpecificInt(1)` for Int
1035// (we're matching `X<<1` instead of `X*2` for Int)
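//
// E.g. (illustrative, integer case): with M2Rhs = m_SpecificInt(1), the
// first form below matches IR shaped like
//   add (mul %a, %a), (mul (add (shl %a, 1), %b), %b)
// which the callers rebuild as (a + b) * (a + b).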
1036template <bool FP, typename Mul2Rhs>
1037static bool matchesSquareSum(BinaryOperator &I, Mul2Rhs M2Rhs, Value *&A,
1038 Value *&B) {
1039 constexpr unsigned MulOp = FP ? Instruction::FMul : Instruction::Mul;
1040 constexpr unsigned AddOp = FP ? Instruction::FAdd : Instruction::Add;
1041 constexpr unsigned Mul2Op = FP ? Instruction::FMul : Instruction::Shl;
1042
1043 // (a * a) + (((a * 2) + b) * b)
1044 if (match(&I, m_c_BinOp(
1045 AddOp, m_OneUse(m_BinOp(MulOp, m_Value(A), m_Deferred(A))),
1046 m_OneUse(m_c_BinOp(
1047 MulOp,
1048 m_c_BinOp(AddOp, m_BinOp(Mul2Op, m_Deferred(A), M2Rhs),
1049 m_Value(B)),
1050 m_Deferred(B))))))
1051 return true;
1052
1053 // ((a * b) * 2) or ((a * 2) * b)
1054 // +
1055 // (a * a + b * b) or (b * b + a * a)
1056 return match(
1057 &I, m_c_BinOp(
1058 AddOp,
1059 m_CombineOr(
1060 m_OneUse(m_BinOp(
1061 Mul2Op, m_BinOp(MulOp, m_Value(A), m_Value(B)), M2Rhs)),
1062 m_OneUse(m_c_BinOp(MulOp, m_BinOp(Mul2Op, m_Value(A), M2Rhs),
1063 m_Value(B)))),
1064 m_OneUse(
1065 m_c_BinOp(AddOp, m_BinOp(MulOp, m_Deferred(A), m_Deferred(A)),
1066 m_BinOp(MulOp, m_Deferred(B), m_Deferred(B))))));
1067}
1068
1069// Fold integer variations of a^2 + 2*a*b + b^2 -> (a + b)^2
1070Instruction *InstCombinerImpl::foldSquareSumInt(BinaryOperator &I) {
1071 Value *A, *B;
1072 if (matchesSquareSum</*FP*/ false>(I, m_SpecificInt(1), A, B)) {
1073 Value *AB = Builder.CreateAdd(A, B);
1074 return BinaryOperator::CreateMul(AB, AB);
1075 }
1076 return nullptr;
1077}
1078
1079// Fold floating point variations of a^2 + 2*a*b + b^2 -> (a + b)^2
1080// Requires `nsz` and `reassoc`.
1081Instruction *InstCombinerImpl::foldSquareSumFP(BinaryOperator &I) {
1082 assert(I.hasAllowReassoc() && I.hasNoSignedZeros() && "Assumption mismatch");
1083 Value *A, *B;
1084 if (matchesSquareSum</*FP*/ true>(I, m_SpecificFP(2.0), A, B)) {
1085 Value *AB = Builder.CreateFAddFMF(A, B, &I);
1086 return BinaryOperator::CreateFMulFMF(AB, AB, &I);
1087 }
1088 return nullptr;
1089}
1090
1091// Matches multiplication expression Op * C where C is a constant. Returns the
1092// constant value in C and the other operand in Op. Returns true if such a
1093// match is found.
1094static bool MatchMul(Value *E, Value *&Op, APInt &C) {
1095 const APInt *AI;
1096 if (match(E, m_Mul(m_Value(Op), m_APInt(AI)))) {
1097 C = *AI;
1098 return true;
1099 }
1100 if (match(E, m_Shl(m_Value(Op), m_APInt(AI)))) {
1101 C = APInt(AI->getBitWidth(), 1);
1102 C <<= *AI;
1103 return true;
1104 }
1105 return false;
1106}
1107
1108// Matches remainder expression Op % C where C is a constant. Returns the
1109// constant value in C and the other operand in Op. Returns the signedness of
1110// the remainder operation in IsSigned. Returns true if such a match is
1111// found.
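// E.g. (illustrative): (and %x, 7) is treated as %x urem 8, with IsSigned
// reported as false.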
1112static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned) {
1113 const APInt *AI;
1114 IsSigned = false;
1115 if (match(E, m_SRem(m_Value(Op), m_APInt(AI)))) {
1116 IsSigned = true;
1117 C = *AI;
1118 return true;
1119 }
1120 if (match(E, m_URem(m_Value(Op), m_APInt(AI)))) {
1121 C = *AI;
1122 return true;
1123 }
1124 if (match(E, m_And(m_Value(Op), m_APInt(AI))) && (*AI + 1).isPowerOf2()) {
1125 C = *AI + 1;
1126 return true;
1127 }
1128 return false;
1129}
1130
1131// Matches division expression Op / C with the given signedness as indicated
1132// by IsSigned, where C is a constant. Returns the constant value in C and the
1133// other operand in Op. Returns true if such a match is found.
1134static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned) {
1135 const APInt *AI;
1136 if (IsSigned && match(E, m_SDiv(m_Value(Op), m_APInt(AI)))) {
1137 C = *AI;
1138 return true;
1139 }
1140 if (!IsSigned) {
1141 if (match(E, m_UDiv(m_Value(Op), m_APInt(AI)))) {
1142 C = *AI;
1143 return true;
1144 }
1145 if (match(E, m_LShr(m_Value(Op), m_APInt(AI)))) {
1146 C = APInt(AI->getBitWidth(), 1);
1147 C <<= *AI;
1148 return true;
1149 }
1150 }
1151 return false;
1152}
1153
1154// Returns whether C0 * C1 with the given signedness overflows.
1155static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned) {
1156 bool overflow;
1157 if (IsSigned)
1158 (void)C0.smul_ov(C1, overflow);
1159 else
1160 (void)C0.umul_ov(C1, overflow);
1161 return overflow;
1162}
1163
1164// Simplifies X % C0 + (( X / C0 ) % C1) * C0 to X % (C0 * C1), where (C0 * C1)
1165// does not overflow.
1166// Simplifies (X / C0) * C1 + (X % C0) * C2 to
1167// (X / C0) * (C1 - C2 * C0) + X * C2
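//
// Worked example (illustrative): X % 3 + ((X / 3) % 5) * 3 == X % 15; for
// X = 22 both sides evaluate to 7, and 3 * 5 does not overflow.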
1168Value *InstCombinerImpl::SimplifyAddWithRemainder(BinaryOperator &I) {
1169 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1170 Value *X, *MulOpV;
1171 APInt C0, MulOpC;
1172 bool IsSigned;
1173 // Match I = X % C0 + MulOpV * C0
1174 if (((MatchRem(LHS, X, C0, IsSigned) && MatchMul(RHS, MulOpV, MulOpC)) ||
1175 (MatchRem(RHS, X, C0, IsSigned) && MatchMul(LHS, MulOpV, MulOpC))) &&
1176 C0 == MulOpC) {
1177 Value *RemOpV;
1178 APInt C1;
1179 bool Rem2IsSigned;
1180 // Match MulOpC = RemOpV % C1
1181 if (MatchRem(MulOpV, RemOpV, C1, Rem2IsSigned) &&
1182 IsSigned == Rem2IsSigned) {
1183 Value *DivOpV;
1184 APInt DivOpC;
1185 // Match RemOpV = X / C0
1186 if (MatchDiv(RemOpV, DivOpV, DivOpC, IsSigned) && X == DivOpV &&
1187 C0 == DivOpC && !MulWillOverflow(C0, C1, IsSigned)) {
1188 Value *NewDivisor = ConstantInt::get(X->getType(), C0 * C1);
1189 return IsSigned ? Builder.CreateSRem(X, NewDivisor, "srem")
1190 : Builder.CreateURem(X, NewDivisor, "urem");
1191 }
1192 }
1193 }
1194
1195 // Match I = (X / C0) * C1 + (X % C0) * C2
1196 Value *Div, *Rem;
1197 APInt C1, C2;
1198 if (!LHS->hasOneUse() || !MatchMul(LHS, Div, C1))
1199 Div = LHS, C1 = APInt(I.getType()->getScalarSizeInBits(), 1);
1200 if (!RHS->hasOneUse() || !MatchMul(RHS, Rem, C2))
1201 Rem = RHS, C2 = APInt(I.getType()->getScalarSizeInBits(), 1);
1202 if (match(Div, m_IRem(m_Value(), m_Value()))) {
1203 std::swap(Div, Rem);
1204 std::swap(C1, C2);
1205 }
1206 Value *DivOpV;
1207 APInt DivOpC;
1208 if (MatchRem(Rem, X, C0, IsSigned) &&
1209 MatchDiv(Div, DivOpV, DivOpC, IsSigned) && X == DivOpV && C0 == DivOpC &&
1210 // Avoid unprofitable replacement of and with mul.
1211 !(C1.isOne() && !IsSigned && DivOpC.isPowerOf2() && DivOpC != 2)) {
1212 APInt NewC = C1 - C2 * C0;
1213 if (!NewC.isZero() && !Rem->hasOneUse())
1214 return nullptr;
1215 if (!isGuaranteedNotToBeUndef(X, &AC, &I, &DT))
1216 return nullptr;
1217 Value *MulXC2 = Builder.CreateMul(X, ConstantInt::get(X->getType(), C2));
1218 if (NewC.isZero())
1219 return MulXC2;
1220 return Builder.CreateAdd(
1221 Builder.CreateMul(Div, ConstantInt::get(X->getType(), NewC)), MulXC2);
1222 }
1223
1224 return nullptr;
1225}
1226
1227/// Fold
1228/// (1 << NBits) - 1
1229/// Into:
1230/// ~(-(1 << NBits))
1231/// Because a 'not' is better for bit-tracking analysis and other transforms
1232/// than an 'add'. The new shl is always nsw, and is nuw if the old 'add' was.
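///
/// E.g. (illustrative, i8): for NBits = 3, (1 << 3) - 1 = 0b00000111, and
/// ~(-(1 << 3)) = ~0b11111000 = 0b00000111 as well.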
1233static Instruction *canonicalizeLowbitMask(BinaryOperator &I,
1234 InstCombiner::BuilderTy &Builder) {
1235 Value *NBits;
1236 if (!match(&I, m_Add(m_OneUse(m_Shl(m_One(), m_Value(NBits))), m_AllOnes())))
1237 return nullptr;
1238
1239 Constant *MinusOne = Constant::getAllOnesValue(NBits->getType());
1240 Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask");
1241 // Be wary of constant folding.
1242 if (auto *BOp = dyn_cast<BinaryOperator>(NotMask)) {
1243 // Always NSW. But NUW propagates from `add`.
1244 BOp->setHasNoSignedWrap();
1245 BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
1246 }
1247
1248 return BinaryOperator::CreateNot(NotMask, I.getName());
1249}
1250
1251static Instruction *foldToUnsignedSaturatedAdd(BinaryOperator &I) {
1252 assert(I.getOpcode() == Instruction::Add && "Expecting add instruction");
1253 Type *Ty = I.getType();
1254 auto getUAddSat = [&]() {
1255 return Intrinsic::getOrInsertDeclaration(I.getModule(), Intrinsic::uadd_sat,
1256 Ty);
1257 };
1258
1259 // add (umin X, ~Y), Y --> uaddsat X, Y
1260 Value *X, *Y;
1261 if (match(&I, m_c_Add(m_c_UMin(m_Not(m_Value(Y)), m_Value(X)),
1262 m_Deferred(Y))))
1263 return CallInst::Create(getUAddSat(), { X, Y });
1264
1265 // add (umin X, ~C), C --> uaddsat X, C
1266 const APInt *C, *NotC;
1267 if (match(&I, m_Add(m_UMin(m_Value(X), m_APInt(NotC)), m_APInt(C))) &&
1268 *C == ~*NotC)
1269 return CallInst::Create(getUAddSat(), { X, ConstantInt::get(Ty, *C) });
1270
1271 return nullptr;
1272}
1273
1274// Transform:
1275// (add A, (shl (neg B), Y))
1276// -> (sub A, (shl B, Y))
1277static Instruction *combineAddSubWithShlAddSub(InstCombiner::BuilderTy &Builder,
1278 const BinaryOperator &I) {
1279 Value *A, *B, *Cnt;
1280 if (match(&I,
1281 m_c_Add(m_OneUse(m_Shl(m_OneUse(m_Neg(m_Value(B))), m_Value(Cnt))),
1282 m_Value(A)))) {
1283 Value *NewShl = Builder.CreateShl(B, Cnt);
1284 return BinaryOperator::CreateSub(A, NewShl);
1285 }
1286 return nullptr;
1287}
1288
1289/// Try to reduce signed division by power-of-2 to an arithmetic shift right.
1290static Instruction *foldAddToAshr(BinaryOperator &Add) {
1291 // Division must be by power-of-2, but not the minimum signed value.
1292 Value *X;
1293 const APInt *DivC;
1294 if (!match(Add.getOperand(0), m_SDiv(m_Value(X), m_Power2(DivC))) ||
1295 DivC->isNegative())
1296 return nullptr;
1297
1298 // Rounding is done by adding -1 if the dividend (X) is negative and has any
1299 // low bits set. It recognizes two canonical patterns:
1300 // 1. For an 'ugt' cmp with the signed minimum value (SMIN), the
1301 // pattern is: sext (icmp ugt (X & (DivC - 1)), SMIN).
1302 // 2. For an 'eq' cmp, the pattern is: sext (icmp eq X & (SMIN + 1), SMIN + 1).
1303 // Note that, by the time we end up here, if possible, ugt has been
1304 // canonicalized into eq.
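 // Worked example (illustrative, i8): for X = -7 and DivC = 4, sdiv yields
 // -1 (rounding toward zero) and the sext'd compare contributes -1, giving
 // -2 == ashr i8 -7, 2.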
1305 const APInt *MaskC, *MaskCCmp;
1306 CmpPredicate Pred;
1307 if (!match(Add.getOperand(1),
1308 m_SExt(m_ICmp(Pred, m_And(m_Specific(X), m_APInt(MaskC)),
1309 m_APInt(MaskCCmp)))))
1310 return nullptr;
1311
1312 if ((Pred != ICmpInst::ICMP_UGT || !MaskCCmp->isSignMask()) &&
1313 (Pred != ICmpInst::ICMP_EQ || *MaskCCmp != *MaskC))
1314 return nullptr;
1315
1316 APInt SMin = APInt::getSignedMinValue(Add.getType()->getScalarSizeInBits());
1317 bool IsMaskValid = Pred == ICmpInst::ICMP_UGT
1318 ? (*MaskC == (SMin | (*DivC - 1)))
1319 : (*DivC == 2 && *MaskC == SMin + 1);
1320 if (!IsMaskValid)
1321 return nullptr;
1322
1323 // (X / DivC) + sext ((X & (SMin | (DivC - 1)) >u SMin) --> X >>s log2(DivC)
1324 return BinaryOperator::CreateAShr(
1325 X, ConstantInt::get(Add.getType(), DivC->exactLogBase2()));
1326}
1327
1328Instruction *InstCombinerImpl::foldAddLikeCommutative(Value *LHS, Value *RHS,
1329 bool NSW, bool NUW) {
1330 Value *A, *B, *C;
1331 if (match(LHS, m_Sub(m_Value(A), m_Value(B))) &&
1332 match(RHS, m_Sub(m_Value(C), m_Specific(A)))) {
1333 Instruction *R = BinaryOperator::CreateSub(C, B);
1334 bool NSWOut = NSW && match(LHS, m_NSWSub(m_Value(), m_Value())) &&
1335 match(RHS, m_NSWSub(m_Value(), m_Value()));
1336
1337 bool NUWOut = match(LHS, m_NUWSub(m_Value(), m_Value())) &&
1338 match(RHS, m_NUWSub(m_Value(), m_Value()));
1339 R->setHasNoSignedWrap(NSWOut);
1340 R->setHasNoUnsignedWrap(NUWOut);
1341 return R;
1342 }
1343
1344 // ((X s/ C1) << C2) + X => X s% -C1 where -C1 is 1 << C2
1345 const APInt *C1, *C2;
1346 if (match(LHS, m_Shl(m_SDiv(m_Specific(RHS), m_APInt(C1)), m_APInt(C2)))) {
1347 APInt One(C2->getBitWidth(), 1);
1348 APInt MinusC1 = -(*C1);
1349 if (MinusC1 == (One << *C2)) {
1350 Constant *NewRHS = ConstantInt::get(RHS->getType(), MinusC1);
1351 return BinaryOperator::CreateSRem(RHS, NewRHS);
1352 }
1353 }
1354
1355 // (A + C) + (B & ~C) == A + (B | C)
1356 if (match(LHS, m_c_Add(m_Value(A), m_APInt(C1))) &&
1357 match(RHS, m_c_And(m_Value(B), m_SpecificInt(~*C1)))) {
1358 // Replacing one add with {or, add}. Avoid growth if both sides are shared.
1359 if (!LHS->hasOneUse() && !RHS->hasOneUse())
1360 return nullptr;
1361
1362 bool NSWOut = NSW && match(LHS, m_NSWAdd(m_Value(), m_Value()));
1363 bool NUWOut = NUW && match(LHS, m_NUWAdd(m_Value(), m_Value()));
1364 Value *NewOr =
1365 Builder.CreateOr(B, Constant::getIntegerValue(LHS->getType(), *C1));
1366 Instruction *NewAdd = BinaryOperator::CreateAdd(A, NewOr);
1367 NewAdd->setHasNoSignedWrap(NSWOut);
1368 NewAdd->setHasNoUnsignedWrap(NUWOut);
1369 return NewAdd;
1370 }
1371
1372 return nullptr;
1373}
1374
1375Instruction *InstCombinerImpl::
1376 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
1377 BinaryOperator &I) {
1378 assert((I.getOpcode() == Instruction::Add ||
1379 I.getOpcode() == Instruction::Or ||
1380 I.getOpcode() == Instruction::Sub) &&
1381 "Expecting add/or/sub instruction");
1382
1383 // We have a subtraction/addition between a (potentially truncated) *logical*
1384 // right-shift of X and a "select".
1385 Value *X, *Select;
1386 Instruction *LowBitsToSkip, *Extract;
1387 if (!match(&I, m_c_BinOp(m_TruncOrSelf(m_Instruction(
1388 Extract, m_LShr(m_Value(X),
1389 m_Instruction(LowBitsToSkip)))),
1390 m_Value(Select))))
1391 return nullptr;
1392
1393 // `add`/`or` is commutative; but for `sub`, "select" *must* be on RHS.
1394 if (I.getOpcode() == Instruction::Sub && I.getOperand(1) != Select)
1395 return nullptr;
1396
1397 Type *XTy = X->getType();
1398 bool HadTrunc = I.getType() != XTy;
1399
1400 // If there was a truncation of extracted value, then we'll need to produce
1401 // one extra instruction, so we need to ensure one instruction will go away.
1402 if (HadTrunc && !match(&I, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
1403 return nullptr;
1404
1405 // Extraction should extract high NBits bits, with shift amount calculated as:
1406 // low bits to skip = shift bitwidth - high bits to extract
1407 // The shift amount itself may be extended, and we need to look past zero-ext
1408 // when matching NBits; that will matter for matching later.
1409 Value *NBits;
1410 if (!match(LowBitsToSkip,
1411 m_ZExtOrSelf(m_Sub(m_SpecificInt(XTy->getScalarSizeInBits()),
1412 m_ZExtOrSelf(m_Value(NBits))))))
1413 return nullptr;
1414
1415 // Sign-extending value can be zero-extended if we `sub`tract it,
1416 // or sign-extended otherwise.
1417 auto SkipExtInMagic = [&I](Value *&V) {
1418 if (I.getOpcode() == Instruction::Sub)
1419 match(V, m_ZExtOrSelf(m_Value(V)));
1420 else
1421 match(V, m_SExtOrSelf(m_Value(V)));
1422 };
1423
1424 // Now, finally validate the sign-extending magic.
1425 // `select` itself may be appropriately extended, look past that.
1426 SkipExtInMagic(Select);
1427
1428 CmpPredicate Pred;
1429 const APInt *Thr;
1430 Value *SignExtendingValue, *Zero;
1431 bool ShouldSignext;
1432 // It must be a select between two values we will later establish to be a
1433 // sign-extending value and a zero constant. The condition guarding the
1434 // sign-extension must be based on a sign bit of the same X we had in `lshr`.
1435 if (!match(Select, m_Select(m_ICmp(Pred, m_Specific(X), m_APInt(Thr)),
1436 m_Value(SignExtendingValue), m_Value(Zero))) ||
1437 !isSignBitCheck(Pred, *Thr, ShouldSignext))
1438 return nullptr;
1439
1440 // icmp-select pair is commutative.
1441 if (!ShouldSignext)
1442 std::swap(SignExtendingValue, Zero);
1443
1444 // If we should not perform sign-extension then we must add/or/subtract zero.
1445 if (!match(Zero, m_Zero()))
1446 return nullptr;
1447 // Otherwise, it should be some constant, left-shifted by the same NBits we
1448 // had in `lshr`. Said left-shift can also be appropriately extended.
1449 // Again, we must look past zero-ext when looking for NBits.
1450 SkipExtInMagic(SignExtendingValue);
1451 Constant *SignExtendingValueBaseConstant;
1452 if (!match(SignExtendingValue,
1453 m_Shl(m_Constant(SignExtendingValueBaseConstant),
1454 m_ZExtOrSelf(m_Specific(NBits)))))
1455 return nullptr;
1456 // If we `sub`, then the constant should be one, else it should be all-ones.
1457 if (I.getOpcode() == Instruction::Sub
1458 ? !match(SignExtendingValueBaseConstant, m_One())
1459 : !match(SignExtendingValueBaseConstant, m_AllOnes()))
1460 return nullptr;
1461
1462 auto *NewAShr = BinaryOperator::CreateAShr(X, LowBitsToSkip,
1463 Extract->getName() + ".sext");
1464 NewAShr->copyIRFlags(Extract); // Preserve `exact`-ness.
1465 if (!HadTrunc)
1466 return NewAShr;
1467
1468 Builder.Insert(NewAShr);
1469 return TruncInst::CreateTruncOrBitCast(NewAShr, I.getType());
1470}
1471
1472/// This is a specialization of a more general transform from
1473/// foldUsingDistributiveLaws. If that code can be made to work optimally
1474/// for multi-use cases or propagating nsw/nuw, then we would not need this.
1475static Instruction *factorizeMathWithShlOps(BinaryOperator &I,
1476 InstCombiner::BuilderTy &Builder) {
1477 // TODO: Also handle mul by doubling the shift amount?
1478 assert((I.getOpcode() == Instruction::Add ||
1479 I.getOpcode() == Instruction::Sub) &&
1480 "Expected add/sub");
1481 auto *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
1482 auto *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
1483 if (!Op0 || !Op1 || !(Op0->hasOneUse() || Op1->hasOneUse()))
1484 return nullptr;
1485
1486 Value *X, *Y, *ShAmt;
1487 if (!match(Op0, m_Shl(m_Value(X), m_Value(ShAmt))) ||
1488 !match(Op1, m_Shl(m_Value(Y), m_Specific(ShAmt))))
1489 return nullptr;
1490
1491 // No-wrap propagates only when all ops have no-wrap.
1492 bool HasNSW = I.hasNoSignedWrap() && Op0->hasNoSignedWrap() &&
1493 Op1->hasNoSignedWrap();
1494 bool HasNUW = I.hasNoUnsignedWrap() && Op0->hasNoUnsignedWrap() &&
1495 Op1->hasNoUnsignedWrap();
1496
1497 // add/sub (X << ShAmt), (Y << ShAmt) --> (add/sub X, Y) << ShAmt
1498 Value *NewMath = Builder.CreateBinOp(I.getOpcode(), X, Y);
1499 if (auto *NewI = dyn_cast<BinaryOperator>(NewMath)) {
1500 NewI->setHasNoSignedWrap(HasNSW);
1501 NewI->setHasNoUnsignedWrap(HasNUW);
1502 }
1503 auto *NewShl = BinaryOperator::CreateShl(NewMath, ShAmt);
1504 NewShl->setHasNoSignedWrap(HasNSW);
1505 NewShl->setHasNoUnsignedWrap(HasNUW);
1506 return NewShl;
1507}
1508
1509/// Reduce a sequence of masked half-width multiplies to a single multiply.
1510/// (((XLow * YHigh) + (YLow * XHigh)) << HalfBits) + (XLow * YLow) --> X * Y
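///
/// E.g. (illustrative, i8 with HalfBits = 4): this is schoolbook nibble
/// multiplication; the XHigh * YHigh partial product is shifted out of the
/// i8 result, so the three remaining terms equal X * Y (mod 256).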
1511Instruction *InstCombinerImpl::foldBoxMultiply(BinaryOperator &I) {
1512 unsigned BitWidth = I.getType()->getScalarSizeInBits();
1513 // Skip the odd bitwidth types.
1514 if ((BitWidth & 0x1))
1515 return nullptr;
1516
1517 unsigned HalfBits = BitWidth >> 1;
1518 APInt HalfMask = APInt::getMaxValue(HalfBits);
1519
1520 // ResLo = (CrossSum << HalfBits) + (YLo * XLo)
1521 Value *XLo, *YLo;
1522 Value *CrossSum;
1523 // Require one-use on the multiply to avoid increasing the number of
1524 // multiplications.
1525 if (!match(&I, m_c_Add(m_Shl(m_Value(CrossSum), m_SpecificInt(HalfBits)),
1526 m_OneUse(m_Mul(m_Value(YLo), m_Value(XLo))))))
1527 return nullptr;
1528
1529 // XLo = X & HalfMask
1530 // YLo = Y & HalfMask
1531 // TODO: Refactor with SimplifyDemandedBits or KnownBits known leading zeros
1532 // to enhance robustness
1533 Value *X, *Y;
1534 if (!match(XLo, m_And(m_Value(X), m_SpecificInt(HalfMask))) ||
1535 !match(YLo, m_And(m_Value(Y), m_SpecificInt(HalfMask))))
1536 return nullptr;
1537
1538 // CrossSum = (X' * (Y >> Halfbits)) + (Y' * (X >> HalfBits))
1539 // X' can be either X or XLo in the pattern (and the same for Y')
1540 if (match(CrossSum,
1541 m_c_Add(m_c_Mul(m_LShr(m_Specific(Y), m_SpecificInt(HalfBits)),
1542 m_CombineOr(m_Specific(X), m_Specific(XLo))),
1543 m_c_Mul(m_LShr(m_Specific(X), m_SpecificInt(HalfBits)),
1544 m_CombineOr(m_Specific(Y), m_Specific(YLo))))))
1545 return BinaryOperator::CreateMul(X, Y);
1546
1547 return nullptr;
1548}
1549
1550/// Return true if X + (Y-1) is provably non-wrapping in X's type
1551static bool checkDivCeilNUW(Value *X, Value *Y, const SimplifyQuery &SQ) {
1552 ConstantRange CRX = computeConstantRange(X, /*ForSigned=*/false, SQ);
1553 ConstantRange CRY = computeConstantRange(Y, /*ForSigned=*/false, SQ);
1554 APInt MinY = CRY.getUnsignedMin();
1555 APInt MaxX = CRX.getUnsignedMax();
1556 APInt MaxY = CRY.getUnsignedMax();
1557
1558 return !MinY.isZero() && !MaxX.ugt(-MaxY);
1559}
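// E.g. (illustrative, i8): if X is known to lie in [0, 100] and Y in
// [1, 100], then MaxX = 100 u<= -MaxY = 156, so X + (Y - 1) cannot wrap and
// the div-ceil rewrite below can carry the nuw flag.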
1560
1561/// Fold the div_ceil idiom in both forms:
1562/// add(udiv(X, Y), zext(icmp ne(urem(X, Y), 0)))
1563/// -> udiv(add nuw(X, Y - 1), Y)
1564/// add(zext(udiv(X, Y)), zext(icmp ne(urem(X, Y), 0)))
1565/// -> zext(udiv(add nuw(X, Y - 1), Y))
1566/// The zext form applies when udiv/urem operate in a narrower type than the
1567/// add.
1568Instruction *InstCombinerImpl::foldDivCeilIdiom(BinaryOperator &I) {
1569 Value *X, *Y;
1570
1571 auto UDivPat = m_OneUse(m_UDiv(m_Value(X), m_Value(Y)));
1572 auto URemPat = m_OneUse(m_URem(m_Deferred(X), m_Deferred(Y)));
1573 auto ICmpPat = m_OneUse(m_SpecificICmp(ICmpInst::ICMP_NE, URemPat, m_Zero()));
1574 auto DivPat = m_OneUse(m_ZExtOrSelf(UDivPat));
1575 auto ZExtCmpPat = m_OneUse(m_ZExt(ICmpPat));
1576
1577 if (!match(&I, m_c_Add(DivPat, ZExtCmpPat)) || !checkDivCeilNUW(X, Y, SQ))
1578 return nullptr;
1579
1580 Value *YMinusOne =
1581 Builder.CreateAdd(Y, ConstantInt::getAllOnesValue(Y->getType()));
1582 Value *NUWAdd = Builder.CreateNUWAdd(X, YMinusOne);
1583 if (X->getType() != I.getType()) {
1584 Value *Div = Builder.CreateUDiv(NUWAdd, Y);
1585 return new ZExtInst(Div, I.getType());
1586 }
1587 return BinaryOperator::CreateUDiv(NUWAdd, Y);
1588}
1589
1590Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
1591 if (Value *V = simplifyAddInst(I.getOperand(0), I.getOperand(1),
1592 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
1593 SQ.getWithInstruction(&I)))
1594 return replaceInstUsesWith(I, V);
1595
1596 if (SimplifyAssociativeOrCommutative(I))
1597 return &I;
1598
1599 if (Instruction *X = foldVectorBinop(I))
1600 return X;
1601
1602 if (Instruction *Phi = foldBinopWithPhiOperands(I))
1603 return Phi;
1604
1605 // (A*B)+(A*C) -> A*(B+C) etc
1606 if (Value *V = foldUsingDistributiveLaws(I))
1607 return replaceInstUsesWith(I, V);
1608
1609 if (Instruction *R = foldBoxMultiply(I))
1610 return R;
1611
1612 if (Instruction *R = factorizeMathWithShlOps(I, Builder))
1613 return R;
1614
1615 if (Instruction *X = foldAddWithConstant(I))
1616 return X;
1617
1618 if (Instruction *X = foldNoWrapAdd(I, Builder))
1619 return X;
1620
1621 if (Instruction *R = foldBinOpShiftWithShift(I))
1622 return R;
1623
1624 if (Instruction *R = combineAddSubWithShlAddSub(Builder, I))
1625 return R;
1626
1627 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1628 if (Instruction *R = foldAddLikeCommutative(LHS, RHS, I.hasNoSignedWrap(),
1629 I.hasNoUnsignedWrap()))
1630 return R;
1631 if (Instruction *R = foldAddLikeCommutative(RHS, LHS, I.hasNoSignedWrap(),
1632 I.hasNoUnsignedWrap()))
1633 return R;
1634 Type *Ty = I.getType();
1635 if (Ty->isIntOrIntVectorTy(1))
1636 return BinaryOperator::CreateXor(LHS, RHS);
1637
1638 // X + X --> X << 1
1639 if (LHS == RHS) {
1640 auto *Shl = BinaryOperator::CreateShl(LHS, ConstantInt::get(Ty, 1));
1641 Shl->setHasNoSignedWrap(I.hasNoSignedWrap());
1642 Shl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
1643 return Shl;
1644 }
1645
1646 Value *A, *B;
1647 if (match(LHS, m_Neg(m_Value(A)))) {
1648 // -A + -B --> -(A + B)
1649 if (match(RHS, m_Neg(m_Value(B))))
1650 return BinaryOperator::CreateNeg(Builder.CreateAdd(A, B));
1651
1652 // -A + B --> B - A
1653 auto *Sub = BinaryOperator::CreateSub(RHS, A);
1654 auto *OB0 = cast<OverflowingBinaryOperator>(LHS);
1655 Sub->setHasNoSignedWrap(I.hasNoSignedWrap() && OB0->hasNoSignedWrap());
1656
1657 return Sub;
1658 }
1659
1660 // A + -B --> A - B
1661 if (match(RHS, m_Neg(m_Value(B)))) {
1662 auto *Sub = BinaryOperator::CreateSub(LHS, B);
1663 auto *OBO = cast<OverflowingBinaryOperator>(RHS);
1664 Sub->setHasNoSignedWrap(I.hasNoSignedWrap() && OBO->hasNoSignedWrap());
1665 return Sub;
1666 }
1667
1668 if (Value *V = checkForNegativeOperand(I, Builder))
1669 return replaceInstUsesWith(I, V);
1670
1671 // (A + 1) + ~B --> A - B
1672 // ~B + (A + 1) --> A - B
1673 // (~B + A) + 1 --> A - B
1674 // (A + ~B) + 1 --> A - B
1675 // This relies on the ~B == -1-B identity.
1676 if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_One()), m_Not(m_Value(B)))) ||
1677 match(&I, m_BinOp(m_c_Add(m_Not(m_Value(B)), m_Value(A)), m_One())))
1678 return BinaryOperator::CreateSub(A, B);
1679
1680 {
1681 // (A + C) + ~B --> A - B + (C-1)
1682 // ~B + (A + C) --> A - B + (C-1)
1683 // (~B + A) + C --> A - B + (C-1)
1684 // (A + ~B) + C --> A - B + (C-1)
1685 // With constant C, subtraction of one is free, so we replace three ops
1686 // (two adds and a bitwise-not) with two (sub and add).
1687 const APInt *C;
1688 if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_APIntAllowPoison(C)),
1689 m_Not(m_Value(B)))) ||
1690 match(&I, m_BinOp(m_c_Add(m_Not(m_Value(B)), m_Value(A)),
1691 m_APIntAllowPoison(C)))) {
1692 Value *Sub = Builder.CreateSub(A, B);
1693 return BinaryOperator::CreateAdd(Sub, ConstantInt::get(Ty, *C - 1));
1694 }
1695 }
1696
1697 // (A + RHS) + RHS --> A + (RHS << 1)
1698 if (match(LHS, m_OneUse(m_c_Add(m_Value(A), m_Specific(RHS)))))
1699 return BinaryOperator::CreateAdd(A, Builder.CreateShl(RHS, 1, "reass.add"));
1700
1701 // LHS + (A + LHS) --> A + (LHS << 1)
1702 if (match(RHS, m_OneUse(m_c_Add(m_Value(A), m_Specific(LHS)))))
1703 return BinaryOperator::CreateAdd(A, Builder.CreateShl(LHS, 1, "reass.add"));
1704
1705 {
1706 // (A + C1) + (C2 - B) --> (A - B) + (C1 + C2)
1707 Constant *C1, *C2;
1708 if (match(&I, m_c_Add(m_Add(m_Value(A), m_ImmConstant(C1)),
1709 m_Sub(m_ImmConstant(C2), m_Value(B)))) &&
1710 (LHS->hasOneUse() || RHS->hasOneUse())) {
1711 Value *Sub = Builder.CreateSub(A, B);
1712 return BinaryOperator::CreateAdd(Sub, ConstantExpr::getAdd(C1, C2));
1713 }
1714
1715 // Canonicalize a constant sub operand as an add operand for better folding:
1716 // (C1 - A) + B --> (B - A) + C1
1717 if (match(&I, m_c_Add(m_OneUse(m_Sub(m_ImmConstant(C1), m_Value(A))),
1718 m_Value(B)))) {
1719 Value *Sub = Builder.CreateSub(B, A, "reass.sub");
1720 return BinaryOperator::CreateAdd(Sub, C1);
1721 }
1722 }
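// Worked example (assumed constant C1 = 10):
//   (10 - A) + B == (B - A) + 10
// so the constant becomes an outer add operand, where later folds can
// combine it with other constants more easily.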
1723
1724 // X % C0 + (( X / C0 ) % C1) * C0 => X % (C0 * C1)
1725 if (Value *V = SimplifyAddWithRemainder(I)) return replaceInstUsesWith(I, V);
1726 
1727 const APInt *C1;
1728 // (A & 2^C1) + A => A & (2^C1 - 1) iff bit C1 in A is a sign bit
1729 if (match(&I, m_c_Add(m_And(m_Value(A), m_APInt(C1)), m_Deferred(A))) &&
1730 C1->isPowerOf2() && (ComputeNumSignBits(A) > C1->countl_zero())) {
1731 Constant *NewMask = ConstantInt::get(RHS->getType(), *C1 - 1);
1732 return BinaryOperator::CreateAnd(A, NewMask);
1733 }
1734
1735 // ZExt (B - A) + ZExt(A) --> ZExt(B)
1736 if ((match(RHS, m_ZExt(m_Value(A))) &&
1737 match(LHS, m_ZExt(m_NUWSub(m_Value(B), m_Specific(A))))) ||
1738 (match(LHS, m_ZExt(m_Value(A))) &&
1739 match(RHS, m_ZExt(m_NUWSub(m_Value(B), m_Specific(A))))))
1740 return new ZExtInst(B, LHS->getType());
1741
1742 // zext(A) + sext(A) --> 0 if A is i1
1743 if (match(&I, m_c_BinOp(m_ZExt(m_Value(A)), m_SExt(m_Deferred(A)))) &&
1744 A->getType()->isIntOrIntVectorTy(1))
1745 return replaceInstUsesWith(I, Constant::getNullValue(I.getType()));
1746
1747 // sext(A < B) + zext(A > B) => ucmp/scmp(A, B)
1748 CmpPredicate LTPred, GTPred;
1749 if (match(&I,
1750 m_c_Add(m_SExt(m_c_ICmp(LTPred, m_Value(A), m_Value(B))),
1751 m_ZExt(m_c_ICmp(GTPred, m_Deferred(A), m_Deferred(B))))) &&
1752 A->getType()->isIntOrIntVectorTy()) {
1753 if (ICmpInst::isGT(LTPred)) {
1754 std::swap(LTPred, GTPred);
1755 std::swap(A, B);
1756 }
1757
1758 if (ICmpInst::isLT(LTPred) && ICmpInst::isGT(GTPred) &&
1759 ICmpInst::isSigned(LTPred) == ICmpInst::isSigned(GTPred))
1760 return replaceInstUsesWith(
1761 I, Builder.CreateIntrinsic(
1762 Ty,
1763 ICmpInst::isSigned(LTPred) ? Intrinsic::scmp : Intrinsic::ucmp,
1764 {A, B}));
1765 }
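// Schematic example for the signed case (for intuition, not valid
// textual IR): sext(a s< b) is -1 when a < b, zext(a s> b) is 1 when
// a > b, so their sum is -1/0/+1 -- exactly @llvm.scmp(a, b).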
1766
1767 // A+B --> A|B iff A and B have no bits set in common.
1768 WithCache<const Value *> LHSCache(LHS), RHSCache(RHS);
1769 if (haveNoCommonBitsSet(LHSCache, RHSCache, SQ.getWithInstruction(&I)))
1770 return BinaryOperator::CreateDisjointOr(LHS, RHS);
1771
1772 if (Instruction *Ext = narrowMathIfNoOverflow(I))
1773 return Ext;
1774
1775 // (add (xor A, B) (and A, B)) --> (or A, B)
1776 // (add (and A, B) (xor A, B)) --> (or A, B)
1777 if (match(&I, m_c_BinOp(m_Xor(m_Value(A), m_Value(B)),
1778 m_c_And(m_Deferred(A), m_Deferred(B)))))
1779 return BinaryOperator::CreateOr(A, B);
1780
1781 // (add (or A, B) (and A, B)) --> (add A, B)
1782 // (add (and A, B) (or A, B)) --> (add A, B)
1783 if (match(&I, m_c_BinOp(m_Or(m_Value(A), m_Value(B)),
1784 m_c_And(m_Deferred(A), m_Deferred(B))))) {
1785 // Replacing operands in-place to preserve nuw/nsw flags.
1786 replaceOperand(I, 0, A);
1787 replaceOperand(I, 1, B);
1788 return &I;
1789 }
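// The fold above relies on the standard identity
//   A + B == (A | B) + (A & B):
// the `or` counts each bit set in either operand once, and the `and`
// adds back the bits that were set in both.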
1790
1791 // (add A (or A, -A)) --> (and (add A, -1) A)
1792 // (add A (or -A, A)) --> (and (add A, -1) A)
1793 // (add (or A, -A) A) --> (and (add A, -1) A)
1794 // (add (or -A, A) A) --> (and (add A, -1) A)
1795 if (match(&I, m_c_BinOp(m_Value(A), m_OneUse(m_c_Or(m_Neg(m_Deferred(A)),
1796 m_Deferred(A)))))) {
1797 Value *Add =
1798 Builder.CreateAdd(A, Constant::getAllOnesValue(A->getType()), "",
1799 I.hasNoUnsignedWrap(), I.hasNoSignedWrap());
1800 return BinaryOperator::CreateAnd(Add, A);
1801 }
1802
1803 // Canonicalize ((A & -A) - 1) --> ((A - 1) & ~A)
1804 // Forms all commutable operations, and simplifies ctpop -> cttz folds.
1805 if (match(&I,
1806 m_Add(m_OneUse(m_c_And(m_Value(A), m_OneUse(m_Neg(m_Deferred(A))))),
1807 m_AllOnes()))) {
1808 Constant *AllOnes = ConstantInt::getAllOnesValue(RHS->getType());
1809 Value *Dec = Builder.CreateAdd(A, AllOnes);
1810 Value *Not = Builder.CreateXor(A, AllOnes);
1811 return BinaryOperator::CreateAnd(Dec, Not);
1812 }
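// Worked example (assumed A = 0b0110): A & -A isolates the lowest set
// bit (0b0010), so (A & -A) - 1 == 0b0001, which equals (A - 1) & ~A
// (0b0101 & 0b1001 == 0b0001).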
1813
1814 // Disguised reassociation/factorization:
1815 // ~(A * C1) + A
1816 // ((A * -C1) - 1) + A
1817 // ((A * -C1) + A) - 1
1818 // (A * (1 - C1)) - 1
1819 if (match(&I,
1820 m_c_Add(m_OneUse(m_Not(m_OneUse(m_Mul(m_Value(A), m_APInt(C1))))),
1821 m_Deferred(A)))) {
1822 Type *Ty = I.getType();
1823 Constant *NewMulC = ConstantInt::get(Ty, 1 - *C1);
1824 Value *NewMul = Builder.CreateMul(A, NewMulC);
1825 return BinaryOperator::CreateAdd(NewMul, ConstantInt::getAllOnesValue(Ty));
1826 }
1827
1828 // (A * -2**C) + B --> B - (A << C)
1829 const APInt *NegPow2C;
1830 if (match(&I, m_c_Add(m_OneUse(m_Mul(m_Value(A), m_NegatedPower2(NegPow2C))),
1831 m_Value(B)))) {
1832 Constant *ShiftAmtC = ConstantInt::get(Ty, NegPow2C->countr_zero());
1833 Value *Shl = Builder.CreateShl(A, ShiftAmtC);
1834 return BinaryOperator::CreateSub(B, Shl);
1835 }
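// Worked example (assumed constant -8 == -(2**3)):
//   (A * -8) + B == B - 8*A == B - (A << 3)
// replacing a multiply with a cheaper shift.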
1836
1837 // Canonicalize signum variant that ends in add:
1838 // (A s>> (BW - 1)) + (zext (A s> 0)) --> (A s>> (BW - 1)) | (zext (A != 0))
1839 uint64_t BitWidth = Ty->getScalarSizeInBits();
1840 if (match(LHS, m_AShr(m_Value(A), m_SpecificIntAllowPoison(BitWidth - 1))) &&
1841 match(RHS, m_OneUse(m_ZExt(m_OneUse(m_SpecificICmp(
1842 ICmpInst::ICMP_SGT, m_Specific(A), m_ZeroInt())))))) {
1843 Value *NotZero = Builder.CreateIsNotNull(A, "isnotnull");
1844 Value *Zext = Builder.CreateZExt(NotZero, Ty, "isnotnull.zext");
1845 return BinaryOperator::CreateOr(LHS, Zext);
1846 }
1847
1848 {
1849 Value *Cond, *Ext;
1850 Constant *C;
1851 // (add X, (sext/zext (icmp eq X, C)))
1852 // -> (select (icmp eq X, C), (add C, (sext/zext 1)), X)
1853 auto CondMatcher =
1854 m_CombineAnd(m_Value(Cond), m_SpecificICmp(ICmpInst::ICMP_EQ, m_Deferred(A),
1855 m_ImmConstant(C)));
1856
1857 if (match(&I,
1858 m_c_Add(m_Value(A), m_Value(Ext, m_ZExtOrSExt(CondMatcher)))) &&
1859 Ext->hasOneUse()) {
1860 Value *Add = isa<ZExtInst>(Ext) ? InstCombiner::AddOne(C)
1861 : InstCombiner::SubOne(C);
1862 return replaceInstUsesWith(I, Builder.CreateSelect(Cond, Add, A));
1863 }
1864 }
1865
1866 // (add (add A, 1), (sext (icmp ne A, 0))) => call umax(A, 1)
1867 if (match(LHS, m_Add(m_Value(A), m_One())) &&
1868 match(RHS, m_OneUse(m_SExt(m_OneUse(m_SpecificICmp(
1869 ICmpInst::ICMP_NE, m_Specific(A), m_ZeroInt())))))) {
1870 Value *OneConst = ConstantInt::get(A->getType(), 1);
1871 Value *UMax = Builder.CreateBinaryIntrinsic(Intrinsic::umax, A, OneConst);
1872 return replaceInstUsesWith(I, UMax);
1873 }
1874
1875 if (Instruction *Ashr = foldAddToAshr(I))
1876 return Ashr;
1877
1878 // Ceiling division by power-of-2:
1879 // (X >> log2(N)) + zext(X & (N-1) != 0) --> (X + (N-1)) >> log2(N)
1880 // This is valid when adding (N-1) to X doesn't overflow.
1881 {
1882 Value *X;
1883 const APInt *ShiftAmt, *Mask;
1884 CmpPredicate Pred;
1885
1886 // Match: (X >> C) + zext((X & Mask) != 0)
1887 // or: zext((X & Mask) != 0) + (X >> C)
1888 if (match(&I, m_c_Add(m_OneUse(m_LShr(m_Value(X), m_APInt(ShiftAmt))),
1889 m_OneUse(m_ZExt(m_SpecificICmp(
1890 ICmpInst::ICMP_NE,
1891 m_And(m_Deferred(X), m_LowBitMask(Mask)),
1892 m_ZeroInt())))) &&
1893 Mask->popcount() == *ShiftAmt) {
1894
1895 // Check if X + Mask doesn't overflow
1896 Constant *MaskC = ConstantInt::get(X->getType(), *Mask);
1897 if (willNotOverflowUnsignedAdd(X, MaskC, I)) {
1898 // (X + Mask) >> ShiftAmt
1899 Value *Add = Builder.CreateNUWAdd(X, MaskC);
1900 return BinaryOperator::CreateLShr(
1901 Add, ConstantInt::get(X->getType(), *ShiftAmt));
1902 }
1903 }
1904 }
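// Worked example (assumed N = 8, so Mask = 7 and log2(N) = 3):
//   (X >> 3) + zext((X & 7) != 0) == (X + 7) >> 3
// i.e. a two-op ceiling division, valid because the nuw check above
// guarantees X + 7 does not wrap.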
1905
1906 // (~X) + (~Y) --> -2 - (X + Y)
1907 {
1908 // To save instructions, we need to consume both
1909 // LHS and RHS (i.e., each has a `not` that is folded away).
1910 bool ConsumesLHS, ConsumesRHS;
1911 if (isFreeToInvert(LHS, LHS->hasOneUse(), ConsumesLHS) && ConsumesLHS &&
1912 isFreeToInvert(RHS, RHS->hasOneUse(), ConsumesRHS) && ConsumesRHS) {
1913 Value *NotLHS = getFreelyInverted(LHS, LHS->hasOneUse(), &Builder);
1914 Value *NotRHS = getFreelyInverted(RHS, RHS->hasOneUse(), &Builder);
1915 assert(NotLHS != nullptr && NotRHS != nullptr &&
1916 "isFreeToInvert desynced with getFreelyInverted");
1917 Value *LHSPlusRHS = Builder.CreateAdd(NotLHS, NotRHS);
1918 return BinaryOperator::CreateSub(
1919 ConstantInt::getSigned(RHS->getType(), -2), LHSPlusRHS);
1920 }
1921 }
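// Derivation for the fold above: ~X == -X - 1, so
//   ~X + ~Y == (-X - 1) + (-Y - 1) == -2 - (X + Y).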
1922
1923 if (Instruction *R = tryFoldInstWithCtpopWithNot(&I))
1924 return R;
1925
1926 // TODO(jingyue): Consider willNotOverflowSignedAdd and
1927 // willNotOverflowUnsignedAdd to reduce the number of invocations of
1928 // computeKnownBits.
1929 bool Changed = false;
1930 if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHSCache, RHSCache, I)) {
1931 Changed = true;
1932 I.setHasNoSignedWrap(true);
1933 }
1934 if (!I.hasNoUnsignedWrap() &&
1935 willNotOverflowUnsignedAdd(LHSCache, RHSCache, I)) {
1936 Changed = true;
1937 I.setHasNoUnsignedWrap(true);
1938 }
1939
1940 if (Instruction *V = canonicalizeLowbitMask(I, Builder))
1941 return V;
1942
1943 if (Instruction *V =
1944 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
1945 return V;
1946
1947 if (Instruction *SatAdd = foldToUnsignedSaturatedAdd(I))
1948 return SatAdd;
1949
1950 // usub.sat(A, B) + B => umax(A, B)
1951 if (match(&I, m_c_BinOp(
1952 m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Value(A), m_Value(B))),
1953 m_Deferred(B)))) {
1954 return replaceInstUsesWith(I,
1955 Builder.CreateIntrinsic(Intrinsic::umax, {I.getType()}, {A, B}));
1956 }
1957
1958 // ctpop(A) + ctpop(B) => ctpop(A | B) if A and B have no bits set in common.
1959 if (match(LHS, m_OneUse(m_Ctpop(m_Value(A)))) &&
1960 match(RHS, m_OneUse(m_Ctpop(m_Value(B)))) &&
1961 haveNoCommonBitsSet(A, B, SQ.getWithInstruction(&I)))
1962 return replaceInstUsesWith(
1963 I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
1964 {Builder.CreateDisjointOr(A, B)}));
1965
1966 // Fold the log2_ceil idiom:
1967 // zext(ctpop(A) >u/!= 1) + (ctlz(A, true) ^ (BW - 1))
1968 // -->
1969 // BW - ctlz(A - 1, false)
1970 const APInt *XorC;
1971 CmpPredicate Pred;
1972 if (match(&I, m_c_Add(m_ZExt(m_ICmp(Pred, m_Ctpop(m_Value(A)), m_One())),
1973 m_OneUse(m_ZExtOrSelf(m_OneUse(m_Xor(
1974 m_OneUse(m_TruncOrSelf(m_OneUse(
1975 m_Ctlz(m_Deferred(A), m_One())))),
1976 m_APInt(XorC))))))) &&
1977 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_NE) &&
1978 *XorC == A->getType()->getScalarSizeInBits() - 1) {
1979 Value *Sub = Builder.CreateAdd(A, Constant::getAllOnesValue(A->getType()));
1980 Value *Ctlz = Builder.CreateIntrinsic(Intrinsic::ctlz, {A->getType()},
1981 {Sub, Builder.getFalse()});
1982 Value *Ret = Builder.CreateSub(
1983 ConstantInt::get(A->getType(), A->getType()->getScalarSizeInBits()),
1984 Ctlz, "", /*HasNUW=*/true, /*HasNSW=*/true);
1985 return replaceInstUsesWith(I, Builder.CreateZExtOrTrunc(Ret, I.getType()));
1986 }
1987
1988 if (Instruction *Res = foldSquareSumInt(I))
1989 return Res;
1990
1991 if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
1992 return Res;
1993
1994 if (Instruction *Res = foldBinOpOfSelectAndCastOfSelectCondition(I))
1995 return Res;
1996
1997 if (Instruction *Res = foldDivCeil(I))
1998 return Res;
1999
2000 // Re-enqueue users of the induction variable of add recurrence if we infer
2001 // new nuw/nsw flags.
2002 if (Changed) {
2003 PHINode *PHI;
2004 Value *Start, *Step;
2005 if (matchSimpleRecurrence(&I, PHI, Start, Step))
2006 Worklist.pushUsersToWorkList(*PHI);
2007 }
2008
2009 return Changed ? &I : nullptr;
2010}
2011
2012/// Eliminate an op from a linear interpolation (lerp) pattern.
2013 static Instruction *factorizeLerp(BinaryOperator &I,
2014 InstCombiner::BuilderTy &Builder) {
2015 Value *X, *Y, *Z;
2016 if (!match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_Value(Y),
2017 m_OneUse(m_FSub(m_FPOne(),
2018 m_Value(Z))))),
2019 m_OneUse(m_c_FMul(m_Value(X), m_Specific(Z))))))
2020 return nullptr;
2021
2022 // (Y * (1.0 - Z)) + (X * Z) --> Y + Z * (X - Y) [8 commuted variants]
2023 Value *XY = Builder.CreateFSubFMF(X, Y, &I);
2024 Value *MulZ = Builder.CreateFMulFMF(Z, XY, &I);
2025 return BinaryOperator::CreateFAddFMF(Y, MulZ, &I);
2026}
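// Derivation (ordinary lerp algebra; the reassoc/nsz flags demanded by
// the caller make this legal for FP):
//   Y*(1 - Z) + X*Z == Y - Y*Z + X*Z == Y + Z*(X - Y),
// removing one floating-point operation (four ops become three).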
2027
2028/// Factor a common operand out of fadd/fsub of fmul/fdiv.
2029 static Instruction *factorizeFAddFSub(BinaryOperator &I,
2030 InstCombiner::BuilderTy &Builder) {
2031 assert((I.getOpcode() == Instruction::FAdd ||
2032 I.getOpcode() == Instruction::FSub) && "Expecting fadd/fsub");
2033 assert(I.hasAllowReassoc() && I.hasNoSignedZeros() &&
2034 "FP factorization requires FMF");
2035
2036 if (Instruction *Lerp = factorizeLerp(I, Builder))
2037 return Lerp;
2038
2039 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2040 if (!Op0->hasOneUse() || !Op1->hasOneUse())
2041 return nullptr;
2042
2043 Value *X, *Y, *Z;
2044 bool IsFMul;
2045 if ((match(Op0, m_FMul(m_Value(X), m_Value(Z))) &&
2046 match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))) ||
2047 (match(Op0, m_FMul(m_Value(Z), m_Value(X))) &&
2048 match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))))
2049 IsFMul = true;
2050 else if (match(Op0, m_FDiv(m_Value(X), m_Value(Z))) &&
2051 match(Op1, m_FDiv(m_Value(Y), m_Specific(Z))))
2052 IsFMul = false;
2053 else
2054 return nullptr;
2055
2056 // (X * Z) + (Y * Z) --> (X + Y) * Z
2057 // (X * Z) - (Y * Z) --> (X - Y) * Z
2058 // (X / Z) + (Y / Z) --> (X + Y) / Z
2059 // (X / Z) - (Y / Z) --> (X - Y) / Z
2060 bool IsFAdd = I.getOpcode() == Instruction::FAdd;
2061 Value *XY = IsFAdd ? Builder.CreateFAddFMF(X, Y, &I)
2062 : Builder.CreateFSubFMF(X, Y, &I);
2063
2064 // Bail out if we just created a denormal constant.
2065 // TODO: This is copied from a previous implementation. Is it necessary?
2066 const APFloat *C;
2067 if (match(XY, m_APFloat(C)) && !C->isNormal())
2068 return nullptr;
2069
2070 return IsFMul ? BinaryOperator::CreateFMulFMF(XY, Z, &I)
2071 : BinaryOperator::CreateFDivFMF(XY, Z, &I);
2072}
2073
2074 Instruction *InstCombinerImpl::visitFAdd(BinaryOperator &I) {
2075 if (Value *V = simplifyFAddInst(I.getOperand(0), I.getOperand(1),
2076 I.getFastMathFlags(),
2077 SQ.getWithInstruction(&I)))
2078 return replaceInstUsesWith(I, V);
2079
2080 if (SimplifyAssociativeOrCommutative(I))
2081 return &I;
2082
2083 if (Instruction *X = foldVectorBinop(I))
2084 return X;
2085
2086 if (Instruction *Phi = foldBinopWithPhiOperands(I))
2087 return Phi;
2088
2089 if (Instruction *FoldedFAdd = foldBinOpIntoSelectOrPhi(I))
2090 return FoldedFAdd;
2091
2092 // B = fadd A, 0.0
2093 // Z = Op B
2094 // can be transformed into
2095 // Z = Op A
2096 // where Op is an operation for which we can ignore the sign of zero in the fadd.
2097 Value *A;
2098 if (match(&I, m_OneUse(m_FAdd(m_Value(A), m_AnyZeroFP()))) &&
2099 canIgnoreSignBitOfZero(*I.use_begin()))
2100 return replaceInstUsesWith(I, A);
2101
2102 // (-X) + Y --> Y - X
2103 Value *X, *Y;
2104 if (match(&I, m_c_FAdd(m_FNeg(m_Value(X)), m_Value(Y))))
2105 return BinaryOperator::CreateFSubFMF(Y, X, &I);
2106 
2107 // Similar to above, but look through fmul/fdiv for the negated term.
2108 // (-X * Y) + Z --> Z - (X * Y) [4 commuted variants]
2109 Value *Z;
2110 if (match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))),
2111 m_Value(Z)))) {
2112 Value *XY = Builder.CreateFMulFMF(X, Y, &I);
2113 return BinaryOperator::CreateFSubFMF(Z, XY, &I);
2114 }
2115 // (-X / Y) + Z --> Z - (X / Y) [2 commuted variants]
2116 // (X / -Y) + Z --> Z - (X / Y) [2 commuted variants]
2117 if (match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y))),
2118 m_Value(Z))) ||
2119 match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))),
2120 m_Value(Z)))) {
2121 Value *XY = Builder.CreateFDivFMF(X, Y, &I);
2122 return BinaryOperator::CreateFSubFMF(Z, XY, &I);
2123 }
2124
2125 // Check for (fadd double (sitofp x), y), see if we can merge this into an
2126 // integer add followed by a promotion.
2127 if (Instruction *R = foldFBinOpOfIntCasts(I))
2128 return R;
2129
2130 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
2131 // Handle special cases for FAdd with selects feeding the operation
2132 if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS))
2133 return replaceInstUsesWith(I, V);
2134
2135 if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
2136 if (Instruction *F = factorizeFAddFSub(I, Builder))
2137 return F;
2138
2139 if (Instruction *F = foldSquareSumFP(I))
2140 return F;
2141
2142 // Try to fold fadd into start value of reduction intrinsic.
2143 if (match(&I, m_c_FAdd(m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
2144 m_AnyZeroFP(), m_Value(X))),
2145 m_Value(Y)))) {
2146 // fadd (rdx 0.0, X), Y --> rdx Y, X
2147 return replaceInstUsesWith(
2148 I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
2149 {X->getType()}, {Y, X}, &I));
2150 }
2151 const APFloat *StartC, *C;
2152 if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
2153 m_APFloat(StartC), m_Value(X)))) &&
2154 match(RHS, m_APFloat(C))) {
2155 // fadd (rdx StartC, X), C --> rdx (C + StartC), X
2156 Constant *NewStartC = ConstantFP::get(I.getType(), *C + *StartC);
2157 return replaceInstUsesWith(
2158 I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
2159 {X->getType()}, {NewStartC, X}, &I));
2160 }
2161
2162 // (X * MulC) + X --> X * (MulC + 1.0)
2163 Constant *MulC;
2164 if (match(&I, m_c_FAdd(m_FMul(m_Value(X), m_ImmConstant(MulC)),
2165 m_Deferred(X)))) {
2166 if (Constant *NewMulC = ConstantFoldBinaryOpOperands(
2167 Instruction::FAdd, MulC, ConstantFP::get(I.getType(), 1.0), DL))
2168 return BinaryOperator::CreateFMulFMF(X, NewMulC, &I);
2169 }
2170
2171 // (-X - Y) + (X + Z) --> Z - Y
2172 if (match(&I, m_c_FAdd(m_OneUse(m_FSub(m_FNeg(m_Value(X)), m_Value(Y))),
2173 m_c_FAdd(m_Deferred(X), m_Value(Z)))))
2174 return BinaryOperator::CreateFSubFMF(Z, Y, &I);
2175
2176 if (Value *V = FAddCombine(Builder).simplify(&I))
2177 return replaceInstUsesWith(I, V);
2178 }
2179
2180 // minimum(X, Y) + maximum(X, Y) => X + Y.
2181 if (match(&I,
2182 m_c_FAdd(m_Intrinsic<Intrinsic::maximum>(m_Value(X), m_Value(Y)),
2183 m_c_Intrinsic<Intrinsic::minimum>(m_Deferred(X),
2184 m_Deferred(Y))))) {
2185 BinaryOperator *Result = BinaryOperator::CreateFAddFMF(X, Y, &I);
2186 // We cannot preserve ninf if nnan flag is not set.
2187 // If X is NaN and Y is Inf then in original program we had NaN + NaN,
2188 // while in optimized version NaN + Inf and this is a poison with ninf flag.
2189 if (!Result->hasNoNaNs())
2190 Result->setHasNoInfs(false);
2191 return Result;
2192 }
2193
2194 return nullptr;
2195}
2196
2197 CommonPointerBase CommonPointerBase::compute(Value *LHS, Value *RHS) {
2198 CommonPointerBase Base;
2199 
2200 if (LHS->getType() != RHS->getType())
2201 return Base;
2202
2203 // Collect all base pointers of LHS.
2204 SmallPtrSet<Value *, 16> Ptrs;
2205 Value *Ptr = LHS;
2206 while (true) {
2207 Ptrs.insert(Ptr);
2208 if (auto *GEP = dyn_cast<GEPOperator>(Ptr))
2209 Ptr = GEP->getPointerOperand();
2210 else
2211 break;
2212 }
2213
2214 // Find common base and collect RHS GEPs.
2215 bool First = true;
2216 while (true) {
2217 if (Ptrs.contains(RHS)) {
2218 Base.Ptr = RHS;
2219 break;
2220 }
2221
2222 if (auto *GEP = dyn_cast<GEPOperator>(RHS)) {
2223 Base.RHSGEPs.push_back(GEP);
2224 if (First) {
2225 First = false;
2226 Base.RHSNW = GEP->getNoWrapFlags();
2227 } else {
2228 Base.RHSNW = Base.RHSNW.intersectForOffsetAdd(GEP->getNoWrapFlags());
2229 }
2230 RHS = GEP->getPointerOperand();
2231 } else {
2232 // No common base.
2233 return Base;
2234 }
2235 }
2236
2237 // Collect LHS GEPs.
2238 First = true;
2239 while (true) {
2240 if (LHS == Base.Ptr)
2241 break;
2242
2243 auto *GEP = cast<GEPOperator>(LHS);
2244 Base.LHSGEPs.push_back(GEP);
2245 if (First) {
2246 First = false;
2247 Base.LHSNW = GEP->getNoWrapFlags();
2248 } else {
2249 Base.LHSNW = Base.LHSNW.intersectForOffsetAdd(GEP->getNoWrapFlags());
2250 }
2251 LHS = GEP->getPointerOperand();
2252 }
2253
2254 return Base;
2255}
2256
2257 bool CommonPointerBase::isExpensive() const {
2258 unsigned NumGEPs = 0;
2259 auto ProcessGEPs = [&NumGEPs](ArrayRef<GEPOperator *> GEPs) {
2260 bool SeenMultiUse = false;
2261 for (GEPOperator *GEP : GEPs) {
2262 // Only count multi-use GEPs, excluding the first one. For the first one,
2263 // we will directly reuse the offset. For one-use GEPs, their offset will
2264 // be folded into a multi-use GEP.
2265 if (!GEP->hasOneUse()) {
2266 if (SeenMultiUse)
2267 ++NumGEPs;
2268 SeenMultiUse = true;
2269 }
2270 }
2271 };
2272 ProcessGEPs(LHSGEPs);
2273 ProcessGEPs(RHSGEPs);
2274 return NumGEPs > 2;
2275}
2276
2277/// Optimize pointer differences into the same array into a size. Consider:
2278/// &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
2279/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
2280 Value *InstCombinerImpl::OptimizePointerDifference(Value *LHS, Value *RHS,
2281 Type *Ty, bool IsNUW) {
2282 CommonPointerBase Base = CommonPointerBase::compute(LHS, RHS);
2283 if (!Base.Ptr || Base.isExpensive())
2284 return nullptr;
2285
2286 // To avoid duplicating the offset arithmetic, rewrite the GEP to use the
2287 // computed offset.
2288 // TODO: We should probably do this even if there is only one GEP.
2289 bool RewriteGEPs = !Base.LHSGEPs.empty() && !Base.RHSGEPs.empty();
2290
2291 Type *IdxTy = DL.getIndexType(LHS->getType());
2292 Value *Result = EmitGEPOffsets(Base.LHSGEPs, Base.LHSNW, IdxTy, RewriteGEPs);
2293 Value *Offset2 = EmitGEPOffsets(Base.RHSGEPs, Base.RHSNW, IdxTy, RewriteGEPs);
2294
2295 // If this is a single inbounds GEP and the original sub was nuw,
2296 // then the final multiplication is also nuw.
2297 if (auto *I = dyn_cast<OverflowingBinaryOperator>(Result))
2298 if (IsNUW && match(Offset2, m_Zero()) && Base.LHSNW.isInBounds() &&
2299 (I->use_empty() || I->hasOneUse()) && I->hasNoSignedWrap() &&
2300 !I->hasNoUnsignedWrap() &&
2301 ((I->getOpcode() == Instruction::Mul &&
2302 match(I->getOperand(1), m_NonNegative())) ||
2303 I->getOpcode() == Instruction::Shl))
2304 cast<Instruction>(I)->setHasNoUnsignedWrap();
2305
2306 // If we have a 2nd GEP of the same base pointer, subtract the offsets.
2307 // If both GEPs are inbounds, then the subtract does not have signed overflow.
2308 // If both GEPs are nuw and the original sub is nuw, the new sub is also nuw.
2309 if (!match(Offset2, m_Zero())) {
2310 Result =
2311 Builder.CreateSub(Result, Offset2, "gepdiff",
2312 IsNUW && Base.LHSNW.hasNoUnsignedWrap() &&
2313 Base.RHSNW.hasNoUnsignedWrap(),
2314 Base.LHSNW.isInBounds() && Base.RHSNW.isInBounds());
2315 }
2316
2317 return Builder.CreateIntCast(Result, Ty, true);
2318}
2319
2320 static Instruction *foldSubOfMinMax(BinaryOperator &I,
2321 InstCombiner::BuilderTy &Builder) {
2322 Value *Op0 = I.getOperand(0);
2323 Value *Op1 = I.getOperand(1);
2324 Type *Ty = I.getType();
2325 auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op1);
2326 if (!MinMax)
2327 return nullptr;
2328
2329 // sub(add(X,Y), s/umin(X,Y)) --> s/umax(X,Y)
2330 // sub(add(X,Y), s/umax(X,Y)) --> s/umin(X,Y)
2331 Value *X = MinMax->getLHS();
2332 Value *Y = MinMax->getRHS();
2333 if (match(Op0, m_c_Add(m_Specific(X), m_Specific(Y))) &&
2334 (Op0->hasOneUse() || Op1->hasOneUse())) {
2335 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMax->getIntrinsicID());
2336 Function *F = Intrinsic::getOrInsertDeclaration(I.getModule(), InvID, Ty);
2337 return CallInst::Create(F, {X, Y});
2338 }
2339
2340 // sub(add(X,Y),umin(Y,Z)) --> add(X,usub.sat(Y,Z))
2341 // sub(add(X,Z),umin(Y,Z)) --> add(X,usub.sat(Z,Y))
2342 Value *Z;
2343 if (match(Op1, m_OneUse(m_UMin(m_Value(Y), m_Value(Z))))) {
2344 if (match(Op0, m_OneUse(m_c_Add(m_Specific(Y), m_Value(X))))) {
2345 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Y, Z});
2346 return BinaryOperator::CreateAdd(X, USub);
2347 }
2348 if (match(Op0, m_OneUse(m_c_Add(m_Specific(Z), m_Value(X))))) {
2349 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Z, Y});
2350 return BinaryOperator::CreateAdd(X, USub);
2351 }
2352 }
2353
2354 // sub Op0, smin((sub nsw Op0, Z), 0) --> smax Op0, Z
2355 // sub Op0, smax((sub nsw Op0, Z), 0) --> smin Op0, Z
2356 if (MinMax->isSigned() && match(Y, m_ZeroInt()) &&
2357 match(X, m_NSWSub(m_Specific(Op0), m_Value(Z)))) {
2358 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMax->getIntrinsicID());
2359 Function *F = Intrinsic::getOrInsertDeclaration(I.getModule(), InvID, Ty);
2360 return CallInst::Create(F, {Op0, Z});
2361 }
2362
2363 return nullptr;
2364}
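// The first fold above follows from min(X,Y) + max(X,Y) == X + Y, so
//   (X + Y) - min(X,Y) == max(X,Y)  and  (X + Y) - max(X,Y) == min(X,Y).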
2365
2366 Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
2367 if (Value *V = simplifySubInst(I.getOperand(0), I.getOperand(1),
2368 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
2369 SQ.getWithInstruction(&I)))
2370 return replaceInstUsesWith(I, V);
2371
2372 if (Instruction *X = foldVectorBinop(I))
2373 return X;
2374
2375 if (Instruction *Phi = foldBinopWithPhiOperands(I))
2376 return Phi;
2377
2378 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2379
2380 // If this is a 'B = x-(-A)', change to B = x+A.
2381 // We deal with this without involving Negator to preserve NSW flag.
2382 if (Value *V = dyn_castNegVal(Op1)) {
2383 BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);
2384
2385 if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
2386 assert(BO->getOpcode() == Instruction::Sub &&
2387 "Expected a subtraction operator!");
2388 if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
2389 Res->setHasNoSignedWrap(true);
2390 } else {
2391 if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
2392 Res->setHasNoSignedWrap(true);
2393 }
2394
2395 return Res;
2396 }
2397
2398 // Try this before Negator to preserve NSW flag.
2399 if (Instruction *R = factorizeMathWithShlOps(I, Builder))
2400 return R;
2401
2402 Constant *C;
2403 if (match(Op0, m_ImmConstant(C))) {
2404 Value *X;
2405 Constant *C2;
2406
2407 // C-(X+C2) --> (C-C2)-X
2408 if (match(Op1, m_AddLike(m_Value(X), m_ImmConstant(C2)))) {
2409 // C-C2 never overflows; if both C-(X+C2) and (X+C2) have NSW/NUW,
2410 // then (C-C2)-X can have NSW/NUW.
2411 bool WillNotSOV = willNotOverflowSignedSub(C, C2, I);
2412 BinaryOperator *Res =
2413 BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);
2414
2415 // or disjoint is equivalent to add nuw nsw.
2416 bool Op1NSW = true;
2417 bool Op1NUW = true;
2418
2419 if (auto *OBO1 = dyn_cast<OverflowingBinaryOperator>(Op1)) {
2420 Op1NSW = OBO1->hasNoSignedWrap();
2421 Op1NUW = OBO1->hasNoUnsignedWrap();
2422 }
2423
2424 Res->setHasNoSignedWrap(I.hasNoSignedWrap() && Op1NSW && WillNotSOV);
2425 Res->setHasNoUnsignedWrap(I.hasNoUnsignedWrap() && Op1NUW);
2426 return Res;
2427 }
2428 }
2429
2430 auto TryToNarrowDeduceFlags = [this, &I, &Op0, &Op1]() -> Instruction * {
2431 if (Instruction *Ext = narrowMathIfNoOverflow(I))
2432 return Ext;
2433
2434 bool Changed = false;
2435 if (!I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1, I)) {
2436 Changed = true;
2437 I.setHasNoSignedWrap(true);
2438 }
2439 if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1, I)) {
2440 Changed = true;
2441 I.setHasNoUnsignedWrap(true);
2442 }
2443
2444 return Changed ? &I : nullptr;
2445 };
2446
2447 // First, let's try to interpret `sub a, b` as `add a, (sub 0, b)`,
2448 // and let's try to sink `(sub 0, b)` into `b` itself. But only if this isn't
2449 // a pure negation used by a select that looks like abs/nabs.
2450 bool IsNegation = match(Op0, m_ZeroInt());
2451 if (!IsNegation || none_of(I.users(), match_fn(m_c_Select(m_Specific(Op1),
2452 m_Specific(&I))))) {
2453 if (Value *NegOp1 = Negator::Negate(IsNegation, /* IsNSW */ IsNegation &&
2454 I.hasNoSignedWrap(),
2455 Op1, *this))
2456 return BinaryOperator::CreateAdd(NegOp1, Op0);
2457 }
2458 if (IsNegation)
2459 return TryToNarrowDeduceFlags(); // Should have been handled in Negator!
2460
2461 // (A*B)-(A*C) -> A*(B-C) etc
2462 if (Value *V = foldUsingDistributiveLaws(I))
2463 return replaceInstUsesWith(I, V);
2464
2465 if (I.getType()->isIntOrIntVectorTy(1))
2466 return BinaryOperator::CreateXor(Op0, Op1);
2467
2468 // Replace (-1 - A) with (~A).
2469 if (match(Op0, m_AllOnes()))
2470 return BinaryOperator::CreateNot(Op1);
2471
2472 // (X + -1) - Y --> ~Y + X
2473 Value *X, *Y;
2474 if (match(Op0, m_OneUse(m_Add(m_Value(X), m_AllOnes()))))
2475 return BinaryOperator::CreateAdd(Builder.CreateNot(Op1), X);
2476
2477 // if (C1 & C2) == C2 then (X & C1) - (X & C2) -> X & (C1 ^ C2)
2478 Constant *C1, *C2;
2479 if (match(Op0, m_And(m_Value(X), m_ImmConstant(C1))) &&
2480 match(Op1, m_And(m_Specific(X), m_ImmConstant(C2)))) {
2481 Value *AndC = ConstantFoldBinaryInstruction(Instruction::And, C1, C2);
2482 if (C2->isElementWiseEqual(AndC))
2483 return BinaryOperator::CreateAnd(
2484 X, ConstantFoldBinaryInstruction(Instruction::Xor, C1, C2));
2485 }
2486
2487 // Reassociate sub/add sequences to create more add instructions and
2488 // reduce dependency chains:
2489 // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
2490 Value *Z;
2491 if (match(Op0, m_OneUse(m_c_Add(m_OneUse(m_Sub(m_Value(X), m_Value(Y))),
2492 m_Value(Z))))) {
2493 Value *XZ = Builder.CreateAdd(X, Z);
2494 Value *YW = Builder.CreateAdd(Y, Op1);
2495 return BinaryOperator::CreateSub(XZ, YW);
2496 }
2497
2498 // ((X - Y) - Op1) --> X - (Y + Op1)
2499 if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y))))) {
2500 OverflowingBinaryOperator *LHSSub = cast<OverflowingBinaryOperator>(Op0);
2501 bool HasNUW = I.hasNoUnsignedWrap() && LHSSub->hasNoUnsignedWrap();
2502 bool HasNSW = HasNUW && I.hasNoSignedWrap() && LHSSub->hasNoSignedWrap();
2503 Value *Add = Builder.CreateAdd(Y, Op1, "", /*HasNUW=*/HasNUW,
2504 /*HasNSW=*/HasNSW);
2505 BinaryOperator *Sub = BinaryOperator::CreateSub(X, Add);
2506 Sub->setHasNoUnsignedWrap(HasNUW);
2507 Sub->setHasNoSignedWrap(HasNSW);
2508 return Sub;
2509 }
2510
2511 {
2512 // (X + Z) - (Y + Z) --> (X - Y)
2513 // This is done in other passes, but we want to be able to consume this
2514 // pattern in InstCombine so we can generate it without creating infinite
2515 // loops.
2516 if (match(Op0, m_Add(m_Value(X), m_Value(Z))) &&
2517 match(Op1, m_c_Add(m_Value(Y), m_Specific(Z))))
2518 return BinaryOperator::CreateSub(X, Y);
2519
2520 // (X + C0) - (Y + C1) --> (X - Y) + (C0 - C1)
2521 Constant *CX, *CY;
2522 if (match(Op0, m_OneUse(m_Add(m_Value(X), m_ImmConstant(CX)))) &&
2523 match(Op1, m_OneUse(m_Add(m_Value(Y), m_ImmConstant(CY))))) {
2524 Value *OpsSub = Builder.CreateSub(X, Y);
2525 Constant *ConstsSub = ConstantExpr::getSub(CX, CY);
2526 return BinaryOperator::CreateAdd(OpsSub, ConstsSub);
2527 }
2528 }
2529
2530 {
2531 Value *W, *Z;
2532 if (match(Op0, m_AddLike(m_Value(W), m_Value(X))) &&
2533 match(Op1, m_AddLike(m_Value(Y), m_Value(Z)))) {
2534 Instruction *R = nullptr;
2535 if (W == Y)
2536 R = BinaryOperator::CreateSub(X, Z);
2537 else if (W == Z)
2538 R = BinaryOperator::CreateSub(X, Y);
2539 else if (X == Y)
2540 R = BinaryOperator::CreateSub(W, Z);
2541 else if (X == Z)
2542 R = BinaryOperator::CreateSub(W, Y);
2543 if (R) {
2544 bool NSW = I.hasNoSignedWrap() &&
2545 match(Op0, m_NSWAddLike(m_Value(), m_Value())) &&
2546 match(Op1, m_NSWAddLike(m_Value(), m_Value()));
2547
2548 bool NUW = I.hasNoUnsignedWrap() &&
2549 match(Op1, m_NUWAddLike(m_Value(), m_Value()));
2550 R->setHasNoSignedWrap(NSW);
2551 R->setHasNoUnsignedWrap(NUW);
2552 return R;
2553 }
2554 }
2555 }
2556
2557 // (~X) - (~Y) --> Y - X
2558 {
2559 // Need to ensure we can consume at least one of the `not` instructions,
2560 // otherwise this can loop infinitely.
2561 bool ConsumesOp0, ConsumesOp1;
2562 if (isFreeToInvert(Op0, Op0->hasOneUse(), ConsumesOp0) &&
2563 isFreeToInvert(Op1, Op1->hasOneUse(), ConsumesOp1) &&
2564 (ConsumesOp0 || ConsumesOp1)) {
2565 Value *NotOp0 = getFreelyInverted(Op0, Op0->hasOneUse(), &Builder);
2566 Value *NotOp1 = getFreelyInverted(Op1, Op1->hasOneUse(), &Builder);
2567 assert(NotOp0 != nullptr && NotOp1 != nullptr &&
2568 "isFreeToInvert desynced with getFreelyInverted");
2569 return BinaryOperator::CreateSub(NotOp1, NotOp0);
2570 }
2571 }
2572
2573 auto m_AddRdx = [](Value *&Vec) {
2574 return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_add>(m_Value(Vec)));
2575 };
2576 Value *V0, *V1;
2577 if (match(Op0, m_AddRdx(V0)) && match(Op1, m_AddRdx(V1)) &&
2578 V0->getType() == V1->getType()) {
2579 // Difference of sums is sum of differences:
2580 // add_rdx(V0) - add_rdx(V1) --> add_rdx(V0 - V1)
2581 Value *Sub = Builder.CreateSub(V0, V1);
2582 Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_add,
2583 {Sub->getType()}, {Sub});
2584 return replaceInstUsesWith(I, Rdx);
2585 }
2586
2587 if (Constant *C = dyn_cast<Constant>(Op0)) {
2588 Value *X;
2589 if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
2590 // C - (zext bool) --> bool ? C - 1 : C
2591 return createSelectInstWithUnknownProfile(X, InstCombiner::SubOne(C), C);
2592 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
2593 // C - (sext bool) --> bool ? C + 1 : C
2594 return createSelectInstWithUnknownProfile(X, InstCombiner::AddOne(C), C);
2595 
2596 // C - ~X == X + (1+C)
2597 if (match(Op1, m_Not(m_Value(X))))
2598 return BinaryOperator::CreateAdd(X, InstCombiner::AddOne(C));
2599
2600 // Try to fold constant sub into select arguments.
2601 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
2602 if (Instruction *R = FoldOpIntoSelect(I, SI))
2603 return R;
2604
2605 // Try to fold constant sub into PHI values.
2606 if (PHINode *PN = dyn_cast<PHINode>(Op1))
2607 if (Instruction *R = foldOpIntoPhi(I, PN))
2608 return R;
2609
2610 Constant *C2;
2611
2612 // C-(C2-X) --> X+(C-C2)
2613 if (match(Op1, m_Sub(m_ImmConstant(C2), m_Value(X))))
2614 return BinaryOperator::CreateAdd(X, ConstantExpr::getSub(C, C2));
2615 }
2616
2617 const APInt *Op0C;
2618 if (match(Op0, m_APInt(Op0C))) {
2619 if (Op0C->isMask()) {
2620 // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
2621 // zero. We don't use information from dominating conditions so this
2622 // transform is easier to reverse if necessary.
2623 KnownBits RHSKnown = computeKnownBits(
2624 Op1, SQ.getWithInstruction(&I).getWithoutDomCondCache());
2625 if ((*Op0C | RHSKnown.Zero).isAllOnes())
2626 return BinaryOperator::CreateXor(Op1, Op0);
2627 }
2628
2629 // C - ((C3 -nuw X) & C2) --> (C - (C2 & C3)) + (X & C2) when:
2630 // (C3 - ((C2 & C3) - 1)) is pow2
2631 // ((C2 + C3) & ((C2 & C3) - 1)) == ((C2 & C3) - 1)
2632 // C2 is negative pow2 || sub nuw
2633 const APInt *C2, *C3;
2634 BinaryOperator *InnerSub;
2635 if (match(Op1, m_OneUse(m_And(m_BinOp(InnerSub), m_APInt(C2)))) &&
2636 match(InnerSub, m_Sub(m_APInt(C3), m_Value(X))) &&
2637 (InnerSub->hasNoUnsignedWrap() || C2->isNegatedPowerOf2())) {
2638 APInt C2AndC3 = *C2 & *C3;
2639 APInt C2AndC3Minus1 = C2AndC3 - 1;
2640 APInt C2AddC3 = *C2 + *C3;
2641 if ((*C3 - C2AndC3Minus1).isPowerOf2() &&
2642 C2AndC3Minus1.isSubsetOf(C2AddC3)) {
2643 Value *And = Builder.CreateAnd(X, ConstantInt::get(I.getType(), *C2));
2644 return BinaryOperator::CreateAdd(
2645 And, ConstantInt::get(I.getType(), *Op0C - C2AndC3));
2646 }
2647 }
2648 }
2649
2650 {
2651 Value *Y;
2652 // X-(X+Y) == -Y X-(Y+X) == -Y
2653 if (match(Op1, m_c_Add(m_Specific(Op0), m_Value(Y))))
2654 return BinaryOperator::CreateNeg(Y);
2655 
2656 // (X-Y)-X == -Y
2657 if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
2658 return BinaryOperator::CreateNeg(Y);
2659 }
2660
2661 // (sub (or A, B) (and A, B)) --> (xor A, B)
2662 {
2663 Value *A, *B;
2664 if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
2665 match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
2666 return BinaryOperator::CreateXor(A, B);
2667 }
2668
2669 // (sub (add A, B) (or A, B)) --> (and A, B)
2670 {
2671 Value *A, *B;
2672 if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
2673 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
2674 return BinaryOperator::CreateAnd(A, B);
2675 }
2676
2677 // (sub (add A, B) (and A, B)) --> (or A, B)
2678 {
2679 Value *A, *B;
2680 if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
2681 match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
2682 return BinaryOperator::CreateOr(A, B);
2683 }
2684
2685 // (sub (and A, B) (or A, B)) --> neg (xor A, B)
2686 {
2687 Value *A, *B;
2688 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2689 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
2690 (Op0->hasOneUse() || Op1->hasOneUse()))
2691 return BinaryOperator::CreateNeg(Builder.CreateXor(A, B));
2692 }
2693
2694 // (sub (or A, B), (xor A, B)) --> (and A, B)
2695 {
2696 Value *A, *B;
2697 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
2698 match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
2699 return BinaryOperator::CreateAnd(A, B);
2700 }
2701
2702 // (sub (xor A, B) (or A, B)) --> neg (and A, B)
2703 {
2704 Value *A, *B;
2705 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
2706 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
2707 (Op0->hasOneUse() || Op1->hasOneUse()))
2708 return BinaryOperator::CreateNeg(Builder.CreateAnd(A, B));
2709 }
2710
2711 {
2712 Value *Y;
2713 // ((X | Y) - X) --> (~X & Y)
2714 if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
2715 return BinaryOperator::CreateAnd(
2716 Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));
2717 }
2718
2719 {
2720 // (sub (and Op1, (neg X)), Op1) --> neg (and Op1, (add X, -1))
2721 Value *X;
2722 if (match(Op0, m_OneUse(m_c_And(m_Specific(Op1),
2723 m_OneUse(m_Neg(m_Value(X))))))) {
2724 return BinaryOperator::CreateNeg(Builder.CreateAnd(
2725 Op1, Builder.CreateAdd(X, Constant::getAllOnesValue(I.getType()))));
2726 }
2727 }
2728
2729 {
2730 // (sub (and Op1, C), Op1) --> neg (and Op1, ~C)
2731 Constant *C;
2732 if (match(Op0, m_OneUse(m_And(m_Specific(Op1), m_Constant(C))))) {
2733 return BinaryOperator::CreateNeg(
2734 Builder.CreateAnd(Op1, Builder.CreateNot(C)));
2735 }
2736 }
2737
2738 {
2739 // (sub (xor X, (sext C)), (sext C)) => (select C, (neg X), X)
2740 // (sub (sext C), (xor X, (sext C))) => (select C, X, (neg X))
2741 Value *C, *X;
2742 auto m_SubXorCmp = [&C, &X](Value *LHS, Value *RHS) {
2743 return match(LHS, m_OneUse(m_c_Xor(m_Value(X), m_Specific(RHS)))) &&
2744 match(RHS, m_SExt(m_Value(C))) &&
2745 (C->getType()->getScalarSizeInBits() == 1);
2746 };
2747 if (m_SubXorCmp(Op0, Op1))
2748 return createSelectInstWithUnknownProfile(C, Builder.CreateNeg(X), X);
2749 if (m_SubXorCmp(Op1, Op0))
2750 return createSelectInstWithUnknownProfile(C, X, Builder.CreateNeg(X));
2751 }
2752
2753 if (Instruction *R = foldSubOfMinMax(I, Builder))
2754 return R;
2755
2756 if (Instruction *R = tryFoldInstWithCtpopWithNot(&I))
2757 return R;
2758
2759 {
2760 // If we have a subtraction between some value and a select between
2761 // said value and something else, sink subtraction into select hands, i.e.:
2762 // sub (select %Cond, %TrueVal, %FalseVal), %Op1
2763 // ->
2764 // select %Cond, (sub %TrueVal, %Op1), (sub %FalseVal, %Op1)
2765 // or
2766 // sub %Op0, (select %Cond, %TrueVal, %FalseVal)
2767 // ->
2768 // select %Cond, (sub %Op0, %TrueVal), (sub %Op0, %FalseVal)
2769 // This will result in select between new subtraction and 0.
2770 auto SinkSubIntoSelect =
2771 [Ty = I.getType()](Value *Select, Value *OtherHandOfSub,
2772 auto SubBuilder) -> Instruction * {
2773 Value *Cond, *TrueVal, *FalseVal;
2774 if (!match(Select, m_OneUse(m_Select(m_Value(Cond), m_Value(TrueVal),
2775 m_Value(FalseVal)))))
2776 return nullptr;
2777 if (OtherHandOfSub != TrueVal && OtherHandOfSub != FalseVal)
2778 return nullptr;
2779 // While it is really tempting to just create two subtractions and let
2780 // InstCombine fold one of those to 0, it isn't possible to do so
2781 // because of worklist visitation order. So ugly it is.
2782 bool OtherHandOfSubIsTrueVal = OtherHandOfSub == TrueVal;
2783 Value *NewSub = SubBuilder(OtherHandOfSubIsTrueVal ? FalseVal : TrueVal);
2784 Constant *Zero = Constant::getNullValue(Ty);
2785 SelectInst *NewSel =
2786 SelectInst::Create(Cond, OtherHandOfSubIsTrueVal ? Zero : NewSub,
2787 OtherHandOfSubIsTrueVal ? NewSub : Zero);
2788 // Preserve prof metadata if any.
2789 NewSel->copyMetadata(cast<Instruction>(*Select));
2790 return NewSel;
2791 };
2792 if (Instruction *NewSel = SinkSubIntoSelect(
2793 /*Select=*/Op0, /*OtherHandOfSub=*/Op1,
2794 [Builder = &Builder, Op1](Value *OtherHandOfSelect) {
2795 return Builder->CreateSub(OtherHandOfSelect,
2796 /*OtherHandOfSub=*/Op1);
2797 }))
2798 return NewSel;
2799 if (Instruction *NewSel = SinkSubIntoSelect(
2800 /*Select=*/Op1, /*OtherHandOfSub=*/Op0,
2801 [Builder = &Builder, Op0](Value *OtherHandOfSelect) {
2802 return Builder->CreateSub(/*OtherHandOfSub=*/Op0,
2803 OtherHandOfSelect);
2804 }))
2805 return NewSel;
2806 }
2807
2808 // (X - (X & Y)) --> (X & ~Y)
2809 if (match(Op1, m_c_And(m_Specific(Op0), m_Value(Y))) &&
2810 (Op1->hasOneUse() || isa<Constant>(Y)))
2811 return BinaryOperator::CreateAnd(
2812 Op0, Builder.CreateNot(Y, Y->getName() + ".not"));
2813
2814 // ~X - Min/Max(~X, Y) -> ~Min/Max(X, ~Y) - X
2815 // ~X - Min/Max(Y, ~X) -> ~Min/Max(X, ~Y) - X
2816 // Min/Max(~X, Y) - ~X -> X - ~Min/Max(X, ~Y)
2817 // Min/Max(Y, ~X) - ~X -> X - ~Min/Max(X, ~Y)
2818 // As long as Y is freely invertible, this will be neutral or a win.
2819 // Note: We don't generate the inverse max/min, just create the 'not' of
2820 // it and let other folds do the rest.
2821 if (match(Op0, m_Not(m_Value(X))) &&
2822 match(Op1, m_c_MaxOrMin(m_Specific(Op0), m_Value(Y))) &&
2823 !Op0->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
2824 Value *Not = Builder.CreateNot(Op1);
2825 return BinaryOperator::CreateSub(Not, X);
2826 }
2827 if (match(Op1, m_Not(m_Value(X))) &&
2828 match(Op0, m_c_MaxOrMin(m_Specific(Op1), m_Value(Y))) &&
2829 !Op1->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
2830 Value *Not = Builder.CreateNot(Op0);
2831 return BinaryOperator::CreateSub(X, Not);
2832 }
2833
2834 // min(X+1, Y) - min(X, Y) --> zext X < Y
2835 // Replacing a sub and at least one min with an icmp
2836 // and a zext is a potential improvement.
2837 if (match(Op0, m_c_SMin(m_NSWAddLike(m_Value(X), m_One()), m_Value(Y))) &&
2838 match(Op1, m_c_SMin(m_Specific(X), m_Specific(Y))) &&
2839 I.getType()->getScalarSizeInBits() != 1 &&
2840 (Op0->hasOneUse() || Op1->hasOneUse())) {
2841 Value *Cond = Builder.CreateICmpSLT(X, Y);
2842 return new ZExtInst(Cond, I.getType());
2843 }
2844 if (match(Op0, m_c_UMin(m_NUWAddLike(m_Value(X), m_One()), m_Value(Y))) &&
2845 match(Op1, m_c_UMin(m_Specific(X), m_Specific(Y))) &&
2846 I.getType()->getScalarSizeInBits() != 1 &&
2847 (Op0->hasOneUse() || Op1->hasOneUse())) {
2848 Value *Cond = Builder.CreateICmpULT(X, Y);
2849 return new ZExtInst(Cond, I.getType());
2850 }
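// Case check for the two folds above (the nsw/nuw on X+1 rules out
// wrapping):
//   X < Y:  min(X+1, Y) - min(X, Y) == (X+1) - X == 1
//   X >= Y: min(X+1, Y) - min(X, Y) == Y - Y == 0
// which is exactly zext(X < Y).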
2851
2852 // Optimize pointer differences into the same array into a size. Consider:
2853 // &A[10] - &A[0]: we should compile this to "10".
2854 Value *LHSOp, *RHSOp;
2855 if (match(Op0, m_PtrToIntOrAddr(m_Value(LHSOp))) &&
2856 match(Op1, m_PtrToIntOrAddr(m_Value(RHSOp))))
2857 if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
2858 I.hasNoUnsignedWrap()))
2859 return replaceInstUsesWith(I, Res);
2860
2861 // trunc(p)-trunc(q) -> trunc(p-q)
2862 if (match(Op0, m_Trunc(m_PtrToIntOrAddr(m_Value(LHSOp)))) &&
2863 match(Op1, m_Trunc(m_PtrToIntOrAddr(m_Value(RHSOp)))))
2864 if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
2865 /* IsNUW */ false))
2866 return replaceInstUsesWith(I, Res);
2867
2868 auto MatchSubOfZExtOfPtrToIntOrAddr = [&]() {
2869 if (match(Op0, m_ZExt(m_PtrToIntSameSize(DL, m_Value(LHSOp)))) &&
2870 match(Op1, m_ZExt(m_PtrToIntSameSize(DL, m_Value(RHSOp)))))
2871 return true;
2872 if (match(Op0, m_ZExt(m_PtrToAddr(m_Value(LHSOp)))) &&
2873 match(Op1, m_ZExt(m_PtrToAddr(m_Value(RHSOp)))))
2874 return true;
2875 // Special case for non-canonical ptrtoint in constant expression,
2876 // where the zext has been folded into the ptrtoint.
2877 if (match(Op0, m_ZExt(m_PtrToIntSameSize(DL, m_Value(LHSOp)))) &&
2878 match(Op1, m_PtrToInt(m_Value(RHSOp))))
2879 return true;
2880 return false;
2881 };
2882 if (MatchSubOfZExtOfPtrToIntOrAddr()) {
2883 if (auto *GEP = dyn_cast<GEPOperator>(LHSOp)) {
2884 if (GEP->getPointerOperand() == RHSOp) {
2885 if (GEP->hasNoUnsignedWrap() || GEP->hasNoUnsignedSignedWrap()) {
2886 Value *Offset = EmitGEPOffset(GEP);
2887 Value *Res = GEP->hasNoUnsignedWrap()
2888 ? Builder.CreateZExt(
2889 Offset, I.getType(), "",
2890 /*IsNonNeg=*/GEP->hasNoUnsignedSignedWrap())
2891 : Builder.CreateSExt(Offset, I.getType());
2892 return replaceInstUsesWith(I, Res);
2893 }
2894 }
2895 }
2896 }
2897
2898 // Canonicalize a shifty way to code absolute value to the common pattern.
2899 // There are 2 potential commuted variants.
2900 // We're relying on the fact that we only do this transform when the shift has
2901 // exactly 2 uses and the xor has exactly 1 use (otherwise, we might increase
2902 // instructions).
2903 Value *A;
2904 const APInt *ShAmt;
2905 Type *Ty = I.getType();
2906 unsigned BitWidth = Ty->getScalarSizeInBits();
2907 if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
2908 Op1->hasNUses(2) && *ShAmt == BitWidth - 1 &&
2909 match(Op0, m_OneUse(m_c_Xor(m_Specific(A), m_Specific(Op1))))) {
2910 // B = ashr i32 A, 31 ; smear the sign bit
2911 // sub (xor A, B), B ; flip bits if negative and subtract -1 (add 1)
2912 // --> (A < 0) ? -A : A
2913 Value *IsNeg = Builder.CreateIsNeg(A);
2914 // Copy the nsw flags from the sub to the negate.
2915 Value *NegA = I.hasNoUnsignedWrap()
2916 ? Constant::getNullValue(A->getType())
2917 : Builder.CreateNeg(A, "", I.hasNoSignedWrap());
2918 return SelectInst::Create(IsNeg, NegA, A);
2919 }
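// Why the matched pattern computes abs (illustrated for i32): with
// B = ashr A, 31, B is 0 when A >= 0 and -1 when A < 0, so:
//   A >= 0: (A ^ 0) - 0 == A
//   A < 0:  (A ^ -1) - (-1) == ~A + 1 == -A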
2920
2921 // If we are subtracting a low-bit masked subset of some value from an add
2922 // of that same value with no low bits changed, that is clearing some low bits
2923 // of the sum:
2924 // sub (X + AddC), (X & AndC) --> and (X + AddC), ~AndC
2925 const APInt *AddC, *AndC;
2926 if (match(Op0, m_Add(m_Value(X), m_APInt(AddC))) &&
2927 match(Op1, m_And(m_Specific(X), m_APInt(AndC)))) {
2928 unsigned Cttz = AddC->countr_zero();
2929 APInt HighMask(APInt::getHighBitsSet(BitWidth, BitWidth - Cttz));
2930 if ((HighMask & *AndC).isZero())
2931 return BinaryOperator::CreateAnd(Op0, ConstantInt::get(Ty, ~(*AndC)));
2932 }
2933
2934 if (Instruction *V =
2935 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
2936 return V;
2937
2938 // X - usub.sat(X, Y) => umin(X, Y)
2939 if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Specific(Op0),
2940 m_Value(Y)))))
2941 return replaceInstUsesWith(
2942 I, Builder.CreateIntrinsic(Intrinsic::umin, {I.getType()}, {Op0, Y}));
2943
2944 // umax(X, Op1) - Op1 --> usub.sat(X, Op1)
2945 // TODO: The one-use restriction is not strictly necessary, but it may
2946 // require improving other pattern matching and/or codegen.
2947 if (match(Op0, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op1)))))
2948 return replaceInstUsesWith(
2949 I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op1}));
2950
2951 // Op0 - umin(X, Op0) --> usub.sat(Op0, X)
2952 if (match(Op1, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op0)))))
2953 return replaceInstUsesWith(
2954 I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op0, X}));
2955
2956 // Op0 - umax(X, Op0) --> 0 - usub.sat(X, Op0)
2957 if (match(Op1, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op0))))) {
2958 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op0});
2959 return BinaryOperator::CreateNeg(USub);
2960 }
2961
2962 // umin(X, Op1) - Op1 --> 0 - usub.sat(Op1, X)
2963 if (match(Op0, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op1))))) {
2964 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op1, X});
2965 return BinaryOperator::CreateNeg(USub);
2966 }
2967
2968 // C - ctpop(X) => ctpop(~X) if C is bitwidth
2969 if (match(Op0, m_SpecificInt(BitWidth)) &&
2970 match(Op1, m_OneUse(m_Ctpop(m_Value(X)))))
2971 return replaceInstUsesWith(
2972 I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
2973 {Builder.CreateNot(X)}));
2974
2975 // Reduce multiplies for difference-of-squares by factoring:
2976 // (X * X) - (Y * Y) --> (X + Y) * (X - Y)
2977 if (match(Op0, m_OneUse(m_Mul(m_Value(X), m_Deferred(X)))) &&
2978 match(Op1, m_OneUse(m_Mul(m_Value(Y), m_Deferred(Y))))) {
2979 auto *OBO0 = cast<OverflowingBinaryOperator>(Op0);
2980 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2981 bool PropagateNSW = I.hasNoSignedWrap() && OBO0->hasNoSignedWrap() &&
2982 OBO1->hasNoSignedWrap() && BitWidth > 2;
2983 bool PropagateNUW = I.hasNoUnsignedWrap() && OBO0->hasNoUnsignedWrap() &&
2984 OBO1->hasNoUnsignedWrap() && BitWidth > 1;
2985 Value *Add = Builder.CreateAdd(X, Y, "add", PropagateNUW, PropagateNSW);
2986 Value *Sub = Builder.CreateSub(X, Y, "sub", PropagateNUW, PropagateNSW);
2987 Value *Mul = Builder.CreateMul(Add, Sub, "", PropagateNUW, PropagateNSW);
2988 return replaceInstUsesWith(I, Mul);
2989 }
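// Standard factorization used above: X*X - Y*Y == (X + Y) * (X - Y).
// The BitWidth guards exist because at very small widths the factored
// form could wrap where the original products did not.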
2990
2991 // max(X,Y) nsw/nuw - min(X,Y) --> abs(X nsw - Y)
2992 if (match(Op0, m_OneUse(m_c_SMax(m_Value(X), m_Value(Y)))) &&
2993 match(Op1, m_OneUse(m_c_SMin(m_Specific(X), m_Specific(Y))))) {
2994 if (I.hasNoUnsignedWrap() || I.hasNoSignedWrap()) {
2995 Value *Sub =
2996 Builder.CreateSub(X, Y, "sub", /*HasNUW=*/false, /*HasNSW=*/true);
2997 Value *Call =
2998 Builder.CreateBinaryIntrinsic(Intrinsic::abs, Sub, Builder.getTrue());
2999 return replaceInstUsesWith(I, Call);
3000 }
3001 }
3002
3004 return Res;
3005
3006 // (sub (sext (add nsw (X, Y)), sext (X))) --> (sext (Y))
3007 if (match(Op1, m_SExtLike(m_Value(X))) &&
3008 match(Op0, m_SExtLike(m_c_NSWAdd(m_Specific(X), m_Value(Y))))) {
3009 Value *SExtY = Builder.CreateSExt(Y, I.getType());
3010 return replaceInstUsesWith(I, SExtY);
3011 }
3012
3013 // (sub[ nsw] (sext (add nsw (X, Y)), sext (add nsw (X, Z)))) -->
3014 // --> (sub[ nsw] (sext (Y), sext (Z)))
3015 {
3016 Value *Z, *Add0, *Add1;
3017 if (match(Op0, m_SExtLike(m_Value(Add0))) &&
3018 match(Op1, m_SExtLike(m_Value(Add1))) &&
3019 ((match(Add0, m_NSWAdd(m_Value(X), m_Value(Y))) &&
3020 match(Add1, m_c_NSWAdd(m_Specific(X), m_Value(Z)))) ||
3021 (match(Add0, m_NSWAdd(m_Value(Y), m_Value(X))) &&
3022 match(Add1, m_c_NSWAdd(m_Specific(X), m_Value(Z)))))) {
3023 unsigned NumOfNewInstrs = 0;
3024 // Non-constant Y, Z require new SExt.
3025 NumOfNewInstrs += !isa<Constant>(Y) ? 1 : 0;
3026 NumOfNewInstrs += !isa<Constant>(Z) ? 1 : 0;
3027 // Check if we can trade some of the old instructions for the new ones.
3028 unsigned NumOfDeadInstrs = 0;
3029 if (Op0->hasOneUse()) {
3030 // If Op0 (sext) has multiple uses, then we keep it
3031 // and the add that it uses, otherwise, we can remove
3032 // the sext and probably the add (depending on the number of its uses).
3033 ++NumOfDeadInstrs;
3034 NumOfDeadInstrs += Add0->hasOneUse() ? 1 : 0;
3035 }
3036 if (Op1->hasOneUse()) {
3037 ++NumOfDeadInstrs;
3038 NumOfDeadInstrs += Add1->hasOneUse() ? 1 : 0;
3039 }
3040 if (NumOfDeadInstrs >= NumOfNewInstrs) {
3041 Value *SExtY = Builder.CreateSExt(Y, I.getType());
3042 Value *SExtZ = Builder.CreateSExt(Z, I.getType());
3043 Value *Sub = Builder.CreateSub(SExtY, SExtZ, "",
3044 /*HasNUW=*/false,
3045 /*HasNSW=*/I.hasNoSignedWrap());
3046 return replaceInstUsesWith(I, Sub);
3047 }
3048 }
3049 }
3050
3051 return TryToNarrowDeduceFlags();
3052}
3053
3054/// This eliminates floating-point negation in either 'fneg(X)' or
3055/// 'fsub(-0.0, X)' form by combining into a constant operand.
3056 static Instruction *foldFNegIntoConstant(Instruction &I, const DataLayout &DL) {
3057 // This is limited with one-use because fneg is assumed better for
3058 // reassociation and cheaper in codegen than fmul/fdiv.
3059 // TODO: Should the m_OneUse restriction be removed?
3060 Instruction *FNegOp;
3061 if (!match(&I, m_FNeg(m_OneUse(m_Instruction(FNegOp)))))
3062 return nullptr;
3063
3064 Value *X;
3065 Constant *C;
3066
3067 // Fold negation into constant operand.
3068 // -(X * C) --> X * (-C)
3069 if (match(FNegOp, m_FMul(m_Value(X), m_Constant(C))))
3070 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) {
3071 FastMathFlags FNegF = I.getFastMathFlags();
3072 FastMathFlags OpF = FNegOp->getFastMathFlags();
3073 FastMathFlags FMF = FastMathFlags::unionValue(FNegF, OpF) |
3074 FastMathFlags::intersectRewrite(FNegF, OpF);
3075 FMF.setNoInfs(FNegF.noInfs() && OpF.noInfs());
3076 return BinaryOperator::CreateFMulFMF(X, NegC, FMF);
3077 }
3078 // -(X / C) --> X / (-C)
3079 if (match(FNegOp, m_FDiv(m_Value(X), m_Constant(C)))) {
3080 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) {
3081 BinaryOperator *FDiv = BinaryOperator::CreateFDivFMF(X, NegC, &I);
3082 
3083 // Intersect 'nsz' and 'ninf' because those special value exceptions may
3084 // not apply to the fdiv. Everything else propagates from the fneg.
3085 FastMathFlags FMF = I.getFastMathFlags();
3086 FastMathFlags OpFMF = FNegOp->getFastMathFlags();
3087 FDiv->setHasNoSignedZeros(FMF.noSignedZeros() && OpFMF.noSignedZeros());
3088 FDiv->setHasNoInfs(FMF.noInfs() && OpFMF.noInfs());
3089 FDiv->copyMetadata(*FNegOp);
3090 return FDiv;
3091 }
3092 }
3093 // -(C / X) --> (-C) / X
3094 if (match(FNegOp, m_FDiv(m_Constant(C), m_Value(X))))
3095 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) {
3096 BinaryOperator *FDiv = BinaryOperator::CreateFDivFMF(NegC, X, &I);
3097 
3098 // Intersect 'nsz' and 'ninf' because those special value exceptions may
3099 // not apply to the fdiv. Everything else propagates from the fneg.
3100 // TODO: We could propagate nsz/ninf from fdiv alone?
3101 FastMathFlags FMF = I.getFastMathFlags();
3102 FastMathFlags OpFMF = FNegOp->getFastMathFlags();
3103 FDiv->setHasNoSignedZeros(FMF.noSignedZeros() && OpFMF.noSignedZeros());
3104 FDiv->setHasNoInfs(FMF.noInfs() && OpFMF.noInfs());
3105 FDiv->copyMetadata(*FNegOp);
3106 return FDiv;
3107 }
3108 // With NSZ [ counter-example with -0.0: -(-0.0 + 0.0) != 0.0 + -0.0 ]:
3109 // -(X + C) --> -X + -C --> -C - X
3110 if (I.hasNoSignedZeros() && match(FNegOp, m_FAdd(m_Value(X), m_Constant(C))))
3111 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
3112 return BinaryOperator::CreateFSubFMF(NegC, X, &I);
3113
3114 return nullptr;
3115}
3116
3117Instruction *InstCombinerImpl::hoistFNegAboveFMulFDiv(Value *FNegOp,
3118 Instruction &FMFSource) {
3119 Value *X, *Y;
3120 if (match(FNegOp, m_FMul(m_Value(X), m_Value(Y)))) {
3121 // Push into RHS which is more likely to simplify (const or another fneg).
3122 // FIXME: It would be better to invert the transform.
3123 return cast<Instruction>(Builder.CreateFMulFMF(
3124 X, Builder.CreateFNegFMF(Y, &FMFSource), &FMFSource));
3125 }
3126
3127 if (match(FNegOp, m_FDiv(m_Value(X), m_Value(Y)))) {
3128 auto *FDiv = cast<Instruction>(Builder.CreateFDivFMF(
3129 Builder.CreateFNegFMF(X, &FMFSource), Y, &FMFSource));
3130 FDiv->copyMetadata(*cast<Instruction>(FNegOp));
3131 return FDiv;
3132 }
3133
3134 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(FNegOp)) {
3135 // Make sure to preserve flags and metadata on the call.
3136 if (II->getIntrinsicID() == Intrinsic::ldexp) {
3137 FastMathFlags FMF = FMFSource.getFastMathFlags() | II->getFastMathFlags();
3138 CallInst *New =
3139 Builder.CreateCall(II->getCalledFunction(),
3140 {Builder.CreateFNegFMF(II->getArgOperand(0), FMF),
3141 II->getArgOperand(1)});
3142 New->setFastMathFlags(FMF);
3143 New->copyMetadata(*II);
3144 return New;
3145 }
3146 }
3147
3148 return nullptr;
3149}
3150
3151 Instruction *InstCombinerImpl::visitFNeg(UnaryOperator &I) {
3152 Value *Op = I.getOperand(0);
3153
3154 if (Value *V = simplifyFNegInst(Op, I.getFastMathFlags(),
3155 getSimplifyQuery().getWithInstruction(&I)))
3156 return replaceInstUsesWith(I, V);
3157
3158 if (Instruction *X = foldFNegIntoConstant(I, DL))
3159 return X;
3160
3161 Value *X, *Y;
3162
3163 // If we can ignore the sign of zeros: -(X - Y) --> (Y - X)
3164 if (I.hasNoSignedZeros() &&
3165 match(Op, m_OneUse(m_FSub(m_Value(X), m_Value(Y)))))
3166 return BinaryOperator::CreateFSubFMF(Y, X, &I);
3167 
3168 Value *OneUse;
3169 if (!match(Op, m_OneUse(m_Value(OneUse))))
3170 return nullptr;
3171
3172 if (Instruction *R = hoistFNegAboveFMulFDiv(OneUse, I))
3173 return replaceInstUsesWith(I, R);
3174
3175 // Try to eliminate fneg if at least 1 arm of the select is negated.
3176 Value *Cond;
3177 if (match(OneUse, m_Select(m_Value(Cond), m_Value(X), m_Value(Y)))) {
3178 // Unlike most transforms, this one is not safe to propagate nsz unless
3179 // it is present on the original select. We union the flags from the select
3180 // and fneg and then remove nsz if needed.
3181 auto propagateSelectFMF = [&](SelectInst *S, bool CommonOperand) {
3182 S->copyFastMathFlags(&I);
3183 if (auto *OldSel = dyn_cast<SelectInst>(Op)) {
3184 FastMathFlags FMF = I.getFastMathFlags() | OldSel->getFastMathFlags();
3185 S->setFastMathFlags(FMF);
3186 if (!OldSel->hasNoSignedZeros() && !CommonOperand &&
3187 !isGuaranteedNotToBeUndefOrPoison(OldSel->getCondition()))
3188 S->setHasNoSignedZeros(false);
3189 }
3190 };
3191 // -(Cond ? -P : Y) --> Cond ? P : -Y
3192 Value *P;
3193 if (match(X, m_FNeg(m_Value(P)))) {
3194 Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");
3195 SelectInst *NewSel = SelectInst::Create(Cond, P, NegY);
3196 propagateSelectFMF(NewSel, P == Y);
3197 return NewSel;
3198 }
3199 // -(Cond ? X : -P) --> Cond ? -X : P
3200 if (match(Y, m_FNeg(m_Value(P)))) {
3201 Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");
3202 SelectInst *NewSel = SelectInst::Create(Cond, NegX, P);
3203 propagateSelectFMF(NewSel, P == X);
3204 return NewSel;
3205 }
3206
3207 // -(Cond ? X : C) --> Cond ? -X : -C
3208 // -(Cond ? C : Y) --> Cond ? -C : -Y
3209 if (match(X, m_ImmConstant()) || match(Y, m_ImmConstant())) {
3210 Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");
3211 Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");
3212 SelectInst *NewSel = SelectInst::Create(Cond, NegX, NegY);
3213 propagateSelectFMF(NewSel, /*CommonOperand=*/true);
3214 return NewSel;
3215 }
3216 }
3217
3218 // fneg (copysign x, y) -> copysign x, (fneg y)
3219 if (match(OneUse, m_CopySign(m_Value(X), m_Value(Y)))) {
3220 // The source copysign has an additional value input, so we can't propagate
3221 // flags the copysign doesn't also have.
3222 FastMathFlags FMF = I.getFastMathFlags();
3223 FMF &= cast<FPMathOperator>(OneUse)->getFastMathFlags();
3224 Value *NegY = Builder.CreateFNegFMF(Y, FMF);
3225 Value *NewCopySign = Builder.CreateCopySign(X, NegY, FMF);
3226 return replaceInstUsesWith(I, NewCopySign);
3227 }
3228
3229 // fneg (shuffle x, Mask) --> shuffle (fneg x), Mask
3230 ArrayRef<int> Mask;
3231 if (match(OneUse, m_Shuffle(m_Value(X), m_Poison(), m_Mask(Mask))))
3232 return new ShuffleVectorInst(Builder.CreateFNegFMF(X, &I), Mask);
3233
3234 // fneg (reverse x) --> reverse (fneg x)
3235 if (match(OneUse, m_VecReverse(m_Value(X)))) {
3236 Value *Reverse = Builder.CreateVectorReverse(Builder.CreateFNegFMF(X, &I));
3237 return replaceInstUsesWith(I, Reverse);
3238 }
3239
3240 return nullptr;
3241}
3242
3243 Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) {
3244 if (Value *V = simplifyFSubInst(I.getOperand(0), I.getOperand(1),
3245 I.getFastMathFlags(),
3246 getSimplifyQuery().getWithInstruction(&I)))
3247 return replaceInstUsesWith(I, V);
3248
3249 if (Instruction *X = foldVectorBinop(I))
3250 return X;
3251
3252 if (Instruction *Phi = foldBinopWithPhiOperands(I))
3253 return Phi;
3254
3255 // Subtraction from -0.0 is the canonical form of fneg.
3256 // fsub -0.0, X ==> fneg X
3257 // fsub nsz 0.0, X ==> fneg nsz X
3258 //
3259 // FIXME This matcher does not respect FTZ or DAZ yet:
3260 // fsub -0.0, Denorm ==> +-0
3261 // fneg Denorm ==> -Denorm
3262 Value *Op;
3263 if (match(&I, m_FNeg(m_Value(Op))))
3264 return UnaryOperator::CreateFNegFMF(Op, &I);
3265 
3266 if (Instruction *X = foldFNegIntoConstant(I, DL))
3267 return X;
3268
3269 if (Instruction *R = foldFBinOpOfIntCasts(I))
3270 return R;
3271
3272 Value *X, *Y;
3273 Constant *C;
3274
3275 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3276 // If Op0 is not -0.0 or we can ignore -0.0: Z - (X - Y) --> Z + (Y - X)
3277 // Canonicalize to fadd to make analysis easier.
3278 // This can also help codegen because fadd is commutative.
3279 // Note that if this fsub was really an fneg, the fadd with -0.0 will get
3280 // killed later. We still limit that particular transform with 'hasOneUse'
3281 // because an fneg is assumed better/cheaper than a generic fsub.
3282 if (I.hasNoSignedZeros() ||
3283 cannotBeNegativeZero(Op0, getSimplifyQuery().getWithInstruction(&I))) {
3284 if (match(Op1, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
3285 Value *NewSub = Builder.CreateFSubFMF(Y, X, &I);
3286 return BinaryOperator::CreateFAddFMF(Op0, NewSub, &I);
3287 }
3288 }
3289
3290 // (-X) - Op1 --> -(X + Op1)
3291 if (I.hasNoSignedZeros() && !isa<ConstantExpr>(Op0) &&
3292 match(Op0, m_OneUse(m_FNeg(m_Value(X))))) {
3293 Value *FAdd = Builder.CreateFAddFMF(X, Op1, &I);
3294 return UnaryOperator::CreateFNegFMF(FAdd, &I);
3295 }
3296
3297 if (isa<Constant>(Op0))
3298 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
3299 if (Instruction *NV = FoldOpIntoSelect(I, SI))
3300 return NV;
3301
3302 // X - C --> X + (-C)
3303 // But don't transform constant expressions because there's an inverse fold
3304 // for X + (-Y) --> X - Y.
3305 if (match(Op1, m_ImmConstant(C)))
3306 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
3307 return BinaryOperator::CreateFAddFMF(Op0, NegC, &I);
3308
3309 // X - (-Y) --> X + Y
3310 if (match(Op1, m_FNeg(m_Value(Y))))
3311 return BinaryOperator::CreateFAddFMF(Op0, Y, &I);
3312
3313 // Similar to above, but look through a cast of the negated value:
3314 // X - (fptrunc(-Y)) --> X + fptrunc(Y)
3315 Type *Ty = I.getType();
3316 if (match(Op1, m_OneUse(m_FPTrunc(m_FNeg(m_Value(Y))))))
3317 return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPTrunc(Y, Ty), &I);
3318
3319 // X - (fpext(-Y)) --> X + fpext(Y)
3320 if (match(Op1, m_OneUse(m_FPExt(m_FNeg(m_Value(Y))))))
3321 return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPExt(Y, Ty), &I);
3322
3323 // Similar to above, but look through fmul/fdiv of the negated value:
3324 // Op0 - (-X * Y) --> Op0 + (X * Y)
3325 // Op0 - (Y * -X) --> Op0 + (X * Y)
3326 if (match(Op1, m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))))) {
3327 Value *FMul = Builder.CreateFMulFMF(X, Y, &I);
3328 return BinaryOperator::CreateFAddFMF(Op0, FMul, &I);
3329 }
3330 // Op0 - (-X / Y) --> Op0 + (X / Y)
3331 // Op0 - (X / -Y) --> Op0 + (X / Y)
3332 if (match(Op1, m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y)))) ||
3333 match(Op1, m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))))) {
3334 Value *FDiv = Builder.CreateFDivFMF(X, Y, &I);
3335 return BinaryOperator::CreateFAddFMF(Op0, FDiv, &I);
3336 }
3337
  // Handle special cases for FSub with selects feeding the operation
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);
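  // E.g. a subtraction of two selects over one condition may become a single
  // select when both new arms simplify:
  //   (select %c, %a, %b) - (select %c, %x, %y)
  //     --> select %c, (%a - %x), (%b - %y)
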
  if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
    // (Y - X) - Y --> -X
    if (match(Op0, m_FSub(m_Specific(Op1), m_Value(X))))
      return UnaryOperator::CreateFNegFMF(X, &I);

    // Y - (X + Y) --> -X
    // Y - (Y + X) --> -X
    if (match(Op1, m_c_FAdd(m_Specific(Op0), m_Value(X))))
      return UnaryOperator::CreateFNegFMF(X, &I);
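    // E.g. %a = fadd float %y, %x; %r = fsub reassoc nsz float %y, %a
    // folds to %r = fneg reassoc nsz float %x.
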
    // (X * C) - X --> X * (C - 1.0)
    if (match(Op0, m_FMul(m_Specific(Op1), m_Constant(C)))) {
      if (Constant *CSubOne = ConstantFoldBinaryOpOperands(
              Instruction::FSub, C, ConstantFP::get(Ty, 1.0), DL))
        return BinaryOperator::CreateFMulFMF(Op1, CSubOne, &I);
    }
    // X - (X * C) --> X * (1.0 - C)
    if (match(Op1, m_FMul(m_Specific(Op0), m_Constant(C)))) {
      if (Constant *OneSubC = ConstantFoldBinaryOpOperands(
              Instruction::FSub, ConstantFP::get(Ty, 1.0), C, DL))
        return BinaryOperator::CreateFMulFMF(Op0, OneSubC, &I);
    }
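    // E.g. the first fold above with C == 3.0:
    //   %m = fmul float %x, 3.0
    //   %r = fsub reassoc nsz float %m, %x
    // -->
    //   %r = fmul reassoc nsz float %x, 2.0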

    // Reassociate fsub/fadd sequences to create more fadd instructions and
    // reduce dependency chains:
    // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
    Value *Z;
    if (match(Op0, m_OneUse(m_c_FAdd(m_OneUse(m_FSub(m_Value(X), m_Value(Y))),
                                     m_Value(Z))))) {
      Value *XZ = Builder.CreateFAddFMF(X, Z, &I);
      Value *YW = Builder.CreateFAddFMF(Y, Op1, &I);
      return BinaryOperator::CreateFSubFMF(XZ, YW, &I);
    }
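    // E.g. ((%x - %y) + %z) - %w --> (%x + %z) - (%y + %w); the two new fadds
    // are independent, shortening the dependency chain.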

    auto m_FaddRdx = [](Value *&Sum, Value *&Vec) {
      return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(m_Value(Sum),
                                                                 m_Value(Vec)));
    };
    Value *A0, *A1, *V0, *V1;
    if (match(Op0, m_FaddRdx(A0, V0)) && match(Op1, m_FaddRdx(A1, V1)) &&
        V0->getType() == V1->getType()) {
      // Difference of sums is sum of differences:
      // add_rdx(A0, V0) - add_rdx(A1, V1) --> add_rdx(A0, V0 - V1) - A1
      Value *Sub = Builder.CreateFSubFMF(V0, V1, &I);
      Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
                                           {Sub->getType()}, {A0, Sub}, &I);
      return BinaryOperator::CreateFSubFMF(Rdx, A1, &I);
    }
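    // The reduction fold above as an IR sketch (flags abbreviated to 'fast'):
    //   %r0 = call fast float @llvm.vector.reduce.fadd.v4f32(float %a0, <4 x float> %v0)
    //   %r1 = call fast float @llvm.vector.reduce.fadd.v4f32(float %a1, <4 x float> %v1)
    //   %r  = fsub fast float %r0, %r1
    // -->
    //   %d   = fsub fast <4 x float> %v0, %v1
    //   %rdx = call fast float @llvm.vector.reduce.fadd.v4f32(float %a0, <4 x float> %d)
    //   %r   = fsub fast float %rdx, %a1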

    if (Instruction *F = factorizeFAddFSub(I, Builder))
      return F;

    // TODO: This performs reassociative folds for FP ops. Some fraction of the
    // functionality has been subsumed by simple pattern matching here and in
    // InstSimplify. We should let a dedicated reassociation pass handle more
    // complex pattern matching and remove this from InstCombine.
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);

    // (X - Y) - Op1 --> X - (Y + Op1)
    if (match(Op0, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
      Value *FAdd = Builder.CreateFAddFMF(Y, Op1, &I);
      return BinaryOperator::CreateFSubFMF(X, FAdd, &I);
    }
  }

  return nullptr;
}