//===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for add, fadd, sub, and fsub.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <cassert>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

namespace {

  /// Class representing the coefficient of a floating-point addend.
  /// This class needs to be highly efficient, which is especially true for
  /// the constructor. As of this writing, the cost of the default
  /// constructor is merely 4-byte-store-zero (assuming the compiler is able
  /// to perform write-merging).
  ///
  class FAddendCoef {
  public:
    // The constructor has to initialize an APFloat, which is unnecessary for
    // most addends which have coefficient either 1 or -1. So, the constructor
    // is expensive. In order to avoid the cost of the constructor, we should
    // reuse some instances whenever possible. The pre-created instances
    // FAddCombine::Add[0-5] embody this idea.
    FAddendCoef() = default;
    ~FAddendCoef();

    // If possible, don't define operator+/operator- etc because these
    // operators inevitably call FAddendCoef's constructor which is not cheap.
    void operator=(const FAddendCoef &A);
    void operator+=(const FAddendCoef &A);
    void operator*=(const FAddendCoef &S);

    void set(short C) {
      assert(!insaneIntVal(C) && "Insane coefficient");
      IsFp = false; IntVal = C;
    }

    void set(const APFloat& C);

    void negate();

    bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
    Value *getValue(Type *) const;

    bool isOne() const { return isInt() && IntVal == 1; }
    bool isTwo() const { return isInt() && IntVal == 2; }
    bool isMinusOne() const { return isInt() && IntVal == -1; }
    bool isMinusTwo() const { return isInt() && IntVal == -2; }

  private:
    bool insaneIntVal(int V) { return V > 4 || V < -4; }

    APFloat *getFpValPtr() { return reinterpret_cast<APFloat *>(&FpValBuf); }

    const APFloat *getFpValPtr() const {
      return reinterpret_cast<const APFloat *>(&FpValBuf);
    }

    const APFloat &getFpVal() const {
      assert(IsFp && BufHasFpVal && "Incorrect state");
      return *getFpValPtr();
    }

    APFloat &getFpVal() {
      assert(IsFp && BufHasFpVal && "Incorrect state");
      return *getFpValPtr();
    }

    bool isInt() const { return !IsFp; }

    // If the coefficient is represented by an integer, promote it to a
    // floating point.
    void convertToFpType(const fltSemantics &Sem);

    // Construct an APFloat from a signed integer.
    // TODO: We should get rid of this function when APFloat can be constructed
    // from a *SIGNED* integer.
    APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);

    bool IsFp = false;

    // True iff FpValBuf contains an instance of APFloat.
    bool BufHasFpVal = false;

    // The integer coefficient of an individual addend is either 1 or -1,
    // and we try to simplify at most 4 addends from at most two neighboring
    // instructions. So the range of <IntVal> falls in [-4, 4]. APInt would
    // be overkill for this purpose.
    short IntVal = 0;

    AlignedCharArrayUnion<APFloat> FpValBuf;
  };

  /// FAddend is used to represent a floating-point addend. An addend is
  /// represented as <C, V>, where V is a symbolic value, and C is a
  /// constant coefficient. A constant addend is represented as <C, 0>.
  class FAddend {
  public:
    FAddend() = default;

    void operator+=(const FAddend &T) {
      assert((Val == T.Val) && "Symbolic-values disagree");
      Coeff += T.Coeff;
    }

    Value *getSymVal() const { return Val; }
    const FAddendCoef &getCoef() const { return Coeff; }

    bool isConstant() const { return Val == nullptr; }
    bool isZero() const { return Coeff.isZero(); }

    void set(short Coefficient, Value *V) {
      Coeff.set(Coefficient);
      Val = V;
    }
    void set(const APFloat &Coefficient, Value *V) {
      Coeff.set(Coefficient);
      Val = V;
    }
    void set(const ConstantFP *Coefficient, Value *V) {
      Coeff.set(Coefficient->getValueAPF());
      Val = V;
    }

    void negate() { Coeff.negate(); }

    /// Drill down the U-D chain one step to find the definition of V, and
    /// try to break the definition into one or two addends.
    static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);

    /// Similar to FAddend::drillValueDownOneStep() except that the value
    /// being split is the addend itself.
    unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;

  private:
    void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }

    // This addend has the value of "Coeff * Val".
    Value *Val = nullptr;
    FAddendCoef Coeff;
  };

  /// FAddCombine is the class for optimizing an unsafe fadd/fsub along
  /// with at most two of its neighboring instructions.
  ///
  class FAddCombine {
  public:
    FAddCombine(InstCombiner::BuilderTy &B) : Builder(B) {}

    Value *simplify(Instruction *FAdd);

  private:
    using AddendVect = SmallVector<const FAddend *, 4>;

    Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);

    /// Convert given addend to a Value
    Value *createAddendVal(const FAddend &A, bool& NeedNeg);

    /// Return the number of instructions needed to emit the N-ary addition.
    unsigned calcInstrNumber(const AddendVect& Vect);

    Value *createFSub(Value *Opnd0, Value *Opnd1);
    Value *createFAdd(Value *Opnd0, Value *Opnd1);
    Value *createFMul(Value *Opnd0, Value *Opnd1);
    Value *createFNeg(Value *V);
    Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
    void createInstPostProc(Instruction *NewInst, bool NoNumber = false);

    // Debugging support is clustered here.
    #ifndef NDEBUG
    unsigned CreateInstrNum;
    void initCreateInstNum() { CreateInstrNum = 0; }
    void incCreateInstNum() { CreateInstrNum++; }
    #else
    void initCreateInstNum() {}
    void incCreateInstNum() {}
    #endif

    InstCombiner::BuilderTy &Builder;
    Instruction *Instr = nullptr;
  };

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//
// Implementation of
//    {FAddendCoef, FAddend, FAddCombine}.
//
//===----------------------------------------------------------------------===//
FAddendCoef::~FAddendCoef() {
  if (BufHasFpVal)
    getFpValPtr()->~APFloat();
}

void FAddendCoef::set(const APFloat& C) {
  APFloat *P = getFpValPtr();

  if (isInt()) {
    // As the buffer is a meaningless byte stream, we cannot call
    // APFloat::operator=().
    new(P) APFloat(C);
  } else
    *P = C;

  IsFp = BufHasFpVal = true;
}

void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
  if (!isInt())
    return;

  APFloat *P = getFpValPtr();
  if (IntVal > 0)
    new(P) APFloat(Sem, IntVal);
  else {
    new(P) APFloat(Sem, 0 - IntVal);
    P->changeSign();
  }
  IsFp = BufHasFpVal = true;
}

APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
  if (Val >= 0)
    return APFloat(Sem, Val);

  APFloat T(Sem, 0 - Val);
  T.changeSign();

  return T;
}

void FAddendCoef::operator=(const FAddendCoef &That) {
  if (That.isInt())
    set(That.IntVal);
  else
    set(That.getFpVal());
}

void FAddendCoef::operator+=(const FAddendCoef &That) {
  RoundingMode RndMode = RoundingMode::NearestTiesToEven;
  if (isInt() == That.isInt()) {
    if (isInt())
      IntVal += That.IntVal;
    else
      getFpVal().add(That.getFpVal(), RndMode);
    return;
  }

  if (isInt()) {
    const APFloat &T = That.getFpVal();
    convertToFpType(T.getSemantics());
    getFpVal().add(T, RndMode);
    return;
  }

  APFloat &T = getFpVal();
  T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
}

void FAddendCoef::operator*=(const FAddendCoef &That) {
  if (That.isOne())
    return;

  if (That.isMinusOne()) {
    negate();
    return;
  }

  if (isInt() && That.isInt()) {
    int Res = IntVal * (int)That.IntVal;
    assert(!insaneIntVal(Res) && "Insane int value");
    IntVal = Res;
    return;
  }

  const fltSemantics &Semantic =
      isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();

  if (isInt())
    convertToFpType(Semantic);
  APFloat &F0 = getFpVal();

  if (That.isInt())
    F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
                APFloat::rmNearestTiesToEven);
  else
    F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
}

void FAddendCoef::negate() {
  if (isInt())
    IntVal = 0 - IntVal;
  else
    getFpVal().changeSign();
}

Value *FAddendCoef::getValue(Type *Ty) const {
  return isInt() ?
    ConstantFP::get(Ty, float(IntVal)) :
    ConstantFP::get(Ty->getContext(), getFpVal());
}

// The definition of <Val>         Addends
// =========================================
//  A + B                         <1, A>, <1, B>
//  A - B                         <1, A>, <-1, B>
//  0 - B                         <-1, B>
//  C * A                         <C, A>
//  A + C                         <1, A>, <C, NULL>
//  0 +/- 0                       <0, NULL> (corner case)
//
// Legend: A and B are not constant, C is constant
unsigned FAddend::drillValueDownOneStep
  (Value *Val, FAddend &Addend0, FAddend &Addend1) {
  Instruction *I = nullptr;
  if (!Val || !(I = dyn_cast<Instruction>(Val)))
    return 0;

  unsigned Opcode = I->getOpcode();

  if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
    ConstantFP *C0, *C1;
    Value *Opnd0 = I->getOperand(0);
    Value *Opnd1 = I->getOperand(1);
    if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
      Opnd0 = nullptr;

    if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
      Opnd1 = nullptr;

    if (Opnd0) {
      if (!C0)
        Addend0.set(1, Opnd0);
      else
        Addend0.set(C0, nullptr);
    }

    if (Opnd1) {
      FAddend &Addend = Opnd0 ? Addend1 : Addend0;
      if (!C1)
        Addend.set(1, Opnd1);
      else
        Addend.set(C1, nullptr);
      if (Opcode == Instruction::FSub)
        Addend.negate();
    }

    if (Opnd0 || Opnd1)
      return Opnd0 && Opnd1 ? 2 : 1;

    // Both operands are zero. Weird!
    Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
    return 1;
  }

  if (I->getOpcode() == Instruction::FMul) {
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
      Addend0.set(C, V1);
      return 1;
    }

    if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
      Addend0.set(C, V0);
      return 1;
    }
  }

  return 0;
}

// Try to break *this* addend into two addends. For example, suppose this
// addend is <2.3, V> and V = X + Y; by calling this function we obtain two
// addends, i.e. <2.3, X> and <2.3, Y>.
unsigned FAddend::drillAddendDownOneStep
  (FAddend &Addend0, FAddend &Addend1) const {
  if (isConstant())
    return 0;

  unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
  if (!BreakNum || Coeff.isOne())
    return BreakNum;

  Addend0.Scale(Coeff);

  if (BreakNum == 2)
    Addend1.Scale(Coeff);

  return BreakNum;
}

Value *FAddCombine::simplify(Instruction *I) {
  assert(I->hasAllowReassoc() && I->hasNoSignedZeros() &&
         "Expected 'reassoc'+'nsz' instruction");

  // Currently we are not able to handle vector types.
  if (I->getType()->isVectorTy())
    return nullptr;

  assert((I->getOpcode() == Instruction::FAdd ||
          I->getOpcode() == Instruction::FSub) && "Expect add/sub");

  // Save the instruction before calling other member functions.
  Instr = I;

  FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;

  unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);

  // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1.
  unsigned Opnd0_ExpNum = 0;
  unsigned Opnd1_ExpNum = 0;

  if (!Opnd0.isConstant())
    Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);

  // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1.
  if (OpndNum == 2 && !Opnd1.isConstant())
    Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);

  // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1
  if (Opnd0_ExpNum && Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0_0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    // Compute instruction quota. We should save at least one instruction.
    unsigned InstQuota = 0;

    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
                 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;

    if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
      return R;
  }

  if (OpndNum != 2) {
    // The input instruction is : "I=0.0 +/- V". If "V" could be split into
    // two addends, say "V = X - Y", the instruction would have been optimized
    // into "I = Y - X" in the previous steps.
    //
    const FAddendCoef &CE = Opnd0.getCoef();
    return CE.isOne() ? Opnd0.getSymVal() : nullptr;
  }

  // Step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
  if (Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1]
  if (Opnd0_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd1);
    AllOpnds.push_back(&Opnd0_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  return nullptr;
}
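
// Illustrative sketch (not from the original source): on IR such as
//   %t = fadd fast float %x, %x
//   %r = fadd fast float %t, %x
// FAddCombine::simplify() drills %r down into the addends <2, %x> and
// <1, %x>, folds them into the single addend <3, %x>, and re-emits it as
//   %r = fmul fast float %x, 3.0
// spending one instruction where two were spent before.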

Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
  unsigned AddendNum = Addends.size();
  assert(AddendNum <= 4 && "Too many addends");

  // For saving intermediate results.
  unsigned NextTmpIdx = 0;
  FAddend TmpResult[3];

  // Simplified addends are placed in <SimpVect>.
  AddendVect SimpVect;

  // The outer loop works on one symbolic-value at a time. Suppose the input
  // addends are : <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
  // The symbolic-values will be processed in this order: x, y, z.
  for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {

    const FAddend *ThisAddend = Addends[SymIdx];
    if (!ThisAddend) {
      // This addend was processed before.
      continue;
    }

    Value *Val = ThisAddend->getSymVal();

    // If the resulting expression has a constant addend, it is desirable for
    // that constant to reside at the top of the resulting expression tree.
    // Placing constants close to super-expr(s) will potentially reveal some
    // optimization opportunities in super-expr(s). Here we intentionally do
    // not implement this logic and instead rely on the later
    // SimplifyAssociativeOrCommutative call.

    unsigned StartIdx = SimpVect.size();
    SimpVect.push_back(ThisAddend);

    // The inner loop collects addends sharing the same symbolic value, and
    // these addends will later be folded into a single addend. Following the
    // example above, if the symbolic value "y" is being processed, the inner
    // loop will collect the two addends "<b1, y>" and "<b2, y>". They will
    // later be folded into "<b1+b2, y>".
    for (unsigned SameSymIdx = SymIdx + 1;
         SameSymIdx < AddendNum; SameSymIdx++) {
      const FAddend *T = Addends[SameSymIdx];
      if (T && T->getSymVal() == Val) {
        // Set null such that next iteration of the outer loop will not process
        // this addend again.
        Addends[SameSymIdx] = nullptr;
        SimpVect.push_back(T);
      }
    }

    // If multiple addends share the same symbolic value, fold them together.
    if (StartIdx + 1 != SimpVect.size()) {
      FAddend &R = TmpResult[NextTmpIdx ++];
      R = *SimpVect[StartIdx];
      for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
        R += *SimpVect[Idx];

      // Pop all addends being folded and push the resulting folded addend.
      SimpVect.resize(StartIdx);
      if (!R.isZero()) {
        SimpVect.push_back(&R);
      }
    }
  }

  assert((NextTmpIdx <= std::size(TmpResult) + 1) && "out-of-bound access");

  Value *Result;
  if (!SimpVect.empty())
    Result = createNaryFAdd(SimpVect, InstrQuota);
  else {
    // The addition is folded to 0.0.
    Result = ConstantFP::get(Instr->getType(), 0.0);
  }

  return Result;
}

Value *FAddCombine::createNaryFAdd
  (const AddendVect &Opnds, unsigned InstrQuota) {
  assert(!Opnds.empty() && "Expect at least one addend");

  // Step 1: Check if the # of instructions needed exceeds the quota.

  unsigned InstrNeeded = calcInstrNumber(Opnds);
  if (InstrNeeded > InstrQuota)
    return nullptr;

  initCreateInstNum();

  // Step 2: Emit the N-ary addition.
  // Note that at most three instructions are involved in Fadd-InstCombine: the
  // addition in question, and at most two neighboring instructions.
  // The resulting optimized addition should have at least one fewer
  // instruction than the original addition expression tree. This implies that
  // the resulting N-ary addition has at most two instructions, and we don't
  // need to worry about tree-height when constructing the N-ary addition.

  Value *LastVal = nullptr;
  bool LastValNeedNeg = false;

  // Iterate the addends, creating fadd/fsub using two adjacent addends.
  for (const FAddend *Opnd : Opnds) {
    bool NeedNeg;
    Value *V = createAddendVal(*Opnd, NeedNeg);
    if (!LastVal) {
      LastVal = V;
      LastValNeedNeg = NeedNeg;
      continue;
    }

    if (LastValNeedNeg == NeedNeg) {
      LastVal = createFAdd(LastVal, V);
      continue;
    }

    if (LastValNeedNeg)
      LastVal = createFSub(V, LastVal);
    else
      LastVal = createFSub(LastVal, V);

    LastValNeedNeg = false;
  }

  if (LastValNeedNeg) {
    LastVal = createFNeg(LastVal);
  }

#ifndef NDEBUG
  assert(CreateInstrNum == InstrNeeded &&
         "Inconsistent instruction numbers");
#endif

  return LastVal;
}

Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFSub(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFNeg(Value *V) {
  Value *NewV = Builder.CreateFNeg(V);
  if (Instruction *I = dyn_cast<Instruction>(NewV))
    createInstPostProc(I, true); // fneg's don't receive instruction numbers.
  return NewV;
}

Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFAdd(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFMul(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
  NewInstr->setDebugLoc(Instr->getDebugLoc());

  // Keep track of the number of instructions created.
  if (!NoNumber)
    incCreateInstNum();

  // Propagate fast-math flags.
  NewInstr->setFastMathFlags(Instr->getFastMathFlags());
}

// Return the number of instructions needed to emit the N-ary addition.
// NOTE: Keep this function in sync with createAddendVal().
unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
  unsigned OpndNum = Opnds.size();
  unsigned InstrNeeded = OpndNum - 1;

  // Adjust the number of instructions needed to emit the N-ary add.
  for (const FAddend *Opnd : Opnds) {
    if (Opnd->isConstant())
      continue;

    // The constant check above is really for a few special constant
    // coefficients.
    if (isa<UndefValue>(Opnd->getSymVal()))
      continue;

    const FAddendCoef &CE = Opnd->getCoef();
    // Let the addend be "c * x". If "c == +/-1", the value of the addend
    // is immediately available; otherwise, it needs exactly one instruction
    // to evaluate the value.
    if (!CE.isMinusOne() && !CE.isOne())
      InstrNeeded++;
  }
  return InstrNeeded;
}

// Input Addend       Value           NeedNeg(output)
// ================================================================
// Constant C         C               false
// <+/-1, V>          V               coefficient is -1
// <2/-2, V>          "fadd V, V"     coefficient is -2
// <C, V>             "fmul V, C"     false
//
// NOTE: Keep this function in sync with FAddCombine::calcInstrNumber.
Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
  const FAddendCoef &Coeff = Opnd.getCoef();

  if (Opnd.isConstant()) {
    NeedNeg = false;
    return Coeff.getValue(Instr->getType());
  }

  Value *OpndVal = Opnd.getSymVal();

  if (Coeff.isMinusOne() || Coeff.isOne()) {
    NeedNeg = Coeff.isMinusOne();
    return OpndVal;
  }

  if (Coeff.isTwo() || Coeff.isMinusTwo()) {
    NeedNeg = Coeff.isMinusTwo();
    return createFAdd(OpndVal, OpndVal);
  }

  NeedNeg = false;
  return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}

// Checks if any operand is negative and we can convert add to sub.
// This function checks for the following negative patterns
//   ADD(XOR(OR(Z, NOT(C)), C), 1) == NEG(AND(Z, C))
//   ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
//   XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
static Value *checkForNegativeOperand(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  // This function creates 2 instructions to replace ADD, we need at least one
  // of LHS or RHS to have one use to ensure benefit in the transform.
  if (!LHS->hasOneUse() && !RHS->hasOneUse())
    return nullptr;

  Value *X = nullptr, *Y = nullptr, *Z = nullptr;
  const APInt *C1 = nullptr, *C2 = nullptr;

  // if ONE is on the other side, swap
  if (match(RHS, m_Add(m_Value(X), m_One())))
    std::swap(LHS, RHS);

  if (match(LHS, m_Add(m_Value(X), m_One()))) {
    // if XOR is on the other side, swap
    if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
      std::swap(X, RHS);

    if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
      // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
      // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
      if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
        Value *NewAnd = Builder.CreateAnd(Z, *C1);
        return Builder.CreateSub(RHS, NewAnd, "sub");
      } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
        // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
        // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
        Value *NewOr = Builder.CreateOr(Z, ~(*C1));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
    }
  }

  // Restore LHS and RHS
  LHS = I.getOperand(0);
  RHS = I.getOperand(1);

  // if XOR is on the other side, swap
  if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
    std::swap(LHS, RHS);

  // C1 is odd (so C2 = C1 - 1 is even)
  // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
  // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
  if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
    if (C1->countr_zero() == 0)
      if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
        Value *NewOr = Builder.CreateOr(Z, ~(*C2));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
  return nullptr;
}
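
// Illustrative sketch (not from the original source): with Z = %z and the
// constant C = 4, the second pattern above rewrites
//   %y = and i8 %z, 4
//   %x = xor i8 %y, 4           ; NOT(%z | ~4)
//   %a = add i8 %x, 1           ; two's complement: ~v + 1 == -v
//   %r = add i8 %a, %rhs
// into the two-instruction form
//   %or = or i8 %z, -5          ; %z | ~4
//   %r  = sub i8 %rhs, %or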

/// Wrapping flags may allow combining constants separated by an extend.
static Instruction *foldNoWrapAdd(BinaryOperator &Add,
                                  InstCombiner::BuilderTy &Builder) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  Type *Ty = Add.getType();
  Constant *Op1C;
  if (!match(Op1, m_Constant(Op1C)))
    return nullptr;

  // Try this match first because it results in an add in the narrow type.
  // (zext (X +nuw C2)) + C1 --> zext (X + (C2 + trunc(C1)))
  Value *X;
  const APInt *C1, *C2;
  if (match(Op1, m_APInt(C1)) &&
      match(Op0, m_OneUse(m_ZExt(m_NUWAddLike(m_Value(X), m_APInt(C2))))) &&
      C1->isNegative() && C1->sge(-C2->sext(C1->getBitWidth()))) {
    Constant *NewC =
        ConstantInt::get(X->getType(), *C2 + C1->trunc(C2->getBitWidth()));
    return new ZExtInst(Builder.CreateNUWAdd(X, NewC), Ty);
  }

  // More general combining of constants in the wide type.
  // (sext (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C)
  // or (zext nneg (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C)
  Constant *NarrowC;
  if (match(Op0, m_OneUse(m_SExtLike(
                     m_NSWAddLike(m_Value(X), m_Constant(NarrowC)))))) {
    Value *WideC = Builder.CreateSExt(NarrowC, Ty);
    Value *NewC = Builder.CreateAdd(WideC, Op1C);
    Value *WideX = Builder.CreateSExt(X, Ty);
    return BinaryOperator::CreateAdd(WideX, NewC);
  }
  // (zext (X +nuw NarrowC)) + C --> (zext X) + (zext(NarrowC) + C)
  if (match(Op0,
            m_OneUse(m_ZExt(m_NUWAddLike(m_Value(X), m_Constant(NarrowC)))))) {
    Value *WideC = Builder.CreateZExt(NarrowC, Ty);
    Value *NewC = Builder.CreateAdd(WideC, Op1C);
    Value *WideX = Builder.CreateZExt(X, Ty);
    return BinaryOperator::CreateAdd(WideX, NewC);
  }
  return nullptr;
}
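
// Illustrative sketch (not from the original source): the sext fold above
// turns
//   %a = add nsw i8 %x, 10
//   %w = sext i8 %a to i32
//   %r = add i32 %w, 100
// into
//   %wx = sext i8 %x to i32
//   %r  = add i32 %wx, 110      ; sext(10) + 100 combined in the wide type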

Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  Type *Ty = Add.getType();
  Constant *Op1C;
  if (!match(Op1, m_ImmConstant(Op1C)))
    return nullptr;

  if (Instruction *NV = foldBinOpIntoSelectOrPhi(Add))
    return NV;

  Value *X;
  Constant *Op00C;

  // add (sub C1, X), C2 --> sub (add C1, C2), X
  if (match(Op0, m_Sub(m_Constant(Op00C), m_Value(X))))
    return BinaryOperator::CreateSub(ConstantExpr::getAdd(Op00C, Op1C), X);

  Value *Y;

  // add (sub X, Y), -1 --> add (not Y), X
  if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y)))) &&
      match(Op1, m_AllOnes()))
    return BinaryOperator::CreateAdd(Builder.CreateNot(Y), X);

  // zext(bool) + C -> bool ? C + 1 : C
  if (match(Op0, m_ZExt(m_Value(X))) &&
      X->getType()->getScalarSizeInBits() == 1)
    return SelectInst::Create(X, InstCombiner::AddOne(Op1C), Op1);
  // sext(bool) + C -> bool ? C - 1 : C
  if (match(Op0, m_SExt(m_Value(X))) &&
      X->getType()->getScalarSizeInBits() == 1)
    return SelectInst::Create(X, InstCombiner::SubOne(Op1C), Op1);

  // ~X + C --> (C-1) - X
  if (match(Op0, m_Not(m_Value(X)))) {
    // ~X + C has NSW and (C-1) won't overflow => (C-1)-X can have NSW
    auto *COne = ConstantInt::get(Op1C->getType(), 1);
    bool WillNotSOV = willNotOverflowSignedSub(Op1C, COne, Add);
    BinaryOperator *Res =
        BinaryOperator::CreateSub(ConstantExpr::getSub(Op1C, COne), X);
    Res->setHasNoSignedWrap(Add.hasNoSignedWrap() && WillNotSOV);
    return Res;
  }

  // (iN X s>> (N - 1)) + 1 --> zext (X > -1)
  const APInt *C;
  unsigned BitWidth = Ty->getScalarSizeInBits();
  if (match(Op0, m_OneUse(m_AShr(m_Value(X),
                                 m_SpecificIntAllowPoison(BitWidth - 1)))) &&
      match(Op1, m_One()))
    return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty);

  if (!match(Op1, m_APInt(C)))
    return nullptr;

  // (X | Op01C) + Op1C --> X + (Op01C + Op1C) iff the `or` is actually an `add`
  Constant *Op01C;
  if (match(Op0, m_DisjointOr(m_Value(X), m_ImmConstant(Op01C))))
    return BinaryOperator::CreateAdd(X, ConstantExpr::getAdd(Op01C, Op1C));

  // (X | C2) + C --> (X | C2) ^ C2 iff (C2 == -C)
  const APInt *C2;
  if (match(Op0, m_Or(m_Value(), m_APInt(C2))) && *C2 == -*C)
    return BinaryOperator::CreateXor(Op0, ConstantInt::get(Add.getType(), *C2));

  if (C->isSignMask()) {
    // If wrapping is not allowed, then the addition must set the sign bit:
    // X + (signmask) --> X | signmask
    if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap())
      return BinaryOperator::CreateOr(Op0, Op1);

    // If wrapping is allowed, then the addition flips the sign bit of LHS:
    // X + (signmask) --> X ^ signmask
    return BinaryOperator::CreateXor(Op0, Op1);
  }

  // Is this add the last step in a convoluted sext?
  // add(zext(xor i16 X, -32768), -32768) --> sext X
  if (match(Op0, m_ZExt(m_Xor(m_Value(X), m_APInt(C2)))) &&
      C2->isMinSignedValue() && C2->sext(Ty->getScalarSizeInBits()) == *C)
    return CastInst::Create(Instruction::SExt, X, Ty);

  if (match(Op0, m_Xor(m_Value(X), m_APInt(C2)))) {
    // (X ^ signmask) + C --> (X + (signmask ^ C))
    if (C2->isSignMask())
      return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C2 ^ *C));

    // If X has no high-bits set above an xor mask:
    // add (xor X, LowMaskC), C --> sub (LowMaskC + C), X
    if (C2->isMask()) {
      KnownBits LHSKnown = computeKnownBits(X, 0, &Add);
      if ((*C2 | LHSKnown.Zero).isAllOnes())
        return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C2 + *C), X);
    }

    // Look for a math+logic pattern that corresponds to sext-in-register of a
    // value with cleared high bits. Convert that into a pair of shifts:
    // add (xor X, 0x80), 0xF..F80 --> (X << ShAmtC) >>s ShAmtC
    // add (xor X, 0xF..F80), 0x80 --> (X << ShAmtC) >>s ShAmtC
    if (Op0->hasOneUse() && *C2 == -(*C)) {
      unsigned BitWidth = Ty->getScalarSizeInBits();
      unsigned ShAmt = 0;
      if (C->isPowerOf2())
        ShAmt = BitWidth - C->logBase2() - 1;
      else if (C2->isPowerOf2())
        ShAmt = BitWidth - C2->logBase2() - 1;
      if (ShAmt > 0 &&
          MaskedValueIsZero(X, APInt::getHighBitsSet(BitWidth, ShAmt),
                            0, &Add)) {
        Constant *ShAmtC = ConstantInt::get(Ty, ShAmt);
        Value *NewShl = Builder.CreateShl(X, ShAmtC, "sext");
        return BinaryOperator::CreateAShr(NewShl, ShAmtC);
      }
    }
  }

  if (C->isOne() && Op0->hasOneUse()) {
    // add (sext i1 X), 1 --> zext (not X)
    // TODO: The smallest IR representation is (select X, 0, 1), and that would
    // not require the one-use check. But we need to remove a transform in
    // visitSelect and make sure that IR value tracking for select is equal or
    // better than for these ops.
    if (match(Op0, m_SExt(m_Value(X))) &&
        X->getType()->getScalarSizeInBits() == 1)
      return new ZExtInst(Builder.CreateNot(X), Ty);

    // Shifts and add used to flip and mask off the low bit:
    // add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1
    const APInt *C3;
    if (match(Op0, m_AShr(m_Shl(m_Value(X), m_APInt(C2)), m_APInt(C3))) &&
        C2 == C3 && *C2 == Ty->getScalarSizeInBits() - 1) {
      Value *NotX = Builder.CreateNot(X);
      return BinaryOperator::CreateAnd(NotX, ConstantInt::get(Ty, 1));
    }
  }

  // Fold (add (zext (add X, -1)), 1) -> (zext X) if X is non-zero.
  // TODO: There's a general form for any constant on the outer add.
  if (C->isOne()) {
    if (match(Op0, m_ZExt(m_Add(m_Value(X), m_AllOnes())))) {
      const SimplifyQuery Q = SQ.getWithInstruction(&Add);
      if (llvm::isKnownNonZero(X, Q))
        return new ZExtInst(X, Ty);
    }
  }

  return nullptr;
}
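
// Illustrative sketch (not from the original source): the sign-mask cases
// above rewrite
//   %r = add i8 %x, -128        ; no nsw/nuw, the sign bit simply flips
// into
//   %r = xor i8 %x, -128
// and, when the add carries a no-wrap flag (so the sign bit must become set),
//   %r = add nuw i8 %x, -128 --> %r = or i8 %x, -128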

// Match variations of a^2 + 2*a*b + b^2
//
// To reuse the code between the FP and Int versions, the instruction OpCodes
// and constant types have been turned into template parameters.
//
// Mul2Rhs: The constant to perform the multiplicative equivalent of X*2 with;
// should be `m_SpecificFP(2.0)` for FP and `m_SpecificInt(1)` for Int
// (we're matching `X<<1` instead of `X*2` for Int)
template <bool FP, typename Mul2Rhs>
static bool matchesSquareSum(BinaryOperator &I, Mul2Rhs M2Rhs, Value *&A,
                             Value *&B) {
  constexpr unsigned MulOp = FP ? Instruction::FMul : Instruction::Mul;
  constexpr unsigned AddOp = FP ? Instruction::FAdd : Instruction::Add;
  constexpr unsigned Mul2Op = FP ? Instruction::FMul : Instruction::Shl;

  // (a * a) + (((a * 2) + b) * b)
  if (match(&I, m_c_BinOp(
                    AddOp, m_OneUse(m_BinOp(MulOp, m_Value(A), m_Deferred(A))),
                    m_OneUse(m_c_BinOp(
                        MulOp,
                        m_c_BinOp(AddOp, m_BinOp(Mul2Op, m_Deferred(A), M2Rhs),
                                  m_Value(B)),
                        m_Deferred(B))))))
    return true;

  // ((a * b) * 2)  or  ((a * 2) * b)
  // +
  // (a * a + b * b)  or  (b * b + a * a)
  return match(
      &I, m_c_BinOp(
              AddOp,
              m_CombineOr(
                  m_OneUse(m_BinOp(
                      Mul2Op, m_BinOp(MulOp, m_Value(A), m_Value(B)), M2Rhs)),
                  m_OneUse(m_c_BinOp(MulOp, m_BinOp(Mul2Op, m_Value(A), M2Rhs),
                                     m_Value(B)))),
              m_OneUse(
                  m_c_BinOp(AddOp, m_BinOp(MulOp, m_Deferred(A), m_Deferred(A)),
                            m_BinOp(MulOp, m_Deferred(B), m_Deferred(B))))));
}

// Fold integer variations of a^2 + 2*a*b + b^2 -> (a + b)^2
Instruction *InstCombinerImpl::foldSquareSumInt(BinaryOperator &I) {
  Value *A, *B;
  if (matchesSquareSum</*FP*/ false>(I, m_SpecificInt(1), A, B)) {
    Value *AB = Builder.CreateAdd(A, B);
    return BinaryOperator::CreateMul(AB, AB);
  }
  return nullptr;
}
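
// Illustrative sketch (not from the original source): the integer fold above
// collapses
//   %aa = mul i32 %a, %a
//   %a2 = shl i32 %a, 1
//   %s  = add i32 %a2, %b
//   %sb = mul i32 %s, %b
//   %r  = add i32 %aa, %sb      ; a*a + (2*a + b)*b
// into
//   %ab = add i32 %a, %b
//   %r  = mul i32 %ab, %ab      ; (a + b)^2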

// Fold floating point variations of a^2 + 2*a*b + b^2 -> (a + b)^2
// Requires `nsz` and `reassoc`.
Instruction *InstCombinerImpl::foldSquareSumFP(BinaryOperator &I) {
  assert(I.hasAllowReassoc() && I.hasNoSignedZeros() && "Assumption mismatch");
  Value *A, *B;
  if (matchesSquareSum</*FP*/ true>(I, m_SpecificFP(2.0), A, B)) {
    Value *AB = Builder.CreateFAddFMF(A, B, &I);
    return BinaryOperator::CreateFMulFMF(AB, AB, &I);
  }
  return nullptr;
}

// Matches multiplication expression Op * C where C is a constant. Returns the
// constant value in C and the other operand in Op. Returns true if such a
// match is found.
static bool MatchMul(Value *E, Value *&Op, APInt &C) {
  const APInt *AI;
  if (match(E, m_Mul(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (match(E, m_Shl(m_Value(Op), m_APInt(AI)))) {
    C = APInt(AI->getBitWidth(), 1);
    C <<= *AI;
    return true;
  }
  return false;
}

// Matches remainder expression Op % C where C is a constant. Returns the
// constant value in C and the other operand in Op. Returns the signedness of
// the remainder operation in IsSigned. Returns true if such a match is
// found.
static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned) {
  const APInt *AI;
  IsSigned = false;
  if (match(E, m_SRem(m_Value(Op), m_APInt(AI)))) {
    IsSigned = true;
    C = *AI;
    return true;
  }
  if (match(E, m_URem(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (match(E, m_And(m_Value(Op), m_APInt(AI))) && (*AI + 1).isPowerOf2()) {
    C = *AI + 1;
    return true;
  }
  return false;
}

// Matches division expression Op / C with the given signedness as indicated
// by IsSigned, where C is a constant. Returns the constant value in C and the
// other operand in Op. Returns true if such a match is found.
static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned) {
  const APInt *AI;
  if (IsSigned && match(E, m_SDiv(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (!IsSigned) {
    if (match(E, m_UDiv(m_Value(Op), m_APInt(AI)))) {
      C = *AI;
      return true;
    }
    if (match(E, m_LShr(m_Value(Op), m_APInt(AI)))) {
      C = APInt(AI->getBitWidth(), 1);
      C <<= *AI;
      return true;
    }
  }
  return false;
}

// Returns whether C0 * C1 with the given signedness overflows.
static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned) {
  bool overflow;
  if (IsSigned)
    (void)C0.smul_ov(C1, overflow);
  else
    (void)C0.umul_ov(C1, overflow);
  return overflow;
}

// Simplifies X % C0 + (( X / C0 ) % C1) * C0 to X % (C0 * C1), where (C0 * C1)
// does not overflow.
// Simplifies (X / C0) * C1 + (X % C0) * C2 to
// (X / C0) * (C1 - C2 * C0) + X * C2
Value *InstCombinerImpl::SimplifyAddWithRemainder(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *X, *MulOpV;
  APInt C0, MulOpC;
  bool IsSigned;
  // Match I = X % C0 + MulOpV * C0
  if (((MatchRem(LHS, X, C0, IsSigned) && MatchMul(RHS, MulOpV, MulOpC)) ||
       (MatchRem(RHS, X, C0, IsSigned) && MatchMul(LHS, MulOpV, MulOpC))) &&
      C0 == MulOpC) {
    Value *RemOpV;
    APInt C1;
    bool Rem2IsSigned;
    // Match MulOpC = RemOpV % C1
    if (MatchRem(MulOpV, RemOpV, C1, Rem2IsSigned) &&
        IsSigned == Rem2IsSigned) {
      Value *DivOpV;
      APInt DivOpC;
      // Match RemOpV = X / C0
      if (MatchDiv(RemOpV, DivOpV, DivOpC, IsSigned) && X == DivOpV &&
          C0 == DivOpC && !MulWillOverflow(C0, C1, IsSigned)) {
        Value *NewDivisor = ConstantInt::get(X->getType(), C0 * C1);
        return IsSigned ? Builder.CreateSRem(X, NewDivisor, "srem")
                        : Builder.CreateURem(X, NewDivisor, "urem");
      }
    }
  }

  // Match I = (X / C0) * C1 + (X % C0) * C2
  Value *Div, *Rem;
  APInt C1, C2;
  if (!LHS->hasOneUse() || !MatchMul(LHS, Div, C1))
    Div = LHS, C1 = APInt(I.getType()->getScalarSizeInBits(), 1);
  if (!RHS->hasOneUse() || !MatchMul(RHS, Rem, C2))
    Rem = RHS, C2 = APInt(I.getType()->getScalarSizeInBits(), 1);
  if (match(Div, m_IRem(m_Value(), m_Value()))) {
    std::swap(Div, Rem);
    std::swap(C1, C2);
  }
  Value *DivOpV;
  APInt DivOpC;
  if (MatchRem(Rem, X, C0, IsSigned) &&
      MatchDiv(Div, DivOpV, DivOpC, IsSigned) && X == DivOpV && C0 == DivOpC) {
    APInt NewC = C1 - C2 * C0;
    if (!NewC.isZero() && !Rem->hasOneUse())
      return nullptr;
    if (!isGuaranteedNotToBeUndef(X, &AC, &I, &DT))
      return nullptr;
    Value *MulXC2 = Builder.CreateMul(X, ConstantInt::get(X->getType(), C2));
    if (NewC.isZero())
      return MulXC2;
    return Builder.CreateAdd(
        Builder.CreateMul(Div, ConstantInt::get(X->getType(), NewC)), MulXC2);
  }

  return nullptr;
}
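
// Illustrative sketch (not from the original source): the first pattern above
// merges a digit-decomposed remainder back together, e.g.
//   %r1 = urem i32 %x, 3
//   %d  = udiv i32 %x, 3
//   %r2 = urem i32 %d, 5
//   %m  = mul i32 %r2, 3
//   %r  = add i32 %r1, %m       ; x % 3 + ((x / 3) % 5) * 3
// becomes
//   %r  = urem i32 %x, 15       ; valid because 3 * 5 does not overflow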

/// Fold
/// (1 << NBits) - 1
/// Into:
/// ~(-(1 << NBits))
/// Because a 'not' is better for bit-tracking analysis and other transforms
/// than an 'add'. The new shl is always nsw, and is nuw if the old `add` was.
static Instruction *canonicalizeLowbitMask(BinaryOperator &I,
                                           InstCombiner::BuilderTy &Builder) {
  Value *NBits;
  if (!match(&I, m_Add(m_OneUse(m_Shl(m_One(), m_Value(NBits))), m_AllOnes())))
    return nullptr;

  Constant *MinusOne = Constant::getAllOnesValue(NBits->getType());
  Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask");
  // Be wary of constant folding.
  if (auto *BOp = dyn_cast<BinaryOperator>(NotMask)) {
    // Always NSW. But NUW propagates from `add`.
    BOp->setHasNoSignedWrap();
    BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
  }

  return BinaryOperator::CreateNot(NotMask, I.getName());
}
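
// Illustrative sketch (not from the original source): this canonicalization
// rewrites the low-bit mask computation
//   %m = shl i32 1, %n
//   %r = add i32 %m, -1               ; (1 << n) - 1
// as
//   %notmask = shl nsw i32 -1, %n     ; -(1 << n)
//   %r       = xor i32 %notmask, -1   ; ~(-1 << n)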

static Instruction *foldToUnsignedSaturatedAdd(BinaryOperator &I) {
  assert(I.getOpcode() == Instruction::Add && "Expecting add instruction");
  Type *Ty = I.getType();
  auto getUAddSat = [&]() {
    return Intrinsic::getDeclaration(I.getModule(), Intrinsic::uadd_sat, Ty);
  };

  // add (umin X, ~Y), Y --> uaddsat X, Y
  Value *X, *Y;
  if (match(&I, m_c_Add(m_c_UMin(m_Value(X), m_Not(m_Value(Y))),
                        m_Deferred(Y))))
    return CallInst::Create(getUAddSat(), { X, Y });

  // add (umin X, ~C), C --> uaddsat X, C
  const APInt *C, *NotC;
  if (match(&I, m_Add(m_UMin(m_Value(X), m_APInt(NotC)), m_APInt(C))) &&
      *C == ~*NotC)
    return CallInst::Create(getUAddSat(), { X, ConstantInt::get(Ty, *C) });

  return nullptr;
}
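
// Illustrative sketch (not from the original source): clamping before adding
// is a saturating add, so
//   %ny = xor i32 %y, -1
//   %c  = call i32 @llvm.umin.i32(i32 %x, i32 %ny)   ; min(x, ~y)
//   %r  = add i32 %c, %y
// becomes
//   %r  = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)
// since min(x, ~y) + y is x + y when it fits and UINT_MAX otherwise.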

// Transform:
//  (add A, (shl (neg B), Y))
//      -> (sub A, (shl B, Y))
static Instruction *combineAddSubWithShlAddSub(InstCombiner::BuilderTy &Builder,
                                               const BinaryOperator &I) {
  Value *A, *B, *Cnt;
  if (match(&I,
            m_c_Add(m_OneUse(m_Shl(m_OneUse(m_Neg(m_Value(B))), m_Value(Cnt))),
                    m_Value(A)))) {
    Value *NewShl = Builder.CreateShl(B, Cnt);
    return BinaryOperator::CreateSub(A, NewShl);
  }
  return nullptr;
}

/// Try to reduce signed division by power-of-2 to an arithmetic shift right.
static Instruction *foldAddToAshr(BinaryOperator &Add) {
  // Division must be by power-of-2, but not the minimum signed value.
  Value *X;
  const APInt *DivC;
  if (!match(Add.getOperand(0), m_SDiv(m_Value(X), m_Power2(DivC))) ||
      DivC->isNegative())
    return nullptr;

  // Rounding is done by adding -1 if the dividend (X) is negative and has any
  // low bits set. It recognizes two canonical patterns:
  // 1. For an 'ugt' cmp with the signed minimum value (SMIN), the
  //    pattern is: sext (icmp ugt (X & (DivC - 1)), SMIN).
  // 2. For an 'eq' cmp, the pattern is: sext (icmp eq X & (SMIN + 1), SMIN + 1).
  // Note that, by the time we end up here, if possible, ugt has been
  // canonicalized into eq.
  const APInt *MaskC, *MaskCCmp;
  ICmpInst::Predicate Pred;
  if (!match(Add.getOperand(1),
             m_SExt(m_ICmp(Pred, m_And(m_Specific(X), m_APInt(MaskC)),
                           m_APInt(MaskCCmp)))))
    return nullptr;

  if ((Pred != ICmpInst::ICMP_UGT || !MaskCCmp->isSignMask()) &&
      (Pred != ICmpInst::ICMP_EQ || *MaskCCmp != *MaskC))
    return nullptr;

  APInt SMin = APInt::getSignedMinValue(Add.getType()->getScalarSizeInBits());
  bool IsMaskValid = Pred == ICmpInst::ICMP_UGT
                         ? (*MaskC == (SMin | (*DivC - 1)))
                         : (*DivC == 2 && *MaskC == SMin + 1);
  if (!IsMaskValid)
    return nullptr;

  // (X / DivC) + sext ((X & (SMin | (DivC - 1)) >u SMin) --> X >>s log2(DivC)
  return BinaryOperator::CreateAShr(
      X, ConstantInt::get(Add.getType(), DivC->exactLogBase2()));
}
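
// Illustrative sketch (not from the original source): for a divisor of 4,
// the rounding-adjusted signed division
//   %d   = sdiv i32 %x, 4
//   %a   = and i32 %x, -2147483645        ; SMIN | (4 - 1)
//   %cmp = icmp ugt i32 %a, -2147483648   ; negative and has low bits set
//   %s   = sext i1 %cmp to i32            ; -1 when rounding is needed
//   %r   = add i32 %d, %s
// is recognized as a floor division and becomes
//   %r   = ashr i32 %x, 2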

Instruction *InstCombinerImpl::
    canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
        BinaryOperator &I) {
  assert((I.getOpcode() == Instruction::Add ||
          I.getOpcode() == Instruction::Or ||
          I.getOpcode() == Instruction::Sub) &&
         "Expecting add/or/sub instruction");

  // We have a subtraction/addition between a (potentially truncated) *logical*
  // right-shift of X and a "select".
  Value *X, *Select;
  Instruction *LowBitsToSkip, *Extract;
  if (!match(&I, m_c_BinOp(m_TruncOrSelf(m_CombineAnd(
                               m_LShr(m_Value(X), m_Instruction(LowBitsToSkip)),
                               m_Instruction(Extract))),
                           m_Value(Select))))
    return nullptr;

  // `add`/`or` is commutative; but for `sub`, "select" *must* be on RHS.
  if (I.getOpcode() == Instruction::Sub && I.getOperand(1) != Select)
    return nullptr;

  Type *XTy = X->getType();
  bool HadTrunc = I.getType() != XTy;

  // If there was a truncation of the extracted value, then we'll need to
  // produce one extra instruction, so we need to ensure one instruction will
  // go away.
  if (HadTrunc && !match(&I, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
    return nullptr;

  // Extraction should extract high NBits bits, with shift amount calculated as:
  //   low bits to skip = shift bitwidth - high bits to extract
  // The shift amount itself may be extended, and we need to look past zero-ext
  // when matching NBits; that will matter for matching later.
  Constant *C;
  Value *NBits;
  if (!match(
          LowBitsToSkip,
          m_ZExtOrSelf(m_Sub(m_Constant(C), m_ZExtOrSelf(m_Value(NBits))))) ||
      !match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
                                   APInt(C->getType()->getScalarSizeInBits(),
                                         X->getType()->getScalarSizeInBits()))))
    return nullptr;

  // The sign-extending value can be zero-extended if we `sub`tract it,
  // or sign-extended otherwise.
  auto SkipExtInMagic = [&I](Value *&V) {
    if (I.getOpcode() == Instruction::Sub)
      match(V, m_ZExtOrSelf(m_Value(V)));
    else
      match(V, m_SExtOrSelf(m_Value(V)));
  };

  // Now, finally validate the sign-extending magic.
  // `select` itself may be appropriately extended, look past that.
  SkipExtInMagic(Select);

  ICmpInst::Predicate Pred;
  const APInt *Thr;
  Value *SignExtendingValue, *Zero;
  bool ShouldSignext;
  // It must be a select between two values we will later establish to be a
  // sign-extending value and a zero constant. The condition guarding the
  // sign-extension must be based on a sign bit of the same X we had in `lshr`.
  if (!match(Select, m_Select(m_ICmp(Pred, m_Specific(X), m_APInt(Thr)),
                              m_Value(SignExtendingValue), m_Value(Zero))) ||
      !isSignBitCheck(Pred, *Thr, ShouldSignext))
    return nullptr;

  // icmp-select pair is commutative.
  if (!ShouldSignext)
    std::swap(SignExtendingValue, Zero);

  // If we should not perform sign-extension then we must add/or/subtract zero.
  if (!match(Zero, m_Zero()))
    return nullptr;
  // Otherwise, it should be some constant, left-shifted by the same NBits we
  // had in `lshr`. Said left-shift can also be appropriately extended.
  // Again, we must look past zero-ext when looking for NBits.
  SkipExtInMagic(SignExtendingValue);
  Constant *SignExtendingValueBaseConstant;
  if (!match(SignExtendingValue,
             m_Shl(m_Constant(SignExtendingValueBaseConstant),
                   m_ZExtOrSelf(m_Specific(NBits)))))
    return nullptr;
  // If we `sub`, then the constant should be one, else it should be all-ones.
  if (I.getOpcode() == Instruction::Sub
          ? !match(SignExtendingValueBaseConstant, m_One())
          : !match(SignExtendingValueBaseConstant, m_AllOnes()))
    return nullptr;

  auto *NewAShr = BinaryOperator::CreateAShr(X, LowBitsToSkip,
                                             Extract->getName() + ".sext");
  NewAShr->copyIRFlags(Extract); // Preserve `exact`-ness.
  if (!HadTrunc)
    return NewAShr;

  Builder.Insert(NewAShr);
  return TruncInst::CreateTruncOrBitCast(NewAShr, I.getType());
}

/// This is a specialization of a more general transform from
/// foldUsingDistributiveLaws. If that code can be made to work optimally
/// for multi-use cases or propagating nsw/nuw, then we would not need this.
static Instruction *factorizeMathWithShlOps(BinaryOperator &I,
                                            InstCombiner::BuilderTy &Builder) {
  // TODO: Also handle mul by doubling the shift amount?
  assert((I.getOpcode() == Instruction::Add ||
          I.getOpcode() == Instruction::Sub) &&
         "Expected add/sub");
  auto *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
  auto *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
  if (!Op0 || !Op1 || !(Op0->hasOneUse() || Op1->hasOneUse()))
    return nullptr;

  Value *X, *Y, *ShAmt;
  if (!match(Op0, m_Shl(m_Value(X), m_Value(ShAmt))) ||
      !match(Op1, m_Shl(m_Value(Y), m_Specific(ShAmt))))
    return nullptr;

  // No-wrap propagates only when all ops have no-wrap.
  bool HasNSW = I.hasNoSignedWrap() && Op0->hasNoSignedWrap() &&
                Op1->hasNoSignedWrap();
  bool HasNUW = I.hasNoUnsignedWrap() && Op0->hasNoUnsignedWrap() &&
                Op1->hasNoUnsignedWrap();

  // add/sub (X << ShAmt), (Y << ShAmt) --> (add/sub X, Y) << ShAmt
  Value *NewMath = Builder.CreateBinOp(I.getOpcode(), X, Y);
  if (auto *NewI = dyn_cast<BinaryOperator>(NewMath)) {
    NewI->setHasNoSignedWrap(HasNSW);
    NewI->setHasNoUnsignedWrap(HasNUW);
  }
  auto *NewShl = BinaryOperator::CreateShl(NewMath, ShAmt);
  NewShl->setHasNoSignedWrap(HasNSW);
  NewShl->setHasNoUnsignedWrap(HasNUW);
  return NewShl;
}

/// Reduce a sequence of masked half-width multiplies to a single multiply.
/// (((XLow * YHigh) + (YLow * XHigh)) << HalfBits) + (XLow * YLow) --> X * Y
Instruction *InstCombinerImpl::foldBoxMultiply(BinaryOperator &I) {
  unsigned BitWidth = I.getType()->getScalarSizeInBits();
  // Skip the odd bitwidth types.
  if ((BitWidth & 0x1))
    return nullptr;

  unsigned HalfBits = BitWidth >> 1;
  APInt HalfMask = APInt::getMaxValue(HalfBits);

  // ResLo = (CrossSum << HalfBits) + (YLo * XLo)
  Value *XLo, *YLo;
  Value *CrossSum;
  // Require one-use on the multiply to avoid increasing the number of
  // multiplications.
  if (!match(&I, m_c_Add(m_Shl(m_Value(CrossSum), m_SpecificInt(HalfBits)),
                         m_OneUse(m_Mul(m_Value(YLo), m_Value(XLo))))))
    return nullptr;

  // XLo = X & HalfMask
  // YLo = Y & HalfMask
  // TODO: Refactor with SimplifyDemandedBits or KnownBits known leading zeros
  // to enhance robustness
  Value *X, *Y;
  if (!match(XLo, m_And(m_Value(X), m_SpecificInt(HalfMask))) ||
      !match(YLo, m_And(m_Value(Y), m_SpecificInt(HalfMask))))
    return nullptr;

  // CrossSum = (X' * (Y >> HalfBits)) + (Y' * (X >> HalfBits))
  // X' can be either X or XLo in the pattern (and the same for Y')
  if (match(CrossSum,
            m_c_Add(m_c_Mul(m_LShr(m_Specific(Y), m_SpecificInt(HalfBits)),
                            m_CombineOr(m_Specific(X), m_Specific(XLo))),
                    m_c_Mul(m_LShr(m_Specific(X), m_SpecificInt(HalfBits)),
                            m_CombineOr(m_Specific(Y), m_Specific(YLo))))))
    return BinaryOperator::CreateMul(X, Y);

  return nullptr;
}
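
// Illustrative sketch (not from the original source): for i32 with 16-bit
// halves, the schoolbook low-part multiply
//   %xl = and i32 %x, 65535
//   %yl = and i32 %y, 65535
//   %xh = lshr i32 %x, 16
//   %yh = lshr i32 %y, 16
//   %m1 = mul i32 %yh, %xl
//   %m2 = mul i32 %xh, %yl
//   %cs = add i32 %m1, %m2
//   %hi = shl i32 %cs, 16
//   %lo = mul i32 %xl, %yl
//   %r  = add i32 %hi, %lo
// is recognized as the low 32 bits of the full product and folded to
//   %r  = mul i32 %x, %y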

Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
  if (Value *V = simplifyAddInst(I.getOperand(0), I.getOperand(1),
                                 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  // (A*B)+(A*C) -> A*(B+C) etc
  if (Value *V = foldUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  if (Instruction *R = foldBoxMultiply(I))
    return R;

  if (Instruction *R = factorizeMathWithShlOps(I, Builder))
    return R;

  if (Instruction *X = foldAddWithConstant(I))
    return X;

  if (Instruction *X = foldNoWrapAdd(I, Builder))
    return X;

  if (Instruction *R = foldBinOpShiftWithShift(I))
    return R;

  if (Instruction *R = combineAddSubWithShlAddSub(Builder, I))
    return R;

  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Type *Ty = I.getType();
  if (Ty->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateXor(LHS, RHS);

  // X + X --> X << 1
  if (LHS == RHS) {
    auto *Shl = BinaryOperator::CreateShl(LHS, ConstantInt::get(Ty, 1));
    Shl->setHasNoSignedWrap(I.hasNoSignedWrap());
    Shl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    return Shl;
  }

  Value *A, *B;
  if (match(LHS, m_Neg(m_Value(A)))) {
    // -A + -B --> -(A + B)
    if (match(RHS, m_Neg(m_Value(B))))
      return BinaryOperator::CreateNeg(Builder.CreateAdd(A, B));

    // -A + B --> B - A
    auto *Sub = BinaryOperator::CreateSub(RHS, A);
    auto *OB0 = cast<OverflowingBinaryOperator>(LHS);
    Sub->setHasNoSignedWrap(I.hasNoSignedWrap() && OB0->hasNoSignedWrap());

    return Sub;
  }

  // A + -B --> A - B
  if (match(RHS, m_Neg(m_Value(B))))
    return BinaryOperator::CreateSub(LHS, B);

  if (Value *V = checkForNegativeOperand(I, Builder))
    return replaceInstUsesWith(I, V);

  // (A + 1) + ~B --> A - B
  // ~B + (A + 1) --> A - B
  // (~B + A) + 1 --> A - B
  // (A + ~B) + 1 --> A - B
  if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_One()), m_Not(m_Value(B)))) ||
      match(&I, m_BinOp(m_c_Add(m_Not(m_Value(B)), m_Value(A)), m_One())))
    return BinaryOperator::CreateSub(A, B);

  // (A + RHS) + RHS --> A + (RHS << 1)
  if (match(LHS, m_OneUse(m_c_Add(m_Value(A), m_Specific(RHS)))))
    return BinaryOperator::CreateAdd(A, Builder.CreateShl(RHS, 1, "reass.add"));

  // LHS + (A + LHS) --> A + (LHS << 1)
  if (match(RHS, m_OneUse(m_c_Add(m_Value(A), m_Specific(LHS)))))
    return BinaryOperator::CreateAdd(A, Builder.CreateShl(LHS, 1, "reass.add"));

  {
    // (A + C1) + (C2 - B) --> (A - B) + (C1 + C2)
    Constant *C1, *C2;
    if (match(&I, m_c_Add(m_Add(m_Value(A), m_ImmConstant(C1)),
                          m_Sub(m_ImmConstant(C2), m_Value(B)))) &&
        (LHS->hasOneUse() || RHS->hasOneUse())) {
      Value *Sub = Builder.CreateSub(A, B);
      return BinaryOperator::CreateAdd(Sub, ConstantExpr::getAdd(C1, C2));
    }

    // Canonicalize a constant sub operand as an add operand for better folding:
    // (C1 - A) + B --> (B - A) + C1
    if (match(&I, m_c_Add(m_OneUse(m_Sub(m_ImmConstant(C1), m_Value(A))),
                          m_Value(B)))) {
      Value *Sub = Builder.CreateSub(B, A, "reass.sub");
      return BinaryOperator::CreateAdd(Sub, C1);
    }
  }

  // X % C0 + (( X / C0 ) % C1) * C0 => X % (C0 * C1)
  if (Value *V = SimplifyAddWithRemainder(I)) return replaceInstUsesWith(I, V);

  // ((X s/ C1) << C2) + X => X s% -C1 where -C1 is 1 << C2
  const APInt *C1, *C2;
  if (match(LHS, m_Shl(m_SDiv(m_Specific(RHS), m_APInt(C1)), m_APInt(C2)))) {
    APInt one(C2->getBitWidth(), 1);
    APInt minusC1 = -(*C1);
    if (minusC1 == (one << *C2)) {
      Constant *NewRHS = ConstantInt::get(RHS->getType(), minusC1);
      return BinaryOperator::CreateSRem(RHS, NewRHS);
    }
  }

  // (A & 2^C1) + A => A & (2^C1 - 1) iff bit C1 in A is a sign bit
  if (match(&I, m_c_Add(m_And(m_Value(A), m_APInt(C1)), m_Deferred(A))) &&
      C1->isPowerOf2() && (ComputeNumSignBits(A) > C1->countl_zero())) {
    Constant *NewMask = ConstantInt::get(RHS->getType(), *C1 - 1);
    return BinaryOperator::CreateAnd(A, NewMask);
  }

  // ZExt (B - A) + ZExt(A) --> ZExt(B)
  if ((match(RHS, m_ZExt(m_Value(A))) &&
       match(LHS, m_ZExt(m_NUWSub(m_Value(B), m_Specific(A))))) ||
      (match(LHS, m_ZExt(m_Value(A))) &&
       match(RHS, m_ZExt(m_NUWSub(m_Value(B), m_Specific(A))))))
    return new ZExtInst(B, LHS->getType());

  // zext(A) + sext(A) --> 0 if A is i1
  if (match(&I, m_c_Add(m_ZExt(m_Value(A)), m_SExt(m_Deferred(A)))) &&
      A->getType()->isIntOrIntVectorTy(1))
    return replaceInstUsesWith(I, Constant::getNullValue(I.getType()));

  // A+B --> A|B iff A and B have no bits set in common.
  WithCache<const Value *> LHSCache(LHS), RHSCache(RHS);
  if (haveNoCommonBitsSet(LHSCache, RHSCache, SQ.getWithInstruction(&I)))
    return BinaryOperator::CreateDisjointOr(LHS, RHS);

  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  // (add (xor A, B) (and A, B)) --> (or A, B)
  // (add (and A, B) (xor A, B)) --> (or A, B)
  if (match(&I, m_c_BinOp(m_Xor(m_Value(A), m_Value(B)),
                          m_c_And(m_Deferred(A), m_Deferred(B)))))
    return BinaryOperator::CreateOr(A, B);

  // (add (or A, B) (and A, B)) --> (add A, B)
  // (add (and A, B) (or A, B)) --> (add A, B)
  if (match(&I, m_c_BinOp(m_Or(m_Value(A), m_Value(B)),
                          m_c_And(m_Deferred(A), m_Deferred(B))))) {
    // Replacing operands in-place to preserve nuw/nsw flags.
    replaceOperand(I, 0, A);
    replaceOperand(I, 1, B);
    return &I;
  }

  // (add A (or A, -A)) --> (and (add A, -1) A)
  // (add A (or -A, A)) --> (and (add A, -1) A)
  // (add (or A, -A) A) --> (and (add A, -1) A)
  // (add (or -A, A) A) --> (and (add A, -1) A)
  if (match(&I, m_c_BinOp(m_Value(A), m_OneUse(m_c_Or(m_Neg(m_Deferred(A)),
                                                      m_Deferred(A)))))) {
    Value *Add =
        Builder.CreateAdd(A, Constant::getAllOnesValue(A->getType()), "",
                          I.hasNoUnsignedWrap(), I.hasNoSignedWrap());
    return BinaryOperator::CreateAnd(Add, A);
  }

  // Canonicalize ((A & -A) - 1) --> ((A - 1) & ~A)
  // Forms all commutable operations, and simplifies ctpop -> cttz folds.
  if (match(&I,
            m_Add(m_OneUse(m_c_And(m_Value(A), m_OneUse(m_Neg(m_Deferred(A))))),
                  m_AllOnes()))) {
    Constant *AllOnes = ConstantInt::getAllOnesValue(RHS->getType());
    Value *Dec = Builder.CreateAdd(A, AllOnes);
    Value *Not = Builder.CreateXor(A, AllOnes);
    return BinaryOperator::CreateAnd(Dec, Not);
  }

  // Disguised reassociation/factorization:
  // ~(A * C1) + A
  // ((A * -C1) - 1) + A
  // ((A * -C1) + A) - 1
  // (A * (1 - C1)) - 1
  if (match(&I,
            m_c_Add(m_OneUse(m_Not(m_OneUse(m_Mul(m_Value(A), m_APInt(C1))))),
                    m_Deferred(A)))) {
    Type *Ty = I.getType();
    Constant *NewMulC = ConstantInt::get(Ty, 1 - *C1);
    Value *NewMul = Builder.CreateMul(A, NewMulC);
    return BinaryOperator::CreateAdd(NewMul, ConstantInt::getAllOnesValue(Ty));
  }

  // (A * -2**C) + B --> B - (A << C)
  const APInt *NegPow2C;
  if (match(&I, m_c_Add(m_OneUse(m_Mul(m_Value(A), m_NegatedPower2(NegPow2C))),
                        m_Value(B)))) {
    Constant *ShiftAmtC = ConstantInt::get(Ty, NegPow2C->countr_zero());
    Value *Shl = Builder.CreateShl(A, ShiftAmtC);
    return BinaryOperator::CreateSub(B, Shl);
  }

  // Canonicalize signum variant that ends in add:
  // (A s>> (BW - 1)) + (zext (A s> 0)) --> (A s>> (BW - 1)) | (zext (A != 0))
  ICmpInst::Predicate Pred;
  uint64_t BitWidth = Ty->getScalarSizeInBits();
  if (match(LHS, m_AShr(m_Value(A), m_SpecificIntAllowPoison(BitWidth - 1))) &&
      match(RHS, m_OneUse(m_ZExt(
                     m_OneUse(m_ICmp(Pred, m_Specific(A), m_ZeroInt()))))) &&
      Pred == CmpInst::ICMP_SGT) {
    Value *NotZero = Builder.CreateIsNotNull(A, "isnotnull");
    Value *Zext = Builder.CreateZExt(NotZero, Ty, "isnotnull.zext");
    return BinaryOperator::CreateOr(LHS, Zext);
  }

  if (Instruction *Ashr = foldAddToAshr(I))
    return Ashr;

  // (~X) + (~Y) --> -2 - (X + Y)
  {
    // To ensure we can save instructions we need to ensure that we consume
    // both LHS and RHS (i.e. they each have a `not`).
    bool ConsumesLHS, ConsumesRHS;
    if (isFreeToInvert(LHS, LHS->hasOneUse(), ConsumesLHS) && ConsumesLHS &&
        isFreeToInvert(RHS, RHS->hasOneUse(), ConsumesRHS) && ConsumesRHS) {
      Value *NotLHS = getFreelyInverted(LHS, LHS->hasOneUse(), &Builder);
      Value *NotRHS = getFreelyInverted(RHS, RHS->hasOneUse(), &Builder);
      assert(NotLHS != nullptr && NotRHS != nullptr &&
             "isFreeToInvert desynced with getFreelyInverted");
      Value *LHSPlusRHS = Builder.CreateAdd(NotLHS, NotRHS);
      return BinaryOperator::CreateSub(
          ConstantInt::getSigned(RHS->getType(), -2), LHSPlusRHS);
    }
  }

  if (Instruction *R = tryFoldInstWithCtpopWithNot(&I))
    return R;
1719
1720 // TODO(jingyue): Consider willNotOverflowSignedAdd and
1721 // willNotOverflowUnsignedAdd to reduce the number of invocations of
1722 // computeKnownBits.
1723 bool Changed = false;
1724 if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHSCache, RHSCache, I)) {
1725 Changed = true;
1726 I.setHasNoSignedWrap(true);
1727 }
1728 if (!I.hasNoUnsignedWrap() &&
1729 willNotOverflowUnsignedAdd(LHSCache, RHSCache, I)) {
1730 Changed = true;
1731 I.setHasNoUnsignedWrap(true);
1732 }
1733
1735 return V;
1736
1737 if (Instruction *V =
1739 return V;
1740
1742 return SatAdd;
1743
1744 // usub.sat(A, B) + B => umax(A, B)
1745 if (match(&I, m_c_BinOp(
1746 m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Value(A), m_Value(B))),
1747 m_Deferred(B)))) {
1748 return replaceInstUsesWith(I,
1749 Builder.CreateIntrinsic(Intrinsic::umax, {I.getType()}, {A, B}));
1750 }
1751
1752 // ctpop(A) + ctpop(B) => ctpop(A | B) if A and B have no bits set in common.
1753 if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(A)))) &&
1754 match(RHS, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(B)))) &&
1755 haveNoCommonBitsSet(A, B, SQ.getWithInstruction(&I)))
1756 return replaceInstUsesWith(
1757 I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
1758 {Builder.CreateOr(A, B)}));
1759
1760 // Fold the log2_ceil idiom:
1761 // zext(ctpop(A) >u/!= 1) + (ctlz(A, true) ^ (BW - 1))
1762 // -->
1763 // BW - ctlz(A - 1, false)
1764 const APInt *XorC;
1765 if (match(&I,
1766 m_c_Add(
1767 m_ZExt(m_ICmp(Pred, m_Intrinsic<Intrinsic::ctpop>(m_Value(A)),
1768 m_One())),
1769 m_OneUse(m_ZExtOrSelf(m_OneUse(m_Xor(
1770 m_OneUse(m_TruncOrSelf(m_OneUse(
1771 m_Intrinsic<Intrinsic::ctlz>(m_Deferred(A), m_One())))),
1772 m_APInt(XorC))))))) &&
1773 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_NE) &&
1774 *XorC == A->getType()->getScalarSizeInBits() - 1) {
1775 Value *Sub = Builder.CreateAdd(A, Constant::getAllOnesValue(A->getType()));
1776 Value *Ctlz = Builder.CreateIntrinsic(Intrinsic::ctlz, {A->getType()},
1777 {Sub, Builder.getFalse()});
1778 Value *Ret = Builder.CreateSub(
1779 ConstantInt::get(A->getType(), A->getType()->getScalarSizeInBits()),
1780 Ctlz, "", /*HasNUW*/ true, /*HasNSW*/ true);
1781 return replaceInstUsesWith(I, Builder.CreateZExtOrTrunc(Ret, I.getType()));
1782 }
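// Worked sketch for i32 (hypothetical IR; BW - 1 is 31):
//   %pop  = call i32 @llvm.ctpop.i32(i32 %a)
//   %big  = icmp ugt i32 %pop, 1
//   %z    = zext i1 %big to i32
//   %lz   = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
//   %flip = xor i32 %lz, 31
//   %res  = add i32 %z, %flip        ; ceil(log2(%a))
// becomes:
//   %dec  = add i32 %a, -1
//   %lz   = call i32 @llvm.ctlz.i32(i32 %dec, i1 false)
//   %res  = sub nuw nsw i32 32, %lz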
1783
1784 if (Instruction *Res = foldSquareSumInt(I))
1785 return Res;
1786
1787 if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
1788 return Res;
1789
1790 if (Instruction *Res = foldBinOpOfSelectAndCastOfSelectCondition(I))
1791 return Res;
1792
1793 return Changed ? &I : nullptr;
1794}
1795
1796/// Eliminate an op from a linear interpolation (lerp) pattern.
1797 static Instruction *factorizeLerp(BinaryOperator &I,
1798 InstCombiner::BuilderTy &Builder) {
1799 Value *X, *Y, *Z;
1800 if (!match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_Value(Y),
1801 m_OneUse(m_FSub(m_FPOne(),
1802 m_Value(Z))))),
1803 m_OneUse(m_c_FMul(m_Value(X), m_Specific(Z))))))
1804 return nullptr;
1805
1806 // (Y * (1.0 - Z)) + (X * Z) --> Y + Z * (X - Y) [8 commuted variants]
1807 Value *XY = Builder.CreateFSubFMF(X, Y, &I);
1808 Value *MulZ = Builder.CreateFMulFMF(Z, XY, &I);
1809 return BinaryOperator::CreateFAddFMF(Y, MulZ, &I);
1810}
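// Sketch of the lerp fold (hypothetical IR; reassoc/nsz FMF assumed):
//   %omz  = fsub fast float 1.0, %z
//   %mul0 = fmul fast float %y, %omz
//   %mul1 = fmul fast float %x, %z
//   %res  = fadd fast float %mul0, %mul1
// becomes:
//   %xy   = fsub fast float %x, %y
//   %mz   = fmul fast float %z, %xy
//   %res  = fadd fast float %y, %mz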
1811
1812/// Factor a common operand out of fadd/fsub of fmul/fdiv.
1813 static Instruction *factorizeFAddFSub(BinaryOperator &I,
1814 InstCombiner::BuilderTy &Builder) {
1815 assert((I.getOpcode() == Instruction::FAdd ||
1816 I.getOpcode() == Instruction::FSub) && "Expecting fadd/fsub");
1817 assert(I.hasAllowReassoc() && I.hasNoSignedZeros() &&
1818 "FP factorization requires FMF");
1819
1820 if (Instruction *Lerp = factorizeLerp(I, Builder))
1821 return Lerp;
1822
1823 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1824 if (!Op0->hasOneUse() || !Op1->hasOneUse())
1825 return nullptr;
1826
1827 Value *X, *Y, *Z;
1828 bool IsFMul;
1829 if ((match(Op0, m_FMul(m_Value(X), m_Value(Z))) &&
1830 match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))) ||
1831 (match(Op0, m_FMul(m_Value(Z), m_Value(X))) &&
1832 match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))))
1833 IsFMul = true;
1834 else if (match(Op0, m_FDiv(m_Value(X), m_Value(Z))) &&
1835 match(Op1, m_FDiv(m_Value(Y), m_Specific(Z))))
1836 IsFMul = false;
1837 else
1838 return nullptr;
1839
1840 // (X * Z) + (Y * Z) --> (X + Y) * Z
1841 // (X * Z) - (Y * Z) --> (X - Y) * Z
1842 // (X / Z) + (Y / Z) --> (X + Y) / Z
1843 // (X / Z) - (Y / Z) --> (X - Y) / Z
1844 bool IsFAdd = I.getOpcode() == Instruction::FAdd;
1845 Value *XY = IsFAdd ? Builder.CreateFAddFMF(X, Y, &I)
1846 : Builder.CreateFSubFMF(X, Y, &I);
1847
1848 // Bail out if we just created a denormal constant.
1849 // TODO: This is copied from a previous implementation. Is it necessary?
1850 const APFloat *C;
1851 if (match(XY, m_APFloat(C)) && !C->isNormal())
1852 return nullptr;
1853
1854 return IsFMul ? BinaryOperator::CreateFMulFMF(XY, Z, &I)
1855 : BinaryOperator::CreateFDivFMF(XY, Z, &I);
1856 }
1857
1858 Instruction *InstCombinerImpl::visitFAdd(BinaryOperator &I) {
1859 if (Value *V = simplifyFAddInst(I.getOperand(0), I.getOperand(1),
1860 I.getFastMathFlags(),
1861 getSimplifyQuery().getWithInstruction(&I)))
1862 return replaceInstUsesWith(I, V);
1863
1864 if (SimplifyAssociativeOrCommutative(I))
1865 return &I;
1866
1867 if (Instruction *X = foldVectorBinop(I))
1868 return X;
1869
1870 if (Instruction *Phi = foldBinopWithPhiOperands(I))
1871 return Phi;
1872
1873 if (Instruction *FoldedFAdd = foldBinOpIntoSelectOrPhi(I))
1874 return FoldedFAdd;
1875
1876 // (-X) + Y --> Y - X
1877 Value *X, *Y;
1878 if (match(&I, m_c_FAdd(m_FNeg(m_Value(X)), m_Value(Y))))
1879 return BinaryOperator::CreateFSubFMF(Y, X, &I);
1880 
1881 // Similar to above, but look through fmul/fdiv for the negated term.
1882 // (-X * Y) + Z --> Z - (X * Y) [4 commuted variants]
1883 Value *Z;
1884 if (match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))),
1885 m_Value(Z)))) {
1886 Value *XY = Builder.CreateFMulFMF(X, Y, &I);
1887 return BinaryOperator::CreateFSubFMF(Z, XY, &I);
1888 }
1889 // (-X / Y) + Z --> Z - (X / Y) [2 commuted variants]
1890 // (X / -Y) + Z --> Z - (X / Y) [2 commuted variants]
1891 if (match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y))),
1892 m_Value(Z))) ||
1893 match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))),
1894 m_Value(Z)))) {
1895 Value *XY = Builder.CreateFDivFMF(X, Y, &I);
1896 return BinaryOperator::CreateFSubFMF(Z, XY, &I);
1897 }
1898
1899 // Check for (fadd double (sitofp x), y), see if we can merge this into an
1900 // integer add followed by a promotion.
1901 if (Instruction *R = foldFBinOpOfIntCasts(I))
1902 return R;
1903
1904 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1905 // Handle special cases for FAdd with selects feeding the operation
1906 if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS))
1907 return replaceInstUsesWith(I, V);
1908
1909 if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
1910 if (Instruction *F = factorizeFAddFSub(I, Builder))
1911 return F;
1912
1913 if (Instruction *F = foldSquareSumFP(I))
1914 return F;
1915
1916 // Try to fold fadd into start value of reduction intrinsic.
1917 if (match(&I, m_c_FAdd(m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
1918 m_AnyZeroFP(), m_Value(X))),
1919 m_Value(Y)))) {
1920 // fadd (rdx 0.0, X), Y --> rdx Y, X
1921 return replaceInstUsesWith(
1922 I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
1923 {X->getType()}, {Y, X}, &I));
1924 }
1925 const APFloat *StartC, *C;
1926 if (match(LHS, m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
1927 m_APFloat(StartC), m_Value(X)))) &&
1928 match(RHS, m_APFloat(C))) {
1929 // fadd (rdx StartC, X), C --> rdx (C + StartC), X
1930 Constant *NewStartC = ConstantFP::get(I.getType(), *C + *StartC);
1931 return replaceInstUsesWith(
1932 I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
1933 {X->getType()}, {NewStartC, X}, &I));
1934 }
1935
1936 // (X * MulC) + X --> X * (MulC + 1.0)
1937 Constant *MulC;
1938 if (match(&I, m_c_FAdd(m_FMul(m_Value(X), m_ImmConstant(MulC)),
1939 m_Deferred(X)))) {
1940 if (Constant *NewMulC = ConstantFoldBinaryOpOperands(
1941 Instruction::FAdd, MulC, ConstantFP::get(I.getType(), 1.0), DL))
1942 return BinaryOperator::CreateFMulFMF(X, NewMulC, &I);
1943 }
1944
1945 // (-X - Y) + (X + Z) --> Z - Y
1946 if (match(&I, m_c_FAdd(m_FSub(m_FNeg(m_Value(X)), m_Value(Y)),
1947 m_c_FAdd(m_Deferred(X), m_Value(Z)))))
1948 return BinaryOperator::CreateFSubFMF(Z, Y, &I);
1949
1950 if (Value *V = FAddCombine(Builder).simplify(&I))
1951 return replaceInstUsesWith(I, V);
1952 }
1953
1954 // minimum(X, Y) + maximum(X, Y) => X + Y.
1955 if (match(&I,
1956 m_c_FAdd(m_Intrinsic<Intrinsic::maximum>(m_Value(X), m_Value(Y)),
1957 m_c_Intrinsic<Intrinsic::minimum>(m_Deferred(X),
1958 m_Deferred(Y))))) {
1959 BinaryOperator *Result = BinaryOperator::CreateFAddFMF(X, Y, &I);
1960 // We cannot preserve ninf if the nnan flag is not set.
1961 // If X is NaN and Y is Inf, the original program computed NaN + NaN, while
1962 // the optimized version computes NaN + Inf, which is poison under ninf.
1963 if (!Result->hasNoNaNs())
1964 Result->setHasNoInfs(false);
1965 return Result;
1966 }
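// Sketch (hypothetical floats): the maximum/minimum pair returns each of
// %x and %y exactly once, so the sum is just %x + %y:
//   %mx  = call float @llvm.maximum.f32(float %x, float %y)
//   %mn  = call float @llvm.minimum.f32(float %x, float %y)
//   %res = fadd float %mx, %mn
// becomes:
//   %res = fadd float %x, %y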
1967
1968 return nullptr;
1969}
1970
1971 /// Optimize the difference of two pointers into the same array into a size.
1972 /// Consider &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the
1973 /// pointer operands to the ptrtoint instructions for the LHS/RHS of the subtract.
1974 Value *InstCombinerImpl::OptimizePointerDifference(Value *LHS, Value *RHS,
1975 Type *Ty, bool IsNUW) {
1976 // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
1977 // this.
1978 bool Swapped = false;
1979 GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;
1980 if (!isa<GEPOperator>(LHS) && isa<GEPOperator>(RHS)) {
1981 std::swap(LHS, RHS);
1982 Swapped = true;
1983 }
1984
1985 // Require at least one GEP with a common base pointer on both sides.
1986 if (auto *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
1987 // (gep X, ...) - X
1988 if (LHSGEP->getOperand(0)->stripPointerCasts() ==
1989 RHS->stripPointerCasts()) {
1990 GEP1 = LHSGEP;
1991 } else if (auto *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
1992 // (gep X, ...) - (gep X, ...)
1993 if (LHSGEP->getOperand(0)->stripPointerCasts() ==
1994 RHSGEP->getOperand(0)->stripPointerCasts()) {
1995 GEP1 = LHSGEP;
1996 GEP2 = RHSGEP;
1997 }
1998 }
1999 }
2000
2001 if (!GEP1)
2002 return nullptr;
2003
2004 // To avoid duplicating the offset arithmetic, rewrite the GEP to use the
2005 // computed offset. This may erase the original GEP, so be sure to cache the
2006 // inbounds flag before emitting the offset.
2007 // TODO: We should probably do this even if there is only one GEP.
2008 bool RewriteGEPs = GEP2 != nullptr;
2009
2010 // Emit the offset of the GEP and an intptr_t.
2011 bool GEP1IsInBounds = GEP1->isInBounds();
2012 Value *Result = EmitGEPOffset(GEP1, RewriteGEPs);
2013
2014 // If this is a single inbounds GEP and the original sub was nuw,
2015 // then the final multiplication is also nuw.
2016 if (auto *I = dyn_cast<Instruction>(Result))
2017 if (IsNUW && !GEP2 && !Swapped && GEP1IsInBounds &&
2018 I->getOpcode() == Instruction::Mul)
2019 I->setHasNoUnsignedWrap();
2020
2021 // If we have a 2nd GEP of the same base pointer, subtract the offsets.
2022 // If both GEPs are inbounds, then the subtract does not have signed overflow.
2023 if (GEP2) {
2024 bool GEP2IsInBounds = GEP2->isInBounds();
2025 Value *Offset = EmitGEPOffset(GEP2, RewriteGEPs);
2026 Result = Builder.CreateSub(Result, Offset, "gepdiff", /* NUW */ false,
2027 GEP1IsInBounds && GEP2IsInBounds);
2028 }
2029
2030 // If we have p - gep(p, ...) then we have to negate the result.
2031 if (Swapped)
2032 Result = Builder.CreateNeg(Result, "diff.neg");
2033
2034 return Builder.CreateIntCast(Result, Ty, true);
2035}
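// E.g. (hypothetical IR, i8 elements so the offset equals the index):
//   %gep = getelementptr inbounds i8, ptr %a, i64 10
//   %pl  = ptrtoint ptr %gep to i64
//   %pr  = ptrtoint ptr %a to i64
//   %res = sub i64 %pl, %pr          ; reduces to the constant offset 10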
2036
2037 static Instruction *foldSubOfMinMax(BinaryOperator &I,
2038 InstCombiner::BuilderTy &Builder) {
2039 Value *Op0 = I.getOperand(0);
2040 Value *Op1 = I.getOperand(1);
2041 Type *Ty = I.getType();
2042 auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op1);
2043 if (!MinMax)
2044 return nullptr;
2045
2046 // sub(add(X,Y), s/umin(X,Y)) --> s/umax(X,Y)
2047 // sub(add(X,Y), s/umax(X,Y)) --> s/umin(X,Y)
2048 Value *X = MinMax->getLHS();
2049 Value *Y = MinMax->getRHS();
2050 if (match(Op0, m_c_Add(m_Specific(X), m_Specific(Y))) &&
2051 (Op0->hasOneUse() || Op1->hasOneUse())) {
2052 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMax->getIntrinsicID());
2053 Function *F = Intrinsic::getDeclaration(I.getModule(), InvID, Ty);
2054 return CallInst::Create(F, {X, Y});
2055 }
2056
2057 // sub(add(X,Y),umin(Y,Z)) --> add(X,usub.sat(Y,Z))
2058 // sub(add(X,Z),umin(Y,Z)) --> add(X,usub.sat(Z,Y))
2059 Value *Z;
2060 if (match(Op1, m_OneUse(m_UMin(m_Value(Y), m_Value(Z))))) {
2061 if (match(Op0, m_OneUse(m_c_Add(m_Specific(Y), m_Value(X))))) {
2062 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Y, Z});
2063 return BinaryOperator::CreateAdd(X, USub);
2064 }
2065 if (match(Op0, m_OneUse(m_c_Add(m_Specific(Z), m_Value(X))))) {
2066 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Z, Y});
2067 return BinaryOperator::CreateAdd(X, USub);
2068 }
2069 }
2070
2071 // sub Op0, smin((sub nsw Op0, Z), 0) --> smax Op0, Z
2072 // sub Op0, smax((sub nsw Op0, Z), 0) --> smin Op0, Z
2073 if (MinMax->isSigned() && match(Y, m_ZeroInt()) &&
2074 match(X, m_NSWSub(m_Specific(Op0), m_Value(Z)))) {
2075 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMax->getIntrinsicID());
2076 Function *F = Intrinsic::getDeclaration(I.getModule(), InvID, Ty);
2077 return CallInst::Create(F, {Op0, Z});
2078 }
2079
2080 return nullptr;
2081}
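// E.g. the first pattern above, sketched in IR (hypothetical values):
//   %sum = add i32 %x, %y
//   %min = call i32 @llvm.umin.i32(i32 %x, i32 %y)
//   %res = sub i32 %sum, %min
// becomes:
//   %res = call i32 @llvm.umax.i32(i32 %x, i32 %y)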
2082
2083 Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
2084 if (Value *V = simplifySubInst(I.getOperand(0), I.getOperand(1),
2085 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
2086 getSimplifyQuery().getWithInstruction(&I)))
2087 return replaceInstUsesWith(I, V);
2088
2089 if (Instruction *X = foldVectorBinop(I))
2090 return X;
2091
2092 if (Instruction *Phi = foldBinopWithPhiOperands(I))
2093 return Phi;
2094
2095 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2096
2097 // If this is a 'B = x-(-A)', change to B = x+A.
2098 // We deal with this without involving Negator to preserve NSW flag.
2099 if (Value *V = dyn_castNegVal(Op1)) {
2100 BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);
2101
2102 if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
2103 assert(BO->getOpcode() == Instruction::Sub &&
2104 "Expected a subtraction operator!");
2105 if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
2106 Res->setHasNoSignedWrap(true);
2107 } else {
2108 if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
2109 Res->setHasNoSignedWrap(true);
2110 }
2111
2112 return Res;
2113 }
2114
2115 // Try this before Negator to preserve NSW flag.
2116 if (Instruction *R = factorizeMathWithShlOps(I, Builder))
2117 return R;
2118
2119 Constant *C;
2120 if (match(Op0, m_ImmConstant(C))) {
2121 Value *X;
2122 Constant *C2;
2123
2124 // C-(X+C2) --> (C-C2)-X
2125 if (match(Op1, m_Add(m_Value(X), m_ImmConstant(C2)))) {
2126 // C-C2 never overflows; if C-(X+C2) and (X+C2) both have NSW/NUW,
2127 // then (C-C2)-X can also have NSW/NUW.
2128 bool WillNotSOV = willNotOverflowSignedSub(C, C2, I);
2129 BinaryOperator *Res =
2130 BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);
2131 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2132 Res->setHasNoSignedWrap(I.hasNoSignedWrap() && OBO1->hasNoSignedWrap() &&
2133 WillNotSOV);
2134 Res->setHasNoUnsignedWrap(I.hasNoUnsignedWrap() &&
2135 OBO1->hasNoUnsignedWrap());
2136 return Res;
2137 }
2138 }
2139
2140 auto TryToNarrowDeduceFlags = [this, &I, &Op0, &Op1]() -> Instruction * {
2141 if (Instruction *Ext = narrowMathIfNoOverflow(I))
2142 return Ext;
2143
2144 bool Changed = false;
2145 if (!I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1, I)) {
2146 Changed = true;
2147 I.setHasNoSignedWrap(true);
2148 }
2149 if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1, I)) {
2150 Changed = true;
2151 I.setHasNoUnsignedWrap(true);
2152 }
2153
2154 return Changed ? &I : nullptr;
2155 };
2156
2157 // First, let's try to interpret `sub a, b` as `add a, (sub 0, b)`,
2158 // and let's try to sink `(sub 0, b)` into `b` itself. But only if this isn't
2159 // a pure negation used by a select that looks like abs/nabs.
2160 bool IsNegation = match(Op0, m_ZeroInt());
2161 if (!IsNegation || none_of(I.users(), [&I, Op1](const User *U) {
2162 const Instruction *UI = dyn_cast<Instruction>(U);
2163 if (!UI)
2164 return false;
2165 return match(UI,
2166 m_Select(m_Value(), m_Specific(Op1), m_Specific(&I))) ||
2167 match(UI, m_Select(m_Value(), m_Specific(&I), m_Specific(Op1)));
2168 })) {
2169 if (Value *NegOp1 = Negator::Negate(IsNegation, /* IsNSW */ IsNegation &&
2170 I.hasNoSignedWrap(),
2171 Op1, *this))
2172 return BinaryOperator::CreateAdd(NegOp1, Op0);
2173 }
2174 if (IsNegation)
2175 return TryToNarrowDeduceFlags(); // Should have been handled in Negator!
2176
2177 // (A*B)-(A*C) -> A*(B-C) etc
2178 if (Value *V = foldUsingDistributiveLaws(I))
2179 return replaceInstUsesWith(I, V);
2180
2181 if (I.getType()->isIntOrIntVectorTy(1))
2182 return BinaryOperator::CreateXor(Op0, Op1);
2183
2184 // Replace (-1 - A) with (~A).
2185 if (match(Op0, m_AllOnes()))
2186 return BinaryOperator::CreateNot(Op1);
2187
2188 // (X + -1) - Y --> ~Y + X
2189 Value *X, *Y;
2190 if (match(Op0, m_OneUse(m_Add(m_Value(X), m_AllOnes()))))
2191 return BinaryOperator::CreateAdd(Builder.CreateNot(Op1), X);
2192
2193 // Reassociate sub/add sequences to create more add instructions and
2194 // reduce dependency chains:
2195 // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
2196 Value *Z;
2197 if (match(Op0, m_OneUse(m_c_Add(m_OneUse(m_Sub(m_Value(X), m_Value(Y))),
2198 m_Value(Z))))) {
2199 Value *XZ = Builder.CreateAdd(X, Z);
2200 Value *YW = Builder.CreateAdd(Y, Op1);
2201 return BinaryOperator::CreateSub(XZ, YW);
2202 }
2203
2204 // ((X - Y) - Op1) --> X - (Y + Op1)
2205 if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y))))) {
2206 OverflowingBinaryOperator *LHSSub = cast<OverflowingBinaryOperator>(Op0);
2207 bool HasNUW = I.hasNoUnsignedWrap() && LHSSub->hasNoUnsignedWrap();
2208 bool HasNSW = HasNUW && I.hasNoSignedWrap() && LHSSub->hasNoSignedWrap();
2209 Value *Add = Builder.CreateAdd(Y, Op1, "", /* HasNUW */ HasNUW,
2210 /* HasNSW */ HasNSW);
2211 BinaryOperator *Sub = BinaryOperator::CreateSub(X, Add);
2212 Sub->setHasNoUnsignedWrap(HasNUW);
2213 Sub->setHasNoSignedWrap(HasNSW);
2214 return Sub;
2215 }
2216
2217 {
2218 // (X + Z) - (Y + Z) --> (X - Y)
2219 // This is done in other passes, but we want to be able to consume this
2220 // pattern in InstCombine so we can generate it without creating infinite
2221 // loops.
2222 if (match(Op0, m_Add(m_Value(X), m_Value(Z))) &&
2223 match(Op1, m_c_Add(m_Value(Y), m_Specific(Z))))
2224 return BinaryOperator::CreateSub(X, Y);
2225
2226 // (X + C0) - (Y + C1) --> (X - Y) + (C0 - C1)
2227 Constant *CX, *CY;
2228 if (match(Op0, m_OneUse(m_Add(m_Value(X), m_ImmConstant(CX)))) &&
2229 match(Op1, m_OneUse(m_Add(m_Value(Y), m_ImmConstant(CY))))) {
2230 Value *OpsSub = Builder.CreateSub(X, Y);
2231 Constant *ConstsSub = ConstantExpr::getSub(CX, CY);
2232 return BinaryOperator::CreateAdd(OpsSub, ConstsSub);
2233 }
2234 }
2235
2236 // (~X) - (~Y) --> Y - X
2237 {
2238 // Need to ensure we can consume at least one of the `not` instructions,
2239 // otherwise this can inf loop.
2240 bool ConsumesOp0, ConsumesOp1;
2241 if (isFreeToInvert(Op0, Op0->hasOneUse(), ConsumesOp0) &&
2242 isFreeToInvert(Op1, Op1->hasOneUse(), ConsumesOp1) &&
2243 (ConsumesOp0 || ConsumesOp1)) {
2244 Value *NotOp0 = getFreelyInverted(Op0, Op0->hasOneUse(), &Builder);
2245 Value *NotOp1 = getFreelyInverted(Op1, Op1->hasOneUse(), &Builder);
2246 assert(NotOp0 != nullptr && NotOp1 != nullptr &&
2247 "isFreeToInvert desynced with getFreelyInverted");
2248 return BinaryOperator::CreateSub(NotOp1, NotOp0);
2249 }
2250 }
2251
2252 auto m_AddRdx = [](Value *&Vec) {
2253 return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_add>(m_Value(Vec)));
2254 };
2255 Value *V0, *V1;
2256 if (match(Op0, m_AddRdx(V0)) && match(Op1, m_AddRdx(V1)) &&
2257 V0->getType() == V1->getType()) {
2258 // Difference of sums is sum of differences:
2259 // add_rdx(V0) - add_rdx(V1) --> add_rdx(V0 - V1)
2260 Value *Sub = Builder.CreateSub(V0, V1);
2261 Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_add,
2262 {Sub->getType()}, {Sub});
2263 return replaceInstUsesWith(I, Rdx);
2264 }
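// Sketch (hypothetical <4 x i32> vectors):
//   %r0  = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v0)
//   %r1  = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v1)
//   %res = sub i32 %r0, %r1
// becomes:
//   %d   = sub <4 x i32> %v0, %v1
//   %res = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %d)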
2265
2266 if (Constant *C = dyn_cast<Constant>(Op0)) {
2267 Value *X;
2268 if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
2269 // C - (zext bool) --> bool ? C - 1 : C
2270 return SelectInst::Create(X, InstCombiner::SubOne(C), C);
2271 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
2272 // C - (sext bool) --> bool ? C + 1 : C
2273 return SelectInst::Create(X, InstCombiner::AddOne(C), C);
2274 
2275 // C - ~X == X + (1+C)
2276 if (match(Op1, m_Not(m_Value(X))))
2277 return BinaryOperator::CreateAdd(X, InstCombiner::AddOne(C));
2278
2279 // Try to fold constant sub into select arguments.
2280 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
2281 if (Instruction *R = FoldOpIntoSelect(I, SI))
2282 return R;
2283
2284 // Try to fold constant sub into PHI values.
2285 if (PHINode *PN = dyn_cast<PHINode>(Op1))
2286 if (Instruction *R = foldOpIntoPhi(I, PN))
2287 return R;
2288
2289 Constant *C2;
2290
2291 // C-(C2-X) --> X+(C-C2)
2292 if (match(Op1, m_Sub(m_ImmConstant(C2), m_Value(X))))
2293 return BinaryOperator::CreateAdd(X, ConstantExpr::getSub(C, C2));
2294 }
2295
2296 const APInt *Op0C;
2297 if (match(Op0, m_APInt(Op0C))) {
2298 if (Op0C->isMask()) {
2299 // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
2300 // zero. We don't use information from dominating conditions so this
2301 // transform is easier to reverse if necessary.
2302 KnownBits RHSKnown = llvm::computeKnownBits(
2303 Op1, 0, SQ.getWithInstruction(&I).getWithoutDomCondCache());
2304 if ((*Op0C | RHSKnown.Zero).isAllOnes())
2305 return BinaryOperator::CreateXor(Op1, Op0);
2306 }
2307
2308 // C - ((C3 -nuw X) & C2) --> (C - (C2 & C3)) + (X & C2) when:
2309 // (C3 - ((C2 & C3) - 1)) is pow2
2310 // ((C2 + C3) & ((C2 & C3) - 1)) == ((C2 & C3) - 1)
2311 // C2 is negative pow2 || sub nuw
2312 const APInt *C2, *C3;
2313 BinaryOperator *InnerSub;
2314 if (match(Op1, m_OneUse(m_And(m_BinOp(InnerSub), m_APInt(C2)))) &&
2315 match(InnerSub, m_Sub(m_APInt(C3), m_Value(X))) &&
2316 (InnerSub->hasNoUnsignedWrap() || C2->isNegatedPowerOf2())) {
2317 APInt C2AndC3 = *C2 & *C3;
2318 APInt C2AndC3Minus1 = C2AndC3 - 1;
2319 APInt C2AddC3 = *C2 + *C3;
2320 if ((*C3 - C2AndC3Minus1).isPowerOf2() &&
2321 C2AndC3Minus1.isSubsetOf(C2AddC3)) {
2322 Value *And = Builder.CreateAnd(X, ConstantInt::get(I.getType(), *C2));
2323 return BinaryOperator::CreateAdd(
2324 And, ConstantInt::get(I.getType(), *Op0C - C2AndC3));
2325 }
2326 }
2327 }
2328
2329 {
2330 Value *Y;
2331 // X - (X + Y) == -Y and X - (Y + X) == -Y
2332 if (match(Op1, m_c_Add(m_Specific(Op0), m_Value(Y))))
2333 return BinaryOperator::CreateNeg(Y);
2334 
2335 // (X-Y)-X == -Y
2336 if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
2337 return BinaryOperator::CreateNeg(Y);
2338 }
2339
2340 // (sub (or A, B) (and A, B)) --> (xor A, B)
2341 {
2342 Value *A, *B;
2343 if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
2344 match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
2345 return BinaryOperator::CreateXor(A, B);
2346 }
2347
2348 // (sub (add A, B) (or A, B)) --> (and A, B)
2349 {
2350 Value *A, *B;
2351 if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
2352 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
2353 return BinaryOperator::CreateAnd(A, B);
2354 }
2355
2356 // (sub (add A, B) (and A, B)) --> (or A, B)
2357 {
2358 Value *A, *B;
2359 if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
2360 match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
2361 return BinaryOperator::CreateOr(A, B);
2362 }
2363
2364 // (sub (and A, B) (or A, B)) --> neg (xor A, B)
2365 {
2366 Value *A, *B;
2367 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2368 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
2369 (Op0->hasOneUse() || Op1->hasOneUse()))
2370 return BinaryOperator::CreateNeg(Builder.CreateXor(A, B));
2371 }
2372
2373 // (sub (or A, B), (xor A, B)) --> (and A, B)
2374 {
2375 Value *A, *B;
2376 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
2377 match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
2378 return BinaryOperator::CreateAnd(A, B);
2379 }
2380
2381 // (sub (xor A, B) (or A, B)) --> neg (and A, B)
2382 {
2383 Value *A, *B;
2384 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
2385 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
2386 (Op0->hasOneUse() || Op1->hasOneUse()))
2387 return BinaryOperator::CreateNeg(Builder.CreateAnd(A, B));
2388 }
2389
2390 {
2391 Value *Y;
2392 // ((X | Y) - X) --> (~X & Y)
2393 if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
2394 return BinaryOperator::CreateAnd(
2395 Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));
2396 }
2397
2398 {
2399 // (sub (and Op1, (neg X)), Op1) --> neg (and Op1, (add X, -1))
2400 Value *X;
2401 if (match(Op0, m_OneUse(m_c_And(m_Specific(Op1),
2402 m_OneUse(m_Neg(m_Value(X))))))) {
2403 return BinaryOperator::CreateNeg(Builder.CreateAnd(
2404 Op1, Builder.CreateAdd(X, Constant::getAllOnesValue(I.getType()))));
2405 }
2406 }
2407
2408 {
2409 // (sub (and Op1, C), Op1) --> neg (and Op1, ~C)
2410 Constant *C;
2411 if (match(Op0, m_OneUse(m_And(m_Specific(Op1), m_Constant(C))))) {
2412 return BinaryOperator::CreateNeg(
2413 Builder.CreateAnd(Op1, Builder.CreateNot(C)));
2414 }
2415 }
2416
2417 {
2418 // (sub (xor X, (sext C)), (sext C)) => (select C, (neg X), X)
2419 // (sub (sext C), (xor X, (sext C))) => (select C, X, (neg X))
2420 Value *C, *X;
2421 auto m_SubXorCmp = [&C, &X](Value *LHS, Value *RHS) {
2422 return match(LHS, m_OneUse(m_c_Xor(m_Value(X), m_Specific(RHS)))) &&
2423 match(RHS, m_SExt(m_Value(C))) &&
2424 (C->getType()->getScalarSizeInBits() == 1);
2425 };
2426 if (m_SubXorCmp(Op0, Op1))
2427 return SelectInst::Create(C, Builder.CreateNeg(X), X);
2428 if (m_SubXorCmp(Op1, Op0))
2429 return SelectInst::Create(C, X, Builder.CreateNeg(X));
2430 }
2431
2432 if (Instruction *R = tryFoldInstWithCtpopWithNot(&I))
2433 return R;
2434
2435 if (Instruction *R = foldSubOfMinMax(I, Builder))
2436 return R;
2437
2438 {
2439 // If we have a subtraction between some value and a select between
2440 // said value and something else, sink subtraction into select hands, i.e.:
2441 // sub (select %Cond, %TrueVal, %FalseVal), %Op1
2442 // ->
2443 // select %Cond, (sub %TrueVal, %Op1), (sub %FalseVal, %Op1)
2444 // or
2445 // sub %Op0, (select %Cond, %TrueVal, %FalseVal)
2446 // ->
2447 // select %Cond, (sub %Op0, %TrueVal), (sub %Op0, %FalseVal)
2448 // This will result in select between new subtraction and 0.
2449 auto SinkSubIntoSelect =
2450 [Ty = I.getType()](Value *Select, Value *OtherHandOfSub,
2451 auto SubBuilder) -> Instruction * {
2452 Value *Cond, *TrueVal, *FalseVal;
2453 if (!match(Select, m_OneUse(m_Select(m_Value(Cond), m_Value(TrueVal),
2454 m_Value(FalseVal)))))
2455 return nullptr;
2456 if (OtherHandOfSub != TrueVal && OtherHandOfSub != FalseVal)
2457 return nullptr;
2458 // While it is really tempting to just create two subtractions and let
2459 // InstCombine fold one of those to 0, it isn't possible to do so
2460 // because of worklist visitation order. So ugly it is.
2461 bool OtherHandOfSubIsTrueVal = OtherHandOfSub == TrueVal;
2462 Value *NewSub = SubBuilder(OtherHandOfSubIsTrueVal ? FalseVal : TrueVal);
2463 Constant *Zero = Constant::getNullValue(Ty);
2464 SelectInst *NewSel =
2465 SelectInst::Create(Cond, OtherHandOfSubIsTrueVal ? Zero : NewSub,
2466 OtherHandOfSubIsTrueVal ? NewSub : Zero);
2467 // Preserve prof metadata if any.
2468 NewSel->copyMetadata(cast<Instruction>(*Select));
2469 return NewSel;
2470 };
2471 if (Instruction *NewSel = SinkSubIntoSelect(
2472 /*Select=*/Op0, /*OtherHandOfSub=*/Op1,
2473 [Builder = &Builder, Op1](Value *OtherHandOfSelect) {
2474 return Builder->CreateSub(OtherHandOfSelect,
2475 /*OtherHandOfSub=*/Op1);
2476 }))
2477 return NewSel;
2478 if (Instruction *NewSel = SinkSubIntoSelect(
2479 /*Select=*/Op1, /*OtherHandOfSub=*/Op0,
2480 [Builder = &Builder, Op0](Value *OtherHandOfSelect) {
2481 return Builder->CreateSub(/*OtherHandOfSub=*/Op0,
2482 OtherHandOfSelect);
2483 }))
2484 return NewSel;
2485 }
2486
2487 // (X - (X & Y)) --> (X & ~Y)
2488 if (match(Op1, m_c_And(m_Specific(Op0), m_Value(Y))) &&
2489 (Op1->hasOneUse() || isa<Constant>(Y)))
2490 return BinaryOperator::CreateAnd(
2491 Op0, Builder.CreateNot(Y, Y->getName() + ".not"));
2492
2493 // ~X - Min/Max(~X, Y) -> ~Min/Max(X, ~Y) - X
2494 // ~X - Min/Max(Y, ~X) -> ~Min/Max(X, ~Y) - X
2495 // Min/Max(~X, Y) - ~X -> X - ~Min/Max(X, ~Y)
2496 // Min/Max(Y, ~X) - ~X -> X - ~Min/Max(X, ~Y)
2497 // As long as Y is freely invertible, this will be neutral or a win.
2498 // Note: We don't generate the inverse max/min, just create the 'not' of
2499 // it and let other folds do the rest.
2500 if (match(Op0, m_Not(m_Value(X))) &&
2501 match(Op1, m_c_MaxOrMin(m_Specific(Op0), m_Value(Y))) &&
2502 !Op0->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
2503 Value *Not = Builder.CreateNot(Op1);
2504 return BinaryOperator::CreateSub(Not, X);
2505 }
2506 if (match(Op1, m_Not(m_Value(X))) &&
2507 match(Op0, m_c_MaxOrMin(m_Specific(Op1), m_Value(Y))) &&
2508 !Op1->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
2509 Value *Not = Builder.CreateNot(Op0);
2510 return BinaryOperator::CreateSub(X, Not);
2511 }
2512
2513 // Optimize the difference of two pointers into the same array into a size.
2514 // Consider &A[10] - &A[0]: we should compile this to "10".
2515 Value *LHSOp, *RHSOp;
2516 if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
2517 match(Op1, m_PtrToInt(m_Value(RHSOp))))
2518 if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
2519 I.hasNoUnsignedWrap()))
2520 return replaceInstUsesWith(I, Res);
2521
2522 // trunc(p)-trunc(q) -> trunc(p-q)
2523 if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
2524 match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
2525 if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
2526 /* IsNUW */ false))
2527 return replaceInstUsesWith(I, Res);
2528
2529 // Canonicalize a shifty way to code absolute value to the common pattern.
2530 // There are 2 potential commuted variants.
2531 // We're relying on the fact that we only do this transform when the shift has
2532 // exactly 2 uses and the xor has exactly 1 use (otherwise, we might increase
2533 // instructions).
2534 Value *A;
2535 const APInt *ShAmt;
2536 Type *Ty = I.getType();
2537 unsigned BitWidth = Ty->getScalarSizeInBits();
2538 if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
2539 Op1->hasNUses(2) && *ShAmt == BitWidth - 1 &&
2540 match(Op0, m_OneUse(m_c_Xor(m_Specific(A), m_Specific(Op1))))) {
2541 // B = ashr i32 A, 31 ; smear the sign bit
2542 // sub (xor A, B), B ; flip bits if negative and subtract -1 (add 1)
2543 // --> (A < 0) ? -A : A
2544 Value *IsNeg = Builder.CreateIsNeg(A);
2545 // Copy the nsw flags from the sub to the negate.
2546 Value *NegA = I.hasNoUnsignedWrap()
2547 ? Constant::getNullValue(A->getType())
2548 : Builder.CreateNeg(A, "", I.hasNoSignedWrap());
2549 return SelectInst::Create(IsNeg, NegA, A);
2550 }
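// The shifty abs idiom, sketched in IR (hypothetical i32 %a):
//   %b   = ashr i32 %a, 31
//   %x   = xor i32 %a, %b
//   %res = sub i32 %x, %b
// becomes:
//   %isneg = icmp slt i32 %a, 0
//   %neg   = sub i32 0, %a
//   %res   = select i1 %isneg, i32 %neg, i32 %a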
2551
2552 // If we are subtracting a low-bit masked subset of some value from an add
2553 // of that same value with no low bits changed, that is clearing some low bits
2554 // of the sum:
2555 // sub (X + AddC), (X & AndC) --> and (X + AddC), ~AndC
2556 const APInt *AddC, *AndC;
2557 if (match(Op0, m_Add(m_Value(X), m_APInt(AddC))) &&
2558 match(Op1, m_And(m_Specific(X), m_APInt(AndC)))) {
2559 unsigned Cttz = AddC->countr_zero();
2560 APInt HighMask(APInt::getHighBitsSet(BitWidth, BitWidth - Cttz));
2561 if ((HighMask & *AndC).isZero())
2562 return BinaryOperator::CreateAnd(Op0, ConstantInt::get(Ty, ~(*AndC)));
2563 }
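// E.g. with AddC == 16 and AndC == 7 (hypothetical IR; 7 only has bits
// below the lowest set bit of 16, so the sub just clears those bits):
//   %add = add i32 %x, 16
//   %and = and i32 %x, 7
//   %res = sub i32 %add, %and
// becomes:
//   %res = and i32 %add, -8          ; -8 == ~7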
2564
2565 if (Instruction *V =
2566 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
2567 return V;
2568
2569 // X - usub.sat(X, Y) => umin(X, Y)
2570 if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Specific(Op0),
2571 m_Value(Y)))))
2572 return replaceInstUsesWith(
2573 I, Builder.CreateIntrinsic(Intrinsic::umin, {I.getType()}, {Op0, Y}));
2574
2575 // umax(X, Op1) - Op1 --> usub.sat(X, Op1)
2576 // TODO: The one-use restriction is not strictly necessary, but it may
2577 // require improving other pattern matching and/or codegen.
2578 if (match(Op0, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op1)))))
2579 return replaceInstUsesWith(
2580 I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op1}));
2581
2582 // Op0 - umin(X, Op0) --> usub.sat(Op0, X)
2583 if (match(Op1, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op0)))))
2584 return replaceInstUsesWith(
2585 I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op0, X}));
2586
2587 // Op0 - umax(X, Op0) --> 0 - usub.sat(X, Op0)
2588 if (match(Op1, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op0))))) {
2589 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op0});
2590 return BinaryOperator::CreateNeg(USub);
2591 }
2592
2593 // umin(X, Op1) - Op1 --> 0 - usub.sat(Op1, X)
2594 if (match(Op0, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op1))))) {
2595 Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op1, X});
2596 return BinaryOperator::CreateNeg(USub);
2597 }
2598
2599 // C - ctpop(X) => ctpop(~X) if C is bitwidth
2600 if (match(Op0, m_SpecificInt(BitWidth)) &&
2601 match(Op1, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(X)))))
2602 return replaceInstUsesWith(
2603 I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
2604 {Builder.CreateNot(X)}));
2605
2606 // Reduce multiplies for difference-of-squares by factoring:
2607 // (X * X) - (Y * Y) --> (X + Y) * (X - Y)
2608 if (match(Op0, m_OneUse(m_Mul(m_Value(X), m_Deferred(X)))) &&
2609 match(Op1, m_OneUse(m_Mul(m_Value(Y), m_Deferred(Y))))) {
2610 auto *OBO0 = cast<OverflowingBinaryOperator>(Op0);
2611 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2612 bool PropagateNSW = I.hasNoSignedWrap() && OBO0->hasNoSignedWrap() &&
2613 OBO1->hasNoSignedWrap() && BitWidth > 2;
2614 bool PropagateNUW = I.hasNoUnsignedWrap() && OBO0->hasNoUnsignedWrap() &&
2615 OBO1->hasNoUnsignedWrap() && BitWidth > 1;
2616 Value *Add = Builder.CreateAdd(X, Y, "add", PropagateNUW, PropagateNSW);
2617 Value *Sub = Builder.CreateSub(X, Y, "sub", PropagateNUW, PropagateNSW);
2618 Value *Mul = Builder.CreateMul(Add, Sub, "", PropagateNUW, PropagateNSW);
2619 return replaceInstUsesWith(I, Mul);
2620 }
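// Difference of squares, sketched in IR (hypothetical values):
//   %xx  = mul i32 %x, %x
//   %yy  = mul i32 %y, %y
//   %res = sub i32 %xx, %yy
// becomes:
//   %sum = add i32 %x, %y
//   %dif = sub i32 %x, %y
//   %res = mul i32 %sum, %dif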
2621
2622 // max(X,Y) nsw/nuw - min(X,Y) --> abs(X nsw - Y)
2623 if (match(Op0, m_OneUse(m_c_SMax(m_Value(X), m_Value(Y)))) &&
2624 match(Op1, m_OneUse(m_c_SMin(m_Specific(X), m_Specific(Y))))) {
2625 if (I.hasNoUnsignedWrap() || I.hasNoSignedWrap()) {
2626 Value *Sub =
2627 Builder.CreateSub(X, Y, "sub", /*HasNUW=*/false, /*HasNSW=*/true);
2628 Value *Call =
2629 Builder.CreateBinaryIntrinsic(Intrinsic::abs, Sub, Builder.getTrue());
2630 return replaceInstUsesWith(I, Call);
2631 }
2632 }
2633
2634 if (Instruction *Res = foldBinOpOfSelectAndCastOfSelectCondition(I))
2635 return Res;
2636
2637 return TryToNarrowDeduceFlags();
2638}
2639
2640/// This eliminates floating-point negation in either 'fneg(X)' or
2641/// 'fsub(-0.0, X)' form by combining into a constant operand.
2642 static Instruction *foldFNegIntoConstant(Instruction &I, const DataLayout &DL) {
2643 // This is limited with one-use because fneg is assumed better for
2644 // reassociation and cheaper in codegen than fmul/fdiv.
2645 // TODO: Should the m_OneUse restriction be removed?
2646 Instruction *FNegOp;
2647 if (!match(&I, m_FNeg(m_OneUse(m_Instruction(FNegOp)))))
2648 return nullptr;
2649
2650 Value *X;
2651 Constant *C;
2652
2653 // Fold negation into constant operand.
2654 // -(X * C) --> X * (-C)
2655 if (match(FNegOp, m_FMul(m_Value(X), m_Constant(C))))
2656 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
2657 return BinaryOperator::CreateFMulFMF(X, NegC, &I);
2658 // -(X / C) --> X / (-C)
2659 if (match(FNegOp, m_FDiv(m_Value(X), m_Constant(C))))
2660 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
2661 return BinaryOperator::CreateFDivFMF(X, NegC, &I);
2662 // -(C / X) --> (-C) / X
2663 if (match(FNegOp, m_FDiv(m_Constant(C), m_Value(X))))
2664 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) {
2665 Instruction *FDiv = BinaryOperator::CreateFDivFMF(NegC, X, &I);
2666 
2667 // Intersect 'nsz' and 'ninf' because those special value exceptions may
2668 // not apply to the fdiv. Everything else propagates from the fneg.
2669 // TODO: We could propagate nsz/ninf from fdiv alone?
2670 FastMathFlags FMF = I.getFastMathFlags();
2671 FastMathFlags OpFMF = FNegOp->getFastMathFlags();
2672 FDiv->setHasNoSignedZeros(FMF.noSignedZeros() && OpFMF.noSignedZeros());
2673 FDiv->setHasNoInfs(FMF.noInfs() && OpFMF.noInfs());
2674 return FDiv;
2675 }
2676 // With NSZ [ counter-example with -0.0: -(-0.0 + 0.0) != 0.0 + -0.0 ]:
2677 // -(X + C) --> -X + -C --> -C - X
2678 if (I.hasNoSignedZeros() && match(FNegOp, m_FAdd(m_Value(X), m_Constant(C))))
2679 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
2680 return BinaryOperator::CreateFSubFMF(NegC, X, &I);
2681
2682 return nullptr;
2683}
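// E.g. (hypothetical IR): the negation folds into the constant operand:
//   %mul = fmul float %x, 4.0
//   %res = fneg float %mul
// becomes:
//   %res = fmul float %x, -4.0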
2684
2685Instruction *InstCombinerImpl::hoistFNegAboveFMulFDiv(Value *FNegOp,
2686 Instruction &FMFSource) {
2687 Value *X, *Y;
2688 if (match(FNegOp, m_FMul(m_Value(X), m_Value(Y)))) {
2689 return cast<Instruction>(Builder.CreateFMulFMF(
2690 Builder.CreateFNegFMF(X, &FMFSource), Y, &FMFSource));
2691 }
2692
2693 if (match(FNegOp, m_FDiv(m_Value(X), m_Value(Y)))) {
2694 return cast<Instruction>(Builder.CreateFDivFMF(
2695 Builder.CreateFNegFMF(X, &FMFSource), Y, &FMFSource));
2696 }
2697
2698 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(FNegOp)) {
2699 // Make sure to preserve flags and metadata on the call.
2700 if (II->getIntrinsicID() == Intrinsic::ldexp) {
2701 FastMathFlags FMF = FMFSource.getFastMathFlags() | II->getFastMathFlags();
2702 IRBuilderBase::FastMathFlagGuard FMFGuard(Builder);
2703 Builder.setFastMathFlags(FMF);
2704 
2705 CallInst *New = Builder.CreateCall(
2706 II->getCalledFunction(),
2707 {Builder.CreateFNeg(II->getArgOperand(0)), II->getArgOperand(1)});
2708 New->copyMetadata(*II);
2709 return New;
2710 }
2711 }
2712
2713 return nullptr;
2714}
2715
2716 Instruction *InstCombinerImpl::visitFNeg(UnaryOperator &I) {
2717 Value *Op = I.getOperand(0);
2718
2719 if (Value *V = simplifyFNegInst(Op, I.getFastMathFlags(),
2720 getSimplifyQuery().getWithInstruction(&I)))
2721 return replaceInstUsesWith(I, V);
2722
2723 if (Instruction *X = foldFNegIntoConstant(I, DL))
2724 return X;
2725
2726 Value *X, *Y;
2727
2728 // If we can ignore the sign of zeros: -(X - Y) --> (Y - X)
2729 if (I.hasNoSignedZeros() &&
2730 match(Op, m_OneUse(m_FSub(m_Value(X), m_Value(Y)))))
2731 return BinaryOperator::CreateFSubFMF(Y, X, &I);
2732 
2733 Value *OneUse;
2734 if (!match(Op, m_OneUse(m_Value(OneUse))))
2735 return nullptr;
2736
2737 if (Instruction *R = hoistFNegAboveFMulFDiv(OneUse, I))
2738 return replaceInstUsesWith(I, R);
2739
2740 // Try to eliminate fneg if at least 1 arm of the select is negated.
2741 Value *Cond;
2742 if (match(OneUse, m_Select(m_Value(Cond), m_Value(X), m_Value(Y)))) {
2743 // Unlike most transforms, this one is not safe to propagate nsz unless
2744 // it is present on the original select. We union the flags from the select
2745 // and fneg and then remove nsz if needed.
2746 auto propagateSelectFMF = [&](SelectInst *S, bool CommonOperand) {
2747 S->copyFastMathFlags(&I);
2748 if (auto *OldSel = dyn_cast<SelectInst>(Op)) {
2749 FastMathFlags FMF = I.getFastMathFlags() | OldSel->getFastMathFlags();
2750 S->setFastMathFlags(FMF);
2751 if (!OldSel->hasNoSignedZeros() && !CommonOperand &&
2752 !isGuaranteedNotToBeUndefOrPoison(OldSel->getCondition()))
2753 S->setHasNoSignedZeros(false);
2754 }
2755 };
2756 // -(Cond ? -P : Y) --> Cond ? P : -Y
2757 Value *P;
2758 if (match(X, m_FNeg(m_Value(P)))) {
2759 Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");
2760 SelectInst *NewSel = SelectInst::Create(Cond, P, NegY);
2761 propagateSelectFMF(NewSel, P == Y);
2762 return NewSel;
2763 }
2764 // -(Cond ? X : -P) --> Cond ? -X : P
2765 if (match(Y, m_FNeg(m_Value(P)))) {
2766 Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");
2767 SelectInst *NewSel = SelectInst::Create(Cond, NegX, P);
2768 propagateSelectFMF(NewSel, P == X);
2769 return NewSel;
2770 }
2771
2772 // -(Cond ? X : C) --> Cond ? -X : -C
2773 // -(Cond ? C : Y) --> Cond ? -C : -Y
2774 if (match(X, m_ImmConstant()) || match(Y, m_ImmConstant())) {
2775 Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");
2776 Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");
2777 SelectInst *NewSel = SelectInst::Create(Cond, NegX, NegY);
2778 propagateSelectFMF(NewSel, /*CommonOperand=*/true);
2779 return NewSel;
2780 }
2781 }
2782
2783 // fneg (copysign x, y) -> copysign x, (fneg y)
2784 if (match(OneUse, m_CopySign(m_Value(X), m_Value(Y)))) {
2785 // The source copysign has an additional value input, so we can't propagate
2786 // flags the copysign doesn't also have.
2787 FastMathFlags FMF = I.getFastMathFlags();
2788 FMF &= cast<FPMathOperator>(OneUse)->getFastMathFlags();
2789
2790 IRBuilderBase::FastMathFlagGuard FMFGuard(Builder);
2791 Builder.setFastMathFlags(FMF);
2792 
2793 Value *NegY = Builder.CreateFNeg(Y);
2794 Value *NewCopySign = Builder.CreateCopySign(X, NegY);
2795 return replaceInstUsesWith(I, NewCopySign);
2796 }
2797
2798 return nullptr;
2799}
2800
2801 Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) {
2802 if (Value *V = simplifyFSubInst(I.getOperand(0), I.getOperand(1),
2803 I.getFastMathFlags(),
2804 getSimplifyQuery().getWithInstruction(&I)))
2805 return replaceInstUsesWith(I, V);
2806
2807 if (Instruction *X = foldVectorBinop(I))
2808 return X;
2809
2810 if (Instruction *Phi = foldBinopWithPhiOperands(I))
2811 return Phi;
2812
2813 // Subtraction from -0.0 is the canonical form of fneg.
2814 // fsub -0.0, X ==> fneg X
2815 // fsub nsz 0.0, X ==> fneg nsz X
2816 //
2817 // FIXME This matcher does not respect FTZ or DAZ yet:
2818 // fsub -0.0, Denorm ==> +-0
2819 // fneg Denorm ==> -Denorm
2820 Value *Op;
2821 if (match(&I, m_FNeg(m_Value(Op))))
2822 return UnaryOperator::CreateFNegFMF(Op, &I);
2823 
2824 if (Instruction *X = foldFNegIntoConstant(I, DL))
2825 return X;
2826
2827 if (Instruction *R = foldFBinOpOfIntCasts(I))
2828 return R;
2829
2830 Value *X, *Y;
2831 Constant *C;
2832
2833 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2834 // If Op0 is not -0.0 or we can ignore -0.0: Z - (X - Y) --> Z + (Y - X)
2835 // Canonicalize to fadd to make analysis easier.
2836 // This can also help codegen because fadd is commutative.
2837 // Note that if this fsub was really an fneg, the fadd with -0.0 will get
2838 // killed later. We still limit that particular transform with 'hasOneUse'
2839 // because an fneg is assumed better/cheaper than a generic fsub.
2840 if (I.hasNoSignedZeros() ||
2841 cannotBeNegativeZero(Op0, 0, getSimplifyQuery().getWithInstruction(&I))) {
2842 if (match(Op1, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
2843 Value *NewSub = Builder.CreateFSubFMF(Y, X, &I);
2844 return BinaryOperator::CreateFAddFMF(Op0, NewSub, &I);
2845 }
2846 }
2847
2848 // (-X) - Op1 --> -(X + Op1)
2849 if (I.hasNoSignedZeros() && !isa<ConstantExpr>(Op0) &&
2850 match(Op0, m_OneUse(m_FNeg(m_Value(X))))) {
2851 Value *FAdd = Builder.CreateFAddFMF(X, Op1, &I);
2852 return UnaryOperator::CreateFNegFMF(FAdd, &I);
2853 }
2854
2855 if (isa<Constant>(Op0))
2856 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
2857 if (Instruction *NV = FoldOpIntoSelect(I, SI))
2858 return NV;
2859
2860 // X - C --> X + (-C)
2861 // But don't transform constant expressions because there's an inverse fold
2862 // for X + (-Y) --> X - Y.
2863 if (match(Op1, m_ImmConstant(C)))
2864 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
2865 return BinaryOperator::CreateFAddFMF(Op0, NegC, &I);
2866
2867 // X - (-Y) --> X + Y
2868 if (match(Op1, m_FNeg(m_Value(Y))))
2869 return BinaryOperator::CreateFAddFMF(Op0, Y, &I);
2870
2871 // Similar to above, but look through a cast of the negated value:
2872 // X - (fptrunc(-Y)) --> X + fptrunc(Y)
2873 Type *Ty = I.getType();
2874 if (match(Op1, m_OneUse(m_FPTrunc(m_FNeg(m_Value(Y))))))
2875 return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPTrunc(Y, Ty), &I);
2876 
2877 // X - (fpext(-Y)) --> X + fpext(Y)
2878 if (match(Op1, m_OneUse(m_FPExt(m_FNeg(m_Value(Y))))))
2879 return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPExt(Y, Ty), &I);
2880 
2881 // Similar to above, but look through fmul/fdiv of the negated value:
2882 // Op0 - (-X * Y) --> Op0 + (X * Y)
2883 // Op0 - (Y * -X) --> Op0 + (X * Y)
2884 if (match(Op1, m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))))) {
2885 Value *FMul = Builder.CreateFMulFMF(X, Y, &I);
2886 return BinaryOperator::CreateFAddFMF(Op0, FMul, &I);
2887 }
2888 // Op0 - (-X / Y) --> Op0 + (X / Y)
2889 // Op0 - (X / -Y) --> Op0 + (X / Y)
2890 if (match(Op1, m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y)))) ||
2891 match(Op1, m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))))) {
2892 Value *FDiv = Builder.CreateFDivFMF(X, Y, &I);
2893 return BinaryOperator::CreateFAddFMF(Op0, FDiv, &I);
2894 }
2895
2896 // Handle special cases for FSub with selects feeding the operation
2897 if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
2898 return replaceInstUsesWith(I, V);
2899
2900 if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
2901 // (Y - X) - Y --> -X
2902 if (match(Op0, m_FSub(m_Specific(Op1), m_Value(X))))
2903 return UnaryOperator::CreateFNegFMF(X, &I);
2904 
2905 // Y - (X + Y) --> -X
2906 // Y - (Y + X) --> -X
2907 if (match(Op1, m_c_FAdd(m_Specific(Op0), m_Value(X))))
2908 return UnaryOperator::CreateFNegFMF(X, &I);
2909 
2910 // (X * C) - X --> X * (C - 1.0)
2911 if (match(Op0, m_FMul(m_Specific(Op1), m_Constant(C)))) {
2912 if (Constant *CSubOne = ConstantFoldBinaryOpOperands(
2913 Instruction::FSub, C, ConstantFP::get(Ty, 1.0), DL))
2914 return BinaryOperator::CreateFMulFMF(Op1, CSubOne, &I);
2915 }
2916 // X - (X * C) --> X * (1.0 - C)
2917 if (match(Op1, m_FMul(m_Specific(Op0), m_Constant(C)))) {
2918 if (Constant *OneSubC = ConstantFoldBinaryOpOperands(
2919 Instruction::FSub, ConstantFP::get(Ty, 1.0), C, DL))
2920 return BinaryOperator::CreateFMulFMF(Op0, OneSubC, &I);
2921 }
2922
2923 // Reassociate fsub/fadd sequences to create more fadd instructions and
2924 // reduce dependency chains:
2925 // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
2926 Value *Z;
2927 if (match(Op0, m_OneUse(m_c_FAdd(m_OneUse(m_FSub(m_Value(X), m_Value(Y))),
2928 m_Value(Z))))) {
2929 Value *XZ = Builder.CreateFAddFMF(X, Z, &I);
2930 Value *YW = Builder.CreateFAddFMF(Y, Op1, &I);
2931 return BinaryOperator::CreateFSubFMF(XZ, YW, &I);
2932 }
2933
2934 auto m_FaddRdx = [](Value *&Sum, Value *&Vec) {
2935 return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(m_Value(Sum),
2936 m_Value(Vec)));
2937 };
2938 Value *A0, *A1, *V0, *V1;
2939 if (match(Op0, m_FaddRdx(A0, V0)) && match(Op1, m_FaddRdx(A1, V1)) &&
2940 V0->getType() == V1->getType()) {
2941 // Difference of sums is sum of differences:
2942 // add_rdx(A0, V0) - add_rdx(A1, V1) --> add_rdx(A0, V0 - V1) - A1
2943 Value *Sub = Builder.CreateFSubFMF(V0, V1, &I);
2944 Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
2945 {Sub->getType()}, {A0, Sub}, &I);
2946 return BinaryOperator::CreateFSubFMF(Rdx, A1, &I);
2947 }
2948
2949 if (Instruction *F = factorizeFAddFSub(I, Builder))
2950 return F;
2951
2952 // TODO: This performs reassociative folds for FP ops. Some fraction of the
2953 // functionality has been subsumed by simple pattern matching here and in
2954 // InstSimplify. We should let a dedicated reassociation pass handle more
2955 // complex pattern matching and remove this from InstCombine.
2956 if (Value *V = FAddCombine(Builder).simplify(&I))
2957 return replaceInstUsesWith(I, V);
2958
2959 // (X - Y) - Op1 --> X - (Y + Op1)
2960 if (match(Op0, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
2961 Value *FAdd = Builder.CreateFAddFMF(Y, Op1, &I);
2962 return BinaryOperator::CreateFSubFMF(X, FAdd, &I);
2963 }
2964 }
2965
2966 return nullptr;
2967}
This file declares a class to represent arbitrary precision floating point values and provide a variety of arithmetic operations on them.
This file implements a class to represent arbitrary precision integral constant values and operations on them.
This file contains the declarations for the subclasses of Constant, which represent the different flavors of constant values that live in LLVM.
static Instruction * factorizeFAddFSub(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Factor a common operand out of fadd/fsub of fmul/fdiv.
static Instruction * foldAddToAshr(BinaryOperator &Add)
Try to reduce signed division by power-of-2 to an arithmetic shift right.
static bool MatchMul(Value *E, Value *&Op, APInt &C)
static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned)
static Instruction * foldFNegIntoConstant(Instruction &I, const DataLayout &DL)
This eliminates floating-point negation in either 'fneg(X)' or 'fsub(-0.0, X)' form by combining into a constant operand.
static Instruction * combineAddSubWithShlAddSub(InstCombiner::BuilderTy &Builder, const BinaryOperator &I)
static Instruction * factorizeLerp(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Eliminate an op from a linear interpolation (lerp) pattern.
static Instruction * foldSubOfMinMax(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static Instruction * foldBoxMultiply(BinaryOperator &I)
Reduce a sequence of masked half-width multiplies to a single multiply.
static Value * checkForNegativeOperand(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned)
static Instruction * foldNoWrapAdd(BinaryOperator &Add, InstCombiner::BuilderTy &Builder)
Wrapping flags may allow combining constants separated by an extend.
static bool matchesSquareSum(BinaryOperator &I, Mul2Rhs M2Rhs, Value *&A, Value *&B)
static Instruction * factorizeMathWithShlOps(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
This is a specialization of a more general transform from foldUsingDistributiveLaws.
static Instruction * canonicalizeLowbitMask(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Fold (1 << NBits) - 1 Into: ~(-(1 << NBits)) Because a 'not' is better for bit-tracking analysis and other transforms than an 'add'.
static Instruction * foldToUnsignedSaturatedAdd(BinaryOperator &I)
static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
const fltSemantics & getSemantics() const
Definition: APFloat.h:1316
opStatus multiply(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1064
Class for arbitrary precision integers.
Definition: APInt.h:77
APInt umul_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1941
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
Definition: APInt.h:428
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition: APInt.h:402
APInt trunc(unsigned width) const
Truncate to new width.
Definition: APInt.cpp:906
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition: APInt.h:185
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:359
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
Definition: APInt.h:445
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1447
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:308
int32_t exactLogBase2() const
Definition: APInt.h:1740
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition: APInt.h:1597
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition: APInt.h:1556
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition: APInt.h:198
unsigned logBase2() const
Definition: APInt.h:1718
APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1930
bool isMask(unsigned numBits) const
Definition: APInt.h:467
APInt sext(unsigned width) const
Sign extend to a new width.
Definition: APInt.cpp:954
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition: APInt.h:1236
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:419
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:275
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
Definition: APInt.h:1216
static BinaryOperator * CreateFAddFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
Definition: InstrTypes.h:324
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and XOR.
static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
Definition: InstrTypes.h:332
static BinaryOperator * CreateNot(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
Definition: InstrTypes.h:336
static BinaryOperator * CreateFSubFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
Definition: InstrTypes.h:328
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's constructor.
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a Trunc or BitCast cast instruction.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:993
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:1016
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:1020
@ ICMP_EQ
equal
Definition: InstrTypes.h:1014
@ ICMP_NE
not equal
Definition: InstrTypes.h:1015
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2663
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2656
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
const APFloat & getValueAPF() const
Definition: Constants.h:312
bool isZero() const
Return true if the value is positive or negative zero.
Definition: Constants.h:316
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition: Constants.h:124
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:417
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
This class represents an Operation in the Expression.
A parsed version of the target data layout string and methods for querying it.
Definition: DataLayout.h:110
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
bool noSignedZeros() const
Definition: FMF.h:68
bool noInfs() const
Definition: FMF.h:67
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
Definition: Operator.h:413
Value * CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource, const Twine &Name="")
Copy fast-math-flags from an instruction rather than using the builder's default FMF.
Definition: IRBuilder.h:1547
Value * CreateSRem(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1410
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Definition: IRBuilder.cpp:921
Value * CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource, const Twine &Name="")
Copy fast-math-flags from an instruction rather than using the builder's default FMF.
Definition: IRBuilder.h:1601
Value * CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource, const Twine &Name="")
Copy fast-math-flags from an instruction rather than using the builder's default FMF.
Definition: IRBuilder.h:1574
Value * CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource, const Twine &Name="")
Copy fast-math-flags from an instruction rather than using the builder's default FMF.
Definition: IRBuilder.h:1628
Value * CreateFPTrunc(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2102
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
Definition: IRBuilder.h:2040
ConstantInt * getTrue()
Get the constant value for i1 true.
Definition: IRBuilder.h:466
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:932
Value * CreateFNegFMF(Value *V, Instruction *FMFSource, const Twine &Name="")
Copy fast-math-flags from an instruction rather than using the builder's default FMF.
Definition: IRBuilder.h:1740
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2034
Value * CreateIsNotNeg(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg > -1.
Definition: IRBuilder.h:2560
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition: IRBuilder.h:311
Value * CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1340
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
Definition: IRBuilder.h:1721
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1749
InstTy * Insert(InstTy *I, const Twine &Name="") const
Insert and return the specified instruction.
Definition: IRBuilder.h:145
Value * CreateIsNeg(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg < 0.
Definition: IRBuilder.h:2555
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1344
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1416
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2022
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1475
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1327
ConstantInt * getFalse()
Get the constant value for i1 false.
Definition: IRBuilder.h:471
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition: IRBuilder.h:2550
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1497
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1666
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2197
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2413
Value * CreateFPExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2111
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1519
Value * CreateFNeg(Value *V, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1730
Value * CreateURem(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1404
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1361
Value * CreateCopySign(Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create call to the copysign intrinsic.
Definition: IRBuilder.h:1022
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand, try to fold the binary operator into the select arguments.
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * visitAdd(BinaryOperator &I)
Instruction * canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(BinaryOperator &I)
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper around FoldOpIntoSelect and foldOpIntoPhi.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * foldSquareSumInt(BinaryOperator &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0, see if we can fold the instruction into the PHI (which is only possible if all operands to the PHI are constants).
Instruction * foldSquareSumFP(BinaryOperator &I)
Instruction * visitSub(BinaryOperator &I)
Value * OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty, bool isNUW)
Optimize the difference between two pointers into the same array into an integer size (offset).
Instruction * visitFAdd(BinaryOperator &I)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Value * SimplifyAddWithRemainder(BinaryOperator &I)
Tries to simplify add operations using the definition of remainder.
Instruction * foldAddWithConstant(BinaryOperator &Add)
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
Instruction * visitFNeg(UnaryOperator &I)
Instruction * visitFSub(BinaryOperator &I)
SimplifyQuery SQ
Definition: InstCombiner.h:76
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
Definition: InstCombiner.h:232
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
Definition: InstCombiner.h:386
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
Definition: InstCombiner.h:180
const DataLayout & DL
Definition: InstCombiner.h:75
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
Definition: InstCombiner.h:452
AssumptionCache & AC
Definition: InstCombiner.h:72
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
Definition: InstCombiner.h:410
DominatorTree & DT
Definition: InstCombiner.h:74
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
Definition: InstCombiner.h:431
BuilderTy & Builder
Definition: InstCombiner.h:60
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth=0, const Instruction *CxtI=nullptr) const
Definition: InstCombiner.h:447
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
Definition: InstCombiner.h:213
const SimplifyQuery & getSimplifyQuery() const
Definition: InstCombiner.h:342
static Constant * AddOne(Constant *C)
Add one to a Constant.
Definition: InstCombiner.h:175
void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
void copyFastMathFlags(FastMathFlags FMF)
Convenience function for transferring all fast-math flag values to this instruction, which must be an operator which supports these flags.
void setHasNoSignedZeros(bool B)
Set or clear the no-signed-zeros flag on this instruction, which must be an operator which supports this flag.
void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an operator which supports these flags.
void setHasNoInfs(bool B)
Set or clear the no-infs flag on this instruction, which must be an operator which supports this flag.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports these flags.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:451
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
static Value * Negate(bool LHSIsZero, bool IsNSW, Value *Root, InstCombinerImpl &IC)
Attempt to negate Root.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition: Operator.h:77
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition: Operator.h:110
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition: Operator.h:104
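A hedged sketch of querying wrap flags through the OverflowingBinaryOperator view rather than a concrete Instruction, so the same code works across the Operator hierarchy. isNSWAdd is a hypothetical helper:

#include "llvm/IR/Operator.h"
using namespace llvm;

// True when V is an 'add' operator carrying the nsw flag.
bool isNSWAdd(const Value *V) {
  if (const auto *OBO = dyn_cast<OverflowingBinaryOperator>(V))
    return OBO->getOpcode() == Instruction::Add && OBO->hasNoSignedWrap();
  return false;
}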
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr, BasicBlock::iterator InsertBefore, Instruction *MDFrom=nullptr)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name, BasicBlock::iterator InsertBefore)
Definition: InstrTypes.h:191
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
Definition: Value.cpp:153
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:693
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
This class represents zero extension of integer types.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1469
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
Definition: PatternMatch.h:524
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:100
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
Definition: PatternMatch.h:619
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:165
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
Definition: PatternMatch.h:972
BinaryOp_match< LHS, RHS, Instruction::FMul > m_FMul(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
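A hedged sketch of the match() idiom that the visit functions in this file use throughout: recognize "(~X) + 1", the xor/add spelling of negation. isNegViaNotPlusOne is a hypothetical helper:

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Binds X on success; m_Not matches 'xor X, -1' and m_One a constant 1.
bool isNegViaNotPlusOne(Value *V, Value *&X) {
  return match(V, m_Add(m_Not(m_Value(X)), m_One()));
}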
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
Definition: PatternMatch.h:816
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
Definition: PatternMatch.h:764
specificval_ty m_Specific(const Value *V)
Match only the specified value V.
Definition: PatternMatch.h:875
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
Definition: PatternMatch.h:980
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
Definition: PatternMatch.h:592
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
match_combine_or< CastInst_match< OpTy, SExtInst >, OpTy > m_SExtOrSelf(const OpTy &Op)
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
Definition: PatternMatch.h:918
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
Definition: PatternMatch.h:245
CastOperator_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match() expression.
Definition: PatternMatch.h:893
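A hedged sketch of why m_Deferred exists: matching "add X, X" must compare the second operand against the value bound by m_Value(X) earlier in the same match() call, which m_Specific (whose argument is read when the pattern is built) cannot do. isSelfAdd is hypothetical:

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// True for 'add X, X'; X is bound on success.
bool isSelfAdd(Value *V, Value *&X) {
  return match(V, m_Add(m_Value(X), m_Deferred(X)));
}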
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
Definition: PatternMatch.h:599
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > m_c_SMin(const LHS &L, const RHS &R)
Matches an SMin with LHS and RHS in either order.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
Definition: PatternMatch.h:854
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true > m_c_UMax(const LHS &L, const RHS &R)
Matches a UMax with LHS and RHS in either order.
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match an integer or vector negated power-of-2.
Definition: PatternMatch.h:627
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
Definition: PatternMatch.h:921
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > m_c_UMin(const LHS &L, const RHS &R)
Matches a UMin with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches an Add with LHS and RHS in either order.
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true > m_c_SMax(const LHS &L, const RHS &R)
Matches an SMax with LHS and RHS in either order.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
match_combine_or< CastOperator_match< OpTy, Instruction::Trunc >, OpTy > m_TruncOrSelf(const OpTy &Op)
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
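A hedged sketch of m_APInt, which binds the constant operand (including splat vector constants) so it can be inspected as an APInt. isAddOfSignMask is a hypothetical helper:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// True for 'add X, C' where C has only the sign bit set.
bool isAddOfSignMask(Value *V, Value *&X) {
  const APInt *C;
  return match(V, m_Add(m_Value(X), m_APInt(C))) && C->isSignMask();
}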
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)
Matches FAdd with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FDiv > m_FDiv(const LHS &L, const RHS &R)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat.
Definition: PatternMatch.h:316
CastInst_match< OpTy, FPTruncInst > m_FPTrunc(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_CopySign(const Opnd0 &Op0, const Opnd1 &Op1)
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:239
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
Definition: PatternMatch.h:698
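A hedged sketch of m_SpecificInt_ICMP: match a left shift whose constant amount is unsigned-less-than 8. The threshold APInt must have the same bit width as the matched constant, so the width is taken as a parameter here; isShlByLessThan8 is hypothetical:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

bool isShlByLessThan8(Value *V, Value *&X, unsigned BitWidth) {
  return match(V, m_Shl(m_Value(X), m_SpecificInt_ICMP(ICmpInst::ICMP_ULT,
                                                       APInt(BitWidth, 8))));
}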
bool haveNoCommonBitsSet(const WithCache< const Value * > &LHSCache, const WithCache< const Value * > &RHSCache, const SimplifyQuery &SQ)
Return true if LHS and RHS have no common bits set.
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition: MathExtras.h:153
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
std::string & operator+=(std::string &buffer, StringRef string)
Definition: StringRef.h:900
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
Value * simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
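A hedged sketch of calling the simplifier before building an instruction, the pattern visitAdd itself follows first; foldAddIfPossible is a hypothetical helper:

#include "llvm/Analysis/InstructionSimplify.h"
using namespace llvm;

// Returns a simplified value for 'L + R', or nullptr when none is known.
Value *foldAddIfPossible(Value *L, Value *R, const DataLayout &DL) {
  return simplifyAddInst(L, R, /*IsNSW=*/false, /*IsNUW=*/false,
                         SimplifyQuery(DL));
}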
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q)
Given operand for an FNeg, fold the result or return null.
Value * simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, fold the result or return null.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
Value * simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, fold the result or return null.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1736
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
@ Mul
Product of integers.
@ FMul
Product of floats.
@ And
Bitwise or logical AND of integers.
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Add
Sum of integers.
@ FAdd
Sum of floats.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
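A hedged sketch of the known-bits query: test whether the low bit of a value is provably zero, i.e. the value is known even. isKnownEven is a hypothetical helper:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

bool isKnownEven(const Value *V, const DataLayout &DL) {
  KnownBits Known(V->getType()->getScalarSizeInBits());
  computeKnownBits(V, Known, DL);
  return Known.Zero[0]; // A bit set in Zero is known to be 0 in V.
}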
RoundingMode
Rounding mode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
bool cannotBeNegativeZero(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if we can prove that the specified FP value is never equal to -0.0.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
A suitably aligned and sized character array member which can hold elements of any type.
Definition: AlignOf.h:27
SimplifyQuery getWithInstruction(const Instruction *I) const
Definition: SimplifyQuery.h:96
SimplifyQuery getWithoutDomCondCache() const