using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"
  // The coefficient of an FAddend: a small integer kept inline when
  // possible, otherwise an APFloat constructed in the FpValBuf buffer.
  FAddendCoef() = default;
  // ...
  void operator=(const FAddendCoef &A);
  // ...
  void operator*=(const FAddendCoef &S);

  void set(short C) {
    assert(!insaneIntVal(C) && "Insane coefficient");
    // ...
  }

  bool isOne() const { return isInt() && IntVal == 1; }
  bool isTwo() const { return isInt() && IntVal == 2; }
  bool isMinusOne() const { return isInt() && IntVal == -1; }
  bool isMinusTwo() const { return isInt() && IntVal == -2; }

  // The integer fast path only tracks the handful of small coefficients the
  // combiner actually cares about.
  bool insaneIntVal(int V) { return V > 4 || V < -4; }

  APFloat *getFpValPtr() { return reinterpret_cast<APFloat *>(&FpValBuf); }
  const APFloat *getFpValPtr() const {
    return reinterpret_cast<const APFloat *>(&FpValBuf);
  }

  const APFloat &getFpVal() const {
    assert(IsFp && BufHasFpVal && "Incorrect state");
    return *getFpValPtr();
  }
  APFloat &getFpVal() {
    assert(IsFp && BufHasFpVal && "Incorrect state");
    return *getFpValPtr();
  }

  bool isInt() const { return !IsFp; }
  // ...
  bool BufHasFpVal = false;
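To make the two representations concrete, here is a minimal standalone sketch of the same idea (illustrative names only; a plain double stands in for the APFloat buffer, so this is not the LLVM class):

#include <cassert>
#include <cstdio>

// Illustrative stand-in for FAddendCoef: small integers stay exact,
// anything else is held as a floating-point value.
struct Coef {
  bool IsFp = false;
  int IntVal = 0;
  double FpVal = 0.0; // stands in for the APFloat buffer

  void set(int C) { assert(C <= 4 && C >= -4); IsFp = false; IntVal = C; }
  void set(double C) { IsFp = true; FpVal = C; }
  bool isInt() const { return !IsFp; }
  double value() const { return IsFp ? FpVal : IntVal; }
};

int main() {
  Coef A, B;
  A.set(2);   // integer fast path
  B.set(0.5); // floating-point fallback
  std::printf("%g %g\n", A.value(), B.value()); // prints: 2 0.5
}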
  // An FAddend is one term of an expression in "coefficient * symbolic
  // value" form; a null symbolic value marks a constant addend.
  void operator+=(const FAddend &T) {
    assert((Val == T.Val) && "Symbolic-values disagree");
    Coeff += T.Coeff;
  }

  Value *getSymVal() const { return Val; }
  const FAddendCoef &getCoef() const { return Coeff; }

  bool isConstant() const { return Val == nullptr; }
  bool isZero() const { return Coeff.isZero(); }

  void set(short Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }
  // The overloads taking floating-point coefficients do the same:
  //   Coeff.set(Coefficient);
  //   Val = V;

  void negate() { Coeff.negate(); }

  /// Break an addend down one level, e.g. "(3 * x) + y" into "x" and "y".
  static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);
  unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;

  void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }

  Value *Val = nullptr;
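As a rough illustration of the representation (a sketch, not LLVM API; strings stand in for Value*), the expression x - 2*y + 3.5 flattens into the addends {+1, x}, {-2, y}, and a constant addend whose symbol is null:

#include <cstdio>

// Illustrative {coefficient, symbol} pair; Sym == nullptr marks a constant.
struct Addend {
  double Coef;
  const char *Sym; // stands in for Value*
};

int main() {
  // x - 2*y + 3.5 as a flat addend list:
  Addend Addends[] = {{1, "x"}, {-2, "y"}, {3.5, nullptr}};
  for (const Addend &A : Addends)
    std::printf("%+g * %s\n", A.Coef, A.Sym ? A.Sym : "(const)");
}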
  // FAddCombine - Reassociates and simplifies a chain of fadd/fsub
  // instructions expressed as a vector of FAddends.
  Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);
  // ...
  /// Convert an addend into an LLVM Value, reporting via NeedNeg when the
  /// caller still has to negate the result.
  Value *createAddendVal(const FAddend &A, bool& NeedNeg);

  /// Return the number of instructions needed to emit the N-ary addition.
  unsigned calcInstrNumber(const AddendVect& Vect);
  // ...
  Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
  void createInstPostProc(Instruction *NewInst, bool NoNumber = false);

  // Debug-only bookkeeping for the number of instructions created.
#ifndef NDEBUG
  unsigned CreateInstrNum;
  void initCreateInstNum() { CreateInstrNum = 0; }
  void incCreateInstNum() { CreateInstrNum++; }
#else
  void initCreateInstNum() {}
  void incCreateInstNum() {}
#endif
FAddendCoef::~FAddendCoef() {
  if (BufHasFpVal)
    getFpValPtr()->~APFloat();
}

// FAddendCoef::set(const APFloat &C) placement-constructs the value in
// FpValBuf and then records that the buffer is live:
  IsFp = BufHasFpVal = true;

void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
  // ...
  IsFp = BufHasFpVal = true;
}
void FAddendCoef::operator=(const FAddendCoef &That) {
  if (That.isInt())
    set(That.IntVal);
  else
    set(That.getFpVal());
}

void FAddendCoef::operator+=(const FAddendCoef &That) {
  RoundingMode RndMode = RoundingMode::NearestTiesToEven;
  if (isInt() == That.isInt()) {
    if (isInt())
      IntVal += That.IntVal;
    else
      getFpVal().add(That.getFpVal(), RndMode);
    return;
  }

  if (isInt()) {
    const APFloat &T = That.getFpVal();
    convertToFpType(T.getSemantics());
    getFpVal().add(T, RndMode);
    return;
  }

  APFloat &T = getFpVal();
  T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
}
void FAddendCoef::operator*=(const FAddendCoef &That) {
  if (That.isOne())
    return;

  if (That.isMinusOne()) {
    negate();
    return;
  }

  if (isInt() && That.isInt()) {
    int Res = IntVal * (int)That.IntVal;
    assert(!insaneIntVal(Res) && "Insane int value");
    IntVal = Res;
    return;
  }

  const fltSemantics &Semantic =
      isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();

  if (isInt())
    convertToFpType(Semantic);
  APFloat &F0 = getFpVal();

  if (That.isInt())
    F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
                APFloat::rmNearestTiesToEven);
  else
    F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
}

void FAddendCoef::negate() {
  if (isInt())
    IntVal = 0 - IntVal;
  else
    getFpVal().changeSign();
}
Value *FAddendCoef::getValue(Type *Ty) const {
  return isInt() ? ConstantFP::get(Ty, float(IntVal))
                 : ConstantFP::get(Ty->getContext(), getFpVal());
}
// Break *Val into at most two addends. Examples:
//   A + B  -->  <1, A>, <1, B>
//   A - B  -->  <1, A>, <-1, B>
//   C * A  -->  <C, A>            (C is a floating-point constant)
unsigned FAddend::drillValueDownOneStep
  (Value *Val, FAddend &Addend0, FAddend &Addend1) {
  Instruction *I = nullptr;
  if (!Val || !(I = dyn_cast<Instruction>(Val)))
    return 0;

  unsigned Opcode = I->getOpcode();

  if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
    ConstantFP *C0, *C1;
    Value *Opnd0 = I->getOperand(0);
    Value *Opnd1 = I->getOperand(1);
    if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
      Opnd0 = nullptr;

    if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
      Opnd1 = nullptr;

    if (Opnd0) {
      if (!C0)
        Addend0.set(1, Opnd0);
      else
        Addend0.set(C0, nullptr);
    }

    if (Opnd1) {
      FAddend &Addend = Opnd0 ? Addend1 : Addend0;
      if (!C1)
        Addend.set(1, Opnd1);
      else
        Addend.set(C1, nullptr);
      // Subtraction flips the sign of the second addend.
      if (Opcode == Instruction::FSub)
        Addend.negate();
    }

    if (Opnd0 || Opnd1)
      return Opnd0 && Opnd1 ? 2 : 1;
    // ...
  }

  if (I->getOpcode() == Instruction::FMul) {
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    // ... (a constant multiplier becomes the coefficient of the other operand)
  }

  return 0;
}
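The drill-down step is just sign bookkeeping. A hedged standalone sketch of the fsub case (assumed names, plain doubles, no LLVM types): "a fsub b" yields the addends {+1, a} and {-1, b}, while fadd would leave both coefficients at +1:

#include <cstdio>

struct Addend { double Coef; const char *Sym; };

// One decomposition step for "a fsub b": {1, a} and {-1, b}.
int drillFSub(const char *A, const char *B, Addend &Out0, Addend &Out1) {
  Out0 = {1.0, A};
  Out1 = {-1.0, B}; // FSub negates the second addend
  return 2;         // number of addends produced
}

int main() {
  Addend A0, A1;
  int N = drillFSub("a", "b", A0, A1);
  std::printf("%d addends: %+g*%s %+g*%s\n", N, A0.Coef, A0.Sym, A1.Coef, A1.Sym);
}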
unsigned FAddend::drillAddendDownOneStep
  (FAddend &Addend0, FAddend &Addend1) const {
  unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
  if (!BreakNum || Coeff.isOne())
    return BreakNum;

  // Propagate this addend's coefficient into the pieces, so that
  // "<2, 3*x + y>" becomes "<6, x>, <2, y>".
  Addend0.Scale(Coeff);
  if (BreakNum == 2)
    Addend1.Scale(Coeff);

  return BreakNum;
}
Value *FAddCombine::simplify(Instruction *I) {
  assert(I->hasAllowReassoc() && I->hasNoSignedZeros() &&
         "Expected 'reassoc'+'nsz' instruction");

  // Currently we are not able to handle vector type.
  if (I->getType()->isVectorTy())
    return nullptr;

  assert((I->getOpcode() == Instruction::FAdd ||
          I->getOpcode() == Instruction::FSub) && "Expect add/sub");

  // Save the instruction before calling other member functions.
  Instr = I;

  FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;

  unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);

  // Step 1: Expand the first addend into Opnd0_0 and Opnd0_1.
  unsigned Opnd0_ExpNum = 0;
  unsigned Opnd1_ExpNum = 0;

  if (!Opnd0.isConstant())
    Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);

  // Step 2: Expand the second addend into Opnd1_0 and Opnd1_1.
  if (OpndNum == 2 && !Opnd1.isConstant())
    Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);

  // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1.
  if (Opnd0_ExpNum && Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0_0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    // The rewrite is profitable only if it saves instructions, so compute
    // how many new instructions we may spend.
    unsigned InstQuota = 0;

    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
                 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;

    if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
      return R;
  }

  if (OpndNum != 2) {
    // The input instruction is "I = 0.0 +/- V". If "V" could not usefully be
    // broken down, the only simplification is the trivial one.
    const FAddendCoef &CE = Opnd0.getCoef();
    return CE.isOne() ? Opnd0.getSymVal() : nullptr;
  }

  // Step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1].
  if (Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1].
  if (Opnd0_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd1);
    AllOpnds.push_back(&Opnd0_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  return nullptr;
}
Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
  unsigned AddendNum = Addends.size();
  assert(AddendNum <= 4 && "Too many addends");

  // For saving intermediate results.
  unsigned NextTmpIdx = 0;
  FAddend TmpResult[3];

  // Simplified addends are placed in <SimpVect>.
  AddendVect SimpVect;

  // The outer loop works on one symbolic-value at a time. Suppose the input
  // addends are: <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
  // The symbolic-values will be processed in this order: x, y, z.
  for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {
    const FAddend *ThisAddend = Addends[SymIdx];
    if (!ThisAddend)
      continue; // This addend was processed before.

    Value *Val = ThisAddend->getSymVal();
    // ...
    unsigned StartIdx = SimpVect.size();
    SimpVect.push_back(ThisAddend);

    // The inner loop collects addends sharing the same symbolic-value; they
    // are folded into a single addend below.
    for (unsigned SameSymIdx = SymIdx + 1;
         SameSymIdx < AddendNum; SameSymIdx++) {
      const FAddend *T = Addends[SameSymIdx];
      if (T && T->getSymVal() == Val) {
        // Clear the entry so the outer loop will not visit it again.
        Addends[SameSymIdx] = nullptr;
        SimpVect.push_back(T);
      }
    }

    // If multiple addends share the same symbolic value, fold them together.
    if (StartIdx + 1 != SimpVect.size()) {
      FAddend &R = TmpResult[NextTmpIdx ++];
      R = *SimpVect[StartIdx];
      for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
        R += *SimpVect[Idx];

      // Pop all the addends being folded and push the resulting folded
      // addend (unless the coefficients cancelled to zero).
      SimpVect.resize(StartIdx);
      if (!R.isZero())
        SimpVect.push_back(&R);
    }
  }

  assert((NextTmpIdx <= array_lengthof(TmpResult) + 1) &&
         "out-of-bound access");

  Value *Result = nullptr;
  if (!SimpVect.empty())
    Result = createNaryFAdd(SimpVect, InstrQuota);
  // ... (if every addend cancelled, the result is the constant 0.0)

  return Result;
}
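The merging loop is, in effect, a small sum-by-key pass: addends sharing a symbolic value have their coefficients added, and zero sums drop out. A standalone sketch of the same idea (assumed types; a std::map instead of the in-place null-marking the real code uses):

#include <cstdio>
#include <map>
#include <string>
#include <vector>

int main() {
  // 2x + y + (-2)x  -->  y  (the x terms cancel)
  std::vector<std::pair<std::string, double>> Addends = {
      {"x", 2}, {"y", 1}, {"x", -2}};

  std::map<std::string, double> Sum;
  for (const auto &[Sym, Coef] : Addends)
    Sum[Sym] += Coef; // fold addends with the same symbolic value

  for (const auto &[Sym, Coef] : Sum)
    if (Coef != 0)    // a zero coefficient drops the addend entirely
      std::printf("%+g * %s\n", Coef, Sym.c_str());
}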
Value *FAddCombine::createNaryFAdd
  (const AddendVect &Opnds, unsigned InstrQuota) {
  assert(!Opnds.empty() && "Expect at least one addend");

  // Step 1: Check if the number of instructions needed exceeds the quota.
  unsigned InstrNeeded = calcInstrNumber(Opnds);
  if (InstrNeeded > InstrQuota)
    return nullptr;

  initCreateInstNum();

  // Step 2: Emit the N-ary addition.
  Value *LastVal = nullptr;
  bool LastValNeedNeg = false;

  // Iterate the addends, creating fadd/fsub using adjacent two addends.
  for (const FAddend *Opnd : Opnds) {
    bool NeedNeg;
    Value *V = createAddendVal(*Opnd, NeedNeg);
    if (!LastVal) {
      LastVal = V;
      LastValNeedNeg = NeedNeg;
      continue;
    }

    if (LastValNeedNeg == NeedNeg) {
      LastVal = createFAdd(LastVal, V);
      continue;
    }

    if (LastValNeedNeg)
      LastVal = createFSub(V, LastVal);
    else
      LastVal = createFSub(LastVal, V);

    LastValNeedNeg = false;
  }

  if (LastValNeedNeg) {
    LastVal = createFNeg(LastVal);
  }

#ifndef NDEBUG
  assert(CreateInstrNum == InstrNeeded &&
         "Inconsistent in instruction numbers");
#endif

  return LastVal;
}

// The createFSub/createFAdd/createFMul helpers emit the operation through
// the IRBuilder and finish with:
//   createInstPostProc(I);
// createFNeg does the same but suppresses the instruction count, since the
// negation is expected to fold away:
//   createInstPostProc(I, true);
void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
  // ... (propagates the debug location and fast-math flags of the original
  // instruction onto NewInstr, and counts it unless NoNumber is set)
}

// Return the number of instructions needed to emit the N-ary addition.
// NOTE: Keep this function in sync with createAddendVal().
unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
  unsigned OpndNum = Opnds.size();
  unsigned InstrNeeded = OpndNum - 1;

  for (const FAddend *Opnd : Opnds) {
    if (Opnd->isConstant())
      continue;

    // The constant check above is really for a few special constant
    // coefficients.
    if (isa<UndefValue>(Opnd->getSymVal()))
      continue;

    const FAddendCoef &CE = Opnd->getCoef();
    // Let the addend be "c * x". If "c == +/-1", the value of the addend is
    // immediately available; otherwise it needs exactly one instruction to
    // evaluate.
    if (!CE.isMinusOne() && !CE.isOne())
      InstrNeeded++;
  }
  return InstrNeeded;
}
// Input addend:  <Coeff, SymVal>
// Output:        the Value computing "Coeff * SymVal", plus NeedNeg telling
//                the caller whether the result still has to be negated.
Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
  const FAddendCoef &Coeff = Opnd.getCoef();

  if (Opnd.isConstant()) {
    NeedNeg = false;
    return Coeff.getValue(Instr->getType());
  }

  Value *OpndVal = Opnd.getSymVal();

  if (Coeff.isMinusOne() || Coeff.isOne()) {
    NeedNeg = Coeff.isMinusOne();
    return OpndVal;
  }

  if (Coeff.isTwo() || Coeff.isMinusTwo()) {
    NeedNeg = Coeff.isMinusTwo();
    // Emit "x + x" instead of "2 * x".
    return createFAdd(OpndVal, OpndVal);
  }

  NeedNeg = false;
  return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}
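The coefficient-to-code mapping can be summarized outside LLVM: +/-1 needs no multiply (only possibly a negation), +/-2 becomes an add of the value with itself, and everything else pays for one multiply. A sketch under those assumptions (names and string output are illustrative, not the createAddendVal API):

#include <cstdio>
#include <string>

// What expression does coefficient C produce for addend "C * x"?
std::string emit(int C, const std::string &X, bool &NeedNeg) {
  NeedNeg = (C < 0);
  if (C == 1 || C == -1) return X;             // free: reuse x
  if (C == 2 || C == -2) return X + " + " + X; // one fadd, no fmul
  NeedNeg = false;
  return std::to_string(C) + " * " + X;        // general case: one fmul
}

int main() {
  bool Neg;
  for (int C : {1, -1, 2, -2, 3}) {
    std::string E = emit(C, "x", Neg);
    std::printf("%2d * x  ->  %s(%s)\n", C, Neg ? "-" : "", E.c_str());
  }
}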
// checkForNegativeOperand - Detect patterns where one add operand is really
// the negation of a simpler value, so the add can become a sub.
static Value *checkForNegativeOperand(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // ...
  Value *X = nullptr, *Y = nullptr, *Z = nullptr;
  const APInt *C1 = nullptr, *C2 = nullptr;
  // ...
  // Restore LHS and RHS.
  LHS = I.getOperand(0);
  RHS = I.getOperand(1);
  // ...
  // One of the matched patterns is guarded on the parity of C1:
  if (C1->countTrailingZeros() == 0)
  // ...
}
// Wrapping flags may allow combining constants separated by an extend.
static Instruction *foldNoWrapAdd(BinaryOperator &Add,
                                  InstCombiner::BuilderTy &Builder) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  Type *Ty = Add.getType();
  // ...
  // One sign-extension fold applies only when the outer constant is negative
  // and dominated by the sign-extended inner constant:
  //   ... C1->isNegative() && C1->sge(-C2->sext(C1->getBitWidth())) ...
  // ...
}

Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  // ...
  // zext(bool) + C --> bool ? C + 1 : C
  if (match(Op0, m_ZExt(m_Value(X))) &&
      X->getType()->getScalarSizeInBits() == 1)
    // ...
  // sext(bool) + C --> bool ? C - 1 : C
  if (match(Op0, m_SExt(m_Value(X))) &&
      X->getType()->getScalarSizeInBits() == 1)
    // ...

  // (X | C2) + C --> (X | C2) ^ C2  iff  C2 == -C
    return BinaryOperator::CreateXor(Op0, ConstantInt::get(Add.getType(), *C2));

  if (C->isSignMask()) {
    // If wrapping is not allowed, then the addition must set the sign bit:
    // X + (signmask) --> X | signmask
    if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap())
      return BinaryOperator::CreateOr(Op0, Op1);

    // If wrapping is allowed, then the addition flips the sign bit of LHS:
    // X + (signmask) --> X ^ signmask
    return BinaryOperator::CreateXor(Op0, Op1);
  }

  Type *Ty = Add.getType();
  // ...
  // A known-bits guard:
  if ((*C2 | LHSKnown.Zero).isAllOnes())
  // ...
  // A shift canonicalization rebuilds the add as an arithmetic right shift:
      return BinaryOperator::CreateAShr(NewShl, ShAmtC);
  // ...
  // Another fold is guarded on a boolean-width operand:
  //   ... X->getType()->getScalarSizeInBits() == 1 ...
  // ...
}
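The sign-mask case rests on a bit identity: adding 0x80...0 can only change the top bit, so with wraparound it equals an xor, and when the add is known not to wrap the top bit must have been clear, making it an or. A quick check in plain C++ (unsigned arithmetic wraps by definition):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t SignMask = 0x80000000u;
  for (uint32_t X : {0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xDEADBEEFu}) {
    // X + signmask == X ^ signmask for every X (only the top bit changes).
    assert(X + SignMask == (X ^ SignMask));
    // If the top bit of X is clear (the no-wrap case), 'or' gives the same bits.
    if ((X & SignMask) == 0)
      assert(X + SignMask == (X | SignMask));
  }
}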
// Inside SimplifyAddWithRemainder: rebuild "X % C0 + ((X / C0) % C1) * C0"
// as the single remainder "X % (C0 * C1)".
  // Match MulOpV == (RemOpV % C1).
  if (MatchRem(MulOpV, RemOpV, C1, Rem2IsSigned) &&
      IsSigned == Rem2IsSigned) {
    // Match RemOpV == (X / C0).
    if (MatchDiv(RemOpV, DivOpV, DivOpC, IsSigned) && X == DivOpV &&
        C0 == DivOpC) {
      // ... (NewDivisor is the constant C0 * C1)
      return IsSigned ? Builder.CreateSRem(X, NewDivisor, "srem")
                      : Builder.CreateURem(X, NewDivisor, "urem");
    }
  }
// Fold (1 << NBits) - 1 into ~(-(1 << NBits)): a 'not' is better for
// bit-tracking analysis and other transforms than an 'add'.
static Instruction *canonicalizeLowbitMask(BinaryOperator &I,
                                           InstCombiner::BuilderTy &Builder) {
  Value *NBits;
  if (!match(&I, m_Add(m_OneUse(m_Shl(m_One(), m_Value(NBits))), m_AllOnes())))
    return nullptr;

  Constant *MinusOne = Constant::getAllOnesValue(NBits->getType());
  Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask");
  // Be wary of constant folding.
  if (auto *BOp = dyn_cast<BinaryOperator>(NotMask)) {
    // Always NSW. But NUW propagates!
    BOp->setHasNoSignedWrap();
    BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
  }

  return BinaryOperator::CreateNot(NotMask, I.getName() + ".not");
}
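The canonicalization relies on ~(-x) == x - 1 in two's complement, so with x = 1 << n the low-bit mask (1 << n) - 1 equals ~(-(1 << n)), which is the same as the not of an all-ones shl. A spot check:

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned N = 0; N < 32; ++N) {
    uint32_t Shl = 1u << N;
    // ~(-x) == x - 1 in two's complement, so the two mask forms agree:
    assert(Shl - 1 == ~(0u - Shl)); // ~(-(1 << N)) == (1 << N) - 1
    assert(Shl - 1 == ~(~0u << N)); // equivalently, not of an all-ones shl
  }
}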
static Instruction *foldToUnsignedSaturatedAdd(BinaryOperator &I) {
  Type *Ty = I.getType();
  auto getUAddSat = [&]() {
    return Intrinsic::getDeclaration(I.getModule(), Intrinsic::uadd_sat, Ty);
  };
  // ... (matches "add (umin X, ~Y), Y" and friends, producing uadd.sat calls)
}

Instruction *InstCombinerImpl::
    canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
        BinaryOperator &I) {
  assert((I.getOpcode() == Instruction::Add ||
          I.getOpcode() == Instruction::Or ||
          I.getOpcode() == Instruction::Sub) &&
         "Expecting add/or/sub instruction");
  // ...
  // An 'or' is interchangeable with an 'add' here; for 'sub' only the second
  // operand may be the conditional sign-extension pattern:
  if (I.getOpcode() == Instruction::Sub && I.getOperand(1) != Select)
    return nullptr;

  Type *XTy = X->getType();
  bool HadTrunc = I.getType() != XTy;
  // ...
  // The number of low bits to skip must be a valid shift amount:
  //   ... APInt(C->getType()->getScalarSizeInBits(),
  //             X->getType()->getScalarSizeInBits()) ...
  // ...
  auto SkipExtInMagic = [&I](Value *&V) {
    if (I.getOpcode() == Instruction::Sub)
      match(V, m_SExtOrSelf(m_Value(V)));
    else
      match(V, m_ZExtOrSelf(m_Value(V)));
  };
  // ...
  Value *SignExtendingValue, *Zero;
  // ... (the select condition must be a sign-bit check)
  //   ... !isSignBitCheck(Pred, *Thr, ShouldSignext) ...
  // ...
  SkipExtInMagic(SignExtendingValue);
  Constant *SignExtendingValueBaseConstant;
  if (!match(SignExtendingValue,
             // ... (a 'shl' of a base constant by the low-bit count)
             ))
    return nullptr;
  // If we are in Sub mode, the base constant must be one, else all-ones.
  if (I.getOpcode() == Instruction::Sub
          ? !match(SignExtendingValueBaseConstant, m_One())
          : !match(SignExtendingValueBaseConstant, m_AllOnes()))
    return nullptr;

  auto *NewAShr = BinaryOperator::CreateAShr(X, LowBitsToSkip,
                                             Extract->getName() + ".sext");
  NewAShr->copyIRFlags(Extract); // Preserve 'exact'-ness.
  // ...
}
// This is a specialization of a more general transform from
// SimplifyUsingDistributiveLaws.
static Instruction *factorizeMathWithShlOps(BinaryOperator &I,
                                            InstCombiner::BuilderTy &Builder) {
  assert((I.getOpcode() == Instruction::Add ||
          I.getOpcode() == Instruction::Sub) &&
         "Expected add/sub");
  auto *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
  auto *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
  if (!Op0 || !Op1 || !(Op0->hasOneUse() || Op1->hasOneUse()))
    return nullptr;

  Value *X, *Y, *ShAmt;
  if (!match(Op0, m_Shl(m_Value(X), m_Value(ShAmt))) ||
      !match(Op1, m_Shl(m_Value(Y), m_Specific(ShAmt))))
    return nullptr;

  // No-wrap flags propagate only when every operation has them.
  bool HasNSW = I.hasNoSignedWrap() && Op0->hasNoSignedWrap() &&
                Op1->hasNoSignedWrap();
  bool HasNUW = I.hasNoUnsignedWrap() && Op0->hasNoUnsignedWrap() &&
                Op1->hasNoUnsignedWrap();

  // add/sub (X << ShAmt), (Y << ShAmt) --> (add/sub X, Y) << ShAmt
  Value *NewMath = Builder.CreateBinOp(I.getOpcode(), X, Y);
  if (auto *NewI = dyn_cast<BinaryOperator>(NewMath)) {
    NewI->setHasNoSignedWrap(HasNSW);
    NewI->setHasNoUnsignedWrap(HasNUW);
  }
  auto *NewShl = BinaryOperator::CreateShl(NewMath, ShAmt);
  NewShl->setHasNoSignedWrap(HasNSW);
  NewShl->setHasNoUnsignedWrap(HasNUW);
  return NewShl;
}
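The underlying identity is unconditional under wraparound (unsigned) semantics: (x << s) + (y << s) == (x + y) << s mod 2^n, because a left shift is multiplication by 2^s and multiplication distributes over addition in modular arithmetic. The nsw/nuw bookkeeping above only decides whether the flags may be carried over. Spot check:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t S = 0; S < 32; ++S)
    for (uint32_t X : {0u, 3u, 0x40000000u, 0xFFFFFFFFu})
      for (uint32_t Y : {1u, 0x12345678u, 0xFFFFFFFFu})
        assert(((X << S) + (Y << S)) == ((X + Y) << S)); // distributes mod 2^32
}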
Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
  if (Value *V = SimplifyAddInst(I.getOperand(0), I.getOperand(1),
                                 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;
  // ...
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);
  // ...
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Type *Ty = I.getType();
  if (Ty->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateXor(LHS, RHS);

  // X + X --> X << 1
  if (LHS == RHS) {
    auto *Shl = BinaryOperator::CreateShl(LHS, ConstantInt::get(Ty, 1));
    Shl->setHasNoSignedWrap(I.hasNoSignedWrap());
    Shl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    return Shl;
  }

  Value *A, *B;
  if (match(LHS, m_Neg(m_Value(A)))) {
    // ...
    // -A + B --> B - A
    return BinaryOperator::CreateSub(RHS, A);
  }

  // A + -B --> A - B
  if (match(RHS, m_Neg(m_Value(B))))
    return BinaryOperator::CreateSub(LHS, B);

  if (Value *V = checkForNegativeOperand(I, Builder))
    return replaceInstUsesWith(I, V);

  // (A + 1) + ~B --> A - B
  // ~B + (A + 1) --> A - B
  if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_One()), m_Not(m_Value(B)))))
    return BinaryOperator::CreateSub(A, B);
  // ...
  // X % C0 + ((X / C0) % C1) * C0 => X % (C0 * C1)
  if (Value *V = SimplifyAddWithRemainder(I))
    return replaceInstUsesWith(I, V);

  // ((X s/ C1) << C2) + X => X s% -C1  when  -C1 == 1 << C2
  const APInt *C1, *C2;
  if (match(LHS, m_Shl(m_SDiv(m_Specific(RHS), m_APInt(C1)), m_APInt(C2)))) {
    APInt one(C2->getBitWidth(), 1);
    APInt minusC1 = -(*C1);
    if (minusC1 == (one << *C2)) {
      Constant *NewRHS = ConstantInt::get(RHS->getType(), minusC1);
      return BinaryOperator::CreateSRem(RHS, NewRHS);
    }
  }

  // (A & 2^C1) + A => A & (2^C1 - 1)  iff bit C1 in A is a sign bit
  // ...
    return BinaryOperator::CreateAnd(A, NewMask);

  // A + B --> A | B  iff A and B have no bits set in common.
  if (haveNoCommonBitsSet(LHS, RHS, DL, &AC, &I, &DT))
    return BinaryOperator::CreateOr(LHS, RHS);

  // add (select X 0 (sub n A)) A --> select X A n
  {
    SelectInst *SI = dyn_cast<SelectInst>(LHS);
    Value *A = RHS;
    if (!SI) {
      SI = dyn_cast<SelectInst>(RHS);
      A = LHS;
    }

    if (SI && SI->hasOneUse()) {
      Value *TV = SI->getTrueValue();
      Value *FV = SI->getFalseValue();
      // ... (fold the add into whichever select arm contains the sub)
    }
  }
  // ...
  // (add (xor A, B), (and A, B)) --> (or A, B)
    return BinaryOperator::CreateOr(A, B);

  // (add (or A, B), (and A, B)) --> (add A, B)
  if (match(&I, m_c_BinOp(m_Or(m_Value(A), m_Value(B)),
                          m_c_And(m_Deferred(A), m_Deferred(B))))) {
    // Replacing operands in-place preserves the nuw/nsw flags.
    replaceOperand(I, 0, A);
    replaceOperand(I, 1, B);
    return &I;
  }
  // ...
  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }
  // ...
  if (Instruction *V =
          canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
    return V;
  // ...
  // usub.sat(A, B) + B --> umax(A, B)
    return replaceInstUsesWith(I,
        Builder.CreateIntrinsic(Intrinsic::umax, {I.getType()}, {A, B}));

  // ctpop(A) + ctpop(B) --> ctpop(A | B)  if A and B have no common bits.
  // ...
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
                                   {Builder.CreateOr(A, B)}));

  return Changed ? &I : nullptr;
}
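The ctpop fold is the familiar popcount identity: when A and B share no set bits, popcount(A) + popcount(B) == popcount(A | B), since the or merges two disjoint bit sets. In standard C++ (C++20's std::popcount):

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t A : {0x0F0Fu, 0x1000u, 0xA0A0u})
    for (uint32_t B : {0x7050u, 0x0101u, 0x0A0Au})
      if ((A & B) == 0) // the fold's precondition: no common bits
        assert(std::popcount(A) + std::popcount(B) == std::popcount(A | B));
}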
/// Factor a common operand out of fadd/fsub of fmul/fdiv.
static Instruction *factorizeFAddFSub(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  assert((I.getOpcode() == Instruction::FAdd ||
          I.getOpcode() == Instruction::FSub) && "Expecting fadd/fsub");
  assert(I.hasAllowReassoc() && I.hasNoSignedZeros() &&
         "FP factorization requires FMF");
  // ...
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (!Op0->hasOneUse() || !Op1->hasOneUse())
    return nullptr;
  // ...
  bool IsFAdd = I.getOpcode() == Instruction::FAdd;
  // ... (build "(X +/- Z) * Y" or "(X +/- Z) / Y" from the factored operand)
}

Instruction *InstCombinerImpl::visitFAdd(BinaryOperator &I) {
  if (Value *V = SimplifyFAddInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;
  // ...
  if (Instruction *FoldedFAdd = foldBinOpIntoSelectOrPhi(I))
    return FoldedFAdd;
  // Check for (fadd double (sitofp x), y): see if this can become an integer
  // add followed by a single promotion.
  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
    Value *LHSIntVal = LHSConv->getOperand(0);
    Type *FPType = LHSConv->getType();

    // The transform is only valid when the integer type fits losslessly in
    // the significand of the FP type.
    auto IsValidPromotion = [](Type *FTy, Type *ITy) {
      // ...
      unsigned MaxRepresentableBits =
          APFloat::semanticsPrecision(FTy->getScalarType()->getFltSemantics());
      return ITy->getScalarType()->getIntegerBitWidth() <= MaxRepresentableBits;
    };

    // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
    if (IsValidPromotion(FPType, LHSIntVal->getType())) {
      // ... (the FP constant must be an exact integer CI)
      if (LHSConv->hasOneUse() &&
          // ...
          willNotOverflowSignedAdd(LHSIntVal, CI, I)) {
        // Insert the new integer add.
        Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, CI, "addconv");
        // ... (then convert back with sitofp)
      }
    }

    // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
    if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
      Value *RHSIntVal = RHSConv->getOperand(0);
      // It is enough to check one of the types, because both integer
      // operands must have the same width for the add to be well formed.
      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
        // ...
        if (// ...
            (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
            willNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) {
          // Insert the new integer add.
          Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, RHSIntVal, "addconv");
          // ... (then convert back with sitofp)
        }
      }
    }
  }
  // ...
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
    // ...
    // fadd (rdx 0.0, X), Y --> rdx Y, X
    // ...
      return replaceInstUsesWith(
          I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
                                     {X->getType()}, {Y, X}, &I));
    // fadd (rdx StartC, X), C --> rdx (C + StartC), X
    // ...
      return replaceInstUsesWith(
          I, Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
                                     {X->getType()}, {NewStartC, X}, &I));
    // ...
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }

  return nullptr;
}
/// Optimize pointer differences into the same array into a size. Consider:
///  &A[10] - &A[0]: we should compile this to "10".
Value *InstCombinerImpl::OptimizePointerDifference(Value *LHS, Value *RHS,
                                                   Type *Ty, bool IsNUW) {
  // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can
  // optimize this.
  bool Swapped = false;
  GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;
  if (!isa<GEPOperator>(LHS) && isa<GEPOperator>(RHS)) {
    std::swap(LHS, RHS);
    Swapped = true;
  }

  // Require at least one GEP with a common base pointer on both sides.
  if (auto *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
    // (gep X, ...) - X
    if (LHSGEP->getOperand(0) == RHS) {
      GEP1 = LHSGEP;
    } else if (auto *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
      // (gep X, ...) - (gep X, ...)
      if (LHSGEP->getOperand(0)->stripPointerCasts() ==
          RHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP1 = LHSGEP;
        GEP2 = RHSGEP;
      }
    }
  }
  // ...
  // Avoid duplicating index arithmetic that is expensive to recompute.
  unsigned NumNonConstantIndices1 = GEP1->countNonConstantIndices();
  unsigned NumNonConstantIndices2 = GEP2->countNonConstantIndices();
  if (NumNonConstantIndices1 + NumNonConstantIndices2 > 1 &&
      ((NumNonConstantIndices1 > 0 && !GEP1->hasOneUse()) ||
       (NumNonConstantIndices2 > 0 && !GEP2->hasOneUse()))) {
    return nullptr;
  }

  // Emit the offset of the GEP as an intptr_t.
  Value *Result = EmitGEPOffset(GEP1);
  // ...
  if (auto *I = dyn_cast<Instruction>(Result))
    if (IsNUW && !GEP2 && !Swapped && GEP1->isInBounds() &&
        // ...
        )
      I->setHasNoUnsignedWrap();

  // If we have a second GEP off the same base, subtract the two offsets.
  if (GEP2) {
    Value *Offset = EmitGEPOffset(GEP2);
    Result = Builder.CreateSub(Result, Offset, "gepdiff", false,
                               HasNSW); // HasNSW reconstructed: set when no signed wrap is provable
  }
  // ...
  if (Swapped)
    Result = Builder.CreateNeg(Result, "diff.neg");

  return Builder.CreateIntCast(Result, Ty, true);
}
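What the transform buys is visible in plain C++: the difference of two addresses off the same base reduces to a difference of indices, with no pointer arithmetic left at run time. A sketch of the intent:

#include <cassert>
#include <cstddef>

int main() {
  int A[16];
  // &A[10] - &A[0] is exactly the constant 10: the common base pointer
  // cancels and only the (scaled) offset difference remains.
  std::ptrdiff_t D = &A[10] - &A[0];
  assert(D == 10);

  // With variable indices the same cancellation applies: (A+i) - (A+j) == i - j.
  for (std::ptrdiff_t I = 0; I < 16; ++I)
    for (std::ptrdiff_t J = 0; J < 16; ++J)
      assert((&A[I] - &A[J]) == I - J);
}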
Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
  if (Value *V = SimplifySubInst(I.getOperand(0), I.getOperand(1),
                                 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);
  // ...
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // If this is a 'B = x - (-A)', change to B = x + A.
  // We deal with this without involving Negator to preserve NSW flags.
  if (Value *V = dyn_castNegVal(Op1)) {
    BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);

    if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
      assert(BO->getOpcode() == Instruction::Sub &&
             "Expected a subtraction operator!");
      if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    } else {
      if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    }

    return Res;
  }
  // ...
  auto TryToNarrowDeduceFlags = [this, &I, &Op0, &Op1]() -> Instruction * {
    // ...
    bool Changed = false;
    if (!I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1, I)) {
      Changed = true;
      I.setHasNoSignedWrap(true);
    }
    if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1, I)) {
      Changed = true;
      I.setHasNoUnsignedWrap(true);
    }

    return Changed ? &I : nullptr;
  };
  // If this is a negation (0 - Op1), let Negator try to sink it, unless the
  // only users are selects of the sub's operands (the canonical abs/nabs
  // forms), where the extra instructions would not pay off.
  bool IsNegation = match(Op0, m_ZeroInt());
  if (!IsNegation || none_of(I.users(), [&I, Op1](const User *U) {
        const Instruction *UI = dyn_cast<Instruction>(U);
        if (!UI)
          return false;
        return match(UI,
                     m_Select(m_Value(), m_Specific(Op1), m_Specific(&I))) ||
               match(UI, m_Select(m_Value(), m_Specific(&I), m_Specific(Op1)));
      })) {
    // ... (run the Negator)
  }
  if (IsNegation)
    return TryToNarrowDeduceFlags();

  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  if (I.getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateXor(Op0, Op1);
  // ...
  // (X + Z) - (Y + Z) --> X - Y, and friends:
    return BinaryOperator::CreateSub(XZ, YW);
  // ...
    return BinaryOperator::CreateSub(X, Add);

  // (~X) - (~Y) --> Y - X (with an extra guard, elided, against looping)
  if (isFreeToInvert(Op0, Op0->hasOneUse()) &&
      isFreeToInvert(Op1, Op1->hasOneUse())) {
    Value *NotOp0 = Builder.CreateNot(Op0);
    Value *NotOp1 = Builder.CreateNot(Op1);
    return BinaryOperator::CreateSub(NotOp1, NotOp0);
  }

  // Difference of sums is sum of differences:
  // add_rdx(V0) - add_rdx(V1) --> add_rdx(V0 - V1)
  auto m_AddRdx = [](Value *&Vec) {
    return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_add>(m_Value(Vec)));
  };
  Value *V0, *V1;
  if (match(Op0, m_AddRdx(V0)) && match(Op1, m_AddRdx(V1)) &&
      V0->getType() == V1->getType()) {
    Value *Sub = Builder.CreateSub(V0, V1);
    Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_add,
                                         {Sub->getType()}, {Sub});
    return replaceInstUsesWith(I, Rdx);
  }

  if (Constant *C = dyn_cast<Constant>(Op0)) {
    // ... (C - zext/sext(bool), C - (X + C2), and similar constant folds)
  }
  // ...
  if (PHINode *PN = dyn_cast<PHINode>(Op1))
    // ... (try to fold the subtraction into the PHI)
  // ...
  // Turn "C - X" into "X ^ C" when the subtraction cannot borrow:
    if ((*Op0C | RHSKnown.Zero).isAllOnes())
      return BinaryOperator::CreateXor(Op1, Op0);
  // ...
  // A family of and/or/xor reassociation folds:
      return BinaryOperator::CreateXor(A, B);
  // ...
      return BinaryOperator::CreateAnd(A, B);
  // ...
      return BinaryOperator::CreateOr(A, B);
  // ...
  // Two more folds fire only when an operand is disposable:
  //   ... && (Op0->hasOneUse() || Op1->hasOneUse())
  // and both produce an 'and':
      return BinaryOperator::CreateAnd(A, B);
  // ...
    return BinaryOperator::CreateAnd(
        Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));
  // ...
  // Subtracting a min/max intrinsic can become the inverted min/max:
  if (auto *II = dyn_cast<MinMaxIntrinsic>(Op1)) {
    // ...
    if (// ...
        (Op0->hasOneUse() || Op1->hasOneUse())) {
      // ...
      return replaceInstUsesWith(I, InvMaxMin);
    }
  }
  // ...
  // Two folds rebuild the subtraction as "add X, usub.sat(...)":
  //   ... X, Builder.CreateIntrinsic(Intrinsic::usub_sat, I.getType(), ...) ...
  // ...
  // Sink a sub into a single-use select so the subtraction happens only on
  // the arm that needs it; the other arm becomes zero.
  auto SinkSubIntoSelect =
      // ... (lambda over the select, the other sub operand, and a builder)
      // bool OtherHandOfSubIsTrueVal = OtherHandOfSub == TrueVal;
      // ... SelectInst::Create(Cond, ..., OtherHandOfSubIsTrueVal ? NewSub : Zero);
      // ...
  // Instantiations for both operand positions:
  //   return Builder->CreateSub(OtherHandOfSelect, ...);
  //   return Builder->CreateSub(Op0, ...);
  // ...
  // (X - (X & Y)) --> (X & ~Y)
  if (match(Op1, m_c_And(m_Specific(Op0), m_Value(Y))) &&
      (Op1->hasOneUse() || isa<Constant>(Y)))
    return BinaryOperator::CreateAnd(
        Op0, Builder.CreateNot(Y, Y->getName() + ".not"));
  // ...
  // Two inversion folds, gated on use counts and invertibility:
  if (// ...
      !Op0->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
    Value *Not = Builder.CreateNot(Y);
    return BinaryOperator::CreateSub(Not, X);
  }
  if (// ...
      !Op1->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
    Value *Not = Builder.CreateNot(Y);
    return BinaryOperator::CreateSub(X, Not);
  }
  {
    // Finally, see if this is a pointer-difference idiom:
    // (ptrtoint P) - (ptrtoint Q), possibly truncated.
    Value *LHSOp, *RHSOp;
    if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
        match(Op1, m_PtrToInt(m_Value(RHSOp))))
      if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
                                                 I.hasNoUnsignedWrap()))
        return replaceInstUsesWith(I, Res);

    // trunc(p)-trunc(q) -> trunc(p-q)
    if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
        match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
      if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
                                                 /* IsNUW */ false))
        return replaceInstUsesWith(I, Res);
  }

  Type *Ty = I.getType();
  // ...
  // One fold creates an explicit negation, carrying the wrap flags:
    Value *NegA = Builder.CreateNeg(A, "", I.hasNoUnsignedWrap(),
                                    I.hasNoSignedWrap());
  // ...
  // A masked-sub fold guarded by a high-bit check:
  const APInt *AddC, *AndC;
  // ...
    if ((HighMask & *AndC).isZero())
  // ...
  if (Instruction *V =
          canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
    return V;
  // ... (another fold ends in replaceInstUsesWith)
  // umax(X, Op1) - Op1 --> usub.sat(X, Op1)
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op1}));

  // Op0 - umin(X, Op0) --> usub.sat(Op0, X)
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op0, X}));

  // Op0 - umax(X, Op0) --> 0 - usub.sat(X, Op0)
    Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op0});
  // ...
  // umin(X, Op1) - Op1 --> 0 - usub.sat(Op1, X)
    Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op1, X});
  // ...
  // C - ctpop(X) --> ctpop(~X) when C is the bit width.
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
                                   {Builder.CreateNot(X)}));

  return TryToNarrowDeduceFlags();
}
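The usub.sat equivalences are easy to sanity-check with a scalar model: usub_sat(a, b) is "a > b ? a - b : 0", and both "umax(x, y) - y" and "x - umin(y, x)" reduce to exactly that:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Scalar model of the usub.sat intrinsic.
static uint32_t usub_sat(uint32_t A, uint32_t B) { return A > B ? A - B : 0; }

int main() {
  for (uint32_t X : {0u, 5u, 100u, 0xFFFFFFFFu})
    for (uint32_t Y : {0u, 7u, 100u, 0x80000000u}) {
      assert(std::max(X, Y) - Y == usub_sat(X, Y)); // umax(X, Y) - Y
      assert(X - std::min(Y, X) == usub_sat(X, Y)); // X - umin(Y, X)
    }
}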
Instruction *InstCombinerImpl::visitFNeg(UnaryOperator &I) {
  Value *Op = I.getOperand(0);

  if (Value *V = SimplifyFNegInst(Op, I.getFastMathFlags(),
                                  getSimplifyQuery().getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);
  // ...
  Value *Cond, *X, *Y, *P;
  // If we can ignore the sign of zeros: -(X - Y) --> (Y - X)
  if (I.hasNoSignedZeros() &&
      match(Op, m_OneUse(m_FSub(m_Value(X), m_Value(Y)))))
    return BinaryOperator::CreateFSubFMF(Y, X, &I);
  // ...
  // Try to eliminate the fneg through a single-use select.
  if (match(Op, m_OneUse(m_Select(m_Value(Cond), m_Value(X), m_Value(Y))))) {
    // Propagating nsz onto the new select needs care: it is only safe when
    // the negated arm was shared or the condition is known well-defined.
    auto propagateSelectFMF = [&](SelectInst *S, bool CommonOperand) {
      S->copyFastMathFlags(&I);
      if (auto *OldSel = dyn_cast<SelectInst>(Op))
        if (!OldSel->hasNoSignedZeros() && !CommonOperand &&
            !isGuaranteedNotToBeUndefOrPoison(OldSel->getCondition()))
          S->setHasNoSignedZeros(false);
    };
    // -(Cond ? -P : Y) --> Cond ? P : -Y
    if (match(X, m_FNeg(m_Value(P)))) {
      Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");
      SelectInst *NewSel = SelectInst::Create(Cond, P, NegY);
      propagateSelectFMF(NewSel, P == Y);
      return NewSel;
    }
    // -(Cond ? X : -P) --> Cond ? -X : P
    if (match(Y, m_FNeg(m_Value(P)))) {
      Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");
      SelectInst *NewSel = SelectInst::Create(Cond, NegX, P);
      propagateSelectFMF(NewSel, P == X);
      return NewSel;
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) {
  if (Value *V = SimplifyFSubInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  getSimplifyQuery().getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  // ...
  // One negation fold is gated on nsz and a non-constant-expression operand:
  //   if (I.hasNoSignedZeros() && !isa<ConstantExpr>(Op0) && ...)
  // ...
  if (isa<Constant>(Op0))
    // ... (try to fold the subtraction into a select arm)
  // ...
  Type *Ty = I.getType();
  // ...
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
    // ...
    auto m_FaddRdx = [](Value *&Sum, Value *&Vec) {
      return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
          m_Value(Sum), m_Value(Vec)));
    };
    Value *A0, *A1, *V0, *V1;
    if (match(Op0, m_FaddRdx(A0, V0)) && match(Op1, m_FaddRdx(A1, V1)) &&
        V0->getType() == V1->getType()) {
      // Difference of sums is sum of differences:
      // rdx(A0, V0) - rdx(A1, V1) --> rdx(A0, V0 - V1) - A1
      Value *Sub = Builder.CreateFSubFMF(V0, V1, &I);
      Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
                                           {Sub->getType()}, {A0, Sub}, &I);
      return BinaryOperator::CreateFSubFMF(Rdx, A1, &I);
    }
    // ...
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }

  return nullptr;
}