38#define DEBUG_TYPE "instcombine"
52 if (!V->hasOneUse())
return nullptr;
54 bool MadeChange =
false;
58 Value *
A =
nullptr, *
B =
nullptr, *One =
nullptr;
68 if (
I &&
I->isLogicalShift() &&
81 if (
I->getOpcode() == Instruction::LShr && !
I->isExact()) {
86 if (
I->getOpcode() == Instruction::Shl && !
I->hasNoUnsignedWrap()) {
87 I->setHasNoUnsignedWrap();
96 return MadeChange ? V :
nullptr;
112 bool HasAnyNoWrap =
I.hasNoSignedWrap() ||
I.hasNoUnsignedWrap();
113 Value *Neg = Builder.CreateNeg(OtherOp,
"", HasAnyNoWrap);
114 return Builder.CreateSelect(
Cond, OtherOp, Neg);
120 bool HasAnyNoWrap =
I.hasNoSignedWrap() ||
I.hasNoUnsignedWrap();
121 Value *Neg = Builder.CreateNeg(OtherOp,
"", HasAnyNoWrap);
122 return Builder.CreateSelect(
Cond, Neg, OtherOp);
130 return Builder.CreateSelectFMF(
Cond, OtherOp,
131 Builder.CreateFNegFMF(OtherOp, &
I), &
I);
138 return Builder.CreateSelectFMF(
Cond, Builder.CreateFNegFMF(OtherOp, &
I),
152 const bool HasNSW =
Mul.hasNoSignedWrap();
153 const bool HasNUW =
Mul.hasNoUnsignedWrap();
159 return Builder.CreateShl(
X, Z,
Mul.getName(), HasNUW, PropagateNSW);
172 FrX = Builder.CreateFreeze(
X,
X->getName() +
".fr");
173 Value *Shl = Builder.CreateShl(FrX, Z,
"mulshl", HasNUW, PropagateNSW);
174 return Builder.CreateAdd(Shl, FrX,
Mul.getName(), HasNUW, PropagateNSW);
185 FrX = Builder.CreateFreeze(
X,
X->getName() +
".fr");
186 Value *Shl = Builder.CreateShl(FrX, Z,
"mulshl");
187 return Builder.CreateSub(Shl, FrX,
Mul.getName());
194 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
197 SQ.getWithInstruction(&
I)))
212 Type *Ty =
I.getType();
213 const unsigned BitWidth = Ty->getScalarSizeInBits();
214 const bool HasNSW =
I.hasNoSignedWrap();
215 const bool HasNUW =
I.hasNoUnsignedWrap();
234 assert(Shl &&
"Constant folding of immediate constants failed");
237 if (HasNUW &&
Mul->hasNoUnsignedWrap())
253 if (
match(NewCst,
m_APInt(V)) && *V != V->getBitWidth() - 1)
270 (*MulAP - 1).isPowerOf2() && *ShiftC == MulAP->
logBase2()) {
276 BinOp =
Builder.CreateLShr(NewOp, ConstantInt::get(Ty, *ShiftC),
"",
279 auto *NewAdd = BinaryOperator::CreateAdd(NewOp, BinOp);
280 if (HasNSW && (HasNUW || OpBO->
getOpcode() == Instruction::LShr ||
282 NewAdd->setHasNoSignedWrap(
true);
284 NewAdd->setHasNoUnsignedWrap(HasNUW);
298 HasNSW && Op1C->isNotMinSignedValue()));
307 const APInt *NegPow2C;
311 unsigned SrcWidth =
X->getType()->getScalarSizeInBits();
313 if (ShiftAmt >=
BitWidth - SrcWidth) {
316 return BinaryOperator::CreateShl(Z, ConstantInt::get(Ty, ShiftAmt));
342 (BOp0->getOpcode() == Instruction::Or || BOp0->hasNoUnsignedWrap());
344 auto *BO = BinaryOperator::CreateAdd(NewMul, NewC);
345 if (HasNUW && Op0NUW) {
348 NewMulBO->setHasNoUnsignedWrap();
349 BO->setHasNoUnsignedWrap();
358 return BinaryOperator::CreateMul(
X,
X);
363 if (
I.hasNoSignedWrap() &&
368 I,
Builder.CreateBinaryIntrinsic(Intrinsic::abs,
381 auto *NewMul = BinaryOperator::CreateMul(
X,
Y);
384 NewMul->setHasNoSignedWrap();
397 return BinaryOperator::CreateMul(NegOp0,
X);
405 auto UDivCheck = [&C1](
const APInt &
C) {
return C.urem(*C1).isZero(); };
406 auto SDivCheck = [&C1](
const APInt &
C) {
427 if (!Div || (Div->
getOpcode() != Instruction::UDiv &&
428 Div->
getOpcode() != Instruction::SDiv)) {
432 Value *Neg = dyn_castNegVal(
Y);
435 (Div->
getOpcode() == Instruction::UDiv ||
436 Div->
getOpcode() == Instruction::SDiv)) {
446 auto RemOpc = Div->
getOpcode() == Instruction::UDiv ? Instruction::URem
451 XFreeze =
Builder.CreateFreeze(
X,
X->getName() +
".fr");
452 Value *Rem =
Builder.CreateBinOp(RemOpc, XFreeze, DivOp1);
454 return BinaryOperator::CreateSub(XFreeze, Rem);
455 return BinaryOperator::CreateSub(Rem, XFreeze);
464 if (Ty->isIntOrIntVectorTy(1) ||
467 return BinaryOperator::CreateAnd(Op0, Op1);
479 X->getType()->isIntOrIntVectorTy(1) &&
X->getType() ==
Y->getType() &&
480 (Op0->
hasOneUse() || Op1->hasOneUse() ||
X ==
Y)) {
489 X->getType()->isIntOrIntVectorTy(1) &&
X->getType() ==
Y->getType() &&
490 (Op0->
hasOneUse() || Op1->hasOneUse())) {
505 X->getType()->isIntOrIntVectorTy(1))
520 *
C ==
C->getBitWidth() - 1) {
532 *
C ==
C->getBitWidth() - 1) {
580 if (!HasNSW && willNotOverflowSignedMul(Op0, Op1,
I)) {
582 I.setHasNoSignedWrap(
true);
585 if (!HasNUW && willNotOverflowUnsignedMul(Op0, Op1,
I,
I.hasNoSignedWrap())) {
587 I.setHasNoUnsignedWrap(
true);
595 assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&
596 "Expected fmul or fdiv");
598 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
614 (Op0->
hasOneUse() || Op1->hasOneUse())) {
615 Value *XY = Builder.CreateBinOpFMF(Opcode,
X,
Y, &
I);
617 Builder.CreateUnaryIntrinsic(Intrinsic::fabs, XY, &
I,
I.getName());
630 Intrinsic::powi, {
X->getType(), YZ->
getType()}, {
X, YZ}, &
I);
636 unsigned Opcode =
I.getOpcode();
637 assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&
638 "Unexpected opcode");
645 Constant *One = ConstantInt::get(
Y->getType(), 1);
646 if (willNotOverflowSignedAdd(
Y, One,
I)) {
653 Value *Op0 =
I.getOperand(0);
654 Value *Op1 =
I.getOperand(1);
655 if (Opcode == Instruction::FMul &&
I.isOnlyUserOfAnyOperand() &&
660 Y->getType() == Z->getType()) {
665 if (Opcode == Instruction::FDiv &&
I.hasAllowReassoc() &&
I.hasNoNaNs()) {
672 willNotOverflowSignedSub(
Y, ConstantInt::get(
Y->getType(), 1),
I)) {
674 Instruction *NewPow = createPowiExpr(
I, *
this, Op1,
Y, NegOne);
685 willNotOverflowSignedSub(
Y, ConstantInt::get(
Y->getType(), 1),
I)) {
687 auto *NewPow = createPowiExpr(
I, *
this,
X,
Y, NegOne);
719 return !R1.
empty() && !
R2.empty();
753 if (!
X->hasAllowReassoc() || !
X->hasAllowReciprocal() || !
X->hasNoInfs())
760 if (BBx != BBr1 && BBx != BBr2)
769 return (
I->getParent() != BBr1 || !
I->hasAllowReassoc());
779 return (
I->getParent() == BBr2 &&
I->hasAllowReassoc());
784 Value *Op0 =
I.getOperand(0);
785 Value *Op1 =
I.getOperand(1);
849 auto *NewFMul =
Builder.CreateFMulFMF(
X, Z, FMF);
860 Value *Sqrt =
Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &
I);
870 if (
I.hasNoSignedZeros() &&
874 if (
I.hasNoSignedZeros() &&
881 if (
I.hasNoNaNs() &&
I.hasNoSignedZeros() && Op0 == Op1 && Op0->
hasNUses(2)) {
900 Value *Y1 =
Builder.CreateFAddFMF(
Y, ConstantFP::get(
I.getType(), 1.0), &
I);
901 Value *Pow =
Builder.CreateBinaryIntrinsic(Intrinsic::pow,
X, Y1, &
I);
908 if (
I.isOnlyUserOfAnyOperand()) {
912 auto *YZ =
Builder.CreateFAddFMF(
Y, Z, &
I);
913 auto *NewPow =
Builder.CreateBinaryIntrinsic(Intrinsic::pow,
X, YZ, &
I);
919 auto *XZ =
Builder.CreateFMulFMF(
X, Z, &
I);
920 auto *NewPow =
Builder.CreateBinaryIntrinsic(Intrinsic::pow, XZ,
Y, &
I);
928 Value *Exp =
Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &
I);
936 Value *Exp2 =
Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &
I);
962 I.getFastMathFlags(),
963 SQ.getWithInstruction(&
I)))
988 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
1000 Op0 =
Builder.CreateFNegFMF(Op0, &
I);
1002 {
I.getType()}, {Op1, Op0}, &
I);
1013 if (
I.hasNoNaNs() &&
I.hasNoSignedZeros()) {
1018 X->getType()->isIntOrIntVectorTy(1)) {
1020 SI->copyFastMathFlags(
I.getFastMathFlags());
1024 X->getType()->isIntOrIntVectorTy(1)) {
1026 SI->copyFastMathFlags(
I.getFastMathFlags());
1035 if (
I.hasAllowReassoc())
1063 Value *Start =
nullptr, *Step =
nullptr;
1077 if (!Result->hasNoNaNs())
1078 Result->setHasNoInfs(
false);
1083 if (
I.hasAllowContract() &&
1087 auto *Sin =
Builder.CreateUnaryIntrinsic(Intrinsic::sin,
X, &
I);
1088 if (
auto *
Metadata =
I.getMetadata(LLVMContext::MD_fpmath)) {
1089 Sin->setMetadata(LLVMContext::MD_fpmath,
Metadata);
1126 Value *SelectCond =
SI->getCondition();
1133 while (BBI != BBFront) {
1141 for (
Use &
Op : BBI->operands()) {
1145 }
else if (
Op == SelectCond) {
1155 if (&*BBI == SelectCond)
1156 SelectCond =
nullptr;
1159 if (!SelectCond && !
SI)
1170 Product = IsSigned ? C1.
smul_ov(C2, Overflow) : C1.
umul_ov(C2, Overflow);
1197 assert((
I.getOpcode() == Instruction::SDiv ||
1198 I.getOpcode() == Instruction::UDiv) &&
1199 "Expected integer divide");
1201 bool IsSigned =
I.getOpcode() == Instruction::SDiv;
1202 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
1203 Type *Ty =
I.getType();
1214 bool HasNUW =
Mul->hasNoUnsignedWrap() && Shl->hasNoUnsignedWrap();
1215 bool HasNSW =
Mul->hasNoSignedWrap() && Shl->hasNoSignedWrap();
1218 if (!IsSigned && HasNUW)
1219 return Builder.CreateLShr(
Y, Z,
"",
I.isExact());
1222 if (IsSigned && HasNSW && (Op0->
hasOneUse() || Op1->hasOneUse())) {
1223 Value *Shl = Builder.CreateShl(ConstantInt::get(Ty, 1), Z);
1224 return Builder.CreateSDiv(
Y, Shl,
"",
I.isExact());
1239 ((Shl0->hasNoUnsignedWrap() && Shl1->hasNoUnsignedWrap()) ||
1240 (Shl0->hasNoUnsignedWrap() && Shl0->hasNoSignedWrap() &&
1241 Shl1->hasNoSignedWrap())))
1242 return Builder.CreateUDiv(
X,
Y,
"",
I.isExact());
1246 if (IsSigned && Shl0->hasNoSignedWrap() && Shl1->hasNoSignedWrap() &&
1247 Shl1->hasNoUnsignedWrap())
1248 return Builder.CreateSDiv(
X,
Y,
"",
I.isExact());
1258 if (IsSigned ? (Shl0->hasNoSignedWrap() && Shl1->hasNoSignedWrap())
1259 : (Shl0->hasNoUnsignedWrap() && Shl1->hasNoUnsignedWrap())) {
1260 Constant *One = ConstantInt::get(
X->getType(), 1);
1263 Value *Dividend = Builder.CreateShl(
1264 One,
Y,
"shl.dividend",
1267 IsSigned ? (Shl0->hasNoUnsignedWrap() || Shl1->hasNoUnsignedWrap())
1268 : Shl0->hasNoSignedWrap());
1269 return Builder.CreateLShr(Dividend, Z,
"",
I.isExact());
1278 assert(
I.isIntDivRem() &&
"Unexpected instruction");
1279 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
1284 Type *Ty =
I.getType();
1287 unsigned NumElts = VTy->getNumElements();
1288 for (
unsigned i = 0; i != NumElts; ++i) {
1328 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
1329 bool IsSigned =
I.getOpcode() == Instruction::SDiv;
1330 Type *Ty =
I.getType();
1343 ConstantInt::get(Ty, Product));
1351 if (
isMultiple(*C2, *C1, Quotient, IsSigned)) {
1353 ConstantInt::get(Ty, Quotient));
1354 NewDiv->setIsExact(
I.isExact());
1359 if (
isMultiple(*C1, *C2, Quotient, IsSigned)) {
1361 ConstantInt::get(Ty, Quotient));
1363 Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
1364 Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
1377 if (
isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
1379 ConstantInt::get(Ty, Quotient));
1380 BO->setIsExact(
I.isExact());
1385 if (
isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
1387 ConstantInt::get(Ty, Quotient));
1389 Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
1390 Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
1403 return BinaryOperator::CreateNSWAdd(
X, ConstantInt::get(Ty, Quotient));
1408 return BinaryOperator::CreateNUWAdd(
X,
1409 ConstantInt::get(Ty, C1->
udiv(*C2)));
1418 assert(!Ty->isIntOrIntVectorTy(1) &&
"i1 divide not removed?");
1425 F1 =
Builder.CreateFreeze(Op1, Op1->getName() +
".fr");
1427 Value *Cmp =
Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
1450 return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1),
Y);
1452 return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1),
Y);
1458 if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
1467 if (!IsSigned && Op1->hasOneUse() &&
1472 Builder.CreateShl(ConstantInt::get(Ty, 1), Z,
"",
true),
Y);
1488 if (!IsSigned &&
Mul->hasNoUnsignedWrap())
1489 NewDiv = BinaryOperator::CreateUDiv(
X,
Y);
1490 else if (IsSigned &&
Mul->hasNoSignedWrap())
1491 NewDiv = BinaryOperator::CreateSDiv(
X,
Y);
1495 NewDiv->
setIsExact(
I.isExact() && InnerDiv->isExact());
1509 const APInt *C1, *C2;
1510 if (IsSigned && OB0HasNSW) {
1512 return BinaryOperator::CreateSDiv(
A,
B);
1514 if (!IsSigned && OB0HasNUW) {
1516 return BinaryOperator::CreateUDiv(
A,
B);
1518 return BinaryOperator::CreateUDiv(
A,
B);
1524 if (
auto *Val = CreateDivOrNull(
Y, Z))
1528 if (
auto *Val = CreateDivOrNull(
X, Z))
1539 return reinterpret_cast<Value *
>(-1);
1547 return IfFold([&]() {
1563 return IfFold([&]() {
return Builder.CreateZExt(LogX,
Op->getType()); });
1569 if (AssumeNonZero || TI->hasNoUnsignedWrap())
1571 return IfFold([&]() {
1572 return Builder.CreateTrunc(LogX,
Op->getType(),
"",
1573 TI->hasNoUnsignedWrap());
1582 if (AssumeNonZero || BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap())
1584 return IfFold([&]() {
return Builder.CreateAdd(LogX,
Y); });
1591 if (AssumeNonZero || PEO->isExact())
1593 return IfFold([&]() {
return Builder.CreateSub(LogX,
Y); });
1600 return IfFold([&]() {
return LogX; });
1602 return IfFold([&]() {
return LogY; });
1611 return IfFold([&]() {
1612 return Builder.CreateSelect(
SI->getOperand(0), LogX, LogY);
1625 return IfFold([&]() {
1626 return Builder.CreateBinaryIntrinsic(
MinMax->getIntrinsicID(), LogX,
1641 Type *Ty =
I.getType();
1644 X->getType() ==
Y->getType() && (
N->hasOneUse() ||
D->hasOneUse())) {
1681 SQ.getWithInstruction(&
I)))
1691 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
1693 const APInt *C1, *C2;
1701 X, ConstantInt::get(
X->getType(), C2ShlC1));
1710 Type *Ty =
I.getType();
1736 auto GetShiftableDenom = [&](
Value *Denom) ->
Value * {
1746 return Builder.CreateBinaryIntrinsic(Intrinsic::cttz, Denom,
1752 if (
auto *Res = GetShiftableDenom(Op1))
1754 I,
Builder.CreateLShr(Op0, Res,
I.getName(),
I.isExact()));
1761 SQ.getWithInstruction(&
I)))
1771 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
1772 Type *Ty =
I.getType();
1788 return BinaryOperator::CreateExactAShr(Op0,
C);
1794 return BinaryOperator::CreateExactAShr(Op0, ShAmt);
1800 Value *Ashr =
Builder.CreateAShr(Op0,
C,
I.getName() +
".neg",
true);
1821 Value *NarrowOp =
Builder.CreateSDiv(Op0Src, NarrowDivisor);
1829 Constant *NegC = ConstantInt::get(Ty, -(*Op1C));
1840 Builder.CreateSDiv(
X,
Y,
I.getName(),
I.isExact()));
1863 auto *BO = BinaryOperator::CreateUDiv(Op0, Op1,
I.getName());
1864 BO->setIsExact(
I.isExact());
1873 Value *Shr =
Builder.CreateLShr(Op0, CNegLog2,
I.getName(),
I.isExact());
1882 auto *BO = BinaryOperator::CreateUDiv(Op0, Op1,
I.getName());
1883 BO->setIsExact(
I.isExact());
1913 if (
I.hasNoNaNs() &&
1918 Intrinsic::copysign, {
C->getType()},
1927 if (!(
C->hasExactInverseFP() || (
I.hasAllowReciprocal() &&
C->isNormalFP())))
1935 Instruction::FDiv, ConstantFP::get(
I.getType(), 1.0),
C,
DL);
1936 if (!RecipC || !RecipC->isNormalFP())
1956 if (!
I.hasAllowReassoc() || !
I.hasAllowReciprocal())
1981 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
1983 if (!
II || !
II->hasOneUse() || !
I.hasAllowReassoc() ||
1984 !
I.hasAllowReciprocal())
1994 case Intrinsic::pow:
1995 Args.push_back(
II->getArgOperand(0));
1996 Args.push_back(Builder.CreateFNegFMF(
II->getArgOperand(1), &
I));
1998 case Intrinsic::powi: {
2006 Args.push_back(
II->getArgOperand(0));
2007 Args.push_back(Builder.CreateNeg(
II->getArgOperand(1)));
2008 Type *Tys[] = {
I.getType(),
II->getArgOperand(1)->getType()};
2009 Value *Pow = Builder.CreateIntrinsic(IID, Tys, Args, &
I);
2012 case Intrinsic::exp:
2013 case Intrinsic::exp2:
2014 Args.push_back(Builder.CreateFNegFMF(
II->getArgOperand(0), &
I));
2019 Value *Pow = Builder.CreateIntrinsic(IID,
I.getType(), Args, &
I);
2028 if (!
I.hasAllowReassoc() || !
I.hasAllowReciprocal())
2030 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
2032 if (!
II ||
II->getIntrinsicID() != Intrinsic::sqrt || !
II->hasOneUse() ||
2033 !
II->hasAllowReassoc() || !
II->hasAllowReciprocal())
2042 if (!DivOp->hasAllowReassoc() || !
I.hasAllowReciprocal() ||
2043 !DivOp->hasOneUse())
2045 Value *SwapDiv = Builder.CreateFDivFMF(Z,
Y, DivOp);
2047 Builder.CreateUnaryIntrinsic(
II->getIntrinsicID(), SwapDiv,
II);
2070 B.SetInsertPoint(
X);
2076 B.CreateFDiv(ConstantFP::get(
X->getType(), 1.0), SqrtOp));
2077 auto *R1FPMathMDNode = (*R1.
begin())->getMetadata(LLVMContext::MD_fpmath);
2081 R1FPMathMDNode,
I->getMetadata(LLVMContext::MD_fpmath));
2082 R1FMF &=
I->getFastMathFlags();
2086 FDiv->setMetadata(LLVMContext::MD_fpmath, R1FPMathMDNode);
2087 FDiv->copyFastMathFlags(R1FMF);
2094 auto *R2FPMathMDNode = (*
R2.begin())->getMetadata(LLVMContext::MD_fpmath);
2098 R2FPMathMDNode,
I->getMetadata(LLVMContext::MD_fpmath));
2099 R2FMF &=
I->getFastMathFlags();
2103 FSqrt->setMetadata(LLVMContext::MD_fpmath, R2FPMathMDNode);
2104 FSqrt->copyFastMathFlags(R2FMF);
2113 FMul->copyMetadata(*
X);
2123 I.getFastMathFlags(),
2124 SQ.getWithInstruction(&
I)))
2142 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
2171 if (
I.hasAllowReassoc() &&
I.hasAllowReciprocal()) {
2195 if (
I.hasAllowReassoc() && Op0->
hasOneUse() && Op1->hasOneUse()) {
2205 if ((IsTan || IsCot) &&
hasFloatFn(M, &
TLI,
I.getType(), LibFunc_tan,
2206 LibFunc_tanf, LibFunc_tanl)) {
2209 B.setFastMathFlags(
I.getFastMathFlags());
2210 AttributeList Attrs =
2213 LibFunc_tanl,
B, Attrs);
2215 Res =
B.CreateFDiv(ConstantFP::get(
I.getType(), 1.0), Res);
2224 if (
I.hasNoNaNs() &&
I.hasAllowReassoc() &&
2233 if (
I.hasNoNaNs() &&
I.hasNoInfs() &&
2237 Intrinsic::copysign, ConstantFP::get(
I.getType(), 1.0),
X, &
I);
2248 if (
I.hasAllowReassoc() &&
2252 Builder.CreateFAddFMF(
Y, ConstantFP::get(
I.getType(), -1.0), &
I);
2253 Value *Pow =
Builder.CreateBinaryIntrinsic(Intrinsic::pow, Op1, Y1, &
I);
2271 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1), *
X =
nullptr;
2273 bool ShiftByX =
false;
2277 bool &PreserveNSW) ->
bool {
2278 const APInt *Tmp =
nullptr;
2297 const APInt *Tmp =
nullptr;
2309 bool Op0PreserveNSW =
true, Op1PreserveNSW =
true;
2310 if (MatchShiftOrMulXC(Op0,
X,
Y, Op0PreserveNSW) &&
2311 MatchShiftOrMulXC(Op1,
X, Z, Op1PreserveNSW)) {
2313 }
else if (MatchShiftCX(Op0,
Y,
X) && MatchShiftCX(Op1, Z,
X)) {
2319 bool IsSRem =
I.getOpcode() == Instruction::SRem;
2326 bool BO0NoWrap = IsSRem ? BO0HasNSW : BO0HasNUW;
2328 APInt RemYZ = IsSRem ?
Y.srem(Z) :
Y.urem(Z);
2332 if (RemYZ.
isZero() && BO0NoWrap)
2338 auto CreateMulOrShift =
2340 Value *RemSimplification =
2341 ConstantInt::get(
I.getType(), RemSimplificationC);
2342 return ShiftByX ? BinaryOperator::CreateShl(RemSimplification,
X)
2343 : BinaryOperator::CreateMul(
X, RemSimplification);
2349 bool BO1NoWrap = IsSRem ? BO1HasNSW : BO1HasNUW;
2353 if (RemYZ ==
Y && BO1NoWrap) {
2364 if (
Y.uge(Z) && (IsSRem ? (BO0HasNSW && BO1HasNSW) : BO0HasNUW)) {
2382 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
2390 const APInt *Op1Int;
2392 (
I.getOpcode() == Instruction::URem ||
2416 SQ.getWithInstruction(&
I)))
2429 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
2430 Type *Ty =
I.getType();
2436 return BinaryOperator::CreateAnd(Op0,
Add);
2441 Value *Cmp =
Builder.CreateICmpNE(Op1, ConstantInt::get(Ty, 1));
2462 Value *FrozenOp0 = Op0;
2464 FrozenOp0 =
Builder.CreateFreeze(Op0, Op0->
getName() +
".frozen");
2475 Value *FrozenOp0 = Op0;
2477 FrozenOp0 =
Builder.CreateFreeze(Op0, Op0->
getName() +
".frozen");
2488 SQ.getWithInstruction(&
I)))
2498 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
2516 return BinaryOperator::CreateURem(Op0, Op1,
I.getName());
2524 bool hasNegative =
false;
2525 bool hasMissing =
false;
2526 for (
unsigned i = 0; i != VWidth; ++i) {
2527 Constant *Elt =
C->getAggregateElement(i);
2534 if (RHS->isNegative())
2538 if (hasNegative && !hasMissing) {
2540 for (
unsigned i = 0; i != VWidth; ++i) {
2541 Elts[i] =
C->getAggregateElement(i);
2543 if (RHS->isNegative())
2559 I.getFastMathFlags(),
2560 SQ.getWithInstruction(&
I)))
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file provides internal interfaces used to implement the InstCombine.
static Instruction * convertFSqrtDivIntoFMul(CallInst *CI, Instruction *X, const SmallPtrSetImpl< Instruction * > &R1, const SmallPtrSetImpl< Instruction * > &R2, InstCombiner::BuilderTy &B, InstCombinerImpl *IC)
static Instruction * simplifyIRemMulShl(BinaryOperator &I, InstCombinerImpl &IC)
static Instruction * narrowUDivURem(BinaryOperator &I, InstCombinerImpl &IC)
If we have zero-extended operands of an unsigned div or rem, we may be able to narrow the operation (...
static Value * simplifyValueKnownNonZero(Value *V, InstCombinerImpl &IC, Instruction &CxtI)
The specific integer value is used in a context where it is known to be non-zero.
static bool getFSqrtDivOptPattern(Instruction *Div, SmallPtrSetImpl< Instruction * > &R1, SmallPtrSetImpl< Instruction * > &R2)
static Value * foldMulSelectToNegate(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static bool isFSqrtDivToFMulLegal(Instruction *X, SmallPtrSetImpl< Instruction * > &R1, SmallPtrSetImpl< Instruction * > &R2)
static Instruction * foldFDivPowDivisor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Negate the exponent of pow/exp to fold division-by-pow() into multiply.
static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product, bool IsSigned)
True if the multiply can not be expressed in an int this size.
static Value * foldMulShl1(BinaryOperator &Mul, bool CommuteOperands, InstCombiner::BuilderTy &Builder)
Reduce integer multiplication patterns that contain a (+/-1 << Z) factor.
static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient, bool IsSigned)
True if C1 is a multiple of C2. Quotient contains C1/C2.
static Instruction * foldFDivSqrtDivisor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Convert div to mul if we have an sqrt divisor iff sqrt's operand is a fdiv instruction.
static Instruction * foldFDivConstantDividend(BinaryOperator &I)
Remove negation and try to reassociate constant math.
static Value * foldIDivShl(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
This file provides the interface for the instcombine pass implementation.
static bool hasNoSignedWrap(BinaryOperator &I)
static bool hasNoUnsignedWrap(BinaryOperator &I)
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > & Cond
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
static LLVM_ABI void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
bool isMinValue() const
Determine if this is the smallest unsigned value.
unsigned countr_zero() const
Count the number of trailing zero bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
LLVM_ABI APInt ushl_ov(const APInt &Amt, bool &Overflow) const
unsigned getSignificantBits() const
Get the minimum bit size for this signed APInt.
unsigned logBase2() const
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
static BinaryOperator * CreateFAddFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
static BinaryOperator * CreateExact(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFSubFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Value *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Value * getArgOperand(unsigned i) const
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
@ ICMP_ULT
unsigned less than
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getExactLogBase2(Constant *C)
If C is a scalar/fixed width vector of known powers of 2, then this function returns a new scalar/fix...
static LLVM_ABI Constant * getInfinity(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI bool isNormalFP() const
Return true if this is a normal (as opposed to denormal, infinity, nan, or zero) floating-point scala...
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNotMinSignedValue() const
Return true if the value is not the smallest signed value, or, for vectors, does not contain smallest...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
A parsed version of the target data layout string in and methods for querying it.
Convenience struct for specifying and reasoning about fast-math flags.
static FastMathFlags intersectRewrite(FastMathFlags LHS, FastMathFlags RHS)
Intersect rewrite-based flags.
static FastMathFlags unionValue(FastMathFlags LHS, FastMathFlags RHS)
Union value flags.
bool allowReassoc() const
Flag queries.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Instruction * visitMul(BinaryOperator &I)
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
Instruction * visitUDiv(BinaryOperator &I)
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * visitURem(BinaryOperator &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
InstCombinerImpl(InstructionWorklist &Worklist, BuilderTy &Builder, Function &F, AAResults *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const DataLayout &DL, ReversePostOrderTraversal< BasicBlock * > &RPOT)
Value * takeLog2(Value *Op, unsigned Depth, bool AssumeNonZero, bool DoFold)
Take the exact integer log2 of the value.
Instruction * visitSRem(BinaryOperator &I)
Instruction * foldBinOpSelectBinOp(BinaryOperator &Op)
In some cases it is beneficial to fold a select into a binary operator.
Instruction * visitFDiv(BinaryOperator &I)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I)
Fold a divide or remainder with a select instruction divisor when one of the select operands is zero.
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * commonIDivRemTransforms(BinaryOperator &I)
Common integer divide/remainder transforms.
Value * tryGetLog2(Value *Op, bool AssumeNonZero)
Instruction * commonIDivTransforms(BinaryOperator &I)
This function implements the transforms common to both integer division instructions (udiv and sdiv).
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Instruction * visitFRem(BinaryOperator &I)
bool SimplifyDemandedInstructionBits(Instruction &Inst)
Tries to simplify operands to an integer instruction based on its demanded bits.
Instruction * visitFMul(BinaryOperator &I)
Instruction * foldFMulReassoc(BinaryOperator &I)
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
Instruction * foldPowiReassoc(BinaryOperator &I)
Instruction * visitSDiv(BinaryOperator &I)
Instruction * commonIRemTransforms(BinaryOperator &I)
This function implements the transforms common to both integer remainder instructions (urem and srem)...
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const Instruction *CxtI=nullptr, unsigned Depth=0) const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
LLVM_ABI Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
LLVM_ABI void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI bool hasNoInfs() const LLVM_READONLY
Determine whether the no-infs flag is set.
LLVM_ABI bool hasNoSignedZeros() const LLVM_READONLY
Determine whether the no-signed-zeros flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports these flags.
LLVM_ABI void setIsExact(bool b=true)
Set or clear the exact flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI bool hasAllowReassoc() const LLVM_READONLY
Determine whether the allow-reassociation flag is set.
A wrapper class for inspecting calls to intrinsic functions.
static LLVM_ABI MDNode * getMostGenericFPMath(MDNode *A, MDNode *B)
A Module instance is used to store all the information related to an LLVM module.
static Value * Negate(bool LHSIsZero, bool IsNSW, Value *Root, InstCombinerImpl &IC)
Attempt to negate Root.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all SmallPtrSet instances.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
This class represents zero extension of integer types.
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
CommutativeBinaryIntrinsic_match< IntrID, T0, T1 > m_c_Intrinsic(const T0 &Op0, const T1 &Op1)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
AllowReassoc_match< T > m_AllowReassoc(const T &SubPattern)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
BinaryOp_match< LHS, RHS, Instruction::FMul > m_FMul(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
ap_match< APFloat > m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match a integer or vector negated power-of-2.
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FDiv > m_FDiv(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoSignedWrap > m_NSWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI Value * emitUnaryFloatFnCall(Value *Op, const TargetLibraryInfo *TLI, StringRef Name, IRBuilderBase &B, const AttributeList &Attrs)
Emit a call to the unary function named 'Name' (e.g. 'floor').
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI Value * simplifySDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for an SDiv, fold the result or return null.
LLVM_ABI Value * simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Mul, fold the result or return null.
LLVM_ABI bool hasFloatFn(const Module *M, const TargetLibraryInfo *TLI, Type *Ty, LibFunc DoubleFn, LibFunc FloatFn, LibFunc LongDoubleFn)
Check whether the overloaded floating point function corresponding to Ty is available.
LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
LLVM_ABI Constant * getLosslessUnsignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
LLVM_ABI Value * simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FRem, fold the result or return null.
LLVM_ABI Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI Value * simplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FDiv, fold the result or return null.
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
LLVM_ABI Value * simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for a UDiv, fold the result or return null.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one of its successors.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
LLVM_ABI Value * simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an SRem, fold the result or return null.
unsigned Log2(Align A)
Returns the log2 of the alignment.
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
LLVM_ABI Value * simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a URem, fold the result or return null.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.