#define DEBUG_TYPE "instcombine"
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
                                   const APInt &Demanded) {
  assert(OpNo < I->getNumOperands() && "Operand index too large");
  // ...
  if (C->isSubsetOf(Demanded))
    // ...
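// Illustrative sketch, not part of the listing above: the same subset test
// expressed over plain APInt values (helper name invented for the example,
// and both values are assumed to have the same bit width). The real
// ShrinkDemandedConstant rewrites the operand in place with *C & Demanded
// once the subset check fails.
#include "llvm/ADT/APInt.h"

static bool shrinkConstantSketch(llvm::APInt &C, const llvm::APInt &Demanded) {
  if (C.isSubsetOf(Demanded))
    return false;   // every set bit of C is demanded; nothing to shrink
  C &= Demanded;    // clear the bits no user of the result will ever read
  return true;
}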
  if (V == &Inst)
    return true;
bool InstCombinerImpl::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                                            const APInt &DemandedMask,
                                            KnownBits &Known, unsigned Depth) {
  Use &U = I->getOperandUse(OpNo);
  // ...
  if (!NewVal)
    return false;
  assert(V != nullptr && "Null pointer of Value???");
  Type *VTy = V->getType();
  // ...
         "Value *V, DemandedMask and Known must have same BitWidth");
  // ...
  if (isa<Constant>(V)) {
  // ...
  if (DemandedMask.isZero())
  // ...
  if (Depth != 0 && !I->hasOneUse())
  // ...
  if (Depth == 0 && !V->hasOneUse())
    // ...
  auto disableWrapFlagsBasedOnUnusedHighBits = [](Instruction *I,
                                                  unsigned NLZ) {
    // ...
    I->setHasNoSignedWrap(false);
    I->setHasNoUnsignedWrap(false);
  };
  // ...
  auto simplifyOperandsBasedOnUnusedHighBits = [&](APInt &DemandedFromOps) {
    // ...
    disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
  };
  switch (I->getOpcode()) {
  case Instruction::And: {
    // ...
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
    // ...
      return I->getOperand(0);
    // ...
      return I->getOperand(1);
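    // Paraphrasing the elided guards: this is the usual demanded-bits rule
    // for 'and'. If, at every demanded bit position, either the other operand
    // is known one or this operand is known zero, the result already equals
    // this operand, so the whole 'and' collapses to it -- hence the two early
    // returns above.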
  case Instruction::Or: {
    // ...
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
    // ...
      return I->getOperand(0);
    // ...
      return I->getOperand(1);
  case Instruction::Xor: {
    if (DemandedMask == 1 &&
        // ...
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
    // ...
      return I->getOperand(0);
    // ...
      return I->getOperand(1);
    // ...
        BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
    // ...
                                 ~RHSKnown.One & DemandedMask);
    // ...
    if ((*C | ~DemandedMask).isAllOnes()) {
    if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0))) {
      if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
          // ...
          (LHSKnown.One & RHSKnown.One & DemandedMask) != 0) {
        APInt NewMask = ~(LHSKnown.One & RHSKnown.One & DemandedMask);
        // ...
        Instruction *NewAnd = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
        // ...
        Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
  case Instruction::Select: {
    // ...
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
    // ...
    auto CanonicalizeSelectConstant = [](Instruction *I, unsigned OpNo,
                                         const APInt &DemandedMask) {
      // ...
      if ((*CmpC & DemandedMask) == (*SelC & DemandedMask)) {
    // ...
    if (CanonicalizeSelectConstant(I, 1, DemandedMask) ||
        CanonicalizeSelectConstant(I, 2, DemandedMask))
  case Instruction::Trunc: {
  // ...
  case Instruction::ZExt: {
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
  case Instruction::BitCast:
    if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
      // ...
    if (auto *DstVTy = dyn_cast<VectorType>(VTy)) {
      if (auto *SrcVTy = dyn_cast<VectorType>(I->getOperand(0)->getType())) {
        if (isa<ScalableVectorType>(DstVTy) ||
            isa<ScalableVectorType>(SrcVTy) ||
            cast<FixedVectorType>(DstVTy)->getNumElements() !=
                cast<FixedVectorType>(SrcVTy)->getNumElements())
          // ...
    } else if (I->getOperand(0)->getType()->isVectorTy())
      // ...
  case Instruction::SExt: {
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
    APInt InputDemandedBits = DemandedMask.trunc(SrcBitWidth);
    // ...
      InputDemandedBits.setBit(SrcBitWidth - 1);
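    // Rationale for setting the top input bit: if any bit at or above
    // SrcBitWidth is demanded in the sign-extended result, that bit is a copy
    // of the input's sign bit, so the input's sign bit is demanded as well.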
  case Instruction::Add: {
    if ((DemandedMask & 1) == 0) {
      // ...
          X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType()) {
      // ...
          X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType()) {
    // ...
      return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
    // ...
    APInt DemandedFromLHS = DemandedFromOps;
    // ...
      return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
    // ...
      return I->getOperand(0);
    if (DemandedFromOps.isSubsetOf(LHSKnown.Zero))
      return I->getOperand(1);
    // ...
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
  case Instruction::Sub: {
    // ...
      return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
    // ...
    APInt DemandedFromLHS = DemandedFromOps;
    // ...
      return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
    // ...
      return I->getOperand(0);
    // ...
    if (DemandedFromOps.isOne() && DemandedFromOps.isSubsetOf(LHSKnown.Zero))
      return I->getOperand(1);
    // ...
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
  case Instruction::Mul: {
    APInt DemandedFromOps;
    if (simplifyOperandsBasedOnUnusedHighBits(DemandedFromOps))
      // ...
    Instruction *Shl = BinaryOperator::CreateShl(I->getOperand(0), ShiftC);
    // ...
    if (I->getOperand(0) == I->getOperand(1) && DemandedMask.ult(4)) {
      // ...
      Instruction *And1 = BinaryOperator::CreateAnd(I->getOperand(0), One);
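      // Context for the two folds above: multiplying by a power-of-two
      // constant is rewritten as a left shift, and for X*X with only the low
      // two bits demanded, bit 1 of a square is always zero (X*X mod 4 is
      // either 0 or 1), so the product reduces to X & 1.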
  case Instruction::Shl: {
    // ...
    if (Instruction *Shr = dyn_cast<Instruction>(I->getOperand(0)))
      // ...
                                      DemandedMask, Known))
    // ...
    Instruction *Lshr = BinaryOperator::CreateLShr(NewC, X);
    // ...
    APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
    // ...
    I->dropPoisonGeneratingFlags();
  case Instruction::LShr: {
    // ...
    if (SignBits >= NumHiDemandedBits)
      return I->getOperand(0);
    // ...
    APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
    // ...
    if (cast<LShrOperator>(I)->isExact())
      // ...
  case Instruction::AShr: {
    // ...
    if (SignBits >= NumHiDemandedBits)
      return I->getOperand(0);
    // ...
    if (DemandedMask.isOne()) {
      // ...
                                          I->getOperand(0), I->getOperand(1),
                                          I->getName());
    // ...
    APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
    // ...
    if (cast<AShrOperator>(I)->isExact())
      // ...
    LShr->setIsExact(cast<BinaryOperator>(I)->isExact());
    // ...
    Known.One |= HighBits;
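    // Context for the AShr case: when the value already has at least as many
    // sign bits as there are demanded high bits, the shift changes no
    // demanded bit and the operand is returned directly; and when the
    // shifted-in sign copies are irrelevant to the demanded bits (or only
    // bit 0 is demanded), the arithmetic shift is rewritten as a logical
    // shift, carrying over the exact flag.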
  case Instruction::UDiv: {
    // ...
    APInt DemandedMaskIn =
    // ...
    I->dropPoisonGeneratingFlags();
    // ...
                    cast<BinaryOperator>(I)->isExact());
  case Instruction::SRem: {
    // ...
    if (RA.isPowerOf2()) {
      if (DemandedMask.ult(RA))
        return I->getOperand(0);
      // ...
      Known.Zero = LHSKnown.Zero & LowBits;
      Known.One = LHSKnown.One & LowBits;
      // ...
      if (LHSKnown.isNonNegative() || LowBits.isSubsetOf(LHSKnown.Zero))
        Known.Zero |= ~LowBits;
      // ...
      if (LHSKnown.isNegative() && LowBits.intersects(LHSKnown.One))
        Known.One |= ~LowBits;
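      // Context: for srem by a power of two RA, the low bits of the result
      // are copied from the LHS (subtracting RA*(LHS/RA) leaves the low
      // log2(RA) bits of the LHS untouched), so demanding only those bits
      // lets the srem collapse to its LHS. The high bits are zero when the
      // LHS is non-negative or its low bits are known zero, and all-ones when
      // the LHS is negative with a nonzero low part, which is what the two
      // guarded Known updates above encode.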
  case Instruction::URem: {
  // ...
  case Instruction::Call: {
    bool KnownBitsComputed = false;
    // ...
    switch (II->getIntrinsicID()) {
    case Intrinsic::abs: {
      if (DemandedMask == 1)
        return II->getArgOperand(0);
    // ...
    case Intrinsic::ctpop: {
      // ...
          II->getModule(), Intrinsic::ctpop, VTy);
    // ...
    case Intrinsic::bswap: {
      // ...
      NewVal = BinaryOperator::CreateLShr(
      // ...
      NewVal = BinaryOperator::CreateShl(
    // ...
    case Intrinsic::fshr:
    case Intrinsic::fshl: {
      // ...
      if (II->getIntrinsicID() == Intrinsic::fshr)
        // ...
      APInt DemandedMaskLHS(DemandedMask.lshr(ShiftAmt));
      // ...
      if (I->getOperand(0) != I->getOperand(1)) {
        // ...
      if (DemandedMaskLHS.isSubsetOf(LHSKnown.Zero | LHSKnown.One) &&
        // ...
      Known.Zero = LHSKnown.Zero.shl(ShiftAmt) |
      // ...
      Known.One = LHSKnown.One.shl(ShiftAmt) |
      // ...
      KnownBitsComputed = true;
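      // Context: a funnel shift by a constant amount concatenates the two
      // operands and shifts, so the demanded bits split cleanly -- the first
      // operand is demanded at DemandedMask.lshr(ShiftAmt) and the second at
      // the mask shifted the other way -- and the resulting known bits are
      // reassembled with the matching shl/lshr, as the two Known updates
      // above show.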
    case Intrinsic::umax: {
      // ...
          CTZ >= C->getActiveBits())
        return II->getArgOperand(0);
    // ...
    case Intrinsic::umin: {
      // ...
          CTZ >= C->getBitWidth() - C->countl_one())
        return II->getArgOperand(0);
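      // Context: CTZ is the number of trailing zero bits in DemandedMask.
      // For umax(A, C) the demanded bits cannot be affected by C when the
      // lowest demanded bit lies above C's highest set bit; for umin(A, C)
      // the same holds when it lies above C's highest clear bit. In both
      // situations the intrinsic agrees with its first argument on every
      // demanded bit and is replaced by it.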
    // ...
          *II, DemandedMask, Known, KnownBitsComputed);
    // ...
    if (!KnownBitsComputed)
    // ...
  Type *ITy = I->getType();
  // ...
  switch (I->getOpcode()) {
  case Instruction::And: {
    Known = LHSKnown & RHSKnown;
    // ...
      return I->getOperand(0);
      return I->getOperand(1);
  case Instruction::Or: {
    Known = LHSKnown | RHSKnown;
    // ...
      return I->getOperand(0);
      return I->getOperand(1);
  case Instruction::Xor: {
    Known = LHSKnown ^ RHSKnown;
    // ...
      return I->getOperand(0);
      return I->getOperand(1);
  case Instruction::Add: {
    // ...
      return I->getOperand(0);
      return I->getOperand(1);
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
  case Instruction::Sub: {
    // ...
      return I->getOperand(0);
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
  case Instruction::AShr: {
  const APInt *ShiftRC;
  const APInt *ShiftLC;
  // ...
  if (!ShlOp1 || !ShrOp1)
    // ...
  Known.Zero &= DemandedMask;
  // ...
  bool isLshr = (Shr->getOpcode() == Instruction::LShr);
  BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt)
                    : (BitMask1.ashr(ShrAmt) << ShlAmt);
  // ...
  if (ShrAmt <= ShlAmt) {
    BitMask2 <<= (ShlAmt - ShrAmt);
  // ...
    BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt)
                      : BitMask2.ashr(ShrAmt - ShlAmt);
  // ...
  if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
    if (ShrAmt == ShlAmt)
      // ...
    if (ShrAmt < ShlAmt) {
      // ...
      New = BinaryOperator::CreateShl(VarX, Amt);
      // ...
      New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt)
                   : BinaryOperator::CreateAShr(VarX, Amt);
      if (cast<BinaryOperator>(Shr)->isExact())
        New->setIsExact(true);
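  // Purpose of this helper: for a pattern (X >> ShrAmt) << ShlAmt with
  // constant amounts it checks whether, restricted to the demanded bits, the
  // round trip is equivalent to a single shift of X by the difference of the
  // amounts (or to X itself when they match), and if so builds that simpler
  // shift, preserving the exact flag of the original right shift.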
Value *InstCombinerImpl::SimplifyDemandedVectorElts(Value *V,
                                                    APInt DemandedElts,
                                                    APInt &UndefElts,
                                                    unsigned Depth,
                                                    bool AllowMultipleUsers) {
  if (isa<ScalableVectorType>(V->getType()))
    // ...
  unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();
  assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");
  // ...
    UndefElts = EltMask;
  // ...
  if (DemandedElts.isZero()) {
    UndefElts = EltMask;
  // ...
  if (auto *C = dyn_cast<Constant>(V)) {
    // ...
    Type *EltTy = cast<VectorType>(V->getType())->getElementType();
    // ...
    for (unsigned i = 0; i != VWidth; ++i) {
      if (!DemandedElts[i]) {
        // ...
      Constant *Elt = C->getAggregateElement(i);
      if (!Elt)
        return nullptr;
      // ...
      if (isa<UndefValue>(Elt))
        // ...
    return NewCV != C ? NewCV : nullptr;
  if (!AllowMultipleUsers) {
    // ...
    if (!V->hasOneUse()) {
      // ...
      DemandedElts = EltMask;
  // ...
  if (!I)
    return nullptr;
  // ...
  bool MadeChange = false;
  auto simplifyAndSetOp = [&](Instruction *Inst, unsigned OpNum,
    // ...
    auto *II = dyn_cast<IntrinsicInst>(Inst);
  APInt UndefElts2(VWidth, 0);
  APInt UndefElts3(VWidth, 0);
  switch (I->getOpcode()) {
  // ...
  case Instruction::GetElementPtr: {
    // ...
    if (mayIndexStructType(cast<GetElementPtrInst>(*I)))
      // ...
    for (unsigned i = 0; i < I->getNumOperands(); i++) {
      // ...
        UndefElts = EltMask;
      // ...
      if (I->getOperand(i)->getType()->isVectorTy()) {
        APInt UndefEltsOp(VWidth, 0);
        simplifyAndSetOp(I, i, DemandedElts, UndefEltsOp);
        // ...
        UndefElts |= UndefEltsOp;
  case Instruction::InsertElement: {
    // ...
    simplifyAndSetOp(I, 0, DemandedElts, UndefElts2);
    // ...
    unsigned IdxNo = Idx->getZExtValue();
    APInt PreInsertDemandedElts = DemandedElts;
    // ...
      PreInsertDemandedElts.clearBit(IdxNo);
    // ...
    if (PreInsertDemandedElts == 0 &&
        // ...
    simplifyAndSetOp(I, 0, PreInsertDemandedElts, UndefElts);
    // ...
    if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
      // ...
      return I->getOperand(0);
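      // If the lane being written is out of range or is not demanded, the
      // insertelement contributes nothing that anyone reads, so it can be
      // replaced by its vector operand (operand 0).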
  case Instruction::ShuffleVector: {
    auto *Shuffle = cast<ShuffleVectorInst>(I);
    assert(Shuffle->getOperand(0)->getType() ==
               Shuffle->getOperand(1)->getType() &&
           "Expected shuffle operands to have same type");
    unsigned OpWidth = cast<FixedVectorType>(Shuffle->getOperand(0)->getType())
                           ->getNumElements();
    // ...
    if (all_of(Shuffle->getShuffleMask(), [](int Elt) { return Elt == 0; }) &&
        // ...
      APInt LeftDemanded(OpWidth, 1);
      APInt LHSUndefElts(OpWidth, 0);
      simplifyAndSetOp(I, 0, LeftDemanded, LHSUndefElts);
      if (LHSUndefElts[0])
        UndefElts = EltMask;
    // ...
    APInt LeftDemanded(OpWidth, 0), RightDemanded(OpWidth, 0);
    for (unsigned i = 0; i < VWidth; i++) {
      if (DemandedElts[i]) {
        unsigned MaskVal = Shuffle->getMaskValue(i);
        if (MaskVal != -1u) {
          assert(MaskVal < OpWidth * 2 &&
                 "shufflevector mask index out of range!");
          if (MaskVal < OpWidth)
            LeftDemanded.setBit(MaskVal);
          // ...
            RightDemanded.setBit(MaskVal - OpWidth);
    // ...
    APInt LHSUndefElts(OpWidth, 0);
    simplifyAndSetOp(I, 0, LeftDemanded, LHSUndefElts);
    // ...
    APInt RHSUndefElts(OpWidth, 0);
    simplifyAndSetOp(I, 1, RightDemanded, RHSUndefElts);
    // ...
    if (VWidth == OpWidth) {
      bool IsIdentityShuffle = true;
      for (unsigned i = 0; i < VWidth; i++) {
        unsigned MaskVal = Shuffle->getMaskValue(i);
        if (DemandedElts[i] && i != MaskVal) {
          IsIdentityShuffle = false;
          // ...
      if (IsIdentityShuffle)
        return Shuffle->getOperand(0);
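      // A shuffle whose result width matches its operand width and whose
      // demanded lanes all read lane i of the first operand is an identity
      // on every lane anyone looks at, so the first operand is returned
      // directly.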
    bool NewUndefElts = false;
    unsigned LHSIdx = -1u, LHSValIdx = -1u;
    unsigned RHSIdx = -1u, RHSValIdx = -1u;
    bool LHSUniform = true;
    bool RHSUniform = true;
    for (unsigned i = 0; i < VWidth; i++) {
      unsigned MaskVal = Shuffle->getMaskValue(i);
      if (MaskVal == -1u) {
        // ...
      } else if (!DemandedElts[i]) {
        NewUndefElts = true;
        // ...
      } else if (MaskVal < OpWidth) {
        if (LHSUndefElts[MaskVal]) {
          NewUndefElts = true;
          // ...
        LHSIdx = LHSIdx == -1u ? i : OpWidth;
        LHSValIdx = LHSValIdx == -1u ? MaskVal : OpWidth;
        LHSUniform = LHSUniform && (MaskVal == i);
        // ...
        if (RHSUndefElts[MaskVal - OpWidth]) {
          NewUndefElts = true;
          // ...
        RHSIdx = RHSIdx == -1u ? i : OpWidth;
        RHSValIdx = RHSValIdx == -1u ? MaskVal - OpWidth : OpWidth;
        RHSUniform = RHSUniform && (MaskVal - OpWidth == i);
    // ...
        cast<FixedVectorType>(Shuffle->getType())->getNumElements()) {
      // ...
      if (LHSIdx < OpWidth && RHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(0))) {
          Op = Shuffle->getOperand(1);
          Value = CV->getOperand(LHSValIdx);
      // ...
      if (RHSIdx < OpWidth && LHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(1))) {
          Op = Shuffle->getOperand(0);
          Value = CV->getOperand(RHSValIdx);
    // ...
    for (unsigned i = 0; i < VWidth; ++i) {
      // ...
        Elts.push_back(Shuffle->getMaskValue(i));
    // ...
    Shuffle->setShuffleMask(Elts);
  case Instruction::Select: {
    // ...
    simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
    // ...
    APInt DemandedLHS(DemandedElts), DemandedRHS(DemandedElts);
    if (auto *CV = dyn_cast<ConstantVector>(Sel->getCondition())) {
      for (unsigned i = 0; i < VWidth; i++) {
        // ...
        if (isa<ConstantExpr>(CElt))
          // ...
          DemandedLHS.clearBit(i);
    // ...
    simplifyAndSetOp(I, 1, DemandedLHS, UndefElts2);
    simplifyAndSetOp(I, 2, DemandedRHS, UndefElts3);
    // ...
    UndefElts = UndefElts2 & UndefElts3;
  case Instruction::BitCast: {
    // ...
    VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
    // ...
    unsigned InVWidth = cast<FixedVectorType>(VTy)->getNumElements();
    APInt InputDemandedElts(InVWidth, 0);
    UndefElts2 = APInt(InVWidth, 0);
    // ...
    if (VWidth == InVWidth) {
      // ...
      InputDemandedElts = DemandedElts;
    } else if ((VWidth % InVWidth) == 0) {
      // ...
      Ratio = VWidth / InVWidth;
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (DemandedElts[OutIdx])
          InputDemandedElts.setBit(OutIdx / Ratio);
    } else if ((InVWidth % VWidth) == 0) {
      // ...
      Ratio = InVWidth / VWidth;
      for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
        if (DemandedElts[InIdx / Ratio])
          InputDemandedElts.setBit(InIdx);
    // ...
    simplifyAndSetOp(I, 0, InputDemandedElts, UndefElts2);
    // ...
    if (VWidth == InVWidth) {
      UndefElts = UndefElts2;
    } else if ((VWidth % InVWidth) == 0) {
      // ...
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (UndefElts2[OutIdx / Ratio])
          UndefElts.setBit(OutIdx);
    } else if ((InVWidth % VWidth) == 0) {
      // ...
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
        // ...
          UndefElts.setBit(OutIdx);
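    // Worked example of the ratio mapping above (values chosen purely for
    // illustration): bitcasting <2 x i64> to <4 x i32> gives VWidth = 4,
    // InVWidth = 2, Ratio = 2. With DemandedElts = 0b0110 (output lanes 1
    // and 2), lane 1 maps to input lane 1/2 = 0 and lane 2 to 2/2 = 1, so
    // InputDemandedElts becomes 0b11; undef information flows back through
    // the same index arithmetic in the second set of loops.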
  case Instruction::FPTrunc:
  case Instruction::FPExt:
    simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
    // ...
  case Instruction::Call: {
    // ...
    case Intrinsic::masked_gather:
    case Intrinsic::masked_load: {
      // ...
          DemandedPassThrough(DemandedElts);
      if (auto *CV = dyn_cast<ConstantVector>(II->getOperand(2)))
        for (unsigned i = 0; i < VWidth; i++) {
          // ...
            DemandedPtrs.clearBit(i);
      // ...
      simplifyAndSetOp(II, 0, DemandedPtrs, UndefElts2);
      simplifyAndSetOp(II, 3, DemandedPassThrough, UndefElts3);
      // ...
      UndefElts = UndefElts2 & UndefElts3;
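      // Reading of the masked_load/masked_gather handling: a lane whose mask
      // element is a constant zero never dereferences its pointer, and a lane
      // whose mask element is a constant one never reads the pass-through
      // vector, so the demanded-element sets for operand 0 (pointers) and
      // operand 3 (pass-through) are each narrowed accordingly before
      // recursing.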
    // ...
            *II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
    // ...
    if (DemandedElts == 1 && !X->hasOneUse() && !Y->hasOneUse() &&
        // ...
      auto findShufBO = [&](bool MatchShufAsOp0) -> User * {
        // ...
        Value *ShufOp = MatchShufAsOp0 ? X : Y;
        Value *OtherOp = MatchShufAsOp0 ? Y : X;
        // ...
      if (User *ShufBO = findShufBO(true))
        // ...
      if (User *ShufBO = findShufBO(false))
        // ...
    simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
    simplifyAndSetOp(I, 1, DemandedElts, UndefElts2);
    // ...
    UndefElts &= UndefElts2;
  // ...
  return MadeChange ? I : nullptr;