#define DEBUG_TYPE "instcombine"
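/// Check to see if the specified operand of the specified instruction is a
/// constant integer. If so, check to see if there are any bits set in the
/// constant that are not demanded. If so, shrink the constant and return
/// true.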
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
                                   const APInt &Demanded) {
  assert(OpNo < I->getNumOperands() && "Operand index too large");

  // The operand must be a constant integer or splat integer.
  const APInt *C;
  if (!match(I->getOperand(OpNo), m_APInt(C)))
    return false;

  // If there are no bits set that aren't demanded, nothing to do.
  if (C->isSubsetOf(Demanded))
    return false;

  // This instruction is producing bits that are not demanded. Shrink the
  // constant to just the demanded bits.
  I->setOperand(OpNo, ConstantInt::get(I->getOperand(OpNo)->getType(),
                                       *C & Demanded));
  return true;
}
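/// Entry point for the root instruction: simplify Inst assuming every bit of
/// its result is demanded, replacing all uses if a simpler value is found.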
bool InstCombinerImpl::SimplifyDemandedInstructionBits(Instruction &Inst) {
  unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
  KnownBits Known(BitWidth);
  APInt DemandedMask(APInt::getAllOnes(BitWidth));

  Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, Known,
                                     /*Depth=*/0, &Inst);
  if (!V)
    return false;
  if (V == &Inst)
    return true;
  replaceInstUsesWith(Inst, V);
  return true;
}
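/// This form of SimplifyDemandedBits simplifies the specified instruction
/// operand if possible, updating it in place. It returns true if it made any
/// change and false otherwise.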
bool InstCombinerImpl::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                                            const APInt &DemandedMask,
                                            KnownBits &Known, unsigned Depth) {
  Use &U = I->getOperandUse(OpNo);
  Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask, Known,
                                          Depth, I);
  if (!NewVal)
    return false;
  replaceUse(U, NewVal);
  return true;
}
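/// Attempt to replace V with a simpler value based on the demanded bits:
/// only the bits set in DemandedMask of V's result are ever used. Returns a
/// simpler value for V, V itself if an operand was simplified in place, or
/// null if no simplification was possible. Known is populated with the bits
/// known to be zero or one in V.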
Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                                 KnownBits &Known,
                                                 unsigned Depth,
                                                 Instruction *CxtI) {
  assert(V != nullptr && "Null pointer of Value???");
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
  unsigned BitWidth = DemandedMask.getBitWidth();
  Type *VTy = V->getType();
  assert(
      (!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) &&
      Known.getBitWidth() == BitWidth &&
      "Value *V, DemandedMask and Known must have same BitWidth");
  // If it's a constant, we can just compute its known bits and we're done.
  if (isa<Constant>(V)) {
    computeKnownBits(V, Known, Depth, CxtI);
    return nullptr;
  }

  Known.resetAll();
  if (DemandedMask.isZero()) // Not demanding any bits from V.
    return UndefValue::get(VTy);

  if (Depth == MaxAnalysisRecursionDepth)
    return nullptr;

  // We can't do bit-level tracking across the lanes of a scalable vector.
  if (isa<ScalableVectorType>(VTy))
    return nullptr;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) {
    computeKnownBits(V, Known, Depth, CxtI);
    return nullptr; // Only analyze instructions.
  }

  // If there are multiple uses of this value and we aren't at the root, then
  // we can't do any simplifications of the operands, because DemandedMask
  // only reflects the bits demanded by *one* of the users.
  if (Depth != 0 && !I->hasOneUse())
    return SimplifyMultipleUseDemandedBits(I, DemandedMask, Known, Depth,
                                           CxtI);
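  // If the high bits of an add/sub/mul are not demanded, then we do not care
  // about the high bits of the operands.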
  KnownBits LHSKnown(BitWidth), RHSKnown(BitWidth);

  auto simplifyOperandsBasedOnUnusedHighBits = [&](APInt &DemandedFromOps) {
    unsigned NLZ = DemandedMask.countLeadingZeros();
    // Right-fill the mask of bits for the operands to demand the most
    // significant bit and all those below it.
    DemandedFromOps = APInt::getLowBitsSet(BitWidth, BitWidth - NLZ);
    if (ShrinkDemandedConstant(I, 0, DemandedFromOps) ||
        SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnown, Depth + 1) ||
        ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
        SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Depth + 1)) {
      if (NLZ > 0) {
        // Disable the nsw and nuw flags here: we can no longer guarantee
        // that we won't wrap after simplification.
        I->setHasNoSignedWrap(false);
        I->setHasNoUnsignedWrap(false);
      }
      return true;
    }
    return false;
  };
  switch (I->getOpcode()) {
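  // and(X, Y): a bit known zero on either side forces the result bit to
  // zero, so those bits need not be demanded from the other side.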
  case Instruction::And: {
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.Zero, LHSKnown,
                             Depth + 1))
      return I;
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    Known = LHSKnown & RHSKnown;

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return I->getOperand(1);

    break;
  }
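  // or(X, Y): a bit known one on either side forces the result bit to one,
  // so those bits need not be demanded from the other side.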
  case Instruction::Or: {
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.One, LHSKnown,
                             Depth + 1))
      return I;
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    Known = LHSKnown | RHSKnown;

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or'.
    if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return I->getOperand(1);

    break;
  }
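  // xor(X, Y): a bit known zero on one side passes the other side's bit
  // through unchanged, which enables the operand-returning and or/and
  // rewrites below.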
  case Instruction::Xor: {
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Depth + 1))
      return I;

    // (ctpop(X) ^ ctpop(Y)) & 1 --> ctpop(X ^ Y) & 1, when only the parity
    // bit is demanded.
    Value *LHS, *RHS;
    if (DemandedMask == 1 &&
        match(I->getOperand(0), m_Intrinsic<Intrinsic::ctpop>(m_Value(LHS))) &&
        match(I->getOperand(1), m_Intrinsic<Intrinsic::ctpop>(m_Value(RHS)))) {
      IRBuilderBase::InsertPointGuard Guard(Builder);
      Builder.SetInsertPoint(I);
      auto *Xor = Builder.CreateXor(LHS, RHS);
      return Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, Xor);
    }

    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");

    Known = LHSKnown ^ RHSKnown;

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'xor'.
    if (DemandedMask.isSubsetOf(RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(LHSKnown.Zero))
      return I->getOperand(1);
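    // If all of the demanded bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    //    e.g. (A & C1) ^ (B & C2) --> (A & C1) | (B & C2) iff C1 & C2 == 0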
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero)) {
      Instruction *Or =
          BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
                                   I->getName());
      return InsertNewInstWith(Or, *I);
    }
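    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn
    // this into an AND, as we know the bits will be cleared.
    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1 & C2) == C2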
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | RHSKnown.One) &&
        RHSKnown.One.isSubsetOf(LHSKnown.One)) {
      Constant *AndC = Constant::getIntegerValue(VTy,
                                                 ~RHSKnown.One & DemandedMask);
      Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
      return InsertNewInstWith(And, *I);
    }
    // If the RHS is a constant and every demanded bit is already set in it,
    // force the remaining bits to one as well: that turns this xor into a
    // canonical 'not' op. Don't alter an existing -1 constant.
    const APInt *C;
    if (match(I->getOperand(1), m_APInt(C)) && !C->isAllOnes() &&
        (*C | ~DemandedMask).isAllOnes()) {
      I->setOperand(1, ConstantInt::getAllOnesValue(VTy));
      return I;
    }
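    // If our LHS is an 'and' and if it has one use, and if any of the bits
    // we are flipping are known to be set, then the xor is just resetting
    // those bits to zero. We can just knock out bits from the 'and' and the
    // 'xor', by resetting bits that are in both the 'and' constant and the
    // 'xor' constant.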
    if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0))) {
      ConstantInt *AndRHS, *XorRHS;
      if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
          match(I->getOperand(1), m_ConstantInt(XorRHS)) &&
          match(LHSInst->getOperand(1), m_ConstantInt(AndRHS)) &&
          (LHSKnown.One & RHSKnown.One & DemandedMask) != 0) {
        APInt NewMask = ~(LHSKnown.One & RHSKnown.One & DemandedMask);

        Constant *AndC = ConstantInt::get(VTy, NewMask & AndRHS->getValue());
        Instruction *NewAnd = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
        InsertNewInstWith(NewAnd, *I);

        Constant *XorC = ConstantInt::get(VTy, NewMask & XorRHS->getValue());
        Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
        return InsertNewInstWith(NewXor, *I);
      }
    }
    break;
  }
  case Instruction::Select: {
    if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 1, DemandedMask, LHSKnown, Depth + 1))
      return I;
    assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
    auto CanonicalizeSelectConstant = [](Instruction *I, unsigned OpNo,
                                         const APInt &DemandedMask) {
      const APInt *SelC;
      if (!match(I->getOperand(OpNo), m_APInt(SelC)))
        return false;

      // Get the constant out of the icmp, if there is one.
      Value *X;
      const APInt *CmpC;
      ICmpInst::Predicate Pred;
      if (!match(I->getOperand(0), m_ICmp(Pred, m_Value(X), m_APInt(CmpC))) ||
          CmpC->getBitWidth() != SelC->getBitWidth())
        return ShrinkDemandedConstant(I, OpNo, DemandedMask);

      // If the constants are not already the same but can be made the same
      // under the demanded mask, use the constant value from the icmp.
      if ((*CmpC & DemandedMask) == (*SelC & DemandedMask)) {
        I->setOperand(OpNo, ConstantInt::get(I->getType(), *CmpC));
        return true;
      }
      return ShrinkDemandedConstant(I, OpNo, DemandedMask);
    };
    if (CanonicalizeSelectConstant(I, 1, DemandedMask) ||
        CanonicalizeSelectConstant(I, 2, DemandedMask))
      return I;

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(LHSKnown, RHSKnown);
    break;
  }
  case Instruction::Trunc: {
    // If we do not demand the high bits of a right-shifted and truncated
    // value, we may be able to truncate it before the shift:
    //   trunc (lshr X, C) --> lshr (trunc X), C
    Value *X;
    const APInt *C;
    if (match(I->getOperand(0), m_OneUse(m_LShr(m_Value(X), m_APInt(C)))) &&
        C->ult(VTy->getScalarSizeInBits()) &&
        C->ule(DemandedMask.countLeadingZeros())) {
      IRBuilderBase::InsertPointGuard Guard(Builder);
      Builder.SetInsertPoint(I);
      Value *Trunc = Builder.CreateTrunc(X, VTy);
      return Builder.CreateLShr(Trunc, C->getZExtValue());
    }
  }
    [[fallthrough]];
  case Instruction::ZExt: {
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    APInt InputDemandedMask = DemandedMask.zextOrTrunc(SrcBitWidth);
    KnownBits InputKnown(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, InputDemandedMask, InputKnown, Depth + 1))
      return I;

    // The extended bits are known zero.
    Known = InputKnown.zextOrTrunc(BitWidth);
    break;
  }
  case Instruction::BitCast:
    if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
      return nullptr; // vector->int or fp->int?

    if (auto *DstVTy = dyn_cast<VectorType>(VTy)) {
      if (auto *SrcVTy = dyn_cast<VectorType>(I->getOperand(0)->getType())) {
        if (cast<FixedVectorType>(DstVTy)->getNumElements() !=
            cast<FixedVectorType>(SrcVTy)->getNumElements())
          // Don't touch a bitcast between vectors of different element
          // counts.
          return nullptr;
      } else
        // Don't touch a scalar-to-vector bitcast.
        return nullptr;
    } else if (I->getOperand(0)->getType()->isVectorTy())
      // Don't touch a vector-to-scalar bitcast.
      return nullptr;

    if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
      return I;
    break;
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    APInt InputDemandedBits = DemandedMask.trunc(SrcBitWidth);

    // If any of the sign-extended bits are demanded, we know that the sign
    // bit is demanded.
    if (DemandedMask.getActiveBits() > SrcBitWidth)
      InputDemandedBits.setBit(SrcBitWidth - 1);

    KnownBits InputKnown(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, InputDemandedBits, InputKnown, Depth + 1))
      return I;

    // If the input sign bit is known zero, or if none of the extended bits
    // are demanded, convert this into a zero extension.
    if (InputKnown.isNonNegative() ||
        DemandedMask.getActiveBits() <= SrcBitWidth) {
      CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
      return InsertNewInstWith(NewCast, *I);
    }

    Known = InputKnown.sext(BitWidth);
    break;
  }
  case Instruction::Add: {
    if ((DemandedMask & 1) == 0) {
      // If we do not need the low bit, try to convert bool math to logic:
      //   add iN (zext i1 X), (sext i1 Y) --> sext (~X & Y) to iN
      Value *X, *Y;
      if (match(I, m_c_Add(m_OneUse(m_ZExt(m_Value(X))),
                           m_OneUse(m_SExt(m_Value(Y))))) &&
          X->getType()->isIntOrIntVectorTy(1) &&
          X->getType() == Y->getType()) {
        IRBuilderBase::InsertPointGuard Guard(Builder);
        Builder.SetInsertPoint(I);
        Value *AndNot = Builder.CreateAnd(Builder.CreateNot(X), Y);
        return Builder.CreateSExt(AndNot, VTy);
      }

      //   add iN (sext i1 X), (sext i1 Y) --> sext (X | Y) to iN
      if (match(I, m_Add(m_OneUse(m_SExt(m_Value(X))),
                         m_OneUse(m_SExt(m_Value(Y))))) &&
          X->getType()->isIntOrIntVectorTy(1) &&
          X->getType() == Y->getType()) {
        IRBuilderBase::InsertPointGuard Guard(Builder);
        Builder.SetInsertPoint(I);
        Value *Or = Builder.CreateOr(X, Y);
        return Builder.CreateSExt(Or, VTy);
      }
    }

    APInt DemandedFromOps;
    if (simplifyOperandsBasedOnUnusedHighBits(DemandedFromOps))
      return I;

    // If we are known to be adding zeros to every demanded bit of one
    // operand, just return the other operand.
    if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedFromOps.isSubsetOf(LHSKnown.Zero))
      return I->getOperand(1);

    // Otherwise just compute the known bits of the result.
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    Known = KnownBits::computeForAddSub(/*Add=*/true, NSW, LHSKnown, RHSKnown);
    break;
  }
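  // Subtraction is handled like addition for the unused high bits, but only
  // the RHS can be dropped when its demanded bits are known zero: X - 0 is
  // X, while 0 - X is not X (except for the lowest demanded bit).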
  case Instruction::Sub: {
    APInt DemandedFromOps;
    if (simplifyOperandsBasedOnUnusedHighBits(DemandedFromOps))
      return I;

    if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedFromOps.isOne() && DemandedFromOps.isSubsetOf(LHSKnown.Zero))
      return I->getOperand(1);

    // Otherwise just compute the known bits of the result.
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    Known = KnownBits::computeForAddSub(/*Add=*/false, NSW, LHSKnown,
                                        RHSKnown);
    break;
  }
  case Instruction::Mul: {
    APInt DemandedFromOps;
    if (simplifyOperandsBasedOnUnusedHighBits(DemandedFromOps))
      return I;

    // If we only demand one bit and the multiplier constant has exactly that
    // many trailing zeros (i.e. it is an odd value shifted left by CTZ), the
    // demanded bit of the product equals the correspondingly shifted bit of
    // the other operand, so the multiply reduces to a left shift.
    if (DemandedMask.isPowerOf2()) {
      unsigned CTZ = DemandedMask.countTrailingZeros();
      const APInt *C;
      if (match(I->getOperand(1), m_APInt(C)) &&
          C->countTrailingZeros() == CTZ) {
        Constant *ShiftC = ConstantInt::get(VTy, CTZ);
        Instruction *Shl = BinaryOperator::CreateShl(I->getOperand(0), ShiftC);
        return InsertNewInstWith(Shl, *I);
      }
    }

    // The low two bits of X * X are 0 and X[0]: a square is odd iff X is
    // odd, and bit 1 of a square is always zero.
    if (I->getOperand(0) == I->getOperand(1) && DemandedMask.ult(4)) {
      Constant *One = ConstantInt::get(VTy, 1);
      Instruction *And1 = BinaryOperator::CreateAnd(I->getOperand(0), One);
      return InsertNewInstWith(And1, *I);
    }

    computeKnownBits(I, Known, Depth, CxtI);
    break;
  }
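  // Shift left by a constant: the demanded bits can be shifted into the
  // operand's frame (shl demands DemandedMask >> Amt from its operand), and
  // the vacated low bits of the result are known zero.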
  case Instruction::Shl: {
    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      const APInt *ShrAmt;
      if (match(I->getOperand(0), m_Shr(m_Value(), m_APInt(ShrAmt))))
        if (Instruction *Shr = dyn_cast<Instruction>(I->getOperand(0)))
          if (Value *R = simplifyShrShlDemandedBits(Shr, *ShrAmt, I, *SA,
                                                    DemandedMask, Known))
            return R;

      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth - 1);

      // If we can pre-shift a right-shifted constant to the left without
      // losing any high bits and we don't demand the low bits, then
      // eliminate the left-shift:
      //   (C >> X) << LeftShiftAmtC --> (C << LeftShiftAmtC) >> X
      Value *X;
      Constant *C;
      if (DemandedMask.countTrailingZeros() >= ShiftAmt &&
          match(I->getOperand(0), m_LShr(m_ImmConstant(C), m_Value(X)))) {
        Constant *LeftShiftAmtC = ConstantInt::get(VTy, ShiftAmt);
        Constant *NewC = ConstantExpr::getShl(C, LeftShiftAmtC);
        if (ConstantExpr::getLShr(NewC, LeftShiftAmtC) == C) {
          Instruction *Lshr = BinaryOperator::CreateLShr(NewC, X);
          return InsertNewInstWith(Lshr, *I);
        }
      }
      APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));

      // If the shift is NUW/NSW, then it does demand the high bits.
      ShlOperator *IOp = cast<ShlOperator>(I);
      if (IOp->hasNoSignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt + 1);
      else if (IOp->hasNoUnsignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
        return I;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero <<= ShiftAmt;
      Known.One <<= ShiftAmt;
      // The shifted-in low bits are known zero.
      if (ShiftAmt)
        Known.Zero.setLowBits(ShiftAmt);
    } else {
      // This is a variable shift, so we can't shift the demand mask by a
      // known amount. But if we are not demanding high bits, then we are not
      // demanding those bits from the pre-shifted operand either.
      if (unsigned CTLZ = DemandedMask.countLeadingZeros()) {
        APInt DemandedFromOp(APInt::getLowBitsSet(BitWidth, BitWidth - CTLZ));
        if (SimplifyDemandedBits(I, 0, DemandedFromOp, Known, Depth + 1)) {
          // We can't guarantee that nsw/nuw hold after simplification.
          I->dropPoisonGeneratingFlags();
          return I;
        }
      }
      computeKnownBits(I, Known, Depth, CxtI);
    }
    break;
  }
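  // Logical shift right: the demanded bits shift left into the operand's
  // frame, and the vacated high bits of the result are known zero.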
  case Instruction::LShr: {
    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth - 1);

      // If we are just demanding the shifted sign bit and below, this can be
      // treated as an ashr in disguise.
      if (DemandedMask.countLeadingZeros() >= ShiftAmt) {
        // If we only want bits that already match the sign bit, we don't
        // need to shift.
        unsigned NumHiDemandedBits =
            BitWidth - DemandedMask.countTrailingZeros();
        unsigned SignBits =
            ComputeNumSignBits(I->getOperand(0), Depth + 1, CxtI);
        if (SignBits >= NumHiDemandedBits)
          return I->getOperand(0);

        // If we can pre-shift a left-shifted constant to the right without
        // losing any low bits (we already know we don't demand the high
        // bits), then eliminate the right-shift:
        //   (C << X) >> RightShiftAmtC --> (C >> RightShiftAmtC) << X
        Value *X;
        Constant *C;
        if (match(I->getOperand(0), m_Shl(m_ImmConstant(C), m_Value(X)))) {
          Constant *RightShiftAmtC = ConstantInt::get(VTy, ShiftAmt);
          Constant *NewC = ConstantExpr::getLShr(C, RightShiftAmtC);
          if (ConstantExpr::getShl(NewC, RightShiftAmtC) == C) {
            Instruction *Shl = BinaryOperator::CreateShl(NewC, X);
            return InsertNewInstWith(Shl, *I);
          }
        }
      }

      // Unsigned shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (cast<LShrOperator>(I)->isExact())
        DemandedMaskIn.setLowBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
        return I;

      Known.Zero.lshrInPlace(ShiftAmt);
      Known.One.lshrInPlace(ShiftAmt);
      if (ShiftAmt)
        Known.Zero.setHighBits(ShiftAmt); // High bits known zero.
    }
    break;
  }
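  // Arithmetic shift right: like lshr, but the sign bit is replicated, so
  // demanding any high bit also demands the operand's sign bit, and a
  // known-zero sign bit lets us relax this to a logical shift.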
  case Instruction::AShr: {
    unsigned SignBits = ComputeNumSignBits(I->getOperand(0), Depth + 1, CxtI);

    // If we only want bits that already match the sign bit, we don't need
    // to shift.
    unsigned NumHiDemandedBits = BitWidth - DemandedMask.countTrailingZeros();
    if (SignBits >= NumHiDemandedBits)
      return I->getOperand(0);

    // If this is an arithmetic shift right and only the low bit is demanded,
    // we can always convert this into a logical shr, even if the shift
    // amount is variable: the low bit of the result cannot be a shifted-in
    // sign bit unless the shift amount is out of range, which is undefined.
    if (DemandedMask.isOne()) {
      // Perform the logical shift right.
      Instruction *NewVal = BinaryOperator::CreateLShr(
          I->getOperand(0), I->getOperand(1), I->getName());
      return InsertNewInstWith(NewVal, *I);
    }

    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      uint32_t ShiftAmt = SA->getLimitedValue(BitWidth - 1);

      // Signed shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
      // If any of the high bits are demanded, we should set the sign bit as
      // demanded.
      if (DemandedMask.countLeadingZeros() <= ShiftAmt)
        DemandedMaskIn.setSignBit();

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (cast<AShrOperator>(I)->isExact())
        DemandedMaskIn.setLowBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
        return I;

      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      // Compute the new bits that are at the top now plus sign bits.
      APInt HighBits(APInt::getHighBitsSet(
          BitWidth, std::min(SignBits + ShiftAmt - 1, BitWidth)));
      Known.Zero.lshrInPlace(ShiftAmt);
      Known.One.lshrInPlace(ShiftAmt);

      // If the input sign bit is known to be zero, or if none of the top
      // bits are demanded, turn this into an unsigned shift right.
      assert(BitWidth > ShiftAmt && "Shift amount not saturated?");
      if (Known.Zero[BitWidth - ShiftAmt - 1] ||
          !DemandedMask.intersects(HighBits)) {
        BinaryOperator *LShr = BinaryOperator::CreateLShr(I->getOperand(0),
                                                          I->getOperand(1));
        LShr->setIsExact(cast<BinaryOperator>(I)->isExact());
        return InsertNewInstWith(LShr, *I);
      } else if (Known.One[BitWidth - ShiftAmt - 1]) {
        // New bits are known one.
        Known.One |= HighBits;
      }
    }
    break;
  }
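  // udiv by a constant with k trailing zeros never reads the low k bits of
  // the dividend: X udiv (C0 * 2^k) == (X >> k) udiv C0, so those bits only
  // affect the discarded remainder.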
  case Instruction::UDiv: {
    // UDiv doesn't demand low bits that are zero in the divisor.
    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      // An exact udiv does demand the low zero bits of the dividend (they
      // must be zero), so don't narrow the demanded mask.
      if (cast<UDivOperator>(I)->isExact())
        break;

      unsigned RHSTrailingZeros = SA->countTrailingZeros();
      APInt DemandedMaskIn =
          APInt::getHighBitsSet(BitWidth, BitWidth - RHSTrailingZeros);
      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, LHSKnown, Depth + 1))
        return I;

      // Propagate zero bits from the input.
      Known.Zero.setHighBits(std::min(
          BitWidth, LHSKnown.Zero.countLeadingOnes() + RHSTrailingZeros));
    }
    break;
  }
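  // srem by a power of two only passes the low bits of the LHS through
  // (plus a sign correction), so the demanded bits can often be answered
  // from the LHS alone.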
  case Instruction::SRem: {
    const APInt *Rem;
    if (match(I->getOperand(1), m_APInt(Rem))) {
      // X % -1 demands all the bits because we don't want to introduce
      // INT_MIN % -1 (== undef) by accident.
      if (Rem->isAllOnes())
        break;
      APInt RA = Rem->abs();
      if (RA.isPowerOf2()) {
        if (DemandedMask.ult(RA)) // srem won't affect demanded bits.
          return I->getOperand(0);

        APInt LowBits = RA - 1;
        APInt Mask2 = LowBits | APInt::getSignMask(BitWidth);
        if (SimplifyDemandedBits(I, 0, Mask2, LHSKnown, Depth + 1))
          return I;

        // The low bits of LHS are unchanged by the srem.
        Known.Zero = LHSKnown.Zero & LowBits;
        Known.One = LHSKnown.One & LowBits;

        // If LHS is non-negative or has all low bits zero, then the upper
        // bits are all zero.
        if (LHSKnown.isNonNegative() || LowBits.isSubsetOf(LHSKnown.Zero))
          Known.Zero |= ~LowBits;

        // If LHS is negative and not all low bits are zero, then the upper
        // bits are all one.
        if (LHSKnown.isNegative() && LowBits.intersects(LHSKnown.One))
          Known.One |= ~LowBits;

        assert(!Known.hasConflict() && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (DemandedMask.isSignBitSet()) {
      computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnown.isNonNegative())
        Known.makeNonNegative();
    }
    break;
  }
  case Instruction::URem: {
    // The result of urem is never wider than either operand, so the leading
    // zeros common to both operands are also zero in the result.
    KnownBits Known2(BitWidth);
    APInt AllOnes = APInt::getAllOnes(BitWidth);
    if (SimplifyDemandedBits(I, 0, AllOnes, Known2, Depth + 1) ||
        SimplifyDemandedBits(I, 1, AllOnes, Known2, Depth + 1))
      return I;

    unsigned Leaders = Known2.countMinLeadingZeros();
    Known.Zero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
    break;
  }
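  // Calls: a few intrinsics have bit-level simplifications (abs, ctpop,
  // bswap, funnel shifts, umax/umin); everything else goes through the
  // target hook for target-specific intrinsics.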
  case Instruction::Call: {
    bool KnownBitsComputed = false;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::abs: {
        // The low bit of abs(X) is the low bit of X itself, so abs is a
        // no-op when only bit 0 is demanded.
        if (DemandedMask == 1)
          return II->getArgOperand(0);
        break;
      }
      case Intrinsic::ctpop: {
        // Checking if the number of clear bits is odd (parity)? If the type
        // has an even number of bits, that's the same as checking if the
        // number of set bits is odd, so we can eliminate the 'not' op.
        Value *X;
        if (DemandedMask == 1 && VTy->getScalarSizeInBits() % 2 == 0 &&
            match(II->getArgOperand(0), m_Not(m_Value(X)))) {
          Function *Ctpop = Intrinsic::getDeclaration(
              II->getModule(), Intrinsic::ctpop, VTy);
          return InsertNewInstWith(CallInst::Create(Ctpop, {X}), *I);
        }
        break;
      }
      case Intrinsic::bswap: {
        // If the only bits demanded come from one byte of the bswap result,
        // just shift the input byte into position to eliminate the bswap.
        unsigned NLZ = alignDown(DemandedMask.countLeadingZeros(), 8);
        unsigned NTZ = alignDown(DemandedMask.countTrailingZeros(), 8);
        // If we need exactly one byte, we can do this transformation.
        if (BitWidth - NLZ - NTZ == 8) {
          // Replace this with either a left or right shift to get the byte
          // into the right place.
          Instruction *NewVal;
          if (NLZ > NTZ)
            NewVal = BinaryOperator::CreateLShr(
                II->getArgOperand(0), ConstantInt::get(VTy, NLZ - NTZ));
          else
            NewVal = BinaryOperator::CreateShl(
                II->getArgOperand(0), ConstantInt::get(VTy, NTZ - NLZ));
          NewVal->takeName(I);
          return InsertNewInstWith(NewVal, *I);
        }
        break;
      }
      case Intrinsic::fshr:
      case Intrinsic::fshl: {
        const APInt *SA;
        if (!match(I->getOperand(2), m_APInt(SA)))
          break;

        // Normalize to funnel shift left. APInt shifts of BitWidth are well
        // defined, so no need to special-case zero shifts here.
        uint64_t ShiftAmt = SA->urem(BitWidth);
        if (II->getIntrinsicID() == Intrinsic::fshr)
          ShiftAmt = BitWidth - ShiftAmt;

        APInt DemandedMaskLHS(DemandedMask.lshr(ShiftAmt));
        APInt DemandedMaskRHS(DemandedMask.shl(BitWidth - ShiftAmt));
        if (SimplifyDemandedBits(I, 0, DemandedMaskLHS, LHSKnown, Depth + 1) ||
            SimplifyDemandedBits(I, 1, DemandedMaskRHS, RHSKnown, Depth + 1))
          return I;

        Known.Zero = LHSKnown.Zero.shl(ShiftAmt) |
                     RHSKnown.Zero.lshr(BitWidth - ShiftAmt);
        Known.One = LHSKnown.One.shl(ShiftAmt) |
                    RHSKnown.One.lshr(BitWidth - ShiftAmt);
        KnownBitsComputed = true;
        break;
      }
      case Intrinsic::umax: {
        // umax(A, C) == A if the lowest demanded bit of the result is higher
        // than the highest set bit of C: the max then cannot change any
        // demanded bit.
        const APInt *C;
        unsigned CTZ = DemandedMask.countTrailingZeros();
        if (match(II->getArgOperand(1), m_APInt(C)) &&
            CTZ >= C->getActiveBits())
          return II->getArgOperand(0);
        break;
      }
      case Intrinsic::umin: {
        // umin(A, C) == A if the lowest demanded bit of the result is higher
        // than the highest clear bit of C (DeMorgan's of the umax case).
        const APInt *C;
        unsigned CTZ = DemandedMask.countTrailingZeros();
        if (match(II->getArgOperand(1), m_APInt(C)) &&
            CTZ >= C->getBitWidth() - C->countLeadingOnes())
          return II->getArgOperand(0);
        break;
      }
      default: {
        // Handle target specific intrinsics.
        Optional<Value *> V = targetSimplifyDemandedUseBitsIntrinsic(
            *II, DemandedMask, Known, KnownBitsComputed);
        if (V)
          return *V;
        break;
      }
      }
    }

    if (!KnownBitsComputed)
      computeKnownBits(V, Known, Depth, CxtI);
    break;
  }
  }

  // If the client is only demanding bits that we know, return the known
  // constant.
  if (DemandedMask.isSubsetOf(Known.Zero | Known.One))
    return Constant::getIntegerValue(VTy, Known.One);

  return nullptr;
}
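/// Helper routine of SimplifyDemandedUseBits. It computes the known bits of
/// an instruction that has multiple users, and performs only the
/// simplifications that are valid for just the one user being visited
/// (returning a constant or one of the operands) without mutating the
/// instruction itself.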
Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
    Instruction *I, const APInt &DemandedMask, KnownBits &Known,
    unsigned Depth, Instruction *CxtI) {
  unsigned BitWidth = DemandedMask.getBitWidth();
  Type *ITy = I->getType();

  KnownBits LHSKnown(BitWidth);
  KnownBits RHSKnown(BitWidth);

  // Despite the fact that we can't simplify this instruction in all users'
  // context, we can at least compute the known bits, and we can do
  // simplifications that apply to *just* the one user if we know that this
  // instruction has a simpler value in that context.
  switch (I->getOpcode()) {
  case Instruction::And: {
    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
    Known = LHSKnown & RHSKnown;

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return I->getOperand(1);

    break;
  }
  case Instruction::Or: {
    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
    Known = LHSKnown | RHSKnown;

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return I->getOperand(1);

    break;
  }
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
    Known = LHSKnown ^ RHSKnown;

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedMask.isSubsetOf(RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(LHSKnown.Zero))
      return I->getOperand(1);

    break;
  }
  case Instruction::AShr: {
    // If the right-shifted operand is the result of a left shift by the
    // same amount, this is probably a zero/sign extension; if none of the
    // new sign bits are demanded, return the original operand instead.
    const APInt *ShiftRC;
    const APInt *ShiftLC;
    Value *X;
    if (match(I,
              m_AShr(m_Shl(m_Value(X), m_APInt(ShiftLC)), m_APInt(ShiftRC))) &&
        *ShiftLC == *ShiftRC && ShiftLC->ult(BitWidth) &&
        DemandedMask.isSubsetOf(APInt::getLowBitsSet(
            BitWidth, BitWidth - ShiftRC->getZExtValue())))
      return X;
    break;
  }
  default:
    // Compute the known bits to simplify things downstream.
    computeKnownBits(I, Known, Depth, CxtI);
    break;
  }

  // If this user is only demanding bits that we know, return the known
  // constant.
  if (DemandedMask.isSubsetOf(Known.Zero | Known.One))
    return Constant::getIntegerValue(ITy, Known.One);

  return nullptr;
}
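/// Helper routine of SimplifyDemandedUseBits. It tries to simplify the shift
/// pair "shl (shr X, ShrAmt), ShlAmt" when only DemandedMask bits of the
/// result are used: if the demanded bits agree no matter which shift is
/// applied first, the pair is replaced by a single shift of X (or by X
/// itself when the amounts are equal).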
Value *InstCombinerImpl::simplifyShrShlDemandedBits(
    Instruction *Shr, const APInt &ShrOp1, Instruction *Shl,
    const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known) {
  if (!ShlOp1 || !ShrOp1)
    return nullptr; // No-op.

  Value *VarX = Shr->getOperand(0);
  Type *Ty = VarX->getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
    return nullptr; // Undef.

  unsigned ShlAmt = ShlOp1.getZExtValue();
  unsigned ShrAmt = ShrOp1.getZExtValue();

  Known.One.clearAllBits();
  Known.Zero.setLowBits(ShlAmt - 1);
  Known.Zero &= DemandedMask;

  APInt BitMask1(APInt::getAllOnes(BitWidth));
  APInt BitMask2(APInt::getAllOnes(BitWidth));

  bool isLshr = (Shr->getOpcode() == Instruction::LShr);
  BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
                      (BitMask1.ashr(ShrAmt) << ShlAmt);

  if (ShrAmt <= ShlAmt) {
    BitMask2 <<= (ShlAmt - ShrAmt);
  } else {
    BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt) :
                        BitMask2.ashr(ShrAmt - ShlAmt);
  }

  // If the bits demanded from the result agree between the two shift
  // orders, the pair of shifts can be folded into one.
  if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
    if (ShrAmt == ShlAmt)
      return VarX; // The shifts cancel; X itself suffices.

    Instruction *New;
    if (ShrAmt < ShlAmt) {
      Constant *Amt = ConstantInt::get(VarX->getType(), ShlAmt - ShrAmt);
      New = BinaryOperator::CreateShl(VarX, Amt);
      BinaryOperator *Orig = cast<BinaryOperator>(Shl);
      New->setHasNoSignedWrap(Orig->hasNoSignedWrap());
      New->setHasNoUnsignedWrap(Orig->hasNoUnsignedWrap());
    } else {
      Constant *Amt = ConstantInt::get(VarX->getType(), ShrAmt - ShlAmt);
      New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt) :
                     BinaryOperator::CreateAShr(VarX, Amt);
      if (cast<BinaryOperator>(Shr)->isExact())
        New->setIsExact(true);
    }

    return InsertNewInstWith(New, *Shl);
  }

  return nullptr;
}
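/// The specified value produces a vector with any number of elements.
/// This method analyzes which elements of the operand are undef or poison
/// and returns that information in UndefElts.
///
/// DemandedElts contains the set of elements that are actually used by the
/// caller; by default (AllowMultipleUsers == false) the value is simplified
/// only if it has a single caller. If the demanded-element information can
/// be used to simplify the operation, the simplified value is returned;
/// otherwise this returns null.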
Value *InstCombinerImpl::SimplifyDemandedVectorElts(Value *V,
                                                    APInt DemandedElts,
                                                    APInt &UndefElts,
                                                    unsigned Depth,
                                                    bool AllowMultipleUsers) {
  // Cannot analyze a scalable vector: the number of elements is not a
  // compile-time constant.
  if (isa<ScalableVectorType>(V->getType()))
    return nullptr;

  unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();
  APInt EltMask(APInt::getAllOnes(VWidth));
  assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");

  if (match(V, m_Undef())) {
    // If the entire vector is undef or poison, just return this info.
    UndefElts = EltMask;
    return nullptr;
  }

  if (DemandedElts.isZero()) { // If nothing is demanded, provide poison.
    UndefElts = EltMask;
    return PoisonValue::get(V->getType());
  }
  UndefElts = 0;

  if (auto *C = dyn_cast<Constant>(V)) {
    // Check if this is identity. If so, return 0 since we are not
    // simplifying anything.
    if (DemandedElts.isAllOnes())
      return nullptr;

    Type *EltTy = cast<VectorType>(V->getType())->getElementType();
    Constant *Poison = PoisonValue::get(EltTy);
    SmallVector<Constant *, 16> Elts;
    for (unsigned i = 0; i != VWidth; ++i) {
      if (!DemandedElts[i]) { // If not demanded, set to poison.
        Elts.push_back(Poison);
        UndefElts.setBit(i);
        continue;
      }

      Constant *Elt = C->getAggregateElement(i);
      if (!Elt)
        return nullptr;

      Elts.push_back(Elt);
      if (isa<UndefValue>(Elt)) // Already undef or poison.
        UndefElts.setBit(i);
    }

    // If we changed the constant, return it.
    Constant *NewCV = ConstantVector::get(Elts);
    return NewCV != C ? NewCV : nullptr;
  }
  if (!AllowMultipleUsers) {
    // If multiple users are using the root value, proceed with
    // simplification conservatively assuming that all elements are needed.
    if (!V->hasOneUse()) {
      // Quit if we find multiple users of a non-root value, though; they
      // will be handled when it's their turn to be visited by the main
      // instcombine process.
      if (Depth != 0)
        return nullptr;

      // Conservatively assume that all elements are needed.
      DemandedElts = EltMask;
    }
  }
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return nullptr; // Only analyze instructions.

  bool MadeChange = false;
  auto simplifyAndSetOp = [&](Instruction *Inst, unsigned OpNum,
                              APInt Demanded, APInt &Undef) {
    auto *II = dyn_cast<IntrinsicInst>(Inst);
    Value *Op = II ? II->getArgOperand(OpNum) : Inst->getOperand(OpNum);
    if (Value *V = SimplifyDemandedVectorElts(Op, Demanded, Undef, Depth + 1)) {
      replaceOperand(*Inst, OpNum, V);
      MadeChange = true;
    }
  };
  APInt UndefElts2(VWidth, 0);
  APInt UndefElts3(VWidth, 0);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::GetElementPtr: {
    // The LangRef requires that struct geps have all constant indices. As
    // such, we can't convert any operand to partial undef.
    auto mayIndexStructType = [](GetElementPtrInst &GEP) {
      for (auto I = gep_type_begin(GEP), E = gep_type_end(GEP); I != E; I++)
        if (I.isStruct())
          return true;
      return false;
    };
    if (mayIndexStructType(cast<GetElementPtrInst>(*I)))
      break;

    // Conservatively track the demanded elements back through any vector
    // operands we may have. We know there must be at least one, or we
    // wouldn't have a vector result to get here.
    for (unsigned i = 0; i < I->getNumOperands(); i++) {
      if (match(I->getOperand(i), m_Undef())) {
        // If the entire vector is undefined, just return this info.
        UndefElts = EltMask;
        return nullptr;
      }
      if (I->getOperand(i)->getType()->isVectorTy()) {
        APInt UndefEltsOp(VWidth, 0);
        simplifyAndSetOp(I, i, DemandedElts, UndefEltsOp);
        UndefElts |= UndefEltsOp;
      }
    }

    break;
  }
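  // insertelement: a value written at a known index shadows that lane of
  // the source vector, so the source is only demanded at the other lanes.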
  case Instruction::InsertElement: {
    // If this is a variable index, we don't know which element it
    // overwrites, so demand exactly the same input as we produce.
    ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
    if (!Idx) {
      // Note that we can't propagate undef element info, because we don't
      // know which element is getting updated.
      simplifyAndSetOp(I, 0, DemandedElts, UndefElts2);
      break;
    }

    // The inserted element overwrites whatever was there, so the input
    // demanded set is simpler than the output set.
    unsigned IdxNo = Idx->getZExtValue();
    APInt PreInsertDemandedElts = DemandedElts;
    if (IdxNo < VWidth)
      PreInsertDemandedElts.clearBit(IdxNo);

    // If we only demand the element that is being inserted and that element
    // was extracted from the same index in another vector with the same
    // type, replace this insert with that other vector.
    Value *Vec;
    if (PreInsertDemandedElts == 0 &&
        match(I->getOperand(1),
              m_ExtractElt(m_Value(Vec), m_SpecificInt(IdxNo))) &&
        Vec->getType() == I->getType())
      return Vec;

    simplifyAndSetOp(I, 0, PreInsertDemandedElts, UndefElts);

    // If this is inserting an element that isn't demanded, remove this
    // insertelement.
    if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
      Worklist.push(I);
      return I->getOperand(0);
    }

    // The inserted element is defined.
    UndefElts.clearBit(IdxNo);
    break;
  }
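  // shufflevector: map the demanded result lanes back through the mask to
  // compute which lanes of each source operand are actually demanded.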
  case Instruction::ShuffleVector: {
    auto *Shuffle = cast<ShuffleVectorInst>(I);
    assert(Shuffle->getOperand(0)->getType() ==
               Shuffle->getOperand(1)->getType() &&
           "Expected shuffle operands to have same type");
    unsigned OpWidth = cast<FixedVectorType>(Shuffle->getOperand(0)->getType())
                           ->getNumElements();

    // Handle the trivial case of a splat: only check the first element of
    // the LHS operand.
    if (all_of(Shuffle->getShuffleMask(), [](int Elt) { return Elt == 0; }) &&
        DemandedElts.isAllOnes()) {
      if (!match(I->getOperand(1), m_Undef())) {
        I->setOperand(1, PoisonValue::get(I->getOperand(1)->getType()));
        MadeChange = true;
      }
      APInt LeftDemanded(OpWidth, 1);
      APInt LHSUndefElts(OpWidth, 0);
      simplifyAndSetOp(I, 0, LeftDemanded, LHSUndefElts);
      if (LHSUndefElts[0])
        UndefElts = EltMask;
      else
        UndefElts.clearAllBits();
      break;
    }
    APInt LeftDemanded(OpWidth, 0), RightDemanded(OpWidth, 0);
    for (unsigned i = 0; i < VWidth; i++) {
      if (DemandedElts[i]) {
        unsigned MaskVal = Shuffle->getMaskValue(i);
        if (MaskVal != -1u) {
          assert(MaskVal < OpWidth * 2 &&
                 "shufflevector mask index out of range!");
          if (MaskVal < OpWidth)
            LeftDemanded.setBit(MaskVal);
          else
            RightDemanded.setBit(MaskVal - OpWidth);
        }
      }
    }

    APInt LHSUndefElts(OpWidth, 0);
    simplifyAndSetOp(I, 0, LeftDemanded, LHSUndefElts);

    APInt RHSUndefElts(OpWidth, 0);
    simplifyAndSetOp(I, 1, RightDemanded, RHSUndefElts);
    // If this shuffle does not change the vector length and the elements
    // demanded by this shuffle form an identity mask, then this shuffle is
    // unnecessary.
    if (VWidth == OpWidth) {
      bool IsIdentityShuffle = true;
      for (unsigned i = 0; i < VWidth; i++) {
        unsigned MaskVal = Shuffle->getMaskValue(i);
        if (DemandedElts[i] && i != MaskVal) {
          IsIdentityShuffle = false;
          break;
        }
      }
      if (IsIdentityShuffle)
        return Shuffle->getOperand(0);
    }
    bool NewUndefElts = false;
    unsigned LHSIdx = -1u, LHSValIdx = -1u;
    unsigned RHSIdx = -1u, RHSValIdx = -1u;
    bool LHSUniform = true;
    bool RHSUniform = true;
    for (unsigned i = 0; i < VWidth; i++) {
      unsigned MaskVal = Shuffle->getMaskValue(i);
      if (MaskVal == -1u) {
        UndefElts.setBit(i);
      } else if (!DemandedElts[i]) {
        NewUndefElts = true;
        UndefElts.setBit(i);
      } else if (MaskVal < OpWidth) {
        if (LHSUndefElts[MaskVal]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          LHSIdx = LHSIdx == -1u ? i : OpWidth;
          LHSValIdx = LHSValIdx == -1u ? MaskVal : OpWidth;
          LHSUniform = LHSUniform && (MaskVal == i);
        }
      } else {
        if (RHSUndefElts[MaskVal - OpWidth]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          RHSIdx = RHSIdx == -1u ? i : OpWidth;
          RHSValIdx = RHSValIdx == -1u ? MaskVal - OpWidth : OpWidth;
          RHSUniform = RHSUniform && (MaskVal - OpWidth == i);
        }
      }
    }
    // Try to transform a shuffle with a constant vector contributing a
    // single element into an insertelement of that element:
    //   shufflevector V, C, <v1, v2, .., ci, .., vm> -->
    //   insertelement V, C[ci], ci-n
    if (OpWidth ==
        cast<FixedVectorType>(Shuffle->getType())->getNumElements()) {
      Value *Op = nullptr;
      Constant *Value = nullptr;
      unsigned Idx = -1u;

      // Find a constant vector with a single demanded element in the
      // shuffle (LHS or RHS).
      if (LHSIdx < OpWidth && RHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(0))) {
          Op = Shuffle->getOperand(1);
          Value = CV->getOperand(LHSValIdx);
          Idx = LHSIdx;
        }
      }
      if (RHSIdx < OpWidth && LHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(1))) {
          Op = Shuffle->getOperand(0);
          Value = CV->getOperand(RHSValIdx);
          Idx = RHSIdx;
        }
      }
      // Found a constant vector with a single element: convert the shuffle
      // to an insertelement.
      if (Op && Value) {
        Instruction *New = InsertElementInst::Create(
            Op, Value,
            ConstantInt::get(Type::getInt64Ty(I->getContext()), Idx),
            Shuffle->getName());
        InsertNewInstWith(New, *Shuffle);
        return New;
      }
    }

    if (NewUndefElts) {
      // Add additional discovered undefs.
      SmallVector<int, 16> Elts;
      for (unsigned i = 0; i < VWidth; ++i) {
        if (UndefElts[i])
          Elts.push_back(UndefMaskElem);
        else
          Elts.push_back(Shuffle->getMaskValue(i));
      }
      Shuffle->setShuffleMask(Elts);
      MadeChange = true;
    }
    break;
  }
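  // select: a constant condition vector tells us, lane by lane, which arm
  // is live, so the dead arm's lane need not be demanded.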
  case Instruction::Select: {
    // If this is a vector select, try to transform the select condition
    // based on the current demanded elements.
    SelectInst *Sel = cast<SelectInst>(I);
    if (Sel->getCondition()->getType()->isVectorTy())
      simplifyAndSetOp(I, 0, DemandedElts, UndefElts);

    // Next, see if we can transform the arms of the select.
    APInt DemandedLHS(DemandedElts), DemandedRHS(DemandedElts);
    if (auto *CV = dyn_cast<ConstantVector>(Sel->getCondition())) {
      for (unsigned i = 0; i < VWidth; i++) {
        // isNullValue() always returns false when called on a ConstantExpr,
        // so skip constant expressions to avoid propagating incorrect
        // information.
        Constant *CElt = CV->getAggregateElement(i);
        if (isa<ConstantExpr>(CElt))
          continue;
        if (CElt->isNullValue())
          DemandedLHS.clearBit(i);
        else
          DemandedRHS.clearBit(i);
      }
    }

    simplifyAndSetOp(I, 1, DemandedLHS, UndefElts2);
    simplifyAndSetOp(I, 2, DemandedRHS, UndefElts3);

    // Output elements are undefined only if the element from each arm is
    // undefined.
    UndefElts = UndefElts2 & UndefElts3;
    break;
  }
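  // bitcast: for vector->vector casts, demanded lanes map between input and
  // output at a fixed ratio of element counts in either direction.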
  case Instruction::BitCast: {
    // Vector->vector casts only.
    VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
    if (!VTy)
      break;
    unsigned InVWidth = cast<FixedVectorType>(VTy)->getNumElements();
    APInt InputDemandedElts(InVWidth, 0);
    UndefElts2 = APInt(InVWidth, 0);
    unsigned Ratio;

    if (VWidth == InVWidth) {
      // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
      // elements as are demanded of us.
      Ratio = 1;
      InputDemandedElts = DemandedElts;
    } else if ((VWidth % InVWidth) == 0) {
      // If the number of elements in the output is a multiple of the number
      // of elements in the input, an input element is live if any of the
      // corresponding output elements are live.
      Ratio = VWidth / InVWidth;
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (DemandedElts[OutIdx])
          InputDemandedElts.setBit(OutIdx / Ratio);
    } else if ((InVWidth % VWidth) == 0) {
      // If the number of elements in the input is a multiple of the number
      // of elements in the output, an input element is live if the
      // corresponding output element is live.
      Ratio = InVWidth / VWidth;
      for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
        if (DemandedElts[InIdx / Ratio])
          InputDemandedElts.setBit(InIdx);
    } else {
      // Unsupported so far.
      break;
    }

    simplifyAndSetOp(I, 0, InputDemandedElts, UndefElts2);

    if (VWidth == InVWidth) {
      UndefElts = UndefElts2;
    } else if ((VWidth % InVWidth) == 0) {
      // An output element is undef if the corresponding input element is
      // undef.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (UndefElts2[OutIdx / Ratio])
          UndefElts.setBit(OutIdx);
    } else if ((InVWidth % VWidth) == 0) {
      // An output element is undef if all of the corresponding input
      // elements are undef.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
        APInt SubUndef = UndefElts2.extractBits(Ratio, OutIdx * Ratio);
        if (SubUndef.countPopulation() == Ratio)
          UndefElts.setBit(OutIdx);
      }
    } else {
      llvm_unreachable("Unimp");
    }
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
    simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
    break;
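  // Calls: masked loads and gathers get dedicated demanded-element handling
  // below; other intrinsics go through the target hook.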
  case Instruction::Call: {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
    if (!II)
      break;
    switch (II->getIntrinsicID()) {
    case Intrinsic::masked_gather: // fallthrough
    case Intrinsic::masked_load: {
      // Subtlety: if we load from a pointer, the pointer must be valid
      // regardless of whether the element is demanded. Doing otherwise
      // risks segfaults which didn't exist in the original program.
      APInt DemandedPtrs(APInt::getAllOnes(VWidth)),
          DemandedPassThrough(DemandedElts);
      if (auto *CV = dyn_cast<ConstantVector>(II->getOperand(2)))
        for (unsigned i = 0; i < VWidth; i++) {
          Constant *CElt = CV->getAggregateElement(i);
          if (CElt->isNullValue())
            DemandedPtrs.clearBit(i);
          else if (CElt->isAllOnesValue())
            DemandedPassThrough.clearBit(i);
        }
      simplifyAndSetOp(II, 0, DemandedPtrs, UndefElts2);
      simplifyAndSetOp(II, 3, DemandedPassThrough, UndefElts3);

      // Output elements are undefined if the element from both sources is
      // undefined.
      UndefElts = UndefElts2 & UndefElts3;
      break;
    }
    default: {
      // Handle target specific intrinsics.
      Optional<Value *> V = targetSimplifyDemandedVectorEltsIntrinsic(
          *II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
          simplifyAndSetOp);
      if (V)
        return *V;
      break;
    }
    }
    break;
  }
  }
  // Simple integer binops (excluding div/rem and shifts, which have
  // UB/poison potential): simplify both operands against the demanded
  // elements directly.
  BinaryOperator *BO = dyn_cast<BinaryOperator>(I);
  if (BO && !BO->isIntDivRem() && !BO->isShift()) {
    simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
    simplifyAndSetOp(I, 1, DemandedElts, UndefElts2);

    // Output elements are undefined only if both inputs are: consider
    // undef & 0, whose result is known zero, not undef.
    UndefElts &= UndefElts2;
  }
  return MadeChange ? I : nullptr;
}