#define DEBUG_TYPE "instcombine"

STATISTIC(NumWorklistIterations,
          "Number of instruction combining iterations performed");
STATISTIC(NumOneIteration, "Number of functions with one iteration");
STATISTIC(NumTwoIterations, "Number of functions with two iterations");
STATISTIC(NumThreeIterations, "Number of functions with three iterations");
STATISTIC(NumFourOrMoreIterations,
          "Number of functions with four or more iterations");
STATISTIC(NumDeadInst, "Number of dead inst eliminated");
131 "Controls which instructions are visited");
138 "instcombine-max-sink-users",
cl::init(32),
139 cl::desc(
"Maximum number of undroppable users for instruction sinking"));
143 cl::desc(
"Maximum array size considered when doing a combine"));
std::optional<Instruction *>
InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
166 bool &KnownBitsComputed) {
168 if (
II.getCalledFunction()->isTargetIntrinsic()) {
170 *
this,
II, DemandedMask, Known, KnownBitsComputed);
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTI.simplifyDemandedVectorEltsIntrinsic(
        *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
        SimplifyAndSetOp);
  auto *Inst = dyn_cast<Instruction>(GEP);

  if (Inst && !GEP->hasOneUse() && !GEP->hasAllConstantIndices() &&
      !GEP->getSourceElementType()->isIntegerTy(8)) {
bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {

bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
                                        unsigned ToWidth) const {
  // Convert to desirable widths even if they are not legal types.
  // Only shrink types, to prevent infinite loops.
  if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
    return true;

  // If the source type is legal or desirable and the destination is not
  // legal, do not shrink.
  if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
    return false;

  // If both are illegal, do not increase the size of the result. We do allow
  // things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;
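// Illustrative outcomes (assuming a target where i32/i64 are legal):
//   shouldChangeType(i64, i32) -> true  (shrink to a desirable width)
//   shouldChangeType(i32, i33) -> false (legal to illegal)
//   shouldChangeType(i17, i24) -> false (two illegal types, growing)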
bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  return shouldChangeType(FromWidth, ToWidth);
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap())
    return false;

  if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
    return false;

  const APInt *BVal, *CVal;

  bool Overflow = false;
  if (Opcode == Instruction::Add)
    (void)BVal->sadd_ov(*CVal, Overflow);
  else
    (void)BVal->ssub_ov(*CVal, Overflow);

  return !Overflow;
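// e.g. when reassociating (X +nsw C1) +nsw C2 into X + (C1 + C2), nsw may
// only be kept if C1 + C2 itself does not signed-overflow; the
// sadd_ov/ssub_ov probes above detect exactly that.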
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoUnsignedWrap();

  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoSignedWrap();

  I.clearSubclassOptionalData();

  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
  auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
  if (!Cast || !Cast->hasOneUse())
    return false;

  // TODO: Enhance logic for other casts and remove this check.
  auto CastOpcode = Cast->getOpcode();
  if (CastOpcode != Instruction::ZExt)
    return false;

  // TODO: Enhance logic for other BinOps and remove this check.
  auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
  if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
    return false;

  Cast->dropPoisonGeneratingFlags();
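// Illustrative instance of this fold (i8 zext'd to i32):
//   and (zext (and i8 %x, 7) to i32), 15
//     --> zext (and i8 %x, 7) to i32
// since the two masks combine on the narrow side as 7 & 15 == 7.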
Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
  auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);

  auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
  Type *CastTy = IntToPtr->getDestTy();
  if (PtrToInt &&
      CastTy->getPointerAddressSpace() ==
          PtrToInt->getSrcTy()->getPointerAddressSpace() &&
      DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
          DL.getTypeSizeInBits(PtrToInt->getDestTy()))
    return PtrToInt->getOperand(0);
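// Round-trip elimination performed here (address spaces and bit widths must
// match):
//   %i = ptrtoint ptr %p to i64
//   %q = inttoptr i64 %i to ptr   -->  uses of %q become uses of %p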
  bool Changed = false;

    Changed = !I.swapOperands();

  if (I.isCommutative()) {
    if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {

  if (I.isAssociative()) {

        I.setHasNoUnsignedWrap(true);

        I.setHasNoSignedWrap(true);
  if (I.isAssociative() && I.isCommutative()) {

      if (isa<FPMathOperator>(NewBO)) {

      I.setHasNoUnsignedWrap(true);
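// Representative reassociations this block performs, e.g.:
//   (X + C1) + C2  -->  X + (C1 + C2)
//   (X * C1) * C2  -->  X * (C1 * C2)
// with nuw/nsw re-applied only when the wrap checks above prove the combined
// constant cannot overflow.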
  if (LOp == Instruction::And)
    return ROp == Instruction::Or || ROp == Instruction::Xor;

  if (LOp == Instruction::Or)
    return ROp == Instruction::And;

  if (LOp == Instruction::Mul)
    return ROp == Instruction::Add || ROp == Instruction::Sub;
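// The distributive identities encoded above, e.g.:
//   X & (Y | Z) == (X & Y) | (X & Z)
//   X | (Y & Z) == (X | Y) & (X | Z)
//   X * (Y + Z) == (X * Y) + (X * Z)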
629 if (isa<Constant>(V))
643 assert(
Op &&
"Expected a binary operator");
644 LHS =
Op->getOperand(0);
645 RHS =
Op->getOperand(1);
646 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
651 Instruction::Shl, ConstantInt::get(
Op->getType(), 1),
C);
652 assert(
RHS &&
"Constant folding of immediate constants failed");
653 return Instruction::Mul;
658 if (OtherOp && OtherOp->
getOpcode() == Instruction::AShr &&
661 return Instruction::AShr;
664 return Op->getOpcode();
  assert(A && B && C && D && "All values must be provided");

  Value *RetVal = nullptr;

  if (A == C || (InnerCommutative && A == D)) {

  if (B == D || (InnerCommutative && B == C)) {
730 if (isa<OverflowingBinaryOperator>(RetVal)) {
733 if (isa<OverflowingBinaryOperator>(&
I)) {
734 HasNSW =
I.hasNoSignedWrap();
735 HasNUW =
I.hasNoUnsignedWrap();
737 if (
auto *LOBO = dyn_cast<OverflowingBinaryOperator>(
LHS)) {
738 HasNSW &= LOBO->hasNoSignedWrap();
739 HasNUW &= LOBO->hasNoUnsignedWrap();
742 if (
auto *ROBO = dyn_cast<OverflowingBinaryOperator>(
RHS)) {
743 HasNSW &= ROBO->hasNoSignedWrap();
744 HasNUW &= ROBO->hasNoUnsignedWrap();
747 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
757 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
760 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
  unsigned Opc = I->getOpcode();
  unsigned ConstIdx = 1;
  case Instruction::Sub:
  case Instruction::ICmp:
  case Instruction::Or:
  case Instruction::Add:

  if (!match(I->getOperand(1 - ConstIdx),

  if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {

    if (!Cmp || !Cmp->isZeroValue())

  bool Consumes = false;

  assert(NotOp != nullptr &&
         "Desync between isFreeToInvert and getFreelyInverted");
  case Instruction::Sub:
  case Instruction::Or:
  case Instruction::Add:
  case Instruction::ICmp:
  auto IsValidBinOpc = [](unsigned Opc) {
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Add:

  auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
                                      unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
           ShOpc == Instruction::Shl;

  auto GetInvShift = [](unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;

  auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,

    if (BinOpc1 == Instruction::And)

    if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))

    if (BinOpc2 == Instruction::And)

  auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
    Value *X, *Y, *ShiftedX, *Mask, *Shift;
    if (!match(I.getOperand(ShOpnum),

    if (!match(I.getOperand(1 - ShOpnum),

    auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
    auto *IX = dyn_cast<Instruction>(ShiftedX);

    unsigned ShOpc = IY->getOpcode();
    if (ShOpc != IX->getOpcode())

    auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));

    unsigned BinOpc = BO2->getOpcode();

    if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))

    if (ShOpc == Instruction::AShr) {

    if (BinOpc == I.getOpcode() &&
        IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {

    if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))

  return MatchBinOp(1);
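// Concrete instance of the shape MatchBinOp looks for (logical shifts only):
//   ((X >> C) & Mask) | (Y >> C)  -->  ((X & (Mask << C)) | Y) >> C
// merging two shifts by the same amount into one.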
  Value *A, *CondVal, *TrueVal, *FalseVal;

  auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
           A->getType()->getScalarSizeInBits() == 1 &&

  if (MatchSelectAndCast(LHS, RHS))

  else if (MatchSelectAndCast(RHS, LHS))

  auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
    bool IsCastOpRHS = (CastOp == RHS);
    bool IsZExt = isa<ZExtInst>(CastOp);
    } else if (IsZExt) {
      unsigned BitWidth = V->getType()->getScalarSizeInBits();

    Value *NewTrueVal = NewFoldedConst(false, TrueVal);
                        NewFoldedConst(true, FalseVal));

    Value *NewTrueVal = NewFoldedConst(true, TrueVal);
                        NewFoldedConst(false, FalseVal));

  if (Op0 && Op1 && LHSOpcode == RHSOpcode)
static std::optional<std::pair<Value *, Value *>>

  if (LHS->getParent() != RHS->getParent())
    return std::nullopt;

  if (LHS->getNumIncomingValues() < 2)
    return std::nullopt;

    return std::nullopt;

  Value *L0 = LHS->getIncomingValue(0);
  Value *R0 = RHS->getIncomingValue(0);

  for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {

    if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))

    return std::nullopt;

  return std::optional(std::pair(L0, R0));
std::optional<std::pair<Value *, Value *>>
InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
  Instruction *LHSInst = dyn_cast<Instruction>(LHS);
  Instruction *RHSInst = dyn_cast<Instruction>(RHS);
    return std::nullopt;
  case Instruction::PHI:
  case Instruction::Select: {
      return std::pair(TrueVal, FalseVal);
    return std::nullopt;
  case Instruction::Call: {
    if (LHSMinMax && RHSMinMax &&
      return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
    return std::nullopt;
  return std::nullopt;
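// e.g. smin(A, B) and smax(A, B) form a symmetric pair over (A, B), so a
// commutative user such as add (smin A, B), (smax A, B) can be rewritten
// as add A, B.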
  if (!LHSIsSelect && !RHSIsSelect)

  if (isa<FPMathOperator>(&I)) {
    FMF = I.getFastMathFlags();

  Value *Cond, *True = nullptr, *False = nullptr;

  if (Opcode != Instruction::Add || (!True && !False) || (True && False))

  if (LHSIsSelect && RHSIsSelect && A == D) {

  else if (True && !False)

    if (Value *NewSel = foldAddNegate(B, C, RHS))

    if (Value *NewSel = foldAddNegate(E, F, LHS))

  if (!True || !False)
  assert(!isa<Constant>(I) && "Shouldn't invert users of constant");

    if (U == IgnoredUser)

    switch (cast<Instruction>(U)->getOpcode()) {
    case Instruction::Select: {
      auto *SI = cast<SelectInst>(U);
      SI->swapProfMetadata();
    case Instruction::Br: {
    case Instruction::Xor:
    default:
      llvm_unreachable("Got unexpected user - out of sync with "
                       "canFreelyInvertAllUsersOf() ?");
Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {

  if (C->getType()->getElementType()->isIntegerTy())

  for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {

    if (isa<UndefValue>(Elt))

    if (!isa<ConstantInt>(Elt))

  if (auto *CV = dyn_cast<Constant>(V))
    if (CV->getType()->isVectorTy() &&
        CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
    BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,

  Type *IntTy = IntOps[0]->getType();

  unsigned MaxRepresentableBits =

  unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};

  auto IsNonZero = [&](unsigned OpNo) -> bool {
    if (OpsKnown[OpNo].hasKnownBits() &&
        OpsKnown[OpNo].getKnownBits(SQ).isNonZero())

  auto IsNonNeg = [&](unsigned OpNo) -> bool {
    return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();

  auto IsValidPromotion = [&](unsigned OpNo) -> bool {
    if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&

    if (MaxRepresentableBits < IntSz) {

      NumUsedLeadingBits[OpNo] =
          IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();

    if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])

    return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||

  if (Op1FpC != nullptr) {
    if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&

        OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
    if (Op1IntC == nullptr)
                              : Instruction::UIToFP,
                          Op1IntC, FPTy, DL) != Op1FpC)

    IntOps[1] = Op1IntC;

  if (IntTy != IntOps[1]->getType())

  if (Op1FpC == nullptr) {
    if (!IsValidPromotion(1))
  if (!IsValidPromotion(0))

  bool NeedsOverflowCheck = true;

  unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
  unsigned OverflowMaxCurBits =
      std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
  bool OutputSigned = OpsFromSigned;
  case Instruction::FAdd:
    IntOpc = Instruction::Add;
    OverflowMaxOutputBits += OverflowMaxCurBits;
    break;
  case Instruction::FSub:
    IntOpc = Instruction::Sub;
    OverflowMaxOutputBits += OverflowMaxCurBits;
    break;
  case Instruction::FMul:
    IntOpc = Instruction::Mul;
    OverflowMaxOutputBits += OverflowMaxCurBits * 2;
    break;

  if (OverflowMaxOutputBits < IntSz) {
    NeedsOverflowCheck = false;

    if (IntOpc == Instruction::Sub)
      OutputSigned = true;

  if (NeedsOverflowCheck &&
      !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))

  if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
    IntBO->setHasNoSignedWrap(OutputSigned);
    IntBO->setHasNoUnsignedWrap(!OutputSigned);
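// Target shape of this transform, e.g. with i8 operands and float math:
//   fadd (sitofp i8 %a to float), (sitofp i8 %b to float)
//     --> sitofp (add nsw i8 %a, %b) to float
// performed only when the integer op provably cannot overflow.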
  std::array<Value *, 2> IntOps = {nullptr, nullptr};

  if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
                                                    IntOps, Op1FpC, OpsKnown))
  return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
                                      Op1FpC, OpsKnown);

      !X->getType()->isIntOrIntVectorTy(1))

    V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
  } else if (match(SI->getCondition(),
                                bool FoldWithMultiUse) {

  if (!SI->hasOneUse() && !FoldWithMultiUse)

  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();

  if (SI->getType()->isIntOrIntVectorTy(1))

  if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
    if (CI->hasOneUse()) {
      Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
      if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))

  if (!NewTV && !NewFV)

  const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
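// foldOpIntoSelect in a nutshell, e.g.:
//   add (select %c, 2, 6), 1  -->  select %c, 3, 7
// the binop is evaluated separately against each (simplifiable) select arm.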
                                     bool AllowMultipleUses) {

  if (NumPHIValues == 0)

  bool IdenticalUsers = false;
  if (!AllowMultipleUses && !OneUse) {

      if (UI != &I && !I.isIdenticalTo(UI))

    IdenticalUsers = true;

    auto *I = dyn_cast<Instruction>(Op);

    if (isa<PHINode>(I))

  bool SeenNonSimplifiedInVal = false;
  for (unsigned i = 0; i != NumPHIValues; ++i) {

    const APInt *Ignored;
    if (isa<CmpIntrinsic>(InVal) && InVal->hasOneUser() &&

    if (!OneUse && !IdenticalUsers)

    if (SeenNonSimplifiedInVal)
    SeenNonSimplifiedInVal = true;

    if (isa<InvokeInst>(InVal))
      if (cast<Instruction>(InVal)->getParent() == InBB)
  for (auto OpIndex : OpsToMoveUseToIncomingBB) {

      U = U->DoPHITranslation(PN->getParent(), OpBB);

    Clones.insert({OpBB, Clone});

    NewPhiValues[OpIndex] = Clone;

  for (unsigned i = 0; i != NumPHIValues; ++i)

  if (IdenticalUsers) {

      const_cast<PHINode &>(*NewPN),
  auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
  auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
  if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
      Phi0->getNumOperands() != Phi1->getNumOperands())

  if (BO.getParent() != Phi0->getParent() ||

  auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
    auto &Phi0Use = std::get<0>(T);
    auto &Phi1Use = std::get<1>(T);
    if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
    Value *Phi0UseV = Phi0Use.get();
    Value *Phi1UseV = Phi1Use.get();
    else if (Phi1UseV == C)

  if (all_of(zip(Phi0->operands(), Phi1->operands()),
             CanFoldIncomingValuePair)) {

    assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
           "The number of collected incoming values should equal the number "
           "of the original PHINode operands!");
    for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
      NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
  if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)

    ConstBB = Phi0->getIncomingBlock(0);
    OtherBB = Phi0->getIncomingBlock(1);
    ConstBB = Phi0->getIncomingBlock(1);
    OtherBB = Phi0->getIncomingBlock(0);

  auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
  if (!PredBlockBranch || PredBlockBranch->isConditional() ||

  for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)

                                 Phi0->getIncomingValueForBlock(OtherBB),
                                 Phi1->getIncomingValueForBlock(OtherBB));
  if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
    NotFoldedNewBO->copyIRFlags(&BO);
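// e.g. add (phi [1, %pred], [%x, %other]), (phi [2, %pred], [%y, %other])
// can become phi [3, %pred], [%z, %other] with %z = add %x, %y computed in
// %other, provided %other simply falls through to this block.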
2031 if (!isa<Constant>(
I.getOperand(1)))
2034 if (
auto *Sel = dyn_cast<SelectInst>(
I.getOperand(0))) {
2037 }
else if (
auto *PN = dyn_cast<PHINode>(
I.getOperand(0))) {
2048 if (
GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2055 if (!isa<VectorType>(Inst.
getType()))
2061 cast<VectorType>(Inst.
getType())->getElementCount());
2063 cast<VectorType>(Inst.
getType())->getElementCount());
  Value *L0, *L1, *R0, *R1;

      cast<ShuffleVectorInst>(LHS)->isConcat() &&
      cast<ShuffleVectorInst>(RHS)->isConcat()) {

    if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
    if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))

    if (auto *BO = dyn_cast<BinaryOperator>(V))
        M, Intrinsic::vector_reverse, V->getType());

      return createBinOpReverse(V1, V2);

      return createBinOpReverse(V1, RHS);

      return createBinOpReverse(LHS, V2);

    if (auto *BO = dyn_cast<BinaryOperator>(XY))

        V1->getType() == V2->getType() &&
      return createBinOpShuffle(V1, V2, Mask);
    auto *LShuf = cast<ShuffleVectorInst>(LHS);
    auto *RShuf = cast<ShuffleVectorInst>(RHS);
    if (LShuf->isSelect() &&
        RShuf->isSelect() &&

  auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType());
      cast<FixedVectorType>(V1->getType())->getNumElements() <=
          InstVTy->getNumElements()) {

           "Shuffle should not change scalar type");

    bool ConstOp1 = isa<Constant>(RHS);

    unsigned SrcVecNumElts =
        cast<FixedVectorType>(V1->getType())->getNumElements();

    bool MayChange = true;
    unsigned NumElts = InstVTy->getNumElements();
    for (unsigned I = 0; I < NumElts; ++I) {

      if (ShMask[I] >= 0) {
        assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");

        if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
            I >= SrcVecNumElts) {

        NewVecC[ShMask[I]] = CElt;

        if (I >= SrcVecNumElts || ShMask[I] < 0) {

          if (!MaybePoison || !isa<PoisonValue>(MaybePoison)) {

      Value *NewLHS = ConstOp1 ? V1 : NewC;
      Value *NewRHS = ConstOp1 ? NewC : V1;
      return createBinOpShuffle(NewLHS, NewRHS, Mask);
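// e.g. add (shuffle <2 x i32> %v, poison, <1, 0>), <i32 10, i32 20>
//   --> shuffle (add %v, <i32 20, i32 10>), poison, <1, 0>
// the constant is permuted through the mask so the binop runs pre-shuffle.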
2254 if (isa<ShuffleVectorInst>(
RHS))
2287 if (isa<FPMathOperator>(R)) {
2288 R->copyFastMathFlags(&Inst);
2291 if (
auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2292 NewInstBO->copyIRFlags(R);
2321 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2322 (Op0->
hasOneUse() || Op1->hasOneUse()))) {
2340 if (!willNotOverflow(BO.
getOpcode(),
X,
Y, BO, IsSext))
2346 if (
auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2348 NewBinOp->setHasNoSignedWrap();
2350 NewBinOp->setHasNoUnsignedWrap();
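// Narrowing example: add (sext i8 %x to i32), (sext i8 %y to i32) becomes
// sext (add nsw i8 %x, %y) to i32 once willNotOverflow proves the narrow
// add cannot wrap.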
  if (!GEP.hasAllConstantIndices())

  Type *Ty = GEP.getSourceElementType();

  Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);

  if (GEP.getNumIndices() != 1)

  Type *PtrTy = Src->getType()->getScalarType();
  unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);

  if (isa<ScalableVectorType>(BaseType))

  if (NewOffset.isZero() ||
      (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
  Type *PtrTy = Src->getType()->getScalarType();
  if (GEP.hasAllConstantIndices() &&
      (Src->hasOneUse() || Src->hasAllConstantIndices())) {

    bool IsFirstType = true;
    unsigned NumVarIndices = 0;
    for (auto Pair : enumerate(Src->indices())) {
      if (!isa<ConstantInt>(Pair.value())) {
        IsFirstType = false;
        NumVarIndices = Pair.index() + 1;

    if (NumVarIndices != Src->getNumIndices()) {

    if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero()))

                           Src->getNumIndices() - NumVarIndices));

      if (Idx.isNonNegative() != ConstIndices[0].isNonNegative())
      if (!Idx.isNonNegative())

  if (Src->getResultElementType() != GEP.getSourceElementType())

  bool EndsWithSequential = false;
    EndsWithSequential = I.isSequential();

  if (EndsWithSequential) {

    Value *SO1 = Src->getOperand(Src->getNumOperands() - 1);

      Indices.append(Src->op_begin() + 1, Src->op_end() - 1);

  } else if (isa<Constant>(*GEP.idx_begin()) &&
             cast<Constant>(*GEP.idx_begin())->isNullValue() &&
             Src->getNumOperands() != 1) {

    Indices.append(Src->op_begin() + 1, Src->op_end());

  if (!Indices.empty())
        Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2554 bool &DoesConsume,
unsigned Depth) {
2555 static Value *
const NonNull =
reinterpret_cast<Value *
>(uintptr_t(1));
2573 if (!WillInvertAllUses)
2578 if (
auto *
I = dyn_cast<CmpInst>(V)) {
2589 DoesConsume,
Depth))
2592 DoesConsume,
Depth))
2601 DoesConsume,
Depth))
2604 DoesConsume,
Depth))
2613 DoesConsume,
Depth))
2622 DoesConsume,
Depth))
2634 bool LocalDoesConsume = DoesConsume;
2636 LocalDoesConsume,
Depth))
2639 LocalDoesConsume,
Depth)) {
2640 DoesConsume = LocalDoesConsume;
2643 DoesConsume,
Depth);
      assert(NotB != nullptr &&
             "Unable to build inverted value for known freely invertable op");
      if (auto *II = dyn_cast<IntrinsicInst>(V))

  if (PHINode *PN = dyn_cast<PHINode>(V)) {
    bool LocalDoesConsume = DoesConsume;

    for (Use &U : PN->operands()) {
      BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
      if (NewIncomingVal == nullptr)
      if (NewIncomingVal == V)
      IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);

    DoesConsume = LocalDoesConsume;
      for (auto [Val, Pred] : IncomingValues)

                                DoesConsume, Depth))

                                DoesConsume, Depth))
                                          bool IsLogical, Value *A,
                                          Value *B) {
    bool LocalDoesConsume = DoesConsume;
                                 LocalDoesConsume, Depth))
                                 LocalDoesConsume, Depth)) {
                                     LocalDoesConsume, Depth);
      DoesConsume = LocalDoesConsume;

    return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false,
                                       A, B);

    return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false,
                                       A, B);

    return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true,
                                       A, B);

    return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true,
                                       A, B);
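// De Morgan's laws driving the four calls above:
//   not (A & B) == (not A) | (not B)
//   not (A | B) == (not A) & (not B)
// with IsLogical covering the select-based short-circuit forms.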
  Type *GEPEltType = GEP.getSourceElementType();

  if (GEP.getNumIndices() == 1 &&

    auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
    return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&

             return match(V, m_APInt(C)) && !C->isZero();

  auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));

    auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
    if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
        Op1->getSourceElementType() != Op2->getSourceElementType())

  Type *CurTy = nullptr;

  for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
    if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())

    if (Op1->getOperand(J) != Op2->getOperand(J)) {

        assert(CurTy && "No current type?");

        CurTy = Op1->getSourceElementType();

    NW &= Op2->getNoWrapFlags();

  auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
  NewGEP->setNoWrapFlags(NW);

    NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),

      NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),

    NewGEP->setOperand(DI, NewPN);

  NewGEP->insertBefore(*GEP.getParent(),
                       GEP.getParent()->getFirstInsertionPt());
  Type *GEPType = GEP.getType();
  Type *GEPEltType = GEP.getSourceElementType();

  if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
    auto VWidth = GEPFVTy->getNumElements();
    APInt PoisonElts(VWidth, 0);

  bool MadeChange = false;

  Type *NewScalarIndexTy =

    Type *IndexTy = (*I)->getType();
    Type *NewIndexType =
                             cast<VectorType>(IndexTy)->getElementCount())

    if (IndexTy != NewIndexType) {

  if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
                                      GEP.getNoWrapFlags()));
  if (auto *PN = dyn_cast<PHINode>(PtrOp)) {

  if (auto *Src = dyn_cast<GEPOperator>(PtrOp))

  if (GEP.getNumIndices() == 1) {
    unsigned AS = GEP.getPointerAddressSpace();
    if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==

      if (TyAllocSize == 1) {

            GEPType == Y->getType()) {
          bool HasSameUnderlyingObject =
          bool Changed = false;
          GEP.replaceUsesWithIf(Y, [&](Use &U) {
            bool ShouldReplace = HasSameUnderlyingObject ||
                                 isa<ICmpInst>(U.getUser()) ||
                                 isa<PtrToIntInst>(U.getUser());
            Changed |= ShouldReplace;
            return ShouldReplace;

          return Changed ? &GEP : nullptr;
      } else if (auto *ExactIns =
                     dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {

        if (ExactIns->isExact()) {

              GEP.getPointerOperand(), V,
              GEP.getNoWrapFlags());

        if (ExactIns->isExact() && ExactIns->hasOneUse()) {

          std::optional<APInt> NewC;

          if (NewC.has_value()) {
                ConstantInt::get(V->getType(), *NewC));
            cast<BinaryOperator>(NewOp)->setIsExact();
                GEP.getPointerOperand(), NewOp,
                GEP.getNoWrapFlags());
  if (GEP.getNumIndices() == 1) {

    auto CanPreserveInBounds = [&](bool AddIsNSW, Value *Idx1, Value *Idx2) {

      bool IsInBounds = CanPreserveInBounds(
          cast<OverflowingBinaryOperator>(GEP.getOperand(1))->hasNoSignedWrap(),

                                   Idx1, "", IsInBounds);

      bool IsInBounds = CanPreserveInBounds(

          GEP.getSourceElementType(), GEP.getPointerOperand(),

  if (!GEP.isInBounds()) {

    APInt BasePtrOffset(IdxWidth, 0);
    Value *UnderlyingPtrOp =

    bool CanBeNull, CanBeFreed;
            DL, CanBeNull, CanBeFreed);
    if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
      if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&

        APInt AllocSize(IdxWidth, DerefBytes);
        if (BasePtrOffset.ule(AllocSize)) {
              GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());

  if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
        return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3156 if (isa<ConstantPointerNull>(V))
3158 if (
auto *LI = dyn_cast<LoadInst>(V))
3159 return isa<GlobalVariable>(LI->getPointerOperand());
3183 return Dest && Dest->Ptr == UsedV;
3197 switch (
I->getOpcode()) {
3202 case Instruction::AddrSpaceCast:
3203 case Instruction::BitCast:
3204 case Instruction::GetElementPtr:
3209 case Instruction::ICmp: {
3216 unsigned OtherIndex = (ICI->
getOperand(0) == PI) ? 1 : 0;
3223 auto AlignmentAndSizeKnownValid = [](
CallBase *CB) {
3227 const APInt *Alignment;
3229 return match(CB->getArgOperand(0),
m_APInt(Alignment)) &&
3233 auto *CB = dyn_cast<CallBase>(AI);
3235 if (CB && TLI.
getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3236 TLI.
has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3237 !AlignmentAndSizeKnownValid(CB))
      case Instruction::Call:

        switch (II->getIntrinsicID()) {

        case Intrinsic::memmove:
        case Intrinsic::memcpy:
        case Intrinsic::memset: {

          if (MI->isVolatile() || MI->getRawDest() != PI)

        case Intrinsic::assume:
        case Intrinsic::invariant_start:
        case Intrinsic::invariant_end:
        case Intrinsic::lifetime_start:
        case Intrinsic::lifetime_end:
        case Intrinsic::objectsize:

        case Intrinsic::launder_invariant_group:
        case Intrinsic::strip_invariant_group:

      case Instruction::Store: {

        if (SI->isVolatile() || SI->getPointerOperand() != PI)

  } while (!Worklist.empty());
  std::unique_ptr<DIBuilder> DIB;
  if (isa<AllocaInst>(MI)) {

  for (unsigned i = 0, e = Users.size(); i != e; ++i) {

      if (II->getIntrinsicID() == Intrinsic::objectsize) {
            II, DL, &TLI, AA, true, &InsertedInstructions);
        for (Instruction *Inserted : InsertedInstructions)

  for (unsigned i = 0, e = Users.size(); i != e; ++i) {

                                             C->isFalseWhenEqual()));
      } else if (auto *SI = dyn_cast<StoreInst>(I)) {
        for (auto *DVI : DVIs)
          if (DVI->isAddressOfVariable())
        for (auto *DVR : DVRs)
          if (DVR->isAddressOfVariable())

  for (auto *DVI : DVIs)
    if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
      DVI->eraseFromParent();
  for (auto *DVR : DVRs)
    if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
      DVR->eraseFromParent();
  if (FreeInstrBB->size() != 2) {
      if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
      auto *Cast = dyn_cast<CastInst>(&Inst);
      if (!Cast || !Cast->isNoopCast(DL))

         "Broken CFG: missing edge from predecessor to successor");

    if (&Instr == FreeInstrBBTerminator)
    Instr.moveBeforePreserving(TI);

         "Only the branch instruction should remain");

  Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
  Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
  if (Dereferenceable.isValid()) {

    Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
                                       Attribute::Dereferenceable);
    Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
3528 if (isa<UndefValue>(
Op)) {
3536 if (isa<ConstantPointerNull>(
Op))
3572 FPClassTest ReturnClass =
F->getAttributes().getRetNoFPClass();
3573 if (ReturnClass ==
fcNone)
3590 bool Changed =
false;
3591 while (
Instruction *Prev =
I.getPrevNonDebugInstruction()) {
3596 if (Prev->isEHPad())
3627 return BBI->isDebugOrPseudoInst() ||
3628 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
3633 if (BBI != FirstInstr)
3635 }
while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
3637 return dyn_cast<StoreInst>(BBI);
  if (!DeadEdges.insert({From, To}).second)

  for (Use &U : PN.incoming_values())
    if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {

                             std::next(I->getReverseIterator())))) {
    if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {

    if (Inst.isEHPad() || Inst.getType()->isTokenTy())

    Inst.dropDbgRecords();

  for (Value *V : Changed)

    if (Succ == LiveSucc)
3742 if (isa<SelectInst>(
Cond) &&
3763 auto *Cmp = cast<CmpInst>(
Cond);
3772 if (isa<UndefValue>(
Cond)) {
3776 if (
auto *CI = dyn_cast<ConstantInt>(
Cond)) {
3811 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
3812 auto *
C = dyn_cast<ConstantInt>(
Select->getOperand(CstOpIdx));
3816 BasicBlock *CstBB = SI.findCaseValue(
C)->getCaseSuccessor();
3817 if (CstBB != SI.getDefaultDest())
  for (auto Case : SI.cases())
    if (!CR.contains(Case.getCaseValue()->getValue()))

    for (auto Case : SI.cases()) {
      assert(isa<ConstantInt>(NewCase) &&
             "Result of expression should be constant");
      Case.setValue(cast<ConstantInt>(NewCase));

    for (auto Case : SI.cases()) {
      assert(isa<ConstantInt>(NewCase) &&
             "Result of expression should be constant");
      Case.setValue(cast<ConstantInt>(NewCase));

      all_of(SI.cases(), [&](const auto &Case) {
        return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;

    Value *NewCond = Op0;

    for (auto Case : SI.cases()) {
      const APInt &CaseVal = Case.getCaseValue()->getValue();
                             : CaseVal.lshr(ShiftAmt);
      Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
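// e.g. switch (%x << 2) with cases 4, 8, 12 becomes switch (%x) with cases
// 1, 2, 3; valid because every case constant has at least ShiftAmt trailing
// zeros, as checked above.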
    bool IsZExt = isa<ZExtInst>(Cond);

    if (all_of(SI.cases(), [&](const auto &Case) {
          const APInt &CaseVal = Case.getCaseValue()->getValue();
          return IsZExt ? CaseVal.isIntN(NewWidth)
                        : CaseVal.isSignedIntN(NewWidth);
      for (auto &Case : SI.cases()) {
        APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
        Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));

  if (auto *Select = dyn_cast<SelectInst>(Cond)) {

  for (const auto &C : SI.cases()) {
        std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
        std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());

  unsigned NewWidth =
      Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);

  if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
      shouldChangeType(Known.getBitWidth(), NewWidth)) {

    for (auto Case : SI.cases()) {
      APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
      Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));

  if (isa<UndefValue>(Cond)) {

  if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
                               SI.findCaseValue(CI)->getCaseSuccessor());
  const APInt *C = nullptr;

  if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
                               OvID == Intrinsic::umul_with_overflow)) {

    if (C->isPowerOf2()) {
      return BinaryOperator::CreateShl(
          WO->getLHS(),
          ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));

  if (!WO->hasOneUse())

  assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");

  if (OvID == Intrinsic::usub_with_overflow)

  if (OvID == Intrinsic::smul_with_overflow &&
      WO->getLHS()->getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());

  if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
    unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
        ConstantInt::get(WO->getLHS()->getType(),

        WO->getBinaryOp(), *C, WO->getNoWrapKind());

    auto *OpTy = WO->getRHS()->getType();
    auto *NewLHS = WO->getLHS();
                                  ConstantInt::get(OpTy, NewRHSC));
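// e.g. extractvalue (umul.with.overflow i32 %x, i32 8), 0 --> shl i32 %x, 3:
// when only the value half of the overflow intrinsic is used, a multiply by
// a power of two reduces to a shift.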
  const unsigned *exti, *exte, *insi, *inse;
  for (exti = EV.idx_begin(), insi = IV->idx_begin(),
       exte = EV.idx_end(), inse = IV->idx_end();
       exti != exte && insi != inse;

  if (exti == exte && insi == inse)

  if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))

  if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {

    if (auto *STy = dyn_cast<StructType>(Agg->getType());
        STy && STy->isScalableTy())

    if (L->isSimple() && L->hasOneUse()) {

          L->getPointerOperand(), Indices);

  if (auto *PN = dyn_cast<PHINode>(Agg))

  if (auto *SI = dyn_cast<SelectInst>(Agg))
  switch (Personality) {

  return cast<ArrayType>(LHS->getType())->getNumElements() <
         cast<ArrayType>(RHS->getType())->getNumElements();
  bool MakeNewInstruction = false;

    bool isLastClause = i + 1 == e;

      if (AlreadyCaught.insert(TypeInfo).second) {

        MakeNewInstruction = true;

        MakeNewInstruction = true;
        CleanupFlag = false;

      if (!NumTypeInfos) {

        MakeNewInstruction = true;
        CleanupFlag = false;

      bool MakeNewFilter = false;

      if (isa<ConstantAggregateZero>(FilterClause)) {

        assert(NumTypeInfos > 0 && "Should have handled empty filter already!");

          MakeNewInstruction = true;

        if (NumTypeInfos > 1)
          MakeNewFilter = true;

        NewFilterElts.reserve(NumTypeInfos);

        bool SawCatchAll = false;
        for (unsigned j = 0; j != NumTypeInfos; ++j) {

          if (SeenInFilter.insert(TypeInfo).second)
            NewFilterElts.push_back(cast<Constant>(Elt));

            MakeNewInstruction = true;

        if (NewFilterElts.size() < NumTypeInfos)
          MakeNewFilter = true;

      if (MakeNewFilter) {
                                           NewFilterElts.size());
        MakeNewInstruction = true;

      if (MakeNewFilter && !NewFilterElts.size()) {
        assert(MakeNewInstruction && "New filter but not a new instruction!");
        CleanupFlag = false;
  for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {

    for (j = i; j != e; ++j)
      if (!isa<ArrayType>(NewClauses[j]->getType()))

      for (unsigned k = i; k + 1 < j; ++k)

      std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,

      MakeNewInstruction = true;

  for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {

    for (unsigned j = NewClauses.size() - 1; j != i; --j) {
      Value *LFilter = NewClauses[j];

        NewClauses.erase(J);
        MakeNewInstruction = true;

      if (isa<ConstantAggregateZero>(LFilter)) {

        if (isa<ConstantAggregateZero>(Filter)) {
          assert(FElts <= LElts && "Should have handled this case earlier!");

          NewClauses.erase(J);
          MakeNewInstruction = true;

      if (isa<ConstantAggregateZero>(Filter)) {

        assert(FElts > 0 && "Should have eliminated the empty filter earlier!");

        for (unsigned l = 0; l != LElts; ++l)

          NewClauses.erase(J);
          MakeNewInstruction = true;

      bool AllFound = true;
      for (unsigned f = 0; f != FElts; ++f) {

        for (unsigned l = 0; l != LElts; ++l) {

          if (LTypeInfo == FTypeInfo) {

        NewClauses.erase(J);
        MakeNewInstruction = true;

  if (MakeNewInstruction) {

    if (NewClauses.empty())

  assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
  auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);

  if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))

  Use *MaybePoisonOperand = nullptr;
  for (Use &U : OrigOpInst->operands()) {
    if (isa<MetadataAsValue>(U.get()) ||

    if (!MaybePoisonOperand)
      MaybePoisonOperand = &U;

  OrigOpInst->dropPoisonGeneratingAnnotations();

  if (!MaybePoisonOperand)

      MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");

  replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);

  Use *StartU = nullptr;

  Value *StartV = StartU->get();

  bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);