#define DEBUG_TYPE "instcombine"

using namespace llvm;

STATISTIC(NumWorklistIterations,
          "Number of instruction combining iterations performed");

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
STATISTIC(NumExpand,    "Number of expansions");
STATISTIC(NumFactor   , "Number of factorizations");
STATISTIC(NumReassoc  , "Number of reassociations");

DEBUG_COUNTER(VisitCounter, "instcombine-visit",
              "Controls which instructions are visited");

static cl::opt<unsigned> MaxSinkNumUsers(
    "instcombine-max-sink-users", cl::init(32),
    cl::desc("Maximum number of undroppable users for instruction sinking"));

static cl::opt<unsigned> LimitMaxIterations(
    "instcombine-max-iterations",
    cl::desc("Limit the maximum number of instruction combining iterations"));

static cl::opt<unsigned> InfiniteLoopDetectionThreshold(
    "instcombine-infinite-loop-threshold",
    cl::desc("Number of instruction combining iterations considered an "
             "infinite loop"));

static cl::opt<unsigned>
    MaxArraySizeForCombine("instcombine-maxarray-size", cl::init(1024),
                           cl::desc("Maximum array size considered when doing "
                                    "a combine"));
    bool &KnownBitsComputed) {

      *this, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {

bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
                                        unsigned ToWidth) const {
  bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
  bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);

  if (ToWidth < FromWidth && isDesirableIntType(ToWidth))

  if (FromLegal && !ToLegal)

  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
  unsigned FromWidth = From->getPrimitiveSizeInBits();
  return shouldChangeType(FromWidth, ToWidth);
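// Illustrative sketch of the width policy above (assumed DataLayout, not
// from the original file): with "n8:16:32:64", shouldChangeType(13, 32)
// is expected to return true (widening an illegal integer to a legal one),
// while shouldChangeType(32, 13) returns false because a legal integer type
// must not be narrowed to an illegal one.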
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap())

  const APInt *BVal, *CVal;

  bool Overflow = false;
  (void)BVal->sadd_ov(*CVal, Overflow);
  (void)BVal->ssub_ov(*CVal, Overflow);
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoUnsignedWrap();

  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoSignedWrap();

  I.clearSubclassOptionalData();

  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
  auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
  if (!Cast || !Cast->hasOneUse())

  auto CastOpcode = Cast->getOpcode();
  if (CastOpcode != Instruction::ZExt)

  auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
  if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)

  Type *DestTy = C1->getType();
Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
  auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
  if (IntToPtr && DL.getPointerTypeSizeInBits(IntToPtr->getDestTy()) ==
                      DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
    auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
    Type *CastTy = IntToPtr->getDestTy();
        PtrToInt->getSrcTy()->getPointerAddressSpace() &&
        DL.getPointerTypeSizeInBits(PtrToInt->getSrcTy()) ==
            DL.getTypeSizeInBits(PtrToInt->getDestTy())) {
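// Illustrative sketch of the round-trip elimination above (assumed IR, not
// from the original file): when the pointer and integer widths match and the
// address spaces agree,
//   %i = ptrtoint i8* %p to i64
//   %q = inttoptr i64 %i to i8*
// folds back to %p (via a bitcast if the pointee types differ), since no
// address bits are lost in the round trip.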
  bool Changed = false;

  if (I.isCommutative() && getComplexity(I.getOperand(0)) <
                               getComplexity(I.getOperand(1)))
    Changed = !I.swapOperands();

  if (I.isAssociative()) {

        replaceOperand(I, 0, A);
        replaceOperand(I, 1, V);

          I.setHasNoUnsignedWrap(true);
          I.setHasNoSignedWrap(true);

      Value *A = I.getOperand(0);

        replaceOperand(I, 0, V);
        replaceOperand(I, 1, C);

  if (I.isAssociative() && I.isCommutative()) {

        replaceOperand(I, 0, V);
        replaceOperand(I, 1, B);

      Value *A = I.getOperand(0);

        replaceOperand(I, 0, B);
        replaceOperand(I, 1, V);

      if (isa<FPMathOperator>(NewBO)) {

      InsertNewInstWith(NewBO, I);
      replaceOperand(I, 0, NewBO);

        I.setHasNoUnsignedWrap(true);
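// Illustrative sketch of the reassociations applied here (standard
// InstCombine patterns; the exact guards are elided in this excerpt):
//   "(A op B) op C"   -->  "A op (B op C)"   if "B op C" simplifies, and
//   "(A op C1) op C2" -->  "A op (C1 op C2)" to fold the two constants.
// Wrap flags are only re-added when the rewritten form provably cannot wrap.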
  if (LOp == Instruction::And)
    return ROp == Instruction::Or || ROp == Instruction::Xor;

  if (LOp == Instruction::Or)
    return ROp == Instruction::And;
  if (isa<Constant>(V))

  assert(Op && "Expected a binary operator");
  LHS = Op->getOperand(0);
  RHS = Op->getOperand(1);
  return Op->getOpcode();
  assert(A && B && C && D && "All values must be provided");

  Value *SimplifiedInst = nullptr;

    if (A == C || (InnerCommutative && A == D)) {

          SimplifiedInst = Builder.CreateBinOp(InnerOpcode, A, V);

    if (B == D || (InnerCommutative && B == C)) {

          SimplifiedInst = Builder.CreateBinOp(InnerOpcode, V, B);

  if (SimplifiedInst) {

    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(SimplifiedInst)) {
      if (isa<OverflowingBinaryOperator>(SimplifiedInst)) {

        if (isa<OverflowingBinaryOperator>(&I)) {
          HasNSW = I.hasNoSignedWrap();
          HasNUW = I.hasNoUnsignedWrap();

        if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
          HasNSW &= LOBO->hasNoSignedWrap();
          HasNUW &= LOBO->hasNoUnsignedWrap();

        if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
          HasNSW &= ROBO->hasNoSignedWrap();
          HasNUW &= ROBO->hasNoUnsignedWrap();

          BO->setHasNoSignedWrap(HasNSW);
          BO->setHasNoUnsignedWrap(HasNUW);

  return SimplifiedInst;
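// Illustrative sketch of the factorization attempted above, e.g.:
//   (A * B) + (A * C)  -->  A * (B + C)
// The nsw/nuw handling is conservative: the rebuilt binop keeps a wrap flag
// only if the original instruction and both inner binops carried it.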
  if (Op0 && Op1 && LHSOpcode == RHSOpcode)
    if (Value *V = tryFactorization(I, LHSOpcode, A, B, C, D))

    if (Value *V = tryFactorization(I, LHSOpcode, A, B, RHS, Ident))

    if (Value *V = tryFactorization(I, RHSOpcode, LHS, Ident, C, D))

    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();

      C = Builder.CreateBinOp(InnerOpcode, L, R);

        C = Builder.CreateBinOp(TopLevelOpcode, B, C);

        C = Builder.CreateBinOp(TopLevelOpcode, A, C);

    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();

      A = Builder.CreateBinOp(InnerOpcode, L, R);

        A = Builder.CreateBinOp(TopLevelOpcode, A, C);

        A = Builder.CreateBinOp(TopLevelOpcode, A, B);

  return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
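// Illustrative sketch of the expansion direction handled above, e.g.:
//   A & (B | C)  -->  (A & B) | (A & C)
// performed only when one of the distributed halves simplifies away, so the
// expansion is a net win rather than pure code growth.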
  if (!LHSIsSelect && !RHSIsSelect)

  if (isa<FPMathOperator>(&I)) {
    FMF = I.getFastMathFlags();

  Value *Cond, *True = nullptr, *False = nullptr;
  if (LHSIsSelect && RHSIsSelect && A == D) {

      True = Builder.CreateBinOp(Opcode, B, E);
    else if (True && !False)
      False = Builder.CreateBinOp(Opcode, C, F);
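// Illustrative sketch (operand names as matched above, where A == D is the
// shared condition): when both operands select on the same condition,
//   (select Cond, B, C) op (select Cond, E, F)
//     -->  select Cond, (B op E), (C op F)
// provided the hoisted per-arm binops simplify.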
void InstCombinerImpl::freelyInvertAllUsersOf(Value *I) {
  for (User *U : I->users()) {
    switch (cast<Instruction>(U)->getOpcode()) {

      auto *SI = cast<SelectInst>(U);
      SI->swapProfMetadata();

    case Instruction::Br:
      cast<BranchInst>(U)->swapSuccessors();

    case Instruction::Xor:
      replaceInstUsesWith(cast<Instruction>(*U), I);

      llvm_unreachable("Got unexpected user - out of sync with "
                       "canFreelyInvertAllUsersOf() ?");
Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {

    if (C->getType()->getElementType()->isIntegerTy())

    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {

      if (isa<UndefValue>(Elt))

      if (!isa<ConstantInt>(Elt))

  if (auto *CV = dyn_cast<Constant>(V))
    if (CV->getType()->isVectorTy() &&
        CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())

      !X->getType()->isIntOrIntVectorTy(1))
  if (auto *Cast = dyn_cast<CastInst>(&I))
    return Builder.CreateCast(Cast->getOpcode(), SO, I.getType());

  if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
           "Expected constant-foldable intrinsic");
    return Builder.CreateUnaryIntrinsic(IID, SO);

  assert(I.isBinaryOp() && "Unexpected opcode for select folding");

  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (auto *SOC = dyn_cast<Constant>(SO)) {

  Value *Op0 = SO, *Op1 = ConstOperand;

  if (auto *NewBOI = dyn_cast<Instruction>(NewBO))
    NewBOI->copyIRFlags(&I);
                                                bool FoldWithMultiUse) {
  if (!SI->hasOneUse() && !FoldWithMultiUse)

  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();
  if (!(isa<Constant>(TV) || isa<Constant>(FV)))

  if (SI->getType()->isIntOrIntVectorTy(1))

  if (auto *BC = dyn_cast<BitCastInst>(&Op)) {
    VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
    VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
    if ((SrcTy == nullptr) != (DestTy == nullptr))

  if (auto *CI = dyn_cast<CmpInst>(SI->getCondition())) {
    if (CI->hasOneUse()) {
      Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);

      if (!A->getType()->isIntOrIntVectorTy() || A->getType() != B->getType())

      if ((areLooselyEqual(TV, Op0) && areLooselyEqual(FV, Op1)) ||
          (areLooselyEqual(FV, Op0) && areLooselyEqual(TV, Op1)))
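// Illustrative sketch of folding an op into a select with a constant arm
// (assumed IR; the guards above are elided in this excerpt):
//   %r = add i32 (select i1 %c, i32 0, i32 -1), %x
//     -->  %r = select i1 %c, i32 %x, i32 (add i32 -1, %x)
// Each arm is produced by applying the op to that arm's value, so at least
// one arm constant-folds.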
  bool ConstIsRHS = isa<Constant>(I->getOperand(1));
  Constant *C = cast<Constant>(I->getOperand(ConstIsRHS));

  if (auto *InC = dyn_cast<Constant>(InV)) {

  Value *Op0 = InV, *Op1 = C;

  Value *RI = Builder.CreateBinOp(I->getOpcode(), Op0, Op1, "phi.bo");
  auto *FPInst = dyn_cast<Instruction>(RI);
  if (FPInst && isa<FPMathOperator>(FPInst))
    FPInst->copyFastMathFlags(I);
  if (NumPHIValues == 0)

    if (UI != &I && !I.isIdenticalTo(UI))

  for (unsigned i = 0; i != NumPHIValues; ++i) {

    if (isa<PHINode>(InVal)) return nullptr;
    if (NonConstBB) return nullptr;

    if (isa<InvokeInst>(InVal))
      if (cast<Instruction>(InVal)->getParent() == NonConstBB)

  if (NonConstBB != nullptr) {
    if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(NonConstBB))

  InsertNewInstBefore(NewPN, *PN);

    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();

    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = nullptr;

      if (InC && isa<ConstantInt>(InC))
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;

                                   FalseVInPred, "phi.sel");
  } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = nullptr;
  } else if (auto *BO = dyn_cast<BinaryOperator>(&I)) {
    for (unsigned i = 0; i != NumPHIValues; ++i) {
  } else if (isa<FreezeInst>(&I)) {
    for (unsigned i = 0; i != NumPHIValues; ++i) {

    for (unsigned i = 0; i != NumPHIValues; ++i) {
                                       I.getType(), "phi.cast");

    if (User == &I) continue;
    replaceInstUsesWith(*User, NewPN);
    eraseInstFromFunction(*User);

  return replaceInstUsesWith(I, NewPN);
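// Illustrative sketch of foldOpIntoPhi (assumed IR): an op with a constant
// other operand is pushed into the phi's predecessors when at most one
// incoming value is non-constant, e.g.
//   %p = phi i32 [ 1, %bb0 ], [ %x, %bb1 ]
//   %r = add i32 %p, 4
//     -->  %r = phi i32 [ 5, %bb0 ], [ %x.add, %bb1 ]
// with "%x.add = add i32 %x, 4" emitted at the end of %bb1.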
  auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
  auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
  if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
      Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)

    ConstBB = Phi0->getIncomingBlock(0);
    OtherBB = Phi0->getIncomingBlock(1);

    ConstBB = Phi0->getIncomingBlock(1);
    OtherBB = Phi0->getIncomingBlock(0);

  auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
  if (!PredBlockBranch || PredBlockBranch->isConditional() ||
      !DT.isReachableFromEntry(OtherBB))

  for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)

  Builder.SetInsertPoint(PredBlockBranch);
                                 Phi0->getIncomingValueForBlock(OtherBB),
                                 Phi1->getIncomingValueForBlock(OtherBB));
  if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
    NotFoldedNewBO->copyIRFlags(&BO);
  if (!isa<Constant>(I.getOperand(1)))

  if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
  } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
  APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), IntOffset);

  if (!Offset.isZero())

  for (const APInt &Index : Indices)

  if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
  assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!");
         Scale.getBitWidth() && "Scale not compatible with value!");

  NoSignedWrap = true;

  std::pair<Instruction *, unsigned> Parent;

  bool RequireNoSignedWrap = false;

  for (;; Op = Parent.first->getOperand(Parent.second)) {

      APInt Quotient(Scale), Remainder(Scale);

      NoSignedWrap = true;

      if (RequireNoSignedWrap && !NoSignedWrap)

        if (CI->getValue() == Scale) {

      if (!Op->hasOneUse())

      Parent = std::make_pair(BO, 1);

      if (!Op->hasOneUse())

      Parent = std::make_pair(BO, 0);

    if (logScale > 0 && BO->getOpcode() == Instruction::Shl &&

      if (RequireNoSignedWrap && !NoSignedWrap)

      int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
        getLimitedValue(Scale.getBitWidth());

      if (Amt == logScale) {

      Parent = std::make_pair(BO, 1);

    if (!Op->hasOneUse())

    if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
      if (Cast->getOpcode() == Instruction::SExt) {

        unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits();

        RequireNoSignedWrap = true;

        Parent = std::make_pair(Cast, 0);

      if (Cast->getOpcode() == Instruction::Trunc) {

        if (RequireNoSignedWrap)

        unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
        Parent = std::make_pair(Cast, 0);
        Scale = Scale.sext(LargeSize);
        if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits())

  NoSignedWrap = true;

  assert(Parent.first->hasOneUse() && "Drilled down when more than one use!");
  assert(Op != Parent.first->getOperand(Parent.second) &&
         "Descaling was a no-op?");
  replaceOperand(*Parent.first, Parent.second, Op);
  Worklist.push(Parent.first);

    NoSignedWrap &= OpNoSignedWrap;
    if (NoSignedWrap != OpNoSignedWrap) {
      Worklist.push(Ancestor);
    } else if (Ancestor->getOpcode() == Instruction::Trunc) {
      NoSignedWrap = false;

    assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) &&
           "Failed to keep proper track of nsw flags while drilling down?");

    if (Ancestor == Val)

    assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
  if (!isa<VectorType>(Inst.getType()))

         cast<VectorType>(Inst.getType())->getElementCount());
         cast<VectorType>(Inst.getType())->getElementCount());

  Value *L0, *L1, *R0, *R1;

      cast<ShuffleVectorInst>(LHS)->isConcat() &&
      cast<ShuffleVectorInst>(RHS)->isConcat()) {

    if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))

    if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))

    if (auto *BO = dyn_cast<BinaryOperator>(XY))

    return createBinOpShuffle(V1, V2, Mask);

    auto *LShuf = cast<ShuffleVectorInst>(LHS);
    auto *RShuf = cast<ShuffleVectorInst>(RHS);
    if (LShuf->isSelect() &&
        RShuf->isSelect() &&

  auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType());
      cast<FixedVectorType>(V1->getType())->getNumElements() <=
          InstVTy->getNumElements()) {
           "Shuffle should not change scalar type");

    bool ConstOp1 = isa<Constant>(RHS);

    unsigned SrcVecNumElts =
        cast<FixedVectorType>(V1->getType())->getNumElements();

    bool MayChange = true;
    unsigned NumElts = InstVTy->getNumElements();
    for (unsigned I = 0; I < NumElts; ++I) {
      if (ShMask[I] >= 0) {
        assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");

        if (!CElt || (!isa<UndefValue>(NewCElt) && NewCElt != CElt) ||
            I >= SrcVecNumElts) {

        NewVecC[ShMask[I]] = CElt;

      if (I >= SrcVecNumElts || ShMask[I] < 0) {

      NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);

      Value *NewLHS = ConstOp1 ? V1 : NewC;
      Value *NewRHS = ConstOp1 ? NewC : V1;
      return createBinOpShuffle(NewLHS, NewRHS, Mask);

  if (isa<ShuffleVectorInst>(RHS))

    Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);

    if (isa<FPMathOperator>(R)) {
      R->copyFastMathFlags(&Inst);

    if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
      NewInstBO->copyIRFlags(R);
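// Illustrative sketch of the shuffle hoisting above (assumed IR): a binop of
// a shuffled value and a constant is rewritten to shuffle last, with the
// constant pre-shuffled into source-lane order,
//   %s = shufflevector <2 x i32> %x, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
//   %r = add <2 x i32> %s, <i32 1, i32 2>
//     -->  %t = add <2 x i32> %x, <i32 2, i32 1>
//          %r = shufflevector <2 x i32> %t, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
// getSafeVectorConstantForBinop guards lanes that could trap (e.g. division).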
      cast<Operator>(Op1)->getOpcode() == CastOpc &&
      (Op0->hasOneUse() || Op1->hasOneUse()))) {

  if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
      NewBinOp->setHasNoSignedWrap();
      NewBinOp->setHasNoUnsignedWrap();
  if (!GEP.hasAllConstantIndices())

  bool IsInBounds = GEP.isInBounds();
  Type *Ty = GEP.getSourceElementType();
  Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", IsInBounds);
  Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", IsInBounds);
  if (Src->getResultElementType() == GEP.getSourceElementType() &&
      Src->getNumOperands() == 2 && GEP.getNumOperands() == 2 &&

    Value *SO1 = Src->getOperand(1);

    if (Loop *L = LI->getLoopFor(GEP.getParent())) {

      if (L->isLoopInvariant(GO1) && !L->isLoopInvariant(SO1)) {

        auto *SO0 = Src->getOperand(0);
        auto *SO0Ty = SO0->getType();
        if (!isa<VectorType>(GEP.getType()) ||
            isa<VectorType>(SO0Ty)) {
          Src->setOperand(1, GO1);
          GEP.setOperand(1, SO1);

          Builder.SetInsertPoint(cast<Instruction>(Src));
              Builder.CreateGEP(GEP.getSourceElementType(), SO0, GO1,
                                Src->getName(), Src->isInBounds());
              GEP.getSourceElementType(), NewSrc, {SO1});

  if (auto *SrcGEP = dyn_cast<GEPOperator>(Src->getOperand(0)))

  Type *PtrTy = Src->getType()->getScalarType();
      (Src->hasOneUse() || Src->hasAllConstantIndices())) {

    bool IsFirstType = true;
    unsigned NumVarIndices = 0;
    for (auto Pair : enumerate(Src->indices())) {
      if (!isa<ConstantInt>(Pair.value())) {
        IsFirstType = false;
        NumVarIndices = Pair.index() + 1;

    APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
    if (NumVarIndices != Src->getNumIndices()) {
      if (isa<ScalableVectorType>(BaseType))

        ConstantIndices.push_back(
      Offset += DL.getIndexedOffsetInType(BaseType, ConstantIndices);

    if (!GEP.accumulateConstantOffset(DL, Offset))

    if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero()))

                                 Src->getNumIndices() - NumVarIndices));

                                           Src->getOperand(0), Indices,
                                           Src->getOperand(0), Indices,

  if (Src->getResultElementType() != GEP.getSourceElementType())

  bool EndsWithSequential = false;
    EndsWithSequential = I.isSequential();

  if (EndsWithSequential) {
    Value *SO1 = Src->getOperand(Src->getNumOperands()-1);

      if (Src->getNumOperands() == 2) {
        replaceOperand(GEP, 0, Src->getOperand(0));
        replaceOperand(GEP, 1, Sum);

        Indices.append(Src->op_begin()+1, Src->op_end()-1);
        Indices.push_back(Sum);
  } else if (isa<Constant>(*GEP.idx_begin()) &&
             cast<Constant>(*GEP.idx_begin())->isNullValue() &&
             Src->getNumOperands() != 1) {
    Indices.append(Src->op_begin()+1, Src->op_end());

  if (!Indices.empty())
                   Src->getSourceElementType(), Src->getOperand(0), Indices,
                                           Src->getOperand(0), Indices,
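// Illustrative sketch of GEP-of-GEP folding (assumed IR): when the inner
// GEP's result element type matches the outer GEP's source element type, the
// two trailing indices are added and the chain collapses,
//   %a = getelementptr i32, i32* %p, i64 %i
//   %b = getelementptr i32, i32* %a, i64 %j
//     -->  %b = getelementptr i32, i32* %p, i64 (%i + %j)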
  Type *GEPEltType = GEP.getSourceElementType();

  auto areMatchingArrayAndVecTypes = [](Type *ArrTy, Type *VecTy,

    auto *VecVTy = cast<FixedVectorType>(VecTy);
           DL.getTypeAllocSize(ArrTy) == DL.getTypeAllocSize(VecTy);

  if (GEP.getNumOperands() == 3 &&
      ((GEPEltType->isArrayTy() && isa<FixedVectorType>(SrcEltType) &&
        areMatchingArrayAndVecTypes(GEPEltType, SrcEltType, DL)) ||
       (isa<FixedVectorType>(GEPEltType) && SrcEltType->isArrayTy() &&
        areMatchingArrayAndVecTypes(SrcEltType, GEPEltType, DL)))) {

      return replaceInstUsesWith(GEP, NGEP);

  unsigned OffsetBits = DL.getIndexTypeSizeInBits(GEP.getType());
  APInt Offset(OffsetBits, 0);

  if (!isa<BitCastInst>(SrcOp) && GEP.accumulateConstantOffset(DL, Offset) &&

    if (isa<AllocaInst>(SrcOp)) {

        replaceInstUsesWith(*BCI, I);

    return replaceInstUsesWith(GEP, NGEP);
  Type *GEPType = GEP.getType();
  Type *GEPEltType = GEP.getSourceElementType();
  bool IsGEPSrcEleScalable = isa<ScalableVectorType>(GEPEltType);
                                 SQ.getWithInstruction(&GEP)))
    return replaceInstUsesWith(GEP, V);

  if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
    auto VWidth = GEPFVTy->getNumElements();
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
      return replaceInstUsesWith(GEP, V);

  bool MadeChange = false;

  Type *NewScalarIndexTy =
      DL.getIndexType(GEP.getPointerOperandType()->getScalarType());

    Type *IndexTy = (*I)->getType();
    Type *NewIndexType =
            cast<VectorType>(IndexTy)->getElementCount())

    if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())

    if (IndexTy != NewIndexType) {
      *I = Builder.CreateIntCast(*I, NewIndexType, true);
  if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
    auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));

    for (auto I = PN->op_begin()+1, E = PN->op_end(); I != E; ++I) {
      auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
      if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
          Op1->getSourceElementType() != Op2->getSourceElementType())

      Type *CurTy = nullptr;

      for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
        if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())

        if (Op1->getOperand(J) != Op2->getOperand(J)) {

            assert(CurTy && "No current type?");

          CurTy = Op1->getSourceElementType();

    if (DI != -1 && !PN->hasOneUse())

    auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());

      NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
                                PN->getNumOperands());

      for (auto &I : PN->operands())
        NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
                           PN->getIncomingBlock(I));

      NewGEP->setOperand(DI, NewPN);

    GEP.getParent()->getInstList().insert(
        GEP.getParent()->getFirstInsertionPt(), NewGEP);
    replaceOperand(GEP, 0, NewGEP);
  if (auto *Src = dyn_cast<GEPOperator>(PtrOp))

  if (GEP.getNumIndices() == 1 && !IsGEPSrcEleScalable) {
    unsigned AS = GEP.getPointerAddressSpace();
    if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
        DL.getIndexSizeInBits(AS)) {
      uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedSize();

      bool Matched = false;

      if (TyAllocSize == 1) {
        V = GEP.getOperand(1);
      } else if (match(GEP.getOperand(1),
        if (TyAllocSize == 1ULL << C)
      } else if (match(GEP.getOperand(1),
        if (TyAllocSize == C)
  if (StrippedPtr != PtrOp && !StrippedPtrTy->isOpaque()) {
    bool HasZeroPointerIndex = false;
    if (auto *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
      HasZeroPointerIndex = C->isZero();

    if (HasZeroPointerIndex) {
      if (auto *CATy = dyn_cast<ArrayType>(GEPEltType)) {
        if (CATy->getElementType() == StrippedPtrEltTy) {
              StrippedPtrEltTy, StrippedPtr, Idx, GEP.getName());

        if (auto *XATy = dyn_cast<ArrayType>(StrippedPtrEltTy)) {
          if (CATy->getElementType() == XATy->getElementType()) {

            GEP.setSourceElementType(XATy);
            return replaceOperand(GEP, 0, StrippedPtr);

                Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Idx,
                                  GEP.getName(), GEP.isInBounds());
    } else if (GEP.getNumOperands() == 2 && !IsGEPSrcEleScalable) {

          DL.getTypeAllocSize(GEPEltType)) {
        Type *IdxType = DL.getIndexType(GEPType);
        Value *NewGEP = Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Idx,
                                          GEP.getName(), GEP.isInBounds());

        uint64_t ResSize = DL.getTypeAllocSize(GEPEltType).getFixedSize();
        uint64_t SrcSize = DL.getTypeAllocSize(StrippedPtrEltTy).getFixedSize();
        if (ResSize && SrcSize % ResSize == 0) {
          uint64_t Scale = SrcSize / ResSize;
                 "Index type does not match the Data Layout preferences");
                Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, NewIdx,
                                  GEP.getName(), GEP.isInBounds() && NSW);

        uint64_t ResSize = DL.getTypeAllocSize(GEPEltType).getFixedSize();
        if (ResSize && ArrayEltSize % ResSize == 0) {
          uint64_t Scale = ArrayEltSize / ResSize;
                 "Index type does not match the Data Layout preferences");
          Type *IndTy = DL.getIndexType(GEPType);
                Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Off,
                                  GEP.getName(), GEP.isInBounds() && NSW);
  Value *ASCStrippedPtrOp = PtrOp;
  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(PtrOp)) {
    if (auto *BC = dyn_cast<BitCastInst>(ASC->getOperand(0)))
      ASCStrippedPtrOp = BC;

  if (auto *BCI = dyn_cast<BitCastInst>(ASCStrippedPtrOp))

  if (!GEP.isInBounds()) {
    APInt BasePtrOffset(IdxWidth, 0);
    Value *UnderlyingPtrOp =
    if (auto *AI = dyn_cast<AllocaInst>(UnderlyingPtrOp)) {
      if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
            DL.getTypeAllocSize(AI->getAllocatedType()).getKnownMinSize());
        if (BasePtrOffset.ule(AllocSize)) {
              GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
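// Illustrative sketch (assumed IR): a plain gep into an alloca whose
// accumulated constant offset provably stays within the allocation can be
// upgraded to inbounds,
//   %a = alloca [4 x i32]
//   %p = getelementptr [4 x i32], [4 x i32]* %a, i64 0, i64 2
//     -->  %p = getelementptr inbounds [4 x i32], [4 x i32]* %a, i64 0, i64 2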
  if (isa<ConstantPointerNull>(V))

  if (auto *LI = dyn_cast<LoadInst>(V))
    return isa<GlobalVariable>(LI->getPointerOperand());

    return Dest && Dest->Ptr == UsedV;

  Worklist.push_back(AI);

      switch (I->getOpcode()) {

      case Instruction::AddrSpaceCast:
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
        Worklist.push_back(I);

      case Instruction::ICmp: {

        unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;

        case Intrinsic::memmove:
        case Intrinsic::memset: {
          if (MI->isVolatile() || MI->getRawDest() != PI)

        case Intrinsic::assume:
        case Intrinsic::invariant_start:
        case Intrinsic::invariant_end:
        case Intrinsic::lifetime_start:
        case Intrinsic::lifetime_end:
        case Intrinsic::objectsize:

        case Intrinsic::launder_invariant_group:
        case Intrinsic::strip_invariant_group:
          Worklist.push_back(I);

          Worklist.push_back(I);

        if (SI->isVolatile() || SI->getPointerOperand() != PI)

  } while (!Worklist.empty());
  std::unique_ptr<DIBuilder> DIB;
  if (isa<AllocaInst>(MI)) {

    for (unsigned i = 0, e = Users.size(); i != e; ++i) {

        replaceInstUsesWith(*I, Result);
        eraseInstFromFunction(*I);

    for (unsigned i = 0, e = Users.size(); i != e; ++i) {

        replaceInstUsesWith(*C,
                            ConstantInt::get(Type::getInt1Ty(C->getContext()),
                                             C->isFalseWhenEqual()));
      } else if (auto *SI = dyn_cast<StoreInst>(I)) {
        for (auto *DVI : DVIs)
          if (DVI->isAddressOfVariable())

      eraseInstFromFunction(*I);

    for (auto *DVI : DVIs)
      if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
        DVI->eraseFromParent();

    return eraseInstFromFunction(MI);
  if (FreeInstrBB->size() != 2) {
      if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
      auto *Cast = dyn_cast<CastInst>(&Inst);
      if (!Cast || !Cast->isNoopCast(DL))

         "Broken CFG: missing edge from predecessor to successor");

    if (&Instr == FreeInstrBBTerminator)
    Instr.moveBefore(TI);
         "Only the branch instruction should remain");

  Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
  if (Dereferenceable.isValid()) {
                                   Attribute::Dereferenceable);

  if (isa<UndefValue>(Op)) {
    CreateNonTerminatorUnreachable(&FI);
    return eraseInstFromFunction(FI);

  if (isa<ConstantPointerNull>(Op))
    return eraseInstFromFunction(FI);

    return eraseInstFromFunction(
        *replaceInstUsesWith(*CI, CI->getOperand(0)));

  if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
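// Illustrative summary of the free() combines above: freeing an undef
// pointer is undefined behavior, so the call is replaced with a
// non-terminator unreachable marker and erased; freeing a null pointer is a
// no-op, so the call is simply erased.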
  if (auto *CI = dyn_cast<CallInst>(V))
    return CI->isMustTailCall();

  if (!VTy->isIntegerTy() || isa<Constant>(ResultOp))

  return replaceOperand(RI, 0,

  while (Instruction *Prev = I.getPrevNonDebugInstruction()) {

    if (Prev->isEHPad())

    eraseInstFromFunction(*Prev);

  assert(I.getParent()->sizeWithoutDebug() == 1 && "The block is now empty.");

    return BBI->isDebugOrPseudoInst() ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());

    if (BBI != FirstInstr)
  } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));

  return dyn_cast<StoreInst>(BBI);

    if (mergeStoreIntoSuccessor(*SI))
    return visitUnconditionalBranchInst(BI);

      !isa<Constant>(X)) {
    return replaceOperand(BI, 0, X);

    return replaceOperand(

      !isCanonicalPredicate(Pred)) {
    Worklist.push(Cond);
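// Illustrative sketch of the branch canonicalizations above (assumed IR):
//   br i1 (xor i1 %cond, true), label %T, label %F
//     -->  br i1 %cond, label %F, label %T
// and a branch on a compare with a non-canonical predicate is rewritten to
// the canonical predicate with its successors swapped.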
    for (auto Case : SI.cases()) {
      assert(isa<ConstantInt>(NewCase) &&
             "Result of expression should be constant");
      Case.setValue(cast<ConstantInt>(NewCase));
    return replaceOperand(SI, 0, Op0);

  for (auto &C : SI.cases()) {
        LeadingKnownZeros, C.getCaseValue()->getValue().countLeadingZeros());
        LeadingKnownOnes, C.getCaseValue()->getValue().countLeadingOnes());

  if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
      shouldChangeType(Known.getBitWidth(), NewWidth)) {

    for (auto Case : SI.cases()) {
      APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
    return replaceOperand(SI, 0, NewCond);
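// Illustrative sketch of the switch narrowing above (assumed IR): when known
// bits show that only the low N bits of the condition can vary, the
// condition and every case value are truncated to N bits, e.g.
//   switch i32 %x ... (cases 0, 1, 2)
//     -->  switch i2 (trunc i32 %x to i2) ...
// subject to shouldChangeType() approving the narrower width.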
    return replaceInstUsesWith(EV, Agg);

                                          SQ.getWithInstruction(&EV)))
    return replaceInstUsesWith(EV, V);

    const unsigned *exti, *exte, *insi, *inse;
    for (exti = EV.idx_begin(), insi = IV->idx_begin(),
         exte = EV.idx_end(), inse = IV->idx_end();
         exti != exte && insi != inse;

    if (exti == exte && insi == inse)

      return replaceInstUsesWith(EV, IV->getInsertedValueOperand());

      Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),

        (OvID == Intrinsic::smul_with_overflow ||
         OvID == Intrinsic::umul_with_overflow) &&

    if (WO->hasOneUse()) {
        eraseInstFromFunction(*WO);

           "unexpected extract index for overflow inst");

                                     WO->getNoWrapKind());

      APInt NewRHSC, Offset;
      auto *OpTy = WO->getRHS()->getType();
      auto *NewLHS = WO->getLHS();

  if (LoadInst *L = dyn_cast<LoadInst>(Agg))
    if (L->isSimple() && L->hasOneUse()) {

      Indices.push_back(Builder.getInt32(0));
      for (unsigned Idx : EV.indices())
        Indices.push_back(Builder.getInt32(Idx));

                                                   L->getPointerOperand(), Indices);
      NL->setAAMetadata(L->getAAMetadata());
      return replaceInstUsesWith(EV, NL);
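// Illustrative sketch of the extractvalue folds above (assumed IR):
//   %agg = insertvalue {i32, i32} %a, i32 %v, 1
//   %x   = extractvalue {i32, i32} %agg, 1
//     -->  %x = %v                       ; indices match exactly
// and an extractvalue of a single-use simple load becomes a GEP plus a
// narrower load of just the requested field.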
  switch (Personality) {

  return
    cast<ArrayType>(LHS->getType())->getNumElements()
  <
    cast<ArrayType>(RHS->getType())->getNumElements();
  bool MakeNewInstruction = false;

    bool isLastClause = i + 1 == e;

      if (AlreadyCaught.insert(TypeInfo).second) {
        NewClauses.push_back(CatchClause);

        MakeNewInstruction = true;

          MakeNewInstruction = true;
          CleanupFlag = false;

      if (!NumTypeInfos) {
        NewClauses.push_back(FilterClause);

        MakeNewInstruction = true;
        CleanupFlag = false;

      bool MakeNewFilter = false;

      if (isa<ConstantAggregateZero>(FilterClause)) {
        assert(NumTypeInfos > 0 && "Should have handled empty filter already!");

          MakeNewInstruction = true;

          NewFilterElts.push_back(TypeInfo);
        if (NumTypeInfos > 1)
          MakeNewFilter = true;

        NewFilterElts.reserve(NumTypeInfos);

        bool SawCatchAll = false;
        for (unsigned j = 0; j != NumTypeInfos; ++j) {

          if (SeenInFilter.insert(TypeInfo).second)
            NewFilterElts.push_back(cast<Constant>(Elt));

          MakeNewInstruction = true;

        if (NewFilterElts.size() < NumTypeInfos)
          MakeNewFilter = true;

      if (MakeNewFilter) {
                                           NewFilterElts.size());
        MakeNewInstruction = true;

        NewClauses.push_back(FilterClause);

      if (MakeNewFilter && !NewFilterElts.size()) {
        assert(MakeNewInstruction && "New filter but not a new instruction!");
        CleanupFlag = false;
  for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {

    for (j = i; j != e; ++j)
      if (!isa<ArrayType>(NewClauses[j]->getType()))

    for (unsigned k = i; k + 1 < j; ++k)

      MakeNewInstruction = true;

  for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {

    for (unsigned j = NewClauses.size() - 1; j != i; --j) {
      Value *LFilter = NewClauses[j];

        NewClauses.erase(J);
        MakeNewInstruction = true;

      if (isa<ConstantAggregateZero>(LFilter)) {

        if (isa<ConstantAggregateZero>(Filter)) {
          assert(FElts <= LElts && "Should have handled this case earlier!");

          NewClauses.erase(J);
          MakeNewInstruction = true;

      if (isa<ConstantAggregateZero>(Filter)) {

        assert(FElts > 0 && "Should have eliminated the empty filter earlier!");

        for (unsigned l = 0; l != LElts; ++l)

          NewClauses.erase(J);
          MakeNewInstruction = true;

      bool AllFound = true;
      for (unsigned f = 0; f != FElts; ++f) {

        for (unsigned l = 0; l != LElts; ++l) {
          if (LTypeInfo == FTypeInfo) {

        NewClauses.erase(J);
        MakeNewInstruction = true;

  if (MakeNewInstruction) {

    for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)

    if (NewClauses.empty())

    assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
  auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);

  if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))

  Use *MaybePoisonOperand = nullptr;
  for (Use &U : OrigOpInst->operands()) {
    if (!MaybePoisonOperand)
      MaybePoisonOperand = &U;

  OrigOpInst->dropPoisonGeneratingFlags();

  if (!MaybePoisonOperand)

  auto *FrozenMaybePoisonOperand = new FreezeInst(
      MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");

  replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
  FrozenMaybePoisonOperand->insertBefore(OrigOpInst);
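// Illustrative sketch of pushing a freeze up through its operand (assumed
// IR): only the single maybe-poison operand gets frozen, and the operand's
// poison-generating flags are dropped,
//   %y = add nsw i32 %x, 1
//   %f = freeze i32 %y
//     -->  %x.fr = freeze i32 %x
//          %f    = add i32 %x.fr, 1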
  if (isa<Constant>(Op) || Op->hasOneUse())

  if (isa<Argument>(Op)) {
    while (isa<AllocaInst>(MoveBefore))
  } else if (auto *PN = dyn_cast<PHINode>(Op)) {
  } else if (auto *II = dyn_cast<InvokeInst>(Op)) {
    MoveBefore = II->getNormalDest()->getFirstNonPHI();
  } else if (auto *CB = dyn_cast<CallBrInst>(Op)) {
    MoveBefore = CB->getDefaultDest()->getFirstNonPHI();
  } else {
    auto *I = cast<Instruction>(Op);
    assert(!I->isTerminator() && "Cannot be a terminator");
    MoveBefore = I->getNextNode();

  bool Changed = false;
  if (&FI != MoveBefore) {

  Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
    bool Dominates = DT.dominates(&FI, U);
    Changed |= Dominates;
  Value *Op0 = I.getOperand(0);

    return replaceInstUsesWith(I, V);

  if (auto *PN = dyn_cast<PHINode>(Op0)) {

  if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
    return replaceInstUsesWith(I, NI);

  auto getUndefReplacement = [&I](Type *Ty) {

    for (const auto *U : I.users()) {

      else if (BestValue != C)
        BestValue = NullValue;

    assert(BestValue && "Must have at least one use");

    return replaceInstUsesWith(I, getUndefReplacement(I.getType()));

  Constant *ReplaceC = getUndefReplacement(I.getType()->getScalarType());

  if (freezeOtherUses(I))
  auto *CB = dyn_cast<CallBase>(I);

  for (const User *U : I.users()) {
    if (Visited.insert(U).second)
      AllocaUsers.push_back(U);

  while (!AllocaUsers.empty()) {
    auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
    if (isa<BitCastInst>(UserI) || isa<GetElementPtrInst>(UserI) ||
        isa<AddrSpaceCastInst>(UserI)) {

  if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||

  if (isa<AllocaInst>(I))

  if (auto *CI = dyn_cast<CallInst>(I)) {
    if (CI->isConvergent())

  if (I->mayWriteToMemory()) {

  if (I->mayReadFromMemory()) {
                                                   E = I->getParent()->end();
      if (Scan->mayWriteToMemory())
  I->dropDroppableUses([DestBlock](const Use *U) {
    if (auto *I = dyn_cast<Instruction>(U->getUser()))
      return I->getParent() != DestBlock;

  I->moveBefore(&*InsertPos);

    if (DVI->getParent() == SrcBlock)
      DbgUsersToSink.push_back(DVI);
             [](auto *A, auto *B) { return B->comesBefore(A); });

  for (auto User : DbgUsersToSink) {

    if (isa<DbgDeclareInst>(User))

                                  User->getDebugLoc()->getInlinedAt());
    if (!SunkVariables.insert(DbgUserVariable).second)

    DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone()));
    if (isa<DbgDeclareInst>(User) && isa<CastInst>(I))
      DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0));

  if (!DIIClones.empty()) {
      DIIClone->insertBefore(&*InsertPos);
  while (!Worklist.isEmpty()) {

      eraseInstFromFunction(*I);

    if (I == nullptr) continue;

      eraseInstFromFunction(*I);

    if (!I->use_empty() &&
        (I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) {

        replaceInstUsesWith(*I, C);

        eraseInstFromFunction(*I);
        MadeIRChange = true;

    auto getOptionalSinkBlockForInst =

      unsigned NumUsers = 0;

      for (auto *U : I->users()) {
        if (U->isDroppable())

        if (PHINode *PN = dyn_cast<PHINode>(UserInst)) {
          for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
            if (PN->getIncomingValue(i) == I) {

              if (UserParent && UserParent != PN->getIncomingBlock(i))

              UserParent = PN->getIncomingBlock(i);

          assert(UserParent && "expected to find user block!");

        if (UserParent && UserParent != UserInst->getParent())

      if (NumUsers == 0) {

        if (UserParent == BB || !DT.isReachableFromEntry(UserParent))

        assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
    auto OptBB = getOptionalSinkBlockForInst(I);

      auto *UserParent = *OptBB;

        MadeIRChange = true;

        for (Use &U : I->operands())
          if (Instruction *OpI = dyn_cast<Instruction>(U.get()))

    Builder.CollectMetadataToCopy(
        I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});

                        << "    New = " << *Result << '\n');

        Result->copyMetadata(*I,
                             {LLVMContext::MD_dbg, LLVMContext::MD_annotation});

        I->replaceAllUsesWith(Result);

        Result->takeName(I);

        if (isa<PHINode>(Result) != isa<PHINode>(I)) {
          if (isa<PHINode>(I))

        InstParent->getInstList().insert(InsertPos, Result);

        Worklist.pushUsersToWorkList(*Result);
        Worklist.push(Result);

        eraseInstFromFunction(*I);

                        << "    New = " << *I << '\n');

          eraseInstFromFunction(*I);

          Worklist.pushUsersToWorkList(*I);

      MadeIRChange = true;

  return MadeIRChange;
    if (!I->hasMetadataOtherThanDebugLoc())

    auto Track = [](Metadata *ScopeList, auto &Container) {
      const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
      if (!MDScopeList || !Container.insert(MDScopeList).second)
      for (auto &MDOperand : MDScopeList->operands())
        if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
          Container.insert(MDScope);

    Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
    Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);

           "llvm.experimental.noalias.scope.decl in use ?");
           "llvm.experimental.noalias.scope should refer to a single scope");

      if (auto *MD = dyn_cast<MDNode>(MDOperand))
        return !UsedAliasScopesAndLists.contains(MD) ||
               !UsedNoAliasScopesAndLists.contains(MD);
  bool MadeIRChange = false;

  Worklist.push_back(&F.front());

      if (!Inst.use_empty() &&
          (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))

          Inst.replaceAllUsesWith(C);

          Inst.eraseFromParent();
          MadeIRChange = true;

      for (Use &U : Inst.operands()) {
        if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))

        auto *C = cast<Constant>(U);
        Constant *&FoldRes = FoldedConstants[C];

                            << "\n    Old = " << *C
                            << "\n    New = " << *FoldRes << '\n');

          MadeIRChange = true;

      if (!Inst.isDebugOrPseudoInst()) {
        InstrsForInstructionWorklist.push_back(&Inst);
        SeenAliasScopes.analyse(&Inst);

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
        bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
        Worklist.push_back(ReachableBB);
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
        Worklist.push_back(SI->findCaseValue(Cond)->getCaseSuccessor());
  } while (!Worklist.empty());
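// Illustrative note on the reachability walk above: blocks are visited
// depth-first from the entry block, and a conditional branch on a constant
// only queues its taken successor, so instructions in provably dead blocks
// never enter the worklist, e.g. "br i1 true, label %T, label %F" queues
// only %T.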
    unsigned NumDeadInstInBB;
    unsigned NumDeadDbgInstInBB;
    std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =

    MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
    NumDeadInst += NumDeadInstInBB;

  ICWorklist.reserve(InstrsForInstructionWorklist.size());

      Inst->eraseFromParent();
      MadeIRChange = true;

    ICWorklist.push(Inst);

  return MadeIRChange;
  auto &DL = F.getParent()->getDataLayout();

      if (auto *Assume = dyn_cast<AssumeInst>(I))

  bool MadeIRChange = false;

  unsigned Iteration = 0;
  while (true) {
    ++NumWorklistIterations;

          "Instruction Combining seems stuck in an infinite loop after " +

    if (Iteration > MaxIterations) {
      LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << MaxIterations
                        << " on " << F.getName()
                        << " reached; stopping before reaching a fixpoint\n");

    LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
                      << F.getName() << "\n");

                            ORE, BFI, PSI, DL, LI);

    MadeIRChange = true;

  return MadeIRChange;

    : MaxIterations(MaxIterations) {}

  auto *BFI = (PSI && PSI->hasProfileSummary()) ?

                                       BFI, PSI, MaxIterations, LI))

  auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
      &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
      &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :

                                         BFI, PSI, MaxIterations, LI);

INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
                    "Combine redundant instructions", false, false)