108#define DEBUG_TYPE "instcombine"
116 "Number of instruction combining iterations performed");
// STATISTIC counters that bucket functions by how many InstCombine
// worklist iterations they needed before reaching a fixpoint; used to
// gauge how often a single pass over the function suffices.
117STATISTIC(NumOneIteration,
"Number of functions with one iteration");
118STATISTIC(NumTwoIterations,
"Number of functions with two iterations");
119STATISTIC(NumThreeIterations,
"Number of functions with three iterations");
121 "Number of functions with four or more iterations");
// Counts instructions eliminated as dead while combining.
125STATISTIC(NumDeadInst ,
"Number of dead inst eliminated");
131 "Controls which instructions are visited");
138 "instcombine-max-sink-users",
cl::init(32),
139 cl::desc(
"Maximum number of undroppable users for instruction sinking"));
143 cl::desc(
"Maximum array size considered when doing a combine"));
155std::optional<Instruction *>
166 bool &KnownBitsComputed) {
183 *
this, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
202bool InstCombinerImpl::isDesirableIntType(
unsigned BitWidth)
const {
221bool InstCombinerImpl::shouldChangeType(
unsigned FromWidth,
222 unsigned ToWidth)
const {
228 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
233 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
238 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
249bool InstCombinerImpl::shouldChangeType(
Type *
From,
Type *To)
const {
255 unsigned FromWidth =
From->getPrimitiveSizeInBits();
257 return shouldChangeType(FromWidth, ToWidth);
266 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
267 if (!OBO || !OBO->hasNoSignedWrap())
272 if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
275 const APInt *BVal, *CVal;
279 bool Overflow =
false;
280 if (Opcode == Instruction::Add)
281 (void)BVal->
sadd_ov(*CVal, Overflow);
283 (
void)BVal->
ssub_ov(*CVal, Overflow);
289 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
290 return OBO && OBO->hasNoUnsignedWrap();
294 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
295 return OBO && OBO->hasNoSignedWrap();
304 I.clearSubclassOptionalData();
309 I.clearSubclassOptionalData();
310 I.setFastMathFlags(FMF);
319 auto *Cast = dyn_cast<CastInst>(BinOp1->
getOperand(0));
320 if (!Cast || !Cast->hasOneUse())
324 auto CastOpcode = Cast->getOpcode();
325 if (CastOpcode != Instruction::ZExt)
333 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
334 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
364Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(
Value *Val) {
365 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
368 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
369 Type *CastTy = IntToPtr->getDestTy();
372 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
375 return PtrToInt->getOperand(0);
402 bool Changed =
false;
410 Changed = !
I.swapOperands();
415 if (
I.isAssociative()) {
438 I.setHasNoUnsignedWrap(
true);
441 I.setHasNoSignedWrap(
true);
470 if (
I.isAssociative() &&
I.isCommutative()) {
533 if (isa<FPMathOperator>(NewBO)) {
547 I.setHasNoUnsignedWrap(
true);
565 if (LOp == Instruction::And)
566 return ROp == Instruction::Or || ROp == Instruction::Xor;
569 if (LOp == Instruction::Or)
570 return ROp == Instruction::And;
574 if (LOp == Instruction::Mul)
575 return ROp == Instruction::Add || ROp == Instruction::Sub;
598 if (isa<Constant>(V))
612 assert(
Op &&
"Expected a binary operator");
613 LHS =
Op->getOperand(0);
614 RHS =
Op->getOperand(1);
615 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
620 return Instruction::Mul;
624 return Op->getOpcode();
633 assert(
A &&
B &&
C &&
D &&
"All values must be provided");
636 Value *RetVal =
nullptr;
647 if (
A ==
C || (InnerCommutative &&
A ==
D)) {
659 RetVal =
Builder.CreateBinOp(InnerOpcode,
A, V);
667 if (
B ==
D || (InnerCommutative &&
B ==
C)) {
679 RetVal =
Builder.CreateBinOp(InnerOpcode, V,
B);
690 if (isa<OverflowingBinaryOperator>(RetVal)) {
693 if (isa<OverflowingBinaryOperator>(&
I)) {
694 HasNSW =
I.hasNoSignedWrap();
695 HasNUW =
I.hasNoUnsignedWrap();
697 if (
auto *LOBO = dyn_cast<OverflowingBinaryOperator>(
LHS)) {
698 HasNSW &= LOBO->hasNoSignedWrap();
699 HasNUW &= LOBO->hasNoUnsignedWrap();
702 if (
auto *ROBO = dyn_cast<OverflowingBinaryOperator>(
RHS)) {
703 HasNSW &= ROBO->hasNoSignedWrap();
704 HasNUW &= ROBO->hasNoUnsignedWrap();
707 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
717 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
720 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
750 auto IsValidBinOpc = [](
unsigned Opc) {
754 case Instruction::And:
755 case Instruction::Or:
756 case Instruction::Xor:
757 case Instruction::Add:
766 auto IsCompletelyDistributable = [](
unsigned BinOpc1,
unsigned BinOpc2,
768 assert(ShOpc != Instruction::AShr);
769 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
770 ShOpc == Instruction::Shl;
773 auto GetInvShift = [](
unsigned ShOpc) {
774 assert(ShOpc != Instruction::AShr);
775 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
778 auto CanDistributeBinops = [&](
unsigned BinOpc1,
unsigned BinOpc2,
782 if (BinOpc1 == Instruction::And)
787 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
793 if (BinOpc2 == Instruction::And)
803 auto MatchBinOp = [&](
unsigned ShOpnum) ->
Instruction * {
805 Value *
X, *
Y, *ShiftedX, *Mask, *Shift;
806 if (!
match(
I.getOperand(ShOpnum),
809 if (!
match(
I.getOperand(1 - ShOpnum),
817 auto *IY = dyn_cast<Instruction>(
I.getOperand(ShOpnum));
818 auto *IX = dyn_cast<Instruction>(ShiftedX);
823 unsigned ShOpc = IY->getOpcode();
824 if (ShOpc != IX->getOpcode())
828 auto *BO2 = dyn_cast<Instruction>(
I.getOperand(1 - ShOpnum));
832 unsigned BinOpc = BO2->getOpcode();
834 if (!IsValidBinOpc(
I.getOpcode()) || !IsValidBinOpc(BinOpc))
837 if (ShOpc == Instruction::AShr) {
851 if (BinOpc ==
I.getOpcode() &&
852 IsCompletelyDistributable(
I.getOpcode(), BinOpc, ShOpc)) {
867 if (!CanDistributeBinops(
I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
880 return MatchBinOp(1);
898 Value *
A, *CondVal, *TrueVal, *FalseVal;
901 auto MatchSelectAndCast = [&](
Value *CastOp,
Value *SelectOp) {
903 A->getType()->getScalarSizeInBits() == 1 &&
910 if (MatchSelectAndCast(
LHS,
RHS))
912 else if (MatchSelectAndCast(
RHS,
LHS))
917 auto NewFoldedConst = [&](
bool IsTrueArm,
Value *V) {
918 bool IsCastOpRHS = (CastOp ==
RHS);
919 bool IsZExt = isa<ZExtOperator>(CastOp);
925 unsigned BitWidth = V->getType()->getScalarSizeInBits();
938 Value *NewTrueVal = NewFoldedConst(
false, TrueVal);
940 NewFoldedConst(
true, FalseVal));
944 Value *NewTrueVal = NewFoldedConst(
true, TrueVal);
946 NewFoldedConst(
false, FalseVal));
967 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1093 if (!LHSIsSelect && !RHSIsSelect)
1098 if (isa<FPMathOperator>(&
I)) {
1099 FMF =
I.getFastMathFlags();
1106 Value *
Cond, *True =
nullptr, *False =
nullptr;
1114 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1129 if (LHSIsSelect && RHSIsSelect &&
A ==
D) {
1138 else if (True && !False)
1146 if (
Value *NewSel = foldAddNegate(
B,
C,
RHS))
1153 if (
Value *NewSel = foldAddNegate(
E,
F,
LHS))
1157 if (!True || !False)
1168 assert(!isa<Constant>(
I) &&
"Shouldn't invert users of constant");
1170 if (U == IgnoredUser)
1172 switch (cast<Instruction>(U)->
getOpcode()) {
1173 case Instruction::Select: {
1174 auto *SI = cast<SelectInst>(U);
1176 SI->swapProfMetadata();
1179 case Instruction::Br:
1180 cast<BranchInst>(U)->swapSuccessors();
1182 case Instruction::Xor:
1187 "canFreelyInvertAllUsersOf() ?");
1194Value *InstCombinerImpl::dyn_castNegVal(
Value *V)
const {
1204 if (
C->getType()->getElementType()->isIntegerTy())
1208 for (
unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1213 if (isa<UndefValue>(Elt))
1216 if (!isa<ConstantInt>(Elt))
1223 if (
auto *CV = dyn_cast<Constant>(V))
1224 if (CV->getType()->isVectorTy() &&
1225 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1243 !
X->getType()->isIntOrIntVectorTy(1))
1262 C = dyn_cast<Constant>(IsTrueArm ? SI->getTrueValue()
1263 : SI->getFalseValue());
1264 }
else if (
match(SI->getCondition(),
1270 C = dyn_cast<Constant>(
Op);
1290 bool FoldWithMultiUse) {
1292 if (!SI->hasOneUse() && !FoldWithMultiUse)
1295 Value *TV = SI->getTrueValue();
1296 Value *FV = SI->getFalseValue();
1297 if (!(isa<Constant>(TV) || isa<Constant>(FV)))
1301 if (SI->getType()->isIntOrIntVectorTy(1))
1306 if (
auto *BC = dyn_cast<BitCastInst>(&
Op)) {
1307 VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
1308 VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
1311 if ((SrcTy ==
nullptr) != (DestTy ==
nullptr))
1326 if (
auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1327 if (CI->hasOneUse()) {
1328 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1329 if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))
1337 if (!NewTV && !NewFV)
1375 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&
I);
1379 std::optional<bool> ImpliedCond =
1381 Ops[0], Ops[1],
DL, LHSIsTrue);
1391 if (NumPHIValues == 0)
1401 if (UI != &
I && !
I.isIdenticalTo(UI))
1412 Value *NonSimplifiedInVal =
nullptr;
1413 for (
unsigned i = 0; i != NumPHIValues; ++i) {
1422 if (NonSimplifiedBB)
return nullptr;
1424 NonSimplifiedBB = InBB;
1425 NonSimplifiedInVal = InVal;
1430 if (isa<InvokeInst>(InVal))
1431 if (cast<Instruction>(InVal)->
getParent() == NonSimplifiedBB)
1448 if (NonSimplifiedBB !=
nullptr) {
1464 if (NonSimplifiedBB) {
1468 U = NonSimplifiedInVal;
1470 U = U->DoPHITranslation(PN->
getParent(), NonSimplifiedBB);
1475 for (
unsigned i = 0; i != NumPHIValues; ++i) {
1476 if (NewPhiValues[i])
1484 if (
User == &
I)
continue;
1490 const_cast<PHINode &
>(*NewPN),
1499 auto *Phi0 = dyn_cast<PHINode>(BO.
getOperand(0));
1500 auto *Phi1 = dyn_cast<PHINode>(BO.
getOperand(1));
1501 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
1502 Phi0->getNumOperands() != Phi1->getNumOperands())
1523 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &>
T) {
1524 auto &Phi0Use = std::get<0>(
T);
1525 auto &Phi1Use = std::get<1>(
T);
1526 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
1528 Value *Phi0UseV = Phi0Use.get();
1529 Value *Phi1UseV = Phi1Use.get();
1532 else if (Phi1UseV ==
C)
1539 if (
all_of(
zip(Phi0->operands(), Phi1->operands()),
1540 CanFoldIncomingValuePair)) {
1543 assert(NewIncomingValues.
size() == Phi0->getNumOperands() &&
1544 "The number of collected incoming values should equal the number "
1545 "of the original PHINode operands!");
1546 for (
unsigned I = 0;
I < Phi0->getNumOperands();
I++)
1547 NewPhi->
addIncoming(NewIncomingValues[
I], Phi0->getIncomingBlock(
I));
1552 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
1559 ConstBB = Phi0->getIncomingBlock(0);
1560 OtherBB = Phi0->getIncomingBlock(1);
1562 ConstBB = Phi0->getIncomingBlock(1);
1563 OtherBB = Phi0->getIncomingBlock(0);
1573 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->
getTerminator());
1574 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
1581 for (
auto BBIter = BO.
getParent()->
begin(); &*BBIter != &BO; ++BBIter)
1594 Phi0->getIncomingValueForBlock(OtherBB),
1595 Phi1->getIncomingValueForBlock(OtherBB));
1596 if (
auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
1597 NotFoldedNewBO->copyIRFlags(&BO);
1607 if (!isa<Constant>(
I.getOperand(1)))
1610 if (
auto *Sel = dyn_cast<SelectInst>(
I.getOperand(0))) {
1613 }
else if (
auto *PN = dyn_cast<PHINode>(
I.getOperand(0))) {
1624 if (
GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
1631 if (!isa<VectorType>(Inst.
getType()))
1637 cast<VectorType>(Inst.
getType())->getElementCount());
1639 cast<VectorType>(Inst.
getType())->getElementCount());
1644 Value *L0, *L1, *R0, *R1;
1649 cast<ShuffleVectorInst>(
LHS)->isConcat() &&
1650 cast<ShuffleVectorInst>(
RHS)->isConcat()) {
1657 if (
auto *BO = dyn_cast<BinaryOperator>(NewBO0))
1660 if (
auto *BO = dyn_cast<BinaryOperator>(NewBO1))
1667 if (
auto *BO = dyn_cast<BinaryOperator>(V))
1671 M, Intrinsic::experimental_vector_reverse, V->getType());
1684 return createBinOpReverse(V1, V2);
1688 return createBinOpReverse(V1,
RHS);
1692 return createBinOpReverse(
LHS, V2);
1702 if (
auto *BO = dyn_cast<BinaryOperator>(XY))
1711 V1->
getType() == V2->getType() &&
1714 return createBinOpShuffle(V1, V2, Mask);
1723 auto *LShuf = cast<ShuffleVectorInst>(
LHS);
1724 auto *RShuf = cast<ShuffleVectorInst>(
RHS);
1729 if (LShuf->isSelect() &&
1731 RShuf->isSelect() &&
1749 auto *InstVTy = dyn_cast<FixedVectorType>(Inst.
getType());
1754 cast<FixedVectorType>(V1->
getType())->getNumElements() <=
1755 InstVTy->getNumElements()) {
1757 "Shuffle should not change scalar type");
1764 bool ConstOp1 = isa<Constant>(
RHS);
1766 unsigned SrcVecNumElts =
1767 cast<FixedVectorType>(V1->
getType())->getNumElements();
1770 bool MayChange =
true;
1771 unsigned NumElts = InstVTy->getNumElements();
1772 for (
unsigned I = 0;
I < NumElts; ++
I) {
1774 if (ShMask[
I] >= 0) {
1775 assert(ShMask[
I] < (
int)NumElts &&
"Not expecting narrowing shuffle");
1783 if (!CElt || (!isa<UndefValue>(NewCElt) && NewCElt != CElt) ||
1784 I >= SrcVecNumElts) {
1788 NewVecC[ShMask[
I]] = CElt;
1799 if (
I >= SrcVecNumElts || ShMask[
I] < 0) {
1820 Value *NewLHS = ConstOp1 ? V1 : NewC;
1821 Value *NewRHS = ConstOp1 ? NewC : V1;
1822 return createBinOpShuffle(NewLHS, NewRHS, Mask);
1829 if (isa<ShuffleVectorInst>(
RHS))
1862 if (isa<FPMathOperator>(R)) {
1863 R->copyFastMathFlags(&Inst);
1866 if (
auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
1867 NewInstBO->copyIRFlags(R);
1896 cast<Operator>(Op1)->getOpcode() == CastOpc &&
1897 (Op0->
hasOneUse() || Op1->hasOneUse()))) {
1915 if (!willNotOverflow(BO.
getOpcode(),
X,
Y, BO, IsSext))
1921 if (
auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
1923 NewBinOp->setHasNoSignedWrap();
1925 NewBinOp->setHasNoUnsignedWrap();
1943 if (!
GEP.hasAllConstantIndices())
1958 bool IsInBounds =
GEP.isInBounds();
1959 Type *Ty =
GEP.getSourceElementType();
1960 Value *NewTrueC =
Builder.CreateGEP(Ty, TrueC, IndexC,
"", IsInBounds);
1961 Value *NewFalseC =
Builder.CreateGEP(Ty, FalseC, IndexC,
"", IsInBounds);
1974 Type *PtrTy = Src->getType()->getScalarType();
1975 if (
GEP.hasAllConstantIndices() &&
1976 (Src->hasOneUse() || Src->hasAllConstantIndices())) {
1980 bool IsFirstType =
true;
1981 unsigned NumVarIndices = 0;
1982 for (
auto Pair :
enumerate(Src->indices())) {
1983 if (!isa<ConstantInt>(Pair.value())) {
1985 IsFirstType =
false;
1986 NumVarIndices = Pair.index() + 1;
1993 if (NumVarIndices != Src->getNumIndices()) {
2014 if (!
Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero())) {
2017 if (Src->hasAllConstantIndices())
2029 Src->getNumIndices() - NumVarIndices));
2036 IsInBounds &=
Idx.isNonNegative() == ConstIndices[0].isNonNegative();
2041 Indices,
"", IsInBounds));
2044 if (Src->getResultElementType() !=
GEP.getSourceElementType())
2050 bool EndsWithSequential =
false;
2053 EndsWithSequential =
I.isSequential();
2056 if (EndsWithSequential) {
2059 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2077 if (Src->getNumOperands() == 2) {
2083 Indices.
append(Src->op_begin()+1, Src->op_end()-1);
2086 }
else if (isa<Constant>(*
GEP.idx_begin()) &&
2087 cast<Constant>(*
GEP.idx_begin())->isNullValue() &&
2088 Src->getNumOperands() != 1) {
2090 Indices.
append(Src->op_begin()+1, Src->op_end());
2094 if (!Indices.
empty())
2097 Src->getSourceElementType(), Src->getOperand(0), Indices,
"",
2106 Type *GEPType =
GEP.getType();
2107 Type *GEPEltType =
GEP.getSourceElementType();
2116 if (
auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
2117 auto VWidth = GEPFVTy->getNumElements();
2118 APInt UndefElts(VWidth, 0);
2134 bool MadeChange =
false;
2138 Type *NewScalarIndexTy =
2148 Type *IndexTy = (*I)->getType();
2149 Type *NewIndexType =
2152 cast<VectorType>(IndexTy)->getElementCount())
2164 if (IndexTy != NewIndexType) {
2176 if (
auto *PN = dyn_cast<PHINode>(PtrOp)) {
2177 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
2192 for (
auto I = PN->op_begin()+1,
E = PN->op_end();
I !=
E; ++
I) {
2193 auto *Op2 = dyn_cast<GetElementPtrInst>(*
I);
2194 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
2195 Op1->getSourceElementType() != Op2->getSourceElementType())
2203 Type *CurTy =
nullptr;
2205 for (
unsigned J = 0,
F = Op1->getNumOperands(); J !=
F; ++J) {
2206 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
2209 if (Op1->getOperand(J) != Op2->getOperand(J)) {
2218 assert(CurTy &&
"No current type?");
2238 CurTy = Op1->getSourceElementType();
2250 if (DI != -1 && !PN->hasOneUse())
2253 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
2266 PN->getNumOperands());
2269 for (
auto &
I : PN->operands())
2270 NewPN->
addIncoming(cast<GEPOperator>(
I)->getOperand(DI),
2271 PN->getIncomingBlock(
I));
2273 NewGEP->setOperand(DI, NewPN);
2276 NewGEP->insertBefore(*
GEP.getParent(),
GEP.getParent()->getFirstInsertionPt());
2280 if (
auto *Src = dyn_cast<GEPOperator>(PtrOp))
2286 if (
GEP.getNumIndices() == 1 && !IsGEPSrcEleScalable) {
2287 unsigned AS =
GEP.getPointerAddressSpace();
2288 if (
GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
2292 bool Matched =
false;
2295 if (TyAllocSize == 1) {
2296 V =
GEP.getOperand(1);
2298 }
else if (
match(
GEP.getOperand(1),
2300 if (TyAllocSize == 1ULL <<
C)
2302 }
else if (
match(
GEP.getOperand(1),
2304 if (TyAllocSize ==
C)
2324 if (!
GEP.isInBounds()) {
2327 APInt BasePtrOffset(IdxWidth, 0);
2328 Value *UnderlyingPtrOp =
2331 bool CanBeNull, CanBeFreed;
2333 DL, CanBeNull, CanBeFreed);
2334 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
2335 if (
GEP.accumulateConstantOffset(
DL, BasePtrOffset) &&
2337 APInt AllocSize(IdxWidth, DerefBytes);
2338 if (BasePtrOffset.
ule(AllocSize)) {
2340 GEP.getSourceElementType(), PtrOp, Indices,
GEP.getName());
2354 if (isa<ConstantPointerNull>(V))
2356 if (
auto *LI = dyn_cast<LoadInst>(V))
2357 return isa<GlobalVariable>(LI->getPointerOperand());
2381 return Dest && Dest->Ptr == UsedV;
2395 switch (
I->getOpcode()) {
2400 case Instruction::AddrSpaceCast:
2401 case Instruction::BitCast:
2402 case Instruction::GetElementPtr:
2407 case Instruction::ICmp: {
2414 unsigned OtherIndex = (ICI->
getOperand(0) == PI) ? 1 : 0;
2421 case Instruction::Call:
2428 case Intrinsic::memmove:
2429 case Intrinsic::memcpy:
2430 case Intrinsic::memset: {
2432 if (
MI->isVolatile() ||
MI->getRawDest() != PI)
2436 case Intrinsic::assume:
2437 case Intrinsic::invariant_start:
2438 case Intrinsic::invariant_end:
2439 case Intrinsic::lifetime_start:
2440 case Intrinsic::lifetime_end:
2441 case Intrinsic::objectsize:
2444 case Intrinsic::launder_invariant_group:
2445 case Intrinsic::strip_invariant_group:
2474 case Instruction::Store: {
2476 if (SI->isVolatile() || SI->getPointerOperand() != PI)
2484 }
while (!Worklist.
empty());
2506 std::unique_ptr<DIBuilder> DIB;
2507 if (isa<AllocaInst>(
MI)) {
2513 for (
unsigned i = 0, e =
Users.size(); i != e; ++i) {
2525 II,
DL, &
TLI,
AA,
true, &InsertedInstructions);
2526 for (
Instruction *Inserted : InsertedInstructions)
2534 for (
unsigned i = 0, e =
Users.size(); i != e; ++i) {
2543 C->isFalseWhenEqual()));
2544 }
else if (
auto *SI = dyn_cast<StoreInst>(
I)) {
2545 for (
auto *DVI : DVIs)
2546 if (DVI->isAddressOfVariable())
2586 for (
auto *DVI : DVIs)
2587 if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
2588 DVI->eraseFromParent();
2634 if (FreeInstrBB->
size() != 2) {
2636 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
2638 auto *Cast = dyn_cast<CastInst>(&Inst);
2639 if (!Cast || !Cast->isNoopCast(
DL))
2660 "Broken CFG: missing edge from predecessor to successor");
2665 if (&Instr == FreeInstrBBTerminator)
2667 Instr.moveBeforePreserving(TI);
2670 "Only the branch instruction should remain");
2681 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0, Attribute::NonNull);
2682 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
2683 if (Dereferenceable.
isValid()) {
2685 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0,
2686 Attribute::Dereferenceable);
2687 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.
getContext(), 0, Bytes);
2696 if (isa<UndefValue>(
Op)) {
2704 if (isa<ConstantPointerNull>(
Op))
2744 bool Changed =
false;
2745 while (
Instruction *Prev =
I.getPrevNonDebugInstruction()) {
2750 if (Prev->isEHPad())
2781 return BBI->isDebugOrPseudoInst() ||
2782 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
2787 if (BBI != FirstInstr)
2789 }
while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
2791 return dyn_cast<StoreInst>(BBI);
2803 if (!
DeadEdges.insert({From, To}).second)
2808 for (
Use &U : PN.incoming_values())
2809 if (PN.getIncomingBlock(U) ==
From && !isa<PoisonValue>(U)) {
2825 std::next(
I->getReverseIterator())))) {
2826 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
2830 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
2859 if (Succ == LiveSucc)
2885 if (isa<SelectInst>(
Cond) &&
2904 auto *Cmp = cast<CmpInst>(
Cond);
2911 if (isa<UndefValue>(
Cond)) {
2915 if (
auto *CI = dyn_cast<ConstantInt>(
Cond)) {
2930 for (
auto Case : SI.cases()) {
2932 assert(isa<ConstantInt>(NewCase) &&
2933 "Result of expression should be constant");
2934 Case.setValue(cast<ConstantInt>(NewCase));
2945 for (
const auto &
C : SI.cases()) {
2947 std::min(LeadingKnownZeros,
C.getCaseValue()->getValue().countl_zero());
2949 std::min(LeadingKnownOnes,
C.getCaseValue()->getValue().countl_one());
2952 unsigned NewWidth = Known.
getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
2958 if (NewWidth > 0 && NewWidth < Known.
getBitWidth() &&
2959 shouldChangeType(Known.
getBitWidth(), NewWidth)) {
2964 for (
auto Case : SI.cases()) {
2965 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
2971 if (isa<UndefValue>(
Cond)) {
2975 if (
auto *CI = dyn_cast<ConstantInt>(
Cond)) {
2977 SI.findCaseValue(CI)->getCaseSuccessor());
2991 const APInt *
C =
nullptr;
2993 if (*EV.
idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
2994 OvID == Intrinsic::umul_with_overflow)) {
2999 if (
C->isPowerOf2()) {
3000 return BinaryOperator::CreateShl(
3010 if (!WO->hasOneUse())
3024 assert(*EV.
idx_begin() == 1 &&
"Unexpected extract index for overflow inst");
3027 if (OvID == Intrinsic::usub_with_overflow)
3032 if (OvID == Intrinsic::smul_with_overflow &&
3033 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
3034 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
3043 WO->getBinaryOp(), *
C, WO->getNoWrapKind());
3048 auto *OpTy = WO->getRHS()->getType();
3049 auto *NewLHS = WO->getLHS();
3071 const unsigned *exti, *exte, *insi, *inse;
3072 for (exti = EV.
idx_begin(), insi =
IV->idx_begin(),
3073 exte = EV.
idx_end(), inse =
IV->idx_end();
3074 exti != exte && insi != inse;
3088 if (exti == exte && insi == inse)
3121 if (
Instruction *R = foldExtractOfOverflowIntrinsic(EV))
3124 if (
LoadInst *L = dyn_cast<LoadInst>(Agg)) {
3126 if (
auto *STy = dyn_cast<StructType>(Agg->
getType());
3127 STy && STy->containsScalableVectorType())
3135 if (L->isSimple() && L->hasOneUse()) {
3147 L->getPointerOperand(), Indices);
3151 NL->setAAMetadata(L->getAAMetadata());
3158 if (
auto *PN = dyn_cast<PHINode>(Agg))
3175 switch (Personality) {
3204 cast<ArrayType>(
LHS->
getType())->getNumElements()
3206 cast<ArrayType>(
RHS->
getType())->getNumElements();
3218 bool MakeNewInstruction =
false;
3220 bool CleanupFlag =
LI.isCleanup();
3223 for (
unsigned i = 0, e =
LI.getNumClauses(); i != e; ++i) {
3224 bool isLastClause = i + 1 == e;
3225 if (
LI.isCatch(i)) {
3232 if (AlreadyCaught.
insert(TypeInfo).second) {
3237 MakeNewInstruction =
true;
3244 MakeNewInstruction =
true;
3245 CleanupFlag =
false;
3256 assert(
LI.isFilter(i) &&
"Unsupported landingpad clause!");
3264 if (!NumTypeInfos) {
3267 MakeNewInstruction =
true;
3268 CleanupFlag =
false;
3272 bool MakeNewFilter =
false;
3274 if (isa<ConstantAggregateZero>(FilterClause)) {
3276 assert(NumTypeInfos > 0 &&
"Should have handled empty filter already!");
3282 MakeNewInstruction =
true;
3289 if (NumTypeInfos > 1)
3290 MakeNewFilter =
true;
3294 NewFilterElts.
reserve(NumTypeInfos);
3299 bool SawCatchAll =
false;
3300 for (
unsigned j = 0; j != NumTypeInfos; ++j) {
3328 if (SeenInFilter.
insert(TypeInfo).second)
3329 NewFilterElts.
push_back(cast<Constant>(Elt));
3334 MakeNewInstruction =
true;
3339 if (NewFilterElts.
size() < NumTypeInfos)
3340 MakeNewFilter =
true;
3342 if (MakeNewFilter) {
3344 NewFilterElts.
size());
3346 MakeNewInstruction =
true;
3355 if (MakeNewFilter && !NewFilterElts.
size()) {
3356 assert(MakeNewInstruction &&
"New filter but not a new instruction!");
3357 CleanupFlag =
false;
3368 for (
unsigned i = 0, e = NewClauses.
size(); i + 1 < e; ) {
3371 for (j = i; j != e; ++j)
3372 if (!isa<ArrayType>(NewClauses[j]->
getType()))
3378 for (
unsigned k = i; k + 1 < j; ++k)
3382 std::stable_sort(NewClauses.
begin() + i, NewClauses.
begin() + j,
3384 MakeNewInstruction =
true;
3403 for (
unsigned i = 0; i + 1 < NewClauses.
size(); ++i) {
3413 for (
unsigned j = NewClauses.
size() - 1; j != i; --j) {
3414 Value *LFilter = NewClauses[j];
3425 NewClauses.
erase(J);
3426 MakeNewInstruction =
true;
3436 if (isa<ConstantAggregateZero>(LFilter)) {
3439 if (isa<ConstantAggregateZero>(
Filter)) {
3440 assert(FElts <= LElts &&
"Should have handled this case earlier!");
3442 NewClauses.
erase(J);
3443 MakeNewInstruction =
true;
3449 if (isa<ConstantAggregateZero>(
Filter)) {
3452 assert(FElts > 0 &&
"Should have eliminated the empty filter earlier!");
3453 for (
unsigned l = 0; l != LElts; ++l)
3456 NewClauses.
erase(J);
3457 MakeNewInstruction =
true;
3468 bool AllFound =
true;
3469 for (
unsigned f = 0; f != FElts; ++f) {
3472 for (
unsigned l = 0; l != LElts; ++l) {
3474 if (LTypeInfo == FTypeInfo) {
3484 NewClauses.
erase(J);
3485 MakeNewInstruction =
true;
3493 if (MakeNewInstruction) {
3496 for (
unsigned i = 0, e = NewClauses.
size(); i != e; ++i)
3501 if (NewClauses.
empty())
3509 if (
LI.isCleanup() != CleanupFlag) {
3510 assert(!CleanupFlag &&
"Adding a cleanup, not removing one?!");
3511 LI.setCleanup(CleanupFlag);
3535 auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
3540 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
3554 Use *MaybePoisonOperand =
nullptr;
3555 for (
Use &U : OrigOpInst->operands()) {
3556 if (isa<MetadataAsValue>(U.get()) ||
3559 if (!MaybePoisonOperand)
3560 MaybePoisonOperand = &U;
3565 OrigOpInst->dropPoisonGeneratingFlagsAndMetadata();
3568 if (!MaybePoisonOperand)
3573 MaybePoisonOperand->get(), MaybePoisonOperand->get()->
getName() +
".fr");
3575 replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
3586 Use *StartU =
nullptr;
3604 Value *StartV = StartU->get();
3616 if (!Visited.
insert(V).second)
3619 if (Visited.
size() > 32)
3636 I->dropPoisonGeneratingFlagsAndMetadata();
3638 if (StartNeedsFreeze) {
3650 if (isa<Constant>(
Op) ||
Op->hasOneUse())
3659 if (isa<Argument>(
Op)) {
3668 bool Changed =
false;
3669 if (&FI != MoveBefore) {
3674 Op->replaceUsesWithIf(&FI, [&](
Use &U) ->
bool {
3676 Changed |= Dominates;
3685 for (
auto *U : V->users()) {
3686 if (isa<ShuffleVectorInst>(U))
3695 Value *Op0 =
I.getOperand(0);
3701 if (
auto *PN = dyn_cast<PHINode>(Op0)) {
3724 auto getUndefReplacement = [&
I](
Type *Ty) {
3727 for (
const auto *U :
I.users()) {
3736 else if (BestValue !=
C)
3737 BestValue = NullValue;
3739 assert(BestValue &&
"Must have at least one use");
3754 Constant *ReplaceC = getUndefReplacement(
I.getType()->getScalarType());
3769 auto *CB = dyn_cast<CallBase>(
I);
3788 for (
const User *U :
I.users()) {
3789 if (Visited.
insert(U).second)
3794 while (!AllocaUsers.
empty()) {
3795 auto *UserI = cast<Instruction>(AllocaUsers.
pop_back_val());
3796 if (isa<BitCastInst>(UserI) || isa<GetElementPtrInst>(UserI) ||
3797 isa<AddrSpaceCastInst>(UserI)) {
3818 if (isa<PHINode>(
I) ||
I->isEHPad() ||
I->mayThrow() || !
I->willReturn() ||
3826 if (isa<AllocaInst>(
I))
3834 if (
auto *CI = dyn_cast<CallInst>(
I)) {
3835 if (CI->isConvergent())
3841 if (
I->mayWriteToMemory()) {
3848 if (
I->mayReadFromMemory()) {
3855 E =
I->getParent()->end();
3857 if (Scan->mayWriteToMemory())
3861 I->dropDroppableUses([&](
const Use *U) {
3862 auto *
I = dyn_cast<Instruction>(U->getUser());
3863 if (
I &&
I->getParent() != DestBlock) {
3873 I->moveBefore(*DestBlock, InsertPos);
3887 for (
auto &DbgUser : DbgUsers)
3888 if (DbgUser->getParent() != DestBlock)
3895 if (DVI->getParent() == SrcBlock)
3898 [](
auto *
A,
auto *
B) {
return B->comesBefore(
A); });
3902 for (
auto *
User : DbgUsersToSink) {
3907 if (isa<DbgDeclareInst>(
User))
3912 User->getDebugLoc()->getInlinedAt());
3914 if (!SunkVariables.
insert(DbgUserVariable).second)
3919 if (isa<DbgAssignIntrinsic>(
User))
3922 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(
User->clone()));
3923 if (isa<DbgDeclareInst>(
User) && isa<CastInst>(
I))
3924 DIIClones.back()->replaceVariableLocationOp(
I,
I->getOperand(0));
3929 if (!DIIClones.empty()) {
3934 DIIClone->insertBefore(&*InsertPos);
3961 if (
I ==
nullptr)
continue;
3976 auto getOptionalSinkBlockForInst =
3977 [
this](
Instruction *
I) -> std::optional<BasicBlock *> {
3979 return std::nullopt;
3983 unsigned NumUsers = 0;
3985 for (
auto *U :
I->users()) {
3986 if (U->isDroppable())
3989 return std::nullopt;
3993 if (
PHINode *PN = dyn_cast<PHINode>(UserInst)) {
3994 for (
unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
3995 if (PN->getIncomingValue(i) ==
I) {
3999 if (UserParent && UserParent != PN->getIncomingBlock(i))
4000 return std::nullopt;
4001 UserParent = PN->getIncomingBlock(i);
4004 assert(UserParent &&
"expected to find user block!");
4006 if (UserParent && UserParent != UserInst->
getParent())
4007 return std::nullopt;
4013 if (NumUsers == 0) {
4017 return std::nullopt;
4029 return std::nullopt;
4039 return std::nullopt;
4044 auto OptBB = getOptionalSinkBlockForInst(
I);
4046 auto *UserParent = *OptBB;
4054 for (
Use &U :
I->operands())
4055 if (
Instruction *OpI = dyn_cast<Instruction>(U.get()))
4063 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
4076 <<
" New = " << *Result <<
'\n');
4078 Result->copyMetadata(*
I,
4079 {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
4081 I->replaceAllUsesWith(Result);
4084 Result->takeName(
I);
4091 if (isa<PHINode>(Result) != isa<PHINode>(
I)) {
4093 if (isa<PHINode>(
I))
4099 Result->insertInto(InstParent, InsertPos);
4108 <<
" New = " << *
I <<
'\n');
4140 if (!
I->hasMetadataOtherThanDebugLoc())
4143 auto Track = [](
Metadata *ScopeList,
auto &Container) {
4144 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
4145 if (!MDScopeList || !Container.insert(MDScopeList).second)
4147 for (
const auto &
MDOperand : MDScopeList->operands())
4148 if (
auto *MDScope = dyn_cast<MDNode>(
MDOperand))
4149 Container.insert(MDScope);
4152 Track(
I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
4153 Track(
I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
4162 "llvm.experimental.noalias.scope.decl in use ?");
4165 "llvm.experimental.noalias.scope should refer to a single scope");
4167 if (
auto *MD = dyn_cast<MDNode>(
MDOperand))
4168 return !UsedAliasScopesAndLists.
contains(MD) ||
4169 !UsedNoAliasScopesAndLists.
contains(MD);
4194 if (Succ != LiveSucc &&
DeadEdges.insert({BB, Succ}).second)
4195 for (
PHINode &PN : Succ->phis())
4196 for (
Use &U : PN.incoming_values())
4197 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
4207 HandleOnlyLiveSuccessor(BB,
nullptr);
4214 if (!Inst.use_empty() &&
4215 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
4219 Inst.replaceAllUsesWith(
C);
4222 Inst.eraseFromParent();
4228 for (
Use &U : Inst.operands()) {
4229 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
4232 auto *
C = cast<Constant>(U);
4233 Constant *&FoldRes = FoldedConstants[
C];
4239 <<
"\n Old = " << *
C
4240 <<
"\n New = " << *FoldRes <<
'\n');
4249 if (!Inst.isDebugOrPseudoInst()) {
4250 InstrsForInstructionWorklist.
push_back(&Inst);
4251 SeenAliasScopes.
analyse(&Inst);
4259 if (isa<UndefValue>(BI->getCondition())) {
4261 HandleOnlyLiveSuccessor(BB,
nullptr);
4264 if (
auto *
Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
4265 bool CondVal =
Cond->getZExtValue();
4266 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
4269 }
else if (
SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
4270 if (isa<UndefValue>(SI->getCondition())) {
4272 HandleOnlyLiveSuccessor(BB,
nullptr);
4275 if (
auto *
Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
4276 HandleOnlyLiveSuccessor(BB,
4277 SI->findCaseValue(
Cond)->getCaseSuccessor());
4287 if (LiveBlocks.
count(&BB))
4290 unsigned NumDeadInstInBB;
4291 unsigned NumDeadDbgInstInBB;
4292 std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
4295 MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
4296 NumDeadInst += NumDeadInstInBB;
4313 Inst->eraseFromParent();
4330 auto &
DL =
F.getParent()->getDataLayout();
4338 if (
auto *Assume = dyn_cast<AssumeInst>(
I))
4346 bool MadeIRChange =
false;
4351 unsigned Iteration = 0;
4355 if (Iteration > MaxIterations && !VerifyFixpoint) {
4356 LLVM_DEBUG(
dbgs() <<
"\n\n[IC] Iteration limit #" << MaxIterations
4357 <<
" on " <<
F.getName()
4358 <<
" reached; stopping without verifying fixpoint\n");
4362 ++NumWorklistIterations;
4363 LLVM_DEBUG(
dbgs() <<
"\n\nINSTCOMBINE ITERATION #" << Iteration <<
" on "
4364 <<
F.getName() <<
"\n");
4367 ORE, BFI, PSI,
DL, LI);
4370 MadeChangeInThisIteration |= IC.
run();
4371 if (!MadeChangeInThisIteration)
4374 MadeIRChange =
true;
4375 if (Iteration > MaxIterations) {
4377 "Instruction Combining did not reach a fixpoint after " +
4378 Twine(MaxIterations) +
" iterations");
4384 else if (Iteration == 2)
4386 else if (Iteration == 3)
4387 ++NumThreeIterations;
4389 ++NumFourOrMoreIterations;
4391 return MadeIRChange;
4399 OS, MapClassName2PassName);
4402 OS << (Options.
UseLoopInfo ?
"" :
"no-") <<
"use-loop-info;";
4425 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
4461 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
4462 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
F);
4463 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
F);
4464 auto &
TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
F);
4465 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
4466 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
4469 auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
4470 auto *LI = LIWP ? &LIWP->getLoopInfo() :
nullptr;
4472 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
4475 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
4491 "Combine redundant instructions",
false,
false)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements a class to represent arbitrary precision integral constant values and operations...
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, unsigned MaxIterations, bool VerifyFixpoint, LoopInfo *LI)
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static bool isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI)
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Constant * constantFoldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static bool isMergedGEPInBounds(GEPOperator &GEP1, GEPOperator &GEP2)
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
Select target instructions out of generic instructions
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
This header defines various interfaces for pass management in LLVM.
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
This defines the Use class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static const uint32_t IV[8]
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.