108#define DEBUG_TYPE "instcombine"
116 "Number of instruction combining iterations performed");
117STATISTIC(NumOneIteration,
"Number of functions with one iteration");
118STATISTIC(NumTwoIterations,
"Number of functions with two iterations");
119STATISTIC(NumThreeIterations,
"Number of functions with three iterations");
121 "Number of functions with four or more iterations");
125STATISTIC(NumDeadInst ,
"Number of dead inst eliminated");
131 "Controls which instructions are visited");
138 "instcombine-max-sink-users",
cl::init(32),
139 cl::desc(
"Maximum number of undroppable users for instruction sinking"));
143 cl::desc(
"Maximum array size considered when doing a combine"));
155std::optional<Instruction *>
166 bool &KnownBitsComputed) {
183 *
this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
202bool InstCombinerImpl::isDesirableIntType(
unsigned BitWidth)
const {
221bool InstCombinerImpl::shouldChangeType(
unsigned FromWidth,
222 unsigned ToWidth)
const {
228 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
233 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
238 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
249bool InstCombinerImpl::shouldChangeType(
Type *
From,
Type *To)
const {
255 unsigned FromWidth =
From->getPrimitiveSizeInBits();
257 return shouldChangeType(FromWidth, ToWidth);
266 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
267 if (!OBO || !OBO->hasNoSignedWrap())
272 if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
275 const APInt *BVal, *CVal;
279 bool Overflow =
false;
280 if (Opcode == Instruction::Add)
281 (void)BVal->
sadd_ov(*CVal, Overflow);
283 (
void)BVal->
ssub_ov(*CVal, Overflow);
289 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
290 return OBO && OBO->hasNoUnsignedWrap();
294 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
295 return OBO && OBO->hasNoSignedWrap();
304 I.clearSubclassOptionalData();
309 I.clearSubclassOptionalData();
310 I.setFastMathFlags(FMF);
319 auto *Cast = dyn_cast<CastInst>(BinOp1->
getOperand(0));
320 if (!Cast || !Cast->hasOneUse())
324 auto CastOpcode = Cast->getOpcode();
325 if (CastOpcode != Instruction::ZExt)
333 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
334 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
360 Cast->dropPoisonGeneratingFlags();
366Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(
Value *Val) {
367 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
370 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
371 Type *CastTy = IntToPtr->getDestTy();
374 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
377 return PtrToInt->getOperand(0);
404 bool Changed =
false;
412 Changed = !
I.swapOperands();
414 if (
I.isCommutative()) {
415 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
425 if (
I.isAssociative()) {
448 I.setHasNoUnsignedWrap(
true);
451 I.setHasNoSignedWrap(
true);
480 if (
I.isAssociative() &&
I.isCommutative()) {
543 if (isa<FPMathOperator>(NewBO)) {
557 I.setHasNoUnsignedWrap(
true);
575 if (LOp == Instruction::And)
576 return ROp == Instruction::Or || ROp == Instruction::Xor;
579 if (LOp == Instruction::Or)
580 return ROp == Instruction::And;
584 if (LOp == Instruction::Mul)
585 return ROp == Instruction::Add || ROp == Instruction::Sub;
608 if (isa<Constant>(V))
622 assert(
Op &&
"Expected a binary operator");
623 LHS =
Op->getOperand(0);
624 RHS =
Op->getOperand(1);
625 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
630 return Instruction::Mul;
635 if (OtherOp && OtherOp->
getOpcode() == Instruction::AShr &&
638 return Instruction::AShr;
641 return Op->getOpcode();
650 assert(
A &&
B &&
C &&
D &&
"All values must be provided");
653 Value *RetVal =
nullptr;
664 if (
A ==
C || (InnerCommutative &&
A ==
D)) {
684 if (
B ==
D || (InnerCommutative &&
B ==
C)) {
707 if (isa<OverflowingBinaryOperator>(RetVal)) {
710 if (isa<OverflowingBinaryOperator>(&
I)) {
711 HasNSW =
I.hasNoSignedWrap();
712 HasNUW =
I.hasNoUnsignedWrap();
714 if (
auto *LOBO = dyn_cast<OverflowingBinaryOperator>(
LHS)) {
715 HasNSW &= LOBO->hasNoSignedWrap();
716 HasNUW &= LOBO->hasNoUnsignedWrap();
719 if (
auto *ROBO = dyn_cast<OverflowingBinaryOperator>(
RHS)) {
720 HasNSW &= ROBO->hasNoSignedWrap();
721 HasNUW &= ROBO->hasNoUnsignedWrap();
724 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
734 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
737 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
752 unsigned Opc =
I->getOpcode();
753 unsigned ConstIdx = 1;
760 case Instruction::Sub:
763 case Instruction::ICmp:
770 case Instruction::Or:
774 case Instruction::Add:
780 if (!
match(
I->getOperand(1 - ConstIdx),
793 if (Opc == Instruction::ICmp && !cast<ICmpInst>(
I)->isEquality() &&
798 bool Consumes =
false;
802 assert(NotOp !=
nullptr &&
803 "Desync between isFreeToInvert and getFreelyInverted");
812 case Instruction::Sub:
815 case Instruction::Or:
816 case Instruction::Add:
819 case Instruction::ICmp:
855 auto IsValidBinOpc = [](
unsigned Opc) {
859 case Instruction::And:
860 case Instruction::Or:
861 case Instruction::Xor:
862 case Instruction::Add:
871 auto IsCompletelyDistributable = [](
unsigned BinOpc1,
unsigned BinOpc2,
873 assert(ShOpc != Instruction::AShr);
874 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
875 ShOpc == Instruction::Shl;
878 auto GetInvShift = [](
unsigned ShOpc) {
879 assert(ShOpc != Instruction::AShr);
880 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
883 auto CanDistributeBinops = [&](
unsigned BinOpc1,
unsigned BinOpc2,
887 if (BinOpc1 == Instruction::And)
892 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
898 if (BinOpc2 == Instruction::And)
909 auto MatchBinOp = [&](
unsigned ShOpnum) ->
Instruction * {
911 Value *
X, *
Y, *ShiftedX, *Mask, *Shift;
912 if (!
match(
I.getOperand(ShOpnum),
915 if (!
match(
I.getOperand(1 - ShOpnum),
923 auto *IY = dyn_cast<Instruction>(
I.getOperand(ShOpnum));
924 auto *IX = dyn_cast<Instruction>(ShiftedX);
929 unsigned ShOpc = IY->getOpcode();
930 if (ShOpc != IX->getOpcode())
934 auto *BO2 = dyn_cast<Instruction>(
I.getOperand(1 - ShOpnum));
938 unsigned BinOpc = BO2->getOpcode();
940 if (!IsValidBinOpc(
I.getOpcode()) || !IsValidBinOpc(BinOpc))
943 if (ShOpc == Instruction::AShr) {
957 if (BinOpc ==
I.getOpcode() &&
958 IsCompletelyDistributable(
I.getOpcode(), BinOpc, ShOpc)) {
973 if (!CanDistributeBinops(
I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
987 return MatchBinOp(1);
1005 Value *
A, *CondVal, *TrueVal, *FalseVal;
1008 auto MatchSelectAndCast = [&](
Value *CastOp,
Value *SelectOp) {
1010 A->getType()->getScalarSizeInBits() == 1 &&
1017 if (MatchSelectAndCast(
LHS,
RHS))
1019 else if (MatchSelectAndCast(
RHS,
LHS))
1024 auto NewFoldedConst = [&](
bool IsTrueArm,
Value *V) {
1025 bool IsCastOpRHS = (CastOp ==
RHS);
1026 bool IsZExt = isa<ZExtInst>(CastOp);
1031 }
else if (IsZExt) {
1032 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1045 Value *NewTrueVal = NewFoldedConst(
false, TrueVal);
1047 NewFoldedConst(
true, FalseVal));
1051 Value *NewTrueVal = NewFoldedConst(
true, TrueVal);
1053 NewFoldedConst(
false, FalseVal));
1074 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1194static std::optional<std::pair<Value *, Value *>>
1196 if (
LHS->getParent() !=
RHS->getParent())
1197 return std::nullopt;
1199 if (
LHS->getNumIncomingValues() < 2)
1200 return std::nullopt;
1203 return std::nullopt;
1205 Value *L0 =
LHS->getIncomingValue(0);
1206 Value *R0 =
RHS->getIncomingValue(0);
1208 for (
unsigned I = 1,
E =
LHS->getNumIncomingValues();
I !=
E; ++
I) {
1212 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1215 return std::nullopt;
1218 return std::optional(std::pair(L0, R0));
1221std::optional<std::pair<Value *, Value *>>
1222InstCombinerImpl::matchSymmetricPair(
Value *LHS,
Value *RHS) {
1223 Instruction *LHSInst = dyn_cast<Instruction>(LHS);
1224 Instruction *RHSInst = dyn_cast<Instruction>(RHS);
1226 return std::nullopt;
1228 case Instruction::PHI:
1230 case Instruction::Select: {
1236 return std::pair(TrueVal, FalseVal);
1237 return std::nullopt;
1239 case Instruction::Call: {
1243 if (LHSMinMax && RHSMinMax &&
1250 return std::pair(LHSMinMax->
getLHS(), LHSMinMax->
getRHS());
1251 return std::nullopt;
1254 return std::nullopt;
1264 if (!LHSIsSelect && !RHSIsSelect)
1269 if (isa<FPMathOperator>(&
I)) {
1270 FMF =
I.getFastMathFlags();
1277 Value *
Cond, *True =
nullptr, *False =
nullptr;
1285 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1300 if (LHSIsSelect && RHSIsSelect &&
A ==
D) {
1309 else if (True && !False)
1317 if (
Value *NewSel = foldAddNegate(
B,
C,
RHS))
1324 if (
Value *NewSel = foldAddNegate(
E,
F,
LHS))
1328 if (!True || !False)
1339 assert(!isa<Constant>(
I) &&
"Shouldn't invert users of constant");
1341 if (U == IgnoredUser)
1343 switch (cast<Instruction>(U)->
getOpcode()) {
1344 case Instruction::Select: {
1345 auto *SI = cast<SelectInst>(U);
1347 SI->swapProfMetadata();
1350 case Instruction::Br:
1351 cast<BranchInst>(U)->swapSuccessors();
1353 case Instruction::Xor:
1360 "canFreelyInvertAllUsersOf() ?");
1367Value *InstCombinerImpl::dyn_castNegVal(
Value *V)
const {
1377 if (
C->getType()->getElementType()->isIntegerTy())
1381 for (
unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1386 if (isa<UndefValue>(Elt))
1389 if (!isa<ConstantInt>(Elt))
1396 if (
auto *CV = dyn_cast<Constant>(V))
1397 if (CV->getType()->isVectorTy() &&
1398 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1411Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1412 BinaryOperator &BO,
bool OpsFromSigned, std::array<Value *, 2> IntOps,
1416 Type *IntTy = IntOps[0]->getType();
1421 unsigned MaxRepresentableBits =
1426 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1430 auto IsNonZero = [&](
unsigned OpNo) ->
bool {
1431 if (OpsKnown[OpNo].hasKnownBits() &&
1432 OpsKnown[OpNo].getKnownBits(
SQ).isNonZero())
1437 auto IsNonNeg = [&](
unsigned OpNo) ->
bool {
1441 return OpsKnown[OpNo].getKnownBits(
SQ).isNonNegative();
1445 auto IsValidPromotion = [&](
unsigned OpNo) ->
bool {
1447 if (OpsFromSigned != isa<SIToFPInst>(BO.
getOperand(OpNo)) &&
1456 if (MaxRepresentableBits < IntSz) {
1466 NumUsedLeadingBits[OpNo] =
1467 IntSz - OpsKnown[OpNo].getKnownBits(
SQ).countMinLeadingZeros();
1475 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1478 return !OpsFromSigned || BO.
getOpcode() != Instruction::FMul ||
1483 if (Op1FpC !=
nullptr) {
1485 if (OpsFromSigned && BO.
getOpcode() == Instruction::FMul &&
1490 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1492 if (Op1IntC ==
nullptr)
1495 : Instruction::UIToFP,
1496 Op1IntC, FPTy,
DL) != Op1FpC)
1500 IntOps[1] = Op1IntC;
1504 if (IntTy != IntOps[1]->
getType())
1507 if (Op1FpC ==
nullptr) {
1508 if (!IsValidPromotion(1))
1511 if (!IsValidPromotion(0))
1517 bool NeedsOverflowCheck =
true;
1520 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1521 unsigned OverflowMaxCurBits =
1522 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1523 bool OutputSigned = OpsFromSigned;
1525 case Instruction::FAdd:
1526 IntOpc = Instruction::Add;
1527 OverflowMaxOutputBits += OverflowMaxCurBits;
1529 case Instruction::FSub:
1530 IntOpc = Instruction::Sub;
1531 OverflowMaxOutputBits += OverflowMaxCurBits;
1533 case Instruction::FMul:
1534 IntOpc = Instruction::Mul;
1535 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1541 if (OverflowMaxOutputBits < IntSz) {
1542 NeedsOverflowCheck =
false;
1545 if (IntOpc == Instruction::Sub)
1546 OutputSigned =
true;
1552 if (NeedsOverflowCheck &&
1553 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1557 if (
auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1558 IntBO->setHasNoSignedWrap(OutputSigned);
1559 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1572 std::array<Value *, 2> IntOps = {
nullptr,
nullptr};
1592 if (
Instruction *R = foldFBinOpOfIntCastsFromSign(BO,
false,
1593 IntOps, Op1FpC, OpsKnown))
1595 return foldFBinOpOfIntCastsFromSign(BO,
true, IntOps,
1611 !
X->getType()->isIntOrIntVectorTy(1))
1630 C = dyn_cast<Constant>(IsTrueArm ? SI->getTrueValue()
1631 : SI->getFalseValue());
1632 }
else if (
match(SI->getCondition(),
1638 C = dyn_cast<Constant>(
Op);
1659 bool FoldWithMultiUse) {
1661 if (!SI->hasOneUse() && !FoldWithMultiUse)
1664 Value *TV = SI->getTrueValue();
1665 Value *FV = SI->getFalseValue();
1666 if (!(isa<Constant>(TV) || isa<Constant>(FV)))
1670 if (SI->getType()->isIntOrIntVectorTy(1))
1680 if (
auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1681 if (CI->hasOneUse()) {
1682 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1683 if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))
1691 if (!NewTV && !NewFV)
1729 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&
I);
1733 std::optional<bool> ImpliedCond =
1735 Ops[0], Ops[1],
DL, LHSIsTrue);
1745 if (NumPHIValues == 0)
1755 if (UI != &
I && !
I.isIdenticalTo(UI))
1766 Value *NonSimplifiedInVal =
nullptr;
1767 for (
unsigned i = 0; i != NumPHIValues; ++i) {
1776 if (NonSimplifiedBB)
return nullptr;
1778 NonSimplifiedBB = InBB;
1779 NonSimplifiedInVal = InVal;
1784 if (isa<InvokeInst>(InVal))
1785 if (cast<Instruction>(InVal)->
getParent() == NonSimplifiedBB)
1802 if (NonSimplifiedBB !=
nullptr) {
1818 if (NonSimplifiedBB) {
1822 U = NonSimplifiedInVal;
1824 U = U->DoPHITranslation(PN->
getParent(), NonSimplifiedBB);
1829 for (
unsigned i = 0; i != NumPHIValues; ++i) {
1830 if (NewPhiValues[i])
1838 if (
User == &
I)
continue;
1844 const_cast<PHINode &
>(*NewPN),
1853 auto *Phi0 = dyn_cast<PHINode>(BO.
getOperand(0));
1854 auto *Phi1 = dyn_cast<PHINode>(BO.
getOperand(1));
1855 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
1856 Phi0->getNumOperands() != Phi1->getNumOperands())
1877 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &>
T) {
1878 auto &Phi0Use = std::get<0>(
T);
1879 auto &Phi1Use = std::get<1>(
T);
1880 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
1882 Value *Phi0UseV = Phi0Use.get();
1883 Value *Phi1UseV = Phi1Use.get();
1886 else if (Phi1UseV ==
C)
1893 if (
all_of(
zip(Phi0->operands(), Phi1->operands()),
1894 CanFoldIncomingValuePair)) {
1897 assert(NewIncomingValues.
size() == Phi0->getNumOperands() &&
1898 "The number of collected incoming values should equal the number "
1899 "of the original PHINode operands!");
1900 for (
unsigned I = 0;
I < Phi0->getNumOperands();
I++)
1901 NewPhi->
addIncoming(NewIncomingValues[
I], Phi0->getIncomingBlock(
I));
1906 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
1913 ConstBB = Phi0->getIncomingBlock(0);
1914 OtherBB = Phi0->getIncomingBlock(1);
1916 ConstBB = Phi0->getIncomingBlock(1);
1917 OtherBB = Phi0->getIncomingBlock(0);
1927 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->
getTerminator());
1928 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
1935 for (
auto BBIter = BO.
getParent()->
begin(); &*BBIter != &BO; ++BBIter)
1948 Phi0->getIncomingValueForBlock(OtherBB),
1949 Phi1->getIncomingValueForBlock(OtherBB));
1950 if (
auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
1951 NotFoldedNewBO->copyIRFlags(&BO);
1961 if (!isa<Constant>(
I.getOperand(1)))
1964 if (
auto *Sel = dyn_cast<SelectInst>(
I.getOperand(0))) {
1967 }
else if (
auto *PN = dyn_cast<PHINode>(
I.getOperand(0))) {
1978 if (
GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
1985 if (!isa<VectorType>(Inst.
getType()))
1991 cast<VectorType>(Inst.
getType())->getElementCount());
1993 cast<VectorType>(Inst.
getType())->getElementCount());
1998 Value *L0, *L1, *R0, *R1;
2003 cast<ShuffleVectorInst>(
LHS)->isConcat() &&
2004 cast<ShuffleVectorInst>(
RHS)->isConcat()) {
2011 if (
auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2014 if (
auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2021 if (
auto *BO = dyn_cast<BinaryOperator>(V))
2025 M, Intrinsic::experimental_vector_reverse, V->getType());
2038 return createBinOpReverse(V1, V2);
2042 return createBinOpReverse(V1,
RHS);
2046 return createBinOpReverse(
LHS, V2);
2056 if (
auto *BO = dyn_cast<BinaryOperator>(XY))
2065 V1->
getType() == V2->getType() &&
2068 return createBinOpShuffle(V1, V2, Mask);
2077 auto *LShuf = cast<ShuffleVectorInst>(
LHS);
2078 auto *RShuf = cast<ShuffleVectorInst>(
RHS);
2083 if (LShuf->isSelect() &&
2085 RShuf->isSelect() &&
2103 auto *InstVTy = dyn_cast<FixedVectorType>(Inst.
getType());
2108 cast<FixedVectorType>(V1->
getType())->getNumElements() <=
2109 InstVTy->getNumElements()) {
2111 "Shuffle should not change scalar type");
2118 bool ConstOp1 = isa<Constant>(
RHS);
2120 unsigned SrcVecNumElts =
2121 cast<FixedVectorType>(V1->
getType())->getNumElements();
2124 bool MayChange =
true;
2125 unsigned NumElts = InstVTy->getNumElements();
2126 for (
unsigned I = 0;
I < NumElts; ++
I) {
2128 if (ShMask[
I] >= 0) {
2129 assert(ShMask[
I] < (
int)NumElts &&
"Not expecting narrowing shuffle");
2137 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2138 I >= SrcVecNumElts) {
2142 NewVecC[ShMask[
I]] = CElt;
2153 if (
I >= SrcVecNumElts || ShMask[
I] < 0) {
2158 if (!MaybePoison || !isa<PoisonValue>(MaybePoison)) {
2175 Value *NewLHS = ConstOp1 ? V1 : NewC;
2176 Value *NewRHS = ConstOp1 ? NewC : V1;
2177 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2184 if (isa<ShuffleVectorInst>(
RHS))
2217 if (isa<FPMathOperator>(R)) {
2218 R->copyFastMathFlags(&Inst);
2221 if (
auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2222 NewInstBO->copyIRFlags(R);
2251 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2252 (Op0->
hasOneUse() || Op1->hasOneUse()))) {
2270 if (!willNotOverflow(BO.
getOpcode(),
X,
Y, BO, IsSext))
2276 if (
auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2278 NewBinOp->setHasNoSignedWrap();
2280 NewBinOp->setHasNoUnsignedWrap();
2298 if (!
GEP.hasAllConstantIndices())
2313 bool IsInBounds =
GEP.isInBounds();
2314 Type *Ty =
GEP.getSourceElementType();
2315 Value *NewTrueC = Builder.
CreateGEP(Ty, TrueC, IndexC,
"", IsInBounds);
2316 Value *NewFalseC = Builder.
CreateGEP(Ty, FalseC, IndexC,
"", IsInBounds);
2329 Type *PtrTy = Src->getType()->getScalarType();
2330 if (
GEP.hasAllConstantIndices() &&
2331 (Src->hasOneUse() || Src->hasAllConstantIndices())) {
2335 bool IsFirstType =
true;
2336 unsigned NumVarIndices = 0;
2337 for (
auto Pair :
enumerate(Src->indices())) {
2338 if (!isa<ConstantInt>(Pair.value())) {
2340 IsFirstType =
false;
2341 NumVarIndices = Pair.index() + 1;
2348 if (NumVarIndices != Src->getNumIndices()) {
2369 if (!
Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero())) {
2372 if (Src->hasAllConstantIndices())
2384 Src->getNumIndices() - NumVarIndices));
2391 IsInBounds &=
Idx.isNonNegative() == ConstIndices[0].isNonNegative();
2396 Indices,
"", IsInBounds));
2399 if (Src->getResultElementType() !=
GEP.getSourceElementType())
2405 bool EndsWithSequential =
false;
2408 EndsWithSequential =
I.isSequential();
2411 if (EndsWithSequential) {
2414 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2432 if (Src->getNumOperands() == 2) {
2438 Indices.
append(Src->op_begin()+1, Src->op_end()-1);
2441 }
else if (isa<Constant>(*
GEP.idx_begin()) &&
2442 cast<Constant>(*
GEP.idx_begin())->isNullValue() &&
2443 Src->getNumOperands() != 1) {
2445 Indices.
append(Src->op_begin()+1, Src->op_end());
2449 if (!Indices.
empty())
2452 Src->getSourceElementType(), Src->getOperand(0), Indices,
"",
2460 bool &DoesConsume,
unsigned Depth) {
2461 static Value *
const NonNull =
reinterpret_cast<Value *
>(uintptr_t(1));
2479 if (!WillInvertAllUses)
2484 if (
auto *
I = dyn_cast<CmpInst>(V)) {
2495 DoesConsume,
Depth))
2498 DoesConsume,
Depth))
2507 DoesConsume,
Depth))
2510 DoesConsume,
Depth))
2519 DoesConsume,
Depth))
2528 DoesConsume,
Depth))
2540 bool LocalDoesConsume = DoesConsume;
2542 LocalDoesConsume,
Depth))
2545 LocalDoesConsume,
Depth)) {
2546 DoesConsume = LocalDoesConsume;
2549 DoesConsume,
Depth);
2550 assert(NotB !=
nullptr &&
2551 "Unable to build inverted value for known freely invertable op");
2552 if (
auto *II = dyn_cast<IntrinsicInst>(V))
2561 if (
PHINode *PN = dyn_cast<PHINode>(V)) {
2562 bool LocalDoesConsume = DoesConsume;
2564 for (
Use &U : PN->operands()) {
2565 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2569 if (NewIncomingVal ==
nullptr)
2572 if (NewIncomingVal == V)
2575 IncomingValues.
emplace_back(NewIncomingVal, IncomingBlock);
2578 DoesConsume = LocalDoesConsume;
2584 for (
auto [Val, Pred] : IncomingValues)
2593 DoesConsume,
Depth))
2600 DoesConsume,
Depth))
2609 bool IsLogical,
Value *
A,
2611 bool LocalDoesConsume = DoesConsume;
2613 LocalDoesConsume,
Depth))
2616 LocalDoesConsume,
Depth)) {
2618 LocalDoesConsume,
Depth);
2619 DoesConsume = LocalDoesConsume;
2629 return TryInvertAndOrUsingDeMorgan(Instruction::And,
false,
A,
2633 return TryInvertAndOrUsingDeMorgan(Instruction::Or,
false,
A,
2637 return TryInvertAndOrUsingDeMorgan(Instruction::And,
true,
A,
2641 return TryInvertAndOrUsingDeMorgan(Instruction::Or,
true,
A,
2650 Type *GEPType =
GEP.getType();
2651 Type *GEPEltType =
GEP.getSourceElementType();
2660 if (
auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
2661 auto VWidth = GEPFVTy->getNumElements();
2662 APInt PoisonElts(VWidth, 0);
2678 bool MadeChange =
false;
2682 Type *NewScalarIndexTy =
2692 Type *IndexTy = (*I)->getType();
2693 Type *NewIndexType =
2696 cast<VectorType>(IndexTy)->getElementCount())
2708 if (IndexTy != NewIndexType) {
2720 if (!GEPEltType->
isIntegerTy(8) &&
GEP.hasAllConstantIndices()) {
2729 if (
auto *PN = dyn_cast<PHINode>(PtrOp)) {
2730 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
2745 for (
auto I = PN->op_begin()+1,
E = PN->op_end();
I !=
E; ++
I) {
2746 auto *Op2 = dyn_cast<GetElementPtrInst>(*
I);
2747 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
2748 Op1->getSourceElementType() != Op2->getSourceElementType())
2756 Type *CurTy =
nullptr;
2758 for (
unsigned J = 0,
F = Op1->getNumOperands(); J !=
F; ++J) {
2759 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
2762 if (Op1->getOperand(J) != Op2->getOperand(J)) {
2771 assert(CurTy &&
"No current type?");
2791 CurTy = Op1->getSourceElementType();
2803 if (DI != -1 && !PN->hasOneUse())
2806 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
2819 PN->getNumOperands());
2822 for (
auto &
I : PN->operands())
2823 NewPN->
addIncoming(cast<GEPOperator>(
I)->getOperand(DI),
2824 PN->getIncomingBlock(
I));
2826 NewGEP->setOperand(DI, NewPN);
2829 NewGEP->insertBefore(*
GEP.getParent(),
GEP.getParent()->getFirstInsertionPt());
2833 if (
auto *Src = dyn_cast<GEPOperator>(PtrOp))
2839 if (
GEP.getNumIndices() == 1 && !IsGEPSrcEleScalable) {
2840 unsigned AS =
GEP.getPointerAddressSpace();
2841 if (
GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
2845 if (TyAllocSize == 1) {
2854 GEPType ==
Y->getType()) {
2855 bool HasSameUnderlyingObject =
2857 bool Changed =
false;
2858 GEP.replaceUsesWithIf(
Y, [&](
Use &U) {
2859 bool ShouldReplace = HasSameUnderlyingObject ||
2860 isa<ICmpInst>(U.getUser()) ||
2861 isa<PtrToIntInst>(U.getUser());
2862 Changed |= ShouldReplace;
2863 return ShouldReplace;
2865 return Changed ? &
GEP :
nullptr;
2888 if (
GEP.getNumIndices() == 1) {
2899 GEP.getPointerOperand(), Idx1);
2913 GEP.getResultElementType(),
GEP.getPointerOperand(),
2916 GEP.getResultElementType(), NewPtr,
2921 if (!
GEP.isInBounds()) {
2924 APInt BasePtrOffset(IdxWidth, 0);
2925 Value *UnderlyingPtrOp =
2928 bool CanBeNull, CanBeFreed;
2930 DL, CanBeNull, CanBeFreed);
2931 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
2932 if (
GEP.accumulateConstantOffset(
DL, BasePtrOffset) &&
2934 APInt AllocSize(IdxWidth, DerefBytes);
2935 if (BasePtrOffset.
ule(AllocSize)) {
2937 GEP.getSourceElementType(), PtrOp, Indices,
GEP.getName());
2951 if (isa<ConstantPointerNull>(V))
2953 if (
auto *LI = dyn_cast<LoadInst>(V))
2954 return isa<GlobalVariable>(LI->getPointerOperand());
2978 return Dest && Dest->Ptr == UsedV;
2992 switch (
I->getOpcode()) {
2997 case Instruction::AddrSpaceCast:
2998 case Instruction::BitCast:
2999 case Instruction::GetElementPtr:
3004 case Instruction::ICmp: {
3011 unsigned OtherIndex = (ICI->
getOperand(0) == PI) ? 1 : 0;
3018 auto AlignmentAndSizeKnownValid = [](
CallBase *CB) {
3022 const APInt *Alignment;
3024 return match(CB->getArgOperand(0),
m_APInt(Alignment)) &&
3028 auto *CB = dyn_cast<CallBase>(AI);
3030 if (CB && TLI.
getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3031 TLI.
has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3032 !AlignmentAndSizeKnownValid(CB))
3038 case Instruction::Call:
3045 case Intrinsic::memmove:
3046 case Intrinsic::memcpy:
3047 case Intrinsic::memset: {
3049 if (
MI->isVolatile() ||
MI->getRawDest() != PI)
3053 case Intrinsic::assume:
3054 case Intrinsic::invariant_start:
3055 case Intrinsic::invariant_end:
3056 case Intrinsic::lifetime_start:
3057 case Intrinsic::lifetime_end:
3058 case Intrinsic::objectsize:
3061 case Intrinsic::launder_invariant_group:
3062 case Intrinsic::strip_invariant_group:
3091 case Instruction::Store: {
3093 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3101 }
while (!Worklist.
empty());
3124 std::unique_ptr<DIBuilder> DIB;
3125 if (isa<AllocaInst>(
MI)) {
3131 for (
unsigned i = 0, e =
Users.size(); i != e; ++i) {
3143 II,
DL, &
TLI,
AA,
true, &InsertedInstructions);
3144 for (
Instruction *Inserted : InsertedInstructions)
3152 for (
unsigned i = 0, e =
Users.size(); i != e; ++i) {
3161 C->isFalseWhenEqual()));
3162 }
else if (
auto *SI = dyn_cast<StoreInst>(
I)) {
3163 for (
auto *DVI : DVIs)
3164 if (DVI->isAddressOfVariable())
3166 for (
auto *DPV : DPVs)
3167 if (DPV->isAddressOfVariable())
3210 for (
auto *DVI : DVIs)
3211 if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
3212 DVI->eraseFromParent();
3213 for (
auto *DPV : DPVs)
3214 if (DPV->isAddressOfVariable() || DPV->getExpression()->startsWithDeref())
3215 DPV->eraseFromParent();
3261 if (FreeInstrBB->
size() != 2) {
3263 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3265 auto *Cast = dyn_cast<CastInst>(&Inst);
3266 if (!Cast || !Cast->isNoopCast(
DL))
3287 "Broken CFG: missing edge from predecessor to successor");
3292 if (&Instr == FreeInstrBBTerminator)
3294 Instr.moveBeforePreserving(TI);
3297 "Only the branch instruction should remain");
3308 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0, Attribute::NonNull);
3309 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3310 if (Dereferenceable.
isValid()) {
3312 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0,
3313 Attribute::Dereferenceable);
3314 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.
getContext(), 0, Bytes);
3323 if (isa<UndefValue>(
Op)) {
3331 if (isa<ConstantPointerNull>(
Op))
3367 FPClassTest ReturnClass =
F->getAttributes().getRetNoFPClass();
3368 if (ReturnClass ==
fcNone)
3385 bool Changed =
false;
3386 while (
Instruction *Prev =
I.getPrevNonDebugInstruction()) {
3391 if (Prev->isEHPad())
3422 return BBI->isDebugOrPseudoInst() ||
3423 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
3428 if (BBI != FirstInstr)
3430 }
while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
3432 return dyn_cast<StoreInst>(BBI);
3444 if (!
DeadEdges.insert({From, To}).second)
3449 for (
Use &U : PN.incoming_values())
3450 if (PN.getIncomingBlock(U) ==
From && !isa<PoisonValue>(U)) {
3466 std::next(
I->getReverseIterator())))) {
3467 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
3471 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
3474 Inst.dropDbgRecords();
3482 for (
Value *V : Changed)
3509 if (Succ == LiveSucc)
3535 if (isa<SelectInst>(
Cond) &&
3554 auto *Cmp = cast<CmpInst>(
Cond);
3561 if (isa<UndefValue>(
Cond)) {
3565 if (
auto *CI = dyn_cast<ConstantInt>(
Cond)) {
3581 for (
auto Case : SI.cases()) {
3583 assert(isa<ConstantInt>(NewCase) &&
3584 "Result of expression should be constant");
3585 Case.setValue(cast<ConstantInt>(NewCase));
3593 for (
auto Case : SI.cases()) {
3595 assert(isa<ConstantInt>(NewCase) &&
3596 "Result of expression should be constant");
3597 Case.setValue(cast<ConstantInt>(NewCase));
3605 all_of(SI.cases(), [&](
const auto &Case) {
3606 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
3612 Value *NewCond = Op0;
3619 for (
auto Case : SI.cases()) {
3620 const APInt &CaseVal = Case.getCaseValue()->getValue();
3622 : CaseVal.
lshr(ShiftAmt);
3623 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
3631 bool IsZExt = isa<ZExtInst>(
Cond);
3635 if (
all_of(SI.cases(), [&](
const auto &Case) {
3636 const APInt &CaseVal = Case.getCaseValue()->getValue();
3637 return IsZExt ? CaseVal.isIntN(NewWidth)
3638 : CaseVal.isSignedIntN(NewWidth);
3640 for (
auto &Case : SI.cases()) {
3641 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
3642 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3654 for (
const auto &
C : SI.cases()) {
3656 std::min(LeadingKnownZeros,
C.getCaseValue()->getValue().countl_zero());
3658 std::min(LeadingKnownOnes,
C.getCaseValue()->getValue().countl_one());
3661 unsigned NewWidth = Known.
getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
3667 if (NewWidth > 0 && NewWidth < Known.
getBitWidth() &&
3668 shouldChangeType(Known.
getBitWidth(), NewWidth)) {
3673 for (
auto Case : SI.cases()) {
3674 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
3675 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3680 if (isa<UndefValue>(
Cond)) {
3684 if (
auto *CI = dyn_cast<ConstantInt>(
Cond)) {
3686 SI.findCaseValue(CI)->getCaseSuccessor());
3700 const APInt *
C =
nullptr;
3702 if (*EV.
idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
3703 OvID == Intrinsic::umul_with_overflow)) {
3708 if (
C->isPowerOf2()) {
3709 return BinaryOperator::CreateShl(
3711 ConstantInt::get(WO->getLHS()->getType(),
C->logBase2()));
3719 if (!WO->hasOneUse())
3733 assert(*EV.
idx_begin() == 1 &&
"Unexpected extract index for overflow inst");
3736 if (OvID == Intrinsic::usub_with_overflow)
3741 if (OvID == Intrinsic::smul_with_overflow &&
3742 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
3743 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
3746 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
3747 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
3752 ConstantInt::get(WO->getLHS()->getType(),
3763 WO->getBinaryOp(), *
C, WO->getNoWrapKind());
3768 auto *OpTy = WO->getRHS()->getType();
3769 auto *NewLHS = WO->getLHS();
3773 ConstantInt::get(OpTy, NewRHSC));
3791 const unsigned *exti, *exte, *insi, *inse;
3792 for (exti = EV.
idx_begin(), insi =
IV->idx_begin(),
3793 exte = EV.
idx_end(), inse =
IV->idx_end();
3794 exti != exte && insi != inse;
3808 if (exti == exte && insi == inse)
3841 if (
Instruction *R = foldExtractOfOverflowIntrinsic(EV))
3844 if (
LoadInst *L = dyn_cast<LoadInst>(Agg)) {
3846 if (
auto *STy = dyn_cast<StructType>(Agg->
getType());
3847 STy && STy->containsScalableVectorType())
3855 if (L->isSimple() && L->hasOneUse()) {
3867 L->getPointerOperand(), Indices);
3871 NL->setAAMetadata(L->getAAMetadata());
3878 if (
auto *PN = dyn_cast<PHINode>(Agg))
3884 if (
auto *SI = dyn_cast<SelectInst>(Agg))
3901 switch (Personality) {
3930 cast<ArrayType>(
LHS->
getType())->getNumElements()
3932 cast<ArrayType>(
RHS->
getType())->getNumElements();
3944 bool MakeNewInstruction =
false;
3946 bool CleanupFlag =
LI.isCleanup();
3949 for (
unsigned i = 0, e =
LI.getNumClauses(); i != e; ++i) {
3950 bool isLastClause = i + 1 == e;
3951 if (
LI.isCatch(i)) {
3958 if (AlreadyCaught.
insert(TypeInfo).second) {
3963 MakeNewInstruction =
true;
3970 MakeNewInstruction =
true;
3971 CleanupFlag =
false;
3982 assert(
LI.isFilter(i) &&
"Unsupported landingpad clause!");
3990 if (!NumTypeInfos) {
3993 MakeNewInstruction =
true;
3994 CleanupFlag =
false;
3998 bool MakeNewFilter =
false;
4000 if (isa<ConstantAggregateZero>(FilterClause)) {
4002 assert(NumTypeInfos > 0 &&
"Should have handled empty filter already!");
4008 MakeNewInstruction =
true;
4015 if (NumTypeInfos > 1)
4016 MakeNewFilter =
true;
4020 NewFilterElts.
reserve(NumTypeInfos);
4025 bool SawCatchAll =
false;
4026 for (
unsigned j = 0; j != NumTypeInfos; ++j) {
4054 if (SeenInFilter.
insert(TypeInfo).second)
4055 NewFilterElts.
push_back(cast<Constant>(Elt));
4060 MakeNewInstruction =
true;
4065 if (NewFilterElts.
size() < NumTypeInfos)
4066 MakeNewFilter =
true;
4068 if (MakeNewFilter) {
4070 NewFilterElts.
size());
4072 MakeNewInstruction =
true;
4081 if (MakeNewFilter && !NewFilterElts.
size()) {
4082 assert(MakeNewInstruction &&
"New filter but not a new instruction!");
4083 CleanupFlag =
false;
4094 for (
unsigned i = 0, e = NewClauses.
size(); i + 1 < e; ) {
4097 for (j = i; j != e; ++j)
4098 if (!isa<ArrayType>(NewClauses[j]->
getType()))
4104 for (
unsigned k = i; k + 1 < j; ++k)
4108 std::stable_sort(NewClauses.
begin() + i, NewClauses.
begin() + j,
4110 MakeNewInstruction =
true;
4129 for (
unsigned i = 0; i + 1 < NewClauses.
size(); ++i) {
4139 for (
unsigned j = NewClauses.
size() - 1; j != i; --j) {
4140 Value *LFilter = NewClauses[j];
4151 NewClauses.
erase(J);
4152 MakeNewInstruction =
true;
4162 if (isa<ConstantAggregateZero>(LFilter)) {
4165 if (isa<ConstantAggregateZero>(
Filter)) {
4166 assert(FElts <= LElts &&
"Should have handled this case earlier!");
4168 NewClauses.
erase(J);
4169 MakeNewInstruction =
true;
4175 if (isa<ConstantAggregateZero>(
Filter)) {
4178 assert(FElts > 0 &&
"Should have eliminated the empty filter earlier!");
4179 for (
unsigned l = 0; l != LElts; ++l)
4182 NewClauses.
erase(J);
4183 MakeNewInstruction =
true;
4194 bool AllFound =
true;
4195 for (
unsigned f = 0; f != FElts; ++f) {
4198 for (
unsigned l = 0; l != LElts; ++l) {
4200 if (LTypeInfo == FTypeInfo) {
4210 NewClauses.
erase(J);
4211 MakeNewInstruction =
true;
4219 if (MakeNewInstruction) {
4222 for (
unsigned i = 0, e = NewClauses.
size(); i != e; ++i)
4227 if (NewClauses.
empty())
4235 if (
LI.isCleanup() != CleanupFlag) {
4236 assert(!CleanupFlag &&
"Adding a cleanup, not removing one?!");
4237 LI.setCleanup(CleanupFlag);
4261 auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
4266 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
4280 Use *MaybePoisonOperand =
nullptr;
4281 for (
Use &U : OrigOpInst->operands()) {
4282 if (isa<MetadataAsValue>(U.get()) ||
4285 if (!MaybePoisonOperand)
4286 MaybePoisonOperand = &U;
4291 OrigOpInst->dropPoisonGeneratingFlagsAndMetadata();
4294 if (!MaybePoisonOperand)
4299 MaybePoisonOperand->get(), MaybePoisonOperand->get()->
getName() +
".fr");
4301 replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
4312 Use *StartU =
nullptr;
4330 Value *StartV = StartU->get();
4342 if (!Visited.
insert(V).second)
4345 if (Visited.
size() > 32)
4362 I->dropPoisonGeneratingFlagsAndMetadata();
4364 if (StartNeedsFreeze) {
4376 if (isa<Constant>(
Op) ||
Op->hasOneUse())
4385 if (isa<Argument>(
Op)) {
4389 auto MoveBeforeOpt = cast<Instruction>(
Op)->getInsertionPointAfterDef();
4392 MoveBefore = *MoveBeforeOpt;
4396 if (isa<DbgInfoIntrinsic>(MoveBefore))
4397 MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator();
4400 MoveBefore.setHeadBit(
false);
4402 bool Changed =
false;
4403 if (&FI != &*MoveBefore) {
4404 FI.
moveBefore(*MoveBefore->getParent(), MoveBefore);
4408 Op->replaceUsesWithIf(&FI, [&](
Use &U) ->
bool {
4410 Changed |= Dominates;
4419 for (
auto *U : V->users()) {
4420 if (isa<ShuffleVectorInst>(U))
4429 Value *Op0 =
I.getOperand(0);
4435 if (
auto *PN = dyn_cast<PHINode>(Op0)) {
4458 auto getUndefReplacement = [&
I](
Type *Ty) {
4461 for (
const auto *U :
I.users()) {
4470 else if (BestValue !=
C)
4471 BestValue = NullValue;
4473 assert(BestValue &&
"Must have at least one use");
4488 Constant *ReplaceC = getUndefReplacement(
I.getType()->getScalarType());
4503 auto *CB = dyn_cast<CallBase>(
I);
4522 for (
const User *U :
I.users()) {
4523 if (Visited.
insert(U).second)
4528 while (!AllocaUsers.
empty()) {
4529 auto *UserI = cast<Instruction>(AllocaUsers.
pop_back_val());
4530 if (isa<BitCastInst>(UserI) || isa<GetElementPtrInst>(UserI) ||
4531 isa<AddrSpaceCastInst>(UserI)) {
4552 if (isa<PHINode>(
I) ||
I->isEHPad() ||
I->mayThrow() || !
I->willReturn() ||
4560 if (isa<AllocaInst>(
I))
4568 if (
auto *CI = dyn_cast<CallInst>(
I)) {
4569 if (CI->isConvergent())
4575 if (
I->mayWriteToMemory()) {
4582 if (
I->mayReadFromMemory()) {
4589 E =
I->getParent()->end();
4591 if (Scan->mayWriteToMemory())
4595 I->dropDroppableUses([&](
const Use *U) {
4596 auto *
I = dyn_cast<Instruction>(U->getUser());
4597 if (
I &&
I->getParent() != DestBlock) {
4607 I->moveBefore(*DestBlock, InsertPos);
4618 if (!DbgUsers.
empty())
4620 if (!DPValues.
empty())
4641 for (
auto &DbgUser : DbgUsers)
4642 if (DbgUser->getParent() != DestBlock)
4649 if (DVI->getParent() == SrcBlock)
4652 [](
auto *
A,
auto *
B) {
return B->comesBefore(
A); });
4656 for (
auto *
User : DbgUsersToSink) {
4661 if (isa<DbgDeclareInst>(
User))
4666 User->getDebugLoc()->getInlinedAt());
4668 if (!SunkVariables.
insert(DbgUserVariable).second)
4673 if (isa<DbgAssignIntrinsic>(
User))
4676 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(
User->clone()));
4677 if (isa<DbgDeclareInst>(
User) && isa<CastInst>(
I))
4678 DIIClones.back()->replaceVariableLocationOp(
I,
I->getOperand(0));
4683 if (!DIIClones.empty()) {
4688 DIIClone->insertBefore(&*InsertPos);
4702 for (
auto &DPV : DPValues)
4703 if (DPV->getParent() != DestBlock)
4709 for (
DPValue *DPV : DPValuesToSalvage)
4710 if (DPV->getParent() == SrcBlock)
4718 return B->getInstruction()->comesBefore(
A->getInstruction());
4725 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
4727 if (DPValuesToSink.
size() > 1) {
4730 for (
DPValue *DPV : DPValuesToSink) {
4733 DPV->getDebugLoc()->getInlinedAt());
4734 CountMap[std::make_pair(DPV->getInstruction(), DbgUserVariable)] += 1;
4740 for (
auto It : CountMap) {
4741 if (It.second > 1) {
4742 FilterOutMap[It.first] =
nullptr;
4743 DupSet.
insert(It.first.first);
4754 DPV.getDebugLoc()->getInlinedAt());
4756 FilterOutMap.
find(std::make_pair(Inst, DbgUserVariable));
4757 if (FilterIt == FilterOutMap.
end())
4759 if (FilterIt->second !=
nullptr)
4761 FilterIt->second = &DPV;
4770 for (
DPValue *DPV : DPValuesToSink) {
4776 DPV->getDebugLoc()->getInlinedAt());
4780 if (!FilterOutMap.
empty()) {
4781 InstVarPair IVP = std::make_pair(DPV->getInstruction(), DbgUserVariable);
4782 auto It = FilterOutMap.
find(IVP);
4785 if (It != FilterOutMap.
end() && It->second != DPV)
4789 if (!SunkVariables.
insert(DbgUserVariable).second)
4792 if (DPV->isDbgAssign())
4800 if (DPVClones.
empty())
4814 assert(InsertPos.getHeadBit());
4815 for (
DPValue *DPVClone : DPVClones) {
4816 InsertPos->getParent()->insertDbgRecordBefore(DPVClone, InsertPos);
4840 if (
I ==
nullptr)
continue;
4855 auto getOptionalSinkBlockForInst =
4856 [
this](
Instruction *
I) -> std::optional<BasicBlock *> {
4858 return std::nullopt;
4862 unsigned NumUsers = 0;
4864 for (
auto *U :
I->users()) {
4865 if (U->isDroppable())
4868 return std::nullopt;
4872 if (
PHINode *PN = dyn_cast<PHINode>(UserInst)) {
4873 for (
unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
4874 if (PN->getIncomingValue(i) ==
I) {
4878 if (UserParent && UserParent != PN->getIncomingBlock(i))
4879 return std::nullopt;
4880 UserParent = PN->getIncomingBlock(i);
4883 assert(UserParent &&
"expected to find user block!");
4885 if (UserParent && UserParent != UserInst->
getParent())
4886 return std::nullopt;
4892 if (NumUsers == 0) {
4896 return std::nullopt;
4908 return std::nullopt;
4918 return std::nullopt;
4923 auto OptBB = getOptionalSinkBlockForInst(
I);
4925 auto *UserParent = *OptBB;
4933 for (
Use &U :
I->operands())
4934 if (
Instruction *OpI = dyn_cast<Instruction>(U.get()))
4942 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
4955 <<
" New = " << *Result <<
'\n');
4957 Result->copyMetadata(*
I,
4958 {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
4960 I->replaceAllUsesWith(Result);
4963 Result->takeName(
I);
4970 if (isa<PHINode>(Result) != isa<PHINode>(
I)) {
4972 if (isa<PHINode>(
I))
4978 Result->insertInto(InstParent, InsertPos);
4987 <<
" New = " << *
I <<
'\n');
5019 if (!
I->hasMetadataOtherThanDebugLoc())
5022 auto Track = [](
Metadata *ScopeList,
auto &Container) {
5023 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5024 if (!MDScopeList || !Container.insert(MDScopeList).second)
5026 for (
const auto &
MDOperand : MDScopeList->operands())
5027 if (
auto *MDScope = dyn_cast<MDNode>(
MDOperand))
5028 Container.insert(MDScope);
5031 Track(
I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5032 Track(
I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5041 "llvm.experimental.noalias.scope.decl in use ?");
5044 "llvm.experimental.noalias.scope should refer to a single scope");
5046 if (
auto *MD = dyn_cast<MDNode>(
MDOperand))
5047 return !UsedAliasScopesAndLists.
contains(MD) ||
5048 !UsedNoAliasScopesAndLists.
contains(MD);
5073 if (Succ != LiveSucc &&
DeadEdges.insert({BB, Succ}).second)
5074 for (
PHINode &PN : Succ->phis())
5075 for (
Use &U : PN.incoming_values())
5076 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5086 HandleOnlyLiveSuccessor(BB,
nullptr);
5093 if (!Inst.use_empty() &&
5094 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5098 Inst.replaceAllUsesWith(
C);
5101 Inst.eraseFromParent();
5107 for (
Use &U : Inst.operands()) {
5108 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5111 auto *
C = cast<Constant>(U);
5112 Constant *&FoldRes = FoldedConstants[
C];
5118 <<
"\n Old = " << *
C
5119 <<
"\n New = " << *FoldRes <<
'\n');
5128 if (!Inst.isDebugOrPseudoInst()) {
5129 InstrsForInstructionWorklist.
push_back(&Inst);
5130 SeenAliasScopes.
analyse(&Inst);
5138 if (isa<UndefValue>(BI->getCondition())) {
5140 HandleOnlyLiveSuccessor(BB,
nullptr);
5143 if (
auto *
Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5144 bool CondVal =
Cond->getZExtValue();
5145 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5148 }
else if (
SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5149 if (isa<UndefValue>(SI->getCondition())) {
5151 HandleOnlyLiveSuccessor(BB,
nullptr);
5154 if (
auto *
Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5155 HandleOnlyLiveSuccessor(BB,
5156 SI->findCaseValue(
Cond)->getCaseSuccessor());
5166 if (LiveBlocks.
count(&BB))
5169 unsigned NumDeadInstInBB;
5170 unsigned NumDeadDbgInstInBB;
5171 std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
5174 MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
5175 NumDeadInst += NumDeadInstInBB;
5192 Inst->eraseFromParent();
5208 auto &
DL =
F.getParent()->getDataLayout();
5216 if (
auto *Assume = dyn_cast<AssumeInst>(
I))
5224 bool MadeIRChange =
false;
5229 unsigned Iteration = 0;
5235 <<
" on " <<
F.getName()
5236 <<
" reached; stopping without verifying fixpoint\n");
5240 ++NumWorklistIterations;
5241 LLVM_DEBUG(
dbgs() <<
"\n\nINSTCOMBINE ITERATION #" << Iteration <<
" on "
5242 <<
F.getName() <<
"\n");
5245 ORE, BFI, PSI,
DL, LI);
5248 MadeChangeInThisIteration |= IC.
run();
5249 if (!MadeChangeInThisIteration)
5252 MadeIRChange =
true;
5255 "Instruction Combining did not reach a fixpoint after " +
5263 else if (Iteration == 2)
5265 else if (Iteration == 3)
5266 ++NumThreeIterations;
5268 ++NumFourOrMoreIterations;
5270 return MadeIRChange;
5278 OS, MapClassName2PassName);
5281 OS << (Options.
UseLoopInfo ?
"" :
"no-") <<
"use-loop-info;";
5304 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
5308 BFI, PSI, LI, Options))
5339 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
5340 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
F);
5341 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
F);
5342 auto &
TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
F);
5343 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
5344 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
5347 auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
5348 auto *LI = LIWP ? &LIWP->getLoopInfo() :
nullptr;
5350 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
5353 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
5367 "Combine redundant instructions",
false,
false)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements a class to represent arbitrary precision integral constant values and operations...
Expand Atomic instructions
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static bool isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI)
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, LoopInfo *LI, const InstCombineOptions &Opts)
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Constant * constantFoldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static bool isMergedGEPInBounds(GEPOperator &GEP1, GEPOperator &GEP2)
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static bool IsSelect(MachineInstr &MI)
This header defines various interfaces for pass management in LLVM.
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
This defines the Use class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static const uint32_t IV[8]
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
APInt trunc(unsigned width) const
Truncate to new width.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
A container for analyses that lazily runs them and caches their results.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
Class to represent array types.
uint64_t getNumElements() const
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
const Instruction & front() const
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a binary instruction, given the opcode and the two operands.
BinaryOps getOpcode() const
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Represents analyses that only rely on functions' control flow.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the parameter attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_ULT
unsigned less than
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
ConstantArray - Constant Array Declarations.
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getNot(Constant *C)
static Constant * getICmp(unsigned short pred, Constant *LHS, Constant *RHS, bool OnlyIfReduced=false)
get* - Return some common constants without having to specify the full Instruction::OPCODE identifier...
static Constant * getShl(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static Constant * getNeg(Constant *C, bool HasNUW=false, bool HasNSW=false)
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
static ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
Constant Vector Declarations.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
SmallVector< APInt > getGEPIndicesForOffset(Type *&ElemTy, APInt &Offset) const
Get GEP indices to access Offset inside ElemTy.
bool isLegalInteger(uint64_t Width) const
Returns true if the specified type is known to be a native integer type supported by the CPU.
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef< Value * > Indices) const
Returns the offset from the beginning of the type for the specified indices.
This is the common base class for debug info intrinsics for variables.
static bool shouldExecute(unsigned CounterName)
Identifies a unique instance of a variable.
iterator find(const_arg_type_t< KeyT > Val)
void registerBranch(BranchInst *BI)
Add a branch condition to the cache.
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Utility class for floating point operations which can have information about relaxed accuracy require...
Convenience struct for specifying and reasoning about fast-math flags.
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
static bool isTargetIntrinsic(Intrinsic::ID IID)
isTargetIntrinsic - Returns true if IID is an intrinsic specific to a certain target.
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Create an "inbounds" getelementptr.
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", bool IsInBounds=false)
Value * CreateLogicalOp(Instruction::BinaryOps Opc, Value *Cond1, Value *Cond2, const Twine &Name="")
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateFreeze(Value *V, const Twine &Name="")
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
void CollectMetadataToCopy(Instruction *Src, ArrayRef< unsigned > MetadataKinds)
Collect metadata with IDs MetadataKinds from Src which should be added to all created instructions.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", bool IsInBounds=false)
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr, BasicBlock::iterator InsertBefore)
InstCombinePass(InstCombineOptions Opts={})
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void tryToSinkInstructionDPValues(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DPValue * > &DPUsers)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
bool prepareWorklist(Function &F, ReversePostOrderTraversal< BasicBlock * > &RPOT)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth, Instruction *CxtI)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; } into a phi node...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void tryToSinkInstructionDbgValues(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableIntrinsic * > &DbgUsers)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
const DataLayout & getDataLayout() const
static bool isCanonicalPredicate(CmpInst::Predicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
void visit(Iterator Start, Iterator End)
The legacy pass manager's instcombine pass.
InstructionCombiningPass()
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
Instruction * removeOne()
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they mi...
void add(Instruction *I)
Add instruction to the worklist.
void push(Instruction *I)
Push the instruction onto the worklist stack.
Instruction * popDeferred()
void zap()
Check that the worklist is empty and nuke the backing store for the map.
void reserve(size_t Size)
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr if the function does not...
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
const BasicBlock * getParent() const
const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
void dropUBImplyingAttrsAndMetadata()
Drop any attributes or metadata that can cause immediate undefined behavior.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
const MDOperand & getOperand(unsigned I) const
unsigned getNumOperands() const
Return number of MDNode operands.
Tracking metadata reference owned by Metadata.
This is the common base class for memset/memcpy/memmove.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
This class represents min/max intrinsics.
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
MDNode * getScopeList() const
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void preserveSet()
Mark an analysis set as preserved.
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal, BasicBlock::iterator InsertBefore)
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr, BasicBlock::iterator InsertBefore, Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isScalableTy() const
Return true if this is a type whose size is a known multiple of vscale.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
This class represents a cast unsigned integer to floating point.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVMContext & getContext() const
All values hold a context through their type.
uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
constexpr bool isZero() const
An efficient, type-erasing, non-owning reference to a callable.
Type * getIndexedType() const
reverse_self_iterator getReverseIterator()
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
@ C
The default llvm calling convention, compatible with C.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(APInt V)
Match a specific integer value or vector with all elements equal to the value.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
apint_match m_APIntAllowUndef(const APInt *&Res)
Match APInt while allowing undefs in splat vector constants.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
CmpClass_match< LHS, RHS, FCmpInst, FCmpInst::Predicate > m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R)
CastOperator_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
Exact_match< T > m_Exact(const T &SubPattern)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableIntrinsic * > Insns, ArrayRef< DPValue * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool succ_empty(const Instruction *I)
Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
FunctionPass * createInstructionCombiningPass()
std::pair< unsigned, unsigned > removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A,...
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
auto successors(const MachineBasicBlock *BB)
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
gep_type_iterator gep_type_end(const User *GEP)
Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into poison.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
constexpr bool has_single_bit(T Value) noexcept
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from the base pointer (without adding in the base pointer).
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool LowerDbgDeclare(Function &F)
Lowers llvm.dbg.declare intrinsics into appropriate set of llvm.dbg.value intrinsics.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
===------------------------------------------------------------------===// Dbg Intrinsic utilities
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison operands.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DPValue * > *DPValues=nullptr)
Finds the debug info intrinsics describing a value.
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Or
Bitwise or logical OR of integers.
DWARFExpression::Operation Op
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not have undefined behavior.
constexpr unsigned BitWidth
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one of its successors.
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, bool InBounds, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DPValue types only and downcast.
void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
void initializeInstructionCombiningPassPass(PassRegistry &)
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in ExclusionSet.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static unsigned int semanticsPrecision(const fltSemantics &)
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
unsigned getBitWidth() const
Get the bit width of this value.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
A CRTP mix-in to automatically provide informational APIs needed for passes.
SimplifyQuery getWithInstruction(const Instruction *I) const
SimplifyQuery getWithoutUndef() const