#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"

#define DEBUG_TYPE "instcombine"

using namespace PatternMatch;

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window", cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));
  if (ITy->getBitWidth() < 32)
    return Type::getInt32Ty(Ty->getContext());
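// Recognize a memory transfer whose source is an otherwise-unused alloca:
// walk back through one-use GEPs to the underlying object. If the transfer
// reads from an alloca that is never otherwise used, the source is undef.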
  auto *Src = MI->getRawSource();
  while (isa<GetElementPtrInst>(Src)) {
    if (!Src->hasOneUse())
      return false;
    Src = cast<Instruction>(Src)->getOperand(0);
  }
  return isa<AllocaInst>(Src) && Src->hasOneUse();
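// SimplifyAnyMemTransfer: raise the known source/destination alignments, then
// try to lower a small constant-length memcpy/memmove into a single
// load/store pair, carrying alias and loop-parallelism metadata across.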
  if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    // ...
  }

  if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    // ...
  }

  // ...
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength)
    return nullptr;

  // ...
  assert(Size && "0-sized memory transferring should be removed already.");

  // ...
  if (isa<AtomicMemTransferInst>(MI))
    if (*CopyDstAlign < Size || *CopySrcAlign < Size)
      return nullptr;

  // ...
  Value *Src = MI->getArgOperand(1);
  Value *Dest = MI->getArgOperand(0);
  L->setAlignment(*CopySrcAlign);
  L->setAAMetadata(AACopyMD);
  MDNode *LoopMemParallelMD =
      MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
  if (AccessGroupMD)
    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  // ...
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  if (AccessGroupMD)
    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  // ...
  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    // ...
    L->setVolatile(MT->isVolatile());
    // ...
  }
  if (isa<AtomicMemTransferInst>(MI)) {
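// SimplifyAnyMemSet: raise the destination alignment, special-case an undef
// fill value, and lower a constant-length memset of 1/2/4/8 bytes into a
// single store of a splatted integer.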
  const Align KnownAlignment =
      getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
    MI->setDestAlignment(KnownAlignment);
    // ...
  }

  // ...
  if (isa<UndefValue>(MI->getValue())) {
    // ...
  }

  // ...
  assert(Len && "0-sized memory setting should be removed already.");
  const Align Alignment = MI->getDestAlign().valueOrOne();
    if (isa<AtomicMemSetInst>(MI))
      // ...

    // ...
    Constant *FillVal = ConstantInt::get(/* ... */);
    // ...
    auto replaceOpForAssignmentMarkers = [FillC, FillVal](auto *DbgAssign) {
      // ...
      DbgAssign->replaceVariableLocationOp(FillC, FillVal);
    };
    // ...
    if (isa<AtomicMemSetInst>(MI))
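// simplifyMaskedLoad: if the pointer is provably dereferenceable, the mask is
// irrelevant and the masked load can become an unconditional load that
// inherits the intrinsic's metadata.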
  Value *LoadPtr = II.getArgOperand(0);
  const Align Alignment =
      cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();

  // ...
  if (isDereferenceablePointer(LoadPtr, II.getType(),
                               II.getDataLayout(), &II, &AC)) {
    LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                             "unmaskedload");
    LI->copyMetadata(II);
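// simplifyMaskedStore: a null mask makes the store dead; an all-ones mask
// makes it a plain vector store.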
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // ...
  if (ConstMask->isNullValue())
    // ...

  // ...
  if (ConstMask->isAllOnesValue()) {
    Value *StorePtr = II.getArgOperand(1);
    Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
    StoreInst *S =
        new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
    // ...
  }

  if (isa<ScalableVectorType>(ConstMask->getType()))
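// simplifyMaskedGather: an all-ones mask over a splatted pointer reloads the
// same address in every lane, so load the scalar once and splat it.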
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
  if (!ConstMask)
    return nullptr;

  // ...
  if (ConstMask->isAllOnesValue())
    if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) {
      auto *VecTy = cast<VectorType>(II.getType());
      const Align Alignment =
          cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
      LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,
                                              Alignment, "load.scalar");
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // ...
  if (ConstMask->isNullValue())
    // ...

  // ...
      Align Alignment =
          cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
      // ...

  // ...
  if (ConstMask->isAllOnesValue()) {
    Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
    VectorType *WideLoadTy = cast<VectorType>(II.getArgOperand(1)->getType());
    // ...
    StoreInst *S = new StoreInst(Extract, SplatPtr, false, Alignment);
    // ...
  }
  if (isa<ScalableVectorType>(ConstMask->getType()))
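// simplifyInvariantGroupIntrinsic: collapse a chain of
// launder.invariant.group/strip.invariant.group calls down to a single call
// on the underlying pointer.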
  auto *Arg = II.getArgOperand(0);
  auto *StrippedArg = Arg->stripPointerCasts();
  auto *StrippedInvariantGroupsArg = StrippedArg;
  while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
    if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
        Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
      break;
    StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
  }
  if (StrippedArg == StrippedInvariantGroupsArg)
    return nullptr;

  Value *Result = nullptr;
  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
  else
    llvm_unreachable(
        "simplifyInvariantGroupIntrinsic only handles launder and strip");
  if (Result->getType()->getPointerAddressSpace() !=
      II.getType()->getPointerAddressSpace())
    return nullptr;
  // ...
  return cast<Instruction>(Result);
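// foldCttzCtlz: constant-fold and canonicalize cttz/ctlz, e.g. rewriting
// counts of shifted or negated operands into adds/subs against a known
// constant count, and tightening the result with known-bits information.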
  assert((II.getIntrinsicID() == Intrinsic::cttz ||
          II.getIntrinsicID() == Intrinsic::ctlz) &&
         "Expected cttz or ctlz intrinsic");
  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
  Value *Op0 = II.getArgOperand(0);
  Value *Op1 = II.getArgOperand(1);

  // ...
  if (II.getType()->isIntOrIntVectorTy(1)) {
    // ...
  }

  // ...
      return BinaryOperator::CreateAdd(ConstCttz, X);
  // ...
      return BinaryOperator::CreateSub(ConstCttz, X);
  // ...
      Constant *Width =
          ConstantInt::get(II.getType(), II.getType()->getScalarSizeInBits());
      return BinaryOperator::CreateSub(Width, X);
  // ...
      return BinaryOperator::CreateAdd(ConstCtlz, X);
  // ...
      return BinaryOperator::CreateSub(ConstCtlz, X);

  // ...
  if (PossibleZeros == DefiniteZeros) {
    auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
    // ...
  }

  // ...
  if (BitWidth != 1 && !II.hasRetAttr(Attribute::Range) &&
      !II.getMetadata(LLVMContext::MD_range)) {
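// foldCtpop: simplify ctpop using known bits, e.g. an operand with a single
// possibly-set bit reduces to a logical shift right, and attach a tightened
// result range when one can be proven.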
  assert(II.getIntrinsicID() == Intrinsic::ctpop &&
         "Expected ctpop intrinsic");
  // ...
  Value *Op0 = II.getArgOperand(0);
  // ...
  if ((~Known.Zero).isPowerOf2())
    return BinaryOperator::CreateLShr(
        Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));

  // ...
  ConstantRange OldRange =
      II.getRange().value_or(ConstantRange::getFull(BitWidth));
  // ...
  if (Range != OldRange) {
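// simplifyNeonTbl1: a NEON tbl1 with a constant <8 x i8> index vector whose
// indexes are all in range can be converted to a shufflevector.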
  auto *C = dyn_cast<Constant>(II.getArgOperand(1));
  if (!C)
    return nullptr;

  auto *VecTy = cast<FixedVectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  // Only perform this transformation for <8 x i8> vector types.
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
    return nullptr;

  // ...
  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = C->getAggregateElement(I);
    if (!COp || !isa<ConstantInt>(COp))
      return nullptr;

    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();
    // Make sure the mask indices are in range.
    if ((unsigned)Indexes[I] >= NumElts)
      return nullptr;
  }

  auto *V1 = II.getArgOperand(0);
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.arg_size() >= NumOperands && "Not enough operands");
  // ...
  for (unsigned i = 0; i < NumOperands; i++)
    // ...

// ...
  for (; BI != BE; ++BI) {
    if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
      if (I->isDebugOrPseudoInst() ||
          // ...

// ...
  return I.getIntrinsicID() == Intrinsic::vastart ||
         I.getIntrinsicID() == Intrinsic::vacopy;
  assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
    Call.setArgOperand(0, Arg1);
    Call.setArgOperand(1, Arg0);

// ...
  Value *OperationResult = nullptr;
  // ...
  switch (static_cast<unsigned>(Mask)) {
  // ...
  case ~fcZero & ~fcNan:
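// foldIntrinsicIsFPClass: canonicalize llvm.is.fpclass test masks and, when
// the test reduces to an ordered/unordered comparison against infinity,
// zero, or NaN, emit the equivalent fcmp instead.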
  Value *Src0 = II.getArgOperand(0);
  Value *Src1 = II.getArgOperand(1);
  const ConstantInt *CMask = cast<ConstantInt>(Src1);
  // ...
  const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;

  const bool IsStrict =
      II.getFunction()->getAttributes().hasFnAttr(Attribute::StrictFP);
  // ...
    II.setArgOperand(1, ConstantInt::get(Src1->getType(), fneg(Mask)));

  // ...
  if ((OrderedMask == fcInf || OrderedInvertedMask == fcInf) &&
      (IsOrdered || IsUnordered) && !IsStrict) {
    // ...
    if (OrderedInvertedMask == fcInf)
      // ...
  }
  // ...
      (IsOrdered || IsUnordered) && !IsStrict) {
  // ...
      (IsOrdered || IsUnordered) && !IsStrict) {
  // ...
  if (Mask == fcNan && !IsStrict) {
  // ...
  if (!IsStrict && (IsOrdered || IsUnordered) &&
    return std::nullopt;
  // ...
    return std::nullopt;
  // ...
  return *Known0 == *Known1;
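// moveAddAfterMinMax: turn min/max(X + C0, C1) into min/max(X, C1 - C0) + C0
// when the add has the matching nsw/nuw flag and the constant subtraction
// cannot overflow.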
  assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
          MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
         "Expected a min or max intrinsic");

  // ...
  Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
  const APInt *C0, *C1;
  // ...

  // ...
  bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
  auto *Add = cast<BinaryOperator>(Op0);
  if ((IsSigned && !Add->hasNoSignedWrap()) ||
      (!IsSigned && !Add->hasNoUnsignedWrap()))
    return nullptr;

  // ...
  APInt CDiff =
      IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
  assert(!Overflow && "Expected simplify of min/max");

  // ...
  Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);
  // ...
  return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
                  : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
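// matchSAddSubSat: recognize a clamp of a wider add/sub to the saturation
// bounds of a narrower type and replace it with sadd.sat/ssub.sat on the
// narrow type.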
  const APInt *MinValue, *MaxValue;
  // ...
  } else if (match(&MinMax1,
                   // ...

  // ...
  if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
    return nullptr;

  unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
  // ...
  if (AddSub->getOpcode() == Instruction::Add)
    IntrinsicID = Intrinsic::sadd_sat;
  else if (AddSub->getOpcode() == Instruction::Sub)
    IntrinsicID = Intrinsic::ssub_sat;

// ...
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
  // ...
  const APInt *C0, *C1;
  // ...
  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
    // ...
  case Intrinsic::smin:
    // ...
  case Intrinsic::umax:
    // ...
  case Intrinsic::umin:
    // ...

// ...
  auto *LHS = dyn_cast<MinMaxIntrinsic>(II->getArgOperand(0));
  // ...
  if (InnerMinMaxID != MinMaxID &&
      !(((MinMaxID == Intrinsic::umax && InnerMinMaxID == Intrinsic::smax) ||
         (MinMaxID == Intrinsic::smin && InnerMinMaxID == Intrinsic::umin)) &&
        // ...
  // ...
                                 {LHS->getArgOperand(0), NewC});

// ...
  auto *InnerMM = dyn_cast<IntrinsicInst>(Inner);
  if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
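// factorizeMinMaxTree: when both operands of a min/max are themselves the
// same kind of min/max and share an operand, factor the common operand out
// so only two intrinsic calls remain.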
  auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  auto *RHS = dyn_cast<IntrinsicInst>(II->getArgOperand(1));
  // ...
  if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
      RHS->getIntrinsicID() != MinMaxID ||
      // ...

  // ...
  Value *MinMaxOp = nullptr;
  Value *ThirdOp = nullptr;
  // ...
    if (D == A || C == A) {
      // ...
    } else if (D == B || C == B) {
      // ...
    }
  // ...
    if (D == A || D == B) {
      // ...
    } else if (C == A || C == B) {
      // ...
    }

  if (!MinMaxOp || !ThirdOp)
    return nullptr;
  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::fma:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
    // ...
  }

  // ...
  if (!match(II->getArgOperand(0),
             // ...

  // ...
  if (none_of(II->args(), [](Value *V) { return V->hasOneUse(); }))
    return nullptr;

  // ...
  Type *SrcTy = X->getType();
  for (unsigned i = 1, e = II->arg_size(); i != e; ++i) {
    if (!match(II->getArgOperand(i),
               // ...
        X->getType() != SrcTy)
      return nullptr;
    // ...
  }

  // ...
  Value *NewIntrinsic =
      // ...

template <Intrinsic::ID IntrID>
// ...
  static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
                "This helper only supports BSWAP and BITREVERSE intrinsics");
  // ...
      isa<BinaryOperator>(V)) {
    Value *OldReorderX, *OldReorderY;
  if (!CanReorderLanes)
    return nullptr;

  // ...
  if (!isa<FixedVectorType>(Arg->getType()) ||
      // ...
      !cast<ShuffleVectorInst>(Arg)->isSingleSource())
    return nullptr;

  int Sz = Mask.size();
  // ...
  for (int Idx : Mask) {
    // ...
  }
  // ...
  return UsedIndices.all() ? V : nullptr;
template <Intrinsic::ID IntrID>
// ...
  static_assert(IntrID == Intrinsic::cttz || IntrID == Intrinsic::ctlz,
                "This helper only supports cttz and ctlz intrinsics");
  // ...
  unsigned BitWidth = I1->getType()->getScalarSizeInBits();
  // ...
  Type *Ty = I1->getType();
  // ...
      IntrID == Intrinsic::cttz ? Instruction::Shl : Instruction::LShr,
      IntrID == Intrinsic::cttz
          ? ConstantInt::get(Ty, 1)
          // ...
      cast<Constant>(I1), DL);
  // ...
      IntrID, Builder.CreateOr(CtOp, NewConst),
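// visitCallInst: the main entry point for call simplification. Memory
// intrinsics are normalized first; recognized intrinsic calls are then
// dispatched through the large switch below, and everything else falls
// through to visitCallBase.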
  if (!II)
    return visitCallBase(CI);

  // ...
  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
    if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
      if (NumBytes->isNegative() ||
          (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
        // ...
        assert(AMI->getType()->isVoidTy() &&
               "non void atomic unordered mem intrinsic");
        // ...
      }

  // ...
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
    bool Changed = false;

    // ...
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        // ...
    }

    // ...
    if (auto *M = dyn_cast<MemIntrinsic>(MI))
      if (M->isVolatile())
        // ...

    // ...
    if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          // ...
          Intrinsic::ID MemCpyID =
              isa<AtomicMemMoveInst>(MMI)
                  ? Intrinsic::memcpy_element_unordered_atomic
                  : Intrinsic::memcpy;
          // ...
        }
    }

    // ...
      if (MTI->getSource() == MTI->getDest())
        // ...

    auto IsPointerUndefined = [MI](Value *Ptr) {
      return isa<ConstantPointerNull>(Ptr) &&
             !NullPointerIsDefined(
                 MI->getFunction(),
                 cast<PointerType>(Ptr->getType())->getAddressSpace());
    };
    bool SrcIsUndefined = false;

    // ...
    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      // ...
      SrcIsUndefined = IsPointerUndefined(MTI->getRawSource());
    } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
      // ...
    }

    // ...
    if (SrcIsUndefined || IsPointerUndefined(MI->getRawDest())) {
      // ...
    }

    if (Changed)
      return II;
  }

  // ...
  if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
    auto VWidth = IIFVTy->getNumElements();
    APInt PoisonElts(VWidth, 0);
    // ...
  }

  // ...
  if (II->isCommutative()) {
    if (auto Pair = matchSymmetricPair(II->getOperand(0), II->getOperand(1))) {
      // ...
    }
    // ...
  }

  // ...
  if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
  case Intrinsic::objectsize: {
    // ...
                               &InsertedInstructions)) {
      for (Instruction *Inserted : InsertedInstructions)
        // ...
    }
    // ...
  }
  case Intrinsic::abs: {
    Value *IIOperand = II->getArgOperand(0);
    bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();

    // ...
    if (match(IIOperand,
              // ...
              m_Intrinsic<Intrinsic::abs>(m_Value(Y)))))) {
      // ...
          cast<Instruction>(IIOperand)->hasNoSignedWrap() && IntMinIsPoison;
      // ...
    }

    // ...
    if (std::optional<bool> Known =
            // ...

    // ...
      return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1));
  case Intrinsic::umin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    // ...
      assert(II->getType()->getScalarSizeInBits() != 1 &&
             "Expected simplify of umin with max constant");
    // ...
    if (Value *FoldedCttz =
            foldMinimumOverTrailingOrLeadingZeroCount<Intrinsic::cttz>(
                // ...
    if (Value *FoldedCtlz =
            foldMinimumOverTrailingOrLeadingZeroCount<Intrinsic::ctlz>(
                // ...
    // ...
  }
  case Intrinsic::umax: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    // ...
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
  case Intrinsic::smax:
  case Intrinsic::smin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    // ...
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
      // ...
    }

    // ...
    if ((IID == Intrinsic::umin || IID == Intrinsic::smax) &&
        II->getType()->isIntOrIntVectorTy(1)) {
      return BinaryOperator::CreateAnd(I0, I1);
    }

    // ...
    if ((IID == Intrinsic::umax || IID == Intrinsic::smin) &&
        II->getType()->isIntOrIntVectorTy(1)) {
      return BinaryOperator::CreateOr(I0, I1);
    }

    if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
      // ...
    }

    // ...
    bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax;
    bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin;

    if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
      // ...
      if (KnownSign == std::nullopt) {
        // ...
      } else if (*KnownSign) {
        // ...
      }
    }
    // ...
        return BinaryOperator::CreateOr(I0, X);
    // ...
                                 ConstantInt::get(II->getType(), *RHSC));
    // ...
    if (I0->hasOneUse() && !I1->hasOneUse())
      // ...
    if (IID == Intrinsic::smin || IID == Intrinsic::umax)
      // ...
      if (LHS_CR.icmp(Pred, *RHSC))
        // ...
                                 ConstantInt::get(II->getType(), *RHSC));
  case Intrinsic::bitreverse: {
    Value *IIOperand = II->getArgOperand(0);
    // ...
        X->getType()->isIntOrIntVectorTy(1)) {
      Type *Ty = II->getType();
      // ...
    }

    if (Instruction *crossLogicOpFold =
            foldBitOrderCrossLogicOp<Intrinsic::bitreverse>(IIOperand, Builder))
      return crossLogicOpFold;
    // ...
  }
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);

    // ...
        cast<BinaryOperator>(IIOperand)->getOpcode() == Instruction::Shl
    // ...
    if (BW - LZ - TZ == 8) {
      assert(LZ != TZ && "active byte cannot be in the middle");
      // ...
        return BinaryOperator::CreateNUWShl(
            IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ));
      // ...
      return BinaryOperator::CreateExactLShr(
          IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ));
    }

    // ...
      unsigned C = X->getType()->getScalarSizeInBits() - BW;
      Value *CV = ConstantInt::get(X->getType(), C);
      // ...

    if (Instruction *crossLogicOpFold =
            foldBitOrderCrossLogicOp<Intrinsic::bswap>(IIOperand, Builder)) {
      return crossLogicOpFold;
    }
  case Intrinsic::masked_load:
    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
      // ...
  case Intrinsic::masked_store:
    return simplifyMaskedStore(*II);
  case Intrinsic::masked_gather:
    return simplifyMaskedGather(*II);
  case Intrinsic::masked_scatter:
    return simplifyMaskedScatter(*II);
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
    // ...
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // ...
      if (Power->isMinusOne())
        // ...
                                 II->getArgOperand(0), II);
      // ...
      if (Power->equalsInt(2))
        // ...
                                 II->getArgOperand(0), II);
      // ...
      if (!Power->getValue()[0]) {
  case Intrinsic::cttz:
  case Intrinsic::ctlz:
    // ...
  case Intrinsic::ctpop:
    // ...
  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
    Type *Ty = II->getType();
    // ...
      if (ModuloC != ShAmtC)
        // ...
      assert(/* ... */ &&
             "Shift amount expected to be modulo bitwidth");

      // ...
      if (IID == Intrinsic::fshr) {
        // ...
      }
      assert(IID == Intrinsic::fshl &&
             "All funnel shifts by simple constants should go left");

      // ...
        return BinaryOperator::CreateShl(Op0, ShAmtC);
      // ...
        return BinaryOperator::CreateLShr(Op1,
        // ...
  }
  case Intrinsic::ptrmask: {
    // ...
    Value *InnerPtr, *InnerMask;
    bool Changed = false;
    // ...
    if (match(II->getArgOperand(0),
              // ...
             "Mask types must match");
    // ...
      unsigned NewAlignmentLog =
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow: {
    // ...
    const APInt *C0, *C1;
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    bool IsSigned = IID == Intrinsic::sadd_with_overflow;
    bool HasNWAdd = IsSigned
                        // ...
      APInt NewC =
          IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
      // ...
              IID, X, ConstantInt::get(Arg1->getType(), NewC)));
    // ...
  }
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::usub_with_overflow:
    // ...
  case Intrinsic::ssub_with_overflow: {
    // ...
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat: {
    SaturatingInst *SI = cast<SaturatingInst>(II);
    Type *Ty = SI->getType();
    Value *Arg0 = SI->getLHS();
    Value *Arg1 = SI->getRHS();
    // ...
    if (IID == Intrinsic::usub_sat &&
        // ...

    // ...
        C->isNotMinSignedValue()) {
      // ...
          Intrinsic::sadd_sat, Arg0, NegVal));
    }

    // ...
    if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
      // ...
      const APInt *Val, *Val2;
      // ...
          IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
      if (Other->getIntrinsicID() == IID &&
          // ...
          NewVal = Val->sadd_ov(*Val2, Overflow);
        // ...
            IID, X, ConstantInt::get(II->getType(), NewVal)));
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    // ...
      Intrinsic::ID NewIID;
      switch (IID) {
      case Intrinsic::maxnum:
        NewIID = Intrinsic::minnum;
        break;
      case Intrinsic::minnum:
        NewIID = Intrinsic::maxnum;
        break;
      case Intrinsic::maximum:
        NewIID = Intrinsic::minimum;
        break;
      case Intrinsic::minimum:
        NewIID = Intrinsic::maximum;
        break;
      default:
        llvm_unreachable("unexpected intrinsic ID");
      }
      // ...
      Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
      // ...

    // ...
    if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
      // ...
      switch (IID) {
      case Intrinsic::maxnum:
        // ...
      case Intrinsic::minnum:
        // ...
      case Intrinsic::maximum:
        // ...
      case Intrinsic::minimum:
        // ...
      }
      // ...
          IID, X, ConstantFP::get(Arg0->getType(), Res), II);
    }
    // ...
    if (auto *CI = dyn_cast<CallInst>(V))
      // ...
        X->getType() == Y->getType()) {
      // ...
    }

    // ...
    auto IsMinMaxOrXNegX = [IID, &X](Value *Op0, Value *Op1) {
      // ...
        return Op0->hasOneUse() ||
               (IID != Intrinsic::minimum && IID != Intrinsic::minnum);
      // ...
    };

    if (IsMinMaxOrXNegX(Arg0, Arg1) || IsMinMaxOrXNegX(Arg1, Arg0)) {
      // ...
      if (IID == Intrinsic::minimum || IID == Intrinsic::minnum)
  case Intrinsic::matrix_multiply: {
    // ...
    Value *Op0 = II->getOperand(0);
    Value *Op1 = II->getOperand(1);
    Value *OpNotNeg, *NegatedOp;
    unsigned NegatedOpArg, OtherOpArg;
    // ...
    Value *OtherOp = II->getOperand(OtherOpArg);
    // ...
      NewArgs[NegatedOpArg] = OpNotNeg;
      // ...
  }
  case Intrinsic::fmuladd: {
    // ...
                                II->getFastMathFlags(),
                                // ...
      auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
      FAdd->copyFastMathFlags(II);
      // ...

    // ...
  }
  case Intrinsic::fma: {
    // ...
    Value *Src0 = II->getArgOperand(0);
    Value *Src1 = II->getArgOperand(1);
    Value *Src2 = II->getArgOperand(2);
    // ...
      auto *FAdd = BinaryOperator::CreateFAdd(V, Src2);
      FAdd->copyFastMathFlags(II);
  case Intrinsic::copysign: {
    Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);
    // ...
      if (*KnownSignBit) {
        // ...
      }
    // ...
  }
  case Intrinsic::fabs: {
    // ...
    Value *Arg = II->getArgOperand(0);
    // ...
    if (isa<Constant>(TVal) || isa<Constant>(FVal)) {
      // ...
      FastMathFlags FMF2 = cast<SelectInst>(Arg)->getFastMathFlags();
      // ...
      SI->setFastMathFlags(FMF1 | FMF2);
      // ...
    }

    // ...
    Value *Magnitude, *Sign;
    if (match(II->getArgOperand(0),
              // ...
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::trunc: {
    // ...
  }
  case Intrinsic::cos:
  case Intrinsic::amdgcn_cos: {
    // ...
    Value *Src = II->getArgOperand(0);
    // ...
  }
  case Intrinsic::sin:
  case Intrinsic::amdgcn_sin: {
    // ...
  }
  case Intrinsic::ldexp: {
    // ...
    Value *Src = II->getArgOperand(0);
    Value *Exp = II->getArgOperand(1);
    // ...
        Exp->getType() == InnerExp->getType()) {
      // ...
      FastMathFlags InnerFlags = cast<FPMathOperator>(Src)->getFastMathFlags();
      // ...
        II->setArgOperand(1, NewExp);
        II->setFastMathFlags(InnerFlags);
        // ...
    }

    // ...
                            ConstantFP::get(II->getType(), 1.0));
    // ...
                            ConstantFP::get(II->getType(), 1.0));

    // ...
    Value *SelectCond, *SelectLHS, *SelectRHS;
    if (match(II->getArgOperand(1),
              // ...
      Value *NewLdexp = nullptr;
      // ...
        cast<Instruction>(NewLdexp)->copyFastMathFlags(II);
  case Intrinsic::ptrauth_auth:
  case Intrinsic::ptrauth_resign: {
    // ...
    bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign;
    // ...
    Value *Key = II->getArgOperand(1);
    Value *Disc = II->getArgOperand(2);

    // ...
    Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr;
    if (const auto *CI = dyn_cast<CallBase>(Ptr)) {
      // ...
    } else if (const auto *PtrToInt = dyn_cast<PtrToIntOperator>(Ptr)) {
      // ...
      const auto *CPA = dyn_cast<ConstantPtrAuth>(PtrToInt->getOperand(0));
      if (!CPA || !CPA->isKnownCompatibleWith(Key, Disc, DL))
        // ...

      // ...
      if (NeedSign && isa<ConstantInt>(II->getArgOperand(4))) {
        auto *SignKey = cast<ConstantInt>(II->getArgOperand(3));
        auto *SignDisc = cast<ConstantInt>(II->getArgOperand(4));
        // ...
                                              SignDisc, SignAddrDisc);
        // ...
      }
      // ...
    }

    // ...
    if (AuthKey && NeedSign) {
      // ...
      NewIntrin = Intrinsic::ptrauth_resign;
    } else if (AuthKey) {
      // ...
      NewIntrin = Intrinsic::ptrauth_auth;
    } else if (NeedSign) {
      // ...
      NewIntrin = Intrinsic::ptrauth_sign;
  case Intrinsic::arm_neon_vtbl1:
  case Intrinsic::aarch64_neon_tbl1:
    // ...
  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    // ...
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      // ...
    }

    // ...
    bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
                 IID == Intrinsic::aarch64_neon_umull);
    // ...
    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
        // ...
      }
      // ...
    }

    // ...
    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
      // ...
              dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
  case Intrinsic::arm_neon_aesd:
  case Intrinsic::arm_neon_aese:
  case Intrinsic::aarch64_crypto_aesd:
  case Intrinsic::aarch64_crypto_aese: {
    Value *DataArg = II->getArgOperand(0);
    Value *KeyArg = II->getArgOperand(1);
    // ...
  }
  case Intrinsic::hexagon_V6_vandvrt:
  case Intrinsic::hexagon_V6_vandvrt_128B: {
    // ...
    if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      // ...
      if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
          ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
        break;
      Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);
      // ...
      if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
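// stackrestore: classify the instructions between a stacksave/stackrestore
// pair; if nothing allocates stack or has side effects that could depend on
// it, the restore (or the whole pair) is removable.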
  case Intrinsic::stackrestore: {
    enum class ClassifyResult {
      None,
      Alloca,
      StackRestore,
      CallWithSideEffects,
    };
    auto Classify = [](const Instruction *I) {
      if (isa<AllocaInst>(I))
        return ClassifyResult::Alloca;

      if (auto *CI = dyn_cast<CallInst>(I)) {
        if (auto *II = dyn_cast<IntrinsicInst>(CI)) {
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return ClassifyResult::StackRestore;
          // ...
          if (II->mayHaveSideEffects())
            return ClassifyResult::CallWithSideEffects;
        } else {
          // ...
          return ClassifyResult::CallWithSideEffects;
        }
      }

      return ClassifyResult::None;
    };

    // ...
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave &&
          SS->getParent() == II->getParent()) {
        // ...
        bool CannotRemove = false;
        for (++BI; &*BI != II; ++BI) {
          switch (Classify(&*BI)) {
          case ClassifyResult::None:
            // ...
            break;
          case ClassifyResult::StackRestore:
            // ...
            if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS)
              CannotRemove = true;
            break;
          case ClassifyResult::Alloca:
          case ClassifyResult::CallWithSideEffects:
            // ...
            CannotRemove = true;
            break;
          }
          // ...
        }
        // ...
      }
    }

    // ...
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      switch (Classify(&*BI)) {
      case ClassifyResult::None:
        // ...
        break;
      case ClassifyResult::StackRestore:
        // ...
      case ClassifyResult::Alloca:
      case ClassifyResult::CallWithSideEffects:
        // ...
        CannotRemove = true;
        break;
      }
      // ...
    }

    // ...
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
  case Intrinsic::lifetime_end:
    // ...
    if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
        II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
        II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
      break;
    // ...
          return I.getIntrinsicID() == Intrinsic::lifetime_start;
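// assume: drop or merge redundant assumes, convert recognized patterns
// (nonnull loads, alignment facts, separate_storage hints) into operand
// bundles, and canonicalize existing bundle operands.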
  case Intrinsic::assume: {
    Value *IIOperand = II->getArgOperand(0);
    // ...
    II->getOperandBundlesAsDefs(OpBundles);
    // ...
      assert(isa<AssumeInst>(Assume));
    // ...

    // ...
    if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
      return RemoveConditionFromAssume(Next);

    // ...
    Value *AssumeIntrinsic = II->getCalledOperand();
    // ...
        LHS->getOpcode() == Instruction::Load &&
        // ...
      return RemoveConditionFromAssume(II);
    // ...

    // ...
    for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
      // ...
      if (OBU.getTagName() == "separate_storage") {
        // ...
        auto MaybeSimplifyHint = [&](const Use &U) {
          Value *Hint = U.get();
          // ...
        };
        MaybeSimplifyHint(OBU.Inputs[0]);
        MaybeSimplifyHint(OBU.Inputs[1]);
      }
      // ...
    }

    // ...
        A->getType()->isPointerTy()) {
      // ...
        Replacement->insertBefore(Next);
      // ...
      return RemoveConditionFromAssume(II);
    }

    // ...
    if (auto *Replacement =
            // ...
        Replacement->insertAfter(II);
      // ...
      return RemoveConditionFromAssume(II);
    // ...

    // ...
    for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
      auto &BOI = II->bundle_op_info_begin()[Idx];
      // ...
      if (BOI.End - BOI.Begin > 2)
        // ...

      // ...
      if (BOI.End - BOI.Begin > 0) {
        // ...
      }
      // ...
      if (BOI.End - BOI.Begin > 0)
        II->op_begin()[BOI.Begin].set(CanonRK.WasOn);
      if (BOI.End - BOI.Begin > 1)
        II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
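// experimental.guard: two guards separated only by a small window of
// side-effect-free instructions can be merged into one guard on the
// conjunction of their conditions.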
  case Intrinsic::experimental_guard: {
    // ...
    Value *NextCond = nullptr;
    if (match(NextInst,
              m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
      Value *CurrCond = II->getArgOperand(0);

      // ...
      if (CurrCond != NextCond) {
        // ...
        while (MoveI != NextInst) {
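// vector.insert / vector.extract: with fixed-width vector types and a
// constant index, these intrinsics are equivalent to one or two
// shufflevectors, which later folds handle much better.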
  case Intrinsic::vector_insert: {
    Value *Vec = II->getArgOperand(0);
    Value *SubVec = II->getArgOperand(1);
    // ...
    auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
    auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
    auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());

    // ...
    if (DstTy && VecTy && SubVecTy) {
      unsigned DstNumElts = DstTy->getNumElements();
      unsigned VecNumElts = VecTy->getNumElements();
      unsigned SubVecNumElts = SubVecTy->getNumElements();
      unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

      // ...
      if (VecNumElts == SubVecNumElts)
        // ...

      // ...
      for (i = 0; i != SubVecNumElts; ++i)
        // ...
      for (; i != VecNumElts; ++i)
        // ...

      // ...
      for (unsigned i = 0; i != IdxN; ++i)
        // ...
      for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
        // ...
      for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
  case Intrinsic::vector_extract: {
    Value *Vec = II->getArgOperand(0);
    // ...
    Type *ReturnType = II->getType();
    // ...
    unsigned ExtractIdx = cast<ConstantInt>(Idx)->getZExtValue();
    Value *InsertTuple, *InsertIdx, *InsertValue;
    if (match(Vec, m_Intrinsic<Intrinsic::vector_insert>(m_Value(InsertTuple),
        // ...
        InsertValue->getType() == ReturnType) {
      unsigned Index = cast<ConstantInt>(InsertIdx)->getZExtValue();
      // ...
      if (ExtractIdx == Index)
        // ...
      // ...
    }

    // ...
    auto *DstTy = dyn_cast<VectorType>(ReturnType);
    auto *VecTy = dyn_cast<VectorType>(Vec->getType());

    // ...
    if (DstTy && VecTy) {
      auto DstEltCnt = DstTy->getElementCount();
      auto VecEltCnt = VecTy->getElementCount();
      unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

      // ...
      if (DstEltCnt == VecTy->getElementCount()) {
        // ...
      }

      // ...
      if (VecEltCnt.isScalable() || DstEltCnt.isScalable())
        // ...

      // ...
      for (unsigned i = 0; i != DstEltCnt.getKnownMinValue(); ++i)
        Mask.push_back(IdxN + i);
  case Intrinsic::vector_reverse: {
    // ...
    Value *Vec = II->getArgOperand(0);
    // ...
      auto *OldBinOp = cast<BinaryOperator>(Vec);
      // ...
            OldBinOp->getOpcode(), X, Y,
            OldBinOp, OldBinOp->getName(),
            II->getIterator()));
      // ...
            OldBinOp->getOpcode(), X, BO1,
            OldBinOp, OldBinOp->getName(),
            II->getIterator()));
      // ...
            OldBinOp->getOpcode(), BO0, Y, OldBinOp,
            OldBinOp->getName(), II->getIterator()));
    // ...
      auto *OldUnOp = cast<UnaryOperator>(Vec);
      // ...
          OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(),
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_and: {
    // ...
    Value *Arg = II->getArgOperand(0);
    // ...
      if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
        // ...
        if (IID == Intrinsic::vector_reduce_and) {
          // ...
        } else {
          assert(IID == Intrinsic::vector_reduce_or && "Expected or reduction.");
          // ...
        }
    // ...
  }
  case Intrinsic::vector_reduce_add: {
    if (IID == Intrinsic::vector_reduce_add) {
      // ...
      Value *Arg = II->getArgOperand(0);
      // ...
        if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
          // ...
              cast<Instruction>(Arg)->getOpcode() == Instruction::SExt)
          // ...
    }
    // ...
  }
  case Intrinsic::vector_reduce_xor: {
    if (IID == Intrinsic::vector_reduce_xor) {
      // ...
      Value *Arg = II->getArgOperand(0);
      // ...
        if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
          // ...
    }
    // ...
  }
  case Intrinsic::vector_reduce_mul: {
    if (IID == Intrinsic::vector_reduce_mul) {
      // ...
      Value *Arg = II->getArgOperand(0);
      // ...
        if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
          // ...
    }
    // ...
  }
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax: {
    if (IID == Intrinsic::vector_reduce_umin ||
        IID == Intrinsic::vector_reduce_umax) {
      // ...
      Value *Arg = II->getArgOperand(0);
      // ...
        if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
          // ...
          Value *Res = IID == Intrinsic::vector_reduce_umin
                           // ...
    }
    // ...
  }
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax: {
    if (IID == Intrinsic::vector_reduce_smin ||
        IID == Intrinsic::vector_reduce_smax) {
      // ...
      Value *Arg = II->getArgOperand(0);
      // ...
        if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
          // ...
            ExtOpc = cast<CastInst>(Arg)->getOpcode();
          Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
                        (ExtOpc == Instruction::CastOps::ZExt))
                           // ...
    }
    // ...
  }
  case Intrinsic::vector_reduce_fmax:
  case Intrinsic::vector_reduce_fmin:
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    bool CanReorderLanes = (IID != Intrinsic::vector_reduce_fadd &&
                            IID != Intrinsic::vector_reduce_fmul) ||
                           II->hasAllowReassoc();
    const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||
                             IID == Intrinsic::vector_reduce_fmul)
                                ? 1
                                : 0;
    Value *Arg = II->getArgOperand(ArgIdx);
  case Intrinsic::is_fpclass: {
    // ...
  }
  case Intrinsic::threadlocal_address: {
    // ...
  }

  // ...
  switch (IID) {
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::ctpop:
  case Intrinsic::umin:
  case Intrinsic::umax:
  case Intrinsic::smin:
  case Intrinsic::smax:
  case Intrinsic::usub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::sadd_sat:
    for (Value *Op : II->args())
      if (auto *Sel = dyn_cast<SelectInst>(Op))
        // ...
  // ...
  }

  // ...
  return visitCallBase(*II);
  if (FI1SyncScope != FI2->getSyncScopeID() ||
      // ...

  // ...
  if (NFI && isIdenticalOrStrongerFence(NFI, &FI))
    // ...
    if (isIdenticalOrStrongerFence(PFI, &FI))
      // ...

// ...
  return visitCallBase(II);

// ...
  return visitCallBase(CBI);

// ...
  if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
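// Trampoline handling: locate the llvm.init.trampoline that fills in an
// alloca-backed trampoline, so a call through llvm.adjust.trampoline can be
// rewritten as a direct call to the underlying function.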
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
    return nullptr;
  if (!isa<AllocaInst>(Underlying))
    return nullptr;

  // ...
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      // ...
      InitTrampoline = II;
      // ...
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // ...

  // ...
  if (!InitTrampoline)
    return nullptr;

  // ...
  if (InitTrampoline->getOperand(0) != TrampMem)
    return nullptr;

  return InitTrampoline;

// ...
    if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
        II->getOperand(0) == TrampMem)
      // ...

// ...
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
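// annotateAnyAllocSite: propagate what is known about an allocation call
// into attributes: dereferenceable(_or_null) bytes from a constant size
// argument, and a return alignment from a constant align argument.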
bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,
                                            const TargetLibraryInfo *TLI) {
  bool Changed = false;

  // ...
  if (!Call.getType()->isPointerTy())
    return Changed;

  // ...
  if (Call.hasRetAttr(Attribute::NonNull)) {
    Changed = !Call.hasRetAttr(Attribute::Dereferenceable);
    Call.addRetAttr(Attribute::getWithDereferenceableBytes(
        Call.getContext(), Size->getLimitedValue()));
  } else {
    Changed = !Call.hasRetAttr(Attribute::DereferenceableOrNull);
    Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
        Call.getContext(), Size->getLimitedValue()));
  }

  // ...
  ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment);
  // ...
    Align ExistingAlign = Call.getRetAlign().valueOrOne();
    // ...
    if (NewAlign > ExistingAlign) {
  bool Changed = annotateAnyAllocSite(Call, &TLI);
  // ...
    if (V->getType()->isPointerTy() &&
        !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
        // ...

  // ...
  assert(ArgNo == Call.arg_size() && "Call arguments not processed correctly.");

  if (!ArgNos.empty()) {
    // ...
    Call.setAttributes(AS);
    // ...
  }

  // ...
  Function *CalleeF = dyn_cast<Function>(Callee);
  // ...
      transformConstExprCastCall(Call))
    // ...

  // ...
    LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
                      // ...
    Call.setNotConvergent();
    if (isa<CallInst>(OldCall))
      // ...
    cast<CallBase>(OldCall)->setCalledFunction(
        // ...

  // ...
  if ((isa<ConstantPointerNull>(Callee) &&
       // ...
      isa<UndefValue>(Callee)) {
    // ...
    if (!Call.getType()->isVoidTy())
      // ...
    if (Call.isTerminator()) {
      // ...

  // ...
      return transformCallThroughTrampoline(Call, *II);

  if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
    // ...
    if (!IA->canThrow()) {
      // ...
      Call.setDoesNotThrow();
      // ...
    }
  }

  // ...
  if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
    // ...
  }

  // ...
  if (!Call.use_empty() && !Call.isMustTailCall())
    if (Value *ReturnedArg = Call.getReturnedArgOperand()) {
      // ...
      Type *RetArgTy = ReturnedArg->getType();
      // ...
    }

  // ...
  if (Bundle && !Call.isIndirectCall()) {
    // ...
    ConstantInt *ExpectedType = cast<ConstantInt>(Bundle->Inputs[0]);
    // ...
      FunctionType = mdconst::extract<ConstantInt>(MD->getOperand(0));
    // ...
      dbgs() << Call.getModule()->getName()
             << ": warning: kcfi: " << Call.getCaller()->getName()
             << ": call to " << CalleeF->getName()
             << " using a mismatching function pointer type\n";
  switch (Call.getIntrinsicID()) {
  case Intrinsic::experimental_gc_statepoint: {
    // ...
      if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
        // ...
      }

      // ...
      if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
        // ...
        if (isa<ConstantPointerNull>(DerivedPtr)) {
          // ...
        }
        // ...
      }

      // ...
      LiveGcValues.insert(BasePtr);
      LiveGcValues.insert(DerivedPtr);
    // ...
    std::optional<OperandBundleUse> Bundle =
        // ...
    unsigned NumOfGCLives = LiveGcValues.size();
    if (!Bundle || NumOfGCLives == Bundle->Inputs.size())
      break;

    // ...
    std::vector<Value *> NewLiveGc;
    for (Value *V : Bundle->Inputs) {
      if (Val2Idx.count(V))
        continue;
      if (LiveGcValues.count(V)) {
        Val2Idx[V] = NewLiveGc.size();
        NewLiveGc.push_back(V);
      } else
        Val2Idx[V] = NumOfGCLives;
    }

    // ...
      assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
             "Missed live gc for base pointer");
      // ...
      GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
      // ...
      assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
             "Missed live gc for derived pointer");
      // ...
      GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));

  // ...
  return Changed ? &Call : nullptr;
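// transformConstExprCastCall: if a call's callee is a cast of a function,
// try to rewrite the call to invoke that function directly, converting
// arguments and the return value only when it is provably safe to do so.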
bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
  auto *Callee =
      dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
  // ...
  assert(!isa<CallBrInst>(Call) &&
         "CallBr's don't have a single point after a def to insert at");

  // ...
  if (Callee->isDeclaration())
    return false;

  // ...
  if (Callee->hasFnAttribute("thunk"))
    return false;

  // ...
  if (Callee->hasFnAttribute(Attribute::Naked))
    return false;

  // ...
  if (Call.isMustTailCall())
    return false;

  // ...
  Type *NewRetTy = FT->getReturnType();

  // ...
  if (OldRetTy != NewRetTy) {
    // ...
    if (!Caller->use_empty())
      // ...
  }

  // ...
  if (!Caller->use_empty()) {
    BasicBlock *PhisNotSupportedBlock = nullptr;
    if (auto *II = dyn_cast<InvokeInst>(Caller))
      PhisNotSupportedBlock = II->getNormalDest();
    if (PhisNotSupportedBlock)
      for (User *U : Caller->users())
        if (PHINode *PN = dyn_cast<PHINode>(U))
          if (PN->getParent() == PhisNotSupportedBlock)
            return false;
  }

  unsigned NumActualArgs = Call.arg_size();
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  // ...
  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))
    return false;

  auto AI = Call.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();
    // ...
    if (Call.isInAllocaArgument(i) ||
        // ...
        Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))
      return false;
  }

  // ...
  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      // ...

  // ...
  Args.reserve(NumActualArgs);
  ArgAttrs.reserve(NumActualArgs);

  // ...
  AI = Call.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    Value *NewArg = *AI;
    if ((*AI)->getType() != ParamTy)
      // ...
    Args.push_back(NewArg);
    // ...
  }

  // ...
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
    // ...
  }

  // ...
  if (FT->getNumParams() < NumActualArgs) {
    // ...
    if (FT->isVarArg()) {
      // ...
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        // ...
        Value *NewArg = *AI;
        if (PTy != (*AI)->getType()) {
          // ...
        }
        Args.push_back(NewArg);
        // ...
      }
    }
  }

  assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
         "missing argument attributes");

  // ...
  Call.getOperandBundlesAsDefs(OpBundles);
  // ...
                               II->getUnwindDest(), Args, OpBundles);
  // ...
        cast<CallInst>(Caller)->getTailCallKind());
  // ...
  NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});

  // ...
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    assert(!NV->getType()->isVoidTy());
    // ...
    NC->setDebugLoc(Caller->getDebugLoc());
    // ...
    assert(OptInsertPt && "No place to insert cast");
    // ...
  }

  if (!Caller->use_empty())
    // ...
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
Instruction *
InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,