#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"

#define DEBUG_TYPE "instcombine"

using namespace PatternMatch;

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window", cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));
  if (ITy->getBitWidth() < 32)
    return Type::getInt32Ty(Ty->getContext());
  auto *Src = MI->getRawSource();
  while (isa<GetElementPtrInst>(Src) || isa<BitCastInst>(Src)) {
    if (!Src->hasOneUse())
      return false;
    Src = cast<Instruction>(Src)->getOperand(0);
  }
  return isa<AllocaInst>(Src) && Src->hasOneUse();
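// SimplifyAnyMemTransfer: if a larger alignment than the recorded one can be
// proven for the source or destination, write it back into the intrinsic so
// later transforms can rely on it.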
  if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    MadeChange = true;
  }

  if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    MadeChange = true;
  }
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength)
    return nullptr;

  assert(Size && "0-sized memory transferring should be removed already.");

  if (isa<AtomicMemTransferInst>(MI))
    if (*CopyDstAlign < Size || *CopySrcAlign < Size)
      return nullptr;
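// A 1/2/4/8-byte transfer is rewritten as a single integer load/store pair,
// which also handles overlapping memory correctly for memmove. AA,
// mem_parallel_loop_access, and access_group metadata are copied from the
// intrinsic onto both new instructions below.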
  Value *Src = MI->getArgOperand(1);
  Value *Dest = MI->getArgOperand(0);

  L->setAlignment(*CopySrcAlign);
  L->setAAMetadata(AACopyMD);
  MDNode *LoopMemParallelMD =
      MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                   LoopMemParallelMD);
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
  if (AccessGroupMD)
    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                   LoopMemParallelMD);
  if (AccessGroupMD)
    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    L->setVolatile(MT->isVolatile());
    S->setVolatile(MT->isVolatile());
  }
  if (isa<AtomicMemTransferInst>(MI)) {
  const Align KnownAlignment =
      getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
    MI->setDestAlignment(KnownAlignment);
  if (isa<UndefValue>(MI->getValue())) {
  assert(Len && "0-sized memory setting should be removed already.");
  const Align Alignment = MI->getDestAlign().valueOrOne();

  if (isa<AtomicMemSetInst>(MI))
    Constant *FillVal = ConstantInt::get(ITy, Fill);

    auto replaceOpForAssignmentMarkers = [FillC, FillVal](auto *DbgAssign) {
      DbgAssign->replaceVariableLocationOp(FillC, FillVal);
    };

    if (isa<AtomicMemSetInst>(MI))
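// simplifyMaskedLoad: when the pointer is known dereferenceable, a masked
// load can become a plain vector load feeding a select on the mask.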
  Value *LoadPtr = II.getArgOperand(0);
  const Align Alignment =
      cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();

  if (isDereferenceablePointer(LoadPtr, II.getType(),
                               II.getDataLayout(), &II, &AC)) {

    LI->copyMetadata(II);
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));

  // If the mask is all zeros, the store does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // If the mask is all ones, this is a plain vector store of the 1st argument.
  if (ConstMask->isAllOnesValue()) {
    Value *StorePtr = II.getArgOperand(1);
    Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
    StoreInst *S =
        new StoreInst(II.getArgOperand(0), StorePtr, /*isVolatile=*/false,
                      Alignment);

  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;
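// simplifyMaskedGather: a gather through a splat pointer with an all-ones
// mask reloads the same address in every lane, so load the scalar once and
// splat it.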
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));

  if (ConstMask->isAllOnesValue())
    if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) {
      auto *VecTy = cast<VectorType>(II.getType());
      const Align Alignment =
          cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
      LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(),
                                              SplatPtr, Alignment,
                                              "load.scalar");
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));

  // If the mask is all zeros, a scatter does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

      Align Alignment =
          cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();

  if (ConstMask->isAllOnesValue()) {
    Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
    VectorType *WideLoadTy = cast<VectorType>(II.getArgOperand(1)->getType());

    StoreInst *S =
        new StoreInst(Extract, SplatPtr, /*isVolatile=*/false, Alignment);

  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;
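// simplifyInvariantGroupIntrinsic: peel off chains of launder/strip
// invariant.group intrinsics and rebuild a single one on the innermost
// pointer, inserting an addrspacecast if the result type differs.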
  auto *Arg = II.getArgOperand(0);
  auto *StrippedArg = Arg->stripPointerCasts();
  auto *StrippedInvariantGroupsArg = StrippedArg;
  while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
    if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
        Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
      break;
    StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
  }
  if (StrippedArg == StrippedInvariantGroupsArg)
    return nullptr; // No launders/strips to remove.

  Value *Result = nullptr;

  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
  else
    llvm_unreachable(
        "simplifyInvariantGroupIntrinsic only handles launder and strip");

  if (Result->getType()->getPointerAddressSpace() !=
      II.getType()->getPointerAddressSpace())
    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());

  return cast<Instruction>(Result);
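// foldCttzCtlz: bit-count folds. Counts of shifted constants become linear
// expressions in the shift amount, a fully-known operand collapses to a
// constant, and otherwise a range annotation is attached.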
  assert((II.getIntrinsicID() == Intrinsic::cttz ||
          II.getIntrinsicID() == Intrinsic::ctlz) &&
         "Expected cttz or ctlz intrinsic");
  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
  Value *Op0 = II.getArgOperand(0);
  Value *Op1 = II.getArgOperand(1);

  if (II.getType()->isIntOrIntVectorTy(1)) {

      return BinaryOperator::CreateAdd(ConstCttz, X);

      return BinaryOperator::CreateSub(ConstCttz, X);

      Constant *Width =
          ConstantInt::get(II.getType(), II.getType()->getScalarSizeInBits());
      return BinaryOperator::CreateSub(Width, X);

      return BinaryOperator::CreateAdd(ConstCtlz, X);

      return BinaryOperator::CreateSub(ConstCtlz, X);

  if (PossibleZeros == DefiniteZeros) {
    auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);

  if (BitWidth != 1 && !II.hasRetAttr(Attribute::Range) &&
      !II.getMetadata(LLVMContext::MD_range)) {
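// foldCtpop: when at most one bit of the operand can be set, the population
// count is that bit shifted down to the LSB; otherwise the known min/max
// population is recorded as a range attribute.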
  assert(II.getIntrinsicID() == Intrinsic::ctpop &&
         "Expected ctpop intrinsic");

  Value *Op0 = II.getArgOperand(0);

  // If only one bit of Op0 can be set, ctpop reduces to shifting that bit
  // down to the LSB.
  if ((~Known.Zero).isPowerOf2())
    return BinaryOperator::CreateLShr(
        Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));

  ConstantRange OldRange =
      II.getRange().value_or(ConstantRange::getFull(BitWidth));

  if (Range != OldRange) {
  // Bail out if the mask is not a constant.
  auto *C = dyn_cast<Constant>(II.getArgOperand(1));

  auto *VecTy = cast<FixedVectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  // Only perform this transformation for <8 x i8> vector types.
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
    return nullptr;

  for (unsigned I = 0; I < NumElts; ++I) {

    if (!COp || !isa<ConstantInt>(COp))
      return nullptr;

    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();

    // Make sure the mask indices are in range.
    if ((unsigned)Indexes[I] >= NumElts)
      return nullptr;
  }

  auto *V1 = II.getArgOperand(0);
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.arg_size() >= NumOperands && "Not enough operands");

  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}
  for (; BI != BE; ++BI) {
    if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
      if (I->isDebugOrPseudoInst() ||

    return I.getIntrinsicID() == Intrinsic::vastart ||
           I.getIntrinsicID() == Intrinsic::vacopy;
  assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
    Call.setArgOperand(0, Arg1);
    Call.setArgOperand(1, Arg0);
  Value *OperationResult = nullptr;

  switch (static_cast<unsigned>(Mask)) {

  case ~fcZero & ~fcNan:
  Value *Src0 = II.getArgOperand(0);
  Value *Src1 = II.getArgOperand(1);
  const ConstantInt *CMask = cast<ConstantInt>(Src1);

  const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;

  const bool IsStrict =
      II.getFunction()->getAttributes().hasFnAttr(Attribute::StrictFP);

    // is.fpclass (fneg x), mask -> is.fpclass x, (fneg mask)
    II.setArgOperand(1, ConstantInt::get(Src1->getType(), fneg(Mask)));

  if ((OrderedMask == fcInf || OrderedInvertedMask == fcInf) &&
      (IsOrdered || IsUnordered) && !IsStrict) {

    if (OrderedInvertedMask == fcInf)

      (IsOrdered || IsUnordered) && !IsStrict) {

      (IsOrdered || IsUnordered) && !IsStrict) {

  if (Mask == fcNan && !IsStrict) {

  if (!IsStrict && (IsOrdered || IsUnordered) &&
    return std::nullopt;

  return std::nullopt;

  return *Known0 == *Known1;
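// moveAddAfterMinMax: reassociate a no-wrap add with a constant past a
// min/max with a constant:
//   max (add X, C0), C1 --> add (max X, C1 - C0), C0
// The matching nsw/nuw flag on the add guarantees the new constant C1 - C0
// does not wrap.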
  assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
          MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
         "Expected a min or max intrinsic");

  Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
  const APInt *C0, *C1;

  // Check for necessary no-wrap and overflow constraints.
  bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
  auto *Add = cast<BinaryOperator>(Op0);
  if ((IsSigned && !Add->hasNoSignedWrap()) ||
      (!IsSigned && !Add->hasNoUnsignedWrap()))
    return nullptr;

  APInt CDiff =
      IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
  assert(!Overflow && "Expected simplify of min/max");

  Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);

  return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
                  : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
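// matchSAddSubSat: recognize a min/max clamp of an add/sub to the value
// range of a narrower integer type and replace it with sadd.sat/ssub.sat in
// that narrower type.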
  const APInt *MinValue, *MaxValue;

  } else if (match(&MinMax1,

  // Check that the constants clamp a saturate, and that the new type would
  // be sensible to convert to.
  if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
    return nullptr;

  unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;

  if (AddSub->getOpcode() == Instruction::Add)
    IntrinsicID = Intrinsic::sadd_sat;
  else if (AddSub->getOpcode() == Instruction::Sub)
    IntrinsicID = Intrinsic::ssub_sat;
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);

  const APInt *C0, *C1;

  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:

  case Intrinsic::smin:

  case Intrinsic::umax:

  case Intrinsic::umin:
  auto *LHS = dyn_cast<MinMaxIntrinsic>(II->getArgOperand(0));

  if (InnerMinMaxID != MinMaxID &&
      !(((MinMaxID == Intrinsic::umax && InnerMinMaxID == Intrinsic::smax) ||
         (MinMaxID == Intrinsic::smin && InnerMinMaxID == Intrinsic::umin)) &&

                                   {LHS->getArgOperand(0), NewC});

  auto *InnerMM = dyn_cast<IntrinsicInst>(Inner);
  if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
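// factorizeMinMaxTree: reassociate a tree of identical min/max intrinsics,
// e.g. min(min(a, b), min(c, a)), so that one of the inner calls is reused
// and the remaining operand becomes a single third value.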
  auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  auto *RHS = dyn_cast<IntrinsicInst>(II->getArgOperand(1));

  if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
      RHS->getIntrinsicID() != MinMaxID ||

  Value *MinMaxOp = nullptr;
  Value *ThirdOp = nullptr;

    // Reuse the RHS min/max; that eliminates the LHS.
    if (D == A || C == A) {
      MinMaxOp = RHS;
      ThirdOp = B;
    } else if (D == B || C == B) {
      MinMaxOp = RHS;
      ThirdOp = A;
    }

    // Reuse the LHS; this eliminates the RHS.
    if (D == A || D == B) {
      MinMaxOp = LHS;
      ThirdOp = C;
    } else if (C == A || C == B) {
      MinMaxOp = LHS;
      ThirdOp = D;
    }

  if (!MinMaxOp || !ThirdOp)
    return nullptr;
  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::fma:
  case Intrinsic::fshl:
  case Intrinsic::fshr:

  if (!match(II->getArgOperand(0),

  // At least 1 operand must have 1 use because we are creating 2
  // instructions.
  if (none_of(II->args(), [](Value *V) { return V->hasOneUse(); }))
    return nullptr;

  Type *SrcTy = X->getType();
  for (unsigned i = 1, e = II->arg_size(); i != e; ++i) {
    if (!match(II->getArgOperand(i),

        X->getType() != SrcTy)
      return nullptr;

  Value *NewIntrinsic =
template <Intrinsic::ID IntrID>

  static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
                "This helper only supports BSWAP and BITREVERSE intrinsics");

      isa<BinaryOperator>(V)) {
    Value *OldReorderX, *OldReorderY;
  if (!CanReorderLanes)
    return nullptr;

  if (!isa<FixedVectorType>(Arg->getType()) ||

      !cast<ShuffleVectorInst>(Arg)->isSingleSource())
    return nullptr;

  int Sz = Mask.size();

  for (int Idx : Mask) {

  // Can remove shuffle iff just shuffled elements, no repeats, undefs, or
  // other changes.
  return UsedIndices.all() ? V : nullptr;
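// foldMinimumOverTrailingOrLeadingZeroCount:
//   umin(cttz(X), C) --> cttz(X | (1 << C))
//   umin(ctlz(X), C) --> ctlz(X | (SignBit >> C))
// ORing in a bit at position C (from either end) caps the count at C.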
template <Intrinsic::ID IntrID>

  static_assert(IntrID == Intrinsic::cttz || IntrID == Intrinsic::ctlz,
                "This helper only supports cttz and ctlz intrinsics");

  unsigned BitWidth = I1->getType()->getScalarSizeInBits();

  Type *Ty = I1->getType();

      IntrID == Intrinsic::cttz ? Instruction::Shl : Instruction::LShr,
      IntrID == Intrinsic::cttz
          ? ConstantInt::get(Ty, 1)

      cast<Constant>(I1), DL);

      IntrID, Builder.CreateOr(CtOp, NewConst),
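// visitCallInst: the main intrinsic-combining entry point. Non-intrinsic
// calls fall through to visitCallBase; recognized intrinsics are simplified
// case by case in the switch below.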
  if (!II)
    return visitCallBase(CI);

  // For atomic unordered mem intrinsics, if the length is not a positive
  // multiple of the element size, the behavior is undefined.
  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
    if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
      if (NumBytes->isNegative() ||
          (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {

        assert(AMI->getType()->isVoidTy() &&
               "non void atomic unordered mem intrinsic");

  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())

    // No other transformations apply to volatile transfers.
    if (auto *M = dyn_cast<MemIntrinsic>(MI))
      if (M->isVolatile())
        return nullptr;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {

          Intrinsic::ID MemCpyID =
              isa<AtomicMemMoveInst>(MMI)
                  ? Intrinsic::memcpy_element_unordered_atomic
                  : Intrinsic::memcpy;

    // A transfer from a pointer to itself is a no-op.
    if (MTI->getSource() == MTI->getDest())

    auto IsPointerUndefined = [MI](Value *Ptr) {
      return isa<ConstantPointerNull>(Ptr) &&
             !NullPointerIsDefined(
                 MI->getFunction(),
                 cast<PointerType>(Ptr->getType())->getAddressSpace());
    };
    bool SrcIsUndefined = false;

    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {

      SrcIsUndefined = IsPointerUndefined(MTI->getRawSource());
    } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {

    if (SrcIsUndefined || IsPointerUndefined(MI->getRawDest())) {

    if (Changed)
      return II;
  if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
    auto VWidth = IIFVTy->getNumElements();
    APInt PoisonElts(VWidth, 0);

  if (II->isCommutative()) {
    if (auto Pair = matchSymmetricPair(II->getOperand(0),
                                       II->getOperand(1))) {

  if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
  case Intrinsic::objectsize: {

                             &InsertedInstructions)) {
      for (Instruction *Inserted : InsertedInstructions)
  case Intrinsic::abs: {
    Value *IIOperand = II->getArgOperand(0);
    bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();

    if (match(IIOperand,

                  m_Intrinsic<Intrinsic::abs>(m_Value(Y)))))) {

          cast<Instruction>(IIOperand)->hasNoSignedWrap() && IntMinIsPoison;

    if (std::optional<bool> Known =

      return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1));
  case Intrinsic::umin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);

    assert(II->getType()->getScalarSizeInBits() != 1 &&
           "Expected simplify of umin with max constant");

    if (Value *FoldedCttz =
            foldMinimumOverTrailingOrLeadingZeroCount<Intrinsic::cttz>(
                I0, I1, DL, Builder))
      return replaceInstUsesWith(*II, FoldedCttz);

    if (Value *FoldedCtlz =
            foldMinimumOverTrailingOrLeadingZeroCount<Intrinsic::ctlz>(
                I0, I1, DL, Builder))
      return replaceInstUsesWith(*II, FoldedCtlz);

  case Intrinsic::umax: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);

    if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_ZExt(m_Value(Y))) &&
        (I0->hasOneUse() || I1->hasOneUse()) &&
        X->getType() == Y->getType()) {
  case Intrinsic::smax:
  case Intrinsic::smin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);

        (I0->hasOneUse() || I1->hasOneUse()) &&
        X->getType() == Y->getType()) {

    // For i1 values: umin/smax are And, umax/smin are Or.
    if ((IID == Intrinsic::umin || IID == Intrinsic::smax) &&
        II->getType()->isIntOrIntVectorTy(1)) {
      return BinaryOperator::CreateAnd(I0, I1);
    }

    if ((IID == Intrinsic::umax || IID == Intrinsic::smin) &&
        II->getType()->isIntOrIntVectorTy(1)) {
      return BinaryOperator::CreateOr(I0, I1);
    }

    if (IID == Intrinsic::smax || IID == Intrinsic::smin) {

    bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax;
    bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin;

    if (IID == Intrinsic::smax || IID == Intrinsic::smin) {

      if (KnownSign == std::nullopt) {

      } else if (*KnownSign) {

        return BinaryOperator::CreateOr(I0, X);

                                 ConstantInt::get(II->getType(), *RHSC));

    if (I0->hasOneUse() && !I1->hasOneUse())

    if (IID == Intrinsic::smin || IID == Intrinsic::umax)

      if (LHS_CR.icmp(Pred, *RHSC))

                                 ConstantInt::get(II->getType(), *RHSC));
  case Intrinsic::bitreverse: {
    Value *IIOperand = II->getArgOperand(0);

        X->getType()->isIntOrIntVectorTy(1)) {
      Type *Ty = II->getType();

    if (Instruction *crossLogicOpFold =
            foldBitOrderCrossLogicOp<Intrinsic::bitreverse>(IIOperand,
                                                            Builder))
      return crossLogicOpFold;
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);

        cast<BinaryOperator>(IIOperand)->getOpcode() == Instruction::Shl

    // bswap(x) -> shift(x) if x has exactly one "active byte".
    if (BW - LZ - TZ == 8) {
      assert(LZ != TZ && "active byte cannot be in the middle");

      return BinaryOperator::CreateNUWShl(
          IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ));

      return BinaryOperator::CreateExactLShr(
          IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ));
    }

    unsigned C = X->getType()->getScalarSizeInBits() - BW;
    Value *CV = ConstantInt::get(X->getType(), C);

    if (Instruction *crossLogicOpFold =
            foldBitOrderCrossLogicOp<Intrinsic::bswap>(IIOperand, Builder)) {
      return crossLogicOpFold;
    }
  case Intrinsic::masked_load:
    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
      return replaceInstUsesWith(CI, SimplifiedMaskedOp);
    break;
  case Intrinsic::masked_store:
    return simplifyMaskedStore(*II);
  case Intrinsic::masked_gather:
    return simplifyMaskedGather(*II);
  case Intrinsic::masked_scatter:
    return simplifyMaskedScatter(*II);
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, -1) -> 1/x
      if (Power->isMinusOne())
        return BinaryOperator::CreateFDivFMF(
            ConstantFP::get(CI.getType(), 1.0), II->getArgOperand(0), II);
      // powi(x, 2) -> x*x
      if (Power->equalsInt(2))
        return BinaryOperator::CreateFMulFMF(II->getArgOperand(0),
                                             II->getArgOperand(0), II);

      if (!Power->getValue()[0]) {
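// Funnel shifts: constant shift amounts are canonicalized modulo the bit
// width, fshr is rewritten in terms of fshl, and degenerate forms with a
// zero/undef operand lower to plain shl/lshr.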
  case Intrinsic::cttz:
  case Intrinsic::ctlz:

  case Intrinsic::ctpop:

  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
    Type *Ty = II->getType();

      if (ModuloC != ShAmtC)

             "Shift amount expected to be modulo bitwidth");

      if (IID == Intrinsic::fshr) {

      assert(IID == Intrinsic::fshl &&
             "All funnel shifts by simple constants should go left");

        return BinaryOperator::CreateShl(Op0, ShAmtC);

        return BinaryOperator::CreateLShr(Op1,
  case Intrinsic::ptrmask: {

    Value *InnerPtr, *InnerMask;
    bool Changed = false;

    // (ptrmask (ptrmask p, A), B) -> (ptrmask p, (and A, B))
    if (match(II->getArgOperand(0),

             "Mask types must match");

      unsigned NewAlignmentLog =
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow: {

    // Given 2 constant operands whose sum does not overflow:
    // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
    // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
    const APInt *C0, *C1;
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    bool IsSigned = IID == Intrinsic::sadd_with_overflow;
    bool HasNWAdd = IsSigned

      APInt NewC =
          IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);

        return replaceInstUsesWith(
            *II, Builder.CreateBinaryIntrinsic(
                     IID, X, ConstantInt::get(Arg1->getType(), NewC)));
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::usub_with_overflow:

  case Intrinsic::ssub_with_overflow: {

    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
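// Saturating arithmetic: ssub.sat(X, C) becomes sadd.sat(X, -C) when C is
// not the minimum signed value, and nested saturating ops with constants of
// the same sign are merged.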
  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat: {

    Type *Ty = SI->getType();
    Value *Arg0 = SI->getLHS();
    Value *Arg1 = SI->getRHS();

    if (IID == Intrinsic::usub_sat &&

    // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
    if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
        C->isNotMinSignedValue()) {
      Value *NegVal = ConstantExpr::getNeg(C);
      return replaceInstUsesWith(
          *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_sat, Arg0,
                                             NegVal));
    }

    if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {

      const APInt *Val, *Val2;

          IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
      if (Other->getIntrinsicID() == IID &&

          NewVal = Val->sadd_ov(*Val2, Overflow);

          return replaceInstUsesWith(
              *II, Builder.CreateBinaryIntrinsic(
                       IID, X, ConstantInt::get(II->getType(), NewVal)));
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

      // If both operands are negated, invert the call and negate the result:
      // min(-X, -Y) --> -(max(X, Y))
      // max(-X, -Y) --> -(min(X, Y))
      Intrinsic::ID NewIID;
      switch (IID) {
      case Intrinsic::maxnum:
        NewIID = Intrinsic::minnum;
        break;
      case Intrinsic::minnum:
        NewIID = Intrinsic::maxnum;
        break;
      case Intrinsic::maximum:
        NewIID = Intrinsic::minimum;
        break;
      case Intrinsic::minimum:
        NewIID = Intrinsic::maximum;
        break;

      Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);

    // m(m(X, C2), C1) -> m(X, C) for both min and max (C = minmax(C1, C2))
    if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {

        switch (IID) {
        case Intrinsic::maxnum:

        case Intrinsic::minnum:

        case Intrinsic::maximum:

        case Intrinsic::minimum:

        Value *V = Builder.CreateBinaryIntrinsic(
            IID, X, ConstantFP::get(Arg0->getType(), Res), II);

        if (auto *CI = dyn_cast<CallInst>(V))

        X->getType() == Y->getType()) {

    // max X, -X --> fabs X; min X, -X --> -(fabs X)
    auto IsMinMaxOrXNegX = [IID, &X](Value *Op0, Value *Op1) {

        return Op0->hasOneUse() ||
               (IID != Intrinsic::minimum && IID != Intrinsic::minnum);

    if (IsMinMaxOrXNegX(Arg0, Arg1) || IsMinMaxOrXNegX(Arg1, Arg0)) {

      if (IID == Intrinsic::minimum || IID == Intrinsic::minnum)
  case Intrinsic::matrix_multiply: {

    Value *Op0 = II->getOperand(0);
    Value *Op1 = II->getOperand(1);
    Value *OpNotNeg, *NegatedOp;
    unsigned NegatedOpArg, OtherOpArg;

    Value *OtherOp = II->getOperand(OtherOpArg);

      NewArgs[NegatedOpArg] = OpNotNeg;
  case Intrinsic::fmuladd: {
    // Try to simplify the underlying FMul.
    if (Value *V = simplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
                                    II->getFastMathFlags(),
                                    SQ.getWithInstruction(II))) {
      auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
      FAdd->copyFastMathFlags(II);
      return FAdd;
    }

  case Intrinsic::fma: {

    Value *Src0 = II->getArgOperand(0);
    Value *Src1 = II->getArgOperand(1);
    Value *Src2 = II->getArgOperand(2);

      auto *FAdd = BinaryOperator::CreateFAdd(V, Src2);
      FAdd->copyFastMathFlags(II);
  case Intrinsic::copysign: {
    Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);

      if (*KnownSignBit) {
  case Intrinsic::fabs: {

    Value *Arg = II->getArgOperand(0);

    // fabs (select Cond, TC, FC) --> select Cond, (fabs TC), (fabs FC)
    if (isa<Constant>(TVal) || isa<Constant>(FVal)) {

      FastMathFlags FMF2 = cast<SelectInst>(Arg)->getFastMathFlags();

      SI->setFastMathFlags(FMF1 | FMF2);

    Value *Magnitude, *Sign;
    if (match(II->getArgOperand(0),
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::trunc: {

  case Intrinsic::cos:
  case Intrinsic::amdgcn_cos: {

    Value *Src = II->getArgOperand(0);

  case Intrinsic::sin:
  case Intrinsic::amdgcn_sin: {
  case Intrinsic::ldexp: {
    // ldexp(ldexp(x, a), b) -> ldexp(x, a + b)
    Value *Src = II->getArgOperand(0);
    Value *Exp = II->getArgOperand(1);

        Exp->getType() == InnerExp->getType()) {

      FastMathFlags InnerFlags = cast<FPMathOperator>(Src)->getFastMathFlags();

        II->setArgOperand(1, NewExp);
        II->setFastMathFlags(InnerFlags); // Or the inner flags.

                                  ConstantFP::get(II->getType(), 1.0));

                                  ConstantFP::get(II->getType(), 1.0));

    Value *SelectCond, *SelectLHS, *SelectRHS;
    if (match(II->getArgOperand(1),

      Value *NewLdexp = nullptr;

        cast<Instruction>(NewLdexp)->copyFastMathFlags(II);
  case Intrinsic::ptrauth_auth:
  case Intrinsic::ptrauth_resign: {

    bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign;

    Value *Key = II->getArgOperand(1);
    Value *Disc = II->getArgOperand(2);

    // AuthKey will be the key we need to end up authenticating against in
    // whatever we replace this sequence with.
    Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr;
    if (const auto *CI = dyn_cast<CallBase>(Ptr)) {

    } else if (const auto *PtrToInt = dyn_cast<PtrToIntOperator>(Ptr)) {
      // ptrauth constants are equivalent to a call to @llvm.ptrauth.sign for
      // our purposes, so check for that too.
      const auto *CPA = dyn_cast<ConstantPtrAuth>(PtrToInt->getOperand(0));
      if (!CPA || !CPA->isKnownCompatibleWith(Key, Disc, DL))
        break;

      // resign + auth can be folded with the ptrauth constant's key/disc.
      if (NeedSign && isa<ConstantInt>(II->getArgOperand(4))) {
        auto *SignKey = cast<ConstantInt>(II->getArgOperand(3));
        auto *SignDisc = cast<ConstantInt>(II->getArgOperand(4));

                                            SignDisc, SignAddrDisc);

    if (AuthKey && NeedSign) {
      // resign(0,1) + resign(1,2) = resign(0, 2)
      NewIntrin = Intrinsic::ptrauth_resign;
    } else if (AuthKey) {
      // resign(0,1) + auth(1) = auth(0)
      NewIntrin = Intrinsic::ptrauth_auth;
    } else if (NeedSign) {
      // sign(0) + resign(0, 1) = sign(1)
      NewIntrin = Intrinsic::ptrauth_sign;
  case Intrinsic::arm_neon_vtbl1:
  case Intrinsic::aarch64_neon_tbl1:

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
                 IID == Intrinsic::aarch64_neon_umull);

    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {

    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
      if (ConstantInt *Splat =
              dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
  case Intrinsic::arm_neon_aesd:
  case Intrinsic::arm_neon_aese:
  case Intrinsic::aarch64_crypto_aesd:
  case Intrinsic::aarch64_crypto_aese: {
    Value *DataArg = II->getArgOperand(0);
    Value *KeyArg = II->getArgOperand(1);
  case Intrinsic::hexagon_V6_vandvrt:
  case Intrinsic::hexagon_V6_vandvrt_128B: {
    // Simplify Q -> V -> Q conversion.
    if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {

      if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
          ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
        break;
      Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);

      // Check if every byte has common bits in Bytes and Mask.
      if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
  case Intrinsic::stackrestore: {
    enum class ClassifyResult {
      None,
      Alloca,
      StackRestore,
      CallWithSideEffects,
    };

      if (isa<AllocaInst>(I))
        return ClassifyResult::Alloca;

      if (auto *CI = dyn_cast<CallInst>(I)) {
        if (auto *II = dyn_cast<IntrinsicInst>(CI)) {
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return ClassifyResult::StackRestore;

          if (II->mayHaveSideEffects())
            return ClassifyResult::CallWithSideEffects;
        } else {
          // Consider all non-intrinsic calls to be side effects.
          return ClassifyResult::CallWithSideEffects;
        }
      }

      return ClassifyResult::None;
    };

    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave &&
          SS->getParent() == II->getParent()) {

        bool CannotRemove = false;
        for (++BI; &*BI != II; ++BI) {
          switch (Classify(&*BI)) {
          case ClassifyResult::None:
            break;

          case ClassifyResult::StackRestore:
            // An intervening stackrestore for a different stacksave blocks
            // removal.
            if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS)
              CannotRemove = true;
            break;

          case ClassifyResult::Alloca:
          case ClassifyResult::CallWithSideEffects:

            CannotRemove = true;
            break;

    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      switch (Classify(&*BI)) {
      case ClassifyResult::None:
        break;

      case ClassifyResult::StackRestore:

      case ClassifyResult::Alloca:
      case ClassifyResult::CallWithSideEffects:

        CannotRemove = true;
        break;

    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
  case Intrinsic::lifetime_end:
    // Asan needs to poison memory to detect invalid access, which is
    // possible even for lifetime intrinsics.
    if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
        II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
        II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
      break;

      return I.getIntrinsicID() == Intrinsic::lifetime_start;
  case Intrinsic::assume: {
    Value *IIOperand = II->getArgOperand(0);

    II->getOperandBundlesAsDefs(OpBundles);

      assert(isa<AssumeInst>(Assume));

    // Remove an assume if it is followed by an identical assume.
    if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
      return RemoveConditionFromAssume(Next);

    Value *AssumeIntrinsic = II->getCalledOperand();

        LHS->getOpcode() == Instruction::Load &&

      return RemoveConditionFromAssume(II);

    for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {

      if (OBU.getTagName() == "separate_storage") {

        auto MaybeSimplifyHint = [&](const Use &U) {
          Value *Hint = U.get();

        MaybeSimplifyHint(OBU.Inputs[0]);
        MaybeSimplifyHint(OBU.Inputs[1]);

        A->getType()->isPointerTy()) {

      Replacement->insertBefore(Next);

      return RemoveConditionFromAssume(II);

    if (auto *Replacement =

      Replacement->insertAfter(II);

      return RemoveConditionFromAssume(II);

    for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
      auto &BOI = II->bundle_op_info_begin()[Idx];

      if (BOI.End - BOI.Begin > 2)

      if (BOI.End - BOI.Begin > 0) {

      if (BOI.End - BOI.Begin > 0)
        II->op_begin()[BOI.Begin].set(CanonRK.WasOn);
      if (BOI.End - BOI.Begin > 1)
        II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
  case Intrinsic::experimental_guard: {

    Value *NextCond = nullptr;
    if (match(NextInst,
              m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
      Value *CurrCond = II->getArgOperand(0);

      // Remove a guard that it is immediately preceded by an identical
      // guard. Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
      if (CurrCond != NextCond) {

        while (MoveI != NextInst) {
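// vector_insert over fixed-width vectors is canonicalized to shuffles: widen
// SubVec to Vec's width with one shufflevector, then blend it into place
// with a second mask built below.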
  case Intrinsic::vector_insert: {
    Value *Vec = II->getArgOperand(0);
    Value *SubVec = II->getArgOperand(1);

    auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
    auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
    auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());

    // Only canonicalize if the destination vector, Vec, and SubVec are all
    // fixed vectors.
    if (DstTy && VecTy && SubVecTy) {
      unsigned DstNumElts = DstTy->getNumElements();
      unsigned VecNumElts = VecTy->getNumElements();
      unsigned SubVecNumElts = SubVecTy->getNumElements();
      unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

      // An insert that entirely overwrites Vec with SubVec is a nop.
      if (VecNumElts == SubVecNumElts)

      for (i = 0; i != SubVecNumElts; ++i)

      for (; i != VecNumElts; ++i)

      for (unsigned i = 0; i != IdxN; ++i)

      for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)

      for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
  case Intrinsic::vector_extract: {
    Value *Vec = II->getArgOperand(0);

    Type *ReturnType = II->getType();

    unsigned ExtractIdx = cast<ConstantInt>(Idx)->getZExtValue();
    Value *InsertTuple, *InsertIdx, *InsertValue;
    if (match(Vec, m_Intrinsic<Intrinsic::vector_insert>(m_Value(InsertTuple),

        InsertValue->getType() == ReturnType) {
      unsigned Index = cast<ConstantInt>(InsertIdx)->getZExtValue();

      // Case where we get the same index right after setting it.
      if (ExtractIdx == Index)

    auto *DstTy = dyn_cast<VectorType>(ReturnType);
    auto *VecTy = dyn_cast<VectorType>(Vec->getType());

    if (DstTy && VecTy) {
      auto DstEltCnt = DstTy->getElementCount();
      auto VecEltCnt = VecTy->getElementCount();
      unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

      // Extracting the entirety of Vec is a nop.
      if (DstEltCnt == VecTy->getElementCount()) {

      if (VecEltCnt.isScalable() || DstEltCnt.isScalable())
        break;

      for (unsigned i = 0; i != DstEltCnt.getKnownMinValue(); ++i)
        Mask.push_back(IdxN + i);
  case Intrinsic::vector_reverse: {

    Value *Vec = II->getArgOperand(0);

      auto *OldBinOp = cast<BinaryOperator>(Vec);

        // rev(binop rev(X), rev(Y)) --> binop X, Y
        return replaceInstUsesWith(CI,
                                   BinaryOperator::CreateWithCopiedFlags(
                                       OldBinOp->getOpcode(), X, Y,
                                       OldBinOp, OldBinOp->getName(),
                                       II->getIterator()));

        return replaceInstUsesWith(CI,
                                   BinaryOperator::CreateWithCopiedFlags(
                                       OldBinOp->getOpcode(), X, BO1,
                                       OldBinOp, OldBinOp->getName(),
                                       II->getIterator()));

      return replaceInstUsesWith(CI,
                                 BinaryOperator::CreateWithCopiedFlags(
                                     OldBinOp->getOpcode(), BO0, Y, OldBinOp,
                                     OldBinOp->getName(), II->getIterator()));

    auto *OldUnOp = cast<UnaryOperator>(Vec);

        OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(),
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_and: {

    Value *Arg = II->getArgOperand(0);

      if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))

          if (IID == Intrinsic::vector_reduce_and) {

            assert(IID == Intrinsic::vector_reduce_or &&
                   "Expected or reduction.");
  case Intrinsic::vector_reduce_add: {
    if (IID == Intrinsic::vector_reduce_add) {

      Value *Arg = II->getArgOperand(0);

        if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))

            cast<Instruction>(Arg)->getOpcode() == Instruction::SExt)
  case Intrinsic::vector_reduce_xor: {
    if (IID == Intrinsic::vector_reduce_xor) {

      Value *Arg = II->getArgOperand(0);

        if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
  case Intrinsic::vector_reduce_mul: {
    if (IID == Intrinsic::vector_reduce_mul) {

      Value *Arg = II->getArgOperand(0);

        if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax: {
    if (IID == Intrinsic::vector_reduce_umin ||
        IID == Intrinsic::vector_reduce_umax) {

      Value *Arg = II->getArgOperand(0);

        if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))

          Value *Res = IID == Intrinsic::vector_reduce_umin
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax: {
    if (IID == Intrinsic::vector_reduce_smin ||
        IID == Intrinsic::vector_reduce_smax) {

      Value *Arg = II->getArgOperand(0);

        if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))

            ExtOpc = cast<CastInst>(Arg)->getOpcode();
          Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
                        (ExtOpc == Instruction::CastOps::ZExt))
  case Intrinsic::vector_reduce_fmax:
  case Intrinsic::vector_reduce_fmin:
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    bool CanReorderLanes = (IID != Intrinsic::vector_reduce_fadd &&
                            IID != Intrinsic::vector_reduce_fmul) ||
                           II->hasAllowReassoc();
    const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||
                             IID == Intrinsic::vector_reduce_fmul)
                                ? 1
                                : 0;
    Value *Arg = II->getArgOperand(ArgIdx);
  case Intrinsic::is_fpclass: {

  case Intrinsic::threadlocal_address: {

  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::ctpop:
  case Intrinsic::umin:
  case Intrinsic::umax:
  case Intrinsic::smin:
  case Intrinsic::smax:
  case Intrinsic::usub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::sadd_sat:

    if (auto *Sel = dyn_cast<SelectInst>(Op))

  return visitCallBase(*II);
  if (FI1SyncScope != FI2->getSyncScopeID() ||

  if (NFI && isIdenticalOrStrongerFence(NFI, &FI))

  if (isIdenticalOrStrongerFence(PFI, &FI))

  return visitCallBase(II);

  return visitCallBase(CBI);
    if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
    return nullptr;
  if (!isa<AllocaInst>(Underlying))
    return nullptr;

    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {

      InitTrampoline = II;

    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)

  if (!InitTrampoline)
    return nullptr;

  if (InitTrampoline->getOperand(0) != TrampMem)
    return nullptr;

  return InitTrampoline;

    if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
        II->getOperand(0) == TrampMem)

  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
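// annotateAnyAllocSite: derive dereferenceable(_or_null) and align return
// attributes for recognized allocation calls from their size and alignment
// arguments.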
bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,
                                            const TargetLibraryInfo *TLI) {

  bool Changed = false;

  if (!Call.getType()->isPointerTy())
    return Changed;

    if (Call.hasRetAttr(Attribute::NonNull)) {
      Changed = !Call.hasRetAttr(Attribute::Dereferenceable);
      Call.addRetAttr(Attribute::getWithDereferenceableBytes(
          Call.getContext(), Size->getLimitedValue()));
    } else {
      Changed = !Call.hasRetAttr(Attribute::DereferenceableOrNull);
      Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
          Call.getContext(), Size->getLimitedValue()));
    }

  ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment);

    Align ExistingAlign = Call.getRetAlign().valueOrOne();

    if (NewAlign > ExistingAlign) {
  bool Changed = annotateAnyAllocSite(Call, &TLI);

    if (V->getType()->isPointerTy() &&
        !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&

  assert(ArgNo == Call.arg_size() &&
         "Call arguments not processed correctly.");

  if (!ArgNos.empty()) {

    Call.setAttributes(AS);

  Function *CalleeF = dyn_cast<Function>(Callee);

      transformConstExprCastCall(Call))

    LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call

    Call.setNotConvergent();

    if (isa<CallInst>(OldCall))

    cast<CallBase>(OldCall)->setCalledFunction(

  if ((isa<ConstantPointerNull>(Callee) &&

      isa<UndefValue>(Callee)) {

    if (!Call.getType()->isVoidTy())

    if (Call.isTerminator()) {

    return transformCallThroughTrampoline(Call, *II);

  if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {

    if (!IA->canThrow()) {
      // Normal inline asm calls cannot throw - mark them 'nounwind'.
      Call.setDoesNotThrow();

  if (CallInst *CI = dyn_cast<CallInst>(&Call)) {

  if (!Call.use_empty() && !Call.isMustTailCall())
    if (Value *ReturnedArg = Call.getReturnedArgOperand()) {

      Type *RetArgTy = ReturnedArg->getType();

  if (Bundle && !Call.isIndirectCall()) {

      ConstantInt *ExpectedType = cast<ConstantInt>(Bundle->Inputs[0]);

        FunctionType = mdconst::extract<ConstantInt>(MD->getOperand(0));

        dbgs() << Call.getModule()->getName()
               << ": warning: kcfi: " << Call.getCaller()->getName()
               << ": call to " << CalleeF->getName()
               << " using a mismatching function pointer type\n";
  switch (Call.getIntrinsicID()) {
  case Intrinsic::experimental_gc_statepoint: {

      // Undef is undef, even after relocation.
      if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {

      if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {

        // The relocation of null will be null for most any collector.
        if (isa<ConstantPointerNull>(DerivedPtr)) {

      LiveGcValues.insert(BasePtr);
      LiveGcValues.insert(DerivedPtr);

    std::optional<OperandBundleUse> Bundle =

    unsigned NumOfGCLives = LiveGcValues.size();
    if (!Bundle || NumOfGCLives == Bundle->Inputs.size())
      break;

    std::vector<Value *> NewLiveGc;
    for (Value *V : Bundle->Inputs) {
      if (Val2Idx.count(V))
        continue;
      if (LiveGcValues.count(V)) {
        Val2Idx[V] = NewLiveGc.size();
        NewLiveGc.push_back(V);
      } else
        Val2Idx[V] = NumOfGCLives;
    }

      assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
             "Missed live gc for base pointer");

      GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));

      assert(Val2Idx.count(DerivedPtr) &&
             Val2Idx[DerivedPtr] != NumOfGCLives &&
             "Missed live gc for derived pointer");

      GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));

  return Changed ? &Call : nullptr;
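// transformConstExprCastCall: convert a call through a bitcast function
// pointer into a direct call, inserting bit/pointer casts for arguments and
// the return value when every conversion is lossless and ABI attributes
// allow it.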
bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
  auto *Callee =
      dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());

  assert(!isa<CallBrInst>(Call) &&
         "CallBr's don't have a single point after a def to insert at");

  // If this is a call to a thunk function, don't remove the cast. Thunks are
  // used to transparently forward all incoming parameters and outgoing
  // return values, so it's important to leave the cast in place.
  if (Callee->hasFnAttribute("thunk"))
    return false;

  // If this is a call to a naked function, the assembly might be using an
  // argument, or otherwise rely on the frame layout; the function prototype
  // will mismatch.
  if (Callee->hasFnAttribute(Attribute::Naked))
    return false;

  if (Call.isMustTailCall())
    return false;

  Type *NewRetTy = FT->getReturnType();

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {

      if (Callee->isDeclaration())
        return false; // Cannot transform this return value.

      if (!Caller->use_empty())

    // If the callbase is an invoke instruction and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction.
    if (!Caller->use_empty()) {
      BasicBlock *PhisNotSupportedBlock = nullptr;
      if (auto *II = dyn_cast<InvokeInst>(Caller))
        PhisNotSupportedBlock = II->getNormalDest();
      if (PhisNotSupportedBlock)
        for (User *U : Caller->users())
          if (PHINode *PN = dyn_cast<PHINode>(U))
            if (PN->getParent() == PhisNotSupportedBlock)
              return false;
    }
  }

  unsigned NumActualArgs = Call.arg_size();
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  // Avoid folding away bitcasts of inalloca/preallocated calls.
  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))
    return false;

  auto AI = Call.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (Call.isInAllocaArgument(i) ||

        Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))
      return false; // Cannot transform to or from byval.
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of
    // the call.
    if (FT->isVarArg() != Call.getFunctionType()->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, the number of fixed
    // parameters must still match.
    if (FT->isVarArg() && Call.getFunctionType()->isVarArg() &&
        FT->getNumParams() != Call.getFunctionType()->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&

  Args.reserve(NumActualArgs);
  ArgAttrs.reserve(NumActualArgs);

  AI = Call.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    Value *NewArg = *AI;
    if ((*AI)->getType() != ParamTy)
      NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
    Args.push_back(NewArg);

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {

  if (FT->getNumParams() < NumActualArgs) {

    if (FT->isVarArg()) {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {

        Value *NewArg = *AI;
        if (PTy != (*AI)->getType()) {

        Args.push_back(NewArg);

  assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
         "missing argument attributes");

  SmallVector<OperandBundleDef, 1> OpBundles;
  Call.getOperandBundlesAsDefs(OpBundles);

                                   II->getUnwindDest(), Args, OpBundles);

    cast<CallInst>(NewCall)->setTailCallKind(
        cast<CallInst>(Caller)->getTailCallKind());

  NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});

  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    assert(!NV->getType()->isVoidTy());

    NC->setDebugLoc(Caller->getDebugLoc());

    assert(OptInsertPt && "No place to insert cast");

  if (!Caller->use_empty())