#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"

#define DEBUG_TYPE "instcombine"

using namespace PatternMatch;

STATISTIC(NumSimplified, "Number of library calls simplified");
87 "instcombine-guard-widening-window",
89 cl::desc(
"How wide an instruction window to bypass looking for "
if (ITy->getBitWidth() < 32)
  return Type::getInt32Ty(Ty->getContext());
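// hasUndefSource: recognize a memcpy/memmove whose source is an otherwise
// unused alloca, i.e. the bytes being copied are effectively undef.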
auto *Src = MI->getRawSource();
while (isa<GetElementPtrInst>(Src) || isa<BitCastInst>(Src)) {
  if (!Src->hasOneUse())
    return false;
  Src = cast<Instruction>(Src)->getOperand(0);
}
return isa<AllocaInst>(Src) && Src->hasOneUse();
if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
  MI->setDestAlignment(DstAlign);
  return MI;
}

if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
  MI->setSourceAlignment(SrcAlign);
  return MI;
}
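// A memcpy/memmove with a small constant length that fits a legal integer
// width can be lowered to a single integer load and store.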
ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
if (!MemOpLength)
  return nullptr;

assert(Size && "0-sized memory transferring should be removed already.");

if (isa<AtomicMemTransferInst>(MI))
  if (*CopyDstAlign < Size || *CopySrcAlign < Size)
    return nullptr;
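// If the memcpy carries !tbaa or a matching !tbaa.struct descriptor, recover
// a TBAA tag that can be put on the replacement load and store.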
MDNode *CopyMD = nullptr;
if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
  CopyMD = M;
} else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
  if (M->getNumOperands() == 3 && M->getOperand(0) &&
      mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
      mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
      M->getOperand(1) &&
      mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
      mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
          Size &&
      M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
    CopyMD = cast<MDNode>(M->getOperand(2));
}
Value *Src = MI->getArgOperand(1);
Value *Dest = MI->getArgOperand(0);

L->setAlignment(*CopySrcAlign);
if (CopyMD)
  L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
MDNode *LoopMemParallelMD =
    MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
if (LoopMemParallelMD)
  L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
if (AccessGroupMD)
  L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

if (LoopMemParallelMD)
  S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
if (AccessGroupMD)
  S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
  L->setVolatile(MT->isVolatile());
  S->setVolatile(MT->isVolatile());
}
if (isa<AtomicMemTransferInst>(MI)) {
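// SimplifyAnyMemSet: raise the destination alignment to what is provably
// known, then lower a small constant-length memset to a single store.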
const Align KnownAlignment =
    getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
  MI->setDestAlignment(KnownAlignment);
  return MI;
}

if (isa<UndefValue>(MI->getValue())) {

assert(Len && "0-sized memory setting should be removed already.");
const Align Alignment = MI->getDestAlign().valueOrOne();

if (isa<AtomicMemSetInst>(MI))
  if (Alignment < Len)
    return nullptr;

DAI->replaceVariableLocationOp(FillC, FillVal);

if (isa<AtomicMemSetInst>(MI))
  S->setOrdering(AtomicOrdering::Unordered);
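// Masked load/store/gather/scatter folds: a zero mask makes the operation a
// no-op, an all-ones mask degenerates to a plain load or store, and a splat
// pointer can be handled with a single scalar memory access.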
const Align Alignment =
    cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
LI->copyMetadata(II);

if (ConstMask->isNullValue())

if (ConstMask->isAllOnesValue()) {

if (isa<ScalableVectorType>(ConstMask->getType()))

if (ConstMask->isAllOnesValue())

auto *VecTy = cast<VectorType>(II.getType());
const Align Alignment =
    cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,
                                        Alignment, "load.scalar");

if (ConstMask->isNullValue())

new StoreInst(SplatValue, SplatPtr, false, Alignment);

if (ConstMask->isAllOnesValue()) {

new StoreInst(Extract, SplatPtr, false, Alignment);

if (isa<ScalableVectorType>(ConstMask->getType()))
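// simplifyInvariantGroupIntrinsic: collapse chains of launder.invariant.group
// and strip.invariant.group, e.g. launder(launder(x)) -> launder(x).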
auto *StrippedInvariantGroupsArg = StrippedArg;
while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
  if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
      Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
    break;
  StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
}
if (StrippedArg == StrippedInvariantGroupsArg)
  return nullptr;

Value *Result = nullptr;

llvm_unreachable(
    "simplifyInvariantGroupIntrinsic only handles launder and strip");

if (Result->getType()->getPointerAddressSpace() !=

return cast<Instruction>(Result);
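// foldCttzCtlz / foldCtpop: use known bits of the operand to fold the count
// to a constant, shrink it, or attach !range metadata to the call.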
494 "Expected cttz or ctlz intrinsic");
566 if (PossibleZeros == DefiniteZeros) {
583 if (
IT &&
IT->getBitWidth() != 1 && !II.
getMetadata(LLVMContext::MD_range)) {
597 "Expected ctpop intrinsic");
647 if ((~Known.
Zero).isPowerOf2())
648 return BinaryOperator::CreateLShr(
664 if (
IT->getBitWidth() != 1 && !II.
getMetadata(LLVMContext::MD_range)) {
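// simplifyNeonTbl1: convert an ARM/AArch64 tbl1 table lookup with a constant
// mask into a shufflevector.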
auto *VecTy = cast<FixedVectorType>(II.getType());
unsigned NumElts = VecTy->getNumElements();

// Only perform this transformation for <8 x i8> vector types.
if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
  return nullptr;

for (unsigned I = 0; I < NumElts; ++I) {
  if (!COp || !isa<ConstantInt>(COp))
    return nullptr;

  Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();

  // Make sure the mask indices are in range.
  if ((unsigned)Indexes[I] >= NumElts)
    return nullptr;
}
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.arg_size() >= NumOperands && "Not enough operands");
  assert(E.arg_size() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}

for (; BI != BE; ++BI) {
  if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
    if (I->isDebugOrPseudoInst() ||

return I.getIntrinsicID() == Intrinsic::vastart ||
       I.getIntrinsicID() == Intrinsic::vacopy;
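// canonicalizeConstantArg0ToArg1: for commutative intrinsics, move a constant
// first argument to the second position so later folds only check one side.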
assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
  Call.setArgOperand(0, Arg1);
  Call.setArgOperand(1, Arg0);
  return &Call;
}
return nullptr;

Instruction *
InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
  Value *OperationResult = nullptr;

switch (static_cast<unsigned>(Mask)) {

case ~fcZero & ~fcNan:

const ConstantInt *CMask = cast<ConstantInt>(Src1);

const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;

if ((OrderedMask == fcInf || OrderedInvertedMask == fcInf) &&
    (IsOrdered || IsUnordered) && !IsStrict) {

  if (OrderedInvertedMask == fcInf)

    (IsOrdered || IsUnordered) && !IsStrict) {

    (IsOrdered || IsUnordered) && !IsStrict) {

if (Mask == fcNan && !IsStrict) {

if (!IsStrict && (IsOrdered || IsUnordered) &&

return std::nullopt;

std::optional<bool> Known1 = getKnownSign(Op1, CxtI, DL, AC, DT);

std::optional<bool> Known0 = getKnownSign(Op0, CxtI, DL, AC, DT);

return *Known0 == *Known1;
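// moveAddAfterMinMax: try to canonicalize min/max(X + C0, C1) as
// min/max(X, C1 - C0) + C0 when the add cannot wrap.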
assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
        MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
       "Expected a min or max intrinsic");

const APInt *C0, *C1;

// Check for necessary no-wrap and overflow constraints.
bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
auto *Add = cast<BinaryOperator>(Op0);
if ((IsSigned && !Add->hasNoSignedWrap()) ||
    (!IsSigned && !Add->hasNoUnsignedWrap()))
  return nullptr;

APInt CDiff =
    IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
assert(!Overflow && "Expected simplify of min/max");

return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
                : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
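// matchSAddSubSat: an add/sub clamped by smin/smax to the range of a narrower
// type can be replaced by sadd.sat/ssub.sat on that narrower type.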
const APInt *MinValue, *MaxValue;

} else if (match(&MinMax1,

// Check that the constants clamp a saturate.
if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
  return nullptr;

unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;

if (AddSub->getOpcode() == Instruction::Add)
  IntrinsicID = Intrinsic::sadd_sat;
else if (AddSub->getOpcode() == Instruction::Sub)
  IntrinsicID = Intrinsic::ssub_sat;
else
  return nullptr;
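// foldClampRangeOfTwo: a clamp such as max(min(X, 42), 41) can only produce
// one of two values, so rewrite it as a select of constants.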
const APInt *C0, *C1;

case Intrinsic::smax:

case Intrinsic::smin:

case Intrinsic::umax:

case Intrinsic::umin:
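// reassociateMinMaxWithConstants: if this min/max has a constant operand and
// a matching min/max operand that also has a constant operand, fold the two
// constants into one.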
if (!LHS || LHS->getIntrinsicID() != MinMaxID)
  return nullptr;

    {LHS->getArgOperand(0), NewC});
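// reassociateMinMaxWithConstantInOperand: push the constant operand into a
// matching inner min/max so that constants can combine and enable more folds.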
auto *InnerMM = dyn_cast<IntrinsicInst>(Inner);
if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
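// factorizeMinMaxTree: reduce a sequence of min/max intrinsics that share a
// common operand.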
if (!LHS || !RHS ||
    LHS->getIntrinsicID() != MinMaxID || RHS->getIntrinsicID() != MinMaxID ||

Value *MinMaxOp = nullptr;
Value *ThirdOp = nullptr;

if (D == A || C == A) {

} else if (D == B || C == B) {

if (D == A || D == B) {

} else if (C == A || C == B) {

if (!MinMaxOp || !ThirdOp)
  return nullptr;
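// foldShuffledIntrinsicOperands: if every argument of the intrinsic is a
// unary shuffle with the same mask, apply the intrinsic first and shuffle its
// result instead.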
case Intrinsic::smax:
case Intrinsic::smin:
case Intrinsic::umax:
case Intrinsic::umin:
case Intrinsic::fma:
case Intrinsic::fshl:
case Intrinsic::fshr:
  break;

Type *SrcTy = X->getType();
for (unsigned i = 1, e = II->arg_size(); i != e; ++i) {

      X->getType() != SrcTy)
    return nullptr;

Instruction *FPI = isa<FPMathOperator>(II) ? II : nullptr;
Value *NewIntrinsic =
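// foldBitOrderCrossLogicOp: move a bswap/bitreverse across a bitwise logic
// operation when that lets it cancel with a matching bswap/bitreverse.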
template <Intrinsic::ID IntrID>
  static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
                "This helper only supports BSWAP and BITREVERSE intrinsics");

      isa<BinaryOperator>(V)) {
    Value *OldReorderX, *OldReorderY;
if (!II)
  return visitCallBase(CI);

if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
  if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
    if (NumBytes->isNegative() ||
        (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
      assert(AMI->getType()->isVoidTy() &&
             "non void atomic unordered mem intrinsic");
if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
  bool Changed = false;

  // memmove/cpy/set of zero bytes is a noop.
  if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
    if (NumBytes->isNullValue())
      return eraseInstFromFunction(CI);
  }

  // No other transformations apply to volatile transfers.
  if (auto *M = dyn_cast<MemIntrinsic>(MI))
    if (M->isVolatile())
      return nullptr;

  // If we have a memmove and the source operation is a constant global,
  // then the source and dest pointers can't alias, so we can change this
  // into a call to memcpy.
  if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
    if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
      if (GVSrc->isConstant()) {
        Intrinsic::ID MemCpyID =
            isa<AtomicMemMoveInst>(MMI)
                ? Intrinsic::memcpy_element_unordered_atomic
                : Intrinsic::memcpy;

  // memmove(x,x,size) -> noop.
  if (MTI->getSource() == MTI->getDest())
    return eraseInstFromFunction(CI);

  if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {

  } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {

  if (Changed)
    return II;
if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
  auto VWidth = IIFVTy->getNumElements();
  APInt UndefElts(VWidth, 0);

if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
case Intrinsic::objectsize: {

      &InsertedInstructions)) {
    for (Instruction *Inserted : InsertedInstructions)

case Intrinsic::abs: {

  bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();

  if (std::optional<bool> Known =

case Intrinsic::umin: {

    "Expected simplify of umin with max constant");
case Intrinsic::umax: {

      (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {

case Intrinsic::smax:
case Intrinsic::smin: {

      (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {

if ((IID == Intrinsic::umin || IID == Intrinsic::smax) &&

  return BinaryOperator::CreateAnd(I0, I1);

if ((IID == Intrinsic::umax || IID == Intrinsic::smin) &&

  return BinaryOperator::CreateOr(I0, I1);

if (IID == Intrinsic::smax || IID == Intrinsic::smin) {

bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax;
bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin;

if (IID == Intrinsic::smax || IID == Intrinsic::smin) {

  if (KnownSign == std::nullopt) {

  } else if (*KnownSign) {

return BinaryOperator::CreateOr(I0, X);

if (I0->hasOneUse() && !I1->hasOneUse())

if (IID == Intrinsic::smin || IID == Intrinsic::umax)
case Intrinsic::bitreverse: {

    X->getType()->isIntOrIntVectorTy(1)) {

  if (Instruction *crossLogicOpFold =
          foldBitOrderCrossLogicOp<Intrinsic::bitreverse>(IIOperand, Builder))
    return crossLogicOpFold;

case Intrinsic::bswap: {

      cast<BinaryOperator>(IIOperand)->getOpcode() == Instruction::Shl

  if (BW - LZ - TZ == 8) {
    assert(LZ != TZ && "active byte cannot be in the middle");

    return BinaryOperator::CreateNUWShl(

    return BinaryOperator::CreateExactLShr(

  unsigned C = X->getType()->getScalarSizeInBits() - BW;

  if (Instruction *crossLogicOpFold =
          foldBitOrderCrossLogicOp<Intrinsic::bswap>(IIOperand, Builder)) {
    return crossLogicOpFold;
  }
case Intrinsic::masked_load:
  if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
    return replaceInstUsesWith(CI, SimplifiedMaskedOp);
  break;
case Intrinsic::masked_store:
  return simplifyMaskedStore(*II);
case Intrinsic::masked_gather:
  return simplifyMaskedGather(*II);
case Intrinsic::masked_scatter:
  return simplifyMaskedScatter(*II);
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:

case Intrinsic::powi:

  // powi(x, -1) is 1/x.
  if (Power->isMinusOne())

  // powi(x, 2) is x*x.
  if (Power->equalsInt(2))

  if (!Power->getValue()[0]) {
case Intrinsic::cttz:
case Intrinsic::ctlz:

case Intrinsic::ctpop:

case Intrinsic::fshl:
case Intrinsic::fshr: {

  if (ModuloC != ShAmtC)

      "Shift amount expected to be modulo bitwidth");

  if (IID == Intrinsic::fshr) {

  assert(IID == Intrinsic::fshl &&
         "All funnel shifts by simple constants should go left");

    return BinaryOperator::CreateShl(Op0, ShAmtC);

    return BinaryOperator::CreateLShr(Op1,

case Intrinsic::ptrmask: {

  Value *InnerPtr, *InnerMask;
  bool Changed = false;

      "Mask types must match");

  unsigned NewAlignmentLog =
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow: {
  if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
    return I;

  const APInt *C0, *C1;

  bool IsSigned = IID == Intrinsic::sadd_with_overflow;

  APInt NewC =
      IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);

case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
case Intrinsic::usub_with_overflow:
  if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
    return I;
  break;

case Intrinsic::ssub_with_overflow: {
  if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
    return I;
case Intrinsic::uadd_sat:
case Intrinsic::sadd_sat:
case Intrinsic::usub_sat:
case Intrinsic::ssub_sat: {
  SaturatingInst *SI = cast<SaturatingInst>(II);
  Type *Ty = SI->getType();
  Value *Arg0 = SI->getLHS();
  Value *Arg1 = SI->getRHS();

      C->isNotMinSignedValue()) {

          Intrinsic::sadd_sat, Arg0, NegVal));

  if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {

    const APInt *Val, *Val2;

        IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
    if (Other->getIntrinsicID() == IID &&

      NewVal = Val->sadd_ov(*Val2, Overflow);
case Intrinsic::minnum:
case Intrinsic::maxnum:
case Intrinsic::minimum:
case Intrinsic::maximum: {

  case Intrinsic::maxnum:
    NewIID = Intrinsic::minnum;
    break;
  case Intrinsic::minnum:
    NewIID = Intrinsic::maxnum;
    break;
  case Intrinsic::maximum:
    NewIID = Intrinsic::minimum;
    break;
  case Intrinsic::minimum:
    NewIID = Intrinsic::maximum;
    break;

  Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {

  case Intrinsic::maxnum:

  case Intrinsic::minnum:

  case Intrinsic::maximum:

  case Intrinsic::minimum:

    X->getType() == Y->getType()) {

if (IID == Intrinsic::minimum || IID == Intrinsic::minnum)

case Intrinsic::matrix_multiply: {

  Value *OpNotNeg, *NegatedOp;
  unsigned NegatedOpArg, OtherOpArg;

  NewArgs[NegatedOpArg] = OpNotNeg;

case Intrinsic::fmuladd: {

  FAdd->copyFastMathFlags(II);

case Intrinsic::fma: {

  FAdd->copyFastMathFlags(II);

case Intrinsic::copysign: {

case Intrinsic::fabs: {

  if (isa<Constant>(TVal) && isa<Constant>(FVal)) {

  Value *Magnitude, *Sign;

case Intrinsic::ceil:
case Intrinsic::floor:
case Intrinsic::round:
case Intrinsic::roundeven:
case Intrinsic::nearbyint:
case Intrinsic::rint:
case Intrinsic::trunc: {

case Intrinsic::cos:
case Intrinsic::amdgcn_cos: {

case Intrinsic::sin: {

  Instruction *FNeg = UnaryOperator::CreateFNeg(NewSin);

case Intrinsic::ldexp: {

    Exp->getType() == InnerExp->getType()) {

  FastMathFlags InnerFlags = cast<FPMathOperator>(Src)->getFastMathFlags();
case Intrinsic::ptrauth_auth:
case Intrinsic::ptrauth_resign: {

  bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign;

  Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr;

  if (AuthKey && NeedSign) {
    NewIntrin = Intrinsic::ptrauth_resign;
  } else if (AuthKey) {
    NewIntrin = Intrinsic::ptrauth_auth;
  } else if (NeedSign) {
    NewIntrin = Intrinsic::ptrauth_sign;
case Intrinsic::arm_neon_vtbl1:
case Intrinsic::aarch64_neon_tbl1:

case Intrinsic::arm_neon_vmulls:
case Intrinsic::arm_neon_vmullu:
case Intrinsic::aarch64_neon_smull:
case Intrinsic::aarch64_neon_umull: {

  if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {

  bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
               IID == Intrinsic::aarch64_neon_umull);

  if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
    if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {

  if (Constant *CV1 = dyn_cast<Constant>(Arg1))
    if (ConstantInt *Splat =
            dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))

case Intrinsic::arm_neon_aesd:
case Intrinsic::arm_neon_aese:
case Intrinsic::aarch64_crypto_aesd:
case Intrinsic::aarch64_crypto_aese: {
case Intrinsic::hexagon_V6_vandvrt:
case Intrinsic::hexagon_V6_vandvrt_128B: {

  if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {

    if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
        ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
      break;

    if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
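// A stackrestore paired with a stacksave in the same block is dead if nothing
// in between allocates or could observe the stack pointer; a stackrestore
// immediately before a return is dead as well.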
case Intrinsic::stackrestore: {
  enum class ClassifyResult {
    None,
    Alloca,
    StackRestore,
    CallWithSideEffects,
  };

  if (isa<AllocaInst>(I))
    return ClassifyResult::Alloca;

  if (auto *CI = dyn_cast<CallInst>(I)) {
    if (auto *II = dyn_cast<IntrinsicInst>(CI)) {
      if (II->getIntrinsicID() == Intrinsic::stackrestore)
        return ClassifyResult::StackRestore;

      return ClassifyResult::CallWithSideEffects;
    }
    return ClassifyResult::CallWithSideEffects;
  }

  return ClassifyResult::None;

  if (SS->getIntrinsicID() == Intrinsic::stacksave &&

    bool CannotRemove = false;
    for (++BI; &*BI != II; ++BI) {
      switch (Classify(&*BI)) {
      case ClassifyResult::None:
        break;

      case ClassifyResult::StackRestore:
        if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS)
          CannotRemove = true;
        break;

      case ClassifyResult::Alloca:
      case ClassifyResult::CallWithSideEffects:
        CannotRemove = true;
        break;
      }
    }

  bool CannotRemove = false;
  for (++BI; &*BI != TI; ++BI) {
    switch (Classify(&*BI)) {
    case ClassifyResult::None:
      break;

    case ClassifyResult::StackRestore:

    case ClassifyResult::Alloca:
    case ClassifyResult::CallWithSideEffects:
      CannotRemove = true;
      break;
    }
  }

  if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
case Intrinsic::lifetime_end:

    return I.getIntrinsicID() == Intrinsic::lifetime_start;

case Intrinsic::assume: {

  assert(isa<AssumeInst>(Assume));

  if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
    return RemoveConditionFromAssume(Next);

    return RemoveConditionFromAssume(II);

  if (OBU.getTagName() == "separate_storage") {

    auto MaybeSimplifyHint = [&](const Use &U) {
      Value *Hint = U.get();

    MaybeSimplifyHint(OBU.Inputs[0]);
    MaybeSimplifyHint(OBU.Inputs[1]);

    Replacement->insertBefore(Next);

    return RemoveConditionFromAssume(II);

  if (auto *Replacement =

    Replacement->insertAfter(II);

    return RemoveConditionFromAssume(II);

  if (BOI.End - BOI.Begin > 2)

  if (BOI.End - BOI.Begin > 0) {

  if (BOI.End - BOI.Begin > 0)

  if (BOI.End - BOI.Begin > 1)
case Intrinsic::experimental_guard: {

  Value *NextCond = nullptr;
  if (match(NextInst,
            m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {

    if (CurrCond != NextCond) {

      while (MoveI != NextInst) {
case Intrinsic::vector_insert: {

  auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
  auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
  auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());

  if (DstTy && VecTy && SubVecTy) {
    unsigned DstNumElts = DstTy->getNumElements();
    unsigned VecNumElts = VecTy->getNumElements();
    unsigned SubVecNumElts = SubVecTy->getNumElements();
    unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

    if (VecNumElts == SubVecNumElts)

    for (i = 0; i != SubVecNumElts; ++i)

    for (; i != VecNumElts; ++i)

    for (unsigned i = 0; i != IdxN; ++i)

    for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)

    for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
case Intrinsic::vector_extract: {

  unsigned ExtractIdx = cast<ConstantInt>(Idx)->getZExtValue();
  Value *InsertTuple, *InsertIdx, *InsertValue;
  if (match(Vec, m_Intrinsic<Intrinsic::vector_insert>(m_Value(InsertTuple),

      InsertValue->getType() == ReturnType) {
    unsigned Index = cast<ConstantInt>(InsertIdx)->getZExtValue();

    if (ExtractIdx == Index)

  auto *DstTy = dyn_cast<VectorType>(ReturnType);
  auto *VecTy = dyn_cast<VectorType>(Vec->getType());

  if (DstTy && VecTy) {
    auto DstEltCnt = DstTy->getElementCount();
    auto VecEltCnt = VecTy->getElementCount();
    unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

    if (DstEltCnt == VecTy->getElementCount()) {

    if (VecEltCnt.isScalable() || DstEltCnt.isScalable())
      break;

    for (unsigned i = 0; i != DstEltCnt.getKnownMinValue(); ++i)
      Mask.push_back(IdxN + i);
case Intrinsic::experimental_vector_reverse: {

  auto *OldBinOp = cast<BinaryOperator>(Vec);

        OldBinOp->getOpcode(), X, Y, OldBinOp, OldBinOp->getName(), II));

        OldBinOp->getOpcode(), X, BO1, OldBinOp, OldBinOp->getName(), II));

        OldBinOp->getOpcode(), BO0, Y, OldBinOp, OldBinOp->getName(), II));

  auto *OldUnOp = cast<UnaryOperator>(Vec);

      OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(), II);
case Intrinsic::vector_reduce_or:
case Intrinsic::vector_reduce_and: {

  if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))

  if (IID == Intrinsic::vector_reduce_and) {

    assert(IID == Intrinsic::vector_reduce_or && "Expected or reduction.");

case Intrinsic::vector_reduce_add: {
  if (IID == Intrinsic::vector_reduce_add) {

    if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))

        cast<Instruction>(Arg)->getOpcode() == Instruction::SExt)

case Intrinsic::vector_reduce_xor: {
  if (IID == Intrinsic::vector_reduce_xor) {

    if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))

case Intrinsic::vector_reduce_mul: {
  if (IID == Intrinsic::vector_reduce_mul) {

    if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))

case Intrinsic::vector_reduce_umin:
case Intrinsic::vector_reduce_umax: {
  if (IID == Intrinsic::vector_reduce_umin ||
      IID == Intrinsic::vector_reduce_umax) {

    if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))

    Value *Res = IID == Intrinsic::vector_reduce_umin

case Intrinsic::vector_reduce_smin:
case Intrinsic::vector_reduce_smax: {
  if (IID == Intrinsic::vector_reduce_smin ||
      IID == Intrinsic::vector_reduce_smax) {

    if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))

      ExtOpc = cast<CastInst>(Arg)->getOpcode();
    Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
                  (ExtOpc == Instruction::CastOps::ZExt))

case Intrinsic::vector_reduce_fmax:
case Intrinsic::vector_reduce_fmin:
case Intrinsic::vector_reduce_fadd:
case Intrinsic::vector_reduce_fmul: {
  bool CanBeReassociated = (IID != Intrinsic::vector_reduce_fadd &&
                            IID != Intrinsic::vector_reduce_fmul) ||

  const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||
                           IID == Intrinsic::vector_reduce_fmul)

  if (!isa<FixedVectorType>(Arg->getType()) || !CanBeReassociated ||

      !cast<ShuffleVectorInst>(Arg)->isSingleSource())

  int Sz = Mask.size();

  for (int Idx : Mask) {

  if (UsedIndices.all()) {
case Intrinsic::is_fpclass: {

case Intrinsic::ctlz:
case Intrinsic::cttz:
case Intrinsic::ctpop:
case Intrinsic::umin:
case Intrinsic::umax:
case Intrinsic::smin:
case Intrinsic::smax:
case Intrinsic::usub_sat:
case Intrinsic::uadd_sat:
case Intrinsic::ssub_sat:
case Intrinsic::sadd_sat:

  if (auto *Sel = dyn_cast<SelectInst>(Op))

return visitCallBase(*II);
if (FI1SyncScope != FI2->getSyncScopeID() ||

if (NFI && isIdenticalOrStrongerFence(NFI, &FI))

  if (isIdenticalOrStrongerFence(PFI, &FI))

return visitCallBase(II);

return visitCallBase(CBI);

if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
if (Underlying != TrampMem &&
    (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
  return nullptr;
if (!isa<AllocaInst>(Underlying))
  return nullptr;

    InitTrampoline = II;

if (!InitTrampoline)
  return nullptr;

if (InitTrampoline->getOperand(0) != TrampMem)
  return nullptr;

return InitTrampoline;

Callee = Callee->stripPointerCasts();
IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
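// annotateAnyAllocSite: add return attributes (dereferenceable, align) to an
// allocation call based on its constant size and alignment arguments.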
bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,
                                            const TargetLibraryInfo *TLI) {

  bool Changed = false;

  if (!Call.getType()->isPointerTy())
    return Changed;

  if (Call.hasRetAttr(Attribute::NonNull)) {
    Changed = !Call.hasRetAttr(Attribute::Dereferenceable);
    Call.addRetAttr(Attribute::getWithDereferenceableBytes(
        Call.getContext(), Size->getLimitedValue()));
  } else {
    Changed = !Call.hasRetAttr(Attribute::DereferenceableOrNull);
    Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
        Call.getContext(), Size->getLimitedValue()));
  }

  ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment);

  Align ExistingAlign = Call.getRetAlign().valueOrOne();

  if (NewAlign > ExistingAlign) {
bool Changed = annotateAnyAllocSite(Call, &TLI);

if (V->getType()->isPointerTy() &&
    !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&

assert(ArgNo == Call.arg_size() && "Call arguments not processed correctly.");

if (!ArgNos.empty()) {

  Call.setAttributes(AS);

Function *CalleeF = dyn_cast<Function>(Callee);

    transformConstExprCastCall(Call))

LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call

Call.setNotConvergent();

if (isa<CallInst>(OldCall))

cast<CallBase>(OldCall)->setCalledFunction(

if ((isa<ConstantPointerNull>(Callee) &&

    isa<UndefValue>(Callee)) {

  if (!Call.getType()->isVoidTy())

  if (Call.isTerminator()) {
return transformCallThroughTrampoline(Call, *II);

if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {

  if (!IA->canThrow()) {

    Call.setDoesNotThrow();

if (CallInst *CI = dyn_cast<CallInst>(&Call)) {

if (!Call.use_empty() && !Call.isMustTailCall())
  if (Value *ReturnedArg = Call.getReturnedArgOperand()) {

    Type *RetArgTy = ReturnedArg->getType();

if (Bundle && !Call.isIndirectCall()) {

  ConstantInt *ExpectedType = cast<ConstantInt>(Bundle->Inputs[0]);

    FunctionType = mdconst::extract<ConstantInt>(MD->getOperand(0));

    dbgs() << Call.getModule()->getName()
           << ": warning: kcfi: " << Call.getCaller()->getName()
           << ": call to " << CalleeF->getName()
           << " using a mismatching function pointer type\n";
switch (Call.getIntrinsicID()) {
case Intrinsic::experimental_gc_statepoint: {

  if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {

  if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {

    if (isa<ConstantPointerNull>(DerivedPtr)) {

  LiveGcValues.insert(BasePtr);
  LiveGcValues.insert(DerivedPtr);

  std::optional<OperandBundleUse> Bundle =
      Call.getOperandBundle(LLVMContext::OB_gc_live);
  unsigned NumOfGCLives = LiveGcValues.size();
  if (!Bundle || NumOfGCLives == Bundle->Inputs.size())
std::vector<Value *> NewLiveGc;
for (Value *V : Bundle->Inputs) {
  if (Val2Idx.count(V))
    continue;
  if (LiveGcValues.count(V)) {
    Val2Idx[V] = NewLiveGc.size();
    NewLiveGc.push_back(V);
  } else
    Val2Idx[V] = NumOfGCLives;
}

assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
       "Missed live gc for base pointer");

assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
       "Missed live gc for derived pointer");
return Changed ? &Call : nullptr;
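// transformConstExprCastCall: if the callee is a constant-expression cast of
// a function, try to rewrite the call to match the callee's real signature so
// the cast becomes unnecessary.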
bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
  auto *Callee =
      dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());

  assert(!isa<CallBrInst>(Call) &&
         "CallBr's don't have a single point after a def to insert at");

  if (Callee->hasFnAttribute("thunk"))
    return false;

  if (Call.isMustTailCall())
    return false;

  Type *NewRetTy = FT->getReturnType();

  // Check to see if we are changing the return type.
  if (OldRetTy != NewRetTy) {

    if (Callee->isDeclaration())
      return false;

    if (!Caller->use_empty() &&

    if (!Caller->use_empty()) {
      BasicBlock *PhisNotSupportedBlock = nullptr;
      if (auto *II = dyn_cast<InvokeInst>(Caller))
        PhisNotSupportedBlock = II->getNormalDest();
      if (PhisNotSupportedBlock)
        for (User *U : Caller->users())
          if (PHINode *PN = dyn_cast<PHINode>(U))
            if (PN->getParent() == PhisNotSupportedBlock)
              return false;
    }
  }

  unsigned NumActualArgs = Call.arg_size();
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))
    return false;

  auto AI = Call.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (Call.isInAllocaArgument(i) ||

        Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))
      return false;
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness
    // of the call.
    if (FT->isVarArg() != Call.getFunctionType()->isVarArg())
      return false;

    if (FT->isVarArg() && Call.getFunctionType()->isVarArg() &&
        FT->getNumParams() != Call.getFunctionType()->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&

  Args.reserve(NumActualArgs);
  ArgAttrs.reserve(NumActualArgs);

  AI = Call.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    Value *NewArg = *AI;
    if ((*AI)->getType() != ParamTy)

    Args.push_back(NewArg);
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {

  if (FT->getNumParams() < NumActualArgs) {
    if (FT->isVarArg()) {
      // Add all of the remaining arguments in their promoted form.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {

        Value *NewArg = *AI;
        if (PTy != (*AI)->getType()) {

        Args.push_back(NewArg);

  assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
         "missing argument attributes");

  Call.getOperandBundlesAsDefs(OpBundles);

  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
                                   II->getUnwindDest(), Args, OpBundles);
  } else {
    NewCall = Builder.CreateCall(Callee, Args, OpBundles);
    cast<CallInst>(NewCall)->setTailCallKind(
        cast<CallInst>(Caller)->getTailCallKind());
  }

  NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});

  // Insert a cast of the return type as necessary.
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {

      NC->setDebugLoc(Caller->getDebugLoc());

      assert(OptInsertPt && "No place to insert cast");

  if (!Caller->use_empty())

  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
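// transformCallThroughTrampoline: a call through llvm.adjust.trampoline whose
// llvm.init.trampoline is known can call the nested function directly,
// passing the static chain as the 'nest' argument.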
Instruction *
InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
                                                 IntrinsicInst &Tramp) {

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  unsigned NestArgNo = 0;
  Type *NestTy = nullptr;

  // Look for a parameter marked with the 'nest' attribute.
  for (FunctionType::param_iterator I = NestFTy->param_begin(),
                                    E = NestFTy->param_end();
       I != E; ++NestArgNo, ++I) {

  std::vector<Value *> NewArgs;
  std::vector<AttributeSet> NewArgAttrs;
  NewArgs.reserve(Call.arg_size() + 1);
  NewArgAttrs.reserve(Call.arg_size());

  auto I = Call.arg_begin(), E = Call.arg_end();
  if (ArgNo == NestArgNo) {
    if (NestVal->getType() != NestTy)

    NewArgs.push_back(NestVal);
    NewArgAttrs.push_back(NestAttr);
  }

  NewArgs.push_back(*I);
  NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));

  std::vector<Type *> NewTypes;
  NewTypes.reserve(FTy->getNumParams() + 1);

      E = FTy->param_end();

  if (ArgNo == NestArgNo)
    NewTypes.push_back(NestTy);

  NewTypes.push_back(*I);

      Attrs.getRetAttrs(), NewArgAttrs);

  Call.getOperandBundlesAsDefs(OpBundles);

  if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
                                   II->getUnwindDest(), NewArgs, OpBundles);
    cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
                                   CBI->getIndirectDests(), NewArgs, OpBundles);
    cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
    cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
  } else {
    cast<CallInst>(NewCaller)->setTailCallKind(
        cast<CallInst>(Call).getTailCallKind());
    cast<CallInst>(NewCaller)->setCallingConv(
        cast<CallInst>(Call).getCallingConv());
    cast<CallInst>(NewCaller)->setAttributes(NewPAL);
  }

  Call.setCalledFunction(FTy, NestF);