#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#define DEBUG_TYPE "instcombine"

using namespace llvm;
using namespace PatternMatch;

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window", cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));
/// Return the specified type promoted as it would be to pass though a va_arg
/// area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
/// Recognize a memcpy/memmove from a trivially otherwise unused alloca.
static bool hasUndefSource(AnyMemTransferInst *MI) {
  auto *Src = MI->getRawSource();
  while (isa<GetElementPtrInst>(Src) || isa<BitCastInst>(Src)) {
    if (!Src->hasOneUse())
      return false;
    Src = cast<Instruction>(Src)->getOperand(0);
  }
  return isa<AllocaInst>(Src) && Src->hasOneUse();
}
Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
  // Raise the known destination and source alignments when possible.
  // ...
  if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    return MI;
  }
  // ...
  if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    return MI;
  }
  // If the transfer has a small constant length, lower it to an integer
  // load/store pair.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength)
    return nullptr;
  // ...
  assert(Size && "0-sized memory transferring should be removed already.");
  // ...
  // For atomic transfers, the access must stay element-aligned.
  if (isa<AtomicMemTransferInst>(MI))
    if (*CopyDstAlign < Size || *CopySrcAlign < Size)
      return nullptr;
  // ...
  unsigned SrcAddrSp =
      cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
      cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
  // Propagate TBAA info onto the new load/store if possible.
  MDNode *CopyMD = nullptr;
  if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
    CopyMD = M;
  } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
    // A tbaa.struct tag with a single field covering the entire copy can be
    // reused as a plain tbaa tag.
    if (M->getNumOperands() == 3 && M->getOperand(0) &&
        mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
        mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
        M->getOperand(1) &&
        mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
        mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
            Size &&
        M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
      CopyMD = cast<MDNode>(M->getOperand(2));
  }
  // Build the replacement load/store and carry over the relevant metadata.
  // ...
  L->setAlignment(*CopySrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  MDNode *LoopMemParallelMD =
      MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                   LoopMemParallelMD);
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
  if (AccessGroupMD)
    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  // ... (create the store S, mirroring the metadata)
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                   LoopMemParallelMD);
  if (AccessGroupMD)
    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
  // ...
  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    // Non-atomic transfers can be volatile; preserve that on both sides.
    L->setVolatile(MT->isVolatile());
    S->setVolatile(MT->isVolatile());
  }
  if (isa<AtomicMemTransferInst>(MI)) {
    // Atomic transfers lower to unordered atomic load/store.
    L->setOrdering(AtomicOrdering::Unordered);
    S->setOrdering(AtomicOrdering::Unordered);
  }
  // ...
}
Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
  // Raise the known destination alignment when possible.
  const Align KnownAlignment =
      getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  MaybeAlign MemSetAlign = MI->getDestAlign();
  if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
    MI->setDestAlignment(KnownAlignment);
    return MI;
  }
  // ...
  // A memset of undef is a no-op apart from its returned pointer.
  if (isa<UndefValue>(MI->getValue())) {
    // ...
  }
  // Lower a small constant-length memset to a single wide store.
  assert(Len && "0-sized memory setting should be removed already.");
  const Align Alignment = MI->getDestAlign().valueOrOne();
  // An unaligned atomic store would become a libcall; not a clear win.
  if (isa<AtomicMemSetInst>(MI))
    if (Alignment < Len)
      return nullptr;
  // ...
  unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
  // ...
  // Keep debug-info users of the fill value in sync with the new store.
  if (any_of(DAI->location_ops(), [&](Value *V) { return V == FillC; }))
    DAI->replaceVariableLocationOp(FillC, FillVal);
  // ...
  if (isa<AtomicMemSetInst>(MI))
    S->setOrdering(AtomicOrdering::Unordered);
  // ...
}
// Simplify a masked load when the mask or pointer lets us use a plain load.
Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) {
  Value *LoadPtr = II.getArgOperand(0);
  const Align Alignment =
      cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
  // ...
  // If we can unconditionally load from this address, replace with a
  // load/select idiom.
  if (isDereferenceablePointer(LoadPtr, II.getType(),
                               II.getModule()->getDataLayout(), &II, &AC)) {
    LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                             "unmaskedload");
    LI->copyMetadata(II);
    return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
  }
  return nullptr;
}
// A masked store with a constant mask can be dropped or simplified.
Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
  // ...
  // If the mask is all zeros, this instruction does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);
  // If the mask is all ones, this is a plain vector store of the 1st argument.
  if (ConstMask->isAllOnesValue()) {
    // ...
  }
  // The demanded-element analysis below does not apply to scalable vectors.
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;
  // ...
}
// A gather from a splat pointer with an all-ones mask reloads the same
// scalar for every lane, so load it once and splat it.
Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
  // ...
  if (ConstMask->isAllOnesValue())
    if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) {
      auto *VecTy = cast<VectorType>(II.getType());
      const Align Alignment =
          cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
      LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(),
                                              SplatPtr, Alignment,
                                              "load.scalar");
      // ... (splat L across the result vector)
    }
  return nullptr;
}
// A masked scatter to a splat pointer collapses to a single scalar store.
Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
  // ...
  // If the mask is all zeros, a scatter does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);
  // scatter(splat(value), splat(ptr), non-zero-mask) -> store value, ptr
  // ...
      StoreInst *S =
          new StoreInst(SplatValue, SplatPtr, /*IsVolatile=*/false, Alignment);
  // ...
  // scatter(vector, splat(ptr), splat(true)) -> store the last lane.
  if (ConstMask->isAllOnesValue()) {
    // ... (extract the last lane of the value vector)
      StoreInst *S =
          new StoreInst(Extract, SplatPtr, /*IsVolatile=*/false, Alignment);
    // ...
  }
  // The demanded-element analysis below does not apply to scalable vectors.
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;
  // ...
}
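// Illustrative IR (assumed shapes): a scatter of a splat value to a splat
// pointer writes the same scalar to the same address in every lane, so it
// collapses to one store:
//   call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %splat.v,
//       <4 x ptr> %splat.p, i32 4, <4 x i1> %m)    ; %m not all-zero
// -->
//   store i32 %v, ptr %p, align 4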
/// This function transforms launder.invariant.group and strip.invariant.group
/// like: launder(launder(x)) -> launder(x)
static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
                                                    InstCombinerImpl &IC) {
  auto *Arg = II.getArgOperand(0);
  auto *StrippedArg = Arg->stripPointerCasts();
  auto *StrippedInvariantGroupsArg = StrippedArg;
  while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
    if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
        Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
      break;
    StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
  }
  if (StrippedArg == StrippedInvariantGroupsArg)
    return nullptr; // No launders/strips to remove.

  Value *Result = nullptr;
  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
  else
    llvm_unreachable(
        "simplifyInvariantGroupIntrinsic only handles launder and strip");
  // The stripped pointer may need casting back to the intrinsic's type.
  if (Result->getType()->getPointerAddressSpace() !=
      II.getType()->getPointerAddressSpace())
    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
  if (Result->getType() != II.getType())
    Result = IC.Builder.CreateBitCast(Result, II.getType());
  return cast<Instruction>(Result);
}
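// Illustrative IR (assumed shapes): nested invariant-group barriers collapse
// to a single barrier over the innermost pointer:
//   %a = call ptr @llvm.launder.invariant.group.p0(ptr %x)
//   %b = call ptr @llvm.launder.invariant.group.p0(ptr %a)
// -->
//   %b = call ptr @llvm.launder.invariant.group.p0(ptr %x)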
511 "Expected cttz or ctlz intrinsic");
579 if (PossibleZeros == DefiniteZeros) {
596 if (
IT &&
IT->getBitWidth() != 1 && !II.
getMetadata(LLVMContext::MD_range)) {
610 "Expected ctpop intrinsic");
660 if ((~Known.
Zero).isPowerOf2())
661 return BinaryOperator::CreateLShr(
677 if (
IT->getBitWidth() != 1 && !II.
getMetadata(LLVMContext::MD_range)) {
/// Convert a table lookup to shufflevector if the mask is constant.
static Value *simplifyNeonTbl1(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder) {
  // ... (bail out if the mask operand is not a Constant C)
  auto *VecTy = cast<FixedVectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  // Only perform this transformation for <8 x i8> vector types.
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
    return nullptr;

  int Indexes[8];
  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = C->getAggregateElement(I);
    if (!COp || !isa<ConstantInt>(COp))
      return nullptr;
    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();
    // Make sure the mask indices are in range.
    if ((unsigned)Indexes[I] >= NumElts)
      return nullptr;
  }
  // ... (emit the equivalent shufflevector)
}
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.arg_size() >= NumOperands && "Not enough operands");
  assert(E.arg_size() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}
static bool
removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
                          std::function<bool(const IntrinsicInst &)> IsStart) {
  // Scan backwards from the end intrinsic for a matching start intrinsic.
  BasicBlock::reverse_iterator BI(EndI), BE(EndI.getParent()->rend());
  for (; BI != BE; ++BI) {
    if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
      if (I->isDebugOrPseudoInst() ||
          I->getIntrinsicID() == EndI.getIntrinsicID())
        continue;
      // ... (erase both calls when a matching start with identical
      // operands is found)
    }
    break;
  }
  return false;
}

Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
  removeTriviallyEmptyRange(I, *this, [](const IntrinsicInst &I) {
    return I.getIntrinsicID() == Intrinsic::vastart ||
           I.getIntrinsicID() == Intrinsic::vacopy;
  });
  return nullptr;
}
static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
  assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
    Call.setArgOperand(0, Arg1);
    Call.setArgOperand(1, Arg0);
    return &Call;
  }
  return nullptr;
}
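// Illustrative example (assumed shapes): for commutative intrinsics this
// moves a constant to operand 1, e.g.
//   call i32 @llvm.umax.i32(i32 42, i32 %x)
//     --> call i32 @llvm.umax.i32(i32 %x, i32 42)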
Instruction *
InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
  WithOverflowInst *WO = cast<WithOverflowInst>(II);
  Value *OperationResult = nullptr;
  Constant *OverflowResult = nullptr;
  if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
                            WO->getRHS(), *WO, OperationResult,
                            OverflowResult))
    return createOverflowTuple(WO, OperationResult, OverflowResult);
  return nullptr;
}
/// Map an llvm.is.fpclass mask onto an equivalent fcmp-against-zero
/// predicate, where one exists.
static FCmpInst::Predicate fpclassTestIsFCmp0(FPClassTest Mask,
                                              const Function &F, Type *Ty) {
  switch (static_cast<unsigned>(Mask)) {
  // ...
  case ~fcZero & ~fcNan:
    // "not zero and not nan" can become fcmp one x, 0.0 when input denormals
    // are IEEE.
    // ...
  // ...
  }
  // ...
}

Instruction *InstCombinerImpl::foldIntrinsicIsFPClass(IntrinsicInst &II) {
  // ...
  const ConstantInt *CMask = cast<ConstantInt>(Src1);
  // ...
  const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;
  // ...
  // A non-strictfp test over a purely ordered or unordered class set can be
  // rewritten as an fcmp (this clause and its inverted twin below):
  //     ... (IsOrdered || IsUnordered) && !IsStrict) { ... }
  //     ... (IsOrdered || IsUnordered) && !IsStrict) { ... }
  // is.fpclass(x, fcNan) -> fcmp uno x, 0.0
  if (Mask == fcNan && !IsStrict) {
    // ...
  }
  // ...
  if (!IsStrict && (IsOrdered || IsUnordered) &&
      // ... (fpclassTestIsFCmp0 yields a usable predicate)
  // ...
}
/// Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0.
static Instruction *moveAddAfterMinMax(IntrinsicInst *II,
                                       InstCombiner::BuilderTy &Builder) {
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
          MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
         "Expected a min or max intrinsic");

  Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
  Value *X;
  const APInt *C0, *C1;
  if (!match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C0)))) ||
      !match(Op1, m_APInt(C1)))
    return nullptr;

  // Check for necessary no-wrap and overflow constraints.
  bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
  auto *Add = cast<BinaryOperator>(Op0);
  if ((IsSigned && !Add->hasNoSignedWrap()) ||
      (!IsSigned && !Add->hasNoUnsignedWrap()))
    return nullptr;

  // If the constant difference overflows, instsimplify should have reduced
  // the min/max already.
  bool Overflow;
  APInt CDiff =
      IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
  assert(!Overflow && "Expected simplify of min/max");

  // min/max (add X, C0), C1 --> add (min/max X, C1 - C0), C0
  Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);
  Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC);
  return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
                  : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
}
/// Match a sum clamped between the INT_MIN/INT_MAX of a narrower type and
/// replace it with a saturating add/sub on that type.
static Instruction *matchSAddSubSat(IntrinsicInst &MinMax1) {
  // ...
  const APInt *MinValue, *MaxValue;
  if (match(&MinMax1, m_SMin(m_Instruction(MinMax2), m_APInt(MaxValue)))) {
    // ...
  } else if (match(&MinMax1,
                   m_SMax(m_Instruction(MinMax2), m_APInt(MinValue)))) {
    // ...
  } else
    return nullptr;

  // Check that the constants clamp a saturate.
  if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
    return nullptr;
  // In what bit width can this be treated as saturating arithmetic?
  unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
  // ...
  Intrinsic::ID IntrinsicID;
  if (AddSub->getOpcode() == Instruction::Add)
    IntrinsicID = Intrinsic::sadd_sat;
  else if (AddSub->getOpcode() == Instruction::Sub)
    IntrinsicID = Intrinsic::ssub_sat;
  else
    return nullptr;
  // ... (truncate, emit the saturating intrinsic, extend back)
}
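// Illustrative IR (assumed shapes): clamping a widened i8 sum to [-128, 127]
// is an i8 saturating add:
//   %a = add i32 %sx, %sy                          ; %sx, %sy = sext i8 -> i32
//   %l = call i32 @llvm.smin.i32(i32 %a, i32 127)
//   %r = call i32 @llvm.smax.i32(i32 %l, i32 -128)
// -->
//   %s = call i8 @llvm.sadd.sat.i8(i8 %x, i8 %y)
//   %r = sext i8 %s to i32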
/// If we have a clamp pattern like max (min X, 42), 41 -- where the output
/// can only be one of two possible constant values -- turn that into a select
/// of constants.
static Instruction *foldClampRangeOfTwo(IntrinsicInst *II,
                                        InstCombiner::BuilderTy &Builder) {
  // ...
  const APInt *C0, *C1;
  // ...
  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
    // ...
  case Intrinsic::smin:
    // ...
  case Intrinsic::umax:
    // ...
  case Intrinsic::umin:
    // ...
  }
  // ...
}

/// If this min/max has a constant operand and an operand that is a matching
/// min/max with a constant operand, constant-fold the 2 constant operands.
static Value *reassociateMinMaxWithConstants(IntrinsicInst *II,
                                             IRBuilderBase &Builder) {
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  if (!LHS || LHS->getIntrinsicID() != MinMaxID)
    return nullptr;
  // ...
  // max (max X, C0), C1 --> max X, NewC
  return Builder.CreateIntrinsic(MinMaxID, II->getType(),
                                 {LHS->getArgOperand(0), NewC});
}
/// If this min/max has a matching min/max operand with a constant, try to
/// push the constant operand into this instruction.
static Instruction *
reassociateMinMaxWithConstantInOperand(IntrinsicInst *II,
                                       InstCombiner::BuilderTy &Builder) {
  // ...
  auto *InnerMM = dyn_cast<IntrinsicInst>(Inner);
  if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
      !match(InnerMM->getArgOperand(1), m_ImmConstant()))
    return nullptr;
  // ...
}
/// Reduce a sequence of min/max intrinsics with a common operand.
static Instruction *factorizeMinMaxTree(IntrinsicInst *II) {
  // Match 3 of the same min/max ops. Example: umin(umin(), umin()).
  auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  auto *RHS = dyn_cast<IntrinsicInst>(II->getArgOperand(1));
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
      RHS->getIntrinsicID() != MinMaxID ||
      (!LHS->hasOneUse() && !RHS->hasOneUse()))
    return nullptr;

  Value *A = LHS->getArgOperand(0);
  Value *B = LHS->getArgOperand(1);
  Value *C = RHS->getArgOperand(0);
  Value *D = RHS->getArgOperand(1);

  // Look for a common operand.
  Value *MinMaxOp = nullptr;
  Value *ThirdOp = nullptr;
  if (LHS->hasOneUse()) {
    // If the LHS is only used in this chain and the RHS is used outside of
    // it, reuse the RHS min/max because that will eliminate the LHS.
    if (D == A || C == A) {
      // min(min(a, b), min(c, a)) --> min(min(c, a), b)
      // min(min(a, b), min(a, d)) --> min(min(a, d), b)
      MinMaxOp = RHS;
      ThirdOp = B;
    } else if (D == B || C == B) {
      // min(min(a, b), min(c, b)) --> min(min(c, b), a)
      // min(min(a, b), min(b, d)) --> min(min(b, d), a)
      MinMaxOp = RHS;
      ThirdOp = A;
    }
  } else {
    assert(RHS->hasOneUse() && "Expected one-use operand");
    // Reuse the LHS. This will eliminate the RHS.
    if (D == A || D == B) {
      MinMaxOp = LHS;
      ThirdOp = C;
    } else if (C == A || C == B) {
      MinMaxOp = LHS;
      ThirdOp = D;
    }
  }

  if (!MinMaxOp || !ThirdOp)
    return nullptr;
  // ... (emit MinMaxID(MinMaxOp, ThirdOp))
}
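// Illustrative example (assumed shapes): with the common operand %a,
//   umin(umin(%a, %b), umin(%c, %a)) --> umin(umin(%c, %a), %b)
// which lets one of the two inner min calls die.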
/// If all arguments of the intrinsic are unary shuffles with the same mask,
/// try to shuffle after the intrinsic.
static Instruction *
foldShuffledIntrinsicOperands(IntrinsicInst *II,
                              InstCombiner::BuilderTy &Builder) {
  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::fma:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
    break;
  default:
    return nullptr;
  }

  // ... (the first operand must be a unary shuffle of X with mask Mask)

  // See if all arguments are shuffled with the same mask.
  SmallVector<Value *, 4> NewArgs(II->arg_size());
  NewArgs[0] = X;
  Type *SrcTy = X->getType();
  for (unsigned i = 1, e = II->arg_size(); i != e; ++i) {
    if (!match(II->getArgOperand(i),
               m_Shuffle(m_Value(X), m_Undef(), m_SpecificMask(Mask))) ||
        X->getType() != SrcTy)
      return nullptr;
    NewArgs[i] = X;
  }

  // intrinsic (shuf X, M), (shuf Y, M), ... --> shuf (intrinsic X, Y, ...), M
  Instruction *FPI = isa<FPMathOperator>(II) ? II : nullptr;
  Value *NewIntrinsic =
      Builder.CreateIntrinsic(II->getIntrinsicID(), SrcTy, NewArgs, FPI);
  return new ShuffleVectorInst(NewIntrinsic, Mask);
}
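// Illustrative IR (assumed shapes): identically-shuffled operands sink the
// shuffle past the intrinsic:
//   %sx = shufflevector <2 x i32> %x, <2 x i32> poison, <2 x i32> <i32 1, i32 0>
//   %sy = shufflevector <2 x i32> %y, <2 x i32> poison, <2 x i32> <i32 1, i32 0>
//   %r  = call <2 x i32> @llvm.smax.v2i32(<2 x i32> %sx, <2 x i32> %sy)
// -->
//   %m = call <2 x i32> @llvm.smax.v2i32(<2 x i32> %x, <2 x i32> %y)
//   %r = shufflevector <2 x i32> %m, <2 x i32> poison, <2 x i32> <i32 1, i32 0>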
/// Fold bswap/bitreverse through a bitwise logic op:
///   bswap(logic_op(bswap(x), y)) --> logic_op(x, bswap(y))
template <Intrinsic::ID IntrID>
static Instruction *foldBitOrderCrossLogicOp(Value *V,
                                             InstCombiner::BuilderTy &Builder) {
  static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
                "This helper only supports BSWAP and BITREVERSE intrinsics");

  Value *X, *Y;
  // Check for a BinaryOperator explicitly so we don't match a ConstantExpr.
  if (match(V, m_OneUse(m_BitwiseLogic(m_Value(X), m_Value(Y)))) &&
      isa<BinaryOperator>(V)) {
    Value *OldReorderX, *OldReorderY;
    BinaryOperator::BinaryOps Op = cast<BinaryOperator>(V)->getOpcode();

    // If both operands are already reordered, undo both (even with multiuse).
    if (match(X, m_Intrinsic<IntrID>(m_Value(OldReorderX))) &&
        match(Y, m_Intrinsic<IntrID>(m_Value(OldReorderY))))
      return BinaryOperator::Create(Op, OldReorderX, OldReorderY);

    // With one reordered one-use operand, reorder the other side instead.
    if (match(X, m_OneUse(m_Intrinsic<IntrID>(m_Value(OldReorderX))))) {
      Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, Y);
      return BinaryOperator::Create(Op, OldReorderX, NewReorder);
    }
    if (match(Y, m_OneUse(m_Intrinsic<IntrID>(m_Value(OldReorderY))))) {
      Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, X);
      return BinaryOperator::Create(Op, NewReorder, OldReorderY);
    }
  }
  return nullptr;
}
Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
  // ...
  auto *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II)
    return visitCallBase(CI);

  // For atomic unordered mem intrinsics, the length must be a non-negative
  // multiple of the element size; otherwise behavior is undefined.
  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
    if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
      if (NumBytes->isNegative() ||
          (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
        CreateNonTerminatorUnreachable(AMI);
        assert(AMI->getType()->isVoidTy() &&
               "non void atomic unordered mem intrinsic");
        return eraseInstFromFunction(*AMI);
      }

  // Intrinsics cannot occur in an invoke or a callbr, so handle them here
  // instead of in visitCallBase.
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return eraseInstFromFunction(CI);
    }

    // No other transformations apply to volatile transfers.
    if (auto *M = dyn_cast<MemIntrinsic>(MI))
      if (M->isVolatile())
        return nullptr;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Intrinsic::ID MemCpyID =
              isa<AtomicMemMoveInst>(MMI)
                  ? Intrinsic::memcpy_element_unordered_atomic
                  : Intrinsic::memcpy;
          // ... (retarget the callee to MemCpyID)
          Changed = true;
        }
    }

    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return eraseInstFromFunction(CI);
    }

    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      if (Instruction *I = SimplifyAnyMemTransfer(MTI))
        return I;
    } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
      if (Instruction *I = SimplifyAnyMemSet(MSI))
        return I;
    }

    if (Changed)
      return II;
  }

  // For fixed-width vector results, use the generic demanded-elements
  // analysis.
  if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
    auto VWidth = IIFVTy->getNumElements();
    APInt UndefElts(VWidth, 0);
    // ...
  }

  // ...
  if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
    // ... (a dead constrained FP intrinsic may still be removable)
  }
  Intrinsic::ID IID = II->getIntrinsicID();
  switch (IID) {
  case Intrinsic::objectsize:
    // ... (lower the objectsize call when the size can be computed)
    break;
  case Intrinsic::abs: {
    Value *IIOperand = II->getArgOperand(0);
    bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();
    // ...
    break;
  }
  // ...
  case Intrinsic::umin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    // umin(x, 1) == zext(x != 0)
    if (match(I1, m_One())) {
      assert(II->getType()->getScalarSizeInBits() != 1 &&
             "Expected simplify of umin with max constant");
      Value *Zero = Constant::getNullValue(I0->getType());
      Value *Cmp = Builder.CreateICmpNE(I0, Zero);
      return CastInst::Create(Instruction::ZExt, Cmp, II->getType());
    }
    [[fallthrough]];
  }
  case Intrinsic::umax: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    Value *X, *Y;
    // Narrow an unsigned min/max of zext'd operands to the source type.
    if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_ZExt(m_Value(Y))) &&
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
      Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
      return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
    }
    // ...
    [[fallthrough]];
  }
  case Intrinsic::smax:
  case Intrinsic::smin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    Value *X, *Y;
    // Likewise, narrow a signed min/max of sext'd operands.
    if (match(I0, m_SExt(m_Value(X))) && match(I1, m_SExt(m_Value(Y))) &&
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
      Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
      return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
    }
    // ...
    if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
      // smax (neg nsw X), (neg nsw Y) --> neg nsw (smin X, Y)
      // smin (neg nsw X), (neg nsw Y) --> neg nsw (smax X, Y)
      // ...
    }
    // ...
    // min/max (X, (xor X, Pow2)) can become a bit set/clear when the sign of
    // X is known:
    bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax;
    bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin;
    if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
      auto KnownSign = getKnownSign(X, II, DL, &AC, &DT);
      if (KnownSign == std::nullopt) {
        UseOr = false;
        UseAndN = false;
      } else if (*KnownSign) {
        // A known-negative X flips which of the two folds applies.
        // ...
      }
    }
    if (UseOr)
      return BinaryOperator::CreateOr(I0, X);
    // ...
    // smax(X, -X) --> abs(X); smin(X, -X) --> -abs(X); unsigned likewise.
    if (isKnownNegation(I0, I1)) {
      // Prefer eliminating the only use of one operand.
      if (I0->hasOneUse() && !I1->hasOneUse())
        std::swap(I0, I1);
      // ... (emit abs, then negate it when the min/max picks the negative
      // side)
      if (IID == Intrinsic::smin || IID == Intrinsic::umax)
        Abs = Builder.CreateNeg(Abs, "nabs");
      return replaceInstUsesWith(CI, Abs);
    }
    // ... (further folds: foldClampRangeOfTwo, matchSAddSubSat,
    // reassociateMinMaxWithConstants, factorizeMinMaxTree)
    break;
  }
  case Intrinsic::bitreverse: {
    Value *IIOperand = II->getArgOperand(0);
    // bitrev (zext i1 X to ?) --> X ? SignBitC : 0
    Value *X;
    if (match(IIOperand, m_ZExt(m_Value(X))) &&
        X->getType()->isIntOrIntVectorTy(1)) {
      // ...
    }
    if (Instruction *crossLogicOpFold =
            foldBitOrderCrossLogicOp<Intrinsic::bitreverse>(IIOperand,
                                                            Builder))
      return crossLogicOpFold;
    break;
  }
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);
    // Canonicalize bswap of a logical shift by a byte multiple as the
    // inverse shift of a bswap:
    // bswap (shl X, Y) --> lshr (bswap X), Y
    // bswap (lshr X, Y) --> shl (bswap X), Y
    Value *X, *Y;
    if (match(IIOperand, m_OneUse(m_LogicalShift(m_Value(X), m_Value(Y))))) {
      // ... (Y must be known to be a multiple of 8)
      Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);
      BinaryOperator::BinaryOps InverseShift =
          cast<BinaryOperator>(IIOperand)->getOpcode() == Instruction::Shl
              ? Instruction::LShr
              : Instruction::Shl;
      return BinaryOperator::Create(InverseShift, NewSwap, Y);
    }

    // ... (compute known leading/trailing zero bytes LZ and TZ of width BW)
    // bswap(x) -> shift(x) if x has exactly one "active byte".
    if (BW - LZ - TZ == 8) {
      assert(LZ != TZ && "active byte cannot be in the middle");
      if (LZ > TZ) // -> shl(x) if the "active byte" is in the low part of x.
        return BinaryOperator::CreateNUWShl(
            IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ));
      // -> lshr(x) if the "active byte" is in the high part of x.
      return BinaryOperator::CreateExactLShr(
          IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ));
    }

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
      unsigned C = X->getType()->getScalarSizeInBits() - BW;
      // ... (lshr X by C, then trunc)
    }

    if (Instruction *crossLogicOpFold =
            foldBitOrderCrossLogicOp<Intrinsic::bswap>(IIOperand, Builder)) {
      return crossLogicOpFold;
    }
    break;
  }
  case Intrinsic::masked_load:
    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
      return replaceInstUsesWith(CI, SimplifiedMaskedOp);
    break;
  case Intrinsic::masked_store:
    return simplifyMaskedStore(*II);
  case Intrinsic::masked_gather:
    return simplifyMaskedGather(*II);
  case Intrinsic::masked_scatter:
    return simplifyMaskedScatter(*II);
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
    if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
      return replaceInstUsesWith(*II, SkippedBarrier);
    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // 0 and 1 are handled in instsimplify.
      // powi(x, -1) -> 1/x
      if (Power->isMinusOne())
        return BinaryOperator::CreateFDivFMF(
            ConstantFP::get(CI.getType(), 1.0), II->getArgOperand(0), II);
      // powi(x, 2) -> x*x
      if (Power->equalsInt(2))
        return BinaryOperator::CreateFMulFMF(II->getArgOperand(0),
                                             II->getArgOperand(0), II);
      // For an even power, the sign of the base is irrelevant:
      // powi(-x, p), powi(fabs(x), p), powi(copysign(x, y), p) -> powi(x, p)
      if (!Power->getValue()[0]) {
        // ...
      }
    }
    break;
  case Intrinsic::cttz:
  case Intrinsic::ctlz:
    if (auto *I = foldCttzCtlz(*II, *this))
      return I;
    break;
  case Intrinsic::ctpop:
    if (auto *I = foldCtpop(*II, *this))
      return I;
    break;
  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
    Type *Ty = II->getType();
    Constant *ShAmtC;
    if (match(II->getArgOperand(2), m_ImmConstant(ShAmtC))) {
      // Canonicalize a constant shift amount to modulo the bit width.
      // ...
      if (ModuloC != ShAmtC)
        return replaceOperand(*II, 2, ModuloC);
      // ...
      assert(/* WidthC ugt ShAmtC */ true &&
             "Shift amount expected to be modulo bitwidth");

      // Canonicalize a funnel shift right by constant to a funnel shift
      // left, which the backend recognizes more reliably as a rotate.
      if (IID == Intrinsic::fshr) {
        // fshr X, Y, C --> fshl X, Y, (BitWidth - C)
        // ...
      }
      assert(IID == Intrinsic::fshl &&
             "All funnel shifts by simple constants should go left");

      // fshl(X, 0, C) --> shl X, C
      // fshl(X, undef, C) --> shl X, C
      if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
        return BinaryOperator::CreateShl(Op0, ShAmtC);

      // fshl(0, X, C) --> lshr X, (BW-C)
      // fshl(undef, X, C) --> lshr X, (BW-C)
      if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
        return BinaryOperator::CreateLShr(Op1,
                                          ConstantExpr::getSub(WidthC, ShAmtC));
      // ...
    }
    break;
  }
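// Illustrative examples (assumed shapes): constant funnel shifts are
// canonicalized to the left and degenerate to plain shifts:
//   call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 8)
//     --> call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 24)
//   call i32 @llvm.fshl.i32(i32 %x, i32 0, i32 3)
//     --> shl i32 %x, 3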
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow: {
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;

    // Given 2 constant operands whose sum does not overflow:
    // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
    // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
    Value *X;
    const APInt *C0, *C1;
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    bool IsSigned = IID == Intrinsic::sadd_with_overflow;
    bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
                             : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
    if (HasNWAdd && match(Arg1, m_APInt(C1))) {
      bool Overflow;
      APInt NewC =
          IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
      if (!Overflow)
        return replaceInstUsesWith(
            *II, Builder.CreateBinaryIntrinsic(
                     IID, X, ConstantInt::get(Arg1->getType(), NewC)));
    }
    break;
  }
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::usub_with_overflow:
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;
    break;
  case Intrinsic::ssub_with_overflow: {
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;
    // ...
    break;
  }
  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat: {
    SaturatingInst *SI = cast<SaturatingInst>(II);
    Type *Ty = SI->getType();
    Value *Arg0 = SI->getLHS();
    Value *Arg1 = SI->getRHS();
    // ... (use known overflow information first)

    // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
    Constant *C;
    if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
        C->isNotMinSignedValue()) {
      Value *NegVal = ConstantExpr::getNeg(C);
      return replaceInstUsesWith(
          *II, Builder.CreateBinaryIntrinsic(
                   Intrinsic::sadd_sat, Arg0, NegVal));
    }

    // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
    // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
    // if Val and Val2 have the same sign.
    if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
      Value *X;
      const APInt *Val, *Val2;
      APInt NewVal;
      bool IsUnsigned =
          IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
      if (Other->getIntrinsicID() == IID &&
          match(Arg1, m_APInt(Val)) &&
          match(Other->getArgOperand(0), m_Value(X)) &&
          match(Other->getArgOperand(1), m_APInt(Val2))) {
        if (IsUnsigned)
          NewVal = Val->uadd_sat(*Val2);
        else if (Val->isNonNegative() == Val2->isNonNegative()) {
          bool Overflow;
          NewVal = Val->sadd_ov(*Val2, Overflow);
          if (Overflow)
            break; // Folding would exceed the signed range.
        } else
          break; // Cannot fold saturated addition with different signs.
        return replaceInstUsesWith(
            *II, Builder.CreateBinaryIntrinsic(
                     IID, X, ConstantInt::get(II->getType(), NewVal)));
      }
    }
    break;
  }
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    Value *X, *Y;
    if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
        (Arg0->hasOneUse() || Arg1->hasOneUse())) {
      // If both operands are negated, invert the call and negate the result:
      // min(-X, -Y) --> -(max(X, Y))
      // max(-X, -Y) --> -(min(X, Y))
      Intrinsic::ID NewIID;
      switch (IID) {
      case Intrinsic::maxnum:
        NewIID = Intrinsic::minnum;
        break;
      case Intrinsic::minnum:
        NewIID = Intrinsic::maxnum;
        break;
      case Intrinsic::maximum:
        NewIID = Intrinsic::minimum;
        break;
      case Intrinsic::minimum:
        NewIID = Intrinsic::maximum;
        break;
      default:
        llvm_unreachable("unexpected intrinsic ID");
      }
      Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
      Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
      FNeg->copyIRFlags(II);
      return FNeg;
    }

    // m(m(X, C2), C1) -> m(X, C) with C folded from C1 and C2.
    if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
      const APFloat *C1, *C2;
      if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
          ((match(M->getArgOperand(0), m_Value(X)) &&
            match(M->getArgOperand(1), m_APFloat(C2))) ||
           (match(M->getArgOperand(1), m_Value(X)) &&
            match(M->getArgOperand(0), m_APFloat(C2))))) {
        switch (IID) {
        case Intrinsic::maxnum:
          // ...
        case Intrinsic::minnum:
          // ...
        case Intrinsic::maximum:
          // ...
        case Intrinsic::minimum:
          // ...
        default:
          llvm_unreachable("unexpected intrinsic ID");
        }
        // ...
      }
    }

    // Narrow a min/max through matching one-use fpext operands.
    if (match(Arg0, m_OneUse(m_FPExt(m_Value(X)))) &&
        match(Arg1, m_OneUse(m_FPExt(m_Value(Y)))) &&
        X->getType() == Y->getType()) {
      // ...
    }

    // max X, -X --> fabs X; min X, -X --> -(fabs X). For the min flavors the
    // fabs result is negated:
    //     if (IID == Intrinsic::minimum || IID == Intrinsic::minnum)
    //       R = Builder.CreateFNegFMF(R, II);
    // ...
    break;
  }
  case Intrinsic::matrix_multiply: {
    // Optimize negation in matrix multiplication: fold an fneg into
    // whichever operand (or the result) makes the negation cheapest.
    // ...
    Value *OpNotNeg, *NegatedOp;
    unsigned NegatedOpArg, OtherOpArg;
    // ... (find which operand is an fneg; bail out if neither)

    // (-A) * B -> -(A * B), if it is cheaper to negate the result.
    SmallVector<Value *, 5> NewArgs(II->args());
    NewArgs[NegatedOpArg] = OpNotNeg;
    // ... (emit the un-negated multiply and fneg its result)
    break;
  }
  case Intrinsic::fmuladd: {
    // ... (when the fused multiply can be split, emit fmul + fadd; the new
    // fadd inherits the call's fast-math flags)
    FAdd->copyFastMathFlags(II);
    // ...
    break;
  }
  case Intrinsic::fma: {
    // ... (fold fneg/fabs through the multiply operands; if the multiply
    // folds away, the remaining fadd inherits the call's fast-math flags)
    FAdd->copyFastMathFlags(II);
    // ...
    break;
  }
  case Intrinsic::copysign: {
    // ... (a known sign bit on the sign operand reduces copysign to fabs or
    // fneg-of-fabs; nested copysigns keep only the outermost sign)
    break;
  }
  case Intrinsic::fabs: {
    Value *Cond, *TVal, *FVal;
    if (match(II->getArgOperand(0),
              m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))) {
      // fabs (select Cond, TrueC, FalseC) --> select Cond, AbsT, AbsF
      if (isa<Constant>(TVal) && isa<Constant>(FVal)) {
        // ...
      }
      // ...
    }

    Value *Magnitude, *Sign;
    if (match(II->getArgOperand(0),
              m_CopySign(m_Value(Magnitude), m_Value(Sign)))) {
      // fabs (copysign X, Y) --> (fabs X)
      // ...
    }
    [[fallthrough]];
  }
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::trunc: {
    // Narrow the call: intrinsic (fpext x) --> fpext (intrinsic x)
    // ...
    break;
  }
  case Intrinsic::cos:
  case Intrinsic::amdgcn_cos: {
    Value *X;
    Value *Src = II->getArgOperand(0);
    if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) {
      // cos(-x) --> cos(x); cos(fabs(x)) --> cos(x)
      return replaceOperand(*II, 0, X);
    }
    break;
  }
  case Intrinsic::sin: {
    Value *X;
    if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
      // sin(-x) --> -sin(x)
      Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II);
      Instruction *FNeg = UnaryOperator::CreateFNeg(NewSin);
      FNeg->copyFastMathFlags(II);
      return FNeg;
    }
    break;
  }
  case Intrinsic::ptrauth_auth:
  case Intrinsic::ptrauth_resign: {
    // (sign|resign) + (auth|resign) can be folded by omitting the middle
    // sign+auth component if the key and discriminator match.
    bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign;
    // ...
    // AuthKey will be the key we need to end up authenticating against in
    // whatever we replace this sequence with.
    Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr;
    // ... (match the incoming sign/resign operand and fill AuthKey/AuthDisc)

    if (AuthKey && NeedSign) {
      // resign(0,1) + resign(1,2) = resign(0, 2)
      NewIntrin = Intrinsic::ptrauth_resign;
    } else if (AuthKey) {
      // resign(0,1) + auth(1) = auth(0)
      NewIntrin = Intrinsic::ptrauth_auth;
    } else if (NeedSign) {
      // sign(0) + resign(0, 1) = sign(1)
      NewIntrin = Intrinsic::ptrauth_sign;
    } else {
      // sign(0) + auth(0) = nop
      // ...
    }
    // ...
    break;
  }
  case Intrinsic::arm_neon_vtbl1:
  case Intrinsic::aarch64_neon_tbl1:
    if (Value *V = simplifyNeonTbl1(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;
  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return replaceInstUsesWith(CI,
                                 ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
                 IID == Intrinsic::aarch64_neon_umull);
    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
        // ... (widen both constants and fold to a plain multiply)
      }
      // Couldn't simplify - canonicalize the constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
      if (ConstantInt *Splat =
              dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
        if (Splat->isOne())
          return CastInst::CreateIntegerCast(Arg0, II->getType(),
                                             /*isSigned=*/!Zext);
    break;
  }
  case Intrinsic::arm_neon_aesd:
  case Intrinsic::arm_neon_aese:
  case Intrinsic::aarch64_crypto_aesd:
  case Intrinsic::aarch64_crypto_aese: {
    // Use the builtin XOR in AESE/AESD to eliminate a prior XOR.
    // ...
    break;
  }
  case Intrinsic::hexagon_V6_vandvrt:
  case Intrinsic::hexagon_V6_vandvrt_128B: {
    // Simplify Q -> V -> Q conversion.
    if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      Intrinsic::ID ID0 = Op0->getIntrinsicID();
      if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
          ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
        break;
      // ... (C holds the known-one bits common to both byte masks)
      // Check if every byte has a common bit in Bytes and Mask.
      if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
        return replaceInstUsesWith(*II, Op0->getArgOperand(0));
    }
    break;
  }
  case Intrinsic::stackrestore: {
    enum class ClassifyResult {
      None,
      Alloca,
      StackRestore,
      CallWithSideEffects,
    };
    auto Classify = [](const Instruction *I) {
      if (isa<AllocaInst>(I))
        return ClassifyResult::Alloca;

      if (auto *CI = dyn_cast<CallInst>(I)) {
        if (auto *II = dyn_cast<IntrinsicInst>(CI)) {
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return ClassifyResult::StackRestore;
          if (II->mayHaveSideEffects())
            return ClassifyResult::CallWithSideEffects;
        } else {
          // Consider all non-intrinsic calls to be side effects.
          return ClassifyResult::CallWithSideEffects;
        }
      }

      return ClassifyResult::None;
    };

    // If the stacksave and the stackrestore are in the same BB, and there is
    // no intervening call, alloca, or stackrestore of a different stacksave,
    // remove the restore. This can happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave &&
          SS->getParent() == II->getParent()) {
        BasicBlock::iterator BI(SS);
        bool CannotRemove = false;
        for (++BI; &*BI != II; ++BI) {
          switch (Classify(&*BI)) {
          case ClassifyResult::None:
            // So far so good; look at the next instruction.
            break;
          case ClassifyResult::StackRestore:
            // An intervening stackrestore for a different stacksave blocks
            // removal; one for the same stacksave does not.
            if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS)
              CannotRemove = true;
            break;
          case ClassifyResult::Alloca:
          case ClassifyResult::CallWithSideEffects:
            // An alloca, non-intrinsic call, or intrinsic call with side
            // effects blocks removal.
            CannotRemove = true;
            break;
          }
          if (CannotRemove)
            break;
        }
        if (!CannotRemove)
          return eraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI(II);
    Instruction *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      switch (Classify(&*BI)) {
      case ClassifyResult::None:
        break;
      case ClassifyResult::StackRestore:
        // If there is a stackrestore below this one, remove this one.
        return eraseInstFromFunction(CI);
      case ClassifyResult::Alloca:
      case ClassifyResult::CallWithSideEffects:
        // An alloca, non-intrinsic call, or intrinsic call with side effects
        // (such as llvm.stacksave) blocks removal.
        CannotRemove = true;
        break;
      }
      if (CannotRemove)
        break;
    }

    // If the stack restore is in a return/resume block with no allocas or
    // calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return eraseInstFromFunction(CI);
    break;
  }
  case Intrinsic::lifetime_end:
    // ... (skip when a sanitizer needs the empty range to stay)
    if (removeTriviallyEmptyRange(*II, *this, [](const IntrinsicInst &I) {
          return I.getIntrinsicID() == Intrinsic::lifetime_start;
        }))
      return nullptr;
    break;
  case Intrinsic::assume: {
    Value *IIOperand = II->getArgOperand(0);

    // Remove the boolean condition from an assume and drop the assume when
    // it becomes useless; always returns nullptr.
    auto RemoveConditionFromAssume = [&](Instruction *Assume) -> Instruction * {
      assert(isa<AssumeInst>(Assume));
      // ...
      return nullptr;
    };

    // Remove an assume if it is followed by an identical assume.
    Instruction *Next = II->getNextNonDebugInstruction();
    if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
      return RemoveConditionFromAssume(Next);

    // ... (canonicalize assume(a && b) into two assumes, lift nonnull
    // comparisons into operand bundles, etc.; those paths end with
    //     return RemoveConditionFromAssume(II);
    // )

    // Separate-storage assumptions apply to the underlying allocations, not
    // any particular pointer within them; precompute the underlying objects.
    if (OBU.getTagName() == "separate_storage") {
      auto MaybeSimplifyHint = [&](const Use &U) {
        Value *Hint = U.get();
        Value *UnderlyingObject = getUnderlyingObject(Hint);
        if (Hint != UnderlyingObject)
          replaceUse(const_cast<Use &>(U), UnderlyingObject);
      };
      MaybeSimplifyHint(OBU.Inputs[0]);
      MaybeSimplifyHint(OBU.Inputs[1]);
    }

    // Alignment assumptions (ptr & mask == 0) become "align" bundles; the
    // replacement assume is inserted next to the original:
    //     Replacement->insertBefore(Next);
    //     ... return RemoveConditionFromAssume(II);
    // and similarly for knowledge rebuilt from computed alignment:
    //     Replacement->insertAfter(II);
    //     ... return RemoveConditionFromAssume(II);

    // Canonicalize knowledge held in operand bundles, skipping bundles with
    // more than two operands (align-with-offset would lose information):
    if (BOI.End - BOI.Begin > 2)
      continue;
    // ... (drop a bundle whose knowledge canonicalizes away)
    if (BOI.End - BOI.Begin > 0) {
      // ...
    }
    // ... (otherwise rewrite the bundle operands in place)
    if (BOI.End - BOI.Begin > 0)
      II->op_begin()[BOI.Begin].set(CanonRK.WasOn);
    if (BOI.End - BOI.Begin > 1)
      II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
          Type::getInt64Ty(II->getContext()), CanonRK.ArgValue));
    // ...
    break;
  }
  case Intrinsic::experimental_guard: {
    // Is this guard followed by another guard? We scan forward over a small
    // fixed window of instructions (GuardWideningWindow) to handle common
    // cases with conditions computed between guards.
    Instruction *NextInst = II->getNextNonDebugInstruction();
    // ...
    Value *NextCond = nullptr;
    if (match(NextInst,
              m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
      Value *CurrCond = II->getArgOperand(0);
      // Remove a guard that is immediately preceded by an identical guard;
      // otherwise canonicalize guard(a); guard(b) -> guard(a & b).
      if (CurrCond != NextCond) {
        Instruction *MoveI = II->getNextNonDebugInstruction();
        while (MoveI != NextInst) {
          auto *Temp = MoveI;
          MoveI = MoveI->getNextNonDebugInstruction();
          Temp->moveBefore(II);
        }
        replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond));
      }
      eraseInstFromFunction(*NextInst);
      return II;
    }
    break;
  }
  case Intrinsic::vector_insert: {
    Value *Vec = II->getArgOperand(0);
    Value *SubVec = II->getArgOperand(1);
    Value *Idx = II->getArgOperand(2);
    auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
    auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
    auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());

    // Only canonicalize if the destination vector, Vec, and SubVec are all
    // fixed vectors.
    if (DstTy && VecTy && SubVecTy) {
      unsigned DstNumElts = DstTy->getNumElements();
      unsigned VecNumElts = VecTy->getNumElements();
      unsigned SubVecNumElts = SubVecTy->getNumElements();
      unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

      // An insert that entirely overwrites Vec with SubVec is a nop.
      if (VecNumElts == SubVecNumElts)
        return replaceInstUsesWith(CI, SubVec);

      // Widen SubVec to the same width as Vec, since shufflevector requires
      // the two input vectors to be the same width; extra lanes are poison.
      SmallVector<int, 8> WidenMask;
      unsigned i;
      for (i = 0; i != SubVecNumElts; ++i)
        WidenMask.push_back(i);
      for (; i != VecNumElts; ++i)
        WidenMask.push_back(PoisonMaskElem);
      Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);

      // Blend the widened subvector into Vec at index IdxN.
      SmallVector<int, 8> Mask;
      for (unsigned i = 0; i != IdxN; ++i)
        Mask.push_back(i);
      for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
        Mask.push_back(i);
      for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
        Mask.push_back(i);

      Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);
      return replaceInstUsesWith(CI, Shuffle);
    }
    break;
  }
  case Intrinsic::vector_extract: {
    Value *Vec = II->getArgOperand(0);
    Value *Idx = II->getArgOperand(1);
    Type *ReturnType = II->getType();

    // Look through a preceding vector_insert.
    unsigned ExtractIdx = cast<ConstantInt>(Idx)->getZExtValue();
    Value *InsertTuple, *InsertIdx, *InsertValue;
    if (match(Vec, m_Intrinsic<Intrinsic::vector_insert>(
                       m_Value(InsertTuple), m_Value(InsertValue),
                       m_Value(InsertIdx))) &&
        InsertValue->getType() == ReturnType) {
      unsigned Index = cast<ConstantInt>(InsertIdx)->getZExtValue();
      // Extracting the same index right after setting it yields the
      // inserted value:
      // extract.vector(insert.vector(T, V, Idx), Idx) --> V
      if (ExtractIdx == Index)
        return replaceInstUsesWith(CI, InsertValue);
      // A different index bypasses the insert entirely:
      // extract.vector(insert.vector(T, V, I1), I2) --> extract.vector(T, I2)
      return replaceOperand(CI, 0, InsertTuple);
    }

    auto *DstTy = dyn_cast<FixedVectorType>(ReturnType);
    auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());

    // Only canonicalize if the destination vector and Vec are fixed vectors.
    if (DstTy && VecTy) {
      unsigned DstNumElts = DstTy->getNumElements();
      unsigned VecNumElts = VecTy->getNumElements();
      unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

      // Extracting the entirety of Vec is a nop.
      if (VecNumElts == DstNumElts) {
        replaceInstUsesWith(CI, Vec);
        return eraseInstFromFunction(CI);
      }

      SmallVector<int, 8> Mask;
      for (unsigned i = 0; i != DstNumElts; ++i)
        Mask.push_back(IdxN + i);

      Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask);
      return replaceInstUsesWith(CI, Shuffle);
    }
    break;
  }
  case Intrinsic::experimental_vector_reverse: {
    Value *BO0, *BO1, *X, *Y;
    Value *Vec = II->getArgOperand(0);
    if (match(Vec, m_OneUse(m_BinOp(m_Value(BO0), m_Value(BO1))))) {
      auto *OldBinOp = cast<BinaryOperator>(Vec);
      if (match(BO0, m_VecReverse(m_Value(X)))) {
        // rev(binop rev(X), rev(Y)) --> binop X, Y
        if (match(BO1, m_VecReverse(m_Value(Y))))
          return replaceInstUsesWith(CI,
                                     BinaryOperator::CreateWithCopiedFlags(
                                         OldBinOp->getOpcode(), X, Y, OldBinOp,
                                         OldBinOp->getName(), II));
        // rev(binop rev(X), BO1Splat) --> binop X, BO1Splat
        if (isSplatValue(BO1))
          return replaceInstUsesWith(CI,
                                     BinaryOperator::CreateWithCopiedFlags(
                                         OldBinOp->getOpcode(), X, BO1,
                                         OldBinOp, OldBinOp->getName(), II));
      }
      // rev(binop BO0Splat, rev(Y)) --> binop BO0Splat, Y
      if (match(BO1, m_VecReverse(m_Value(Y))) && isSplatValue(BO0))
        return replaceInstUsesWith(CI, BinaryOperator::CreateWithCopiedFlags(
                                           OldBinOp->getOpcode(), BO0, Y,
                                           OldBinOp, OldBinOp->getName(), II));
    }
    // rev(unop rev(X)) --> unop X
    if (match(Vec, m_OneUse(m_UnOp(m_VecReverse(m_Value(X)))))) {
      auto *OldUnOp = cast<UnaryOperator>(Vec);
      auto *NewUnOp = UnaryOperator::CreateWithCopiedFlags(
          OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(), II);
      return replaceInstUsesWith(CI, NewUnOp);
    }
    break;
  }
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_and: {
    // Canonicalize logical or/and reductions of (possibly extended) i1
    // vectors: bitcast the mask to an integer and compare it against zero
    // (or) or all-ones (and).
    Value *Arg = II->getArgOperand(0);
    Value *Vect;
    if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
      if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
        if (FTy->getElementType() == Builder.getInt1Ty()) {
          Value *Res = Builder.CreateBitCast(
              Vect, Builder.getIntNTy(FTy->getNumElements()));
          if (IID == Intrinsic::vector_reduce_and) {
            Res = Builder.CreateICmpEQ(
                Res, ConstantInt::getAllOnesValue(Res->getType()));
          } else {
            assert(IID == Intrinsic::vector_reduce_or &&
                   "Expected or reduction.");
            Res = Builder.CreateIsNotNull(Res);
          }
          // ... (re-extend Res when Arg was extended)
          return replaceInstUsesWith(CI, Res);
        }
    }
    [[fallthrough]];
  }
  case Intrinsic::vector_reduce_add: {
    if (IID == Intrinsic::vector_reduce_add) {
      // An add reduction of (possibly extended) i1 is a ctpop of the bitcast
      // mask, negated when the extension was a sext.
      Value *Arg = II->getArgOperand(0);
      Value *Vect;
      if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
        if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
          if (FTy->getElementType() == Builder.getInt1Ty()) {
            // ... (Res = ctpop of the bitcast mask, resized to the result)
            if (Arg != Vect &&
                cast<Instruction>(Arg)->getOpcode() == Instruction::SExt)
              Res = Builder.CreateNeg(Res);
            return replaceInstUsesWith(CI, Res);
          }
      }
    }
    [[fallthrough]];
  }
  case Intrinsic::vector_reduce_xor: {
    if (IID == Intrinsic::vector_reduce_xor) {
      // A xor reduction of (possibly extended) i1 is an add reduction:
      //   vector_reduce_xor(?ext(<n x i1>)) --> ?ext(vector_reduce_add(<n x i1>))
      Value *Arg = II->getArgOperand(0);
      Value *Vect;
      if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
        if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) {
          // ...
        }
      }
    }
    [[fallthrough]];
  }
  case Intrinsic::vector_reduce_mul: {
    if (IID == Intrinsic::vector_reduce_mul) {
      // A mul reduction of (possibly extended) i1 is an and reduction:
      //   vector_reduce_mul(?ext(<n x i1>)) --> zext(vector_reduce_and(<n x i1>))
      Value *Arg = II->getArgOperand(0);
      Value *Vect;
      if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
        if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType())) {
          // ...
        }
      }
    }
    [[fallthrough]];
  }
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax: {
    if (IID == Intrinsic::vector_reduce_umin ||
        IID == Intrinsic::vector_reduce_umax) {
      // umin/umax reductions of (possibly extended) i1 are and/or reductions.
      Value *Arg = II->getArgOperand(0);
      Value *Vect;
      if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
        if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
          if (FTy->getElementType() == Builder.getInt1Ty()) {
            Value *Res = IID == Intrinsic::vector_reduce_umin
                             ? Builder.CreateAndReduce(Vect)
                             : Builder.CreateOrReduce(Vect);
            // ...
            return replaceInstUsesWith(CI, Res);
          }
      }
    }
    [[fallthrough]];
  }
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax: {
    if (IID == Intrinsic::vector_reduce_smin ||
        IID == Intrinsic::vector_reduce_smax) {
      // smin/smax reductions of (possibly extended) i1 are and/or
      // reductions; the extension kind (zext vs. sext) picks which one.
      Value *Arg = II->getArgOperand(0);
      Value *Vect;
      if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
        if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
          if (FTy->getElementType() == Builder.getInt1Ty()) {
            Instruction::CastOps ExtOpc = Instruction::CastOps::CastOpsEnd;
            if (Arg != Vect)
              ExtOpc = cast<CastInst>(Arg)->getOpcode();
            Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
                          (ExtOpc == Instruction::CastOps::ZExt))
                             ? Builder.CreateAndReduce(Vect)
                             : Builder.CreateOrReduce(Vect);
            // ...
            return replaceInstUsesWith(CI, Res);
          }
      }
    }
    [[fallthrough]];
  }
  case Intrinsic::vector_reduce_fmax:
  case Intrinsic::vector_reduce_fmin:
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    bool CanBeReassociated = (IID != Intrinsic::vector_reduce_fadd &&
                              IID != Intrinsic::vector_reduce_fmul) ||
                             II->hasAllowReassoc();
    const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||
                             IID == Intrinsic::vector_reduce_fmul)
                                ? 1
                                : 0;
    Value *Arg = II->getArgOperand(ArgIdx);
    Value *V;
    ArrayRef<int> Mask;
    if (!isa<FixedVectorType>(Arg->getType()) || !CanBeReassociated ||
        !match(Arg, m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))) ||
        !cast<ShuffleVectorInst>(Arg)->isSingleSource())
      break;
    int Sz = Mask.size();
    SmallBitVector UsedIndices(Sz);
    for (int Idx : Mask) {
      if (Idx == PoisonMaskElem || UsedIndices.test(Idx))
        break;
      UsedIndices.set(Idx);
    }
    // The shuffle can be removed iff it merely permutes the elements, with
    // no repeats or poison lanes.
    if (UsedIndices.all()) {
      replaceUse(II->getOperandUse(ArgIdx), V);
      return nullptr;
    }
    break;
  }
  case Intrinsic::is_fpclass: {
    if (Instruction *I = foldIntrinsicIsFPClass(*II))
      return I;
    break;
  }
  default:
    break;
  }

  // Try to fold the intrinsic into select operands.
  switch (IID) {
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::ctpop:
  case Intrinsic::umin:
  case Intrinsic::umax:
  case Intrinsic::smin:
  case Intrinsic::smax:
  case Intrinsic::usub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::sadd_sat:
    for (Value *Op : II->args())
      if (auto *Sel = dyn_cast<SelectInst>(Op))
        if (Instruction *R = FoldOpIntoSelect(*II, Sel))
          return R;
    [[fallthrough]];
  default:
    break;
  }

  if (Instruction *Shuf = foldShuffledIntrinsicOperands(II, Builder))
    return Shuf;

  // Some intrinsics (like experimental_gc_statepoint) can be used in invoke
  // context, so they are handled in visitCallBase; trigger it.
  return visitCallBase(*II);
}
Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) {
  auto *NFI = dyn_cast<FenceInst>(FI.getNextNonDebugInstruction());
  // Returns true if FI1 is an identical or stronger fence than FI2.
  auto isIdenticalOrStrongerFence = [](FenceInst *FI1, FenceInst *FI2) {
    auto FI1SyncScope = FI1->getSyncScopeID();
    // Consider same scope, where scope is global or single-thread.
    if (FI1SyncScope != FI2->getSyncScopeID() ||
        (FI1SyncScope != SyncScope::System &&
         FI1SyncScope != SyncScope::SingleThread))
      return false;
    return isAtLeastOrStrongerThan(FI1->getOrdering(), FI2->getOrdering());
  };

  // This fence instruction is redundant if it is adjacent to an identical or
  // stronger fence.
  if (NFI && isIdenticalOrStrongerFence(NFI, &FI))
    return eraseInstFromFunction(FI);
  if (auto *PFI = dyn_cast_or_null<FenceInst>(FI.getPrevNonDebugInstruction()))
    if (isIdenticalOrStrongerFence(PFI, &FI))
      return eraseInstFromFunction(FI);
  return nullptr;
}

Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) {
  return visitCallBase(II);
}

Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) {
  return visitCallBase(CBI);
}
Instruction *InstCombinerImpl::tryOptimizeCall(CallInst *CI) {
  // ... (set up the LibCallSimplifier)
  if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
    ++NumSimplified;
    return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
  }
  return nullptr;
}

static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca.
  // This is good enough in practice and simpler than handling any number of
  // casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
    return nullptr;
  if (!isa<AllocaInst>(Underlying))
    return nullptr;

  IntrinsicInst *InitTrampoline = nullptr;
  for (User *U : TrampMem->users()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II)
      return nullptr;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value. Give up.
        return nullptr;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust_trampoline.
      continue;
    return nullptr;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return nullptr;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return nullptr;

  return InitTrampoline;
}
bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,
                                            const TargetLibraryInfo *TLI) {
  // Only handle properties that can't be derived from generic attributes on
  // the allocator declaration (nonnull, noalias, etc. are handled there).
  bool Changed = false;

  if (!Call.getType()->isPointerTy())
    return Changed;

  std::optional<APInt> Size = getAllocSize(&Call, TLI);
  if (Size && *Size != 0) {
    if (Call.hasRetAttr(Attribute::NonNull)) {
      Changed = !Call.hasRetAttr(Attribute::Dereferenceable);
      Call.addRetAttr(Attribute::getWithDereferenceableBytes(
          Call.getContext(), Size->getLimitedValue()));
    } else {
      Changed = !Call.hasRetAttr(Attribute::DereferenceableOrNull);
      Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
          Call.getContext(), Size->getLimitedValue()));
    }
  }

  // Add an alignment attribute if the alignment argument is a power-of-two
  // constant.
  Value *Alignment = getAllocAlignment(&Call, TLI);
  if (!Alignment)
    return Changed;

  ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment);
  if (AlignOpC && AlignOpC->getValue().ult(llvm::Value::MaximumAlignment)) {
    uint64_t AlignmentVal = AlignOpC->getZExtValue();
    if (llvm::isPowerOf2_64(AlignmentVal)) {
      Align ExistingAlign = Call.getRetAlign().valueOrOne();
      Align NewAlign = Align(AlignmentVal);
      if (NewAlign > ExistingAlign) {
        Call.addRetAttr(
            Attribute::getWithAlignment(Call.getContext(), NewAlign));
        Changed = true;
      }
    }
  }
  return Changed;
}
/// Improvements for call, callbr and invoke instructions.
Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
  bool Changed = annotateAnyAllocSite(Call, &TLI);

  // Mark any parameters that are known to be non-null with the nonnull
  // attribute. This is helpful for inlining calls to functions with null
  // checks on their arguments.
  SmallVector<unsigned, 4> ArgNos;
  unsigned ArgNo = 0;
  for (Value *V : Call.args()) {
    if (V->getType()->isPointerTy() &&
        !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
        isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
      ArgNos.push_back(ArgNo);
    ArgNo++;
  }
  assert(ArgNo == Call.arg_size() &&
         "Call arguments not processed correctly.");

  if (!ArgNos.empty()) {
    AttributeList AS = Call.getAttributes();
    LLVMContext &Ctx = Call.getContext();
    AS = AS.addParamAttribute(Ctx, ArgNos,
                              Attribute::get(Ctx, Attribute::NonNull));
    Call.setAttributes(AS);
    Changed = true;
  }

  // If the callee is a pointer to a function, attempt to move any casts to
  // the arguments of the call/callbr/invoke.
  Value *Callee = Call.getCalledOperand();
  Function *CalleeF = dyn_cast<Function>(Callee);
  if ((!CalleeF || CalleeF->getFunctionType() != Call.getFunctionType()) &&
      transformConstExprCastCall(Call))
    return nullptr;

  if (CalleeF) {
    // Remove the convergent attr on calls when the callee is not convergent.
    if (Call.isConvergent() && !CalleeF->isConvergent() &&
        !CalleeF->isIntrinsic()) {
      LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
                        << "\n");
      Call.setNotConvergent();
      return &Call;
    }
    // If the call to a known function has a hopelessly mismatched signature,
    // calling it is undefined behavior; drop what we can.
    // ...
      if (isa<CallInst>(OldCall))
        return eraseInstFromFunction(*OldCall);
      // We cannot remove an invoke or a callbr, because it would change the
      // CFG; just change the callee to a null pointer.
      cast<CallBase>(OldCall)->setCalledFunction(
          CalleeF->getFunctionType(),
          Constant::getNullValue(CalleeF->getType()));
    // ...
  }

  if ((isa<ConstantPointerNull>(Callee) &&
       !NullPointerIsDefined(Call.getFunction())) ||
      isa<UndefValue>(Callee)) {
    // If Call does not return void then replaceInstUsesWith poison.
    // This allows ValueHandlers and custom metadata to adjust itself.
    if (!Call.getType()->isVoidTy())
      replaceInstUsesWith(Call, PoisonValue::get(Call.getType()));

    if (Call.isTerminator()) {
      // Can't remove an invoke or callbr because we cannot change the CFG.
      return nullptr;
    }
    // ...
  }

  if (IntrinsicInst *II = findInitTrampoline(Callee))
    return transformCallThroughTrampoline(Call, *II);

  if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
    InlineAsm *IA = cast<InlineAsm>(Callee);
    if (!IA->canThrow()) {
      // Normal inline asm calls cannot throw - mark them 'nounwind'.
      Call.setDoesNotThrow();
      Changed = true;
    }
  }
  // Try to optimize the call if possible; we require DataLayout for most of
  // this.
  if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
    Instruction *I = tryOptimizeCall(CI);
    // If we changed something, return the result; otherwise fall through.
    if (I)
      return eraseInstFromFunction(*I);
  }

  // Replace uses of the call with the returned argument when it is
  // losslessly convertible to the call's type.
  if (!Call.use_empty() && !Call.isMustTailCall())
    if (Value *ReturnedArg = Call.getReturnedArgOperand()) {
      Type *CallTy = Call.getType();
      Type *RetArgTy = ReturnedArg->getType();
      if (RetArgTy->canLosslesslyBitCastTo(CallTy))
        return replaceInstUsesWith(
            Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));
    }

  // Drop unnecessary kcfi operand bundles from calls that were converted
  // into direct calls.
  auto Bundle = Call.getOperandBundle(LLVMContext::OB_kcfi);
  if (Bundle && !Call.isIndirectCall()) {
    DEBUG_WITH_TYPE(DEBUG_TYPE "-kcfi", {
      if (CalleeF) {
        ConstantInt *FunctionType = nullptr;
        ConstantInt *ExpectedType = cast<ConstantInt>(Bundle->Inputs[0]);

        if (MDNode *MD = CalleeF->getMetadata(LLVMContext::MD_kcfi_type))
          FunctionType = mdconst::extract<ConstantInt>(MD->getOperand(0));

        if (FunctionType &&
            FunctionType->getZExtValue() != ExpectedType->getZExtValue())
          dbgs() << Call.getModule()->getName()
                 << ": warning: kcfi: " << Call.getCaller()->getName()
                 << ": call to " << CalleeF->getName()
                 << " using a mismatching function pointer type\n";
      }
    });
    return CallBase::removeOperandBundle(&Call, LLVMContext::OB_kcfi);
  }
  // Handle intrinsics which can be used in both call and invoke context.
  switch (Call.getIntrinsicID()) {
  case Intrinsic::experimental_gc_statepoint: {
    GCStatepointInst &GCSP = *cast<GCStatepointInst>(&Call);
    SmallPtrSet<Value *, 32> LiveGcValues;
    for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
      GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);

      // Remove the relocation if unused.
      if (GCR.use_empty()) {
        eraseInstFromFunction(GCR);
        continue;
      }

      Value *DerivedPtr = GCR.getDerivedPtr();
      Value *BasePtr = GCR.getBasePtr();

      // Undef is undef, even after relocation.
      if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
        replaceInstUsesWith(GCR, UndefValue::get(GCR.getType()));
        eraseInstFromFunction(GCR);
        continue;
      }

      if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
        // The relocation of null will be null for most any collector.
        if (isa<ConstantPointerNull>(DerivedPtr)) {
          // Use a null pointer of gc_relocate's type to replace it.
          replaceInstUsesWith(GCR, ConstantPointerNull::get(PT));
          eraseInstFromFunction(GCR);
          continue;
        }
        // ... (add a nonnull attribute when the pointer is known nonzero)
      }
      // ...
      LiveGcValues.insert(BasePtr);
      LiveGcValues.insert(DerivedPtr);
    }
    std::optional<OperandBundleUse> Bundle =
        GCSP.getOperandBundle(LLVMContext::OB_gc_live);
    unsigned NumOfGCLives = LiveGcValues.size();
    if (!Bundle || NumOfGCLives == Bundle->Inputs.size())
      break;
    // We can reduce the size of the gc-live bundle.
    DenseMap<Value *, unsigned> Val2Idx;
    std::vector<Value *> NewLiveGc;
    for (Value *V : Bundle->Inputs) {
      if (Val2Idx.count(V))
        continue;
      if (LiveGcValues.count(V)) {
        Val2Idx[V] = NewLiveGc.size();
        NewLiveGc.push_back(V);
      } else
        Val2Idx[V] = NumOfGCLives;
    }
    // Update all gc.relocates.
    for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
      GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
      Value *BasePtr = GCR.getBasePtr();
      assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
             "Missed live gc for base pointer");
      // ...
      Value *DerivedPtr = GCR.getDerivedPtr();
      assert(Val2Idx.count(DerivedPtr) &&
             Val2Idx[DerivedPtr] != NumOfGCLives &&
             "Missed live gc for derived pointer");
      // ... (rewrite the relocate indices against the compacted bundle)
    }
    // ... (create the new statepoint with the reduced gc-live bundle)
    break;
  }
  default:
    break;
  }

  return Changed ? &Call : nullptr;
}
/// If the callee is a constexpr cast of a function, attempt to move the cast
/// to the arguments of the call/invoke.
bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
  auto *Callee =
      dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
  if (!Callee)
    return false;

  assert(!isa<CallBrInst>(Call) &&
         "CallBr's don't have a single point after a def to insert at");

  // If this is a call to a thunk function, don't remove the cast. Thunks are
  // used to transparently forward all incoming parameters and outgoing
  // return values, so it's important to leave the cast in place.
  if (Callee->hasFnAttribute("thunk"))
    return false;

  // If this is a musttail call, the callee's prototype must match the
  // caller's prototype with the exception of pointee types.
  if (Call.isMustTailCall())
    return false;

  Instruction *Caller = &Call;
  const AttributeList &CallerPAL = Call.getAttributes();

  // This is a cast from a function to a different type. Unless doing so would
  // cause a type conversion of one of our arguments, change this call to be a
  // direct call with arguments casted to the appropriate types.
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    // ...
    if (Callee->isDeclaration())
      return false; // Cannot transform this return value.

    if (!Caller->use_empty() &&
        !CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL))
      return false; // Cannot transform this return value.
    // ...
    // If the callsite is an invoke instruction and the return value is used
    // by a PHI node in a successor, there is no place to insert the cast, so
    // bail out.
    if (!Caller->use_empty()) {
      BasicBlock *PhisNotSupportedBlock = nullptr;
      if (auto *II = dyn_cast<InvokeInst>(Caller))
        PhisNotSupportedBlock = II->getNormalDest();
      if (PhisNotSupportedBlock)
        for (User *U : Caller->users())
          if (PHINode *PN = dyn_cast<PHINode>(U))
            if (PN->getParent() == PhisNotSupportedBlock)
              return false;
    }
  }

  unsigned NumActualArgs = Call.arg_size();
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  // Avoid folding away bitcasts around inalloca/preallocated calls; the
  // pointer argument is load-bearing there.
  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))
    return false;

  auto AI = Call.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
      return false; // Cannot transform this parameter value.
    // ...
    if (Call.isInAllocaArgument(i) ||
        CallerPAL.hasParamAttr(i, Attribute::Preallocated))
      return false; // Cannot transform to and from inalloca/preallocated.
    // ...
    if (Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))
      return false;
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // Don't change the varargsness of a call when the callee is only a
    // declaration; we don't want to introduce or remove a varargs ABI.
    if (FT->isVarArg() != Call.getFunctionType()->isVarArg())
      return false;

    // If both are varargs, the number of fixed parameters must also match.
    if (FT->isVarArg() && Call.getFunctionType()->isVarArg() &&
        FT->getNumParams() != Call.getFunctionType()->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty()) {
    // The extra arguments must carry attributes compatible with being vararg
    // call arguments.
    // ...
  }

  // Okay, this is safe: start inserting cast instructions as necessary.
  SmallVector<Value *, 8> Args;
  SmallVector<AttributeSet, 8> ArgAttrs;
  Args.reserve(NumActualArgs);
  ArgAttrs.reserve(NumActualArgs);
  // ...
  AI = Call.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    Value *NewArg = *AI;
    if ((*AI)->getType() != ParamTy)
      NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
    Args.push_back(NewArg);
    // ... (carry over compatible parameter attributes)
  }

  // If the function takes more arguments than the call provided, add nulls.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));
    ArgAttrs.push_back(AttributeSet());
  }

  if (FT->getNumParams() < NumActualArgs) {
    if (FT->isVarArg()) {
      // Add all remaining arguments in their promoted form.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        Value *NewArg = *AI;
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through the va_arg area!
          Instruction::CastOps opcode =
              CastInst::getCastOpcode(*AI, false, PTy, false);
          NewArg = Builder.CreateCast(opcode, *AI, PTy);
        }
        Args.push_back(NewArg);
        // ...
      }
    }
  }
  // ...
  assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
         "missing argument attributes");
  // ...
  SmallVector<OperandBundleDef, 1> OpBundles;
  Call.getOperandBundlesAsDefs(OpBundles);

  CallBase *NewCall;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
                                   II->getUnwindDest(), Args, OpBundles);
  } else {
    NewCall = Builder.CreateCall(Callee, Args, OpBundles);
    cast<CallInst>(NewCall)->setTailCallKind(
        cast<CallInst>(Caller)->getTailCallKind());
  }
  // ...
  NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});
  // ...
  // Insert a cast of the return type as necessary.
  Instruction *NC = NewCall;
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());
      // ...
      assert(InsertPt && "No place to insert cast");
      // ...
    } else {
      NV = PoisonValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    replaceInstUsesWith(*Caller, NV);
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
    // ...
  }

  eraseInstFromFunction(*Caller);
  return true;
}
Instruction *
InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
                                                 IntrinsicInst &Tramp) {
  FunctionType *FTy = Call.getFunctionType();
  AttributeList Attrs = Call.getAttributes();

  // If the call already has a 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  Function *NestF =
      cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
  FunctionType *NestFTy = NestF->getFunctionType();
  AttributeList NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestArgNo = 0;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;

    // Look for the parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
                                      E = NestFTy->param_end();
         I != E; ++NestArgNo, ++I) {
      AttributeSet AS = NestAttrs.getParamAttrs(NestArgNo);
      if (AS.hasAttribute(Attribute::Nest)) {
        NestTy = *I;
        NestAttr = AS;
        break;
      }
    }

    if (NestTy) {
      std::vector<Value *> NewArgs;
      std::vector<AttributeSet> NewArgAttrs;
      NewArgs.reserve(Call.arg_size() + 1);
      NewArgAttrs.reserve(Call.arg_size());

      // Insert the nest argument into the call argument list, which may mean
      // appending it. Likewise for attributes.
      {
        unsigned ArgNo = 0;
        auto I = Call.arg_begin(), E = Call.arg_end();
        do {
          if (ArgNo == NestArgNo) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp.getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewArgAttrs.push_back(NestAttr);
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));

          ++ArgNo;
          ++I;
        } while (true);
      }

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Synthesize a new function type, equal to FTy with the chain
      // parameter inserted.
      std::vector<Type *> NewTypes;
      NewTypes.reserve(FTy->getNumParams() + 1);
      {
        unsigned ArgNo = 0;
        FunctionType::param_iterator I = FTy->param_begin(),
                                     E = FTy->param_end();
        do {
          if (ArgNo == NestArgNo)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++ArgNo;
          ++I;
        } while (true);
      }

      // Replace the trampoline call with a direct call.
      FunctionType *NewFTy =
          FunctionType::get(FTy->getReturnType(), NewTypes, FTy->isVarArg());
      AttributeList NewPAL =
          AttributeList::get(FTy->getContext(), Attrs.getFnAttrs(),
                             Attrs.getRetAttrs(), NewArgAttrs);

      SmallVector<OperandBundleDef, 1> OpBundles;
      Call.getOperandBundlesAsDefs(OpBundles);

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
        NewCaller = InvokeInst::Create(NewFTy, NestF, II->getNormalDest(),
                                       II->getUnwindDest(), NewArgs,
                                       OpBundles);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
        NewCaller = CallBrInst::Create(NewFTy, NestF, CBI->getDefaultDest(),
                                       CBI->getIndirectDests(), NewArgs,
                                       OpBundles);
        cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
        cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewFTy, NestF, NewArgs, OpBundles);
        cast<CallInst>(NewCaller)->setTailCallKind(
            cast<CallInst>(Call).getTailCallKind());
        cast<CallInst>(NewCaller)->setCallingConv(
            cast<CallInst>(Call).getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      NewCaller->setDebugLoc(Call.getDebugLoc());

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee = NestF;
  // ...
  Call.setCalledFunction(FTy, NewCallee);
  return &Call;
}