#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"

#define DEBUG_TYPE "instcombine"
STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window", cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));
if (ITy->getBitWidth() < 32)
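// SimplifyAnyMemTransfer: raise the memcpy/memmove operand alignments to
// what is provably known, then replace a small constant-length transfer
// with a single integer load/store pair, carrying over alias,
// parallel-loop, and access-group metadata.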
auto *Src = MI->getRawSource();
if (!Src->hasOneUse())

if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
  MI->setDestAlignment(DstAlign);

if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
  MI->setSourceAlignment(SrcAlign);

if (!MemOpLength)
  return nullptr;

assert(Size && "0-sized memory transferring should be removed already.");

if (*CopyDstAlign < Size || *CopySrcAlign < Size)
Value *Src = MI->getArgOperand(1);
Value *Dest = MI->getArgOperand(0);

L->setAlignment(*CopySrcAlign);
L->setAAMetadata(AACopyMD);
MDNode *LoopMemParallelMD =
    MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
if (LoopMemParallelMD)
  L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
if (AccessGroupMD)
  L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

if (LoopMemParallelMD)
  S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
if (AccessGroupMD)
  S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

L->setVolatile(MT->isVolatile());
if (MI->isAtomic()) {

const Align KnownAlignment =
    getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
  MI->setDestAlignment(KnownAlignment);

assert(Len && "0-sized memory setting should be removed already.");
const Align Alignment = MI->getDestAlign().valueOrOne();

if (MI->isAtomic() && Alignment < Len)

Constant *FillVal = ConstantInt::get(ITy, Fill);
DbgAssign->replaceVariableLocationOp(FillC, FillVal);
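// simplifyMaskedLoad: a masked.load whose mask is all-ones becomes a plain
// load; if the pointer is unconditionally dereferenceable, it becomes a
// load followed by a select against the passthru operand.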
Value *LoadPtr = II.getArgOperand(0);
const Align Alignment = II.getParamAlign(0).valueOrOne();

LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                        "unmaskedload");

if (isDereferenceablePointer(LoadPtr, II.getType(), II.getDataLayout(), &II,
                             &AC)) {
  LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                           "unmaskedload");
  return Builder.CreateSelect(II.getArgOperand(1), LI, II.getArgOperand(2));
}

Value *StorePtr = II.getArgOperand(1);
Align Alignment = II.getParamAlign(1).valueOrOne();
StoreInst *S =
    new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
if (ConstMask->isAllOnesValue())

const Align Alignment = II.getParamAlign(0).valueOrOne();
LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,
                                        Alignment, "load.scalar");
Value *Splat =
    Builder.CreateVectorSplat(VecTy->getElementCount(), L, "broadcast");

Align Alignment = II.getParamAlign(1).valueOrOne();
StoreInst *S = new StoreInst(SplatValue, SplatPtr, false, Alignment);

if (ConstMask->isAllOnesValue()) {
  Align Alignment = II.getParamAlign(1).valueOrOne();
  ElementCount VF = WideLoadTy->getElementCount();

  Value *Extract = Builder.CreateExtractElement(II.getArgOperand(0), LastLane);
  StoreInst *S = new StoreInst(Extract, SplatPtr, false, Alignment);
auto *Arg = II.getArgOperand(0);
auto *StrippedArg = Arg->stripPointerCasts();
auto *StrippedInvariantGroupsArg = StrippedArg;

if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
    Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)

StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();

if (StrippedArg == StrippedInvariantGroupsArg)

Value *Result = nullptr;
if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
else
  llvm_unreachable(
      "simplifyInvariantGroupIntrinsic only handles launder and strip");

if (Result->getType()->getPointerAddressSpace() !=
    II.getType()->getPointerAddressSpace())
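// foldCttzCtlz: canonicalizations for cttz/ctlz. Counts fold through
// shifts, negation, and selects; known zero/one bits can collapse the
// count to a constant or attach a range attribute to the call.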
assert((II.getIntrinsicID() == Intrinsic::cttz ||
        II.getIntrinsicID() == Intrinsic::ctlz) &&
       "Expected cttz or ctlz intrinsic");
bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
Value *Op0 = II.getArgOperand(0);
Value *Op1 = II.getArgOperand(1);

if (II.getType()->isIntOrIntVectorTy(1)) {

II.dropUBImplyingAttrsAndMetadata();

return BinaryOperator::CreateAdd(ConstCttz, X);

return BinaryOperator::CreateSub(ConstCttz, X);

Constant *Width =
    ConstantInt::get(II.getType(), II.getType()->getScalarSizeInBits());
return BinaryOperator::CreateSub(Width, X);

return BinaryOperator::CreateAdd(ConstCtlz, X);

return BinaryOperator::CreateSub(ConstCtlz, X);
unsigned BitWidth = Ty->getScalarSizeInBits();

ConstantInt::get(R->getType(), R->getType()->getScalarSizeInBits() - 1),

if (PossibleZeros == DefiniteZeros) {
  auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);

if (BitWidth != 1 && !II.hasRetAttr(Attribute::Range) &&
    !II.getMetadata(LLVMContext::MD_range)) {
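// foldCtpop: ctpop of an i1 is the value itself, ctpop of a value with a
// single possibly-set bit becomes a shift of that bit down to bit zero,
// and known bits bound the result with a range annotation.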
assert(II.getIntrinsicID() == Intrinsic::ctpop &&
       "Expected ctpop intrinsic");

unsigned BitWidth = Ty->getScalarSizeInBits();
Value *Op0 = II.getArgOperand(0);

if ((~Known.Zero).isPowerOf2())
  return BinaryOperator::CreateLShr(
      Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));

ConstantRange OldRange =
    II.getRange().value_or(ConstantRange::getFull(BitWidth));

if (Range != OldRange) {
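// simplifyNeonTbl: convert a NEON tbl/tbx intrinsic into a shufflevector
// when the index mask is constant and at most two distinct source vectors
// are referenced; out-of-range indexes select the fallback vector (tbx)
// or zero (tbl).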
unsigned NumIndexes = RetTy->getNumElements();

if (!RetTy->getElementType()->isIntegerTy(8) ||
    (NumIndexes != 8 && NumIndexes != 16))

unsigned int StartIndex = (unsigned)IsExtension;

unsigned NumElementsPerSource = SourceTy->getNumElements();

if (NumIndexes > NumElementsPerSource)

unsigned int NumSourceOperands = II.arg_size() - 1 - (unsigned)IsExtension;

for (unsigned I = 0; I < NumIndexes; ++I) {

  unsigned SourceOperandIndex = Index / NumElementsPerSource;
  unsigned SourceOperandElementIndex = Index % NumElementsPerSource;

  Value *SourceOperand;
  if (SourceOperandIndex >= NumSourceOperands) {
    SourceOperandIndex = NumSourceOperands;

    SourceOperand = II.getArgOperand(0);
    SourceOperandElementIndex = I;

    SourceOperandElementIndex = 0;

    SourceOperand = II.getArgOperand(SourceOperandIndex + StartIndex);

    NumElementsPerSource)

  unsigned NumSlots = ValueToShuffleSlot.size();

  if (NumSlots == 2 && !ValueToShuffleSlot.contains(SourceOperand))

  auto [It, Inserted] =
      ValueToShuffleSlot.try_emplace(SourceOperand, NumSlots);

  ShuffleOperands[It->getSecond()] = SourceOperand;

  unsigned RemappedIndex =
      (It->getSecond() * NumElementsPerSource) + SourceOperandElementIndex;

  Indexes[I] = RemappedIndex;

return new ShuffleVectorInst(ShuffleOperands[0], ShuffleOperands[1],
                             ArrayRef(Indexes, NumIndexes));
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.arg_size() >= NumOperands && "Not enough operands");
  assert(E.arg_size() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))

for (; BI != BE; ++BI) {
  if (I->isDebugOrPseudoInst() ||

return II.getIntrinsicID() == Intrinsic::vastart ||
       (II.getIntrinsicID() == Intrinsic::vacopy &&
        I.getArgOperand(0) != II.getArgOperand(1));
assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);

Call.setArgOperand(0, Arg1);
Call.setArgOperand(1, Arg0);

Value *OperationResult = nullptr;

for (User *U : WO->users()) {

for (auto &AssumeVH : AC.assumptionsFor(U)) {

Inst->setHasNoSignedWrap();
Inst->setHasNoUnsignedWrap();

Ty = Ty->getScalarType();

Ty = Ty->getScalarType();
return F.getDenormalMode(Ty->getFltSemantics()).inputsAreZero();
switch (static_cast<unsigned>(Mask)) {

Value *Src0 = II.getArgOperand(0);
Value *Src1 = II.getArgOperand(1);

const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;

const bool IsStrict =
    II.getFunction()->getAttributes().hasFnAttr(Attribute::StrictFP);

II.setArgOperand(1, ConstantInt::get(Src1->getType(), fneg(Mask)));

if ((OrderedMask == fcInf || OrderedInvertedMask == fcInf) &&
    (IsOrdered || IsUnordered) && !IsStrict) {

if (OrderedInvertedMask == fcInf)

Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Src0);

(IsOrdered || IsUnordered) && !IsStrict) {

Value *EqInf = IsUnordered ? Builder.CreateFCmpUEQ(Src0, Inf)
                           : Builder.CreateFCmpOEQ(Src0, Inf);

if ((OrderedInvertedMask == fcPosInf || OrderedInvertedMask == fcNegInf) &&
    (IsOrdered || IsUnordered) && !IsStrict) {

Value *NeInf = IsUnordered ? Builder.CreateFCmpUNE(Src0, Inf)
                           : Builder.CreateFCmpONE(Src0, Inf);

if (Mask == fcNan && !IsStrict) {

if (!IsStrict && (IsOrdered || IsUnordered) &&

return std::nullopt;

return std::nullopt;

return *Known0 == *Known1;
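// moveAddAfterMinMax: canonicalize min/max(X + C0, C1) to
// min/max(X, C1 - C0) + C0, provided the add carries the matching
// nsw/nuw flag so the subtraction cannot overflow.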
assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
        MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
       "Expected a min or max intrinsic");

Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
const APInt *C0, *C1;

bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
if ((IsSigned && !Add->hasNoSignedWrap()) ||
    (!IsSigned && !Add->hasNoUnsignedWrap()))

APInt CDiff =
    IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
assert(!Overflow && "Expected simplify of min/max");

Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);
Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC);
return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
                : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));

const APInt *MinValue, *MaxValue;

} else if (match(&MinMax1,

if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)

unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;

if (AddSub->getOpcode() == Instruction::Add)
  IntrinsicID = Intrinsic::sadd_sat;
else if (AddSub->getOpcode() == Instruction::Sub)
  IntrinsicID = Intrinsic::ssub_sat;

Value *Sat = Builder.CreateIntrinsic(IntrinsicID, NewTy, {AT, BT});
Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
const APInt *C0, *C1;

switch (II->getIntrinsicID()) {
case Intrinsic::smax:
case Intrinsic::smin:
case Intrinsic::umax:
case Intrinsic::umin:

Value *Cmp = Builder.CreateICmp(Pred, X, I1);

if (InnerMinMaxID != MinMaxID &&
    !(((MinMaxID == Intrinsic::umax && InnerMinMaxID == Intrinsic::smax) ||
       (MinMaxID == Intrinsic::smin && InnerMinMaxID == Intrinsic::umin)) &&

Value *CondC = Builder.CreateICmp(Pred, C0, C1);
Value *NewC = Builder.CreateSelect(CondC, C0, C1);
return Builder.CreateIntrinsic(InnerMinMaxID, II->getType(),
                               {LHS->getArgOperand(0), NewC});

if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||

MinMaxID, II->getType());
Value *NewInner = Builder.CreateBinaryIntrinsic(MinMaxID, X, Y);
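// factorizeMinMaxTree: reduce a tree of identical min/max intrinsics that
// share a common operand by hoisting the repeated value out of the tree.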
if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
    RHS->getIntrinsicID() != MinMaxID ||
    (!LHS->hasOneUse() && !RHS->hasOneUse()))

Value *MinMaxOp = nullptr;
Value *ThirdOp = nullptr;
if (LHS->hasOneUse()) {
  if (D == A || C == A) {

  } else if (D == B || C == B) {

  assert(RHS->hasOneUse() && "Expected one-use operand");
  if (D == A || D == B) {

  } else if (C == A || C == B) {

if (!MinMaxOp || !ThirdOp)

if (!II->getType()->isVectorTy() ||
    !II->getCalledFunction()->isSpeculatable())

return isa<Constant>(Arg.get()) ||
       isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(),
                                          Arg.getOperandNo(), nullptr);
Type *SrcTy = X->getType();
for (Use &Arg : II->args()) {

else if (match(&Arg,
         X->getType() == SrcTy)

Value *NewIntrinsic =
    Builder.CreateIntrinsic(ResTy, II->getIntrinsicID(), NewArgs, FPI);

return match(V, m_OneUse(m_VecReverse(m_Value())));

for (Use &Arg : II->args()) {
  Arg.getOperandNo(), nullptr))

Value *NewIntrinsic = Builder.CreateIntrinsic(
    II->getType(), II->getIntrinsicID(), NewArgs, FPI);
return Builder.CreateVectorReverse(NewIntrinsic);

template <Intrinsic::ID IntrID>

static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
              "This helper only supports BSWAP and BITREVERSE intrinsics");

Value *OldReorderX, *OldReorderY;

Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, Y);

Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, X);
case Intrinsic::smax:
case Intrinsic::smin:
case Intrinsic::umax:
case Intrinsic::umin:
case Intrinsic::maximum:
case Intrinsic::minimum:
case Intrinsic::maximumnum:
case Intrinsic::minimumnum:
case Intrinsic::maxnum:
case Intrinsic::minnum:

auto IID = II->getIntrinsicID();

auto *InvariantBinaryInst =

return InvariantBinaryInst;

if (!CanReorderLanes)

int Sz = Mask.size();
for (int Idx : Mask) {

UsedIndices.set(Idx);

return UsedIndices.all() ? V : nullptr;
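// foldMinimumOverTrailingOrLeadingZeroCount: fold
// umin(cttz(X, ZU1), cttz(Y, ZU2)) into cttz(X | Y, ZU1 | ZU2); the same
// identity holds for ctlz. A constant count C instead ors in a constant
// with a single bit set C positions from the relevant end.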
template <Intrinsic::ID IntrID>

static_assert(IntrID == Intrinsic::cttz || IntrID == Intrinsic::ctlz,
              "This helper only supports cttz and ctlz intrinsics");

Value *CtOp1, *CtOp2;
Value *ZeroUndef1, *ZeroUndef2;

return Builder.CreateBinaryIntrinsic(
    IntrID, Builder.CreateOr(CtOp1, CtOp2),
    Builder.CreateOr(ZeroUndef1, ZeroUndef2));

unsigned BitWidth = I1->getType()->getScalarSizeInBits();

Type *Ty = I1->getType();

IntrID == Intrinsic::cttz ? Instruction::Shl : Instruction::LShr,
IntrID == Intrinsic::cttz
    ? ConstantInt::get(Ty, 1)

return Builder.CreateBinaryIntrinsic(
    IntrID, Builder.CreateOr(CtOp1, NewConst),

case Intrinsic::umax:
case Intrinsic::umin:
  if (HasNUW && LOp == Instruction::Add)
  if (HasNUW && LOp == Instruction::Shl)

case Intrinsic::smax:
case Intrinsic::smin:
  return HasNSW && LOp == Instruction::Add;
if (A == D || B == C)

Value *NewIntrinsic = Builder.CreateBinaryIntrinsic(TopLevelOpcode, B, D);

} else if (B == D) {
  Value *NewIntrinsic = Builder.CreateBinaryIntrinsic(TopLevelOpcode, A, C);

Value *Arg0 = II->getArgOperand(0);

bool AllPositive = true;
bool AllNegative = true;

const APInt &V = CI->getValue();
if (V.isNonNegative()) {
  AllNegative = false;
  return AllPositive && V.ult(ElemBits);

AllPositive = false;
return AllNegative && V.sgt(-ElemBits);

for (unsigned I = 0, E = VTy->getNumElements(); I < E; ++I) {
  if (!Check(ShiftConst->getAggregateElement(I)))

} else if (!Check(ShiftConst))

Value *NegAmt = B.CreateNeg(ShiftConst);

const bool IsSigned =
    IID == Intrinsic::arm_neon_vshifts || IID == Intrinsic::aarch64_neon_sshl;

IsSigned ? B.CreateAShr(Arg0, NegAmt) : B.CreateLShr(Arg0, NegAmt);

SQ.getWithInstruction(&CI)))

return visitCallBase(CI);
if (auto NumBytes = MI->getLengthInBytes()) {
  if (NumBytes->isZero())

if (MI->isAtomic() &&
    (NumBytes->isNegative() ||
     (NumBytes->getZExtValue() % MI->getElementSizeInBytes() != 0))) {
  assert(MI->getType()->isVoidTy() &&
         "non void atomic unordered mem intrinsic");

if (MI->isVolatile())

if (MTI->getSource() == MTI->getDest())

auto IsPointerUndefined = [MI](Value *Ptr) {

bool SrcIsUndefined = false;

SrcIsUndefined = IsPointerUndefined(MTI->getRawSource());

if (SrcIsUndefined || IsPointerUndefined(MI->getRawDest())) {

if (GVSrc->isConstant()) {

? Intrinsic::memcpy_element_unordered_atomic
: Intrinsic::memcpy;

auto VWidth = IIFVTy->getNumElements();
APInt PoisonElts(VWidth, 0);

if (II->isCommutative()) {
  if (auto Pair = matchSymmetricPair(II->getOperand(0), II->getOperand(1))) {

case Intrinsic::objectsize: {
  &InsertedInstructions)) {
  for (Instruction *Inserted : InsertedInstructions)
case Intrinsic::abs: {
  Value *IIOperand = II->getArgOperand(0);

  if (match(IIOperand,

  if (std::optional<bool> Known =

  return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1));

case Intrinsic::umin: {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);

  assert(II->getType()->getScalarSizeInBits() != 1 &&
         "Expected simplify of umin with max constant");

  if (Value *FoldedCttz =
          foldMinimumOverTrailingOrLeadingZeroCount<Intrinsic::cttz>(
              I0, I1, DL, Builder))

  if (Value *FoldedCtlz =
          foldMinimumOverTrailingOrLeadingZeroCount<Intrinsic::ctlz>(
              I0, I1, DL, Builder))

case Intrinsic::umax: {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);

  (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {

  Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);

  Value *Cmp = Builder.CreateICmpEQ(X, ConstantInt::get(X->getType(), 0));

  Builder.CreateSelect(Cmp, ConstantInt::get(X->getType(), 1), A);

  if (IID == Intrinsic::umax) {
case Intrinsic::smax:
case Intrinsic::smin: {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);

  (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {

  Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);

  const APInt *MinC, *MaxC;
  auto CreateCanonicalClampForm = [&](bool IsSigned) {
    auto MaxIID = IsSigned ? Intrinsic::smax : Intrinsic::umax;
    auto MinIID = IsSigned ? Intrinsic::smin : Intrinsic::umin;
    Value *NewMax = Builder.CreateBinaryIntrinsic(
        MaxIID, X, ConstantInt::get(X->getType(), *MaxC));
    MinIID, NewMax, ConstantInt::get(X->getType(), *MinC)));

  if (IID == Intrinsic::smax &&
    return CreateCanonicalClampForm(true);
  if (IID == Intrinsic::umax &&
    return CreateCanonicalClampForm(false);

  if ((IID == Intrinsic::umin || IID == Intrinsic::smax) &&
      II->getType()->isIntOrIntVectorTy(1)) {
    return BinaryOperator::CreateAnd(I0, I1);

  if ((IID == Intrinsic::umax || IID == Intrinsic::smin) &&
      II->getType()->isIntOrIntVectorTy(1)) {
    return BinaryOperator::CreateOr(I0, I1);

  if (IID == Intrinsic::smin) {
    Value *Zero = ConstantInt::get(X->getType(), 0);
    Builder.CreateIntrinsic(II->getType(), Intrinsic::scmp, {X, Zero}));

  if (IID == Intrinsic::smax || IID == Intrinsic::smin) {

  bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax;
  bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin;

  if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
    if (KnownSign == std::nullopt) {

    } else if (*KnownSign) {

  return BinaryOperator::CreateOr(I0, X);

  return BinaryOperator::CreateAnd(I0, Builder.CreateNot(X));

  Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);

  return BinaryOperator::CreateAnd(Builder.CreateBinaryIntrinsic(IID, X, Y),
                                   ConstantInt::get(II->getType(), *RHSC));

  if (I0->hasOneUse() && !I1->hasOneUse())

  if (IID == Intrinsic::smin || IID == Intrinsic::umax)
    Abs = Builder.CreateNeg(Abs, "nabs", IntMinIsPoison);

  I0, IsSigned, SQ.getWithInstruction(II));

  if (LHS_CR.icmp(Pred, *RHSC))

  ConstantInt::get(II->getType(), *RHSC));
case Intrinsic::scmp: {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);

  Builder.CreateIntrinsic(II->getType(), Intrinsic::scmp, {LHS, RHS}));

case Intrinsic::bitreverse: {
  Value *IIOperand = II->getArgOperand(0);

  X->getType()->isIntOrIntVectorTy(1)) {
    Type *Ty = II->getType();

  return crossLogicOpFold;

case Intrinsic::bswap: {
  Value *IIOperand = II->getArgOperand(0);

  Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);

  if (BW - LZ - TZ == 8) {
    assert(LZ != TZ && "active byte cannot be in the middle");

    return BinaryOperator::CreateNUWShl(
        IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ));

    return BinaryOperator::CreateExactLShr(
        IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ));

  unsigned C = X->getType()->getScalarSizeInBits() - BW;
  Value *CV = ConstantInt::get(X->getType(), C);

  return crossLogicOpFold;
case Intrinsic::masked_load:
  if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
case Intrinsic::masked_store:
  return simplifyMaskedStore(*II);
case Intrinsic::masked_gather:
  return simplifyMaskedGather(*II);
case Intrinsic::masked_scatter:
  return simplifyMaskedScatter(*II);
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:

case Intrinsic::powi:
  if (Power->isMinusOne())
    II->getArgOperand(0), II);
  if (Power->equalsInt(2))
    II->getArgOperand(0), II);
  if (!Power->getValue()[0]) {

case Intrinsic::cttz:
case Intrinsic::ctlz:

case Intrinsic::ctpop:
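// Funnel shifts: a constant shift amount is canonicalized modulo the bit
// width and fshr becomes fshl; degenerate forms collapse to plain
// shl/lshr, and two nested rotates combine into one.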
case Intrinsic::fshl:
case Intrinsic::fshr: {
  Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
  Type *Ty = II->getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();

  if (ModuloC != ShAmtC)

  "Shift amount expected to be modulo bitwidth");

  if (IID == Intrinsic::fshr) {

  assert(IID == Intrinsic::fshl &&
         "All funnel shifts by simple constants should go left");

  return BinaryOperator::CreateShl(Op0, ShAmtC);

  return BinaryOperator::CreateLShr(Op1,

  const APInt *ShAmtInnerC, *ShAmtOuterC;

  APInt Sum = *ShAmtOuterC + *ShAmtInnerC;

  Constant *ModuloC = ConstantInt::get(Ty, Modulo);

  {InnerOp, InnerOp, ModuloC});

  Mod, IID == Intrinsic::fshl ? Intrinsic::fshr : Intrinsic::fshl, Ty);

  Value *Op2 = II->getArgOperand(2);

  return BinaryOperator::CreateShl(Op0, And);
case Intrinsic::ptrmask: {
  unsigned BitWidth = DL.getPointerTypeSizeInBits(II->getType());

  Value *InnerPtr, *InnerMask;
  if (match(II->getArgOperand(0),

  "Mask types must match");

  Value *NewMask = Builder.CreateAnd(II->getArgOperand(1), InnerMask);

  unsigned NewAlignmentLog =
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow: {

  const APInt *C0, *C1;
  Value *Arg0 = II->getArgOperand(0);
  Value *Arg1 = II->getArgOperand(1);
  bool IsSigned = IID == Intrinsic::sadd_with_overflow;
  bool HasNWAdd = IsSigned

  APInt NewC =
      IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);

  IID, X, ConstantInt::get(Arg1->getType(), NewC)));

case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
case Intrinsic::usub_with_overflow:

case Intrinsic::ssub_with_overflow: {

  Value *Arg0 = II->getArgOperand(0);
  Value *Arg1 = II->getArgOperand(1);

  *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
case Intrinsic::uadd_sat:
case Intrinsic::sadd_sat:
case Intrinsic::usub_sat:
case Intrinsic::ssub_sat: {

  Type *Ty = SI->getType();

  unsigned BitWidth = Ty->getScalarSizeInBits();

  unsigned BitWidth = Ty->getScalarSizeInBits();

  if (IID == Intrinsic::usub_sat &&

  auto *NewC = Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, C, C1);
  Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, NewC, A);

  C->isNotMinSignedValue()) {

  Intrinsic::sadd_sat, Arg0, NegVal));

  const APInt *Val, *Val2;

  IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;

  if (Other->getIntrinsicID() == IID &&

  NewVal = Val->sadd_ov(*Val2, Overflow);

  IID, X, ConstantInt::get(II->getType(), NewVal)));
case Intrinsic::minnum:
case Intrinsic::maxnum:
case Intrinsic::minimum:
case Intrinsic::maximum: {
  Value *Arg0 = II->getArgOperand(0);
  Value *Arg1 = II->getArgOperand(1);

  case Intrinsic::maxnum:
    NewIID = Intrinsic::minnum;
    break;
  case Intrinsic::minnum:
    NewIID = Intrinsic::maxnum;
    break;
  case Intrinsic::maximum:
    NewIID = Intrinsic::minimum;
    break;
  case Intrinsic::minimum:
    NewIID = Intrinsic::maximum;
    break;

  Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);

  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::maximum:
  case Intrinsic::minimum:

  IID, X, ConstantFP::get(Arg0->getType(), Res),

  X->getType() == Y->getType()) {

  Builder.CreateBinaryIntrinsic(IID, X, Y, II, II->getName());

  auto IsMinMaxOrXNegX = [IID, &X](Value *Op0, Value *Op1) {
    return Op0->hasOneUse() ||
           (IID != Intrinsic::minimum && IID != Intrinsic::minnum);

  if (IsMinMaxOrXNegX(Arg0, Arg1) || IsMinMaxOrXNegX(Arg1, Arg0)) {
    if (IID == Intrinsic::minimum || IID == Intrinsic::minnum)
case Intrinsic::matrix_multiply: {

  Value *Op0 = II->getOperand(0);
  Value *Op1 = II->getOperand(1);
  Value *OpNotNeg, *NegatedOp;
  unsigned NegatedOpArg, OtherOpArg;

  Value *OtherOp = II->getOperand(OtherOpArg);

  NewArgs[NegatedOpArg] = OpNotNeg;

  Builder.CreateIntrinsic(II->getType(), IID, NewArgs, II);

case Intrinsic::fmuladd: {

  II->getFastMathFlags(), SQ.getWithInstruction(II)))

  II->getFastMathFlags());

case Intrinsic::fma: {

  Value *Src0 = II->getArgOperand(0);
  Value *Src1 = II->getArgOperand(1);
  Value *Src2 = II->getArgOperand(2);

  SQ.getWithInstruction(II)))

case Intrinsic::copysign: {
  Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);

  if (*KnownSignBit) {

  Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);

  Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
case Intrinsic::fabs: {
  Value *Arg = II->getArgOperand(0);

  SI->setFastMathFlags(FMF1 | FMF2);

  Value *Magnitude, *Sign;
  if (match(II->getArgOperand(0),

  Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Magnitude, II);

case Intrinsic::ceil:
case Intrinsic::floor:
case Intrinsic::round:
case Intrinsic::roundeven:
case Intrinsic::nearbyint:
case Intrinsic::rint:
case Intrinsic::trunc: {

case Intrinsic::cos:
case Intrinsic::amdgcn_cos:
case Intrinsic::cosh: {
  Value *Src = II->getArgOperand(0);

case Intrinsic::sin:
case Intrinsic::amdgcn_sin:
case Intrinsic::sinh:
case Intrinsic::tan:
case Intrinsic::tanh: {

case Intrinsic::ldexp: {

  Value *Src = II->getArgOperand(0);
  Value *Exp = II->getArgOperand(1);

  Src->getType()->getScalarType()->getFltSemantics();

  Exp->getType() == InnerExp->getType()) {

  II->setArgOperand(1, NewExp);
  II->setFastMathFlags(InnerFlags);

  Builder.CreateSelect(ExtSrc, ConstantFP::get(II->getType(), 2.0),
                       ConstantFP::get(II->getType(), 1.0));

  Builder.CreateSelect(ExtSrc, ConstantFP::get(II->getType(), 0.5),
                       ConstantFP::get(II->getType(), 1.0));

  Value *SelectCond, *SelectLHS, *SelectRHS;
  if (match(II->getArgOperand(1),

  Value *NewLdexp = nullptr;

  NewLdexp = Builder.CreateLdexp(Src, SelectLHS, II);

  NewLdexp = Builder.CreateLdexp(Src, SelectRHS, II);
case Intrinsic::ptrauth_auth:
case Intrinsic::ptrauth_resign: {

  if (II->hasOperandBundles())

  bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign;
  Value *Ptr = II->getArgOperand(0);
  Value *Disc = II->getArgOperand(2);

  Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr;

  if (!CPA || !CPA->isKnownCompatibleWith(Key, Disc, DL))

  BasePtr = Builder.CreatePtrToInt(CPA->getPointer(), II->getType());

  if (AuthKey && NeedSign) {
    NewIntrin = Intrinsic::ptrauth_resign;
  } else if (AuthKey) {
    NewIntrin = Intrinsic::ptrauth_auth;
  } else if (NeedSign) {
    NewIntrin = Intrinsic::ptrauth_sign;

case Intrinsic::arm_neon_vtbl1:
case Intrinsic::arm_neon_vtbl2:
case Intrinsic::arm_neon_vtbl3:
case Intrinsic::arm_neon_vtbl4:
case Intrinsic::aarch64_neon_tbl1:
case Intrinsic::aarch64_neon_tbl2:
case Intrinsic::aarch64_neon_tbl3:
case Intrinsic::aarch64_neon_tbl4:

case Intrinsic::arm_neon_vtbx1:
case Intrinsic::arm_neon_vtbx2:
case Intrinsic::arm_neon_vtbx3:
case Intrinsic::arm_neon_vtbx4:
case Intrinsic::aarch64_neon_tbx1:
case Intrinsic::aarch64_neon_tbx2:
case Intrinsic::aarch64_neon_tbx3:
case Intrinsic::aarch64_neon_tbx4:

case Intrinsic::arm_neon_vmulls:
case Intrinsic::arm_neon_vmullu:
case Intrinsic::aarch64_neon_smull:
case Intrinsic::aarch64_neon_umull: {
  Value *Arg0 = II->getArgOperand(0);
  Value *Arg1 = II->getArgOperand(1);

  bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
               IID == Intrinsic::aarch64_neon_umull);

case Intrinsic::arm_neon_aesd:
case Intrinsic::arm_neon_aese:
case Intrinsic::aarch64_crypto_aesd:
case Intrinsic::aarch64_crypto_aese:
case Intrinsic::aarch64_sve_aesd:
case Intrinsic::aarch64_sve_aese: {
  Value *DataArg = II->getArgOperand(0);
  Value *KeyArg = II->getArgOperand(1);

case Intrinsic::arm_neon_vshifts:
case Intrinsic::arm_neon_vshiftu:
case Intrinsic::aarch64_neon_sshl:
case Intrinsic::aarch64_neon_ushl:

case Intrinsic::hexagon_V6_vandvrt:
case Intrinsic::hexagon_V6_vandvrt_128B: {

  if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
      ID0 != Intrinsic::hexagon_V6_vandqrt_128B)

  Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);

  if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
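// stackrestore is removable when every instruction between the matching
// stacksave and the restore is classified as harmless: any alloca or
// call with side effects in between blocks the removal.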
case Intrinsic::stackrestore: {
  enum class ClassifyResult {
    None,
    Alloca,
    StackRestore,
    CallWithSideEffects,
  };

  return ClassifyResult::Alloca;

  if (II->getIntrinsicID() == Intrinsic::stackrestore)
    return ClassifyResult::StackRestore;

  if (II->mayHaveSideEffects())
    return ClassifyResult::CallWithSideEffects;

  return ClassifyResult::CallWithSideEffects;

  return ClassifyResult::None;

  if (SS->getIntrinsicID() == Intrinsic::stacksave &&
      SS->getParent() == II->getParent()) {

    bool CannotRemove = false;
    for (++BI; &*BI != II; ++BI) {
      switch (Classify(&*BI)) {
      case ClassifyResult::None:

      case ClassifyResult::StackRestore:

        CannotRemove = true;

      case ClassifyResult::Alloca:
      case ClassifyResult::CallWithSideEffects:

        CannotRemove = true;

  bool CannotRemove = false;
  for (++BI; &*BI != TI; ++BI) {
    switch (Classify(&*BI)) {
    case ClassifyResult::None:

    case ClassifyResult::StackRestore:

    case ClassifyResult::Alloca:
    case ClassifyResult::CallWithSideEffects:

      CannotRemove = true;
case Intrinsic::lifetime_end:

  if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
      II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
      II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress) ||
      II->getFunction()->hasFnAttribute(Attribute::SanitizeMemTag))

  return I.getIntrinsicID() == Intrinsic::lifetime_start;
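// llvm.assume: conjunctions are split into separate assumes, an assumed
// non-null load is turned into !nonnull/!noundef metadata, operand
// bundles (alignment, separate_storage) are canonicalized, and conditions
// already known to hold are dropped.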
case Intrinsic::assume: {
  Value *IIOperand = II->getArgOperand(0);

  II->getOperandBundlesAsDefs(OpBundles);

  return RemoveConditionFromAssume(Next);

  Value *AssumeIntrinsic = II->getCalledOperand();

  Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,
  Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());

  Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
                     Builder.CreateNot(A), OpBundles, II->getName());
  Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,

  LHS->getOpcode() == Instruction::Load &&
  LHS->getType()->isPointerTy() &&

  LHS->setMetadata(LLVMContext::MD_nonnull, MD);
  LHS->setMetadata(LLVMContext::MD_noundef, MD);
  return RemoveConditionFromAssume(II);

  for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {

    if (OBU.getTagName() == "separate_storage") {

      auto MaybeSimplifyHint = [&](const Use &U) {
        Value *Hint = U.get();

      MaybeSimplifyHint(OBU.Inputs[0]);
      MaybeSimplifyHint(OBU.Inputs[1]);

  if (!RK || RK.AttrKind != Attribute::Alignment ||

  A->getType()->isPointerTy()) {

  Replacement->insertBefore(Next->getIterator());
  AC.registerAssumption(Replacement);
  return RemoveConditionFromAssume(II);

  if (auto *Replacement =

  Replacement->insertAfter(II->getIterator());
  AC.registerAssumption(Replacement);

  return RemoveConditionFromAssume(II);

  for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
    auto &BOI = II->bundle_op_info_begin()[Idx];

    if (BOI.End - BOI.Begin > 2)

    if (BOI.End - BOI.Begin > 0) {
      Worklist.pushValue(II->op_begin()[BOI.Begin]);

    if (BOI.End - BOI.Begin > 0)
      II->op_begin()[BOI.Begin].set(CanonRK.WasOn);
    if (BOI.End - BOI.Begin > 1)
      II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
case Intrinsic::experimental_guard: {

  Value *NextCond = nullptr;

  Value *CurrCond = II->getArgOperand(0);

  if (CurrCond != NextCond) {

  while (MoveI != NextInst) {
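// vector.insert/vector.extract with a constant index on fixed vectors
// lower to shufflevectors: the subvector is widened to the destination
// width, then blended into (or sliced out of) the source vector.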
case Intrinsic::vector_insert: {
  Value *Vec = II->getArgOperand(0);
  Value *SubVec = II->getArgOperand(1);
  Value *Idx = II->getArgOperand(2);

  if (DstTy && VecTy && SubVecTy) {
    unsigned DstNumElts = DstTy->getNumElements();
    unsigned VecNumElts = VecTy->getNumElements();
    unsigned SubVecNumElts = SubVecTy->getNumElements();

    if (VecNumElts == SubVecNumElts)

    for (i = 0; i != SubVecNumElts; ++i)
    for (; i != VecNumElts; ++i)

    Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);

    for (unsigned i = 0; i != IdxN; ++i)
    for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
    for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)

    Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);

case Intrinsic::vector_extract: {
  Value *Vec = II->getArgOperand(0);
  Value *Idx = II->getArgOperand(1);

  Type *ReturnType = II->getType();

  Value *InsertTuple, *InsertIdx, *InsertValue;

  InsertValue->getType() == ReturnType) {

  if (ExtractIdx == Index)

  if (DstTy && VecTy) {
    auto DstEltCnt = DstTy->getElementCount();
    auto VecEltCnt = VecTy->getElementCount();

    if (DstEltCnt == VecTy->getElementCount()) {

    if (VecEltCnt.isScalable() || DstEltCnt.isScalable())

    for (unsigned i = 0; i != DstEltCnt.getKnownMinValue(); ++i)
      Mask.push_back(IdxN + i);

    Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask);
case Intrinsic::experimental_vp_reverse: {

  Value *Vec = II->getArgOperand(0);
  Value *Mask = II->getArgOperand(1);

  Value *EVL = II->getArgOperand(2);

  OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(),

case Intrinsic::vector_reduce_or:
case Intrinsic::vector_reduce_and: {

  Value *Arg = II->getArgOperand(0);

  if (FTy->getElementType() == Builder.getInt1Ty()) {

    Vect, Builder.getIntNTy(FTy->getNumElements()));
    if (IID == Intrinsic::vector_reduce_and) {

    assert(IID == Intrinsic::vector_reduce_or && "Expected or reduction.");
    Res = Builder.CreateIsNotNull(Res);
case Intrinsic::vector_reduce_add: {
  if (IID == Intrinsic::vector_reduce_add) {

    Value *Arg = II->getArgOperand(0);

    if (FTy->getElementType() == Builder.getInt1Ty()) {

      Vect, Builder.getIntNTy(FTy->getNumElements()));
      Value *Res = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, V);
      Res = Builder.CreateZExtOrTrunc(Res, II->getType());

    if (VecToReduceCount.isFixed()) {

      return BinaryOperator::CreateMul(
          ConstantInt::get(Splat->getType(), VectorSize, false,

case Intrinsic::vector_reduce_xor: {
  if (IID == Intrinsic::vector_reduce_xor) {

    Value *Arg = II->getArgOperand(0);

    if (VTy->getElementType() == Builder.getInt1Ty()) {

case Intrinsic::vector_reduce_mul: {
  if (IID == Intrinsic::vector_reduce_mul) {

    Value *Arg = II->getArgOperand(0);

    if (VTy->getElementType() == Builder.getInt1Ty()) {

      Res = Builder.CreateZExt(Res, II->getType());

case Intrinsic::vector_reduce_umin:
case Intrinsic::vector_reduce_umax: {
  if (IID == Intrinsic::vector_reduce_umin ||
      IID == Intrinsic::vector_reduce_umax) {

    Value *Arg = II->getArgOperand(0);

    if (VTy->getElementType() == Builder.getInt1Ty()) {
      Value *Res = IID == Intrinsic::vector_reduce_umin
                       ? Builder.CreateAndReduce(Vect)
                       : Builder.CreateOrReduce(Vect);

case Intrinsic::vector_reduce_smin:
case Intrinsic::vector_reduce_smax: {
  if (IID == Intrinsic::vector_reduce_smin ||
      IID == Intrinsic::vector_reduce_smax) {

    Value *Arg = II->getArgOperand(0);

    if (VTy->getElementType() == Builder.getInt1Ty()) {

      Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
                    (ExtOpc == Instruction::CastOps::ZExt))
                       ? Builder.CreateAndReduce(Vect)
                       : Builder.CreateOrReduce(Vect);

      Res = Builder.CreateCast(ExtOpc, Res, II->getType());

case Intrinsic::vector_reduce_fmax:
case Intrinsic::vector_reduce_fmin:
case Intrinsic::vector_reduce_fadd:
case Intrinsic::vector_reduce_fmul: {
  bool CanReorderLanes = (IID != Intrinsic::vector_reduce_fadd &&
                          IID != Intrinsic::vector_reduce_fmul) ||
                         II->hasAllowReassoc();
  const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||
                           IID == Intrinsic::vector_reduce_fmul)

  Value *Arg = II->getArgOperand(ArgIdx);
case Intrinsic::is_fpclass: {

case Intrinsic::threadlocal_address: {

case Intrinsic::frexp: {

case Intrinsic::get_active_lane_mask: {
  const APInt *Op0, *Op1;

  Type *OpTy = II->getOperand(0)->getType();

  II->getType(), Intrinsic::get_active_lane_mask,
  {Constant::getNullValue(OpTy),
   ConstantInt::get(OpTy, Op1->usub_sat(*Op0))}));

case Intrinsic::experimental_get_vector_length: {

  std::max(II->getArgOperand(0)->getType()->getScalarSizeInBits(),
           II->getType()->getScalarSizeInBits());

  SQ.getWithInstruction(II))

  *II, Builder.CreateZExtOrTrunc(II->getArgOperand(0), II->getType()));

bool IsVectorCond = Sel->getCondition()->getType()->isVectorTy();

bool SimplifyBothArms =
    !Op->getType()->isVectorTy() && II->getType()->isVectorTy();

*II, Sel, false, SimplifyBothArms))

return visitCallBase(*II);

if (FI1SyncScope != FI2->getSyncScopeID() ||

if (NFI && isIdenticalOrStrongerFence(NFI, &FI))

if (isIdenticalOrStrongerFence(PFI, &FI))

return visitCallBase(II);

return visitCallBase(CBI);
unsigned FirstArgIdx;
[[maybe_unused]] bool Error;
Error = Args[2].getAsInteger(10, FirstArgIdx);

if (AllAspects.empty())

if (Aspect == "float") {

  [](Value *V) { return V->getType()->isFloatingPointTy(); }))

if (NeededAspects.size() == AllAspects.size())

FnName, Callee->getFunctionType(),
Callee->getAttributes().removeFnAttribute(Ctx, "modular-format"));

New->setCalledFunction(ModularFn);
New->removeFnAttr("modular-format");

const auto ReferenceAspect = [&](StringRef Aspect) {

  B.CreateCall(RelocNoneFn,

ReferenceAspect(Request);

InstCombineRAUW, InstCombineErase);
if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
if (Underlying != TrampMem &&
    (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))

if (II->getIntrinsicID() == Intrinsic::init_trampoline) {

  InitTrampoline = II;

if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)

if (!InitTrampoline)

if (InitTrampoline->getOperand(0) != TrampMem)

return InitTrampoline;

if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
    II->getOperand(0) == TrampMem)

Callee = Callee->stripPointerCasts();

if (!IPC || !IPC->isNoopCast(DL))

if (IIID != Intrinsic::ptrauth_resign && IIID != Intrinsic::ptrauth_sign)

std::optional<OperandBundleUse> PtrAuthBundleOrNone;

PtrAuthBundleOrNone = Bundle;

if (!PtrAuthBundleOrNone)

Value *NewCallee = nullptr;

case Intrinsic::ptrauth_resign: {
  if (II->getOperand(3) != PtrAuthBundleOrNone->Inputs[0])

  if (II->getOperand(4) != PtrAuthBundleOrNone->Inputs[1])

  if (II->getOperand(1) != PtrAuthBundleOrNone->Inputs[0])

  Value *NewBundleOps[] = {II->getOperand(1), II->getOperand(2)};

  NewCallee = II->getOperand(0);

case Intrinsic::ptrauth_sign: {
  if (II->getOperand(1) != PtrAuthBundleOrNone->Inputs[0])

  if (II->getOperand(2) != PtrAuthBundleOrNone->Inputs[1])

  NewCallee = II->getOperand(0);

NewCallee = Builder.CreateBitOrPointerCast(NewCallee, Callee->getType());

if (!CPA->isKnownCompatibleWith(Key, Discriminator, DL))
bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,
                                            const TargetLibraryInfo *TLI) {

if (NewAlign > ExistingAlign) {

SmallVector<unsigned, 4> ArgNos;

if (V->getType()->isPointerTy()) {

(HasDereferenceable &&
 V->getType()->getPointerAddressSpace()))) {
  if (Value *Res = simplifyNonNullOperand(V, HasDereferenceable)) {

if (!ArgNos.empty()) {

AS = AS.addParamAttribute(Ctx, ArgNos,

transformConstExprCastCall(Call))

return transformCallThroughTrampoline(Call, *II);

if (Instruction *NewCall = foldPtrAuthIntrinsicCallee(Call))

if (Instruction *NewCall = foldPtrAuthConstantCallee(Call))

if (!IA->canThrow()) {

Type *RetArgTy = ReturnedArg->getType();

Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));

ConstantInt *FunctionType = nullptr;

if (MDNode *MD = CalleeF->getMetadata(LLVMContext::MD_kcfi_type))

<< ": call to " << CalleeF->getName()
<< " using a mismatching function pointer type\n";
case Intrinsic::experimental_gc_statepoint: {

  SmallPtrSet<Value *, 32> LiveGcValues;

  GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);

  LiveGcValues.insert(BasePtr);
  LiveGcValues.insert(DerivedPtr);

  std::optional<OperandBundleUse> Bundle =

  unsigned NumOfGCLives = LiveGcValues.size();
  if (!Bundle || NumOfGCLives == Bundle->Inputs.size())

  DenseMap<Value *, unsigned> Val2Idx;
  std::vector<Value *> NewLiveGc;
  for (Value *V : Bundle->Inputs) {

    if (LiveGcValues.count(V)) {
      It->second = NewLiveGc.size();
      NewLiveGc.push_back(V);

      It->second = NumOfGCLives;

  GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);

  assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
         "Missed live gc for base pointer");

  GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));

  assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
         "Missed live gc for derived pointer");

  GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));
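// transformConstExprCastCall: when the callee is a function bitcast to a
// different prototype, try to call it directly, provided the return and
// argument types/attributes can be converted compatibly.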
bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {

"CallBr's don't have a single point after a def to insert at");

if (Callee->isDeclaration())

if (Callee->hasFnAttribute("thunk"))

if (Callee->hasFnAttribute(Attribute::Naked))

FunctionType *FT = Callee->getFunctionType();

Type *NewRetTy = FT->getReturnType();

if (OldRetTy != NewRetTy) {

if (!Caller->use_empty())

if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
  AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
  if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(
          NewRetTy, CallerPAL.getRetAttrs())))

if (!Caller->use_empty()) {

  PhisNotSupportedBlock = II->getNormalDest();
  if (PhisNotSupportedBlock)
    for (User *U : Caller->users())
      if (PN->getParent() == PhisNotSupportedBlock)

unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
    Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))

for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
  Type *ParamTy = FT->getParamType(i);
  Type *ActTy = (*AI)->getType();

  if (AttrBuilder(FT->getContext(), CallerPAL.getParamAttrs(i))
          .overlaps(AttributeFuncs::typeIncompatible(
              ParamTy, CallerPAL.getParamAttrs(i),
              AttributeFuncs::ASK_UNSAFE_TO_DROP)))

  CallerPAL.hasParamAttr(i, Attribute::Preallocated))

  if (CallerPAL.hasParamAttr(i, Attribute::SwiftError))

  if (CallerPAL.hasParamAttr(i, Attribute::ByVal) !=
      Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))

if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
    !CallerPAL.isEmpty()) {

if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
    SRetIdx - AttributeList::FirstArgIndex >= FT->getNumParams())
SmallVector<Value *, 8> Args;

Args.reserve(NumActualArgs);
ArgAttrs.reserve(NumActualArgs);

AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());

AttributeFuncs::typeIncompatible(NewRetTy, CallerPAL.getRetAttrs()));

for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
  Type *ParamTy = FT->getParamType(i);

  Value *NewArg = *AI;
  if ((*AI)->getType() != ParamTy)
    NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
  Args.push_back(NewArg);

  AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(
      ParamTy, CallerPAL.getParamAttrs(i), AttributeFuncs::ASK_SAFE_TO_DROP);

  CallerPAL.getParamAttrs(i).removeAttributes(Ctx, IncompatibleAttrs));

for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {

if (FT->getNumParams() < NumActualArgs) {

  if (FT->isVarArg()) {

    for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {

      Value *NewArg = *AI;
      if (PTy != (*AI)->getType()) {

        NewArg = Builder.CreateCast(opcode, *AI, PTy);

      Args.push_back(NewArg);

      ArgAttrs.push_back(CallerPAL.getParamAttrs(i));

AttributeSet FnAttrs = CallerPAL.getFnAttrs();

assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
       "missing argument attributes");
AttributeList NewCallerPAL = AttributeList::get(

NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
                               II->getUnwindDest(), Args, OpBundles);

NewCall = Builder.CreateCall(Callee, Args, OpBundles);

NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});

if (OldRetTy != NV->getType() && !Caller->use_empty()) {
  assert(!NV->getType()->isVoidTy());

  NC->setDebugLoc(Caller->getDebugLoc());

  assert(OptInsertPt && "No place to insert cast");

Worklist.pushUsersToWorkList(*Caller);

if (!Caller->use_empty())

else if (Caller->hasValueHandle()) {
  if (OldRetTy == NV->getType())
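// transformCallThroughTrampoline: turn a call through an adjusted
// init.trampoline into a direct call to the underlying function, splicing
// the nest argument into the position the callee expects.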
Instruction *
InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
                                                 IntrinsicInst &Tramp) {

if (Attrs.hasAttrSomewhere(Attribute::Nest))

if (!NestAttrs.isEmpty()) {
  unsigned NestArgNo = 0;
  Type *NestTy = nullptr;
  AttributeSet NestAttr;

  E = NestFTy->param_end();
  I != E; ++NestArgNo, ++I) {
    AttributeSet AS = NestAttrs.getParamAttrs(NestArgNo);

  std::vector<Value *> NewArgs;
  std::vector<AttributeSet> NewArgAttrs;

  if (ArgNo == NestArgNo) {

    if (NestVal->getType() != NestTy)
      NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
    NewArgs.push_back(NestVal);
    NewArgAttrs.push_back(NestAttr);

  NewArgs.push_back(*I);
  NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));

  std::vector<Type *> NewTypes;
  NewTypes.reserve(FTy->getNumParams() + 1);

  E = FTy->param_end();

  if (ArgNo == NestArgNo)
    NewTypes.push_back(NestTy);

  NewTypes.push_back(*I);

  FunctionType *NewFTy =

  AttributeList NewPAL =
      AttributeList::get(FTy->getContext(), Attrs.getFnAttrs(),
                         Attrs.getRetAttrs(), NewArgAttrs);

  II->getUnwindDest(), NewArgs, OpBundles);

  CBI->getIndirectDests(), NewArgs, OpBundles);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static SDValue foldBitOrderCrossLogicOp(SDNode *N, SelectionDAG &DAG)
static Type * getPromotedType(Type *Ty)
Return the specified type promoted as it would be to pass though a va_arg area.
static Instruction * createOverflowTuple(IntrinsicInst *II, Value *Result, Constant *Overflow)
Creates a result tuple for an overflow intrinsic II with a given Result and a constant Overflow value...
static IntrinsicInst * findInitTrampolineFromAlloca(Value *TrampMem)
static bool removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC, std::function< bool(const IntrinsicInst &)> IsStart)
static bool inputDenormalIsDAZ(const Function &F, const Type *Ty)
static Instruction * reassociateMinMaxWithConstantInOperand(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
If this min/max has a matching min/max operand with a constant, try to push the constant operand into...
static bool isIdempotentBinaryIntrinsic(Intrinsic::ID IID)
Helper to match idempotent binary intrinsics, namely, intrinsics where f(f(x, y), y) == f(x,...
static bool signBitMustBeTheSame(Value *Op0, Value *Op1, const SimplifyQuery &SQ)
Return true if two values Op0 and Op1 are known to have the same sign.
static Value * optimizeModularFormat(CallInst *CI, IRBuilderBase &B)
static Instruction * moveAddAfterMinMax(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0.
static Instruction * simplifyInvariantGroupIntrinsic(IntrinsicInst &II, InstCombinerImpl &IC)
This function transforms launder.invariant.group and strip.invariant.group like: launder(launder(x)) ...
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E, unsigned NumOperands)
static std::optional< bool > getKnownSign(Value *Op, const SimplifyQuery &SQ)
static cl::opt< unsigned > GuardWideningWindow("instcombine-guard-widening-window", cl::init(3), cl::desc("How wide an instruction window to bypass looking for " "another guard"))
static bool hasUndefSource(AnyMemTransferInst *MI)
Recognize a memcpy/memmove from a trivially otherwise unused alloca.
static Instruction * factorizeMinMaxTree(IntrinsicInst *II)
Reduce a sequence of min/max intrinsics with a common operand.
static Instruction * foldClampRangeOfTwo(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
If we have a clamp pattern like max (min X, 42), 41 – where the output can only be one of two possibl...
static Value * simplifyReductionOperand(Value *Arg, bool CanReorderLanes)
static IntrinsicInst * findInitTrampolineFromBB(IntrinsicInst *AdjustTramp, Value *TrampMem)
static Value * foldIntrinsicUsingDistributiveLaws(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
static std::optional< bool > getKnownSignOrZero(Value *Op, const SimplifyQuery &SQ)
static Value * foldMinimumOverTrailingOrLeadingZeroCount(Value *I0, Value *I1, const DataLayout &DL, InstCombiner::BuilderTy &Builder)
Fold an unsigned minimum of trailing or leading zero bits counts: umin(cttz(CtOp1,...
static Value * foldIdempotentBinaryIntrinsicRecurrence(InstCombinerImpl &IC, IntrinsicInst *II)
Attempt to simplify value-accumulating recurrences of kind: umax.acc = phi i8 [ umax,...
static Instruction * foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC)
static Instruction * simplifyNeonTbl(IntrinsicInst &II, InstCombiner &IC, bool IsExtension)
Convert tbl/tbx intrinsics to shufflevector if the mask is constant, and at most two source operands ...
static Instruction * foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC)
static IntrinsicInst * findInitTrampoline(Value *Callee)
static FCmpInst::Predicate fpclassTestIsFCmp0(FPClassTest Mask, const Function &F, Type *Ty)
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
static Value * reassociateMinMaxWithConstants(IntrinsicInst *II, IRBuilderBase &Builder, const SimplifyQuery &SQ)
If this min/max has a constant operand and an operand that is a matching min/max with a constant oper...
static CallInst * canonicalizeConstantArg0ToArg1(CallInst &Call)
static Instruction * foldNeonShift(IntrinsicInst *II, InstCombinerImpl &IC)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
static bool hasNoSignedWrap(BinaryOperator &I)
static bool inputDenormalIsIEEE(DenormalMode Mode)
Return true if it's possible to assume IEEE treatment of input denormals in F for Val.
static const Function * getCalledFunction(const Value *V)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod
const SmallVectorImpl< MachineOperand > & Cond
This file implements the SmallBitVector class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static constexpr roundingMode rmNearestTiesToEven
static LLVM_ABI bool hasSignBitInMSB(const fltSemantics &)
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
LLVM_ABI APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt uadd_ov(const APInt &RHS, bool &Overflow) const
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
LLVM_ABI APInt uadd_sat(const APInt &RHS) const
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
static APSInt getMinValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the minimum integer value with the given bit width and signedness.
static APSInt getMaxValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the maximum integer value with the given bit width and signedness.
This class represents any memset intrinsic.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
static LLVM_ABI AttributeSet get(LLVMContext &C, const AttrBuilder &B)
static LLVM_ABI Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
static LLVM_ABI Attribute getWithDereferenceableBytes(LLVMContext &Context, uint64_t Bytes)
static LLVM_ABI Attribute getWithDereferenceableOrNullBytes(LLVMContext &Context, uint64_t Bytes)
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
static LLVM_ABI Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
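A minimal sketch of attaching one of the uniqued attributes above to a call's return value; addRetAttr is listed further below, and the 16-byte alignment is an illustrative choice:

#include "llvm/IR/Attributes.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

void markReturnAligned(CallBase &CB) {
  // Attributes are uniqued per LLVMContext; equal parameters yield the
  // same Attribute object.
  CB.addRetAttr(Attribute::getWithAlignment(CB.getContext(), Align(16)));
}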
InstListType::reverse_iterator reverse_iterator
InstListType::iterator iterator
Instruction iterators...
LLVM_ABI bool isSigned() const
Whether the intrinsic is signed or unsigned.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
static BinaryOperator * CreateFAddFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
static BinaryOperator * CreateNSW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFSubFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool isInAllocaArgument(unsigned ArgNo) const
Determine whether this argument is passed in an alloca.
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
uint64_t getParamDereferenceableBytes(unsigned i) const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
Attribute getFnAttr(StringRef Kind) const
Get the attribute of a given kind for the function.
bool doesNotThrow() const
Determine if the call cannot unwind.
void addRetAttr(Attribute::AttrKind Kind)
Adds the attribute to the return value.
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
Value * getReturnedArgOperand() const
If one of the arguments has the 'returned' attribute, returns its operand value.
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
void setCalledOperand(Value *V)
static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
void setCalledFunction(Function *Fn)
Sets the function called, including updating the function type.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
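A minimal sketch of inspecting a call site through the CallBase interface above; the particular notion of a "simple" call is illustrative, not a test used by this file:

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

bool isSimpleDirectCall(CallBase &CB) {
  Function *Callee = CB.getCalledFunction(); // null for indirect calls
  if (!Callee || CB.hasOperandBundles() || CB.isConvergent())
    return false;
  for (unsigned I = 0, E = CB.arg_size(); I != E; ++I)
    if (CB.isInAllocaArgument(I))
      return false;
  return CB.doesNotThrow();
}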
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This class represents a function call, abstracting a target machine's calling convention.
bool isNoTailCall() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool isMustTailCall() const
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Predicate getUnorderedPredicate() const
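A minimal sketch of the predicate transforms above applied to an ICmpInst; purely illustrative:

#include "llvm/IR/Instructions.h"
using namespace llvm;

void inspectPredicates(const ICmpInst &Cmp) {
  // Swapping mirrors the comparison: SLE -> SGE, ULT -> UGT, EQ -> EQ.
  CmpInst::Predicate Swapped = Cmp.getSwappedPredicate();
  // Relaxing drops strictness: SGT -> SGE, ULT -> ULE.
  CmpInst::Predicate Relaxed = Cmp.getNonStrictPredicate();
  (void)Swapped;
  (void)Relaxed;
}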
static LLVM_ABI ConstantAggregateZero * get(Type *Ty)
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getInfinity(Type *Ty, bool Negative=false)
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static LLVM_ABI ConstantPtrAuth * get(Constant *Ptr, ConstantInt *Key, ConstantInt *Disc, Constant *AddrDisc, Constant *DeactivationSymbol)
Return a pointer signed with the specified parameters.
This class represents a range of values.
LLVM_ABI ConstantRange multiply(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
LLVM_ABI ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
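A minimal sketch combining the ConstantRange operations above; the query itself is illustrative:

#include "llvm/IR/ConstantRange.h"
using namespace llvm;

// True when every product of values drawn from A and B is (unsigned)
// below Limit; ConstantRange(Limit) is the single-element range {Limit}.
bool productAlwaysBelow(const ConstantRange &A, const ConstantRange &B,
                        const APInt &Limit) {
  return A.multiply(B).icmp(CmpInst::ICMP_ULT, ConstantRange(Limit));
}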
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string, with methods for querying it.
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Lightweight error class with error context and mandatory checking.
static FMFSource intersect(Value *A, Value *B)
Intersect the FMF from two instructions.
This class represents an extension of floating point types.
Convenience struct for specifying and reasoning about fast-math flags.
void setNoSignedZeros(bool B=true)
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Class to represent function types.
Type::subtype_iterator param_iterator
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
bool isConvergent() const
Determine if the call is convergent.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
bool doesNotThrow() const
Determine if the function cannot unwind.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
LLVM_ABI Value * getBasePtr() const
unsigned getBasePtrIndex() const
The index into the associate statepoint's argument list which contains the base pointer of the pointe...
LLVM_ABI Value * getDerivedPtr() const
unsigned getDerivedPtrIndex() const
The index into the associate statepoint's argument list which contains the pointer whose relocation t...
std::vector< const GCRelocateInst * > getGCRelocates() const
Get the list of all gc relocates linked to this statepoint. May contain several relocations for the same b...
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
PointerType * getType() const
Global values are always pointers.
Common base class shared among various IRBuilders.
LLVM_ABI Value * CreateLaunderInvariantGroup(Value *Ptr)
Create a launder.invariant.group intrinsic call.
ConstantInt * getTrue()
Get the constant value for i1 true.
LLVM_ABI Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
LLVM_ABI CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
ConstantInt * getFalse()
Get the constant value for i1 false.
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
LLVM_ABI Value * CreateStripInvariantGroup(Value *Ptr)
Create a strip.invariant.group intrinsic call.
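A minimal sketch of emitting IR with the builder hooks above; the folded expression is illustrative:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
using namespace llvm;

// Emit umax(X, Y) == 0, which is true iff both X and Y are zero.
Value *emitBothZero(IRBuilder<> &B, Value *X, Value *Y) {
  Value *Max = B.CreateBinaryIntrinsic(Intrinsic::umax, X, Y);
  return B.CreateICmp(CmpInst::ICMP_EQ, Max,
                      Constant::getNullValue(Max->getType()));
}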
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
KnownFPClass computeKnownFPClass(Value *Val, FastMathFlags FMF, FPClassTest Interested=fcAllFlags, const Instruction *CtxI=nullptr, unsigned Depth=0) const
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible,...
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * SimplifyAnyMemSet(AnyMemSetInst *MI)
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitCallBrInst(CallBrInst &CBI)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Value * foldReversedIntrinsicOperands(IntrinsicInst *II)
If all arguments of the intrinsic are reverses, try to pull the reverse after the intrinsic.
Value * tryGetLog2(Value *Op, bool AssumeNonZero)
Instruction * visitFenceInst(FenceInst &FI)
Instruction * foldShuffledIntrinsicOperands(IntrinsicInst *II)
If all arguments of the intrinsic are unary shuffles with the same mask, try to shuffle after the int...
Instruction * visitInvokeInst(InvokeInst &II)
bool SimplifyDemandedInstructionBits(Instruction &Inst)
Tries to simplify operands to an integer instruction based on its demanded bits.
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Instruction * visitVAEndInst(VAEndInst &I)
Instruction * matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps, bool MatchBitReversals)
Given an initial instruction, check to see if it is the root of a bswap/bitreverse idiom.
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC with the property: shuffle(NewC, ShMask) = C. Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * SimplifyAnyMemTransfer(AnyMemTransferInst *MI)
OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const
Instruction * visitCallInst(CallInst &CI)
CallInst simplification.
The core instruction combiner logic.
unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
DominatorTree & getDominatorTree() const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const Instruction *CxtI=nullptr, unsigned Depth=0) const
AssumptionCache & getAssumptionCache() const
OptimizationRemarkEmitter & ORE
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
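A minimal sketch of the usual update pattern behind the combiner entry points above: folds route replacements through the combiner so use lists and the worklist stay consistent. The fold shown (dropping to an operand) is illustrative, not one performed by this file:

#include "InstCombineInternal.h" // internal header; assumed visible here
using namespace llvm;

Instruction *foldToFirstOperand(InstCombinerImpl &IC, Instruction &I) {
  // Combiner-aware RAUW; returning non-null tells the visitor that the
  // instruction was handled.
  return IC.replaceInstUsesWith(I, I.getOperand(0));
}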
LLVM_ABI Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
LLVM_ABI void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI bool mayWriteToMemory() const LLVM_READONLY
Return true if this instruction may modify memory.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
LLVM_ABI void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
bool isTerminator() const
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
LLVM_ABI std::optional< InstListType::iterator > getInsertionPointAfterDef()
Get the first insertion point at which the result of this instruction is defined.
LLVM_ABI bool isIdenticalTo(const Instruction *I) const LLVM_READONLY
Return true if the specified instruction is exactly identical to the current one.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
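A minimal sketch of cloning an instruction while carrying its flags and metadata, using the Instruction hooks above; illustrative only:

#include "llvm/IR/Instruction.h"
using namespace llvm;

Instruction *cloneWithFlags(const Instruction &Src) {
  Instruction *NewI = Src.clone();  // no parent, no uses, no name
  NewI->copyIRFlags(&Src);          // nsw/nuw/exact/fast-math as applicable
  NewI->copyMetadata(Src);          // all metadata kinds by default
  NewI->setDebugLoc(Src.getDebugLoc());
  return NewI;
}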
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
bool isSigned() const
Whether the intrinsic is signed or unsigned.
A Module instance is used to store all the information related to an LLVM module.
StringRef getName() const
Get a short "name" for the module.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
bool isCommutative() const
Return true if the instruction is commutative.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Represents a saturating add/sub intrinsic.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
bool test(unsigned Idx) const
bool all() const
Returns true if all bits are set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
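A minimal sketch of adjusting a store in place with the setters above; the release ordering is an illustrative choice:

#include "llvm/IR/Instructions.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace llvm;

void makeReleaseStore(StoreInst &S, Align A) {
  S.setAlignment(A);
  S.setOrdering(AtomicOrdering::Release);
  S.setVolatile(false);
}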
StringRef - Represent a constant reference to a string, i.e.
Class to represent struct types.
static LLVM_ABI bool isCallingConvCCompatible(CallBase *CI)
Returns true if call site / callee has cdecl-compatible calling conventions.
Provides information about what library functions are available for the current target.
This class represents a truncation of integer types.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
LLVM_ABI unsigned getIntegerBitWidth() const
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
LLVM_ABI const fltSemantics & getFltSemantics() const
bool isVoidTy() const
Return true if this is 'void'.
static UnaryOperator * CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
LLVM_ABI unsigned getOperandNo() const
Return the operand # of this use in its User.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
This represents the llvm.va_end intrinsic.
static LLVM_ABI void ValueIsDeleted(Value *V)
static LLVM_ABI void ValueIsRAUWd(Value *Old, Value *New)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
static constexpr uint64_t MaximumAlignment
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< user_iterator > users()
static LLVM_ABI void dropDroppableUse(Use &U)
Remove the droppable use U.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
static constexpr unsigned MaxAlignmentExponent
The maximum alignment for instructions.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
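A minimal sketch of building a vector type from an explicit ElementCount, per the factories above:

#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

// <N x i32> when Scalable is false, <vscale x N x i32> when true.
VectorType *makeI32Vector(LLVMContext &Ctx, unsigned N, bool Scalable) {
  return VectorType::get(Type::getInt32Ty(Ctx),
                         ElementCount::get(N, Scalable));
}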
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
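A minimal sketch of materializing and calling an intrinsic declaration via getOrInsertDeclaration; ctpop is an illustrative choice of overloaded intrinsic:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
using namespace llvm;

CallInst *emitCtpop(Module *M, IRBuilder<> &B, Value *X) {
  // ctpop is overloaded on its operand type, so one type is supplied.
  Function *Decl =
      Intrinsic::getOrInsertDeclaration(M, Intrinsic::ctpop, {X->getType()});
  return B.CreateCall(Decl, {X});
}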
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
auto match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()
Match a floating-point negative zero.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
match_combine_or< match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > >, OpTy > m_ZExtOrSExtOrSelf(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
cst_pred_ty< is_strictlypositive > m_StrictlyPositive()
Match an integer or vector of strictly positive values.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match an integer or vector negated power-of-2.
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
class_match< UnaryOperator > m_UnOp()
Match an arbitrary unary operation and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
BinOpPred_match< LHS, RHS, is_bitwiselogic_op > m_BitwiseLogic(const LHS &L, const RHS &R)
Matches bitwise logic operations.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_CopySign(const Opnd0 &Op0, const Opnd1 &Op1)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
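A minimal sketch of the PatternMatch DSL documented above; the matched shape is illustrative:

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Matches (X & C) with the operands in either order, binding X and C.
bool isValueMaskedByConstant(Value *V) {
  Value *X;
  const APInt *C;
  return match(V, m_c_And(m_Value(X), m_APInt(C)));
}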
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
@ System
Synchronized with respect to all concurrently executing threads.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
Return a range of dbg_assign records for which Inst performs the assignment they encode.
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
DiagnosticInfoOptimizationBase::Argument NV
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI cl::opt< bool > EnableKnowledgeRetention
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
LLVM_ABI Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
LLVM_ABI APInt possiblyDemandedEltsInMask(Value *Mask)
Given a mask vector of the form <Y x i1>, return an APInt (of bitwidth Y) for each lane which may be ...
LLVM_ABI RetainedKnowledge simplifyRetainedKnowledge(AssumeInst *Assume, RetainedKnowledge RK, AssumptionCache *AC, DominatorTree *DT)
canonicalize the RetainedKnowledge RK.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
LLVM_ABI Value * getAllocAlignment(const CallBase *V, const TargetLibraryInfo *TLI)
Gets the alignment argument for an aligned_alloc-like function, using either built-in knowledge based...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI RetainedKnowledge getKnowledgeFromOperandInAssume(AssumeInst &Assume, unsigned Idx)
Retrieve the information held by Assume on the operand at index Idx.
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
LLVM_ABI Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Given a callsite, callee, and arguments, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value that is congruent to Skew modulo Align.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume don't contain any valuable informat...
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
constexpr T MinAlign(U A, V B)
A and B are either alignments or offsets.
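A few worked values for the bit-math helpers above, checked at compile time; the numbers are illustrative:

#include "llvm/Support/MathExtras.h"
using namespace llvm;

static_assert(isPowerOf2_64(64), "64 is a power of two");
// MinAlign keeps the largest power of two dividing both operands.
static_assert(MinAlign(8, 20) == 4, "8|20 = 0b11100, lowest set bit is 4");
// alignDown rounds toward zero to a multiple of the alignment.
static_assert(alignDown(37, 16) == 32, "37 rounded down to 16*2");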
LLVM_ABI RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)
This extracts the Knowledge from an element of an operand bundle.
auto dyn_cast_or_null(const Y &Val)
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 maxNum semantics.
LLVM_ABI FPClassTest fneg(FPClassTest Mask)
Return the test mask which returns true if the value's sign bit is flipped.
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
LLVM_ABI Constant * getLosslessUnsignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
bool isModSet(const ModRefInfo MRI)
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
Returns: X * 2^Exp for integral exponents.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI bool matchSimpleBinaryIntrinsicRecurrence(const IntrinsicInst *I, PHINode *&P, Value *&Init, Value *&OtherOp)
Attempt to match a simple value-accumulating recurrence of the form: llvm.intrinsic....
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
auto find_if_not(R &&Range, UnaryPredicate P)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
bool isAtLeastOrStrongerThan(AtomicOrdering AO, AtomicOrdering Other)
LLVM_ABI Constant * getLosslessSignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
LLVM_ABI AssumeInst * buildAssumeFromKnowledge(ArrayRef< RetainedKnowledge > Knowledge, Instruction *CtxI, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Build and return a new assume created from the provided knowledge if the knowledge in the assume is f...
LLVM_ABI FPClassTest inverse_fabs(FPClassTest Mask)
Return the test mask which returns true after fabs is applied to the value.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)
Split the specified string over a separator and return a range-compatible iterable over its partition...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
LLVM_ABI bool maskIsAllOneOrUndef(Value *Mask)
Given a mask vector of i1, return true if all of the elements of this predicate mask are known to be ...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
@ Mod
The access may modify the value stored in memory.
LLVM_ABI Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for the multiplication of a FMA, fold the result or return null.
LLVM_ABI Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)
Given a constrained FP intrinsic call, tries to compute its simplified version.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 minNum semantics.
OperandBundleDefT< Value * > OperandBundleDef
LLVM_ABI bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic has a scalar operand.
LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
Combine constant ranges from computeConstantRange() and computeKnownBits().
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Like isSafeToSpeculativelyExecute, but does not use information from the instruction's non-constant operands.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if this is always a dereferenceable pointer.
LLVM_ABI bool maskIsAllZeroOrUndef(Value *Mask)
Given a mask vector of i1, return true if all of the elements of this predicate mask are known to be ...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI std::optional< APInt > getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI, function_ref< const Value *(const Value *)> Mapper=[](const Value *V) { return V;})
Return the size of the requested allocation.
unsigned Log2(Align A)
Returns the log2 of the alignment.
LLVM_ABI bool maskContainsAllOneOrUndef(Value *Mask)
Given a mask vector of i1, return true if any of the elements of this predicate mask are known to be ...
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negations of each other.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
LLVM_ABI bool isTriviallyVectorizable(Intrinsic::ID ID)
Identify if the intrinsic is trivially vectorizable.
LLVM_ABI std::optional< bool > computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return false if we can prove that the specified FP value's sign bit is 0.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
This struct is a compact representation of a valid (non-zero power of two) alignment.
@ IEEE
IEEE-754 denormal numbers preserved.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
unsigned getBitWidth() const
Get the bit width of this value.
bool isNonZero() const
Returns true if this value is known to be non-zero.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
bool isNegative() const
Returns true if this value is known to be negative.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
bool isAllOnes() const
Returns true if value is all one bits.
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
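A minimal sketch of querying the KnownBits facts above for an integer-typed value; the analysis context (just a DataLayout here) is illustrative:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

// Is V provably even? True when at least one trailing zero is known.
// Assumes V has an integer (or integer vector) type.
bool knownEven(const Value *V, const DataLayout &DL) {
  KnownBits Known(V->getType()->getScalarSizeInBits());
  computeKnownBits(V, Known, DL);
  return Known.countMinTrailingZeros() >= 1;
}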
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
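A one-line sketch of the MaybeAlign normalization above, as combines typically apply it before comparing alignments:

#include "llvm/Support/Alignment.h"
using namespace llvm;

// An unset MaybeAlign conservatively collapses to an alignment of 1.
Align normalizeAlign(MaybeAlign MA) { return MA.valueOrOne(); }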
A lightweight accessor for an operand bundle meant to be passed around by value.
StringRef getTagName() const
Return the tag of this operand bundle as a string.
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Represents one piece of information held inside an operand bundle of an llvm.assume.
Attribute::AttrKind AttrKind
SelectPatternFlavor Flavor
SimplifyQuery getWithInstruction(const Instruction *I) const