#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
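// These target-specific intrinsic headers are pulled in for the target
// intrinsic cases handled further down (e.g. the x86 pmulh/phadd, RISC-V
// vsetvli, and AMDGPU exp2/rcp known-bits and FP-class cases).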
if (unsigned BitWidth = Ty->getScalarSizeInBits())
  return BitWidth;
return DL.getPointerTypeSizeInBits(Ty);
123 const APInt &DemandedElts,
127 DemandedLHS = DemandedRHS = DemandedElts;
134 DemandedElts, DemandedLHS, DemandedRHS);
bool UseInstrInfo, unsigned Depth) {
R->uge(LHS->getType()->getScalarSizeInBits()))
assert(LHS->getType() == RHS->getType() &&
       "LHS and RHS should have the same type");
assert(LHS->getType()->isIntOrIntVectorTy() &&
       "LHS and RHS should be integers");
return !I->user_empty() &&
return !I->user_empty() && all_of(I->users(), [](const User *U) {
  return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
273 return ::isKnownToBeAPowerOfTwo(
289 return CI->getValue().isStrictlyPositive();
return ::isKnownNonEqual(V1, V2, DemandedElts, Q, Depth);
return Mask.isSubsetOf(Known.Zero);
329 unsigned Depth = 0) {
340 return ::ComputeNumSignBits(
350 return V->getType()->getScalarSizeInBits() - SignBits + 1;
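// Illustrative arithmetic (not from the source): an i32 value with 20 known
// sign bits has 32 - 20 + 1 = 13 significant bits; the +1 keeps one copy of
// the sign bit itself.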
373 const APInt &DemandedElts,
379 const unsigned BitWidth = Ty->getScalarSizeInBits();
382 if (Ty->isVectorTy())
const Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
390 const auto MatchSubBC = [&]() {
407 const auto MatchASubBC = [&]() {
415 const auto MatchCD = [&]() {
432 if (!Match(Op0, Op1) && !Match(Op1, Op0))
const auto ComputeKnownBitsOrOne = [&](const Value *V) {
const KnownBits KnownA = ComputeKnownBitsOrOne(A);
const KnownBits KnownD = ComputeKnownBitsOrOne(D);
if (SubBC->getOpcode() == Instruction::Xor &&
const unsigned MinimumNumberOfLeadingZeros = UpperBound.countl_zero();
488 const APInt &DemandedElts,
if (KnownOut.isUnknown() && !NSW && !NUW)
bool NUW, const APInt &DemandedElts,
bool isKnownNegativeOp0 = Known2.isNegative();
    (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
    (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
    (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
550 bool SelfMultiply = Op0 == Op1;
559 unsigned OutValidBits = 2 * (TyBits - SignBits + 1);
561 if (OutValidBits < TyBits) {
APInt KnownZeroMask =
Known.Zero |= KnownZeroMask;
unsigned NumRanges = Ranges.getNumOperands() / 2;
for (unsigned i = 0; i < NumRanges; ++i) {
       "Known bit width must match range bit width!");
  unsigned CommonPrefixBits =
      (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countl_zero();
  Known.One &= UnsignedMax & Mask;
  Known.Zero &= ~UnsignedMax & Mask;
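// Sketch of the effect, assuming the usual !range semantics:
//   %x = load i32, ptr %p, !range !{i32 0, i32 256}  ; umax = 255
// makes the top 24 bits of %x known zero. With several ranges, only the bit
// prefix shared by each range's unsigned min and max (CommonPrefixBits above)
// is kept, and the results are intersected across ranges.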
while (!WorkSet.empty()) {
  if (!Visited.insert(V).second)
    return EphValues.count(cast<Instruction>(U));
  if (V == I || (!V->mayHaveSideEffects() && !V->isTerminator())) {
    for (const Use &U : U->operands()) {
650 return CI->isAssumeLikeIntrinsic();
658 bool AllowEphemerals) {
676 if (!AllowEphemerals && Inv == CxtI)
auto hasNoFreeCalls = [](auto Range) {
  if (!CB->hasFnAttr(Attribute::NoFree))
const BasicBlock *AssumeBB = Assume->getParent();
if (CtxBB != AssumeBB) {
  CtxIter = AssumeBB->end();
if (!Assume->comesBefore(CtxI))
return hasNoFreeCalls(make_range(Assume->getIterator(), CtxIter));
for (unsigned ElemIdx = 0, NElem = VC->getNumElements(); ElemIdx < NElem;
    Pred, VC->getElementAsAPInt(ElemIdx));
const PHINode **PhiOut = nullptr) {
CtxIOut = PHI->getIncomingBlock(*U)->getTerminator();
IncPhi && IncPhi->getNumIncomingValues() == 2) {
for (int Idx = 0; Idx < 2; ++Idx) {
  if (IncPhi->getIncomingValue(Idx) == PHI) {
    ValOut = IncPhi->getIncomingValue(1 - Idx);
    CtxIOut = IncPhi->getIncomingBlock(1 - Idx)->getTerminator();
830 "Got assumption for the wrong function!");
833 if (!V->getType()->isPointerTy())
*I, I->bundle_op_info_begin()[Elem.Index])) {
839 bool AssumeImpliesNonNull = [&]() {
840 if (RK.AttrKind == Attribute::NonNull)
843 if (RK.AttrKind == Attribute::Dereferenceable) {
848 "Dereferenceable attribute without IR argument?");
851 return CI && !CI->isZero();
if (RHS->getType()->isPointerTy()) {
Known.Zero |= ~*C & *Mask;
Known.One |= *C & ~*Mask;
989 Invert ? Cmp->getInversePredicate() : Cmp->getPredicate();
KnownBits DstKnown(LHS->getType()->getScalarSizeInBits());
1009 bool Invert,
unsigned Depth) {
1091 "Got assumption for the wrong function!");
1094 if (!V->getType()->isPointerTy())
*I, I->bundle_op_info_begin()[Elem.Index])) {
if (RK.WasOn == V && RK.AttrKind == Attribute::Alignment &&
Value *Arg = I->getArgOperand(0);
1129 if (Trunc && Trunc->getOperand(0) == V &&
1131 if (Trunc->hasNoUnsignedWrap()) {
1179 Known = KF(Known2, Known, ShAmtNonZero);
Value *X = nullptr, *Y = nullptr;
switch (I->getOpcode()) {
case Instruction::And:
  KnownOut = KnownLHS & KnownRHS;
  KnownOut = KnownLHS.blsi();
  KnownOut = KnownRHS.blsi();
case Instruction::Or:
  KnownOut = KnownLHS | KnownRHS;
case Instruction::Xor:
  KnownOut = KnownLHS ^ KnownRHS;
  const KnownBits &XBits = I->getOperand(0) == X ? KnownLHS : KnownRHS;
  KnownOut = XBits.blsmsk();
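// Note: KnownBits::blsi() models X & -X (isolate the lowest set bit) and
// KnownBits::blsmsk() models X ^ (X - 1) (mask up to and including the lowest
// set bit), which is why And/Xor with those operand patterns are
// special-cased above.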
if (!KnownOut.Zero[0] && !KnownOut.One[0] &&
APInt DemandedEltsLHS, DemandedEltsRHS;
    DemandedElts, DemandedEltsLHS,
const auto ComputeForSingleOpFunc =
  return KnownBitsFunc(
if (DemandedEltsRHS.isZero())
  return ComputeForSingleOpFunc(I->getOperand(0), DemandedEltsLHS);
if (DemandedEltsLHS.isZero())
  return ComputeForSingleOpFunc(I->getOperand(1), DemandedEltsRHS);
return ComputeForSingleOpFunc(I->getOperand(0), DemandedEltsLHS)
    .intersectWith(ComputeForSingleOpFunc(I->getOperand(1), DemandedEltsRHS));
APInt DemandedElts =
Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
return ConstantRange::getEmpty(BitWidth);
Value *Arm, bool Invert,
       "Input should be a Select!");
const Value *LHS2 = nullptr, *RHS2 = nullptr;
return CLow->sle(*CHigh);
const APInt *&CHigh) {
assert((II->getIntrinsicID() == Intrinsic::smin ||
        II->getIntrinsicID() == Intrinsic::smax) &&
       "Must be smin/smax");
if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
if (II->getIntrinsicID() == Intrinsic::smin)
return CLow->sle(*CHigh);
1398 const APInt *CLow, *CHigh;
1405 const APInt &DemandedElts,
switch (I->getOpcode()) {
case Instruction::Load:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::Mul: {
    DemandedElts, Known, Known2, Q, Depth);
case Instruction::UDiv: {
case Instruction::SDiv: {
case Instruction::Select: {
  auto ComputeForArm = [&](Value *Arm, bool Invert) {
  ComputeForArm(I->getOperand(1), false)
case Instruction::FPTrunc:
case Instruction::FPExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::SIToFP:
case Instruction::UIToFP:
case Instruction::PtrToInt:
case Instruction::PtrToAddr:
case Instruction::IntToPtr:
case Instruction::ZExt:
case Instruction::Trunc: {
  Type *SrcTy = I->getOperand(0)->getType();
  unsigned SrcBitWidth;
  assert(SrcBitWidth && "SrcBitWidth can't be zero");
  Inst && Inst->hasNonNeg() && !Known.isNegative())
case Instruction::BitCast: {
  Type *SrcTy = I->getOperand(0)->getType();
  if (SrcTy->isIntOrPtrTy() &&
      !I->getType()->isVectorTy()) {
      V->getType()->isFPOrFPVectorTy()) {
    Type *FPType = V->getType()->getScalarType();
    if (FPClasses & fcInf)
    if (Result.SignBit) {
      if (*Result.SignBit)
  if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
      !I->getType()->isIntOrIntVectorTy() ||
  unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
    unsigned SubScale = BitWidth / SubBitWidth;
    for (unsigned i = 0; i != NumElts; ++i) {
      if (DemandedElts[i])
        SubDemandedElts.setBit(i * SubScale);
    for (unsigned i = 0; i != SubScale; ++i) {
      unsigned ShiftElt = IsLE ? i : SubScale - 1 - i;
      Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
    unsigned SubScale = SubBitWidth / BitWidth;
    APInt SubDemandedElts =
    for (unsigned i = 0; i != NumElts; ++i) {
      if (DemandedElts[i]) {
        unsigned Shifts = IsLE ? i : NumElts - 1 - i;
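// Summary of the bitcast case above: when casting between integer vectors of
// different element widths, the known bits of each demanded sub-element are
// stitched together with insertBits(), with IsLE deciding which sub-element
// lands in which bit position on little- vs. big-endian targets.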
case Instruction::SExt: {
  unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
  Known = Known.trunc(SrcBitWidth);
case Instruction::Shl: {
                  bool ShAmtNonZero) {
    return KnownBits::shl(KnownVal, KnownAmt, NUW, NSW, ShAmtNonZero);
case Instruction::LShr: {
                  bool ShAmtNonZero) {
case Instruction::AShr: {
                  bool ShAmtNonZero) {
case Instruction::Sub: {
    DemandedElts, Known, Known2, Q, Depth);
case Instruction::Add: {
    DemandedElts, Known, Known2, Q, Depth);
case Instruction::SRem:
case Instruction::URem:
case Instruction::Alloca:
case Instruction::GetElementPtr: {
  APInt AccConstIndices(IndexWidth, 0);
  auto AddIndexToKnown = [&](KnownBits IndexBits) {
         "Index width can't be larger than pointer width");
  for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
    Value *Index = I->getOperand(i);
           "Access to structure field must be known at compile time");
      AccConstIndices += Offset;
          CI->getValue().sextOrTrunc(IndexWidth) * StrideInBytes;
case Instruction::PHI: {
  Value *R = nullptr, *L = nullptr;
1800 case Instruction::LShr:
1801 case Instruction::AShr:
1802 case Instruction::Shl:
1803 case Instruction::UDiv:
1810 case Instruction::URem: {
1823 case Instruction::Shl:
1827 case Instruction::LShr:
1828 case Instruction::UDiv:
1829 case Instruction::URem:
1834 case Instruction::AShr:
1846 case Instruction::Add:
1847 case Instruction::Sub:
1848 case Instruction::And:
1849 case Instruction::Or:
case Instruction::Mul: {
  unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
  Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
  Instruction *LInst = P->getIncomingBlock(1 - OpNum)->getTerminator();
1888 case Instruction::Add: {
1898 case Instruction::Sub: {
1909 case Instruction::Mul:
if (P->getNumIncomingValues() == 0)
for (const Use &U : P->operands()) {
if ((TrueSucc == CxtPhi->getParent()) !=
1989 Known2 = KnownUnion;
case Instruction::Call:
case Instruction::Invoke: {
  if (std::optional<ConstantRange> Range = CB->getRange())
  if (const Value *RV = CB->getReturnedArgOperand()) {
    if (RV->getType() == I->getType()) {
  switch (II->getIntrinsicID()) {
  case Intrinsic::abs: {
    bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
2039 case Intrinsic::bitreverse:
2043 case Intrinsic::bswap:
case Intrinsic::ctlz: {
  PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
case Intrinsic::cttz: {
  PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
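// Note: when the ctlz/cttz operand is known non-zero, the result is at most
// BitWidth - 1, so clamping PossibleLZ/PossibleTZ lets the high bits of the
// result be marked known zero.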
case Intrinsic::ctpop: {
case Intrinsic::fshr:
case Intrinsic::fshl: {
  if (II->getIntrinsicID() == Intrinsic::fshr)
  Known2 <<= ShiftAmt;
2100 case Intrinsic::clmul:
2105 case Intrinsic::uadd_sat:
2110 case Intrinsic::usub_sat:
2115 case Intrinsic::sadd_sat:
2120 case Intrinsic::ssub_sat:
2126 case Intrinsic::vector_reverse:
2132 case Intrinsic::vector_reduce_and:
2133 case Intrinsic::vector_reduce_or:
2134 case Intrinsic::vector_reduce_umax:
2135 case Intrinsic::vector_reduce_umin:
2136 case Intrinsic::vector_reduce_smax:
2137 case Intrinsic::vector_reduce_smin:
2140 case Intrinsic::vector_reduce_xor: {
2147 bool EvenCnt = VecTy->getElementCount().isKnownEven();
2151 if (VecTy->isScalableTy() || EvenCnt)
case Intrinsic::vector_reduce_add: {
  Known = Known.reduceAdd(VecTy->getNumElements());
case Intrinsic::umin:
case Intrinsic::umax:
case Intrinsic::smin:
case Intrinsic::smax:
case Intrinsic::ptrmask: {
  const Value *Mask = I->getOperand(1);
  Known2 = KnownBits(Mask->getType()->getScalarSizeInBits());
2195 case Intrinsic::x86_sse2_pmulh_w:
2196 case Intrinsic::x86_avx2_pmulh_w:
2197 case Intrinsic::x86_avx512_pmulh_w_512:
2202 case Intrinsic::x86_sse2_pmulhu_w:
2203 case Intrinsic::x86_avx2_pmulhu_w:
2204 case Intrinsic::x86_avx512_pmulhu_w_512:
2209 case Intrinsic::x86_sse42_crc32_64_64:
2212 case Intrinsic::x86_ssse3_phadd_d_128:
2213 case Intrinsic::x86_ssse3_phadd_w_128:
2214 case Intrinsic::x86_avx2_phadd_d:
2215 case Intrinsic::x86_avx2_phadd_w: {
2217 I, DemandedElts, Q,
Depth,
2223 case Intrinsic::x86_ssse3_phadd_sw_128:
2224 case Intrinsic::x86_avx2_phadd_sw: {
2229 case Intrinsic::x86_ssse3_phsub_d_128:
2230 case Intrinsic::x86_ssse3_phsub_w_128:
2231 case Intrinsic::x86_avx2_phsub_d:
2232 case Intrinsic::x86_avx2_phsub_w: {
2234 I, DemandedElts, Q,
Depth,
2240 case Intrinsic::x86_ssse3_phsub_sw_128:
2241 case Intrinsic::x86_avx2_phsub_sw: {
case Intrinsic::riscv_vsetvli:
case Intrinsic::riscv_vsetvlimax: {
  bool HasAVL = II->getIntrinsicID() == Intrinsic::riscv_vsetvli;
  MaxVL = std::min(MaxVL, CI->getZExtValue());
  unsigned KnownZeroFirstBit = Log2_32(MaxVL) + 1;
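// Note: the value returned by vsetvli never exceeds VLMAX (and is further
// clamped by a constant AVL when one is present), so every result bit above
// Log2_32(MaxVL) appears to be provably zero.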
case Intrinsic::vscale: {
  if (!II->getParent() || !II->getFunction())
case Instruction::ShuffleVector: {
  APInt DemandedLHS, DemandedRHS;
  if (!!DemandedLHS) {
    const Value *LHS = Shuf->getOperand(0);
  if (!!DemandedRHS) {
    const Value *RHS = Shuf->getOperand(1);
case Instruction::InsertElement: {
  const Value *Vec = I->getOperand(0);
  const Value *Elt = I->getOperand(1);
  APInt DemandedVecElts = DemandedElts;
  bool NeedsElt = true;
  if (CIdx && CIdx->getValue().ult(NumElts)) {
    DemandedVecElts.clearBit(CIdx->getZExtValue());
    NeedsElt = DemandedElts[CIdx->getZExtValue()];
  if (!DemandedVecElts.isZero()) {
case Instruction::ExtractElement: {
  const Value *Vec = I->getOperand(0);
  const Value *Idx = I->getOperand(1);
  if (CIdx && CIdx->getValue().ult(NumElts))
case Instruction::ExtractValue:
  switch (II->getIntrinsicID()) {
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
        true, II->getArgOperand(0), II->getArgOperand(1), false,
        false, DemandedElts, Known, Known2, Q, Depth);
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
        false, II->getArgOperand(0), II->getArgOperand(1), false,
        false, DemandedElts, Known, Known2, Q, Depth);
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
        false, DemandedElts, Known, Known2, Q, Depth);
case Instruction::Freeze:
if (!DemandedElts) {
assert(V && "No Value?");
Type *Ty = V->getType();
assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
       "Not integer or pointer type!");
    FVTy->getNumElements() == DemandedElts.getBitWidth() &&
    "DemandedElt width should equal the fixed vector number of elements");
    "DemandedElt width should be 1 for scalars or scalable vectors");
    "V and Known should have same BitWidth");
    "V and Known should have same BitWidth");
for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
  if (!DemandedElts[i])
  APInt Elt = CDV->getElementAsAPInt(i);
for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
  if (!DemandedElts[i])
  const APInt &Elt = ElementCI->getValue();
if (std::optional<ConstantRange> Range = A->getRange())
  Known = Range->toKnownBits();
if (!GA->isInterposable())
if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange())
  Known = CR->toKnownBits();
Align Alignment = V->getPointerAlignment(Q.DL);
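// Example: a pointer with a known alignment of 8 bytes has its low
// Log2(8) = 3 bits known zero; that is what the alignment query contributes
// to Known for plain pointer values.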
Value *Start = nullptr, *Step = nullptr;
2582 if (U.get() == Start) {
2598 case Instruction::Mul:
2603 case Instruction::SDiv:
2609 case Instruction::UDiv:
2615 case Instruction::Shl:
2617 case Instruction::AShr:
2621 case Instruction::LShr:
2659 if (OrZero && V->getType()->getScalarSizeInBits() == 1)
2701 return F->hasFnAttribute(Attribute::VScaleRange);
2718 switch (
I->getOpcode()) {
2719 case Instruction::ZExt:
2721 case Instruction::Trunc:
2723 case Instruction::Shl:
2727 case Instruction::LShr:
2731 case Instruction::UDiv:
2735 case Instruction::Mul:
2739 case Instruction::And:
2750 case Instruction::Add: {
if (match(I->getOperand(0),
if (match(I->getOperand(1),
unsigned BitWidth = V->getType()->getScalarSizeInBits();
if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2787 case Instruction::Select:
2790 case Instruction::PHI: {
2811 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2812 return isKnownToBeAPowerOfTwo(U.get(), OrZero, RecQ, NewDepth);
2815 case Instruction::Invoke:
2816 case Instruction::Call: {
switch (II->getIntrinsicID()) {
case Intrinsic::umax:
case Intrinsic::smax:
case Intrinsic::umin:
case Intrinsic::smin:
case Intrinsic::bitreverse:
case Intrinsic::bswap:
case Intrinsic::fshr:
case Intrinsic::fshl:
  if (II->getArgOperand(0) == II->getArgOperand(1))
F = I->getFunction();
if (!GEP->hasNoUnsignedWrap() &&
    !(GEP->isInBounds() &&
assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
     GTI != GTE; ++GTI) {
  if (StructType *STy = GTI.getStructTypeOrNull()) {
    if (ElementOffset > 0)
  if (GTI.getSequentialElementStride(Q.DL).isZero())
2925 unsigned NumUsesExplored = 0;
2926 for (
auto &U : V->uses()) {
2935 if (V->getType()->isPointerTy()) {
2937 if (CB->isArgOperand(&U) &&
2938 CB->paramHasNonNullAttr(CB->getArgOperandNo(&U),
2966 NonNullIfTrue =
true;
2968 NonNullIfTrue =
false;
2974 for (
const auto *CmpU : UI->
users()) {
2976 if (Visited.
insert(CmpU).second)
2979 while (!WorkList.
empty()) {
2988 for (
const auto *CurrU : Curr->users())
2989 if (Visited.
insert(CurrU).second)
2995 assert(BI->isConditional() &&
"uses a comparison!");
2998 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
3002 }
else if (NonNullIfTrue &&
isGuard(Curr) &&
const unsigned NumRanges = Ranges->getNumOperands() / 2;
for (unsigned i = 0; i < NumRanges; ++i) {
Value *Start = nullptr, *Step = nullptr;
const APInt *StartC, *StepC;
case Instruction::Add:
case Instruction::Mul:
case Instruction::Shl:
case Instruction::AShr:
case Instruction::LShr:
bool NUW, unsigned Depth) {
return ::isKnownNonEqual(X, Y, DemandedElts, Q, Depth);
bool NUW, unsigned Depth) {
auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
  switch (I->getOpcode()) {
  case Instruction::Shl:
    return Lhs.shl(Rhs);
  case Instruction::LShr:
    return Lhs.lshr(Rhs);
  case Instruction::AShr:
    return Lhs.ashr(Rhs);
auto InvShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
  switch (I->getOpcode()) {
  case Instruction::Shl:
    return Lhs.lshr(Rhs);
  case Instruction::LShr:
  case Instruction::AShr:
    return Lhs.shl(Rhs);
if (MaxShift.uge(NumBits))
if (!ShiftOp(KnownVal.One, MaxShift).isZero())
if (InvShiftOp(KnownVal.Zero, NumBits - MaxShift)
const APInt &DemandedElts,
switch (I->getOpcode()) {
case Instruction::Alloca:
  return I->getType()->getPointerAddressSpace() == 0;
case Instruction::GetElementPtr:
  if (I->getType()->isPointerTy())
case Instruction::BitCast: {
  Type *FromTy = I->getOperand(0)->getType();
case Instruction::IntToPtr:
case Instruction::PtrToAddr:
case Instruction::PtrToInt:
      I->getType()->getScalarSizeInBits())
case Instruction::Trunc:
  if (TI->hasNoSignedWrap() || TI->hasNoUnsignedWrap())
case Instruction::Xor:
case Instruction::Sub:
                        I->getOperand(1), Depth);
3287 case Instruction::Or:
3298 case Instruction::SExt:
3299 case Instruction::ZExt:
3303 case Instruction::Shl: {
case Instruction::LShr:
case Instruction::AShr: {
case Instruction::UDiv:
case Instruction::SDiv: {
  if (I->getOpcode() == Instruction::SDiv) {
    XKnown = XKnown.abs(false);
    YKnown = YKnown.abs(false);
  return XUgeY && *XUgeY;
case Instruction::Add: {
case Instruction::Mul: {
case Instruction::Select: {
  auto SelectArmIsNonZero = [&](bool IsTrueArm) {
    Op = IsTrueArm ? I->getOperand(1) : I->getOperand(2);
  if (SelectArmIsNonZero(true) && SelectArmIsNonZero(false))
3409 case Instruction::PHI: {
3420 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
3424 BasicBlock *TrueSucc, *FalseSucc;
3425 if (match(RecQ.CxtI,
3426 m_Br(m_c_ICmp(Pred, m_Specific(U.get()), m_Value(X)),
3427 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
3429 if ((TrueSucc == PN->getParent()) != (FalseSucc == PN->getParent())) {
3431 if (FalseSucc == PN->getParent())
3432 Pred = CmpInst::getInversePredicate(Pred);
3433 if (cmpExcludesZero(Pred, X))
3441 case Instruction::InsertElement: {
3445 const Value *Vec =
I->getOperand(0);
3446 const Value *Elt =
I->getOperand(1);
3450 APInt DemandedVecElts = DemandedElts;
3451 bool SkipElt =
false;
3453 if (CIdx && CIdx->getValue().ult(NumElts)) {
3454 DemandedVecElts.
clearBit(CIdx->getZExtValue());
3455 SkipElt = !DemandedElts[CIdx->getZExtValue()];
3461 (DemandedVecElts.
isZero() ||
3464 case Instruction::ExtractElement:
3466 const Value *Vec = EEI->getVectorOperand();
3467 const Value *Idx = EEI->getIndexOperand();
3470 unsigned NumElts = VecTy->getNumElements();
3472 if (CIdx && CIdx->getValue().ult(NumElts))
3478 case Instruction::ShuffleVector: {
3482 APInt DemandedLHS, DemandedRHS;
3488 return (DemandedRHS.
isZero() ||
3493 case Instruction::Freeze:
3497 case Instruction::Load: {
3514 case Instruction::ExtractValue: {
3520 case Instruction::Add:
3525 case Instruction::Sub:
3528 case Instruction::Mul:
3531 false,
false,
Depth);
3537 case Instruction::Call:
3538 case Instruction::Invoke: {
3540 if (
I->getType()->isPointerTy()) {
3541 if (
Call->isReturnNonNull())
3548 if (std::optional<ConstantRange>
Range =
Call->getRange()) {
3549 const APInt ZeroValue(
Range->getBitWidth(), 0);
3550 if (!
Range->contains(ZeroValue))
3553 if (
const Value *RV =
Call->getReturnedArgOperand())
3559 switch (
II->getIntrinsicID()) {
3560 case Intrinsic::sshl_sat:
3561 case Intrinsic::ushl_sat:
3562 case Intrinsic::abs:
3563 case Intrinsic::bitreverse:
3564 case Intrinsic::bswap:
3565 case Intrinsic::ctpop:
3569 case Intrinsic::ssub_sat:
3577 case Intrinsic::sadd_sat:
3579 II->getArgOperand(1),
3580 true,
false,
Depth);
3582 case Intrinsic::vector_reverse:
3586 case Intrinsic::vector_reduce_or:
3587 case Intrinsic::vector_reduce_umax:
3588 case Intrinsic::vector_reduce_umin:
3589 case Intrinsic::vector_reduce_smax:
3590 case Intrinsic::vector_reduce_smin:
3592 case Intrinsic::umax:
3593 case Intrinsic::uadd_sat:
case Intrinsic::smax: {
  auto IsNonZero = [&](Value *Op, std::optional<bool> &OpNonZero,
    if (!OpNonZero.has_value())
      OpNonZero = OpKnown.isNonZero() ||
  std::optional<bool> Op0NonZero, Op1NonZero;
      IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known))
      IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known))
  return IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known) &&
         IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known);
3626 case Intrinsic::smin: {
3642 case Intrinsic::umin:
3645 case Intrinsic::cttz:
3648 case Intrinsic::ctlz:
3651 case Intrinsic::fshr:
3652 case Intrinsic::fshl:
3654 if (
II->getArgOperand(0) ==
II->getArgOperand(1))
3657 case Intrinsic::vscale:
3659 case Intrinsic::experimental_get_vector_length:
3673 return Known.
One != 0;
3684 Type *Ty = V->getType();
3691 FVTy->getNumElements() == DemandedElts.
getBitWidth() &&
3692 "DemandedElt width should equal the fixed vector number of elements");
3695 "DemandedElt width should be 1 for scalars");
3700 if (
C->isNullValue())
3709 for (
unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
3710 if (!DemandedElts[i])
3712 Constant *Elt =
C->getAggregateElement(i);
3729 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
3730 GV->getType()->getAddressSpace() == 0)
3740 if (std::optional<ConstantRange>
Range =
A->getRange()) {
3741 const APInt ZeroValue(
Range->getBitWidth(), 0);
3742 if (!
Range->contains(ZeroValue))
3759 if (((
A->hasPassPointeeByValueCopyAttr() &&
3761 A->hasNonNullAttr()))
3783 APInt DemandedElts =
3785 return ::isKnownNonZero(V, DemandedElts, Q,
Depth);
3794static std::optional<std::pair<Value*, Value*>>
3798 return std::nullopt;
3807 case Instruction::Or:
3812 case Instruction::Xor:
3813 case Instruction::Add: {
3821 case Instruction::Sub:
3827 case Instruction::Mul: {
3833 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3834 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3844 case Instruction::Shl: {
3849 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3850 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3857 case Instruction::AShr:
3858 case Instruction::LShr: {
3861 if (!PEO1->isExact() || !PEO2->isExact())
3868 case Instruction::SExt:
3869 case Instruction::ZExt:
3873 case Instruction::PHI: {
3881 Value *Start1 =
nullptr, *Step1 =
nullptr;
3883 Value *Start2 =
nullptr, *Step2 =
nullptr;
3899 if (Values->first != PN1 || Values->second != PN2)
3902 return std::make_pair(Start1, Start2);
3905 return std::nullopt;
3912 const APInt &DemandedElts,
3920 case Instruction::Or:
3924 case Instruction::Xor:
3925 case Instruction::Add:
3946 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3947 !
C->isZero() && !
C->isOne() &&
3961 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3975 bool UsedFullRecursion =
false;
3977 if (!VisitedBBs.
insert(IncomBB).second)
3981 const APInt *C1, *C2;
3986 if (UsedFullRecursion)
3990 RecQ.
CxtI = IncomBB->getTerminator();
3993 UsedFullRecursion =
true;
4007 const Value *Cond2 = SI2->getCondition();
4010 DemandedElts, Q,
Depth + 1) &&
4012 DemandedElts, Q,
Depth + 1);
4025 if (!
A->getType()->isPointerTy() || !
B->getType()->isPointerTy())
4029 if (!GEPA || GEPA->getNumIndices() != 1 || !
isa<Constant>(GEPA->idx_begin()))
4034 if (!PN || PN->getNumIncomingValues() != 2)
4039 Value *Start =
nullptr;
4041 if (PN->getIncomingValue(0) == Step)
4042 Start = PN->getIncomingValue(1);
4043 else if (PN->getIncomingValue(1) == Step)
4044 Start = PN->getIncomingValue(0);
4055 APInt StartOffset(IndexWidth, 0);
4056 Start = Start->stripAndAccumulateInBoundsConstantOffsets(Q.
DL, StartOffset);
4057 APInt StepOffset(IndexWidth, 0);
4063 APInt OffsetB(IndexWidth, 0);
4064 B =
B->stripAndAccumulateInBoundsConstantOffsets(Q.
DL, OffsetB);
4065 return Start ==
B &&
4077 auto IsKnownNonEqualFromDominatingCondition = [&](
const Value *V) {
4098 if (IsKnownNonEqualFromDominatingCondition(V1) ||
4099 IsKnownNonEqualFromDominatingCondition(V2))
4113 "Got assumption for the wrong function!");
4114 assert(
I->getIntrinsicID() == Intrinsic::assume &&
4115 "must be an assume intrinsic");
4145 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
4147 return isKnownNonEqual(Values->first, Values->second, DemandedElts, Q,
4209 const APInt &DemandedElts,
4215 unsigned MinSignBits = TyBits;
4217 for (
unsigned i = 0; i != NumElts; ++i) {
4218 if (!DemandedElts[i])
4225 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
4232 const APInt &DemandedElts,
4238 assert(Result > 0 &&
"At least one sign bit needs to be present!");
4250 const APInt &DemandedElts,
4252 Type *Ty = V->getType();
4258 FVTy->getNumElements() == DemandedElts.
getBitWidth() &&
4259 "DemandedElt width should equal the fixed vector number of elements");
4262 "DemandedElt width should be 1 for scalars");
4276 unsigned FirstAnswer = 1;
4287 case Instruction::BitCast: {
4288 Value *Src = U->getOperand(0);
4289 Type *SrcTy = Src->getType();
4293 if (!SrcTy->isIntOrIntVectorTy())
4299 if ((SrcBits % TyBits) != 0)
4312 case Instruction::SExt:
4313 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
4317 case Instruction::SDiv: {
4318 const APInt *Denominator;
4331 return std::min(TyBits, NumBits + Denominator->
logBase2());
4336 case Instruction::SRem: {
4339 const APInt *Denominator;
4360 unsigned ResBits = TyBits - Denominator->
ceilLogBase2();
4361 Tmp = std::max(Tmp, ResBits);
case Instruction::AShr: {
  if (ShAmt->uge(TyBits))
  Tmp += ShAmtLimited;
  if (Tmp > TyBits) Tmp = TyBits;
case Instruction::Shl: {
  if (ShAmt->uge(TyBits))
      ShAmt->uge(TyBits - X->getType()->getScalarSizeInBits())) {
    Tmp += TyBits - X->getType()->getScalarSizeInBits();
  if (ShAmt->uge(Tmp))
4403 case Instruction::And:
4404 case Instruction::Or:
4405 case Instruction::Xor:
4410 FirstAnswer = std::min(Tmp, Tmp2);
4417 case Instruction::Select: {
4421 const APInt *CLow, *CHigh;
4429 return std::min(Tmp, Tmp2);
4432 case Instruction::Add:
4436 if (Tmp == 1)
break;
4440 if (CRHS->isAllOnesValue()) {
4446 if ((Known.
Zero | 1).isAllOnes())
4458 return std::min(Tmp, Tmp2) - 1;
4460 case Instruction::Sub:
4467 if (CLHS->isNullValue()) {
4472 if ((Known.
Zero | 1).isAllOnes())
4489 return std::min(Tmp, Tmp2) - 1;
case Instruction::Mul: {
  unsigned SignBitsOp0 =
  if (SignBitsOp0 == 1)
  unsigned SignBitsOp1 =
  if (SignBitsOp1 == 1)
  unsigned OutValidBits =
      (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
  return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
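// Worked example: multiplying two i32 values that each have at least 24 sign
// bits leaves at most 9 valid bits per operand, so OutValidBits is 18 and the
// product is known to carry 32 - 18 + 1 = 15 sign bits.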
4507 case Instruction::PHI: {
4511 if (NumIncomingValues > 4)
break;
4513 if (NumIncomingValues == 0)
break;
4519 for (
unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
4520 if (Tmp == 1)
return Tmp;
4523 DemandedElts, RecQ,
Depth + 1));
4528 case Instruction::Trunc: {
4533 unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits();
4534 if (Tmp > (OperandTyBits - TyBits))
4535 return Tmp - (OperandTyBits - TyBits);
4540 case Instruction::ExtractElement:
4547 case Instruction::ShuffleVector: {
4555 APInt DemandedLHS, DemandedRHS;
4560 Tmp = std::numeric_limits<unsigned>::max();
4561 if (!!DemandedLHS) {
4562 const Value *
LHS = Shuf->getOperand(0);
4569 if (!!DemandedRHS) {
4570 const Value *
RHS = Shuf->getOperand(1);
4572 Tmp = std::min(Tmp, Tmp2);
4578 assert(Tmp <= TyBits &&
"Failed to determine minimum sign bits");
4581 case Instruction::Call: {
4583 switch (
II->getIntrinsicID()) {
4586 case Intrinsic::abs:
4594 case Intrinsic::smin:
4595 case Intrinsic::smax: {
4596 const APInt *CLow, *CHigh;
4611 if (
unsigned VecSignBits =
4629 if (
F->isIntrinsic())
4630 return F->getIntrinsicID();
4636 if (
F->hasLocalLinkage() || !TLI || !TLI->
getLibFunc(CB, Func) ||
4646 return Intrinsic::sin;
4650 return Intrinsic::cos;
4654 return Intrinsic::tan;
4658 return Intrinsic::asin;
4662 return Intrinsic::acos;
4666 return Intrinsic::atan;
4668 case LibFunc_atan2f:
4669 case LibFunc_atan2l:
4670 return Intrinsic::atan2;
4674 return Intrinsic::sinh;
4678 return Intrinsic::cosh;
4682 return Intrinsic::tanh;
4686 return Intrinsic::exp;
4690 return Intrinsic::exp2;
4692 case LibFunc_exp10f:
4693 case LibFunc_exp10l:
4694 return Intrinsic::exp10;
4698 return Intrinsic::log;
4700 case LibFunc_log10f:
4701 case LibFunc_log10l:
4702 return Intrinsic::log10;
4706 return Intrinsic::log2;
4710 return Intrinsic::fabs;
4714 return Intrinsic::minnum;
4718 return Intrinsic::maxnum;
4719 case LibFunc_copysign:
4720 case LibFunc_copysignf:
4721 case LibFunc_copysignl:
4722 return Intrinsic::copysign;
4724 case LibFunc_floorf:
4725 case LibFunc_floorl:
4726 return Intrinsic::floor;
4730 return Intrinsic::ceil;
4732 case LibFunc_truncf:
4733 case LibFunc_truncl:
4734 return Intrinsic::trunc;
4738 return Intrinsic::rint;
4739 case LibFunc_nearbyint:
4740 case LibFunc_nearbyintf:
4741 case LibFunc_nearbyintl:
4742 return Intrinsic::nearbyint;
4744 case LibFunc_roundf:
4745 case LibFunc_roundl:
4746 return Intrinsic::round;
4747 case LibFunc_roundeven:
4748 case LibFunc_roundevenf:
4749 case LibFunc_roundevenl:
4750 return Intrinsic::roundeven;
4754 return Intrinsic::pow;
4758 return Intrinsic::sqrt;
bool &TrueIfSigned) {
  TrueIfSigned = true;
  return RHS.isZero();
  TrueIfSigned = true;
  return RHS.isAllOnes();
  TrueIfSigned = false;
  return RHS.isAllOnes();
  TrueIfSigned = false;
  return RHS.isZero();
  TrueIfSigned = true;
  return RHS.isMaxSignedValue();
  TrueIfSigned = true;
  return RHS.isMinSignedValue();
  TrueIfSigned = false;
  return RHS.isMinSignedValue();
  TrueIfSigned = false;
  return RHS.isMaxSignedValue();
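// Note: this helper recognizes the common sign-bit idioms, e.g.
//   icmp slt i32 %x, 0    ; TrueIfSigned = true
//   icmp sgt i32 %x, -1   ; TrueIfSigned = false
// together with the equivalent unsigned comparisons against the signed
// min/max constants.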
4807 unsigned Depth = 0) {
4833 KnownFromContext.
knownNot(~(CondIsTrue ? MaskIfTrue : MaskIfFalse));
4837 KnownFromContext.
knownNot(CondIsTrue ? ~Mask : Mask);
4843 if (TrueIfSigned == CondIsTrue)
4859 return KnownFromContext;
4879 return KnownFromContext;
4889 "Got assumption for the wrong function!");
4890 assert(
I->getIntrinsicID() == Intrinsic::assume &&
4891 "must be an assume intrinsic");
4897 true, Q.
CxtI, KnownFromContext);
4900 return KnownFromContext;
4904 Value *Arm,
bool Invert,
4910 !Invert, SQ.
CxtI, KnownSrc,
4928 APInt DemandedElts =
4934 const APInt &DemandedElts,
4939 if ((InterestedClasses &
4945 KnownSrc, Q,
Depth + 1);
4951 case Intrinsic::minimum:
4953 case Intrinsic::maximum:
4955 case Intrinsic::minimumnum:
4957 case Intrinsic::maximumnum:
4959 case Intrinsic::minnum:
4961 case Intrinsic::maxnum:
assert(Known.isUnknown() && "should not be called with known information");
if (!DemandedElts) {
bool SignBitAllZero = true;
bool SignBitAllOne = true;
5007 unsigned NumElts = VFVTy->getNumElements();
for (unsigned i = 0; i != NumElts; ++i) {
  if (!DemandedElts[i])
  const APFloat &C = CElt->getValueAPF();
    SignBitAllZero = false;
    SignBitAllOne = false;
if (SignBitAllOne != SignBitAllZero)
  Known.SignBit = SignBitAllOne;
5039 KnownNotFromFlags |= CB->getRetNoFPClass();
5041 KnownNotFromFlags |= Arg->getNoFPClass();
if (FPOp->hasNoNaNs())
  KnownNotFromFlags |= fcNan;
if (FPOp->hasNoInfs())
  KnownNotFromFlags |= fcInf;
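// Note: nnan/ninf fast-math flags rule out fcNan/fcInf up front;
// KnownNotFromFlags is later folded out of InterestedClasses so recursive
// queries do not spend effort on classes that are already excluded.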
5052 KnownNotFromFlags |= ~AssumedClasses.KnownFPClasses;
5056 InterestedClasses &= ~KnownNotFromFlags;
5075 const unsigned Opc =
Op->getOpcode();
5077 case Instruction::FNeg: {
5079 Known, Q,
Depth + 1);
5083 case Instruction::Select: {
5084 auto ComputeForArm = [&](
Value *Arm,
bool Invert) {
5094 ComputeForArm(
Op->getOperand(1),
false)
5098 case Instruction::Load: {
5099 const MDNode *NoFPClass =
5109 case Instruction::Call: {
5113 case Intrinsic::fabs: {
5118 InterestedClasses, Known, Q,
Depth + 1);
5124 case Intrinsic::copysign: {
5128 Known, Q,
Depth + 1);
5130 KnownSign, Q,
Depth + 1);
5134 case Intrinsic::fma:
5135 case Intrinsic::fmuladd: {
5140 if (
II->getArgOperand(0) ==
II->getArgOperand(1)) {
5143 InterestedClasses, KnownAddend, Q,
Depth + 1);
5145 InterestedClasses, KnownSrc, Q,
Depth + 1);
5149 II->getType()->getScalarType()->getFltSemantics();
5153 if (KnownNotFromFlags &
fcNan) {
5158 if (KnownNotFromFlags &
fcInf) {
5168 for (
int I = 0;
I != 3; ++
I) {
5170 InterestedClasses, KnownSrc[
I], Q,
Depth + 1);
5171 if (KnownSrc[
I].isUnknown())
5174 if (KnownNotFromFlags &
fcNan)
5176 if (KnownNotFromFlags &
fcInf)
5182 II->getType()->getScalarType()->getFltSemantics();
5188 case Intrinsic::sqrt:
5189 case Intrinsic::experimental_constrained_sqrt: {
5192 if (InterestedClasses &
fcNan)
5196 KnownSrc, Q,
Depth + 1);
5204 II->getType()->getScalarType()->getFltSemantics();
5214 case Intrinsic::sin:
5215 case Intrinsic::cos: {
5219 KnownSrc, Q,
Depth + 1);
5224 case Intrinsic::maxnum:
5225 case Intrinsic::minnum:
5226 case Intrinsic::minimum:
5227 case Intrinsic::maximum:
5228 case Intrinsic::minimumnum:
5229 case Intrinsic::maximumnum: {
5232 KnownLHS, Q,
Depth + 1);
5234 KnownRHS, Q,
Depth + 1);
5239 F ?
F->getDenormalMode(
5240 II->getType()->getScalarType()->getFltSemantics())
5247 case Intrinsic::canonicalize: {
5250 KnownSrc, Q,
Depth + 1);
5254 F ?
F->getDenormalMode(
5255 II->getType()->getScalarType()->getFltSemantics())
5260 case Intrinsic::vector_reduce_fmax:
5261 case Intrinsic::vector_reduce_fmin:
5262 case Intrinsic::vector_reduce_fmaximum:
5263 case Intrinsic::vector_reduce_fminimum: {
5267 InterestedClasses, Q,
Depth + 1);
5274 case Intrinsic::vector_reverse:
5277 II->getFastMathFlags(), InterestedClasses, Q,
Depth + 1);
5279 case Intrinsic::trunc:
5280 case Intrinsic::floor:
5281 case Intrinsic::ceil:
5282 case Intrinsic::rint:
5283 case Intrinsic::nearbyint:
5284 case Intrinsic::round:
5285 case Intrinsic::roundeven: {
5293 KnownSrc, Q,
Depth + 1);
5296 KnownSrc, IID == Intrinsic::trunc,
5297 V->getType()->getScalarType()->isMultiUnitFPType());
5300 case Intrinsic::exp:
5301 case Intrinsic::exp2:
5302 case Intrinsic::exp10:
5303 case Intrinsic::amdgcn_exp2: {
5306 KnownSrc, Q,
Depth + 1);
5310 Type *EltTy =
II->getType()->getScalarType();
5311 if (IID == Intrinsic::amdgcn_exp2 && EltTy->
isFloatTy())
5316 case Intrinsic::fptrunc_round: {
5321 case Intrinsic::log:
5322 case Intrinsic::log10:
5323 case Intrinsic::log2:
5324 case Intrinsic::experimental_constrained_log:
5325 case Intrinsic::experimental_constrained_log10:
5326 case Intrinsic::experimental_constrained_log2:
5327 case Intrinsic::amdgcn_log: {
5328 Type *EltTy =
II->getType()->getScalarType();
5343 KnownSrc, Q,
Depth + 1);
5353 case Intrinsic::powi: {
5357 const Value *Exp =
II->getArgOperand(1);
5358 Type *ExpTy = Exp->getType();
5362 ExponentKnownBits, Q,
Depth + 1);
5364 if (ExponentKnownBits.
Zero[0]) {
5379 KnownSrc, Q,
Depth + 1);
5384 case Intrinsic::ldexp: {
5387 KnownSrc, Q,
Depth + 1);
5403 if ((InterestedClasses & ExpInfoMask) ==
fcNone)
5409 II->getType()->getScalarType()->getFltSemantics();
5411 const Value *ExpArg =
II->getArgOperand(1);
5415 const int MantissaBits = Precision - 1;
5422 II->getType()->getScalarType()->getFltSemantics();
5423 if (ConstVal && ConstVal->
isZero()) {
5448 case Intrinsic::arithmetic_fence: {
5450 Known, Q,
Depth + 1);
5453 case Intrinsic::experimental_constrained_sitofp:
5454 case Intrinsic::experimental_constrained_uitofp:
5464 if (IID == Intrinsic::experimental_constrained_uitofp)
5469 case Intrinsic::amdgcn_rcp: {
5472 KnownSrc, Q,
Depth + 1);
5476 Type *EltTy =
II->getType()->getScalarType();
5499 case Intrinsic::amdgcn_rsq: {
5505 KnownSrc, Q,
Depth + 1);
5517 Type *EltTy =
II->getType()->getScalarType();
5543 case Instruction::FAdd:
5544 case Instruction::FSub: {
5547 Op->getOpcode() == Instruction::FAdd &&
5549 bool WantNaN = (InterestedClasses &
fcNan) !=
fcNone;
5552 if (!WantNaN && !WantNegative && !WantNegZero)
5558 if (InterestedClasses &
fcNan)
5559 InterestedSrcs |=
fcInf;
5561 KnownRHS, Q,
Depth + 1);
5564 bool Self =
Op->getOperand(0) ==
Op->getOperand(1) &&
5568 KnownLHS = KnownRHS;
5572 WantNegZero ||
Opc == Instruction::FSub) {
5577 Op->getType()->getScalarType()->getFltSemantics();
5581 if (Self &&
Opc == Instruction::FAdd) {
5589 KnownLHS, Q,
Depth + 1);
5592 Known =
Opc == Instruction::FAdd
5600 case Instruction::FMul: {
5603 F ?
F->getDenormalMode(
5604 Op->getType()->getScalarType()->getFltSemantics())
5609 if (
Op->getOperand(0) ==
Op->getOperand(1)) {
5619 bool CannotBeSubnormal =
false;
5628 Op->getType()->getScalarType()->getFltSemantics();
5630 const int MantissaBits = Precision - 1;
5632 int MinKnownExponent =
ilogb(*CRHS);
5633 if (MinKnownExponent >= MantissaBits)
5634 CannotBeSubnormal =
true;
5650 if (CannotBeSubnormal)
case Instruction::FDiv:
case Instruction::FRem: {
  const bool WantNan = (InterestedClasses & fcNan) != fcNone;
  if (Op->getOperand(0) == Op->getOperand(1) &&
    if (Op->getOpcode() == Instruction::FDiv) {
      Op->getType()->getScalarType()->getFltSemantics();
    Known = Op->getOpcode() == Instruction::FDiv
5689 const bool WantPositive =
5691 if (!WantNan && !WantNegative && !WantPositive)
5704 if (KnowSomethingUseful || WantPositive) {
5711 Op->getType()->getScalarType()->getFltSemantics();
5713 if (
Op->getOpcode() == Instruction::FDiv) {
5740 case Instruction::FPExt: {
5743 KnownSrc, Q,
Depth + 1);
5746 Op->getType()->getScalarType()->getFltSemantics();
5748 Op->getOperand(0)->getType()->getScalarType()->getFltSemantics();
5753 case Instruction::FPTrunc: {
5758 case Instruction::SIToFP:
5759 case Instruction::UIToFP: {
5768 if (
Op->getOpcode() == Instruction::UIToFP)
5771 if (InterestedClasses &
fcInf) {
5775 int IntSize =
Op->getOperand(0)->getType()->getScalarSizeInBits();
5776 if (
Op->getOpcode() == Instruction::SIToFP)
5781 Type *FPTy =
Op->getType()->getScalarType();
5788 case Instruction::ExtractElement: {
5791 const Value *Vec =
Op->getOperand(0);
5793 APInt DemandedVecElts;
5795 unsigned NumElts = VecTy->getNumElements();
5798 if (CIdx && CIdx->getValue().ult(NumElts))
5801 DemandedVecElts =
APInt(1, 1);
5807 case Instruction::InsertElement: {
5811 const Value *Vec =
Op->getOperand(0);
5812 const Value *Elt =
Op->getOperand(1);
5815 APInt DemandedVecElts = DemandedElts;
5816 bool NeedsElt =
true;
if (CIdx && CIdx->getValue().ult(NumElts)) {
  DemandedVecElts.clearBit(CIdx->getZExtValue());
  NeedsElt = DemandedElts[CIdx->getZExtValue()];
if (!DemandedVecElts.isZero()) {
5843 case Instruction::ShuffleVector: {
5852 APInt DemandedLHS, DemandedRHS;
5857 if (!!DemandedLHS) {
5858 const Value *
LHS = Shuf->getOperand(0);
5869 if (!!DemandedRHS) {
5871 const Value *
RHS = Shuf->getOperand(1);
5879 case Instruction::ExtractValue: {
5886 switch (
II->getIntrinsicID()) {
5887 case Intrinsic::frexp: {
5892 InterestedClasses, KnownSrc, Q,
Depth + 1);
5896 Op->getType()->getScalarType()->getFltSemantics();
5913 case Instruction::PHI: {
5916 if (
P->getNumIncomingValues() == 0)
5923 if (
Depth < PhiRecursionLimit) {
5930 for (
const Use &U :
P->operands()) {
5960 case Instruction::BitCast: {
5963 !Src->getType()->isIntOrIntVectorTy())
5966 const Type *Ty =
Op->getType()->getScalarType();
5967 KnownBits Bits(Ty->getScalarSizeInBits());
5971 if (Bits.isNonNegative())
5973 else if (Bits.isNegative())
5976 if (Ty->isIEEELikeFPTy()) {
5986 else if (!
APFloat(Ty->getFltSemantics(), ~Bits.Zero).
isNaN())
5993 InfKB.Zero.clearSignBit();
5995 assert(!InfResult.value());
5997 }
else if (Bits == InfKB) {
6005 ZeroKB.Zero.clearSignBit();
6007 assert(!ZeroResult.value());
6009 }
else if (Bits == ZeroKB) {
6022 const APInt &DemandedElts,
6029 return KnownClasses;
6055 InterestedClasses &=
~fcNan;
6057 InterestedClasses &=
~fcInf;
6063 Result.KnownFPClasses &=
~fcNan;
6065 Result.KnownFPClasses &=
~fcInf;
6074 APInt DemandedElts =
6128 if (FPOp->hasNoSignedZeros())
6132 switch (
User->getOpcode()) {
6133 case Instruction::FPToSI:
6134 case Instruction::FPToUI:
6136 case Instruction::FCmp:
6139 case Instruction::Call:
6141 switch (
II->getIntrinsicID()) {
6142 case Intrinsic::fabs:
6144 case Intrinsic::copysign:
6145 return U.getOperandNo() == 0;
6146 case Intrinsic::is_fpclass:
6147 case Intrinsic::vp_is_fpclass: {
6167 if (FPOp->hasNoNaNs())
6171 switch (
User->getOpcode()) {
6172 case Instruction::FPToSI:
6173 case Instruction::FPToUI:
6176 case Instruction::FAdd:
6177 case Instruction::FSub:
6178 case Instruction::FMul:
6179 case Instruction::FDiv:
6180 case Instruction::FRem:
6181 case Instruction::FPTrunc:
6182 case Instruction::FPExt:
6183 case Instruction::FCmp:
6186 case Instruction::FNeg:
6187 case Instruction::Select:
6188 case Instruction::PHI:
6190 case Instruction::Ret:
6191 return User->getFunction()->getAttributes().getRetNoFPClass() &
6193 case Instruction::Call:
6194 case Instruction::Invoke: {
6196 switch (
II->getIntrinsicID()) {
6197 case Intrinsic::fabs:
6199 case Intrinsic::copysign:
6200 return U.getOperandNo() == 0;
6202 case Intrinsic::maxnum:
6203 case Intrinsic::minnum:
6204 case Intrinsic::maximum:
6205 case Intrinsic::minimum:
6206 case Intrinsic::maximumnum:
6207 case Intrinsic::minimumnum:
6208 case Intrinsic::canonicalize:
6209 case Intrinsic::fma:
6210 case Intrinsic::fmuladd:
6211 case Intrinsic::sqrt:
6212 case Intrinsic::pow:
6213 case Intrinsic::powi:
6214 case Intrinsic::fptoui_sat:
6215 case Intrinsic::fptosi_sat:
6216 case Intrinsic::is_fpclass:
6217 case Intrinsic::vp_is_fpclass:
6247 switch (
I->getOpcode()) {
6248 case Instruction::SIToFP:
6249 case Instruction::UIToFP:
6257 case Instruction::Call: {
6260 case Intrinsic::trunc:
6261 case Intrinsic::floor:
6262 case Intrinsic::ceil:
6263 case Intrinsic::rint:
6264 case Intrinsic::nearbyint:
6265 case Intrinsic::round:
6266 case Intrinsic::roundeven:
6284 if (V->getType()->isIntegerTy(8))
6295 if (
DL.getTypeStoreSize(V->getType()).isZero())
6310 if (
C->isNullValue())
if (CFP->getType()->isHalfTy())
else if (CFP->getType()->isFloatTy())
else if (CFP->getType()->isDoubleTy())
if (CI->getBitWidth() % 8 == 0) {
  assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
  if (!CI->getValue().isSplat(8))
  return ConstantInt::get(Ctx, CI->getValue().trunc(8));
if (CE->getOpcode() == Instruction::IntToPtr) {
  unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
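// Example: the i32 constant 0xAAAAAAAA is a splat of the byte 0xAA and is
// accepted, while 0x01020304 fails the isSplat(8) check above and gives up.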
6354 if (LHS == UndefInt8)
6356 if (RHS == UndefInt8)
6362 Value *Val = UndefInt8;
6363 for (
uint64_t I = 0, E = CA->getNumElements();
I != E; ++
I)
6370 Value *Val = UndefInt8;
6405 while (PrevTo != OrigTo) {
6452 unsigned IdxSkip = Idxs.
size();
6465 std::optional<BasicBlock::iterator> InsertBefore) {
6468 if (idx_range.
empty())
6471 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
6472 "Not looking at a struct or array?");
6474 "Invalid indices for type?");
6477 C =
C->getAggregateElement(idx_range[0]);
6478 if (!
C)
return nullptr;
6485 const unsigned *req_idx = idx_range.
begin();
6486 for (
const unsigned *i =
I->idx_begin(), *e =
I->idx_end();
6487 i != e; ++i, ++req_idx) {
6488 if (req_idx == idx_range.
end()) {
6518 ArrayRef(req_idx, idx_range.
end()), InsertBefore);
6527 unsigned size =
I->getNumIndices() + idx_range.
size();
6532 Idxs.
append(
I->idx_begin(),
I->idx_end());
6538 &&
"Number of indices added not correct?");
6555 assert(V &&
"V should not be null.");
6556 assert((ElementSize % 8) == 0 &&
6557 "ElementSize expected to be a multiple of the size of a byte.");
6558 unsigned ElementSizeInBytes = ElementSize / 8;
6570 APInt Off(
DL.getIndexTypeSizeInBits(V->getType()), 0);
6577 uint64_t StartIdx = Off.getLimitedValue();
6584 if ((StartIdx % ElementSizeInBytes) != 0)
6587 Offset += StartIdx / ElementSizeInBytes;
6593 uint64_t SizeInBytes =
DL.getTypeStoreSize(GVTy).getFixedValue();
6596 Slice.Array =
nullptr;
6608 Type *InitElTy = ArrayInit->getElementType();
6613 ArrayTy = ArrayInit->getType();
6618 if (ElementSize != 8)
6637 Slice.Array = Array;
6639 Slice.Length = NumElts -
Offset;
6653 if (Slice.Array ==
nullptr) {
6664 if (Slice.Length == 1) {
6676 Str = Str.
substr(Slice.Offset);
6682 Str = Str.substr(0, Str.find(
'\0'));
6695 unsigned CharSize) {
6697 V = V->stripPointerCasts();
6702 if (!PHIs.
insert(PN).second)
6707 for (
Value *IncValue : PN->incoming_values()) {
if (Len == 0)
  return 0;
if (Len == ~0ULL)
  continue;
if (Len != LenSoFar && LenSoFar != ~0ULL)
if (Len1 == 0)
  return 0;
if (Len2 == 0)
  return 0;
if (Len1 == ~0ULL)
  return Len2;
if (Len2 == ~0ULL)
  return Len1;
if (Len1 != Len2)
  return 0;
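// Interpretation (not stated in the source): a length of 0 seems to mean "no
// constant length could be proven" and ~0ULL "no constraint from this path"
// (e.g. a PHI edge that refers back to the PHI itself), so two paths that
// prove different lengths must give up.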
6739 if (Slice.Array ==
nullptr)
6747 unsigned NullIndex = 0;
6748 for (
unsigned E = Slice.Length; NullIndex <
E; ++NullIndex) {
6749 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
6753 return NullIndex + 1;
6759 if (!V->getType()->isPointerTy())
6766 return Len == ~0ULL ? 1 : Len;
6771 bool MustPreserveNullness) {
6773 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
6774 if (
const Value *RV =
Call->getReturnedArgOperand())
6778 Call, MustPreserveNullness))
6779 return Call->getArgOperand(0);
6785 switch (
Call->getIntrinsicID()) {
6786 case Intrinsic::launder_invariant_group:
6787 case Intrinsic::strip_invariant_group:
6788 case Intrinsic::aarch64_irg:
6789 case Intrinsic::aarch64_tagp:
6799 case Intrinsic::amdgcn_make_buffer_rsrc:
6801 case Intrinsic::ptrmask:
6802 return !MustPreserveNullness;
6803 case Intrinsic::threadlocal_address:
6806 return !
Call->getParent()->getParent()->isPresplitCoroutine();
6823 if (!PrevValue || LI->
getLoopFor(PrevValue->getParent()) != L)
6825 if (!PrevValue || LI->
getLoopFor(PrevValue->getParent()) != L)
6834 if (!L->isLoopInvariant(Load->getPointerOperand()))
6840 for (
unsigned Count = 0; MaxLookup == 0 ||
Count < MaxLookup; ++
Count) {
6842 const Value *PtrOp =
GEP->getPointerOperand();
6853 if (GA->isInterposable())
6855 V = GA->getAliasee();
6859 if (
PHI->getNumIncomingValues() == 1) {
6860 V =
PHI->getIncomingValue(0);
6881 assert(V->getType()->isPointerTy() &&
"Unexpected operand type!");
6888 const LoopInfo *LI,
unsigned MaxLookup) {
6896 if (!Visited.
insert(
P).second)
6925 }
while (!Worklist.
empty());
6929 const unsigned MaxVisited = 8;
6934 const Value *Object =
nullptr;
6944 if (!Visited.
insert(
P).second)
6947 if (Visited.
size() == MaxVisited)
6963 else if (Object !=
P)
6965 }
while (!Worklist.
empty());
6967 return Object ? Object : FirstObject;
6977 if (U->getOpcode() == Instruction::PtrToInt)
6978 return U->getOperand(0);
6985 if (U->getOpcode() != Instruction::Add ||
6990 V = U->getOperand(0);
6994 assert(V->getType()->isIntegerTy() &&
"Unexpected operand type!");
7011 for (
const Value *V : Objs) {
7012 if (!Visited.
insert(V).second)
7017 if (O->getType()->isPointerTy()) {
7030 }
while (!Working.
empty());
7039 auto AddWork = [&](
Value *V) {
7040 if (Visited.
insert(V).second)
7050 if (Result && Result != AI)
7054 AddWork(CI->getOperand(0));
7056 for (
Value *IncValue : PN->incoming_values())
7059 AddWork(
SI->getTrueValue());
7060 AddWork(
SI->getFalseValue());
7062 if (OffsetZero && !
GEP->hasAllZeroIndices())
7064 AddWork(
GEP->getPointerOperand());
7066 Value *Returned = CB->getReturnedArgOperand();
7074 }
while (!Worklist.
empty());
7080 const Value *V,
bool AllowLifetime,
bool AllowDroppable) {
7086 if (AllowLifetime &&
II->isLifetimeStartOrEnd())
7089 if (AllowDroppable &&
II->isDroppable())
7110 return (!Shuffle || Shuffle->isSelect()) &&
7117 bool IgnoreUBImplyingAttrs) {
7119 AC, DT, TLI, UseVariableInfo,
7120 IgnoreUBImplyingAttrs);
7126 bool UseVariableInfo,
bool IgnoreUBImplyingAttrs) {
7130 auto hasEqualReturnAndLeadingOperandTypes =
7131 [](
const Instruction *Inst,
unsigned NumLeadingOperands) {
7135 for (
unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
7141 hasEqualReturnAndLeadingOperandTypes(Inst, 2));
7143 hasEqualReturnAndLeadingOperandTypes(Inst, 1));
7150 case Instruction::UDiv:
7151 case Instruction::URem: {
7158 case Instruction::SDiv:
7159 case Instruction::SRem: {
7161 const APInt *Numerator, *Denominator;
7165 if (*Denominator == 0)
7177 case Instruction::Load: {
7178 if (!UseVariableInfo)
7191 case Instruction::Call: {
7195 const Function *Callee = CI->getCalledFunction();
7199 if (!Callee || !Callee->isSpeculatable())
7203 return IgnoreUBImplyingAttrs || !CI->hasUBImplyingAttrs();
7205 case Instruction::VAArg:
7206 case Instruction::Alloca:
7207 case Instruction::Invoke:
7208 case Instruction::CallBr:
7209 case Instruction::PHI:
7210 case Instruction::Store:
7211 case Instruction::Ret:
7212 case Instruction::Br:
7213 case Instruction::IndirectBr:
7214 case Instruction::Switch:
7215 case Instruction::Unreachable:
7216 case Instruction::Fence:
7217 case Instruction::AtomicRMW:
7218 case Instruction::AtomicCmpXchg:
7219 case Instruction::LandingPad:
7220 case Instruction::Resume:
7221 case Instruction::CatchSwitch:
7222 case Instruction::CatchPad:
7223 case Instruction::CatchRet:
7224 case Instruction::CleanupPad:
7225 case Instruction::CleanupRet:
7231 if (
I.mayReadOrWriteMemory())
7299 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
7344 if (
Add &&
Add->hasNoSignedWrap()) {
7383 bool LHSOrRHSKnownNonNegative =
7385 bool LHSOrRHSKnownNegative =
7387 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
7390 if ((AddKnown.
isNonNegative() && LHSOrRHSKnownNonNegative) ||
7391 (AddKnown.
isNegative() && LHSOrRHSKnownNegative))
7466 assert(EVI->getNumIndices() == 1 &&
"Obvious from CI's type");
7468 if (EVI->getIndices()[0] == 0)
7471 assert(EVI->getIndices()[0] == 1 &&
"Obvious from CI's type");
7473 for (
const auto *U : EVI->users())
7475 assert(
B->isConditional() &&
"How else is it using an i1?");
7486 auto AllUsesGuardedByBranch = [&](
const BranchInst *BI) {
7492 for (
const auto *Result :
Results) {
7495 if (DT.
dominates(NoWrapEdge, Result->getParent()))
7498 for (
const auto &RU : Result->uses())
7506 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
7518 unsigned NumElts = FVTy->getNumElements();
7519 for (
unsigned i = 0; i < NumElts; ++i)
7520 ShiftAmounts.
push_back(
C->getAggregateElement(i));
7528 return CI && CI->getValue().ult(
C->getType()->getIntegerBitWidth());
7549 bool ConsiderFlagsAndMetadata) {
7552 Op->hasPoisonGeneratingAnnotations())
7555 unsigned Opcode =
Op->getOpcode();
7559 case Instruction::Shl:
7560 case Instruction::AShr:
7561 case Instruction::LShr:
7563 case Instruction::FPToSI:
7564 case Instruction::FPToUI:
7568 case Instruction::Call:
7570 switch (
II->getIntrinsicID()) {
7572 case Intrinsic::ctlz:
7573 case Intrinsic::cttz:
7574 case Intrinsic::abs:
7577 case Intrinsic::sshl_sat:
7578 case Intrinsic::ushl_sat:
7586 case Instruction::CallBr:
7587 case Instruction::Invoke: {
7589 return !CB->hasRetAttr(Attribute::NoUndef) &&
7590 !CB->hasFnAttr(Attribute::NoCreateUndefOrPoison);
7592 case Instruction::InsertElement:
7593 case Instruction::ExtractElement: {
7596 unsigned IdxOp =
Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
7600 Idx->getValue().uge(VTy->getElementCount().getKnownMinValue());
7603 case Instruction::ShuffleVector: {
7609 case Instruction::FNeg:
7610 case Instruction::PHI:
7611 case Instruction::Select:
7612 case Instruction::ExtractValue:
7613 case Instruction::InsertValue:
7614 case Instruction::Freeze:
7615 case Instruction::ICmp:
7616 case Instruction::FCmp:
7617 case Instruction::GetElementPtr:
7619 case Instruction::AddrSpaceCast:
7634 bool ConsiderFlagsAndMetadata) {
7636 ConsiderFlagsAndMetadata);
7641 ConsiderFlagsAndMetadata);
7646 if (ValAssumedPoison == V)
7649 const unsigned MaxDepth = 2;
7650 if (
Depth >= MaxDepth)
7655 return propagatesPoison(Op) &&
7656 directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
7680 const unsigned MaxDepth = 2;
7681 if (
Depth >= MaxDepth)
7687 return impliesPoison(Op, V, Depth + 1);
7694 return ::impliesPoison(ValAssumedPoison, V, 0);
7709 if (
A->hasAttribute(Attribute::NoUndef) ||
7710 A->hasAttribute(Attribute::Dereferenceable) ||
7711 A->hasAttribute(Attribute::DereferenceableOrNull))
7726 if (
C->getType()->isVectorTy()) {
7729 if (
Constant *SplatC =
C->getSplatValue())
7737 return !
C->containsConstantExpression();
7750 auto *StrippedV = V->stripPointerCastsSameRepresentation();
7755 auto OpCheck = [&](
const Value *V) {
7766 if (CB->hasRetAttr(Attribute::NoUndef) ||
7767 CB->hasRetAttr(Attribute::Dereferenceable) ||
7768 CB->hasRetAttr(Attribute::DereferenceableOrNull))
7775 unsigned Num = PN->getNumIncomingValues();
7776 bool IsWellDefined =
true;
7777 for (
unsigned i = 0; i < Num; ++i) {
7778 if (PN == PN->getIncomingValue(i))
7780 auto *TI = PN->getIncomingBlock(i)->getTerminator();
7782 DT,
Depth + 1, Kind)) {
7783 IsWellDefined =
false;
7794 }
else if (
all_of(Opr->operands(), OpCheck))
7800 if (
I->hasMetadata(LLVMContext::MD_noundef) ||
7801 I->hasMetadata(LLVMContext::MD_dereferenceable) ||
7802 I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
7822 auto *Dominator = DNode->
getIDom();
7827 auto *TI = Dominator->getBlock()->getTerminator();
7831 if (BI->isConditional())
7832 Cond = BI->getCondition();
7834 Cond =
SI->getCondition();
7843 if (
any_of(Opr->operands(), [V](
const Use &U) {
7844 return V == U && propagatesPoison(U);
7850 Dominator = Dominator->getIDom();
7863 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7870 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7877 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT,
Depth,
7901 while (!Worklist.
empty()) {
7910 if (
I != Root && !
any_of(
I->operands(), [&KnownPoison](
const Use &U) {
7911 return KnownPoison.contains(U) && propagatesPoison(U);
7915 if (KnownPoison.
insert(
I).second)
7927 return ::computeOverflowForSignedAdd(
Add->getOperand(0),
Add->getOperand(1),
7935 return ::computeOverflowForSignedAdd(LHS, RHS,
nullptr, SQ);
7967 return !
I->mayThrow() &&
I->willReturn();
7981 unsigned ScanLimit) {
7988 assert(ScanLimit && "scan limit must be non-zero");
7990 if (--ScanLimit == 0)
8004 if (I->getParent() != L->getHeader())
return false;
8007 if (&LI == I)
return true;
8010 llvm_unreachable("Instruction not contained in its own parent basic block.");
8016 case Intrinsic::sadd_with_overflow:
8017 case Intrinsic::ssub_with_overflow:
8018 case Intrinsic::smul_with_overflow:
8019 case Intrinsic::uadd_with_overflow:
8020 case Intrinsic::usub_with_overflow:
8021 case Intrinsic::umul_with_overflow:
8026 case Intrinsic::ctpop:
8027 case Intrinsic::ctlz:
8028 case Intrinsic::cttz:
8029 case Intrinsic::abs:
8030 case Intrinsic::smax:
8031 case Intrinsic::smin:
8032 case Intrinsic::umax:
8033 case Intrinsic::umin:
8034 case Intrinsic::scmp:
8035 case Intrinsic::is_fpclass:
8036 case Intrinsic::ptrmask:
8037 case Intrinsic::ucmp:
8038 case Intrinsic::bitreverse:
8039 case Intrinsic::bswap:
8040 case Intrinsic::sadd_sat:
8041 case Intrinsic::ssub_sat:
8042 case Intrinsic::sshl_sat:
8043 case Intrinsic::uadd_sat:
8044 case Intrinsic::usub_sat:
8045 case Intrinsic::ushl_sat:
8046 case Intrinsic::smul_fix:
8047 case Intrinsic::smul_fix_sat:
8048 case Intrinsic::umul_fix:
8049 case Intrinsic::umul_fix_sat:
8050 case Intrinsic::pow:
8051 case Intrinsic::powi:
8052 case Intrinsic::sin:
8053 case Intrinsic::sinh:
8054 case Intrinsic::cos:
8055 case Intrinsic::cosh:
8056 case Intrinsic::sincos:
8057 case Intrinsic::sincospi:
8058 case Intrinsic::tan:
8059 case Intrinsic::tanh:
8060 case Intrinsic::asin:
8061 case Intrinsic::acos:
8062 case Intrinsic::atan:
8063 case Intrinsic::atan2:
8064 case Intrinsic::canonicalize:
8065 case Intrinsic::sqrt:
8066 case Intrinsic::exp:
8067 case Intrinsic::exp2:
8068 case Intrinsic::exp10:
8069 case Intrinsic::log:
8070 case Intrinsic::log2:
8071 case Intrinsic::log10:
8072 case Intrinsic::modf:
8073 case Intrinsic::floor:
8074 case Intrinsic::ceil:
8075 case Intrinsic::trunc:
8076 case Intrinsic::rint:
8077 case Intrinsic::nearbyint:
8078 case Intrinsic::round:
8079 case Intrinsic::roundeven:
8080 case Intrinsic::lrint:
8081 case Intrinsic::llrint:
8082 case Intrinsic::fshl:
8083 case Intrinsic::fshr:
8092 switch (I->getOpcode()) {
8093 case Instruction::Freeze:
8094 case Instruction::PHI:
8095 case Instruction::Invoke:
8097 case Instruction::Select:
8099 case Instruction::Call:
8103 case Instruction::ICmp:
8104 case Instruction::FCmp:
8105 case Instruction::GetElementPtr:
8119 template <typename CallableT>
8121 const CallableT &Handle) {
8122 switch (I->getOpcode()) {
8123 case Instruction::Store:
8128 case Instruction::Load:
8135 case Instruction::AtomicCmpXchg:
8140 case Instruction::AtomicRMW:
8145 case Instruction::Call:
8146 case Instruction::Invoke: {
8150 for (unsigned i = 0; i < CB->arg_size(); ++i)
8153 CB->paramHasAttr(i, Attribute::DereferenceableOrNull)) &&
8158 case Instruction::Ret:
8159 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef) &&
8160 Handle(I->getOperand(0)))
8163 case Instruction::Switch:
8167 case Instruction::Br: {
8169 if (BR->isConditional() && Handle(BR->getCondition()))
8181 template <typename CallableT>
8183 const CallableT &Handle) {
8186 switch (I->getOpcode()) {
8188 case Instruction::UDiv:
8189 case Instruction::SDiv:
8190 case Instruction::URem:
8191 case Instruction::SRem:
8192 return Handle(I->getOperand(1));
8201 I, [&](const Value *V) { return KnownPoison.count(V); });
8220 if (Arg->getParent()->isDeclaration())
8223 Begin = BB->begin();
8230 unsigned ScanLimit = 32;
8239 if (--ScanLimit == 0)
8243 return WellDefinedOp == V;
8263 if (--ScanLimit == 0)
8271 for (const Use &Op : I.operands()) {
8281 if (I.getOpcode() == Instruction::Select &&
8282 YieldsPoison.count(I.getOperand(1)) &&
8283 YieldsPoison.count(I.getOperand(2))) {
8289 if (!BB || !Visited.insert(BB).second)
8299 return ::programUndefinedIfUndefOrPoison(Inst, false);
8303 return ::programUndefinedIfUndefOrPoison(Inst, true);
8314 if (!C->getElementType()->isFloatingPointTy())
8316 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
8317 if (C->getElementAsAPFloat(I).isNaN())
8331 return !C->isZero();
8334 if (!C->getElementType()->isFloatingPointTy())
8336 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
8337 if (C->getElementAsAPFloat(I).isZero())
8360 if (CmpRHS == FalseVal) {
8404 if (CmpRHS != TrueVal) {
8443 Value *A = nullptr, *B = nullptr;
8448 Value *C = nullptr, *D = nullptr;
8450 if (L.Flavor != R.Flavor)
8502 return {L.Flavor, SPNB_NA, false};
8509 return {L.Flavor, SPNB_NA, false};
8516 return {L.Flavor, SPNB_NA, false};
8523 return {L.Flavor, SPNB_NA, false};
8539 return ConstantInt::get(V->getType(), ~(*C));
8596 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
8616 assert(X && Y && "Invalid operand");
8618 auto IsNegationOf = [&](const Value *X, const Value *Y) {
8623 if (NeedNSW && !BO->hasNoSignedWrap())
8627 if (!AllowPoison && !Zero->isNullValue())
8634 if (IsNegationOf(X, Y) || IsNegationOf(Y, X))
8661 const APInt *RHSC1, *RHSC2;
8672 return CR1.inverse() == CR2;
8706 std::optional<std::pair<CmpPredicate, Constant *>>
8709 "Only for relational integer predicates.");
8711 return std::nullopt;
8717 bool WillIncrement =
8722 auto ConstantIsOk = [WillIncrement, IsSigned](ConstantInt *C) {
8723 return WillIncrement ? !C->isMaxValue(IsSigned) : !C->isMinValue(IsSigned);
8726 Constant *SafeReplacementConstant = nullptr;
8729 if (!ConstantIsOk(CI))
8730 return std::nullopt;
8732 unsigned NumElts = FVTy->getNumElements();
8733 for (unsigned i = 0; i != NumElts; ++i) {
8734 Constant *Elt = C->getAggregateElement(i);
8736 return std::nullopt;
8744 if (!CI || !ConstantIsOk(CI))
8745 return std::nullopt;
8747 if (!SafeReplacementConstant)
8748 SafeReplacementConstant = CI;
8752 Value *SplatC = C->getSplatValue();
8755 if (!CI || !ConstantIsOk(CI))
8756 return std::nullopt;
8759 return std::nullopt;
8766 if (C->containsUndefOrPoisonElement()) {
8767 assert(SafeReplacementConstant && "Replacement constant not set");
8774 Constant *OneOrNegOne = ConstantInt::get(Type, WillIncrement ? 1 : -1, true);
8777 return std::make_pair(NewPred, NewC);
8786 bool HasMismatchedZeros = false;
8792 Value *OutputZeroVal = nullptr;
8795 OutputZeroVal = TrueVal;
8798 OutputZeroVal = FalseVal;
8800 if (OutputZeroVal) {
8802 HasMismatchedZeros = true;
8803 CmpLHS = OutputZeroVal;
8806 HasMismatchedZeros = true;
8807 CmpRHS = OutputZeroVal;
8824 if (!HasMismatchedZeros)
8835 bool Ordered = false;
8846 if (LHSSafe && RHSSafe) {
8877 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
8888 if (TrueVal == CmpLHS && FalseVal == CmpRHS)
8894 auto MaybeSExtCmpLHS =
8898 if (match(TrueVal, MaybeSExtCmpLHS)) {
8920 else if (match(FalseVal, MaybeSExtCmpLHS)) {
8960 case Instruction::ZExt:
8964 case Instruction::SExt:
8968 case Instruction::Trunc:
8971 CmpConst->getType() == SrcTy) {
8993 CastedTo = CmpConst;
8995 unsigned ExtOp = CmpI->isSigned() ? Instruction::SExt : Instruction::ZExt;
8999 case Instruction::FPTrunc:
9002 case Instruction::FPExt:
9005 case Instruction::FPToUI:
9008 case Instruction::FPToSI:
9011 case Instruction::UIToFP:
9014 case Instruction::SIToFP:
9027 if (CastedBack && CastedBack != C)
9055 *CastOp = Cast1->getOpcode();
9056 Type *SrcTy = Cast1->getSrcTy();
9059 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
9060 return Cast2->getOperand(0);
9068 Value *CastedTo = nullptr;
9069 if (*CastOp == Instruction::Trunc) {
9083 "V2 and Cast1 should be the same type.");
9102 Value *TrueVal = SI->getTrueValue();
9103 Value *FalseVal = SI->getFalseValue();
9106 CmpI, TrueVal, FalseVal, LHS, RHS,
9125 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
9129 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
9131 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
9138 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
9140 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
9145 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
9164 return Intrinsic::umin;
9166 return Intrinsic::umax;
9168 return Intrinsic::smin;
9170 return Intrinsic::smax;
9186 case Intrinsic::smax: return Intrinsic::smin;
9187 case Intrinsic::smin: return Intrinsic::smax;
9188 case Intrinsic::umax: return Intrinsic::umin;
9189 case Intrinsic::umin: return Intrinsic::umax;
9192 case Intrinsic::maximum: return Intrinsic::minimum;
9193 case Intrinsic::minimum: return Intrinsic::maximum;
9194 case Intrinsic::maxnum: return Intrinsic::minnum;
9195 case Intrinsic::minnum: return Intrinsic::maxnum;
9196 case Intrinsic::maximumnum:
9197 return Intrinsic::minimumnum;
9198 case Intrinsic::minimumnum:
9199 return Intrinsic::maximumnum;
9214std::pair<Intrinsic::ID, bool>
9219 bool AllCmpSingleUse = true;
9222 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
9228 SelectPattern.Flavor != CurrentPattern.Flavor)
9230 SelectPattern = CurrentPattern;
9235 switch (SelectPattern.Flavor) {
9237 return {Intrinsic::smin, AllCmpSingleUse};
9239 return {Intrinsic::umin, AllCmpSingleUse};
9241 return {Intrinsic::smax, AllCmpSingleUse};
9243 return {Intrinsic::umax, AllCmpSingleUse};
9245 return {Intrinsic::maxnum, AllCmpSingleUse};
9247 return {Intrinsic::minnum, AllCmpSingleUse};
9255 template <typename InstTy>
9265 for (unsigned I = 0; I != 2; ++I) {
9270 if (LHS != PN && RHS != PN)
9306 if (I->arg_size() != 2 || I->getType() != I->getArgOperand(0)->getType() ||
9307 I->getType() != I->getArgOperand(1)->getType())
9335 return !C->isNegative();
9347 const APInt *CLHS, *CRHS;
9350 return CLHS->sle(*CRHS);
9388 const APInt *CLHS, *CRHS;
9391 return CLHS->ule(*CRHS);
9400 static std::optional<bool>
9405 return std::nullopt;
9412 return std::nullopt;
9419 return std::nullopt;
9426 return std::nullopt;
9433 return std::nullopt;
9440 static std::optional<bool>
9446 if (CR.icmp(Pred, RCR))
9453 return std::nullopt;
9466 return std::nullopt;
9472 static std::optional<bool>
9503 const APInt *Unused;
9522 return std::nullopt;
9526 if (L0 == R0 && L1 == R1)
9559 ((A == R0 && B == R1) || (A == R1 && B == R0) ||
9577 return std::nullopt;
9583 static std::optional<bool>
9613 if (L0 == R0 && L1 == R1) {
9614 if ((LPred & RPred) == LPred)
9616 if ((LPred & ~RPred) == LPred)
9624 if (std::optional<ConstantFPRange> DomCR =
9626 if (std::optional<ConstantFPRange> ImpliedCR =
9628 if (ImpliedCR->contains(*DomCR))
9631 if (std::optional<ConstantFPRange> ImpliedCR =
9634 if (ImpliedCR->contains(*DomCR))
9640 return std::nullopt;
9647 static std::optional<bool>
9652 assert((LHS->getOpcode() == Instruction::And ||
9653 LHS->getOpcode() == Instruction::Or ||
9654 LHS->getOpcode() == Instruction::Select) &&
9655 "Expected LHS to be 'and', 'or', or 'select'.");
9662 const Value *ALHS, *ARHS;
9667 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
9670 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
9672 return std::nullopt;
9674 return std::nullopt;
9683 return std::nullopt;
9688 return std::nullopt;
9690 assert(LHS->getType()->isIntOrIntVectorTy(1) &&
9691 "Expected integer type only!");
9695 LHSIsTrue = !LHSIsTrue;
9701 LHSCmp->getOperand(0), LHSCmp->getOperand(1),
9702 RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue);
9706 ConstantInt::get(V->getType(), 0), RHSPred,
9707 RHSOp0, RHSOp1, DL, LHSIsTrue);
9710 "Expected floating point type only!");
9713 LHSCmp->getOperand(1), RHSPred, RHSOp0, RHSOp1,
9721 if ((LHSI->getOpcode() == Instruction::And ||
9722 LHSI->getOpcode() == Instruction::Or ||
9723 LHSI->getOpcode() == Instruction::Select))
9727 return std::nullopt;
9732 bool LHSIsTrue, unsigned Depth) {
9738 bool InvertRHS = false;
9747 LHS, RHSCmp->getCmpPredicate(), RHSCmp->getOperand(0),
9748 RHSCmp->getOperand(1), DL, LHSIsTrue, Depth))
9749 return InvertRHS ? !*Implied : *Implied;
9750 return std::nullopt;
9754 LHS, RHSCmp->getPredicate(), RHSCmp->getOperand(0),
9755 RHSCmp->getOperand(1), DL, LHSIsTrue, Depth))
9756 return InvertRHS ? !*Implied : *Implied;
9757 return std::nullopt;
9763 ConstantInt::get(V->getType(), 0), DL,
9765 return InvertRHS ? !*Implied : *Implied;
9766 return std::nullopt;
9770 return std::nullopt;
9774 const Value *RHS1, *RHS2;
9776 if (std::optional<bool> Imp =
9780 if (std::optional<bool> Imp =
9786 if (std::optional<bool> Imp =
9790 if (std::optional<bool> Imp =
9796 return std::nullopt;
9801 static std::pair<Value *, bool>
9803 if (!ContextI || !ContextI->getParent())
9804 return {nullptr, false};
9811 return {nullptr, false};
9817 return {nullptr, false};
9820 if (TrueBB == FalseBB)
9821 return {nullptr, false};
9823 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
9824 "Predecessor block does not point to successor?");
9827 return {PredCond, TrueBB == ContextBB};
9833 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
9837 return std::nullopt;
9849 return std::nullopt;
9854 bool PreferSignedRange) {
9855 unsigned Width = Lower.getBitWidth();
9858 case Instruction::Sub:
9868 if (PreferSignedRange && HasNSW && HasNUW)
9874 } else if (HasNSW) {
9875 if (C->isNegative()) {
9888 case Instruction::Add:
9897 if (PreferSignedRange && HasNSW && HasNUW)
9903 } else if (HasNSW) {
9904 if (C->isNegative()) {
9917 case Instruction::And:
9928 case Instruction::Or:
9934 case Instruction::AShr:
9940 unsigned ShiftAmount = Width - 1;
9941 if (!C->isZero() && IIQ.isExact(&BO))
9942 ShiftAmount = C->countr_zero();
9943 if (C->isNegative()) {
9946 Upper = C->ashr(ShiftAmount) + 1;
9949 Lower = C->ashr(ShiftAmount);
9955 case Instruction::LShr:
9961 unsigned ShiftAmount = Width - 1;
9962 if (!C->isZero() && IIQ.isExact(&BO))
9963 ShiftAmount = C->countr_zero();
9964 Lower = C->lshr(ShiftAmount);
9969 case Instruction::Shl:
9976 if (C->isNegative()) {
9978 unsigned ShiftAmount = C->countl_one() - 1;
9979 Lower = C->shl(ShiftAmount);
9983 unsigned ShiftAmount = C->countl_zero() - 1;
9985 Upper = C->shl(ShiftAmount) + 1;
10004 case Instruction::SDiv:
10008 if (C->isAllOnes()) {
10011 Lower = IntMin + 1;
10012 Upper = IntMax + 1;
10013 } else if (C->countl_zero() < Width - 1) {
10024 if (C->isMinSignedValue()) {
10036 case Instruction::UDiv:
10046 case Instruction::SRem:
10052 if (C->isNegative()) {
10063 case Instruction::URem:
10078 bool UseInstrInfo) {
10079 unsigned Width = II.getType()->getScalarSizeInBits();
10081 switch (II.getIntrinsicID()) {
10082 case Intrinsic::ctlz:
10083 case Intrinsic::cttz: {
10085 if (!UseInstrInfo || !match(II.getArgOperand(1), m_One()))
10090 case Intrinsic::ctpop:
10093 APInt(Width, Width) + 1);
10094 case Intrinsic::uadd_sat:
10100 case Intrinsic::sadd_sat:
10103 if (C->isNegative())
10114 case Intrinsic::usub_sat:
10124 case Intrinsic::ssub_sat:
10126 if (C->isNegative())
10136 if (C->isNegative())
10147 case Intrinsic::umin:
10148 case Intrinsic::umax:
10149 case Intrinsic::smin:
10150 case Intrinsic::smax:
10155 switch (II.getIntrinsicID()) {
10156 case Intrinsic::umin:
10158 case Intrinsic::umax:
10160 case Intrinsic::smin:
10163 case Intrinsic::smax:
10170 case Intrinsic::abs:
10179 case Intrinsic::vscale:
10180 if (!II.getParent() || !II.getFunction())
10187 return ConstantRange::getFull(Width);
10192 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
10196 return ConstantRange::getFull(BitWidth);
10219 return ConstantRange::getFull(BitWidth);
10221 switch (R.Flavor) {
10233 return ConstantRange::getFull(BitWidth);
10240 unsigned BitWidth = I->getType()->getScalarSizeInBits();
10241 if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
10259 assert(V->getType()->isIntOrIntVectorTy() &&
"Expected integer instruction");
10262 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
10265 return C->toConstantRange();
10267 unsigned BitWidth = V->getType()->getScalarSizeInBits();
10280 SI->getTrueValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
10282 SI->getFalseValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
10292 if (std::optional<ConstantRange> Range = A->getRange())
10300 if (std::optional<ConstantRange> Range = CB->getRange())
10311 "Got assumption for the wrong function!");
10312 assert(I->getIntrinsicID() == Intrinsic::assume &&
10313 "must be an assume intrinsic");
10317 Value *Arg = I->getArgOperand(0);
10320 if (!Cmp || Cmp->getOperand(0) != V)
10325 UseInstrInfo, AC, I, DT, Depth + 1);
10348 InsertAffected(Op);
10355 auto AddAffected = [&InsertAffected](Value *V) {
10359 auto AddCmpOperands = [&AddAffected, IsAssume](Value *LHS, Value *RHS) {
10370 while (!Worklist.empty()) {
10372 if (!Visited.insert(V).second)
10418 AddCmpOperands(A, B);
10455 AddCmpOperands(A, B);
10483 if (BO->getOpcode() == Instruction::Add ||
10484 BO->getOpcode() == Instruction::Or) {
10486 const APInt *C1, *C2;
10505 unsigned MaxCount, bool AllowUndefOrPoison) {
10508 auto Push = [&](const Value *V) -> bool {
10514 if (Constants.contains(C))
10516 if (Constants.size() == MaxCount)
10518 Constants.insert(C);
10523 if (Visited.insert(Inst).second)
10531 while (!Worklist.empty()) {
10534 case Instruction::Select:
10540 case Instruction::PHI:
10543 if (IncomingValue == CurInst)
10545 if (!Push(IncomingValue))
static void computeKnownFPClassFromCond(const Value *V, Value *Cond, bool CondIsTrue, const Instruction *CxtI, KnownFPClass &KnownFromContext, unsigned Depth=0)
static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero, SimplifyQuery &Q, unsigned Depth)
Try to detect a recurrence that the value of the induction variable is always a power of two (or zero...
static cl::opt< unsigned > DomConditionsMaxUses("dom-conditions-max-uses", cl::Hidden, cl::init(20))
static unsigned computeNumSignBitsVectorConstant(const Value *V, const APInt &DemandedElts, unsigned TyBits)
For vector constants, loop over the elements and find the constant with the minimum number of sign bi...
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS, const Value *RHS)
Return true if "icmp Pred LHS RHS" is always true.
static bool isModifyingBinopOfNonZero(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
Return true if V1 == (binop V2, X), where X is known non-zero.
static bool isGEPKnownNonNull(const GEPOperator *GEP, const SimplifyQuery &Q, unsigned Depth)
Test whether a GEP's result is known to be non-null.
static bool isNonEqualShl(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and the shift is nuw or nsw.
static bool isKnownNonNullFromDominatingCondition(const Value *V, const Instruction *CtxI, const DominatorTree *DT)
static const Value * getUnderlyingObjectFromInt(const Value *V)
This is the function that does the work of looking through basic ptrtoint+arithmetic+inttoptr sequenc...
static bool isNonZeroMul(const APInt &DemandedElts, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y, bool NSW, bool NUW, unsigned Depth)
static bool rangeMetadataExcludesValue(const MDNode *Ranges, const APInt &Value)
Does the 'Range' metadata (which must be a valid MD_range operand list) ensure that the value it's at...
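A minimal standalone sketch of the same check, assuming the range pairs have already been decoded from the metadata into APInt bounds (the real helper reads them straight off the MDNode): the value is excluded only if every half-open [Lo, Hi) range fails to contain it.
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/ConstantRange.h"
using namespace llvm;
// Hypothetical helper: Bounds holds {Lo0, Hi0, Lo1, Hi1, ...} as decoded from
// !range metadata (which guarantees Lo != Hi for each pair).
static bool excludedByRanges(ArrayRef<APInt> Bounds, const APInt &Value) {
  for (unsigned I = 0, E = Bounds.size(); I + 1 < E; I += 2) {
    ConstantRange CR(Bounds[I], Bounds[I + 1]); // half-open [Lo, Hi)
    if (CR.contains(Value))
      return false;
  }
  return true;
}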
static KnownBits getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts, const KnownBits &KnownLHS, const KnownBits &KnownRHS, const SimplifyQuery &Q, unsigned Depth)
static void breakSelfRecursivePHI(const Use *U, const PHINode *PHI, Value *&ValOut, Instruction *&CtxIOut, const PHINode **PhiOut=nullptr)
static bool isNonZeroSub(const APInt &DemandedElts, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y, unsigned Depth)
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
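The conversion is a small switch; a sketch of its likely shape (the enumerator names are the real ones from ConstantRange and ValueTracking, the exact body is assumed):
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
static OverflowResult mapOverflowResultSketch(ConstantRange::OverflowResult OR) {
  switch (OR) {
  case ConstantRange::OverflowResult::MayOverflow:
    return OverflowResult::MayOverflow;
  case ConstantRange::OverflowResult::AlwaysOverflowsLow:
    return OverflowResult::AlwaysOverflowsLow;
  case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
    return OverflowResult::AlwaysOverflowsHigh;
  case ConstantRange::OverflowResult::NeverOverflows:
    return OverflowResult::NeverOverflows;
  }
  llvm_unreachable("Unknown ConstantRange::OverflowResult");
}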
static void addValueAffectedByCondition(Value *V, function_ref< void(Value *)> InsertAffected)
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool haveNoCommonBitsSetSpecialCases(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower, APInt &Upper, const InstrInfoQuery &IIQ, bool PreferSignedRange)
static Value * lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, Instruction::CastOps *CastOp)
Helps to match a select pattern in case of a type mismatch.
static std::pair< Value *, bool > getDomPredecessorCondition(const Instruction *ContextI)
static constexpr unsigned MaxInstrsToCheckForFree
Maximum number of instructions to check between assume and context instruction.
static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts, const SimplifyQuery &Q, const KnownBits &KnownVal, unsigned Depth)
static std::optional< bool > isImpliedCondFCmps(FCmpInst::Predicate LPred, const Value *L0, const Value *L1, FCmpInst::Predicate RPred, const Value *R0, const Value *R1, const DataLayout &DL, bool LHSIsTrue)
Return true if LHS implies RHS (expanded to its components as "R0 RPred R1") is true.
static bool isKnownNonEqualFromContext(const Value *V1, const Value *V2, const SimplifyQuery &Q, unsigned Depth)
static bool includesPoison(UndefPoisonKind Kind)
static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS)
Match clamp pattern for float types without care about NaNs or signed zeros.
static std::optional< bool > isImpliedCondICmps(CmpPredicate LPred, const Value *L0, const Value *L1, CmpPredicate RPred, const Value *R0, const Value *R1, const DataLayout &DL, bool LHSIsTrue)
Return true if LHS implies RHS (expanded to its components as "R0 RPred R1") is true.
static bool includesUndef(UndefPoisonKind Kind)
static std::optional< bool > isImpliedCondCommonOperandWithCR(CmpPredicate LPred, const ConstantRange &LCR, CmpPredicate RPred, const ConstantRange &RCR)
Return true if "icmp LPred X, LCR" implies "icmp RPred X, RCR" is true.
static ConstantRange getRangeForSelectPattern(const SelectInst &SI, const InstrInfoQuery &IIQ)
static void computeKnownBitsFromOperator(const Operator *I, const APInt &DemandedElts, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth)
static uint64_t GetStringLengthH(const Value *V, SmallPtrSetImpl< const PHINode * > &PHIs, unsigned CharSize)
If we can compute the length of the string pointed to by the specified pointer, return 'len+1'.
static void computeKnownBitsFromShiftOperator(const Operator *I, const APInt &DemandedElts, KnownBits &Known, KnownBits &Known2, const SimplifyQuery &Q, unsigned Depth, function_ref< KnownBits(const KnownBits &, const KnownBits &, bool)> KF)
Compute known bits from a shift operator, including those with a non-constant shift amount.
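For intuition, a self-contained sketch of the kind of transfer function this relies on, using KnownBits directly (the concrete values are illustrative only):
#include "llvm/ADT/APInt.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;
static KnownBits shiftExample() {
  // x has its low byte known to be 0xA5 and everything else unknown.
  KnownBits X(32);
  X.One = APInt(32, 0xA5);
  X.Zero = APInt(32, 0x5A); // the zero bits inside that known low byte
  // The shift amount is known to be exactly 4.
  KnownBits Amt = KnownBits::makeConstant(APInt(32, 4));
  // Known bits of (x << 4): the known low byte slides up by four positions
  // and the vacated low bits become known zero.
  return KnownBits::shl(X, Amt);
}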
static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(const Value *V, bool AllowLifetime, bool AllowDroppable)
static std::optional< bool > isImpliedCondAndOr(const Instruction *LHS, CmpPredicate RHSPred, const Value *RHSOp0, const Value *RHSOp1, const DataLayout &DL, bool LHSIsTrue, unsigned Depth)
Return true if LHS implies RHS is true.
static bool isSignedMinMaxClamp(const Value *Select, const Value *&In, const APInt *&CLow, const APInt *&CHigh)
static bool isNonZeroAdd(const APInt &DemandedElts, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y, bool NSW, bool NUW, unsigned Depth)
static bool directlyImpliesPoison(const Value *ValAssumedPoison, const Value *V, unsigned Depth)
static bool isNonEqualSelect(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
static bool matchTwoInputRecurrence(const PHINode *PN, InstTy *&Inst, Value *&Init, Value *&OtherOp)
static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
static void computeKnownBitsFromCmp(const Value *V, CmpInst::Predicate Pred, Value *LHS, Value *RHS, KnownBits &Known, const SimplifyQuery &Q)
static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TVal, Value *FVal, unsigned Depth)
Recognize variations of: a < c ?
static void unionWithMinMaxIntrinsicClamp(const IntrinsicInst *II, KnownBits &Known)
static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper)
static bool isSameUnderlyingObjectInLoop(const PHINode *PN, const LoopInfo *LI)
PN defines a loop-variant pointer to an object.
static bool isNonEqualPointersWithRecursiveGEP(const Value *A, const Value *B, const SimplifyQuery &Q)
static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II, const APInt *&CLow, const APInt *&CHigh)
static Value * lookThroughCastConst(CmpInst *CmpI, Type *SrcTy, Constant *C, Instruction::CastOps *CastOp)
static bool handleGuaranteedWellDefinedOps(const Instruction *I, const CallableT &Handle)
Enumerates all operands of I that are guaranteed to not be undef or poison.
static void computeKnownBitsFromLerpPattern(const Value *Op0, const Value *Op1, const APInt &DemandedElts, KnownBits &KnownOut, const SimplifyQuery &Q, unsigned Depth)
Try to detect the lerp pattern: a * (b - c) + c * d where a >= 0, b >= 0, c >= 0, d >= 0,...
static KnownFPClass computeKnownFPClassFromContext(const Value *V, const SimplifyQuery &Q)
static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1, bool NSW, bool NUW, const APInt &DemandedElts, KnownBits &KnownOut, KnownBits &Known2, const SimplifyQuery &Q, unsigned Depth)
static Value * getNotValue(Value *V)
If the input value is the result of a 'not' op, constant integer, or vector splat of a constant integ...
static constexpr KnownFPClass::MinMaxKind getMinMaxKind(Intrinsic::ID IID)
static unsigned ComputeNumSignBitsImpl(const Value *V, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
Return the number of times the sign bit of the register is replicated into the other bits.
static void computeKnownBitsFromICmpCond(const Value *V, ICmpInst *Cmp, KnownBits &Known, const SimplifyQuery &SQ, bool Invert)
static bool isKnownNonZeroFromOperator(const Operator *I, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
static bool matchOpWithOpEqZero(Value *Op0, Value *Op1)
static bool isNonZeroRecurrence(const PHINode *PN)
Try to detect a recurrence that monotonically increases/decreases from a non-zero starting value.
static SelectPatternResult matchClamp(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal)
Recognize variations of: CLAMP(v,l,h) ==> ((v) < (l) ?
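The brief above is cut off; the classic scalar shape the matcher is after is the two-compare clamp, shown here as a plain C++ sketch of the pattern (not the matcher itself):
// CLAMP(v, l, h): clamp v into [l, h] with the same compare-and-select
// structure that matchClamp recognizes in IR.
static int clampSketch(int V, int L, int H) {
  return V < L ? L : (V > H ? H : V);
}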
static bool shiftAmountKnownInRange(const Value *ShiftAmount)
Shifts return poison if shiftwidth is larger than the bitwidth.
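A hedged sketch of the essence of that check using the public computeKnownBits entry point (the real helper additionally handles vector shift amounts element by element):
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;
// The shift cannot be poison because of its amount if the largest value the
// amount can possibly take is still smaller than the bit width being shifted
// (in LLVM IR the amount has the same width as the shifted value).
static bool shiftAmountLooksInRange(const Value *ShAmt, const DataLayout &DL) {
  KnownBits Known = computeKnownBits(ShAmt, DL);
  return Known.getMaxValue().ult(Known.getBitWidth());
}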
static bool isEphemeralValueOf(const Instruction *I, const Value *E)
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, unsigned Depth)
Match non-obvious integer minimum and maximum sequences.
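A usage sketch of the surrounding public API, under the assumption that SI is a select such as select (icmp slt %x, %y), %x, %y:
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Value.h"
using namespace llvm;
// Classify a select-of-compare as a min/max if possible.
static bool isSignedMin(Value *SI) {
  Value *LHS = nullptr, *RHS = nullptr;
  SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
  return SPR.Flavor == SPF_SMIN; // SI computes smin(LHS, RHS)
}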
static KnownBits computeKnownBitsForHorizontalOperation(const Operator *I, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth, const function_ref< KnownBits(const KnownBits &, const KnownBits &)> KnownBitsFunc)
static bool handleGuaranteedNonPoisonOps(const Instruction *I, const CallableT &Handle)
Enumerates all operands of I that are guaranteed to not be poison.
static std::optional< std::pair< Value *, Value * > > getInvertibleOperands(const Operator *Op1, const Operator *Op2)
If the pair of operators are the same invertible function, return the the operands of the function co...
static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS)
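For a constant right-hand side the question reduces to a range check; a minimal sketch of that special case (the real helper also looks through vector constants and known bits):
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;
// "icmp Pred X, RHSC" being true rules out X == 0 exactly when the region of
// values satisfying the compare does not contain zero.
static bool cmpExcludesZeroForConstant(CmpInst::Predicate Pred,
                                       const APInt &RHSC) {
  ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, RHSC);
  return !CR.contains(APInt::getZero(RHSC.getBitWidth()));
}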
static void computeKnownBitsFromCond(const Value *V, Value *Cond, KnownBits &Known, const SimplifyQuery &SQ, bool Invert, unsigned Depth)
static bool isKnownNonZeroFromAssume(const Value *V, const SimplifyQuery &Q)
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp PredALHS ARHS" is true.
static const Instruction * safeCxtI(const Value *V, const Instruction *CxtI)
static bool isNonEqualMul(const Value *V1, const Value *V2, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and the multiplication is nuw o...
static bool isImpliedToBeAPowerOfTwoFromCond(const Value *V, bool OrZero, const Value *Cond, bool CondIsTrue)
Return true if we can infer that V is known to be a power of 2 from dominating condition Cond (e....
static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW, bool NUW, const APInt &DemandedElts, KnownBits &Known, KnownBits &Known2, const SimplifyQuery &Q, unsigned Depth)
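For intuition, the heavy lifting is done by KnownBits::mul; a tiny self-contained example of what it can conclude (values picked purely for illustration):
#include "llvm/Support/KnownBits.h"
using namespace llvm;
static KnownBits evenTimesEven() {
  KnownBits A(8), B(8);
  A.Zero.setBit(0); // A is known to be even
  B.Zero.setBit(0); // B is known to be even
  // The product of two even numbers is divisible by 4, so KnownBits::mul
  // should report at least the two low bits as known zero.
  return KnownBits::mul(A, B);
}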
static bool isKnownNonNaN(const Value *V, FastMathFlags FMF)
static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II, bool UseInstrInfo)
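One easy case from the switch seen in the listing, reconstructed as a standalone sketch: llvm.ctpop.iN can only produce values 0..N, so its range is the half-open interval [0, N+1).
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
using namespace llvm;
// Mirrors the "APInt(Width, Width) + 1" upper bound from the listing; assumes
// Width is a typical integer width (i8 or wider) so Width + 1 does not wrap.
static ConstantRange ctpopRange(unsigned Width) {
  return ConstantRange(APInt(Width, 0), APInt(Width, Width) + 1);
}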
static void computeKnownFPClassForFPTrunc(const Operator *Op, const APInt &DemandedElts, FPClassTest InterestedClasses, KnownFPClass &Known, const SimplifyQuery &Q, unsigned Depth)
static Value * BuildSubAggregate(Value *From, Value *To, Type *IndexedType, SmallVectorImpl< unsigned > &Idxs, unsigned IdxSkip, BasicBlock::iterator InsertBefore)
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
unsigned popcount() const
Count the number of bits set.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
unsigned ceilLogBase2() const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
void clearAllBits()
Set every bit to 0.
LLVM_ABI APInt reverseBits() const
bool sle(const APInt &RHS) const
Signed less or equal comparison.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
bool getBoolValue() const
Convert APInt to a boolean value.
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void clearSignBit()
Set the sign bit to 0.
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Class to represent array types.
This represents the llvm.assume intrinsic.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
LLVM_ABI std::optional< unsigned > getVScaleRangeMax() const
Returns the maximum value for the vscale_range attribute or std::nullopt when unknown.
LLVM_ABI unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
LLVM_ABI bool isSingleEdge() const
Check if this is the only edge between Start and End.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
static bool isFPPredicate(Predicate P)
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
static bool isIntPredicate(Predicate P)
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
bool hasSameSign() const
Query samesign information, for optimizations.
An array constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
StringRef getAsString() const
If this array is isString(), then this method returns the array as a StringRef.
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI std::optional< ConstantFPRange > makeExactFCmpRegion(FCmpInst::Predicate Pred, const APFloat &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
This class represents a range of values.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
LLVM_ABI bool isAllNegative() const
Return true if all values in this range are negative.
LLVM_ABI OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
LLVM_ABI KnownBits toKnownBits() const
Return known bits for values in this range.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
LLVM_ABI bool isAllNonNegative() const
Return true if all values in this range are non-negative.
static LLVM_ABI ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI OverflowResult signedAddMayOverflow(const ConstantRange &Other) const
Return whether signed add of the two ranges always/never overflows.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
This is an important base class in LLVM.
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
LLVM_ABI Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isZeroValue() const
Return true if the value is negative zero or null value.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
unsigned getAddressSizeInBits(unsigned AS) const
The size in bits of an address in for the given AS.
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI unsigned getPointerTypeSizeInBits(Type *) const
The pointer representation size in bits for this type.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ArrayRef< BranchInst * > conditionsFor(const Value *V) const
Access the list of branches which affect this value.
DomTreeNodeBase * getIDom() const
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This instruction compares its operands according to the predicate given to the constructor.
Utility class for floating point operations which can have information about relaxed accuracy require...
Convenience struct for specifying and reasoning about fast-math flags.
bool noSignedZeros() const
void setNoSignedZeros(bool B=true)
void setNoNaNs(bool B=true)
const BasicBlock & getEntryBlock() const
bool hasNoSync() const
Determine if the call can synchroize with other threads.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
PointerType * getType() const
Global values are always pointers.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this global belongs to.
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getSwappedCmpPredicate() const
CmpPredicate getInverseCmpPredicate() const
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
static LLVM_ABI std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
This instruction inserts a struct field of array element value into an aggregate value.
Value * getAggregateOperand()
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
Value * getPointerOperand()
Align getAlign() const
Return the alignment of the access that is being performed.
bool isLoopHeader(const BlockT *BB) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
const MDOperand & getOperand(unsigned I) const
This is a utility class that provides an abstraction for the common functionality between Instruction...
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
iterator_range< const_block_iterator > blocks() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A udiv, sdiv, lshr, or ashr instruction, which can be marked as "exact", indicating that no bits are ...
bool isExact() const
Test whether this division is known to be exact, with zero remainder.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
This instruction constructs a fixed permutation of two input vectors.
VectorType * getType() const
Overload to return most specific vector type.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
unsigned getNumElements() const
Random access to the elements.
Type * getElementType(unsigned N) const
Provides information about what library functions are available for the current target.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI uint64_t getArrayNumElements() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static LLVM_ABI IntegerType * getInt16Ty(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
LLVM_ABI const fltSemantics & getFltSemantics() const
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
LLVM_ABI unsigned getOperandNo() const
Return the operand # of this use in its User.
User * getUser() const
Returns the User that contains this Use.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
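A sketch of the offset-stripping API above (assumes V has pointer type; the helper name is illustrative):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"

// Strips constant GEP offsets from V, accumulating them into Offset.
static const llvm::Value *stripConstOffsets(const llvm::Value *V,
                                            const llvm::DataLayout &DL,
                                            llvm::APInt &Offset) {
  // The accumulator must be sized to the pointer index width.
  Offset = llvm::APInt(DL.getIndexTypeSizeInBits(V->getType()), 0);
  return V->stripAndAccumulateConstantOffsets(DL, Offset,
                                              /*AllowNonInbounds=*/true);
}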
const KnownBits & getKnownBits(const SimplifyQuery &Q) const
PointerType getValue() const
Represents an op.with.overflow intrinsic.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
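A short sketch of handling fixed versus scalable sizes (assumes TS came from a DataLayout size query; the helper is illustrative):

#include "llvm/Support/TypeSize.h"
#include <cstdint>

static uint64_t fixedOrMinBits(llvm::TypeSize TS) {
  if (TS.isScalable())
    return TS.getKnownMinValue(); // scaled by vscale at run time
  return TS.getFixedValue();      // exact compile-time size
}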
An efficient, type-erasing, non-owning reference to a callable.
StructType * getStructTypeOrNull() const
TypeSize getSequentialElementStride(const DataLayout &DL) const
Type * getIndexedType() const
const ParentTy * getParent() const
self_iterator getIterator()
A range adaptor for a pair of iterators.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A to NewBitWidth bits.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
@ C
The default llvm calling convention, compatible with C.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrToIntSameSize_match< OpTy > m_PtrToIntSameSize(const DataLayout &DL, const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
auto m_LogicalOp()
Matches either L && R or L || R where L and R are arbitrary values.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
cst_pred_ty< is_power2_or_zero > m_Power2OrZero()
Match an integer or vector of 0 or power-of-2 values.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right shift operations (lshr or ashr).
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
auto match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap, true > m_c_NUWAdd(const LHS &L, const RHS &R)
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
match_combine_or< MaxMin_match< FCmpInst, LHS, RHS, ofmin_pred_ty >, MaxMin_match< FCmpInst, LHS, RHS, ufmin_pred_ty > > m_OrdOrUnordFMin(const LHS &L, const RHS &R)
Match an 'ordered' or 'unordered' floating point minimum function.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > m_c_SMin(const LHS &L, const RHS &R)
Matches an SMin with LHS and RHS in either order.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true > m_c_UMax(const LHS &L, const RHS &R)
Matches a UMax with LHS and RHS in either order.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > m_c_UMin(const LHS &L, const RHS &R)
Matches a UMin with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches an Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
match_combine_or< MaxMin_match< FCmpInst, LHS, RHS, ofmax_pred_ty >, MaxMin_match< FCmpInst, LHS, RHS, ufmax_pred_ty > > m_OrdOrUnordFMax(const LHS &L, const RHS &R)
Match an 'ordered' or 'unordered' floating point maximum function.
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true > m_c_SMax(const LHS &L, const RHS &R)
Matches an SMax with LHS and RHS in either order.
cstfp_pred_ty< custom_checkfn< APFloat > > m_CheckedFp(function_ref< bool(const APFloat &)> CheckFn)
Match a float or vector where CheckFn(ele) for each element is true.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
cst_pred_ty< is_nonpositive > m_NonPositive()
Match an integer or vector of non-positive values.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
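An illustrative sketch of using the matchers above (the helper name and the matched shape are assumptions, not from this file):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

// Matches a single-use "add X, C" and binds X and the constant C.
static bool matchOneUseAddConst(llvm::Value *V, llvm::Value *&X,
                                const llvm::APInt *&C) {
  using namespace llvm::PatternMatch;
  return match(V, m_OneUse(m_Add(m_Value(X), m_APInt(C))));
}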
static unsigned decodeVSEW(unsigned VSEW)
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul)
static constexpr unsigned RVVBitsPerBlock
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool haveNoCommonBitsSet(const WithCache< const Value * > &LHSCache, const WithCache< const Value * > &RHSCache, const SimplifyQuery &SQ)
Return true if LHS and RHS have no common bits set.
LLVM_ABI bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root, Instruction *OnPathTo, DominatorTree *DT)
Return true if undefined behavior would provably be executed on the path to OnPathTo if Root produced...
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
LLVM_ABI bool willNotFreeBetween(const Instruction *Assume, const Instruction *CtxI)
Returns true, if no instruction between Assume and CtxI may free memory and the function is marked as...
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not an infinity or if the floating-point vector val...
LLVM_ABI void computeKnownBitsFromContext(const Value *V, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0)
Merge bits known from context-dependent facts into Known.
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
LLVM_ABI const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns call pointer argument that is considered the same by aliasing rules.
LLVM_ABI bool isAssumeLikeIntrinsic(const Instruction *I)
Return true if it is an intrinsic that cannot be speculated but also cannot trap.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
LLVM_ABI APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isOnlyUsedInZeroComparison(const Instruction *CxtI)
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
LLVM_ABI bool getConstantStringInfo(const Value *V, StringRef &Str, bool TrimAtNul=true)
This function computes the length of a null-terminated C string pointed to by V.
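A small sketch of reading a constant C string behind a pointer (the helper name is illustrative):

#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/ValueTracking.h"

static bool constantCStringLength(const llvm::Value *V, uint64_t &Len) {
  llvm::StringRef Str;
  if (!llvm::getConstantStringInfo(V, Str))
    return false;   // not a constant, null-terminated string
  Len = Str.size(); // GetStringLength(V) would report Len + 1
  return true;
}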
LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater or equal than requested.
LLVM_ABI bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V)
Return true if the only users of this pointer are lifetime markers or droppable instructions.
LLVM_ABI Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
LLVM_ABI Value * stripNullTest(Value *V)
Returns the inner value X if the expression has the form f(X) where f(X) == 0 if and only if X == 0,...
LLVM_ABI bool getUnderlyingObjectsForCodeGen(const Value *V, SmallVectorImpl< Value * > &Objects)
This is a wrapper around getUnderlyingObjects and adds support for basic ptrtoint+arithmetic+inttoptr...
LLVM_ABI std::pair< Intrinsic::ID, bool > canConvertToMinOrMaxIntrinsic(ArrayRef< Value * > VL)
Check if the values in VL are select instructions that can be converted to a min or max (vector) intr...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
LLVM_ABI bool isGuaranteedToExecuteForEveryIteration(const Instruction *I, const Loop *L)
Return true if this function can prove that the instruction I is executed for every iteration of the ...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI bool mustSuppressSpeculation(const LoadInst &LI)
Return true if speculation of the given load must be suppressed to avoid ordering or interfering with...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
gep_type_iterator gep_type_end(const User *GEP)
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
bool isa_and_nonnull(const Y &Val)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI bool canIgnoreSignBitOfZero(const Use &U)
Return true if the sign bit of the FP value can be ignored by the user when the value is zero.
LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
std::tuple< Value *, FPClassTest, FPClassTest > fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS, FPClassTest RHSClass, bool LookThroughSrc=true)
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
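A minimal sketch of bounding an integer value (parameters beyond V and ForSigned are left at their defaults; the helper name is an assumption):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"

static llvm::APInt unsignedUpperBound(const llvm::Value *V) {
  llvm::ConstantRange CR = llvm::computeConstantRange(V, /*ForSigned=*/false);
  return CR.getUnsignedMax();
}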
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
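A sketch of checking that the low N bits of a value are known zero (assumes N does not exceed the scalar bit width and that the caller set up SQ; the helper is illustrative):

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"

static bool lowBitsKnownZero(const llvm::Value *V, unsigned N,
                             const llvm::SimplifyQuery &SQ) {
  unsigned BW = V->getType()->getScalarSizeInBits();
  return llvm::MaskedValueIsZero(V, llvm::APInt::getLowBitsSet(BW, N), SQ);
}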
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of WO's result is used only along the paths control dependen...
LLVM_ABI RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)
This extracts the Knowledge from an element of an operand bundle.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
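A short sketch of detecting a first-order recurrence through a phi (the helper name is illustrative):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"

static bool isSimpleRecurrence(const llvm::PHINode *P) {
  llvm::BinaryOperator *BO = nullptr;
  llvm::Value *Start = nullptr, *Step = nullptr;
  return llvm::matchSimpleRecurrence(P, BO, Start, Step);
}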
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ, bool IsNSW=false)
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isGuard(const User *U)
Returns true iff U has the semantics of a guard expressed in the form of a call to llvm.experimental....
LLVM_ABI SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm, bool Invert, const SimplifyQuery &Q, unsigned Depth=0)
Adjust Known for the given select Arm to include information from the select Cond.
LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be negative (i.e.
LLVM_ABI OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
LLVM_ABI bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(const CallBase *Call, bool MustPreserveNullness)
{launder,strip}.invariant.group returns pointer that aliases its argument, and it only captures point...
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI void getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS)
Compute the demanded elements mask of horizontal binary operations.
LLVM_ABI SelectPatternResult getSelectPattern(CmpInst::Predicate Pred, SelectPatternNaNBehavior NaNBehavior=SPNB_NA, bool Ordered=false)
Determine the pattern for predicate X Pred Y ? X : Y.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
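A minimal sketch of querying known bits with only a DataLayout (assumes V is an integer value; the optional analyses are left null; the helper name is an assumption):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/KnownBits.h"

static llvm::KnownBits knownBitsOf(const llvm::Value *V,
                                   const llvm::DataLayout &DL) {
  llvm::KnownBits Known(V->getType()->getScalarSizeInBits());
  llvm::computeKnownBits(V, Known, DL);
  return Known; // Known.Zero / Known.One now hold the proven bits
}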
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
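A sketch of recognizing a select-based min/max (the helper name is illustrative; isMinOrMax is described near the end of this listing):

#include "llvm/Analysis/ValueTracking.h"

static bool isSelectMinMax(llvm::Value *V) {
  llvm::Value *LHS = nullptr, *RHS = nullptr;
  llvm::SelectPatternResult SPR = llvm::matchSelectPattern(V, LHS, RHS);
  return llvm::SelectPatternResult::isMinOrMax(SPR.Flavor);
}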
LLVM_ABI bool matchSimpleBinaryIntrinsicRecurrence(const IntrinsicInst *I, PHINode *&P, Value *&Init, Value *&OtherOp)
Attempt to match a simple value-accumulating recurrence of the form: llvm.intrinsic....
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI bool cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if we can prove that the specified FP value is never equal to -0.0.
LLVM_ABI bool programUndefinedIfUndefOrPoison(const Instruction *Inst)
Return true if this function can prove that if Inst is executed and yields a poison value or undef bi...
LLVM_ABI void adjustKnownFPClassForSelectArm(KnownFPClass &Known, Value *Cond, Value *Arm, bool Invert, const SimplifyQuery &Q, unsigned Depth=0)
Adjust Known for the given select Arm to include information from the select Cond.
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI bool collectPossibleValues(const Value *V, SmallPtrSetImpl< const Constant * > &Constants, unsigned MaxCount, bool AllowUndefOrPoison=true)
Enumerates all possible immediate values of V and inserts them into the set Constants.
LLVM_ABI uint64_t GetStringLength(const Value *V, unsigned CharSize=8)
If we can compute the length of the string pointed to by the specified pointer, return 'len+1'.
LLVM_ABI OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
LLVM_ABI bool isKnownInversion(const Value *X, const Value *Y)
Return true iff X is known to be the logical inversion of Y, i.e. the two values always evaluate to opposite boolean results.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI bool intrinsicPropagatesPoison(Intrinsic::ID IID)
Return whether this intrinsic propagates poison for all operands.
LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
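A short sketch of a context-sensitive non-zero check (the SimplifyQuery construction and helper name are assumptions):

#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"

static bool provenNonZeroAt(const llvm::Value *V, const llvm::DataLayout &DL,
                            const llvm::Instruction *CtxI) {
  llvm::SimplifyQuery SQ(DL, CtxI);
  return llvm::isKnownNonZero(V, SQ);
}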
constexpr int PoisonMaskElem
LLVM_ABI RetainedKnowledge getKnowledgeValidInContext(const Value *V, ArrayRef< Attribute::AttrKind > AttrKinds, AssumptionCache &AC, const Instruction *CtxI, const DominatorTree *DT=nullptr)
Return a valid Knowledge associated to the Value V if its Attribute kind is in AttrKinds and the know...
LLVM_ABI bool isSafeToSpeculativelyExecuteWithOpcode(unsigned Opcode, const Instruction *Inst, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
This returns the same result as isSafeToSpeculativelyExecute if Opcode is the actual opcode of Inst.
LLVM_ABI bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
LLVM_ABI Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB, const TargetLibraryInfo *TLI)
Map a call instruction to an intrinsic ID.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI const Value * getUnderlyingObjectAggressive(const Value *V)
Like getUnderlyingObject(), but will try harder to find a single underlying object.
LLVM_ABI Intrinsic::ID getMinMaxIntrinsic(SelectPatternFlavor SPF)
Convert given SPF to equivalent min/max intrinsic.
LLVM_ABI SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, FastMathFlags FMF=FastMathFlags(), Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
LLVM_ABI OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
Combine constant ranges from computeConstantRange() and computeKnownBits().
SelectPatternNaNBehavior
Behavior when a floating point min/max is given one NaN and one non-NaN as input.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either the NaN or the non-NaN.
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
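An illustrative sketch of checking whether a value fits in N signed bits (the helper name is an assumption):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"

static bool fitsInSignedBits(const llvm::Value *V, unsigned N,
                             const llvm::DataLayout &DL) {
  unsigned BW = V->getType()->getScalarSizeInBits();
  unsigned SignBits = llvm::ComputeNumSignBits(V, DL);
  return BW - SignBits + 1 <= N; // significant bits, as in ComputeMaxSignificantBits
}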
constexpr unsigned BitWidth
LLVM_ABI KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS, const SimplifyQuery &SQ, unsigned Depth=0)
Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
LLVM_ABI OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI bool isKnownNeverInfOrNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point value can never contain a NaN or infinity.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI Value * isBytewiseValue(Value *V, const DataLayout &DL)
If the specified value can be set by repeating the same byte in memory, return the i8 value that it i...
LLVM_ABI std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped...
LLVM_ABI unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Get the upper bound on bit size for this Value Op as a signed integer.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI bool isKnownIntegral(const Value *V, const SimplifyQuery &SQ, FastMathFlags FMF)
Return true if the floating-point value V is known to be an integer value.
LLVM_ABI OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
unsigned Log2(Align A)
Returns the log2 of the alignment.
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
LLVM_ABI Value * FindInsertedValue(Value *V, ArrayRef< unsigned > idx_range, std::optional< BasicBlock::iterator > InsertBefore=std::nullopt)
Given an aggregate and a sequence of indices, see if the scalar value indexed is already around as a...
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be positive (i.e.
LLVM_ABI Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
LLVM_ABI bool cannotBeOrderedLessThanZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if we can prove that the specified FP value is either NaN or never less than -0....
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
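A minimal sketch of collecting the base objects behind a pointer (the helper name is illustrative):

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ValueTracking.h"

static void collectBaseObjects(const llvm::Value *Ptr,
                               llvm::SmallVectorImpl<const llvm::Value *> &Bases) {
  // Looks through GEPs, casts, phis and selects up to the default depth.
  llvm::getUnderlyingObjects(Ptr, Bases);
}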
LLVM_ABI bool mayHaveNonDefUseDependency(const Instruction &I)
Returns true if the result or effects of the given instruction I depend on values not reachable through...
LLVM_ABI bool isTriviallyVectorizable(Intrinsic::ID ID)
Identify if the intrinsic is trivially vectorizable.
LLVM_ABI bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
LLVM_ABI std::optional< bool > computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return false if we can prove that the specified FP value's sign bit is 0.
LLVM_ABI bool canIgnoreSignBitOfNaN(const Use &U)
Return true if the sign bit of the FP value can be ignored by the user when the value is NaN.
LLVM_ABI void findValuesAffectedByCondition(Value *Cond, bool IsAssume, function_ref< void(Value *)> InsertAffected)
Call InsertAffected on all Values whose known bits / value may be affected by the condition Cond.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
SmallPtrSet< Value *, 4 > AffectedValues
Represents offset+length into a ConstantDataArray.
const ConstantDataArray * Array
ConstantDataArray pointer.
Represent subnormal handling kind for floating point instruction inputs and outputs.
static constexpr DenormalMode getDynamic()
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
bool isExact(const BinaryOperator *Op) const
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
bool hasNoSignedZeros(const InstT *Op) const
bool hasNoSignedWrap(const InstT *Op) const
bool hasNoUnsignedWrap(const InstT *Op) const
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
static LLVM_ABI KnownBits sadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.sadd.sat(LHS, RHS)
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
LLVM_ABI KnownBits blsi() const
Compute known bits for X & -X, which keeps only the lowest set bit of X.
void makeNonNegative()
Make this value non-negative.
static LLVM_ABI KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
LLVM_ABI KnownBits reduceAdd(unsigned NumElts) const
Compute known bits for horizontal add for a vector with NumElts elements, where each element has the ...
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static LLVM_ABI KnownBits ssub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.ssub.sat(LHS, RHS)
static LLVM_ABI KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
LLVM_ABI KnownBits blsmsk() const
Compute known bits for X ^ (X - 1), which has all bits up to and including the lowest set bit of X se...
void makeNegative()
Make this value negative.
void setAllConflict()
Make all bits known to be both zero and one.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
bool hasConflict() const
Returns true if there is conflicting information.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
bool isConstant() const
Returns true if we know the value of all bits.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
unsigned countMinTrailingOnes() const
Returns the minimum number of trailing one bits.
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from addition of LHS and RHS.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static LLVM_ABI KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
static LLVM_ABI KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void setAllOnes()
Make all bits known to be one and discard any previous information.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static LLVM_ABI KnownBits uadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.uadd.sat(LHS, RHS)
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
static LLVM_ABI KnownBits clmul(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for clmul(LHS, RHS).
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
KnownBits sextOrTrunc(unsigned BitWidth) const
Return known bits for a sign extension or truncation of the value we're tracking.
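A small sketch combining the KnownBits helpers above (the wrap flags and helper name are assumptions):

#include "llvm/Support/KnownBits.h"

static llvm::KnownBits knownAdd(const llvm::KnownBits &L,
                                const llvm::KnownBits &R) {
  // add() propagates facts through the addition; intersectWith()/unionWith()
  // merge facts coming from different paths or sources.
  return llvm::KnownBits::add(L, R, /*NSW=*/false, /*NUW=*/false);
}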
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
bool cannotBeOrderedGreaterThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never greater tha...
static LLVM_ABI KnownFPClass sin(const KnownFPClass &Src)
Report known values for sin.
static LLVM_ABI KnownFPClass fdiv_self(const KnownFPClass &Src, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fdiv x, x.
static constexpr FPClassTest OrderedGreaterThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
void knownNot(FPClassTest RuleOut)
static LLVM_ABI KnownFPClass fmul(const KnownFPClass &LHS, const KnownFPClass &RHS, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fmul.
static LLVM_ABI KnownFPClass fadd_self(const KnownFPClass &Src, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fadd x, x.
void copysign(const KnownFPClass &Sign)
static KnownFPClass square(const KnownFPClass &Src, DenormalMode Mode=DenormalMode::getDynamic())
static LLVM_ABI KnownFPClass fsub(const KnownFPClass &LHS, const KnownFPClass &RHS, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fsub.
KnownFPClass unionWith(const KnownFPClass &RHS) const
static LLVM_ABI KnownFPClass canonicalize(const KnownFPClass &Src, DenormalMode DenormMode=DenormalMode::getDynamic())
Apply the canonicalize intrinsic to this value.
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
static LLVM_ABI KnownFPClass log(const KnownFPClass &Src, DenormalMode Mode=DenormalMode::getDynamic())
Propagate known class for log/log2/log10.
static LLVM_ABI KnownFPClass fdiv(const KnownFPClass &LHS, const KnownFPClass &RHS, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fdiv.
static LLVM_ABI KnownFPClass roundToIntegral(const KnownFPClass &Src, bool IsTrunc, bool IsMultiUnitFPType)
Propagate known class for rounding intrinsics (trunc, floor, ceil, rint, nearbyint,...
static LLVM_ABI KnownFPClass cos(const KnownFPClass &Src)
Report known values for cos.
static LLVM_ABI KnownFPClass minMaxLike(const KnownFPClass &LHS, const KnownFPClass &RHS, MinMaxKind Kind, DenormalMode DenormMode=DenormalMode::getDynamic())
KnownFPClass intersectWith(const KnownFPClass &RHS) const
bool isKnownNeverNegInfinity() const
Return true if it's known this can never be -infinity.
bool isKnownNeverNegSubnormal() const
Return true if it's known this can never be a negative subnormal.
static LLVM_ABI KnownFPClass exp(const KnownFPClass &Src)
Report known values for exp, exp2 and exp10.
static LLVM_ABI KnownFPClass frexp_mant(const KnownFPClass &Src, DenormalMode Mode=DenormalMode::getDynamic())
Propagate known class for mantissa component of frexp.
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
static LLVM_ABI KnownFPClass fpext(const KnownFPClass &KnownSrc, const fltSemantics &DstTy, const fltSemantics &SrcTy)
Propagate known class for fpext.
bool isKnownNeverNegZero() const
Return true if it's known this can never be a negative zero.
static LLVM_ABI KnownFPClass fma(const KnownFPClass &LHS, const KnownFPClass &RHS, const KnownFPClass &Addend, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fma.
void propagateNaN(const KnownFPClass &Src, bool PreserveSign=false)
static LLVM_ABI KnownFPClass fptrunc(const KnownFPClass &KnownSrc)
Propagate known class for fptrunc.
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
void signBitMustBeOne()
Assume the sign bit is one.
LLVM_ABI void propagateCanonicalizingSrc(const KnownFPClass &Src, DenormalMode Mode)
Report known classes if Src is evaluated through a potentially canonicalizing operation.
void signBitMustBeZero()
Assume the sign bit is zero.
static LLVM_ABI KnownFPClass sqrt(const KnownFPClass &Src, DenormalMode Mode=DenormalMode::getDynamic())
Propagate known class for sqrt.
LLVM_ABI bool isKnownNeverLogicalPosZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a positive zero.
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
static LLVM_ABI KnownFPClass fadd(const KnownFPClass &LHS, const KnownFPClass &RHS, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fadd.
LLVM_ABI bool isKnownNeverLogicalNegZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a negative zero.
static LLVM_ABI KnownFPClass fma_square(const KnownFPClass &Squared, const KnownFPClass &Addend, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fma(Squared, Squared, Addend).
static LLVM_ABI KnownFPClass frem_self(const KnownFPClass &Src, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for frem.
bool isKnownNeverPosSubnormal() const
Return true if it's known this can never be a positive subnormal.
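A sketch of querying floating-point class facts (assumes V is a scalar FP value, so DemandedElts is APInt(1, 1), and that the caller set up SQ; the helper name is illustrative):

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"

static bool neverNaNOrNegZero(const llvm::Value *V,
                              const llvm::SimplifyQuery &SQ) {
  llvm::KnownFPClass K =
      llvm::computeKnownFPClass(V, llvm::APInt(1, 1), llvm::fcAllFlags, SQ);
  return K.isKnownNeverNaN() && K.isKnownNeverNegZero();
}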
Represent one information held inside an operand bundle of an llvm.assume.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
Return true if SPF is a min/max pattern (i.e., not SPF_UNKNOWN, SPF_ABS, or SPF_NABS).
SimplifyQuery getWithoutCondContext() const
SimplifyQuery getWithInstruction(const Instruction *I) const
const DomConditionCache * DC