42#define DEBUG_TYPE "gisel-known-bits"
50 "Analysis for ComputingKnownBits",
false,
true)
53 : MF(MF),
MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),
58 switch (
MI->getOpcode()) {
59 case TargetOpcode::COPY:
61 case TargetOpcode::G_ASSERT_ALIGN: {
63 return Align(
MI->getOperand(2).getImm());
65 case TargetOpcode::G_FRAME_INDEX: {
66 int FrameIdx =
MI->getOperand(1).getIndex();
67 return MF.getFrameInfo().getObjectAlign(FrameIdx);
69 case TargetOpcode::G_INTRINSIC:
70 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
71 case TargetOpcode::G_INTRINSIC_CONVERGENT:
72 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
74 return TL.computeKnownAlignForTargetInstr(*
this, R, MRI,
Depth + 1);
79 assert(
MI.getNumExplicitDefs() == 1 &&
80 "expected single return generic instruction");
85 const LLT Ty = MRI.getType(R);
95 const APInt &DemandedElts,
103 LLT Ty = MRI.getType(R);
104 unsigned BitWidth = Ty.getScalarSizeInBits();
116[[maybe_unused]]
static void
119 <<
"] Computed for: " <<
MI <<
"[" <<
Depth <<
"] Known: 0x"
130 const APInt &DemandedElts,
161 const APInt &DemandedElts,
164 unsigned Opcode =
MI.getOpcode();
165 LLT DstTy = MRI.getType(R);
179 "DemandedElt width should equal the fixed vector number of elements");
182 "DemandedElt width should be 1 for scalars or scalable vectors");
207 TL.computeKnownBitsForTargetInstr(*
this, R, Known, DemandedElts, MRI,
210 case TargetOpcode::G_BUILD_VECTOR: {
215 if (!DemandedElts[
I])
229 case TargetOpcode::G_SPLAT_VECTOR: {
237 case TargetOpcode::COPY:
238 case TargetOpcode::G_PHI:
239 case TargetOpcode::PHI: {
245 assert(
MI.getOperand(0).getSubReg() == 0 &&
"Is this code in SSA?");
248 for (
unsigned Idx = 1; Idx <
MI.getNumOperands(); Idx += 2) {
251 LLT SrcTy = MRI.getType(SrcReg);
259 if (SrcReg.
isVirtual() && Src.getSubReg() == 0 &&
269 Depth + (Opcode != TargetOpcode::COPY));
284 case TargetOpcode::G_CONSTANT: {
288 case TargetOpcode::G_FRAME_INDEX: {
289 int FrameIdx =
MI.getOperand(1).getIndex();
290 TL.computeKnownBitsForFrameIndex(FrameIdx, Known, MF);
293 case TargetOpcode::G_SUB: {
301 case TargetOpcode::G_XOR: {
310 case TargetOpcode::G_PTR_ADD: {
314 LLT Ty = MRI.getType(
MI.getOperand(1).getReg());
315 if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
319 case TargetOpcode::G_ADD: {
327 case TargetOpcode::G_AND: {
337 case TargetOpcode::G_OR: {
347 case TargetOpcode::G_MUL: {
355 case TargetOpcode::G_UMULH: {
363 case TargetOpcode::G_SMULH: {
371 case TargetOpcode::G_SELECT: {
372 computeKnownBitsMin(
MI.getOperand(2).getReg(),
MI.getOperand(3).getReg(),
373 Known, DemandedElts,
Depth + 1);
376 case TargetOpcode::G_SMIN: {
386 case TargetOpcode::G_SMAX: {
396 case TargetOpcode::G_UMIN: {
405 case TargetOpcode::G_UMAX: {
414 case TargetOpcode::G_FCMP:
415 case TargetOpcode::G_ICMP: {
418 if (TL.getBooleanContents(DstTy.
isVector(),
419 Opcode == TargetOpcode::G_FCMP) ==
425 case TargetOpcode::G_SEXT: {
433 case TargetOpcode::G_ASSERT_SEXT:
434 case TargetOpcode::G_SEXT_INREG: {
437 Known = Known.
sextInReg(
MI.getOperand(2).getImm());
440 case TargetOpcode::G_ANYEXT: {
446 case TargetOpcode::G_LOAD: {
454 case TargetOpcode::G_SEXTLOAD:
455 case TargetOpcode::G_ZEXTLOAD: {
462 Known = Opcode == TargetOpcode::G_SEXTLOAD
467 case TargetOpcode::G_ASHR: {
476 case TargetOpcode::G_LSHR: {
485 case TargetOpcode::G_SHL: {
494 case TargetOpcode::G_INTTOPTR:
495 case TargetOpcode::G_PTRTOINT:
500 case TargetOpcode::G_ZEXT:
501 case TargetOpcode::G_TRUNC: {
507 case TargetOpcode::G_ASSERT_ZEXT: {
511 unsigned SrcBitWidth =
MI.getOperand(2).getImm();
512 assert(SrcBitWidth &&
"SrcBitWidth can't be zero");
514 Known.
Zero |= (~InMask);
515 Known.
One &= (~Known.Zero);
518 case TargetOpcode::G_ASSERT_ALIGN: {
519 int64_t LogOfAlign =
Log2_64(
MI.getOperand(2).getImm());
528 case TargetOpcode::G_MERGE_VALUES: {
529 unsigned NumOps =
MI.getNumOperands();
530 unsigned OpSize = MRI.getType(
MI.getOperand(1).getReg()).getSizeInBits();
532 for (
unsigned I = 0;
I !=
NumOps - 1; ++
I) {
535 DemandedElts,
Depth + 1);
540 case TargetOpcode::G_UNMERGE_VALUES: {
541 unsigned NumOps =
MI.getNumOperands();
543 LLT SrcTy = MRI.getType(SrcReg);
545 if (SrcTy.isVector() && SrcTy.getScalarType() != DstTy.
getScalarType())
550 for (; DstIdx !=
NumOps - 1 &&
MI.getOperand(DstIdx).
getReg() != R;
554 APInt SubDemandedElts = DemandedElts;
555 if (SrcTy.isVector()) {
558 DemandedElts.
zext(SrcTy.getNumElements()).
shl(DstIdx * DstLanes);
564 if (SrcTy.isVector())
565 Known = std::move(SrcOpKnown);
570 case TargetOpcode::G_BSWAP: {
576 case TargetOpcode::G_BITREVERSE: {
582 case TargetOpcode::G_CTPOP: {
594 case TargetOpcode::G_UBFX: {
595 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
605 case TargetOpcode::G_SBFX: {
606 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
623 case TargetOpcode::G_UADDO:
624 case TargetOpcode::G_UADDE:
625 case TargetOpcode::G_SADDO:
626 case TargetOpcode::G_SADDE: {
627 if (
MI.getOperand(1).getReg() == R) {
630 if (TL.getBooleanContents(DstTy.
isVector(),
false) ==
637 assert(
MI.getOperand(0).getReg() == R &&
638 "We only compute knownbits for the sum here.");
641 if (Opcode == TargetOpcode::G_UADDE || Opcode == TargetOpcode::G_SADDE) {
645 Carry = Carry.
trunc(1);
657 case TargetOpcode::G_USUBO:
658 case TargetOpcode::G_USUBE:
659 case TargetOpcode::G_SSUBO:
660 case TargetOpcode::G_SSUBE:
661 case TargetOpcode::G_UMULO:
662 case TargetOpcode::G_SMULO: {
663 if (
MI.getOperand(1).getReg() == R) {
666 if (TL.getBooleanContents(DstTy.
isVector(),
false) ==
673 case TargetOpcode::G_CTLZ:
674 case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
684 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
691 LLT VecVT = MRI.getType(InVec);
709 if (ConstEltNo && ConstEltNo->ult(NumSrcElts))
716 case TargetOpcode::G_SHUFFLE_VECTOR: {
717 APInt DemandedLHS, DemandedRHS;
720 unsigned NumElts = MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
722 DemandedElts, DemandedLHS, DemandedRHS))
743 case TargetOpcode::G_CONCAT_VECTORS: {
744 if (MRI.getType(
MI.getOperand(0).getReg()).isScalableVector())
749 unsigned NumSubVectorElts =
750 MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
754 DemandedElts.
extractBits(NumSubVectorElts,
I * NumSubVectorElts);
766 case TargetOpcode::G_ABS: {
780 Ty = Ty.getScalarType();
789 LLT Ty = MRI.getType(R);
792 computeKnownFPClass(R, DemandedElts, InterestedClasses, Known,
Depth);
795void GISelValueTracking::computeKnownFPClassForFPTrunc(
803 KnownFPClass KnownSrc;
804 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
817void GISelValueTracking::computeKnownFPClass(
Register R,
818 const APInt &DemandedElts,
822 assert(Known.
isUnknown() &&
"should not be called with known information");
832 MachineInstr &
MI = *MRI.getVRegDef(R);
833 unsigned Opcode =
MI.getOpcode();
834 LLT DstTy = MRI.getType(R);
842 switch (Cst->getKind()) {
844 auto APF = Cst->getScalarValue();
846 Known.
SignBit = APF.isNegative();
851 bool SignBitAllZero =
true;
852 bool SignBitAllOne =
true;
854 for (
auto C : *Cst) {
857 SignBitAllZero =
false;
859 SignBitAllOne =
false;
862 if (SignBitAllOne != SignBitAllZero)
878 KnownNotFromFlags |=
fcNan;
880 KnownNotFromFlags |=
fcInf;
884 InterestedClasses &= ~KnownNotFromFlags;
887 [=, &Known] { Known.
knownNot(KnownNotFromFlags); });
893 const MachineFunction *MF =
MI.getMF();
897 TL.computeKnownFPClassForTargetInstr(*
this, R, Known, DemandedElts, MRI,
900 case TargetOpcode::G_FNEG: {
902 computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
Depth + 1);
906 case TargetOpcode::G_SELECT: {
929 bool LookThroughFAbsFNeg = CmpLHS !=
LHS && CmpLHS !=
RHS;
930 std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
936 MaskIfTrue = TestedMask;
937 MaskIfFalse = ~TestedMask;
940 if (TestedValue ==
LHS) {
942 FilterLHS = MaskIfTrue;
943 }
else if (TestedValue ==
RHS) {
945 FilterRHS = MaskIfFalse;
949 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses & FilterLHS, Known,
953 computeKnownFPClass(
RHS, DemandedElts, InterestedClasses & FilterRHS,
960 case TargetOpcode::G_FCOPYSIGN: {
961 Register Magnitude =
MI.getOperand(1).getReg();
964 KnownFPClass KnownSign;
966 computeKnownFPClass(Magnitude, DemandedElts, InterestedClasses, Known,
968 computeKnownFPClass(Sign, DemandedElts, InterestedClasses, KnownSign,
973 case TargetOpcode::G_FMA:
974 case TargetOpcode::G_STRICT_FMA:
975 case TargetOpcode::G_FMAD: {
990 KnownFPClass KnownAddend;
991 computeKnownFPClass(
C, DemandedElts, InterestedClasses, KnownAddend,
998 case TargetOpcode::G_FSQRT:
999 case TargetOpcode::G_STRICT_FSQRT: {
1000 KnownFPClass KnownSrc;
1002 if (InterestedClasses &
fcNan)
1007 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
1022 case TargetOpcode::G_FABS: {
1027 computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,
1033 case TargetOpcode::G_FSIN:
1034 case TargetOpcode::G_FCOS:
1035 case TargetOpcode::G_FSINCOS: {
1038 KnownFPClass KnownSrc;
1040 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1048 case TargetOpcode::G_FMAXNUM:
1049 case TargetOpcode::G_FMINNUM:
1050 case TargetOpcode::G_FMINNUM_IEEE:
1051 case TargetOpcode::G_FMAXIMUM:
1052 case TargetOpcode::G_FMINIMUM:
1053 case TargetOpcode::G_FMAXNUM_IEEE:
1054 case TargetOpcode::G_FMAXIMUMNUM:
1055 case TargetOpcode::G_FMINIMUMNUM: {
1058 KnownFPClass KnownLHS, KnownRHS;
1060 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses, KnownLHS,
1062 computeKnownFPClass(
RHS, DemandedElts, InterestedClasses, KnownRHS,
1066 Known = KnownLHS | KnownRHS;
1069 if (NeverNaN && (Opcode == TargetOpcode::G_FMINNUM ||
1070 Opcode == TargetOpcode::G_FMAXNUM ||
1071 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1072 Opcode == TargetOpcode::G_FMAXIMUMNUM))
1075 if (Opcode == TargetOpcode::G_FMAXNUM ||
1076 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1077 Opcode == TargetOpcode::G_FMAXNUM_IEEE) {
1085 }
else if (Opcode == TargetOpcode::G_FMAXIMUM) {
1091 }
else if (Opcode == TargetOpcode::G_FMINNUM ||
1092 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1093 Opcode == TargetOpcode::G_FMINNUM_IEEE) {
1101 }
else if (Opcode == TargetOpcode::G_FMINIMUM) {
1133 }
else if ((Opcode == TargetOpcode::G_FMAXIMUM ||
1134 Opcode == TargetOpcode::G_FMINIMUM) ||
1135 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1136 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1137 Opcode == TargetOpcode::G_FMAXNUM_IEEE ||
1138 Opcode == TargetOpcode::G_FMINNUM_IEEE ||
1144 if ((Opcode == TargetOpcode::G_FMAXIMUM ||
1145 Opcode == TargetOpcode::G_FMAXNUM ||
1146 Opcode == TargetOpcode::G_FMAXIMUMNUM ||
1147 Opcode == TargetOpcode::G_FMAXNUM_IEEE) &&
1150 else if ((Opcode == TargetOpcode::G_FMINIMUM ||
1151 Opcode == TargetOpcode::G_FMINNUM ||
1152 Opcode == TargetOpcode::G_FMINIMUMNUM ||
1153 Opcode == TargetOpcode::G_FMINNUM_IEEE) &&
1160 case TargetOpcode::G_FCANONICALIZE: {
1162 KnownFPClass KnownSrc;
1163 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1185 DenormalMode DenormMode = MF->getDenormalMode(FPType);
1204 case TargetOpcode::G_VECREDUCE_FMAX:
1205 case TargetOpcode::G_VECREDUCE_FMIN:
1206 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1207 case TargetOpcode::G_VECREDUCE_FMINIMUM: {
1213 computeKnownFPClass(Val,
MI.getFlags(), InterestedClasses,
Depth + 1);
1219 case TargetOpcode::G_TRUNC:
1220 case TargetOpcode::G_FFLOOR:
1221 case TargetOpcode::G_FCEIL:
1222 case TargetOpcode::G_FRINT:
1223 case TargetOpcode::G_FNEARBYINT:
1224 case TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND:
1225 case TargetOpcode::G_INTRINSIC_ROUND: {
1227 KnownFPClass KnownSrc;
1233 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
1250 case TargetOpcode::G_FEXP:
1251 case TargetOpcode::G_FEXP2:
1252 case TargetOpcode::G_FEXP10: {
1258 KnownFPClass KnownSrc;
1259 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1268 case TargetOpcode::G_FLOG:
1269 case TargetOpcode::G_FLOG2:
1270 case TargetOpcode::G_FLOG10: {
1285 KnownFPClass KnownSrc;
1286 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc,
Depth + 1);
1296 DenormalMode
Mode = MF->getDenormalMode(FltSem);
1303 case TargetOpcode::G_FPOWI: {
1308 LLT ExpTy = MRI.getType(Exp);
1310 Exp, ExpTy.
isVector() ? DemandedElts : APInt(1, 1),
Depth + 1);
1312 if (ExponentKnownBits.
Zero[0]) {
1326 KnownFPClass KnownSrc;
1327 computeKnownFPClass(Val, DemandedElts,
fcNegative, KnownSrc,
Depth + 1);
1332 case TargetOpcode::G_FLDEXP:
1333 case TargetOpcode::G_STRICT_FLDEXP: {
1335 KnownFPClass KnownSrc;
1336 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,
1353 if ((InterestedClasses & ExpInfoMask) ==
fcNone)
1362 case TargetOpcode::G_INTRINSIC_ROUNDEVEN: {
1363 computeKnownFPClassForFPTrunc(
MI, DemandedElts, InterestedClasses, Known,
1367 case TargetOpcode::G_FADD:
1368 case TargetOpcode::G_STRICT_FADD:
1369 case TargetOpcode::G_FSUB:
1370 case TargetOpcode::G_STRICT_FSUB: {
1373 KnownFPClass KnownLHS, KnownRHS;
1375 (Opcode == TargetOpcode::G_FADD ||
1376 Opcode == TargetOpcode::G_STRICT_FADD) &&
1378 bool WantNaN = (InterestedClasses &
fcNan) !=
fcNone;
1381 if (!WantNaN && !WantNegative && !WantNegZero)
1387 if (InterestedClasses &
fcNan)
1388 InterestedSrcs |=
fcInf;
1389 computeKnownFPClass(
RHS, DemandedElts, InterestedSrcs, KnownRHS,
Depth + 1);
1394 (Opcode == TargetOpcode::G_FSUB ||
1395 Opcode == TargetOpcode::G_STRICT_FSUB)) {
1399 computeKnownFPClass(
LHS, DemandedElts, InterestedSrcs, KnownLHS,
1407 if (Opcode == TargetOpcode::G_FADD ||
1408 Opcode == TargetOpcode::G_STRICT_FADD) {
1435 case TargetOpcode::G_FMUL:
1436 case TargetOpcode::G_STRICT_FMUL: {
1449 KnownFPClass KnownLHS, KnownRHS;
1450 computeKnownFPClass(
RHS, DemandedElts, NeedForNan, KnownRHS,
Depth + 1);
1454 computeKnownFPClass(
LHS, DemandedElts, NeedForNan, KnownLHS,
Depth + 1);
1481 case TargetOpcode::G_FDIV:
1482 case TargetOpcode::G_FREM: {
1488 if (Opcode == TargetOpcode::G_FDIV) {
1499 const bool WantNan = (InterestedClasses &
fcNan) !=
fcNone;
1501 const bool WantPositive = Opcode == TargetOpcode::G_FREM &&
1503 if (!WantNan && !WantNegative && !WantPositive)
1506 KnownFPClass KnownLHS, KnownRHS;
1509 KnownRHS,
Depth + 1);
1511 bool KnowSomethingUseful =
1514 if (KnowSomethingUseful || WantPositive) {
1519 computeKnownFPClass(
LHS, DemandedElts, InterestedClasses & InterestedLHS,
1520 KnownLHS,
Depth + 1);
1523 if (Opcode == TargetOpcode::G_FDIV) {
1564 case TargetOpcode::G_FPEXT: {
1568 computeKnownFPClass(R, DemandedElts, InterestedClasses, Known,
Depth + 1);
1572 LLT SrcTy = MRI.getType(Src).getScalarType();
1589 case TargetOpcode::G_FPTRUNC: {
1590 computeKnownFPClassForFPTrunc(
MI, DemandedElts, InterestedClasses, Known,
1594 case TargetOpcode::G_SITOFP:
1595 case TargetOpcode::G_UITOFP: {
1604 if (Opcode == TargetOpcode::G_UITOFP)
1608 LLT Ty = MRI.getType(Val);
1610 if (InterestedClasses &
fcInf) {
1615 if (Opcode == TargetOpcode::G_SITOFP)
1629 case TargetOpcode::G_BUILD_VECTOR:
1630 case TargetOpcode::G_CONCAT_VECTORS: {
1637 for (
unsigned Idx = 0; Idx <
Merge.getNumSources(); ++Idx) {
1639 bool NeedsElt = DemandedElts[Idx];
1645 computeKnownFPClass(Src, Known, InterestedClasses,
Depth + 1);
1648 KnownFPClass Known2;
1649 computeKnownFPClass(Src, Known2, InterestedClasses,
Depth + 1);
1661 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1671 LLT VecTy = MRI.getType(Vec);
1676 if (CIdx && CIdx->ult(NumElts))
1678 return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
1684 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1690 LLT VecTy = MRI.getType(Vec);
1698 APInt DemandedVecElts = DemandedElts;
1699 bool NeedsElt =
true;
1701 if (CIdx && CIdx->ult(NumElts)) {
1702 DemandedVecElts.
clearBit(CIdx->getZExtValue());
1703 NeedsElt = DemandedElts[CIdx->getZExtValue()];
1708 computeKnownFPClass(Elt, Known, InterestedClasses,
Depth + 1);
1717 if (!DemandedVecElts.
isZero()) {
1718 KnownFPClass Known2;
1719 computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
1726 case TargetOpcode::G_SHUFFLE_VECTOR: {
1730 APInt DemandedLHS, DemandedRHS;
1732 assert(DemandedElts == APInt(1, 1));
1733 DemandedLHS = DemandedRHS = DemandedElts;
1736 DemandedElts, DemandedLHS,
1743 if (!!DemandedLHS) {
1745 computeKnownFPClass(
LHS, DemandedLHS, InterestedClasses, Known,
1755 if (!!DemandedRHS) {
1756 KnownFPClass Known2;
1758 computeKnownFPClass(
RHS, DemandedRHS, InterestedClasses, Known2,
1764 case TargetOpcode::COPY: {
1767 if (!Src.isVirtual())
1770 computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known,
Depth + 1);
1781 computeKnownFPClass(R, DemandedElts, InterestedClasses, KnownClasses,
Depth);
1782 return KnownClasses;
1788 computeKnownFPClass(R, Known, InterestedClasses,
Depth);
1796 InterestedClasses &=
~fcNan;
1798 InterestedClasses &=
~fcInf;
1801 computeKnownFPClass(R, DemandedElts, InterestedClasses,
Depth);
1804 Result.KnownFPClasses &=
~fcNan;
1806 Result.KnownFPClasses &=
~fcInf;
1812 LLT Ty = MRI.getType(R);
1813 APInt DemandedElts =
1815 return computeKnownFPClass(R, DemandedElts, Flags, InterestedClasses,
Depth);
1819unsigned GISelValueTracking::computeNumSignBitsMin(
Register Src0,
Register Src1,
1820 const APInt &DemandedElts,
1824 if (Src1SignBits == 1)
1841 case TargetOpcode::G_SEXTLOAD:
1844 case TargetOpcode::G_ZEXTLOAD:
1857 const APInt &DemandedElts,
1860 unsigned Opcode =
MI.getOpcode();
1862 if (Opcode == TargetOpcode::G_CONSTANT)
1863 return MI.getOperand(1).getCImm()->getValue().getNumSignBits();
1871 LLT DstTy = MRI.getType(R);
1881 unsigned FirstAnswer = 1;
1883 case TargetOpcode::COPY: {
1885 if (Src.getReg().isVirtual() && Src.getSubReg() == 0 &&
1886 MRI.getType(Src.getReg()).isValid()) {
1893 case TargetOpcode::G_SEXT: {
1895 LLT SrcTy = MRI.getType(Src);
1899 case TargetOpcode::G_ASSERT_SEXT:
1900 case TargetOpcode::G_SEXT_INREG: {
1903 unsigned SrcBits =
MI.getOperand(2).getImm();
1904 unsigned InRegBits = TyBits - SrcBits + 1;
1908 case TargetOpcode::G_LOAD: {
1915 case TargetOpcode::G_SEXTLOAD: {
1930 case TargetOpcode::G_ZEXTLOAD: {
1945 case TargetOpcode::G_AND:
1946 case TargetOpcode::G_OR:
1947 case TargetOpcode::G_XOR: {
1949 unsigned Src1NumSignBits =
1951 if (Src1NumSignBits != 1) {
1953 unsigned Src2NumSignBits =
1955 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits);
1959 case TargetOpcode::G_ASHR: {
1964 FirstAnswer = std::min<uint64_t>(FirstAnswer + *
C, TyBits);
1967 case TargetOpcode::G_SHL: {
1970 if (std::optional<ConstantRange> ShAmtRange =
1972 uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
1973 uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
1983 if (ExtOpc == TargetOpcode::G_SEXT || ExtOpc == TargetOpcode::G_ZEXT ||
1984 ExtOpc == TargetOpcode::G_ANYEXT) {
1985 LLT ExtTy = MRI.getType(Src1);
1987 LLT ExtendeeTy = MRI.getType(Extendee);
1991 if (SizeDiff <= MinShAmt) {
1995 return Tmp - MaxShAmt;
2001 return Tmp - MaxShAmt;
2005 case TargetOpcode::G_TRUNC: {
2007 LLT SrcTy = MRI.getType(Src);
2011 unsigned NumSrcBits = SrcTy.getScalarSizeInBits();
2013 if (NumSrcSignBits > (NumSrcBits - DstTyBits))
2014 return NumSrcSignBits - (NumSrcBits - DstTyBits);
2017 case TargetOpcode::G_SELECT: {
2018 return computeNumSignBitsMin(
MI.getOperand(2).getReg(),
2019 MI.getOperand(3).getReg(), DemandedElts,
2022 case TargetOpcode::G_SMIN:
2023 case TargetOpcode::G_SMAX:
2024 case TargetOpcode::G_UMIN:
2025 case TargetOpcode::G_UMAX:
2027 return computeNumSignBitsMin(
MI.getOperand(1).getReg(),
2028 MI.getOperand(2).getReg(), DemandedElts,
2030 case TargetOpcode::G_SADDO:
2031 case TargetOpcode::G_SADDE:
2032 case TargetOpcode::G_UADDO:
2033 case TargetOpcode::G_UADDE:
2034 case TargetOpcode::G_SSUBO:
2035 case TargetOpcode::G_SSUBE:
2036 case TargetOpcode::G_USUBO:
2037 case TargetOpcode::G_USUBE:
2038 case TargetOpcode::G_SMULO:
2039 case TargetOpcode::G_UMULO: {
2043 if (
MI.getOperand(1).getReg() == R) {
2044 if (TL.getBooleanContents(DstTy.
isVector(),
false) ==
2051 case TargetOpcode::G_SUB: {
2053 unsigned Src2NumSignBits =
2055 if (Src2NumSignBits == 1)
2065 if ((Known2.
Zero | 1).isAllOnes())
2072 FirstAnswer = Src2NumSignBits;
2079 unsigned Src1NumSignBits =
2081 if (Src1NumSignBits == 1)
2086 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;
2089 case TargetOpcode::G_ADD: {
2091 unsigned Src2NumSignBits =
2093 if (Src2NumSignBits <= 2)
2097 unsigned Src1NumSignBits =
2099 if (Src1NumSignBits == 1)
2108 if ((Known1.
Zero | 1).isAllOnes())
2114 FirstAnswer = Src1NumSignBits;
2123 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;
2126 case TargetOpcode::G_FCMP:
2127 case TargetOpcode::G_ICMP: {
2128 bool IsFP = Opcode == TargetOpcode::G_FCMP;
2131 auto BC = TL.getBooleanContents(DstTy.
isVector(), IsFP);
2138 case TargetOpcode::G_BUILD_VECTOR: {
2140 FirstAnswer = TyBits;
2141 APInt SingleDemandedElt(1, 1);
2143 if (!DemandedElts[
I])
2148 FirstAnswer = std::min(FirstAnswer, Tmp2);
2151 if (FirstAnswer == 1)
2156 case TargetOpcode::G_CONCAT_VECTORS: {
2157 if (MRI.getType(
MI.getOperand(0).getReg()).isScalableVector())
2159 FirstAnswer = TyBits;
2162 unsigned NumSubVectorElts =
2163 MRI.getType(
MI.getOperand(1).getReg()).getNumElements();
2166 DemandedElts.
extractBits(NumSubVectorElts,
I * NumSubVectorElts);
2171 FirstAnswer = std::min(FirstAnswer, Tmp2);
2174 if (FirstAnswer == 1)
2179 case TargetOpcode::G_SHUFFLE_VECTOR: {
2182 APInt DemandedLHS, DemandedRHS;
2184 unsigned NumElts = MRI.getType(Src1).getNumElements();
2186 DemandedElts, DemandedLHS, DemandedRHS))
2192 if (FirstAnswer == 1)
2194 if (!!DemandedRHS) {
2197 FirstAnswer = std::min(FirstAnswer, Tmp2);
2201 case TargetOpcode::G_SPLAT_VECTOR: {
2205 unsigned NumSrcBits = MRI.getType(Src).getSizeInBits();
2206 if (NumSrcSignBits > (NumSrcBits - TyBits))
2207 return NumSrcSignBits - (NumSrcBits - TyBits);
2210 case TargetOpcode::G_INTRINSIC:
2211 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
2212 case TargetOpcode::G_INTRINSIC_CONVERGENT:
2213 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
2216 TL.computeNumSignBitsForTargetInstr(*
this, R, DemandedElts, MRI,
Depth);
2218 FirstAnswer = std::max(FirstAnswer, NumBits);
2238 Mask <<= Mask.getBitWidth() - TyBits;
2239 return std::max(FirstAnswer, Mask.countl_one());
2243 LLT Ty = MRI.getType(R);
2244 APInt DemandedElts =
2253 unsigned Opcode =
MI.getOpcode();
2255 LLT Ty = MRI.getType(R);
2256 unsigned BitWidth = Ty.getScalarSizeInBits();
2258 if (Opcode == TargetOpcode::G_CONSTANT) {
2259 const APInt &ShAmt =
MI.getOperand(1).getCImm()->getValue();
2261 return std::nullopt;
2265 if (Opcode == TargetOpcode::G_BUILD_VECTOR) {
2266 const APInt *MinAmt =
nullptr, *MaxAmt =
nullptr;
2267 for (
unsigned I = 0, E =
MI.getNumOperands() - 1;
I != E; ++
I) {
2268 if (!DemandedElts[
I])
2271 if (
Op->getOpcode() != TargetOpcode::G_CONSTANT) {
2272 MinAmt = MaxAmt =
nullptr;
2276 const APInt &ShAmt =
Op->getOperand(1).getCImm()->getValue();
2278 return std::nullopt;
2279 if (!MinAmt || MinAmt->
ugt(ShAmt))
2281 if (!MaxAmt || MaxAmt->ult(ShAmt))
2284 assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
2285 "Failed to find matching min/max shift amounts");
2286 if (MinAmt && MaxAmt)
2296 return std::nullopt;
2301 if (std::optional<ConstantRange> AmtRange =
2303 return AmtRange->getUnsignedMin().getZExtValue();
2304 return std::nullopt;
2322 Info = std::make_unique<GISelValueTracking>(MF, MaxDepth);
2347 if (!MO.isReg() || MO.getReg().isPhysical())
2350 if (!
MRI.getType(Reg).isValid())
2352 KnownBits Known = VTA.getKnownBits(Reg);
2353 unsigned SignedBits = VTA.computeNumSignBits(Reg);
2354 OS <<
" " << MO <<
" KnownBits:" << Known <<
" SignBits:" << SignedBits
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Utilities for dealing with flags related to floating point properties and mode controls.
static void dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth)
static unsigned computeNumSignBitsFromRangeMetadata(const GAnyLoad *Ld, unsigned TyBits)
Compute the known number of sign bits with attached range metadata in the memory operand.
static bool outputDenormalIsIEEEOrPosZero(const MachineFunction &MF, LLT Ty)
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Implement a low-level type suitable for MachineInstr level instruction selection.
Contains matchers for matching SSA Machine Instructions.
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file describes how to lower LLVM code to machine code.
static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)
static LLVM_ABI bool isRepresentableAsNormalIn(const fltSemantics &Src, const fltSemantics &Dst)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
void setAllBits()
Set every bit to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
This class represents a range of values.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
Represents any generic load, including sign/zero extending variants.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
static LLVM_ABI std::optional< GFConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
To use KnownBitsInfo analysis in a pass, KnownBitsInfo &Info = getAnalysis<GISelValueTrackingInfoAnal...
GISelValueTracking & get(MachineFunction &MF)
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
GISelValueTracking Result
LLVM_ABI Result run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
LLVM_ABI PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
unsigned getMaxDepth() const
KnownBits getKnownBits(Register R)
Align computeKnownAlignment(Register R, unsigned Depth=0)
std::optional< ConstantRange > getValidShiftAmountRange(Register R, const APInt &DemandedElts, unsigned Depth)
If a G_SHL/G_ASHR/G_LSHR node with shift operand R has shift amounts that are all less than the eleme...
bool maskedValueIsZero(Register Val, const APInt &Mask)
std::optional< uint64_t > getValidMinimumShiftAmount(Register R, const APInt &DemandedElts, unsigned Depth=0)
If a G_SHL/G_ASHR/G_LSHR node with shift operand R has shift amounts that are all less than the eleme...
bool signBitIsZero(Register Op)
const DataLayout & getDataLayout() const
unsigned computeNumSignBits(Register R, const APInt &DemandedElts, unsigned Depth=0)
APInt getKnownOnes(Register R)
APInt getKnownZeroes(Register R)
void computeKnownBitsImpl(Register R, KnownBits &Known, const APInt &DemandedElts, unsigned Depth=0)
Register getCondReg() const
Register getFalseReg() const
Register getTrueReg() const
Register getSrc2Reg() const
Register getSrc1Reg() const
ArrayRef< int > getMask() const
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
constexpr LLT getScalarType() const
TypeSize getValue() const
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
LLT getMemoryType() const
Return the memory type of the memory reference.
const MDNode * getRanges() const
Return the range tag for the memory reference.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
Register getReg() const
getReg - Returns the register number.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
operand_type_match m_Reg()
operand_type_match m_Pred()
bind_ty< FPClassTest > m_FPClassTest(FPClassTest &T)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
ClassifyOp_match< LHS, Test, TargetOpcode::G_IS_FPCLASS > m_GIsFPClass(const LHS &L, const Test &T)
Matches the register and immediate used in an fpclass test: G_IS_FPCLASS %val, 96.
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_FCMP > m_GFCmp(const Pred &P, const LHS &L, const RHS &R)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ABI std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
LLVM_ABI const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
scope_exit(Callable) -> scope_exit< Callable >
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
std::tuple< Value *, FPClassTest, FPClassTest > fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS, FPClassTest RHSClass, bool LookThroughSrc=true)
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operands, returns false if the mask isn't valid.
constexpr unsigned MaxAnalysisRecursionDepth
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
DWARFExpression::Operation Op
std::string toString(const APInt &I, unsigned Radix, bool Signed, bool formatAsCLiteral=false, bool UpperCase=true, bool InsertSeparators=false)
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
static uint32_t extractBits(uint64_t Val, uint32_t Hi, uint32_t Lo)
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
This struct is a compact representation of a valid (non-zero power of two) alignment.
A special type used by analysis passes to provide an address that identifies that particular analysis...
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
constexpr bool outputsAreZero() const
Return true if output denormals should be flushed to 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environment.
static constexpr DenormalMode getIEEE()
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
LLVM_ABI KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for a in-register sign extension of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from addition of LHS and RHS.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits computeForAddCarry(const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry)
Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the extended bits.
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
bool isAllOnes() const
Returns true if value is all one bits.
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
bool cannotBeOrderedGreaterThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never greater than +0.0.
static constexpr FPClassTest OrderedGreaterThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
void knownNot(FPClassTest RuleOut)
void copysign(const KnownFPClass &Sign)
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
bool isKnownNeverPosZero() const
Return true if it's known this can never be a literal positive zero.
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign bit is definitely unset.
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool isKnownNeverNegZero() const
Return true if it's known this can never be a negative zero.
void propagateNaN(const KnownFPClass &Src, bool PreserveSign=false)
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -0.0.
void signBitMustBeOne()
Assume the sign bit is one.
void signBitMustBeZero()
Assume the sign bit is zero.
LLVM_ABI bool isKnownNeverLogicalPosZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a positive zero.
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
LLVM_ABI bool isKnownNeverLogicalNegZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a negative zero.