66 if (
F.getFnAttribute(
"disable-tail-calls").getValueAsBool())
72 AttrBuilder CallerAttrs(
F.getContext(),
F.getAttributes().getRetAttrs());
73 for (
const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
74 Attribute::DereferenceableOrNull, Attribute::NoAlias,
75 Attribute::NonNull, Attribute::NoUndef,
76 Attribute::Range, Attribute::NoFPClass})
77 CallerAttrs.removeAttribute(Attr);
79 if (CallerAttrs.hasAttributes())
83 if (CallerAttrs.contains(Attribute::ZExt) ||
84 CallerAttrs.contains(Attribute::SExt))
95 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
112 if (
MRI.getLiveInPhysReg(ArgReg) != Reg)
122 IsSExt =
Call->paramHasAttr(ArgIdx, Attribute::SExt);
123 IsZExt =
Call->paramHasAttr(ArgIdx, Attribute::ZExt);
124 IsNoExt =
Call->paramHasAttr(ArgIdx, Attribute::NoExt);
125 IsInReg =
Call->paramHasAttr(ArgIdx, Attribute::InReg);
126 IsSRet =
Call->paramHasAttr(ArgIdx, Attribute::StructRet);
127 IsNest =
Call->paramHasAttr(ArgIdx, Attribute::Nest);
128 IsByVal =
Call->paramHasAttr(ArgIdx, Attribute::ByVal);
138 "multiple ABI attributes?");
154std::pair<SDValue, SDValue>
164 Args.reserve(
Ops.size());
167 for (
unsigned i = 0; i <
Ops.size(); ++i) {
169 Type *Ty = i < OpsTypeOverrides.
size() && OpsTypeOverrides[i]
170 ? OpsTypeOverrides[i]
179 Entry.IsZExt = !Entry.IsSExt;
183 Entry.IsSExt = Entry.IsZExt =
false;
185 Args.push_back(Entry);
189 if (LC == RTLIB::UNKNOWN_LIBCALL || !LibcallName)
196 Type *OrigRetTy = RetTy;
199 bool zeroExtend = !signExtend;
204 signExtend = zeroExtend =
false;
220 LLVMContext &Context, std::vector<EVT> &MemOps,
unsigned Limit,
221 const MemOp &
Op,
unsigned DstAS,
unsigned SrcAS,
222 const AttributeList &FuncAttributes)
const {
223 if (Limit != ~
unsigned(0) &&
Op.isMemcpyWithFixedDstAlign() &&
224 Op.getSrcAlign() <
Op.getDstAlign())
229 if (VT == MVT::Other) {
233 VT = MVT::LAST_INTEGER_VALUETYPE;
234 if (
Op.isFixedDstAlign())
241 MVT LVT = MVT::LAST_INTEGER_VALUETYPE;
252 unsigned NumMemOps = 0;
256 while (VTSize >
Size) {
267 else if (NewVT == MVT::i64 &&
279 if (NewVT == MVT::i8)
288 if (NumMemOps &&
Op.allowOverlap() && NewVTSize <
Size &&
290 VT, DstAS,
Op.isFixedDstAlign() ?
Op.getDstAlign() :
Align(1),
300 if (++NumMemOps > Limit)
303 MemOps.push_back(VT);
328 bool IsSignaling)
const {
333 assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
334 &&
"Unsupported setcc type!");
337 RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
338 bool ShouldInvertCC =
false;
342 LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
343 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
344 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
348 LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
349 (VT == MVT::f64) ? RTLIB::UNE_F64 :
350 (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
354 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
355 (VT == MVT::f64) ? RTLIB::OGE_F64 :
356 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
360 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
361 (VT == MVT::f64) ? RTLIB::OLT_F64 :
362 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
366 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
367 (VT == MVT::f64) ? RTLIB::OLE_F64 :
368 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
372 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
373 (VT == MVT::f64) ? RTLIB::OGT_F64 :
374 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
377 ShouldInvertCC =
true;
380 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
381 (VT == MVT::f64) ? RTLIB::UO_F64 :
382 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
386 ShouldInvertCC =
true;
389 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
390 (VT == MVT::f64) ? RTLIB::UO_F64 :
391 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
392 LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
393 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
394 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
398 ShouldInvertCC =
true;
401 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
402 (VT == MVT::f64) ? RTLIB::OGE_F64 :
403 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
406 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
407 (VT == MVT::f64) ? RTLIB::OGT_F64 :
408 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
411 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
412 (VT == MVT::f64) ? RTLIB::OLE_F64 :
413 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
416 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
417 (VT == MVT::f64) ? RTLIB::OLT_F64 :
418 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
436 if (LC1Impl == RTLIB::Unsupported) {
438 "no libcall available to soften floating-point compare");
442 if (ShouldInvertCC) {
444 CCCode = getSetCCInverse(CCCode, RetVT);
447 if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
452 if (LC2Impl == RTLIB::Unsupported) {
454 "no libcall available to soften floating-point compare");
458 "unordered call should be simple boolean");
468 auto Call2 =
makeLibCall(DAG, LC2, RetVT,
Ops, CallOptions, dl, Chain);
471 CCCode = getSetCCInverse(CCCode, RetVT);
472 NewLHS = DAG.
getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
515 return DAG.
getNode(ISD::BRIND, dl, MVT::Other, Chain, Addr);
525 if (!TM.shouldAssumeDSOLocal(GV))
545 const APInt &DemandedElts,
548 unsigned Opcode =
Op.getOpcode();
567 if (!Op1C || Op1C->isOpaque())
571 const APInt &
C = Op1C->getAPIntValue();
576 EVT VT =
Op.getValueType();
593 EVT VT =
Op.getValueType();
608 "ShrinkDemandedOp only supports binary operators!");
609 assert(
Op.getNode()->getNumValues() == 1 &&
610 "ShrinkDemandedOp only supports nodes with one result!");
612 EVT VT =
Op.getValueType();
621 Op.getOperand(1).getValueType().getScalarSizeInBits() ==
BitWidth &&
622 "ShrinkDemandedOp only supports operands that have the same size!");
626 if (!
Op.getNode()->hasOneUse())
642 unsigned Opcode =
Op.getOpcode();
643 if (Opcode == ISD::PTRADD) {
652 assert(DemandedSize <= SmallVTBits &&
"Narrowed below demanded bits?");
676 const APInt &DemandedElts,
696 bool AssumeSingleUse)
const {
697 EVT VT =
Op.getValueType();
713 EVT VT =
Op.getValueType();
731 switch (
Op.getOpcode()) {
737 EVT SrcVT = Src.getValueType();
738 EVT DstVT =
Op.getValueType();
744 if (NumSrcEltBits == NumDstEltBits)
749 if (SrcVT.
isVector() && (NumDstEltBits % NumSrcEltBits) == 0) {
750 unsigned Scale = NumDstEltBits / NumSrcEltBits;
754 for (
unsigned i = 0; i != Scale; ++i) {
755 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
756 unsigned BitOffset = EltOffset * NumSrcEltBits;
759 DemandedSrcBits |=
Sub;
760 for (
unsigned j = 0; j != NumElts; ++j)
762 DemandedSrcElts.
setBit((j * Scale) + i);
767 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
772 if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) {
773 unsigned Scale = NumSrcEltBits / NumDstEltBits;
777 for (
unsigned i = 0; i != NumElts; ++i)
778 if (DemandedElts[i]) {
779 unsigned Offset = (i % Scale) * NumDstEltBits;
781 DemandedSrcElts.
setBit(i / Scale);
785 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
799 return Op.getOperand(0);
801 return Op.getOperand(1);
812 return Op.getOperand(0);
814 return Op.getOperand(1);
824 return Op.getOperand(0);
826 return Op.getOperand(1);
832 return Op.getOperand(0);
836 return Op.getOperand(1);
842 if (std::optional<unsigned> MaxSA =
845 unsigned ShAmt = *MaxSA;
846 unsigned NumSignBits =
849 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
857 if (std::optional<unsigned> MaxSA =
860 unsigned ShAmt = *MaxSA;
864 unsigned NumSignBits =
903 if (NumSignBits >= (
BitWidth - ExBits + 1))
916 EVT SrcVT = Src.getValueType();
917 EVT DstVT =
Op.getValueType();
918 if (IsLE && DemandedElts == 1 &&
934 !DemandedElts[CIdx->getZExtValue()])
945 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
948 if (DemandedSubElts == 0)
958 bool AllUndef =
true, IdentityLHS =
true, IdentityRHS =
true;
959 for (
unsigned i = 0; i != NumElts; ++i) {
960 int M = ShuffleMask[i];
961 if (M < 0 || !DemandedElts[i])
964 IdentityLHS &= (M == (int)i);
965 IdentityRHS &= ((M - NumElts) == i);
971 return Op.getOperand(0);
973 return Op.getOperand(1);
993 unsigned Depth)
const {
994 EVT VT =
Op.getValueType();
1007 unsigned Depth)
const {
1021 "SRL or SRA node is required here!");
1024 if (!N1C || !N1C->
isOne())
1071 unsigned ShiftOpc =
Op.getOpcode();
1072 bool IsSigned =
false;
1076 unsigned NumSigned = std::min(NumSignedA, NumSignedB) - 1;
1081 unsigned NumZero = std::min(NumZeroA, NumZeroB);
1087 if (NumZero >= 2 && NumSigned < NumZero) {
1092 if (NumSigned >= 1) {
1100 if (NumZero >= 1 && NumSigned < NumZero) {
1120 EVT VT =
Op.getValueType();
1134 Add.getOperand(1)) &&
1165 unsigned Depth,
bool AssumeSingleUse)
const {
1168 "Mask size mismatches value type size!");
1173 EVT VT =
Op.getValueType();
1175 unsigned NumElts = OriginalDemandedElts.
getBitWidth();
1177 "Unexpected vector size");
1180 APInt DemandedElts = OriginalDemandedElts;
1205 bool HasMultiUse =
false;
1206 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse()) {
1215 }
else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
1224 switch (
Op.getOpcode()) {
1228 if (!DemandedElts[0])
1233 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
1240 if (DemandedElts == 1)
1269 EVT MemVT = LD->getMemoryVT();
1286 APInt DemandedVecElts(DemandedElts);
1288 unsigned Idx = CIdx->getZExtValue();
1292 if (!DemandedElts[Idx])
1309 if (!!DemandedVecElts)
1322 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
1324 APInt DemandedSrcElts = DemandedElts;
1325 DemandedSrcElts.
clearBits(Idx, Idx + NumSubElts);
1336 if (!!DemandedSubElts)
1338 if (!!DemandedSrcElts)
1348 if (NewSub || NewSrc) {
1349 NewSub = NewSub ? NewSub :
Sub;
1350 NewSrc = NewSrc ? NewSrc : Src;
1363 if (Src.getValueType().isScalableVector())
1366 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
1367 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts).
shl(Idx);
1389 EVT SubVT =
Op.getOperand(0).getValueType();
1390 unsigned NumSubVecs =
Op.getNumOperands();
1392 for (
unsigned i = 0; i != NumSubVecs; ++i) {
1393 APInt DemandedSubElts =
1394 DemandedElts.
extractBits(NumSubElts, i * NumSubElts);
1396 Known2, TLO,
Depth + 1))
1399 if (!!DemandedSubElts)
1409 APInt DemandedLHS, DemandedRHS;
1414 if (!!DemandedLHS || !!DemandedRHS) {
1419 if (!!DemandedLHS) {
1425 if (!!DemandedRHS) {
1437 if (DemandedOp0 || DemandedOp1) {
1438 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1439 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1474 LHSKnown.
One == ~RHSC->getAPIntValue()) {
1486 unsigned NumSubElts =
1507 Known2, TLO,
Depth + 1))
1533 if (DemandedOp0 || DemandedOp1) {
1534 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1535 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1554 Known2, TLO,
Depth + 1)) {
1578 if (DemandedOp0 || DemandedOp1) {
1579 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1580 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1591 for (
int I = 0;
I != 2; ++
I) {
1594 SDValue Alt =
Op.getOperand(1 -
I).getOperand(0);
1595 SDValue C2 =
Op.getOperand(1 -
I).getOperand(1);
1597 for (
int J = 0; J != 2; ++J) {
1650 if (
C->getAPIntValue() == Known2.
One) {
1659 if (!
C->isAllOnes() &&
DemandedBits.isSubsetOf(
C->getAPIntValue())) {
1671 if (ShiftC->getAPIntValue().ult(
BitWidth)) {
1672 uint64_t ShiftAmt = ShiftC->getZExtValue();
1675 : Ones.
lshr(ShiftAmt);
1692 if (!
C || !
C->isAllOnes())
1702 if (DemandedOp0 || DemandedOp1) {
1703 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1704 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1715 Known, TLO,
Depth + 1))
1718 Known2, TLO,
Depth + 1))
1730 Known, TLO,
Depth + 1))
1733 Known2, TLO,
Depth + 1))
1741 Known, TLO,
Depth + 1))
1744 Known2, TLO,
Depth + 1))
1787 if (std::optional<unsigned> KnownSA =
1789 unsigned ShAmt = *KnownSA;
1799 if (std::optional<unsigned> InnerSA =
1801 unsigned C1 = *InnerSA;
1803 int Diff = ShAmt - C1;
1822 if (ShAmt < InnerBits &&
DemandedBits.getActiveBits() <= InnerBits &&
1840 InnerOp, DemandedElts,
Depth + 2)) {
1841 unsigned InnerShAmt = *SA2;
1842 if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
1844 (InnerBits - InnerShAmt + ShAmt) &&
1872 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
1883 Op.getNode()->hasOneUse()) {
1894 assert(DemandedSize <= SmallVTBits &&
1895 "Narrowed below demanded bits?");
1925 Flags.setNoUnsignedWrap(IsNUW);
1930 NewShiftAmt, Flags);
1956 if (std::optional<unsigned> MaxSA =
1958 unsigned ShAmt = *MaxSA;
1959 unsigned NumSignBits =
1962 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
1972 if (std::optional<unsigned> KnownSA =
1974 unsigned ShAmt = *KnownSA;
1984 if (std::optional<unsigned> InnerSA =
1986 unsigned C1 = *InnerSA;
1988 int Diff = ShAmt - C1;
2004 if (std::optional<unsigned> InnerSA =
2006 unsigned C1 = *InnerSA;
2008 unsigned Combined = std::min(C1 + ShAmt,
BitWidth - 1);
2020 if (
Op->getFlags().hasExact())
2055 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2069 if (std::optional<unsigned> MaxSA =
2071 unsigned ShAmt = *MaxSA;
2075 unsigned NumSignBits =
2084 DemandedElts,
Depth + 1))
2108 if (std::optional<unsigned> KnownSA =
2110 unsigned ShAmt = *KnownSA;
2117 if (std::optional<unsigned> InnerSA =
2119 unsigned LowBits =
BitWidth - ShAmt;
2125 if (*InnerSA == ShAmt) {
2135 unsigned NumSignBits =
2137 if (NumSignBits > ShAmt)
2147 if (
Op->getFlags().hasExact())
2184 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2194 DemandedElts,
Depth + 1))
2207 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2213 Known, TLO,
Depth + 1))
2229 Known2 <<= (IsFSHL ? Amt : (
BitWidth - Amt));
2230 Known >>= (IsFSHL ? (
BitWidth - Amt) : Amt);
2237 Op0, Demanded0, DemandedElts, TLO.
DAG,
Depth + 1);
2239 Op1, Demanded1, DemandedElts, TLO.
DAG,
Depth + 1);
2240 if (DemandedOp0 || DemandedOp1) {
2241 DemandedOp0 = DemandedOp0 ? DemandedOp0 : Op0;
2242 DemandedOp1 = DemandedOp1 ? DemandedOp1 : Op1;
2254 Known2, TLO,
Depth + 1))
2270 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2286 DemandedBits.countr_zero() >= (IsROTL ? Amt : RevAmt)) {
2291 DemandedBits.countl_zero() >= (IsROTL ? RevAmt : Amt)) {
2310 unsigned Opc =
Op.getOpcode();
2317 unsigned NumSignBits =
2321 if (NumSignBits >= NumDemandedUpperBits)
2387 unsigned ShiftAmount = NLZ > NTZ ? NLZ - NTZ : NTZ - NLZ;
2419 unsigned MinSignedBits =
2421 bool AlreadySignExtended = ExVTBits >= MinSignedBits;
2424 if (!AlreadySignExtended) {
2442 InputDemandedBits.
setBit(ExVTBits - 1);
2452 if (Known.
Zero[ExVTBits - 1])
2456 if (Known.
One[ExVTBits - 1]) {
2466 EVT HalfVT =
Op.getOperand(0).getValueType();
2480 Known = KnownHi.
concat(KnownLo);
2489 EVT SrcVT = Src.getValueType();
2498 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2509 APInt InDemandedElts = DemandedElts.
zext(InElts);
2520 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2530 EVT SrcVT = Src.getValueType();
2535 APInt InDemandedElts = DemandedElts.
zext(InElts);
2540 InDemandedBits.
setBit(InBits - 1);
2546 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2583 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2593 EVT SrcVT = Src.getValueType();
2600 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2605 APInt InDemandedElts = DemandedElts.
zext(InElts);
2614 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2623 unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
2636 Src, TruncMask, DemandedElts, TLO.
DAG,
Depth + 1))
2641 switch (Src.getOpcode()) {
2652 if (Src.getNode()->hasOneUse()) {
2664 std::optional<unsigned> ShAmtC =
2666 if (!ShAmtC || *ShAmtC >=
BitWidth)
2668 unsigned ShVal = *ShAmtC;
2698 Known.
Zero |= ~InMask;
2699 Known.
One &= (~Known.Zero);
2705 ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
2706 unsigned EltBitWidth = Src.getScalarValueSizeInBits();
2715 if (CIdx->getAPIntValue().ult(NumSrcElts))
2722 DemandedSrcBits = DemandedSrcBits.
trunc(EltBitWidth);
2731 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2733 TLO.
DAG.
getNode(
Op.getOpcode(), dl, VT, DemandedSrc, Idx);
2743 case ISD::BITCAST: {
2747 EVT SrcVT = Src.getValueType();
2757 if ((OpVTLegal || i32Legal) && VT.
isSimple() && SrcVT != MVT::f16 &&
2758 SrcVT != MVT::f128) {
2760 EVT Ty = OpVTLegal ? VT : MVT::i32;
2764 unsigned OpVTSizeInBits =
Op.getValueSizeInBits();
2765 if (!OpVTLegal && OpVTSizeInBits > 32)
2767 unsigned ShVal =
Op.getValueSizeInBits() - 1;
2777 unsigned Scale =
BitWidth / NumSrcEltBits;
2781 for (
unsigned i = 0; i != Scale; ++i) {
2782 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
2783 unsigned BitOffset = EltOffset * NumSrcEltBits;
2785 if (!
Sub.isZero()) {
2786 DemandedSrcBits |=
Sub;
2787 for (
unsigned j = 0; j != NumElts; ++j)
2788 if (DemandedElts[j])
2789 DemandedSrcElts.
setBit((j * Scale) + i);
2793 APInt KnownSrcUndef, KnownSrcZero;
2795 KnownSrcZero, TLO,
Depth + 1))
2800 KnownSrcBits, TLO,
Depth + 1))
2802 }
else if (IsLE && (NumSrcEltBits %
BitWidth) == 0) {
2804 unsigned Scale = NumSrcEltBits /
BitWidth;
2808 for (
unsigned i = 0; i != NumElts; ++i)
2809 if (DemandedElts[i]) {
2812 DemandedSrcElts.
setBit(i / Scale);
2816 APInt KnownSrcUndef, KnownSrcZero;
2818 KnownSrcZero, TLO,
Depth + 1))
2824 KnownSrcBits, TLO,
Depth + 1))
2830 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2852 if (
C &&
C->getAPIntValue().countr_zero() == CTZ) {
2868 if (
Op.getOperand(0).getValueType() !=
Op.getOperand(1).getValueType())
2876 SDValue Op0 =
Op.getOperand(0), Op1 =
Op.getOperand(1);
2881 auto GetDemandedBitsLHSMask = [&](
APInt Demanded,
2890 DemandedElts, KnownOp0, TLO,
Depth + 1) ||
2907 Op0, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2909 Op1, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2910 if (DemandedOp0 || DemandedOp1) {
2911 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
2912 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
2926 if (
C && !
C->isAllOnes() && !
C->isOne() &&
2927 (
C->getAPIntValue() | HighMask).isAllOnes()) {
2939 auto getShiftLeftAmt = [&HighMask](
SDValue Mul) ->
unsigned {
2966 if (
unsigned ShAmt = getShiftLeftAmt(Op0))
2969 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
2970 return foldMul(
ISD::SUB, Op1.getOperand(0), Op0, ShAmt);
2974 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
2975 return foldMul(
ISD::ADD, Op1.getOperand(0), Op0, ShAmt);
2983 Op.getOpcode() !=
ISD::SUB, Flags.hasNoSignedWrap(),
2984 Flags.hasNoUnsignedWrap(), KnownOp0, KnownOp1);
3005 Known.
Zero |= SignMask;
3006 Known.
One &= ~SignMask;
3023 Known, TLO,
Depth + 1) ||
3037 Known.
Zero &= ~SignMask0;
3038 Known.
One &= ~SignMask0;
3053 Known.
Zero ^= SignMask;
3054 Known.
One ^= SignMask;
3065 if (
Op.getValueType().isScalableVector())
3084 auto *C = dyn_cast<ConstantSDNode>(V);
3085 return C && C->isOpaque();
3106 const APInt &DemandedElts,
3112 APInt KnownUndef, KnownZero;
3126 const APInt &UndefOp0,
3127 const APInt &UndefOp1) {
3130 "Vector binop only");
3135 UndefOp1.
getBitWidth() == NumElts &&
"Bad type for undef analysis");
3137 auto getUndefOrConstantElt = [&](
SDValue V,
unsigned Index,
3138 const APInt &UndefVals) {
3139 if (UndefVals[Index])
3155 for (
unsigned i = 0; i != NumElts; ++i) {
3174 bool AssumeSingleUse)
const {
3175 EVT VT =
Op.getValueType();
3176 unsigned Opcode =
Op.getOpcode();
3177 APInt DemandedElts = OriginalDemandedElts;
3191 "Mask size mismatches value type element count!");
3200 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse())
3204 if (DemandedElts == 0) {
3219 auto SimplifyDemandedVectorEltsBinOp = [&](
SDValue Op0,
SDValue Op1) {
3224 if (NewOp0 || NewOp1) {
3227 NewOp1 ? NewOp1 : Op1,
Op->getFlags());
3235 if (!DemandedElts[0]) {
3242 case ISD::BITCAST: {
3244 EVT SrcVT = Src.getValueType();
3251 for (
unsigned I = 0;
I != NumElts; ++
I) {
3252 if (DemandedElts[
I]) {
3253 unsigned Offset =
I * EltSize;
3266 if (NumSrcElts == NumElts)
3268 KnownZero, TLO,
Depth + 1);
3270 APInt SrcDemandedElts, SrcZero, SrcUndef;
3274 if ((NumElts % NumSrcElts) == 0) {
3275 unsigned Scale = NumElts / NumSrcElts;
3287 for (
unsigned i = 0; i != NumElts; ++i)
3288 if (DemandedElts[i]) {
3289 unsigned Ofs = (i % Scale) * EltSizeInBits;
3290 SrcDemandedBits.
setBits(Ofs, Ofs + EltSizeInBits);
3302 for (
unsigned SubElt = 0; SubElt != Scale; ++SubElt) {
3306 for (
unsigned SrcElt = 0; SrcElt != NumSrcElts; ++SrcElt) {
3307 unsigned Elt = Scale * SrcElt + SubElt;
3308 if (DemandedElts[Elt])
3316 for (
unsigned i = 0; i != NumSrcElts; ++i) {
3317 if (SrcDemandedElts[i]) {
3319 KnownZero.
setBits(i * Scale, (i + 1) * Scale);
3321 KnownUndef.
setBits(i * Scale, (i + 1) * Scale);
3329 if ((NumSrcElts % NumElts) == 0) {
3330 unsigned Scale = NumSrcElts / NumElts;
3338 for (
unsigned i = 0; i != NumElts; ++i) {
3339 if (DemandedElts[i]) {
3369 [&](
SDValue Elt) { return Op.getOperand(0) != Elt; })) {
3371 bool Updated =
false;
3372 for (
unsigned i = 0; i != NumElts; ++i) {
3383 for (
unsigned i = 0; i != NumElts; ++i) {
3385 if (
SrcOp.isUndef()) {
3387 }
else if (EltSizeInBits ==
SrcOp.getScalarValueSizeInBits() &&
3395 EVT SubVT =
Op.getOperand(0).getValueType();
3396 unsigned NumSubVecs =
Op.getNumOperands();
3398 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3401 APInt SubUndef, SubZero;
3405 KnownUndef.
insertBits(SubUndef, i * NumSubElts);
3406 KnownZero.
insertBits(SubZero, i * NumSubElts);
3411 bool FoundNewSub =
false;
3413 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3417 SubOp, SubElts, TLO.
DAG,
Depth + 1);
3418 DemandedSubOps.
push_back(NewSubOp ? NewSubOp : SubOp);
3419 FoundNewSub = NewSubOp ?
true : FoundNewSub;
3435 unsigned NumSubElts =
Sub.getValueType().getVectorNumElements();
3437 APInt DemandedSrcElts = DemandedElts;
3438 DemandedSrcElts.
clearBits(Idx, Idx + NumSubElts);
3441 if (!DemandedSubElts)
3444 APInt SubUndef, SubZero;
3450 if (!DemandedSrcElts && !Src.isUndef())
3464 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3467 if (NewSrc || NewSub) {
3468 NewSrc = NewSrc ? NewSrc : Src;
3469 NewSub = NewSub ? NewSub :
Sub;
3471 NewSub,
Op.getOperand(2));
3480 if (Src.getValueType().isScalableVector())
3483 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3484 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts).
shl(Idx);
3486 APInt SrcUndef, SrcZero;
3496 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3512 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
3513 unsigned Idx = CIdx->getZExtValue();
3514 if (!DemandedElts[Idx])
3517 APInt DemandedVecElts(DemandedElts);
3520 KnownZero, TLO,
Depth + 1))
3529 APInt VecUndef, VecZero;
3543 APInt UndefSel, ZeroSel;
3549 APInt DemandedLHS(DemandedElts);
3550 APInt DemandedRHS(DemandedElts);
3551 APInt UndefLHS, ZeroLHS;
3552 APInt UndefRHS, ZeroRHS;
3560 KnownUndef = UndefLHS & UndefRHS;
3561 KnownZero = ZeroLHS & ZeroRHS;
3565 APInt DemandedSel = DemandedElts & ~KnownZero;
3566 if (DemandedSel != DemandedElts)
3579 APInt DemandedLHS(NumElts, 0);
3580 APInt DemandedRHS(NumElts, 0);
3581 for (
unsigned i = 0; i != NumElts; ++i) {
3582 int M = ShuffleMask[i];
3583 if (M < 0 || !DemandedElts[i])
3585 assert(0 <= M && M < (
int)(2 * NumElts) &&
"Shuffle index out of range");
3586 if (M < (
int)NumElts)
3589 DemandedRHS.
setBit(M - NumElts);
3595 bool FoldLHS = !DemandedLHS && !LHS.isUndef();
3596 bool FoldRHS = !DemandedRHS && !RHS.isUndef();
3597 if (FoldLHS || FoldRHS) {
3598 LHS = FoldLHS ? TLO.
DAG.
getUNDEF(LHS.getValueType()) : LHS;
3599 RHS = FoldRHS ? TLO.
DAG.
getUNDEF(RHS.getValueType()) : RHS;
3606 APInt UndefLHS, ZeroLHS;
3607 APInt UndefRHS, ZeroRHS;
3616 bool Updated =
false;
3617 bool IdentityLHS =
true, IdentityRHS =
true;
3619 for (
unsigned i = 0; i != NumElts; ++i) {
3620 int &M = NewMask[i];
3623 if (!DemandedElts[i] || (M < (
int)NumElts && UndefLHS[M]) ||
3624 (M >= (
int)NumElts && UndefRHS[M - NumElts])) {
3628 IdentityLHS &= (M < 0) || (M == (
int)i);
3629 IdentityRHS &= (M < 0) || ((M - NumElts) == i);
3634 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.
LegalOps) {
3642 for (
unsigned i = 0; i != NumElts; ++i) {
3643 int M = ShuffleMask[i];
3646 }
else if (M < (
int)NumElts) {
3652 if (UndefRHS[M - NumElts])
3654 if (ZeroRHS[M - NumElts])
3663 APInt SrcUndef, SrcZero;
3665 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3666 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts);
3674 Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
3675 DemandedSrcElts == 1) {
3688 if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() ==
ISD::AND &&
3689 Op->isOnlyUserOf(Src.getNode()) &&
3690 Op.getValueSizeInBits() == Src.getValueSizeInBits()) {
3692 EVT SrcVT = Src.getValueType();
3699 ISD::AND,
DL, SrcVT, {Src.getOperand(1), Mask})) {
3713 if (Op0 == Op1 &&
Op->isOnlyUserOf(Op0.
getNode())) {
3714 APInt UndefLHS, ZeroLHS;
3736 APInt UndefRHS, ZeroRHS;
3740 APInt UndefLHS, ZeroLHS;
3745 KnownZero = ZeroLHS & ZeroRHS;
3751 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3763 APInt UndefRHS, ZeroRHS;
3767 APInt UndefLHS, ZeroLHS;
3772 KnownZero = ZeroLHS;
3773 KnownUndef = UndefLHS & UndefRHS;
3778 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3789 APInt SrcUndef, SrcZero;
3795 APInt DemandedElts0 = DemandedElts & ~SrcZero;
3800 KnownUndef &= DemandedElts0;
3801 KnownZero &= DemandedElts0;
3806 if (DemandedElts.
isSubsetOf(SrcZero | KnownZero | SrcUndef | KnownUndef))
3813 KnownZero |= SrcZero;
3814 KnownUndef &= SrcUndef;
3815 KnownUndef &= ~KnownZero;
3819 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3827 KnownZero, TLO,
Depth + 1))
3832 Op.getOperand(0), DemandedElts, TLO.
DAG,
Depth + 1))
3847 KnownZero, TLO,
Depth + 1))
3854 KnownZero, TLO,
Depth))
3860 TLO,
Depth, AssumeSingleUse))
3866 assert((KnownUndef & KnownZero) == 0 &&
"Elements flagged as undef AND zero");
3880 const APInt &DemandedElts,
3882 unsigned Depth)
const {
3887 "Should use MaskedValueIsZero if you don't know whether Op"
3888 " is a target node!");
3895 unsigned Depth)
const {
3902 unsigned Depth)
const {
3914 unsigned Depth)
const {
3923 unsigned Depth)
const {
3928 "Should use ComputeNumSignBits if you don't know whether Op"
3929 " is a target node!");
3946 "Should use SimplifyDemandedVectorElts if you don't know whether Op"
3947 " is a target node!");
3958 "Should use SimplifyDemandedBits if you don't know whether Op"
3959 " is a target node!");
3972 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
3973 " is a target node!");
4006 "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op"
4007 " is a target node!");
4014 return DAG.isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly,
4026 "Should use canCreateUndefOrPoison if you don't know whether Op"
4027 " is a target node!");
4033 const APInt &DemandedElts,
4036 unsigned Depth)
const {
4041 "Should use isKnownNeverNaN if you don't know whether Op"
4042 " is a target node!");
4047 const APInt &DemandedElts,
4050 unsigned Depth)
const {
4055 "Should use isSplatValue if you don't know whether Op"
4056 " is a target node!");
4071 CVal = CN->getAPIntValue();
4072 EltWidth =
N.getValueType().getScalarSizeInBits();
4079 CVal = CVal.
trunc(EltWidth);
4085 return CVal.
isOne();
4127 return (
N->isOne() && !SExt) || (SExt && (
N->getValueType(0) != MVT::i1));
4130 return N->isAllOnes() && SExt;
4139 DAGCombinerInfo &DCI)
const {
4168 if (AndC &&
isNullConstant(N1) && AndC->getAPIntValue().isPowerOf2() &&
4171 AndC->getAPIntValue().getActiveBits());
4198 if (isXAndYEqZeroPreferableToXAndYEqY(
Cond, OpVT) &&
4206 if (DCI.isBeforeLegalizeOps() ||
4235 DAGCombinerInfo &DCI)
const {
4239 SelectionDAG &DAG = DCI.DAG;
4276SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
4278 const SDLoc &
DL)
const {
4289 ConstantSDNode *C01;
4318 auto checkConstants = [&
I1, &I01]() ->
bool {
4323 if (checkConstants()) {
4331 if (!checkConstants())
4337 const unsigned KeptBits =
I1.logBase2();
4338 const unsigned KeptBitsMinusOne = I01.
logBase2();
4341 if (KeptBits != (KeptBitsMinusOne + 1))
4346 SelectionDAG &DAG = DCI.DAG;
4355 return DAG.
getSetCC(
DL, SCCVT, SExtInReg,
X, NewCond);
4359SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
4361 DAGCombinerInfo &DCI,
const SDLoc &
DL)
const {
4363 "Should be a comparison with 0.");
4365 "Valid only for [in]equality comparisons.");
4367 unsigned NewShiftOpcode;
4370 SelectionDAG &DAG = DCI.DAG;
4373 auto Match = [&NewShiftOpcode, &
X, &
C, &
Y, &DAG,
this](
SDValue V) {
4377 unsigned OldShiftOpcode =
V.getOpcode();
4378 switch (OldShiftOpcode) {
4390 C =
V.getOperand(0);
4391 ConstantSDNode *CC =
4395 Y =
V.getOperand(1);
4397 ConstantSDNode *XC =
4400 X, XC, CC,
Y, OldShiftOpcode, NewShiftOpcode, DAG);
4417 EVT VT =
X.getValueType();
4432 DAGCombinerInfo &DCI)
const {
4435 "Unexpected binop");
4441 SelectionDAG &DAG = DCI.DAG;
4463 if (!DCI.isCalledByLegalizer())
4464 DCI.AddToWorklist(YShl1.
getNode());
4479 if (CTPOP.getOpcode() !=
ISD::CTPOP || !CTPOP.hasOneUse())
4482 EVT CTVT = CTPOP.getValueType();
4483 SDValue CTOp = CTPOP.getOperand(0);
4503 for (
unsigned i = 0; i <
Passes; i++) {
4552 auto getRotateSource = [](
SDValue X) {
4554 return X.getOperand(0);
4561 if (
SDValue R = getRotateSource(N0))
4594 if (!C1 || !C1->
isZero())
4619 if (
Or.getOperand(0) ==
Other) {
4620 X =
Or.getOperand(0);
4621 Y =
Or.getOperand(1);
4624 if (
Or.getOperand(1) ==
Other) {
4625 X =
Or.getOperand(1);
4626 Y =
Or.getOperand(0);
4636 if (matchOr(F0, F1)) {
4643 if (matchOr(F1, F0)) {
4659 const SDLoc &dl)
const {
4669 bool N0ConstOrSplat =
4671 bool N1ConstOrSplat =
4679 if (N0ConstOrSplat && !N1ConstOrSplat &&
4682 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4688 if (!N0ConstOrSplat && !N1ConstOrSplat &&
4693 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4702 const APInt &C1 = N1C->getAPIntValue();
4718 !Attr.hasFnAttr(Attribute::MinSize)) {
4722 return DAG.
getNode(LogicOp, dl, VT, IsXZero, IsYZero);
4753 const APInt &C1 = N1C->getAPIntValue();
4769 if ((
C->getAPIntValue()+1).isPowerOf2()) {
4770 MinBits =
C->getAPIntValue().countr_one();
4781 MinBits = LN0->getMemoryVT().getSizeInBits();
4785 MinBits = LN0->getMemoryVT().getSizeInBits();
4796 MinBits >= ReqdBits) {
4801 if (MinBits == 1 && C1 == 1)
4820 if (TopSetCC.
getValueType() == MVT::i1 && VT == MVT::i1 &&
4854 unsigned bestWidth = 0, bestOffset = 0;
4855 if (Lod->isSimple() && Lod->isUnindexed() &&
4856 (Lod->getMemoryVT().isByteSized() ||
4858 unsigned memWidth = Lod->getMemoryVT().getStoreSizeInBits();
4860 unsigned maskWidth = origWidth;
4864 origWidth = Lod->getMemoryVT().getSizeInBits();
4868 for (
unsigned width = 8; width < origWidth; width *= 2) {
4873 unsigned maxOffset = origWidth - width;
4874 for (
unsigned offset = 0; offset <= maxOffset; offset += 8) {
4875 if (Mask.isSubsetOf(newMask)) {
4876 unsigned ptrOffset =
4878 unsigned IsFast = 0;
4879 assert((ptrOffset % 8) == 0 &&
"Non-Bytealigned pointer offset");
4884 *DAG.
getContext(), Layout, newVT, Lod->getAddressSpace(),
4885 NewAlign, Lod->getMemOperand()->getFlags(), &IsFast) &&
4887 bestOffset = ptrOffset / 8;
4888 bestMask = Mask.lshr(offset);
4901 SDValue Ptr = Lod->getBasePtr();
4902 if (bestOffset != 0)
4905 DAG.
getLoad(newVT, dl, Lod->getChain(), Ptr,
4906 Lod->getPointerInfo().getWithOffset(bestOffset),
4907 Lod->getBaseAlign());
4986 ExtDstTy != ExtSrcTy &&
"Unexpected types!");
4993 return DAG.
getSetCC(dl, VT, ZextOp,
4995 }
else if ((N1C->isZero() || N1C->isOne()) &&
5042 return DAG.
getSetCC(dl, VT, Val, N1,
5045 }
else if (N1C->isOne()) {
5128 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1,
Cond, DCI, dl))
5135 const APInt &C1 = N1C->getAPIntValue();
5137 APInt MinVal, MaxVal;
5159 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
5179 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
5227 if (
SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift(
5228 VT, N0, N1,
Cond, DCI, dl))
5235 bool CmpZero = N1C->isZero();
5236 bool CmpNegOne = N1C->isAllOnes();
5237 if ((CmpZero || CmpNegOne) && N0.
hasOneUse()) {
5240 unsigned EltBits = V.getScalarValueSizeInBits();
5241 if (V.getOpcode() !=
ISD::OR || (EltBits % 2) != 0)
5249 RHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
5252 Hi = RHS.getOperand(0);
5257 LHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
5260 Hi = LHS.getOperand(0);
5268 unsigned HalfBits = EltBits / 2;
5279 if (IsConcat(N0,
Lo,
Hi))
5280 return MergeConcat(
Lo,
Hi);
5318 const APInt &C1 = N1C->getAPIntValue();
5333 unsigned ShCt = AndRHS->getAPIntValue().logBase2();
5334 if (AndRHS->getAPIntValue().isPowerOf2() &&
5341 }
else if (
Cond ==
ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
5361 const APInt &AndRHSC = AndRHS->getAPIntValue();
5413 return DAG.
getSetCC(dl, VT, Shift, CmpRHS, NewCond);
5421 assert(!CFP->getValueAPF().isNaN() &&
"Unexpected NaN value");
5442 !
isFPImmLegal(CFP->getValueAPF(), CFP->getValueType(0))) {
5443 bool IsFabs = N0.
getOpcode() == ISD::FABS;
5461 if (CFP->getValueAPF().isInfinity()) {
5462 bool IsNegInf = CFP->getValueAPF().isNegative();
5473 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5482 "Integer types should be handled by FoldSetCC");
5488 if (UOF ==
unsigned(EqTrue))
5493 if (NewCond !=
Cond &&
5496 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5503 if ((isSignedIntSetCC(
Cond) || isUnsignedIntSetCC(
Cond)) &&
5540 bool LegalRHSImm =
false;
5548 DAG.
getConstant(RHSC->getAPIntValue() - LHSR->getAPIntValue(),
5556 DAG.
getConstant(LHSR->getAPIntValue() ^ RHSC->getAPIntValue(),
5566 DAG.
getConstant(SUBC->getAPIntValue() - RHSC->getAPIntValue(),
5571 if (RHSC->getValueType(0).getSizeInBits() <= 64)
5580 if (
SDValue V = foldSetCCWithBinOp(VT, N0, N1,
Cond, dl, DCI))
5586 if (
SDValue V = foldSetCCWithBinOp(VT, N1, N0,
Cond, dl, DCI))
5589 if (
SDValue V = foldSetCCWithAnd(VT, N0, N1,
Cond, dl, DCI))
5592 if (
SDValue V = foldSetCCWithOr(VT, N0, N1,
Cond, dl, DCI))
5601 if (!
isIntDivCheap(VT, Attr) && !Attr.hasFnAttr(Attribute::MinSize)) {
5603 if (
SDValue Folded = buildUREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5606 if (
SDValue Folded = buildSREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5619 N0 = DAG.
getNOT(dl, Temp, OpVT);
5628 Temp = DAG.
getNOT(dl, N0, OpVT);
5635 Temp = DAG.
getNOT(dl, N1, OpVT);
5642 Temp = DAG.
getNOT(dl, N0, OpVT);
5649 Temp = DAG.
getNOT(dl, N1, OpVT);
5658 N0 = DAG.
getNode(ExtendCode, dl, VT, N0);
5686 GA = GASD->getGlobal();
5687 Offset += GASD->getOffset();
5691 if (
N->isAnyAdd()) {
5696 Offset += V->getSExtValue();
5701 Offset += V->getSExtValue();
5722 unsigned S = Constraint.
size();
5725 switch (Constraint[0]) {
5756 if (S > 1 && Constraint[0] ==
'{' && Constraint[S - 1] ==
'}') {
5757 if (S == 8 && Constraint.
substr(1, 6) ==
"memory")
5785 std::vector<SDValue> &
Ops,
5788 if (Constraint.
size() > 1)
5791 char ConstraintLetter = Constraint[0];
5792 switch (ConstraintLetter) {
5812 bool IsBool =
C->getConstantIntValue()->getBitWidth() == 1;
5822 if (ConstraintLetter !=
'n') {
5825 GA->getValueType(0),
5826 Offset + GA->getOffset()));
5831 BA->getBlockAddress(), BA->getValueType(0),
5832 Offset + BA->getOffset(), BA->getTargetFlags()));
5840 const unsigned OpCode =
Op.getOpcode();
5843 Op =
Op.getOperand(1);
5847 Op =
Op.getOperand(0);
5864std::pair<unsigned, const TargetRegisterClass *>
5870 assert(*(Constraint.
end() - 1) ==
'}' &&
"Not a brace enclosed constraint?");
5875 std::pair<unsigned, const TargetRegisterClass *> R =
5887 std::pair<unsigned, const TargetRegisterClass *> S =
5888 std::make_pair(PR, RC);
5933 unsigned maCount = 0;
5939 unsigned LabelNo = 0;
5942 ConstraintOperands.emplace_back(std::move(CI));
5946 if (OpInfo.multipleAlternatives.size() > maCount)
5947 maCount = OpInfo.multipleAlternatives.size();
5949 OpInfo.ConstraintVT = MVT::Other;
5952 switch (OpInfo.Type) {
5955 if (OpInfo.isIndirect) {
5956 OpInfo.CallOperandVal =
Call.getArgOperand(ArgNo);
5962 assert(!
Call.getType()->isVoidTy() &&
"Bad inline asm!");
5964 OpInfo.ConstraintVT =
5968 assert(ResNo == 0 &&
"Asm only has one result!");
5969 OpInfo.ConstraintVT =
5975 OpInfo.CallOperandVal =
Call.getArgOperand(ArgNo);
5986 if (OpInfo.CallOperandVal) {
5987 llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
5988 if (OpInfo.isIndirect) {
5989 OpTy =
Call.getParamElementType(ArgNo);
5990 assert(OpTy &&
"Indirect operand must have elementtype attribute");
5995 if (STy->getNumElements() == 1)
5996 OpTy = STy->getElementType(0);
6001 unsigned BitSize =
DL.getTypeSizeInBits(OpTy);
6022 if (!ConstraintOperands.empty()) {
6024 unsigned bestMAIndex = 0;
6025 int bestWeight = -1;
6031 for (maIndex = 0; maIndex < maCount; ++maIndex) {
6033 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
6034 cIndex != eIndex; ++cIndex) {
6043 if (OpInfo.hasMatchingInput()) {
6045 if (OpInfo.ConstraintVT !=
Input.ConstraintVT) {
6046 if ((OpInfo.ConstraintVT.isInteger() !=
6047 Input.ConstraintVT.isInteger()) ||
6048 (OpInfo.ConstraintVT.getSizeInBits() !=
6049 Input.ConstraintVT.getSizeInBits())) {
6060 weightSum += weight;
6063 if (weightSum > bestWeight) {
6064 bestWeight = weightSum;
6065 bestMAIndex = maIndex;
6072 cInfo.selectAlternative(bestMAIndex);
6077 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
6078 cIndex != eIndex; ++cIndex) {
6085 if (OpInfo.hasMatchingInput()) {
6088 if (OpInfo.ConstraintVT !=
Input.ConstraintVT) {
6089 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
6091 OpInfo.ConstraintVT);
6092 std::pair<unsigned, const TargetRegisterClass *> InputRC =
6094 Input.ConstraintVT);
6095 const bool OutOpIsIntOrFP = OpInfo.ConstraintVT.isInteger() ||
6096 OpInfo.ConstraintVT.isFloatingPoint();
6097 const bool InOpIsIntOrFP =
Input.ConstraintVT.isInteger() ||
6098 Input.ConstraintVT.isFloatingPoint();
6099 if ((OutOpIsIntOrFP != InOpIsIntOrFP) ||
6100 (MatchRC.second != InputRC.second)) {
6102 " with a matching output constraint of"
6103 " incompatible type!");
6109 return ConstraintOperands;
6144 if (maIndex >= (
int)
info.multipleAlternatives.size())
6145 rCodes = &
info.Codes;
6147 rCodes = &
info.multipleAlternatives[maIndex].Codes;
6151 for (
const std::string &rCode : *rCodes) {
6154 if (weight > BestWeight)
6155 BestWeight = weight;
6168 Value *CallOperandVal =
info.CallOperandVal;
6171 if (!CallOperandVal)
6174 switch (*constraint) {
6238 Ret.reserve(OpInfo.Codes.size());
6253 Ret.emplace_back(Code, CType);
6271 "need immediate or other");
6276 std::vector<SDValue> ResultOps;
6278 return !ResultOps.empty();
6286 assert(!OpInfo.Codes.empty() &&
"Must have at least one constraint");
6289 if (OpInfo.Codes.size() == 1) {
6290 OpInfo.ConstraintCode = OpInfo.Codes[0];
6297 unsigned BestIdx = 0;
6298 for (
const unsigned E =
G.size();
6305 if (BestIdx + 1 == E) {
6311 OpInfo.ConstraintCode =
G[BestIdx].first;
6312 OpInfo.ConstraintType =
G[BestIdx].second;
6316 if (OpInfo.ConstraintCode ==
"X" && OpInfo.CallOperandVal) {
6320 Value *v = OpInfo.CallOperandVal;
6326 OpInfo.ConstraintCode =
"i";
6333 OpInfo.ConstraintCode = Repl;
6347 EVT VT =
N->getValueType(0);
6352 bool UseSRA =
false;
6358 APInt Divisor =
C->getAPIntValue();
6380 "Expected matchUnaryPredicate to return one element for scalable "
6387 Factor = Factors[0];
6405 EVT VT =
N->getValueType(0);
6410 bool UseSRL =
false;
6416 APInt Divisor =
C->getAPIntValue();
6441 "Expected matchUnaryPredicate to return one element for scalable "
6448 Factor = Factors[0];
6491 EVT VT =
N->getValueType(0);
6527 bool IsAfterLegalization,
6528 bool IsAfterLegalTypes,
6531 EVT VT =
N->getValueType(0);
6557 if (
N->getFlags().hasExact())
6566 const APInt &Divisor =
C->getAPIntValue();
6568 int NumeratorFactor = 0;
6579 NumeratorFactor = 1;
6582 NumeratorFactor = -1;
6599 SDValue MagicFactor, Factor, Shift, ShiftMask;
6607 Shifts.
size() == 1 && ShiftMasks.
size() == 1 &&
6608 "Expected matchUnaryPredicate to return one element for scalable "
6616 MagicFactor = MagicFactors[0];
6617 Factor = Factors[0];
6619 ShiftMask = ShiftMasks[0];
6665 SDValue Q = GetMULHS(N0, MagicFactor);
6695 bool IsAfterLegalization,
6696 bool IsAfterLegalTypes,
6699 EVT VT =
N->getValueType(0);
6725 if (
N->getFlags().hasExact())
6735 bool UseNPQ =
false, UsePreShift =
false, UsePostShift =
false;
6741 const APInt& Divisor =
C->getAPIntValue();
6743 SDValue PreShift, MagicFactor, NPQFactor, PostShift;
6747 if (Divisor.
isOne()) {
6748 PreShift = PostShift = DAG.
getUNDEF(ShSVT);
6749 MagicFactor = NPQFactor = DAG.
getUNDEF(SVT);
6753 Divisor, std::min(KnownLeadingZeros, Divisor.
countl_zero()));
6758 "We shouldn't generate an undefined shift!");
6760 "We shouldn't generate an undefined shift!");
6762 "Unexpected pre-shift");
6769 UseNPQ |= magics.
IsAdd;
6770 UsePreShift |= magics.
PreShift != 0;
6785 SDValue PreShift, PostShift, MagicFactor, NPQFactor;
6793 NPQFactors.
size() == 1 && PostShifts.
size() == 1 &&
6794 "Expected matchUnaryPredicate to return one for scalable vectors");
6801 PreShift = PreShifts[0];
6802 MagicFactor = MagicFactors[0];
6803 PostShift = PostShifts[0];
6855 Q = GetMULHU(Q, MagicFactor);
6868 NPQ = GetMULHU(NPQ, NPQFactor);
6887 return DAG.
getSelect(dl, VT, IsOne, N0, Q);
6901 if (SplatValue != Values.
end()) {
6906 Replacement = *SplatValue;
6910 if (!AlternativeReplacement)
6913 Replacement = AlternativeReplacement;
6923SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT,
SDValue REMNode,
6926 DAGCombinerInfo &DCI,
6927 const SDLoc &
DL)
const {
6929 if (
SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
6931 for (SDNode *
N : Built)
6932 DCI.AddToWorklist(
N);
6940TargetLowering::prepareUREMEqFold(EVT SETCCVT,
SDValue REMNode,
6942 DAGCombinerInfo &DCI,
const SDLoc &
DL,
6943 SmallVectorImpl<SDNode *> &Created)
const {
6950 "Only applicable for (in)equality comparisons.");
6952 SelectionDAG &DAG = DCI.DAG;
6963 bool ComparingWithAllZeros =
true;
6964 bool AllComparisonsWithNonZerosAreTautological =
true;
6965 bool HadTautologicalLanes =
false;
6966 bool AllLanesAreTautological =
true;
6967 bool HadEvenDivisor =
false;
6968 bool AllDivisorsArePowerOfTwo =
true;
6969 bool HadTautologicalInvertedLanes =
false;
6972 auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) {
6978 const APInt &
Cmp = CCmp->getAPIntValue();
6980 ComparingWithAllZeros &=
Cmp.isZero();
6986 bool TautologicalInvertedLane =
D.ule(Cmp);
6987 HadTautologicalInvertedLanes |= TautologicalInvertedLane;
6992 bool TautologicalLane =
D.isOne() || TautologicalInvertedLane;
6993 HadTautologicalLanes |= TautologicalLane;
6994 AllLanesAreTautological &= TautologicalLane;
7000 AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
7003 unsigned K =
D.countr_zero();
7004 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
7005 APInt D0 =
D.lshr(K);
7008 HadEvenDivisor |= (
K != 0);
7011 AllDivisorsArePowerOfTwo &= D0.
isOne();
7015 unsigned W =
D.getBitWidth();
7017 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
7030 "We are expecting that K is always less than all-ones for ShSVT");
7033 if (TautologicalLane) {
7059 if (AllLanesAreTautological)
7064 if (AllDivisorsArePowerOfTwo)
7069 if (HadTautologicalLanes) {
7084 "Expected matchBinaryPredicate to return one element for "
7095 if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
7099 "Expecting that the types on LHS and RHS of comparisons match.");
7109 if (HadEvenDivisor) {
7122 if (!HadTautologicalInvertedLanes)
7128 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
7135 SDValue TautologicalInvertedChannels =
7145 DL, SETCCVT, SETCCVT);
7147 Replacement, NewCC);
7155 TautologicalInvertedChannels);
7165SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT,
SDValue REMNode,
7168 DAGCombinerInfo &DCI,
7169 const SDLoc &
DL)
const {
7171 if (
SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
7173 assert(Built.
size() <= 7 &&
"Max size prediction failed.");
7174 for (SDNode *
N : Built)
7175 DCI.AddToWorklist(
N);
7183TargetLowering::prepareSREMEqFold(EVT SETCCVT,
SDValue REMNode,
7185 DAGCombinerInfo &DCI,
const SDLoc &
DL,
7186 SmallVectorImpl<SDNode *> &Created)
const {
7210 "Only applicable for (in)equality comparisons.");
7212 SelectionDAG &DAG = DCI.DAG;
7226 if (!CompTarget || !CompTarget->
isZero())
7229 bool HadIntMinDivisor =
false;
7230 bool HadOneDivisor =
false;
7231 bool AllDivisorsAreOnes =
true;
7232 bool HadEvenDivisor =
false;
7233 bool NeedToApplyOffset =
false;
7234 bool AllDivisorsArePowerOfTwo =
true;
7237 auto BuildSREMPattern = [&](ConstantSDNode *
C) {
7245 APInt
D =
C->getAPIntValue();
7249 HadIntMinDivisor |=
D.isMinSignedValue();
7252 HadOneDivisor |=
D.isOne();
7253 AllDivisorsAreOnes &=
D.isOne();
7256 unsigned K =
D.countr_zero();
7257 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
7258 APInt D0 =
D.
lshr(K);
7260 if (!
D.isMinSignedValue()) {
7263 HadEvenDivisor |= (
K != 0);
7268 AllDivisorsArePowerOfTwo &= D0.
isOne();
7272 unsigned W =
D.getBitWidth();
7274 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
7280 if (!
D.isMinSignedValue()) {
7283 NeedToApplyOffset |=
A != 0;
7290 "We are expecting that A is always less than all-ones for SVT");
7292 "We are expecting that K is always less than all-ones for ShSVT");
7332 if (AllDivisorsAreOnes)
7337 if (AllDivisorsArePowerOfTwo)
7340 SDValue PVal, AVal, KVal, QVal;
7342 if (HadOneDivisor) {
7362 QAmts.
size() == 1 &&
7363 "Expected matchUnaryPredicate to return one element for scalable "
7381 if (NeedToApplyOffset) {
7393 if (HadEvenDivisor) {
7408 if (!HadIntMinDivisor)
7414 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
7449 MaskedIsZero, Fold);
7457 EVT VT =
Op.getValueType();
7480 bool LegalOps,
bool OptForSize,
7482 unsigned Depth)
const {
7484 if (
Op.getOpcode() == ISD::FNEG ||
Op.getOpcode() == ISD::VP_FNEG) {
7486 return Op.getOperand(0);
7496 EVT VT =
Op.getValueType();
7497 unsigned Opcode =
Op.getOpcode();
7501 bool IsFreeExtend = Opcode == ISD::FP_EXTEND &&
7507 auto RemoveDeadNode = [&](
SDValue N) {
7508 if (
N &&
N.getNode()->use_empty())
7517 std::list<HandleSDNode> Handles;
7528 if (LegalOps && !IsOpLegal)
7545 return !N.isUndef() && !isa<ConstantFPSDNode>(N);
7553 return N.isUndef() ||
7554 isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT,
7558 if (LegalOps && !IsOpLegal)
7575 if (!Flags.hasNoSignedZeros())
7589 Handles.emplace_back(NegX);
7600 if (NegX && (CostX <= CostY)) {
7604 RemoveDeadNode(NegY);
7613 RemoveDeadNode(NegX);
7620 if (!Flags.hasNoSignedZeros())
7645 Handles.emplace_back(NegX);
7656 if (NegX && (CostX <= CostY)) {
7660 RemoveDeadNode(NegY);
7666 if (
C->isExactlyValue(2.0) &&
Op.getOpcode() ==
ISD::FMUL)
7674 RemoveDeadNode(NegX);
7682 if (!Flags.hasNoSignedZeros())
7685 SDValue X =
Op.getOperand(0),
Y =
Op.getOperand(1), Z =
Op.getOperand(2);
7694 Handles.emplace_back(NegZ);
7702 Handles.emplace_back(NegX);
7713 if (NegX && (CostX <= CostY)) {
7714 Cost = std::min(CostX, CostZ);
7717 RemoveDeadNode(NegY);
7723 Cost = std::min(CostY, CostZ);
7726 RemoveDeadNode(NegX);
7732 case ISD::FP_EXTEND:
7736 return DAG.
getNode(Opcode,
DL, VT, NegV);
7752 RemoveDeadNode(NegLHS);
7757 Handles.emplace_back(NegLHS);
7770 RemoveDeadNode(NegLHS);
7771 RemoveDeadNode(NegRHS);
7775 Cost = std::min(CostLHS, CostRHS);
7776 return DAG.
getSelect(
DL, VT,
Op.getOperand(0), NegLHS, NegRHS);
7805 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
7818 if ((
Signed && HasSMUL_LOHI) || (!
Signed && HasUMUL_LOHI)) {
7846 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false)) {
7847 Result.push_back(
Lo);
7848 Result.push_back(
Hi);
7851 Result.push_back(Zero);
7852 Result.push_back(Zero);
7863 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
true)) {
7864 Result.push_back(
Lo);
7865 Result.push_back(
Hi);
7870 unsigned ShiftAmount = OuterBitSize - InnerBitSize;
7885 if (!MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false))
7888 Result.push_back(
Lo);
7895 Result.push_back(
Hi);
7908 if (!MakeMUL_LOHI(LL, RH,
Lo,
Hi,
false))
7915 if (!MakeMUL_LOHI(LH, RL,
Lo,
Hi,
false))
7968 N->getOperand(0),
N->getOperand(1), Result, HiLoVT,
7969 DAG, Kind, LL, LH, RL, RH);
7971 assert(Result.size() == 2);
8003 unsigned Opcode =
N->getOpcode();
8004 EVT VT =
N->getValueType(0);
8011 "Unexpected opcode");
8017 APInt Divisor = CN->getAPIntValue();
8025 if (Divisor.
uge(HalfMaxPlus1))
8043 unsigned TrailingZeros = 0;
8057 if (HalfMaxPlus1.
urem(Divisor).
isOne()) {
8058 assert(!LL == !LH &&
"Expected both input halves or no input halves!");
8060 std::tie(LL, LH) = DAG.
SplitScalar(
N->getOperand(0), dl, HiLoVT, HiLoVT);
8064 if (TrailingZeros) {
8132 std::tie(QuotL, QuotH) = DAG.
SplitScalar(Quotient, dl, HiLoVT, HiLoVT);
8133 Result.push_back(QuotL);
8134 Result.push_back(QuotH);
8140 if (TrailingZeros) {
8145 Result.push_back(RemL);
8161 EVT VT =
Node->getValueType(0);
8171 bool IsFSHL =
Node->getOpcode() == ISD::VP_FSHL;
8174 EVT ShVT = Z.getValueType();
8180 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
8181 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitWidthC, ShAmt, Mask, VL);
8182 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, IsFSHL ? ShAmt : InvShAmt, Mask,
8184 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT,
Y, IsFSHL ? InvShAmt : ShAmt, Mask,
8192 ShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, Z, BitMask, Mask, VL);
8196 InvShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, NotZ, BitMask, Mask, VL);
8199 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
8200 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitMask, ShAmt, Mask, VL);
8205 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, ShAmt, Mask, VL);
8207 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT, ShY1, InvShAmt, Mask, VL);
8210 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT, ShX1, InvShAmt, Mask, VL);
8211 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT,
Y, ShAmt, Mask, VL);
8214 return DAG.
getNode(ISD::VP_OR,
DL, VT, ShX, ShY, Mask, VL);
8219 if (
Node->isVPOpcode())
8222 EVT VT =
Node->getValueType(0);
8238 EVT ShVT = Z.getValueType();
8308 EVT VT =
Node->getValueType(0);
8326 if (!AllowVectorOps && VT.
isVector() &&
8344 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8346 HsVal = DAG.
getNode(HsOpc,
DL, VT, Op0, HsAmt);
8352 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8363 assert(
Node->getNumOperands() == 3 &&
"Not a double-shift!");
8364 EVT VT =
Node->getValueType(0);
8415 unsigned OpNo =
Node->isStrictFPOpcode() ? 1 : 0;
8417 EVT SrcVT = Src.getValueType();
8418 EVT DstVT =
Node->getValueType(0);
8422 if (SrcVT != MVT::f32 || DstVT != MVT::i64)
8425 if (
Node->isStrictFPOpcode())
8488 unsigned OpNo =
Node->isStrictFPOpcode() ? 1 : 0;
8491 EVT SrcVT = Src.getValueType();
8492 EVT DstVT =
Node->getValueType(0);
8513 if (
Node->isStrictFPOpcode()) {
8515 {
Node->getOperand(0), Src });
8516 Chain = Result.getValue(1);
8530 if (
Node->isStrictFPOpcode()) {
8532 Node->getOperand(0),
true);
8538 bool Strict =
Node->isStrictFPOpcode() ||
8557 if (
Node->isStrictFPOpcode()) {
8559 { Chain, Src, FltOfs });
8581 Result = DAG.
getSelect(dl, DstVT, Sel, True, False);
8591 if (
Node->isStrictFPOpcode())
8595 EVT SrcVT = Src.getValueType();
8596 EVT DstVT =
Node->getValueType(0);
8600 if (
Node->getFlags().hasNonNeg() &&
8648 unsigned Opcode =
Node->getOpcode();
8649 assert((Opcode == ISD::FMINNUM || Opcode == ISD::FMAXNUM ||
8653 if (
Node->getFlags().hasNoNaNs()) {
8655 EVT VT =
Node->getValueType(0);
8674 EVT VT =
Node->getValueType(0);
8677 "Expanding fminnum/fmaxnum for scalable vectors is undefined.");
8681 Node->getOpcode() == ISD::FMINNUM ? ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE;
8687 if (!
Node->getFlags().hasNoNaNs()) {
8700 return DAG.
getNode(NewOp, dl, VT, Quiet0, Quiet1,
Node->getFlags());
8706 if ((
Node->getFlags().hasNoNaNs() ||
8709 (
Node->getFlags().hasNoSignedZeros() ||
8712 unsigned IEEE2018Op =
8713 Node->getOpcode() == ISD::FMINNUM ? ISD::FMINIMUM : ISD::FMAXIMUM;
8715 return DAG.
getNode(IEEE2018Op, dl, VT,
Node->getOperand(0),
8716 Node->getOperand(1),
Node->getFlags());
8733 unsigned Opc =
N->getOpcode();
8734 EVT VT =
N->getValueType(0);
8736 bool IsMax =
Opc == ISD::FMAXIMUM;
8742 unsigned CompOpcIeee = IsMax ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;
8743 unsigned CompOpc = IsMax ? ISD::FMAXNUM : ISD::FMINNUM;
8747 bool MinMaxMustRespectOrderedZero =
false;
8751 MinMaxMustRespectOrderedZero =
true;
8765 if (!
N->getFlags().hasNoNaNs() &&
8774 if (!MinMaxMustRespectOrderedZero && !
N->getFlags().hasNoSignedZeros() &&
8797 unsigned Opc =
Node->getOpcode();
8798 EVT VT =
Node->getValueType(0);
8800 bool IsMax =
Opc == ISD::FMAXIMUMNUM;
8804 Opc == ISD::FMINIMUMNUM ? ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE;
8807 if (!Flags.hasNoNaNs()) {
8818 return DAG.
getNode(NewOp,
DL, VT, LHS, RHS, Flags);
8823 if (Flags.hasNoNaNs() ||
8825 unsigned IEEE2019Op =
8826 Opc == ISD::FMINIMUMNUM ? ISD::FMINIMUM : ISD::FMAXIMUM;
8828 return DAG.
getNode(IEEE2019Op,
DL, VT, LHS, RHS, Flags);
8833 if ((Flags.hasNoNaNs() ||
8837 unsigned IEEE2008Op =
Opc == ISD::FMINIMUMNUM ? ISD::FMINNUM : ISD::FMAXNUM;
8839 return DAG.
getNode(IEEE2008Op,
DL, VT, LHS, RHS, Flags);
8886 bool IsOrdered = NanTest ==
fcNone;
8887 bool IsUnordered = NanTest ==
fcNan;
8890 if (!IsOrdered && !IsUnordered)
8891 return std::nullopt;
8893 if (OrderedMask ==
fcZero &&
8899 return std::nullopt;
8906 EVT OperandVT =
Op.getValueType();
8918 if (OperandVT == MVT::ppcf128) {
8921 OperandVT = MVT::f64;
8928 bool IsF80 = (ScalarFloatVT == MVT::f80);
8932 if (Flags.hasNoFPExcept() &&
8935 bool IsInvertedFP =
false;
8939 FPTestMask = InvertedFPCheck;
8940 IsInvertedFP =
true;
8952 OrderedFPTestMask = FPTestMask;
8954 const bool IsOrdered = FPTestMask == OrderedFPTestMask;
8956 if (std::optional<bool> IsCmp0 =
8959 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode,
8966 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode);
8969 if (FPTestMask ==
fcNan &&
8975 bool IsOrderedInf = FPTestMask ==
fcInf;
8978 : UnorderedCmpOpcode,
8989 IsOrderedInf ? OrderedCmpOpcode : UnorderedCmpOpcode);
8994 : UnorderedCmpOpcode,
9005 IsOrdered ? OrderedCmpOpcode : UnorderedCmpOpcode);
9024 return DAG.
getSetCC(
DL, ResultVT, Abs, SmallestNormal,
9025 IsOrdered ? OrderedOp : UnorderedOp);
9048 DAG.
getSetCC(
DL, ResultVT, Abs, SmallestNormal, IsNormalOp);
9050 return DAG.
getNode(LogicOp,
DL, ResultVT, IsFinite, IsNormal);
9057 bool IsInverted =
false;
9060 Test = InvertedCheck;
9076 const unsigned ExplicitIntBitInF80 = 63;
9077 APInt ExpMask = Inf;
9079 ExpMask.
clearBit(ExplicitIntBitInF80);
9093 const auto appendResult = [&](
SDValue PartialRes) {
9103 const auto getIntBitIsSet = [&]() ->
SDValue {
9104 if (!IntBitIsSetV) {
9105 APInt IntBitMask(BitSize, 0);
9106 IntBitMask.
setBit(ExplicitIntBitInF80);
9111 return IntBitIsSetV;
9139 appendResult(PartialRes);
9148 appendResult(ExpIsZero);
9158 else if (PartialCheck ==
fcZero)
9162 appendResult(PartialRes);
9175 appendResult(PartialRes);
9178 if (
unsigned PartialCheck =
Test &
fcInf) {
9181 else if (PartialCheck ==
fcInf)
9188 appendResult(PartialRes);
9191 if (
unsigned PartialCheck =
Test &
fcNan) {
9192 APInt InfWithQnanBit = Inf | QNaNBitMask;
9194 if (PartialCheck ==
fcNan) {
9207 }
else if (PartialCheck ==
fcQNan) {
9219 appendResult(PartialRes);
9224 APInt ExpLSB = ExpMask & ~(ExpMask.
shl(1));
9227 APInt ExpLimit = ExpMask - ExpLSB;
9240 appendResult(PartialRes);
9263 EVT VT =
Node->getValueType(0);
9270 if (!(Len <= 128 && Len % 8 == 0))
9329 for (
unsigned Shift = 8; Shift < Len; Shift *= 2) {
9340 EVT VT =
Node->getValueType(0);
9349 if (!(Len <= 128 && Len % 8 == 0))
9361 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5;
9364 Tmp1 = DAG.
getNode(ISD::VP_AND, dl, VT,
9368 Op = DAG.
getNode(ISD::VP_SUB, dl, VT,
Op, Tmp1, Mask, VL);
9371 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op, Mask33, Mask, VL);
9372 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT,
9376 Op = DAG.
getNode(ISD::VP_ADD, dl, VT, Tmp2, Tmp3, Mask, VL);
9381 Tmp5 = DAG.
getNode(ISD::VP_ADD, dl, VT,
Op, Tmp4, Mask, VL);
9382 Op = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp5, Mask0F, Mask, VL);
9393 V = DAG.
getNode(ISD::VP_MUL, dl, VT,
Op, Mask01, Mask, VL);
9396 for (
unsigned Shift = 8; Shift < Len; Shift *= 2) {
9398 V = DAG.
getNode(ISD::VP_ADD, dl, VT, V,
9399 DAG.
getNode(ISD::VP_SHL, dl, VT, V, ShiftC, Mask, VL),
9409 EVT VT =
Node->getValueType(0);
9448 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
9459 EVT VT =
Node->getValueType(0);
9473 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
9476 DAG.
getNode(ISD::VP_SRL, dl, VT,
Op, Tmp, Mask, VL), Mask,
9481 return DAG.
getNode(ISD::VP_CTPOP, dl, VT,
Op, Mask, VL);
9490 :
APInt(64, 0x0218A392CD3D5DBFULL);
9504 for (
unsigned i = 0; i <
BitWidth; i++) {
9530 EVT VT =
Node->getValueType(0);
9590 EVT VT =
Node->getValueType(0);
9598 return DAG.
getNode(ISD::VP_CTPOP, dl, VT, Tmp, Mask, VL);
9612 EVT SrcVT = Source.getValueType();
9613 EVT ResVT =
N->getValueType(0);
9622 Source = DAG.
getNode(ISD::VP_SETCC,
DL, SrcVT, Source, AllZero,
9630 DAG.
getNode(ISD::VP_SELECT,
DL, ResVecVT, Source, StepVec,
Splat, EVL);
9631 return DAG.
getNode(ISD::VP_REDUCE_UMIN,
DL, ResVT, ExtEVL,
Select, Mask, EVL);
9638 EVT MaskVT = Mask.getValueType();
9648 true, &VScaleRange);
9667 SDValue HighestIdx = DAG.
getNode(ISD::VECREDUCE_UMAX,
DL, StepVT, ActiveElts);
9672 bool IsNegative)
const {
9674 EVT VT =
N->getValueType(0);
9728 EVT VT =
N->getValueType(0);
9806 EVT VT =
N->getValueType(0);
9810 unsigned Opc =
N->getOpcode();
9819 "Unknown AVG node");
9831 return DAG.
getNode(ShiftOpc, dl, VT, Sum,
9840 LHS = DAG.
getNode(ExtOpc, dl, ExtVT, LHS);
9841 RHS = DAG.
getNode(ExtOpc, dl, ExtVT, RHS);
9883 return DAG.
getNode(SumOpc, dl, VT, Sign, Shift);
9888 EVT VT =
N->getValueType(0);
9895 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
9958 EVT VT =
N->getValueType(0);
9967 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
9976 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp1, Tmp2, Mask, EVL);
9986 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9990 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
9991 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
9992 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
9996 Tmp7 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
10000 Tmp6 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
10001 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
10004 Tmp5 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
10005 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
10010 Tmp4 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp4,
10011 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
10014 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp3,
10015 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
10018 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
10019 DAG.
getConstant(255ULL << 8, dl, VT), Mask, EVL);
10022 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp7, Mask, EVL);
10023 Tmp6 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp6, Tmp5, Mask, EVL);
10024 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
10025 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
10026 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp6, Mask, EVL);
10027 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
10028 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp4, Mask, EVL);
10034 EVT VT =
N->getValueType(0);
10077 for (
unsigned I = 0, J = Sz-1;
I < Sz; ++
I, --J) {
10094 assert(
N->getOpcode() == ISD::VP_BITREVERSE);
10097 EVT VT =
N->getValueType(0);
10116 Tmp = (Sz > 8 ? DAG.
getNode(ISD::VP_BSWAP, dl, VT,
Op, Mask, EVL) :
Op);
10121 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
10127 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
10132 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
10138 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
10143 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
10149 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
10155std::pair<SDValue, SDValue>
10159 SDValue Chain = LD->getChain();
10160 SDValue BasePTR = LD->getBasePtr();
10161 EVT SrcVT = LD->getMemoryVT();
10162 EVT DstVT = LD->getValueType(0);
10194 LD->getPointerInfo(), SrcIntVT, LD->getBaseAlign(),
10195 LD->getMemOperand()->getFlags(), LD->getAAInfo());
10198 for (
unsigned Idx = 0; Idx < NumElem; ++Idx) {
10199 unsigned ShiftIntoIdx =
10210 Scalar = DAG.
getNode(ExtendOp, SL, DstEltVT, Scalar);
10217 return std::make_pair(
Value, Load.getValue(1));
10226 for (
unsigned Idx = 0; Idx < NumElem; ++Idx) {
10228 ExtType, SL, DstEltVT, Chain, BasePTR,
10229 LD->getPointerInfo().getWithOffset(Idx * Stride), SrcEltVT,
10230 LD->getBaseAlign(), LD->getMemOperand()->getFlags(), LD->getAAInfo());
10241 return std::make_pair(
Value, NewChain);
10248 SDValue Chain = ST->getChain();
10249 SDValue BasePtr = ST->getBasePtr();
10251 EVT StVT = ST->getMemoryVT();
10277 for (
unsigned Idx = 0; Idx < NumElem; ++Idx) {
10281 unsigned ShiftIntoIdx =
10290 return DAG.
getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
10291 ST->getBaseAlign(), ST->getMemOperand()->getFlags(),
10297 assert(Stride &&
"Zero stride!");
10301 for (
unsigned Idx = 0; Idx < NumElem; ++Idx) {
10309 Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride),
10310 MemSclVT, ST->getBaseAlign(), ST->getMemOperand()->getFlags(),
10319std::pair<SDValue, SDValue>
10322 "unaligned indexed loads not implemented!");
10323 SDValue Chain = LD->getChain();
10324 SDValue Ptr = LD->getBasePtr();
10325 EVT VT = LD->getValueType(0);
10326 EVT LoadedVT = LD->getMemoryVT();
10342 LD->getMemOperand());
10343 SDValue Result = DAG.
getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
10344 if (LoadedVT != VT)
10348 return std::make_pair(Result, newLoad.
getValue(1));
10356 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
10362 SDValue StackPtr = StackBase;
10366 EVT StackPtrVT = StackPtr.getValueType();
10372 for (
unsigned i = 1; i < NumRegs; i++) {
10375 RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(
Offset),
10376 LD->getBaseAlign(), LD->getMemOperand()->getFlags(), LD->getAAInfo());
10379 Load.getValue(1), dl, Load, StackPtr,
10390 8 * (LoadedBytes -
Offset));
10393 LD->getPointerInfo().getWithOffset(
Offset), MemVT, LD->getBaseAlign(),
10394 LD->getMemOperand()->getFlags(), LD->getAAInfo());
10399 Load.getValue(1), dl, Load, StackPtr,
10406 Load = DAG.
getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
10411 return std::make_pair(Load, TF);
10415 "Unaligned load of unsupported type.");
10424 Align Alignment = LD->getBaseAlign();
10425 unsigned IncrementSize = NumBits / 8;
10436 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10441 LD->getPointerInfo().getWithOffset(IncrementSize),
10442 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10445 Hi = DAG.
getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
10446 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10451 LD->getPointerInfo().getWithOffset(IncrementSize),
10452 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10464 return std::make_pair(Result, TF);
10470 "unaligned indexed stores not implemented!");
10471 SDValue Chain = ST->getChain();
10472 SDValue Ptr = ST->getBasePtr();
10473 SDValue Val = ST->getValue();
10475 Align Alignment = ST->getBaseAlign();
10477 EVT StoreMemVT = ST->getMemoryVT();
10493 Result = DAG.
getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
10494 Alignment, ST->getMemOperand()->getFlags());
10505 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
10513 Chain, dl, Val, StackPtr,
10516 EVT StackPtrVT = StackPtr.getValueType();
10524 for (
unsigned i = 1; i < NumRegs; i++) {
10527 RegVT, dl, Store, StackPtr,
10531 ST->getPointerInfo().getWithOffset(
Offset),
10532 ST->getBaseAlign(),
10533 ST->getMemOperand()->getFlags()));
10552 Load.getValue(1), dl, Load, Ptr,
10553 ST->getPointerInfo().getWithOffset(
Offset), LoadMemVT,
10554 ST->getBaseAlign(), ST->getMemOperand()->getFlags(), ST->getAAInfo()));
10561 "Unaligned store of unknown type.");
10565 unsigned IncrementSize = NumBits / 8;
10585 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
10586 ST->getMemOperand()->getFlags());
10591 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
10592 ST->getMemOperand()->getFlags(), ST->getAAInfo());
10603 bool IsCompressedMemory)
const {
10606 EVT MaskVT = Mask.getValueType();
10608 "Incompatible types of Data and Mask");
10609 if (IsCompressedMemory) {
10612 "Cannot currently handle compressed memory with scalable vectors");
10618 MaskIntVT = MVT::i32;
10642 "Cannot index a scalable vector within a fixed-width vector");
10653 if (IdxCst->getZExtValue() + (NumSubElts - 1) < NElts)
10667 unsigned MaxIndex = NumSubElts < NElts ? NElts - NumSubElts : 0;
10677 DAG, VecPtr, VecVT,
10679 Index, PtrArithFlags);
10695 "Converting bits to bytes lost precision");
10697 "Sub-vector must be a vector with matching element type");
10701 EVT IdxVT = Index.getValueType();
10732 assert(EmuTlsVar &&
"Cannot find EmuTlsVar ");
10733 Args.emplace_back(DAG.
getGlobalAddress(EmuTlsVar, dl, PtrVT), VoidPtrType);
10740 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
10749 "Emulated TLS must have zero offset in GlobalAddressSDNode");
10750 return CallResult.first;
10761 EVT VT =
Op.getOperand(0).getValueType();
10763 if (VT.
bitsLT(MVT::i32)) {
10781 unsigned Opcode =
Node->getOpcode();
10823 {Op0, Op1, DAG.getCondCode(CC)})) {
10830 {Op0, Op1, DAG.getCondCode(CC)})) {
10858 unsigned Opcode =
Node->getOpcode();
10861 EVT VT = LHS.getValueType();
10864 assert(VT == RHS.getValueType() &&
"Expected operands to be the same type");
10880 unsigned OverflowOp;
10895 llvm_unreachable(
"Expected method to receive signed or unsigned saturation "
10896 "addition or subtraction node.");
10904 unsigned BitWidth = LHS.getScalarValueSizeInBits();
10907 SDValue SumDiff = Result.getValue(0);
10908 SDValue Overflow = Result.getValue(1);
10930 return DAG.
getSelect(dl, VT, Overflow, Zero, SumDiff);
10950 if (LHSIsNonNegative || RHSIsNonNegative) {
10952 return DAG.
getSelect(dl, VT, Overflow, SatMax, SumDiff);
10958 if (LHSIsNegative || RHSIsNegative) {
10960 return DAG.
getSelect(dl, VT, Overflow, SatMin, SumDiff);
10970 return DAG.
getSelect(dl, VT, Overflow, Result, SumDiff);
10974 unsigned Opcode =
Node->getOpcode();
10977 EVT VT = LHS.getValueType();
10978 EVT ResVT =
Node->getValueType(0);
11010 unsigned Opcode =
Node->getOpcode();
11014 EVT VT = LHS.getValueType();
11019 "Expected a SHLSAT opcode");
11020 assert(VT == RHS.getValueType() &&
"Expected operands to be the same type");
11052 EVT VT = LHS.getValueType();
11053 assert(RHS.getValueType() == VT &&
"Mismatching operand types");
11055 assert((HiLHS && HiRHS) || (!HiLHS && !HiRHS));
11057 "Signed flag should only be set when HiLHS and RiRHS are null");
11065 unsigned HalfBits = Bits / 2;
11110 EVT VT = LHS.getValueType();
11111 assert(RHS.getValueType() == VT &&
"Mismatching operand types");
11115 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
11116 if (WideVT == MVT::i16)
11117 LC = RTLIB::MUL_I16;
11118 else if (WideVT == MVT::i32)
11119 LC = RTLIB::MUL_I32;
11120 else if (WideVT == MVT::i64)
11121 LC = RTLIB::MUL_I64;
11122 else if (WideVT == MVT::i128)
11123 LC = RTLIB::MUL_I128;
11153 SDValue Args[] = {LHS, HiLHS, RHS, HiRHS};
11154 Ret =
makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
11156 SDValue Args[] = {HiLHS, LHS, HiRHS, RHS};
11157 Ret =
makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
11160 "Ret value is a collection of constituent nodes holding result.");
11163 Lo = Ret.getOperand(0);
11164 Hi = Ret.getOperand(1);
11166 Lo = Ret.getOperand(1);
11167 Hi = Ret.getOperand(0);
11177 "Expected a fixed point multiplication opcode");
11182 EVT VT = LHS.getValueType();
11183 unsigned Scale =
Node->getConstantOperandVal(2);
11199 SDValue Product = Result.getValue(0);
11200 SDValue Overflow = Result.getValue(1);
11211 Result = DAG.
getSelect(dl, VT, ProdNeg, SatMin, SatMax);
11212 return DAG.
getSelect(dl, VT, Overflow, Result, Product);
11216 SDValue Product = Result.getValue(0);
11217 SDValue Overflow = Result.getValue(1);
11221 return DAG.
getSelect(dl, VT, Overflow, SatMax, Product);
11226 "Expected scale to be less than the number of bits if signed or at "
11227 "most the number of bits if unsigned.");
11228 assert(LHS.getValueType() == RHS.getValueType() &&
11229 "Expected both operands to be the same type");
11241 Lo = Result.getValue(0);
11242 Hi = Result.getValue(1);
11245 Hi = DAG.
getNode(HiOp, dl, VT, LHS, RHS);
11263 if (Scale == VTSize)
11309 return DAG.
getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
11334 "Expected a fixed point division opcode");
11336 EVT VT = LHS.getValueType();
11358 if (LHSLead + RHSTrail < Scale + (
unsigned)(Saturating &&
Signed))
11361 unsigned LHSShift = std::min(LHSLead, Scale);
11362 unsigned RHSShift = Scale - LHSShift;
11426 { LHS, RHS, CarryIn });
11433 LHS.getValueType(), LHS, RHS);
11435 EVT ResultType =
Node->getValueType(1);
11446 DAG.
getSetCC(dl, SetCCType, Result,
11455 SetCC = DAG.
getSetCC(dl, SetCCType, Result, LHS, CC);
11468 LHS.getValueType(), LHS, RHS);
11470 EVT ResultType =
Node->getValueType(1);
11477 SDValue Sat = DAG.
getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS);
11496 DAG.
getNode(
ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
11497 ResultType, ResultType);
11503 EVT VT =
Node->getValueType(0);
11511 const APInt &
C = RHSC->getAPIntValue();
11513 if (
C.isPowerOf2()) {
11515 bool UseArithShift =
isSigned && !
C.isMinSignedValue();
11518 Overflow = DAG.
getSetCC(dl, SetCCVT,
11520 dl, VT, Result, ShiftAmt),
11533 static const unsigned Ops[2][3] =
11559 Result = BottomHalf;
11566 Overflow = DAG.
getSetCC(dl, SetCCVT, TopHalf,
11571 EVT RType =
Node->getValueType(1);
11576 "Unexpected result type for S/UMULO legalization");
11584 EVT VT =
Op.getValueType();
11607 "Expanding reductions for scalable vectors is undefined.");
11616 for (
unsigned i = 1; i < NumElts; i++)
11617 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res,
Ops[i],
Node->getFlags());
11620 if (EltVT !=
Node->getValueType(0))
11636 "Expanding reductions for scalable vectors is undefined.");
11646 for (
unsigned i = 0; i < NumElts; i++)
11647 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res,
Ops[i], Flags);
11654 EVT VT =
Node->getValueType(0);
11663 Result = DAG.
getNode(DivRemOpc, dl, VTs, Dividend, Divisor).
getValue(1);
11668 SDValue Divide = DAG.
getNode(DivOpc, dl, VT, Dividend, Divisor);
11683 EVT SrcVT = Src.getValueType();
11684 EVT DstVT =
Node->getValueType(0);
11689 assert(SatWidth <= DstWidth &&
11690 "Expected saturation width smaller than result width");
11694 APInt MinInt, MaxInt;
11705 if (SrcVT == MVT::f16 || SrcVT == MVT::bf16) {
11706 Src = DAG.
getNode(ISD::FP_EXTEND, dl, MVT::f32, Src);
11707 SrcVT = Src.getValueType();
11729 if (AreExactFloatBounds && MinMaxLegal) {
11733 Clamped = DAG.
getNode(ISD::FMAXNUM, dl, SrcVT, Clamped, MinFloatNode);
11735 Clamped = DAG.
getNode(ISD::FMINNUM, dl, SrcVT, Clamped, MaxFloatNode);
11738 dl, DstVT, Clamped);
11750 return DAG.
getSelect(dl, DstVT, IsNan, ZeroInt, FpToInt);
11789 EVT OperandVT =
Op.getValueType();
11804 SDValue NarrowBits = DAG.
getNode(ISD::BITCAST, dl, ResultIntVT, Narrow);
11815 Op.getValueType());
11819 KeepNarrow = DAG.
getNode(
ISD::OR, dl, WideSetCCVT, KeepNarrow, AlreadyOdd);
11823 SDValue AbsNarrowAsWide = DAG.
getNode(ISD::FABS, dl, OperandVT, NarrowAsWide);
11830 SDValue Adjust = DAG.
getSelect(dl, ResultIntVT, NarrowIsRd, One, NegativeOne);
11832 Op = DAG.
getSelect(dl, ResultIntVT, KeepNarrow, NarrowBits, Adjusted);
11833 return DAG.
getNode(ISD::BITCAST, dl, ResultVT,
Op);
11839 EVT VT =
Node->getValueType(0);
11842 if (
Node->getConstantOperandVal(1) == 1) {
11843 return DAG.
getNode(ISD::FP_TO_BF16, dl, VT,
Node->getOperand(0));
11845 EVT OperandVT =
Op.getValueType();
11857 EVT I32 =
F32.changeTypeToInteger();
11883 EVT I16 = I32.
isVector() ? I32.changeVectorElementType(MVT::i16) : MVT::i16;
11885 return DAG.
getNode(ISD::BITCAST, dl, VT,
Op);
11893 assert(
Node->getValueType(0).isScalableVector() &&
11894 "Fixed length vector types expected to use SHUFFLE_VECTOR!");
11896 EVT VT =
Node->getValueType(0);
11918 EVT PtrVT = StackPtr.getValueType();
11937 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr,
11960 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr2,
11973 EVT MaskVT = Mask.getValueType();
11990 bool HasPassthru = !Passthru.
isUndef();
11996 Chain = DAG.
getStore(Chain,
DL, Passthru, StackPtr, PtrInfo);
11999 APInt PassthruSplatVal;
12000 bool IsSplatPassthru =
12003 if (IsSplatPassthru) {
12007 LastWriteVal = DAG.
getConstant(PassthruSplatVal,
DL, ScalarVT);
12008 }
else if (HasPassthru) {
12018 Popcount = DAG.
getNode(ISD::VECREDUCE_ADD,
DL, PopcountVT, Popcount);
12022 ScalarVT,
DL, Chain, LastElmtPtr,
12028 for (
unsigned I = 0;
I < NumElms;
I++) {
12032 Chain,
DL, ValI, OutPtr,
12044 if (HasPassthru &&
I == NumElms - 1) {
12054 LastWriteVal = DAG.
getSelect(
DL, ScalarVT, AllLanesSelected, ValI,
12057 Chain,
DL, LastWriteVal, OutPtr,
12062 return DAG.
getLoad(VecVT,
DL, Chain, StackPtr, PtrInfo);
12069 SDValue MulLHS =
N->getOperand(1);
12070 SDValue MulRHS =
N->getOperand(2);
12078 unsigned ExtOpcLHS, ExtOpcRHS;
12079 switch (
N->getOpcode()) {
12082 case ISD::PARTIAL_REDUCE_UMLA:
12085 case ISD::PARTIAL_REDUCE_SMLA:
12088 case ISD::PARTIAL_REDUCE_FMLA:
12089 ExtOpcLHS = ExtOpcRHS = ISD::FP_EXTEND;
12093 if (ExtMulOpVT != MulOpVT) {
12094 MulLHS = DAG.
getNode(ExtOpcLHS,
DL, ExtMulOpVT, MulLHS);
12095 MulRHS = DAG.
getNode(ExtOpcRHS,
DL, ExtMulOpVT, MulRHS);
12098 if (
N->getOpcode() == ISD::PARTIAL_REDUCE_FMLA) {
12109 std::deque<SDValue> Subvectors = {Acc};
12110 for (
unsigned I = 0;
I < ScaleFactor;
I++)
12113 unsigned FlatNode =
12117 while (Subvectors.size() > 1) {
12118 Subvectors.push_back(
12119 DAG.
getNode(FlatNode,
DL, AccVT, {Subvectors[0], Subvectors[1]}));
12120 Subvectors.pop_front();
12121 Subvectors.pop_front();
12124 assert(Subvectors.size() == 1 &&
12125 "There should only be one subvector after tree flattening");
12127 return Subvectors[0];
12140 if (
Op.getNode() != FPNode)
12144 while (!Worklist.
empty()) {
12155 if (
Node == FPNode ||
Node->getOpcode() == ISD::CALLSEQ_START)
12158 if (
Node->getOpcode() == ISD::CALLSEQ_END) {
12178 std::optional<unsigned> CallRetResNo)
const {
12179 if (LC == RTLIB::UNKNOWN_LIBCALL)
12183 if (LibcallImpl == RTLIB::Unsupported)
12187 EVT VT =
Node->getValueType(0);
12188 unsigned NumResults =
Node->getNumValues();
12198 SDValue StoreValue = ST->getValue();
12199 unsigned ResNo = StoreValue.
getResNo();
12201 if (CallRetResNo == ResNo)
12204 if (!ST->isSimple() || ST->getAddressSpace() != 0)
12207 if (StoresInChain && ST->getChain() != StoresInChain)
12211 if (ST->getAlign() <
12219 ResultStores[ResNo] = ST;
12220 StoresInChain = ST->getChain();
12227 EVT ArgVT =
Op.getValueType();
12229 Args.emplace_back(
Op, ArgTy);
12236 if (ResNo == CallRetResNo)
12238 EVT ResVT =
Node->getValueType(ResNo);
12240 ResultPtrs[ResNo] = ResultPtr;
12241 Args.emplace_back(ResultPtr,
PointerTy);
12253 Type *RetType = CallRetResNo.has_value()
12254 ?
Node->getValueType(*CallRetResNo).getTypeForEVT(Ctx)
12266 if (ResNo == CallRetResNo) {
12272 ResultPtr, PtrInfo);
12278 PtrInfo = ST->getPointerInfo();
12285 Results.push_back(LoadResult);
12294 SDValue EVL,
bool &NeedInvert,
12296 bool IsSignaling)
const {
12297 MVT OpVT = LHS.getSimpleValueType();
12299 NeedInvert =
false;
12300 assert(!EVL == !Mask &&
"VP Mask and EVL must either both be set or unset");
12301 bool IsNonVP = !EVL;
12316 bool NeedSwap =
false;
12317 InvCC = getSetCCInverse(CCCode, OpVT);
12333 if (OpVT == MVT::i1) {
12348 DAG.
getNOT(dl, LHS, MVT::i1));
12353 DAG.
getNOT(dl, RHS, MVT::i1));
12358 DAG.
getNOT(dl, LHS, MVT::i1));
12363 DAG.
getNOT(dl, RHS, MVT::i1));
12386 "If SETUE is expanded, SETOEQ or SETUNE must be legal!");
12391 "If SETO is expanded, SETOEQ must be legal!");
12408 NeedInvert = ((
unsigned)CCCode & 0x8U);
12449 SetCC1 = DAG.
getSetCC(dl, VT, LHS, RHS, CC1, Chain, IsSignaling);
12450 SetCC2 = DAG.
getSetCC(dl, VT, LHS, RHS, CC2, Chain, IsSignaling);
12452 SetCC1 = DAG.
getSetCCVP(dl, VT, LHS, RHS, CC1, Mask, EVL);
12453 SetCC2 = DAG.
getSetCCVP(dl, VT, LHS, RHS, CC2, Mask, EVL);
12458 SetCC1 = DAG.
getSetCC(dl, VT, LHS, LHS, CC1, Chain, IsSignaling);
12459 SetCC2 = DAG.
getSetCC(dl, VT, RHS, RHS, CC2, Chain, IsSignaling);
12461 SetCC1 = DAG.
getSetCCVP(dl, VT, LHS, LHS, CC1, Mask, EVL);
12462 SetCC2 = DAG.
getSetCCVP(dl, VT, RHS, RHS, CC2, Mask, EVL);
12469 LHS = DAG.
getNode(
Opc, dl, VT, SetCC1, SetCC2);
12474 LHS = DAG.
getNode(
Opc, dl, VT, SetCC1, SetCC2, Mask, EVL);
12486 EVT VT =
Node->getValueType(0);
12498 unsigned Opcode =
Node->getOpcode();
12536 std::optional<unsigned> ByteOffset;
12540 int Elt = ConstEltNo->getZExtValue();
12554 unsigned IsFast = 0;
12564 DAG, OriginalLoad->
getBasePtr(), InVecVT, EltNo);
12569 if (ResultVT.
bitsGT(VecEltVT)) {
12576 NewPtr, MPI, VecEltVT, Alignment,
12586 if (ResultVT.
bitsLT(VecEltVT))
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
block Block Frequency Analysis
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
static bool isSigned(unsigned int Opcode)
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, const APInt &Demanded)
Check to see if the specified operand of the specified instruction is a constant integer.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isNonZeroModBitWidthOrUndef(const MachineRegisterInfo &MRI, Register Reg, unsigned BW)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Function const char * Passes
const SmallVectorImpl< MachineOperand > & Cond
Contains matchers for matching SelectionDAG nodes and values.
static cl::opt< unsigned > MaxSteps("has-predecessor-max-steps", cl::Hidden, cl::init(8192), cl::desc("DAG combiner limit number of steps when searching DAG " "for predecessor nodes"))
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SDValue foldSetCCWithFunnelShift(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static bool lowerImmediateIfPossible(TargetLowering::ConstraintPair &P, SDValue Op, SelectionDAG *DAG, const TargetLowering &TLI)
If we have an immediate, see if we can lower it.
static SDValue expandVPFunnelShift(SDNode *Node, SelectionDAG &DAG)
static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, const APInt &UndefOp0, const APInt &UndefOp1)
Given a vector binary operation and known undefined elements for each input operand,...
static SDValue BuildExactUDIV(const TargetLowering &TLI, SDNode *N, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created)
Given an exact UDIV by a constant, create a multiplication with the multiplicative inverse of the con...
static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, SDValue Idx, EVT VecVT, const SDLoc &dl, ElementCount SubEC)
static unsigned getConstraintPiority(TargetLowering::ConstraintType CT)
Return a number indicating our preference for chosing a type of constraint over another,...
static std::optional< bool > isFCmpEqualZero(FPClassTest Test, const fltSemantics &Semantics, const MachineFunction &MF)
Returns a true value if if this FPClassTest can be performed with an ordered fcmp to 0,...
static bool canFoldStoreIntoLibCallOutputPointers(StoreSDNode *StoreNode, SDNode *FPNode)
Given a store node StoreNode, return true if it is safe to fold that node into FPNode,...
static void turnVectorIntoSplatVector(MutableArrayRef< SDValue > Values, std::function< bool(SDValue)> Predicate, SDValue AlternativeReplacement=SDValue())
If all values in Values that don't match the predicate are same 'splat' value, then replace all value...
static bool canExpandVectorCTPOP(const TargetLowering &TLI, EVT VT)
static SDValue foldSetCCWithRotate(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created)
Given an exact SDIV by a constant, create a multiplication with the multiplicative inverse of the con...
static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT, SDValue N0, const APInt &C1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue combineShiftToAVG(SDValue Op, TargetLowering::TargetLoweringOpt &TLO, const TargetLowering &TLI, const APInt &DemandedBits, const APInt &DemandedElts, unsigned Depth)
This file describes how to lower LLVM code to machine code.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT, SelectionDAG &DAG)
Scalarize a vector store, bitcasting to TargetVT to determine the scalar type.
static constexpr roundingMode rmTowardZero
static constexpr roundingMode rmNearestTiesToEven
opStatus
IEEE-754R 7: Default exception handling.
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
void setSignBit()
Set the sign bit to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
void clearAllBits()
Set every bit to 0.
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
void negate()
Negate this APInt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned getSignificantBits() const
Get the minimum bit size for this signed APInt.
unsigned countLeadingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
LLVM_ABI void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
void setAllBits()
Set every bit to 1.
LLVM_ABI APInt multiplicativeInverse() const
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
void clearBits(unsigned LoBit, unsigned HiBit)
Clear the bits from LoBit (inclusive) to HiBit (exclusive) to 0.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void clearHighBits(unsigned hiBits)
Set top hiBits bits to 0.
int64_t getSExtValue() const
Get sign extended value.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void setBitVal(unsigned BitPosition, bool BitValue)
Set a given bit to a given value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
LLVM_ABI ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This class represents a function call, abstracting a target machine's calling convention.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
ConstantFP - Floating Point Values [float, double].
This class represents a range of values.
const APInt & getAPIntValue() const
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
LLVM_ABI Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
AttributeList getAttributes() const
Return the attribute list for this Function.
int64_t getOffset() const
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
std::vector< std::string > ConstraintCodeVector
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
Wrapper class representing physical registers. Should be passed by value.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static MVT getIntegerVT(unsigned BitWidth)
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setAdjustsStack(bool V)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
Function & getFunction()
Return the LLVM function that this machine code represents.
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
Flags getFlags() const
Return the raw flags of the source value,.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
unsigned getAddressSpace() const
Return the address space for the associated pointer.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
bool isSimple() const
Returns true if the memory operation is neither atomic or volatile.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
const GlobalVariable * getNamedGlobal(StringRef Name) const
Return the global variable in the module with the specified name, of arbitrary type.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Class to represent pointers.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
bool willNotOverflowAdd(bool IsSigned, SDValue N0, SDValue N1) const
Determine if the result of the addition of 2 nodes can never overflow.
LLVM_ABI Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
SDValue getExtractVectorElt(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Extract element at Idx from Vec.
LLVM_ABI unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
bool isKnownNeverSNaN(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
LLVM_ABI bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
LLVM_ABI SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI std::optional< unsigned > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
static LLVM_ABI unsigned getHasPredecessorMaxSteps()
SDValue getExtractSubvector(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Return the VT typed sub-vector of Vec at Idx.
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
bool willNotOverflowSub(bool IsSigned, SDValue N0, SDValue N1) const
Determine if the result of the sub of 2 nodes can never overflow.
LLVM_ABI bool shouldOptForSize() const
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
LLVM_ABI std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI std::optional< unsigned > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shi...
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
LLVM_ABI bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly=false, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
LLVM_ABI void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
LLVM_ABI bool isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
LLVM_ABI bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
LLVM_ABI SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
LLVM_ABI SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI bool isKnownNeverZeroFloat(SDValue Op) const
Test whether the given floating point SDValue is known to never be positive or negative zero.
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI SDValue getCondCode(ISD::CondCode Cond)
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Mask, SDValue EVL)
Helper function to make it easier to build VP_SETCCs if you just have an ISD::CondCode instead of an ...
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Class to represent struct types.
LLVM_ABI void setAttributes(const CallBase *Call, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool isShuffleMaskLegal(ArrayRef< int >, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
virtual bool shouldRemoveRedundantExtend(SDValue Op) const
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset=std::nullopt) const
Return true if it is profitable to reduce a load to a smaller type.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool preferSelectsOverBooleanArithmetic(EVT VT) const
Should we prefer selects to doing arithmetic on boolean types.
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual bool isSafeMemOpType(MVT) const
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
const TargetMachine & getTargetMachine() const
virtual bool isCtpopFast(EVT VT) const
Return true if ctpop instruction is fast.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const
Indicates if any padding is guaranteed to go at the most significant bits when storing the type to me...
LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some oth...
CallingConv::ID getLibcallImplCallingConv(RTLIB::LibcallImpl Call) const
Get the CallingConv that should be used for the specified libcall implementation.
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual bool isFPImmLegal(const APFloat &, EVT, bool ForCodeSize=false) const
Returns true if the target can instruction select the specified FP immediate natively.
virtual MVT::SimpleValueType getCmpLibcallReturnType() const
Return the ValueType for comparison libcalls.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const
Should we transform the IR-optimal check for whether given truncation down into KeptBits would be trun...
bool isLegalRC(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC) const
Return true if the value types that can be represented by the specified register class are all legal.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual bool shouldExtendTypeInLibCall(EVT Type) const
Returns true if arguments should be extended in lib calls.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const
Return true if creating a shift of the type by the given amount is not profitable.
virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal for a comparison of the specified types on this ...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
ISD::CondCode getSoftFloatCmpLibcallPredicate(RTLIB::LibcallImpl Call) const
Get the comparison predicate that's to be used to test the result of the comparison libcall against z...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
TargetLoweringBase(const TargetMachine &TM, const TargetSubtargetInfo &STI)
NOTE: The TargetMachine owns TLOF.
virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a cust...
virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
virtual bool hasAndNotCompare(SDValue Y) const
Return true if the target should transform: (X & Y) == Y ---> (~X & Y) == 0 (X & Y) !...
virtual bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const
Get the libcall impl routine name for the specified libcall.
static StringRef getLibcallImplName(RTLIB::LibcallImpl Call)
Get the libcall routine name for the specified libcall implementation.
virtual bool isCtlzFast() const
Return true if ctlz instruction is fast.
virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT, bool IsSigned) const
Return true if it is more correct/profitable to use strict FP_TO_INT conversion operations - canonica...
NegatibleCost
Enum that specifies when a float negation is beneficial.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
virtual EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &) const
Returns the target specific optimal type for load and store operations as a result of memset,...
virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal or custom for a comparison of the specified type...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
MulExpansionKind
Enum that specifies when a multiplication should be expanded.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT.
SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Build sdiv by power-of-2 with conditional move instructions Ref: "Hacker's Delight" by Henry Warren 1...
virtual ConstraintWeight getMultipleConstraintMatchWeight(AsmOperandInfo &info, int maIndex) const
Examine constraint type and operand type and determine a weight value.
bool expandMultipleResultFPLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, SDNode *Node, SmallVectorImpl< SDValue > &Results, std::optional< unsigned > CallRetResNo={}) const
Expands a node with multiple results to an FP or vector libcall.
SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]MULO.
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL into two nodes.
SmallVector< ConstraintPair > ConstraintGroup
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
virtual Align computeKnownAlignForTargetInstr(GISelValueTracking &Analysis, Register R, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine the known alignment for the pointer value R.
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
virtual bool isUsedByReturnOnly(SDNode *, SDValue &) const
Return true if result of the specified node is used by a return node only.
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
virtual bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand VP_BSWAP nodes.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed, const SDValue LHS, const SDValue RHS, SDValue &Lo, SDValue &Hi) const
Calculate full product of LHS and RHS either via a libcall or through brute force expansion of the mu...
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand CTLZ/CTLZ_ZERO_UNDEF nodes.
SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand BITREVERSE nodes.
SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand CTTZ/CTTZ_ZERO_UNDEF nodes.
virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value, SDValue Addr, int JTI, SelectionDAG &DAG) const
Expands target specific indirect branch for the case of JumpTable expansion.
SDValue expandABD(SDNode *N, SelectionDAG &DAG) const
Expand ABDS/ABDU nodes.
virtual bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]SHLSAT.
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual bool isTargetCanonicalConstantNode(SDValue Op) const
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const
Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
virtual unsigned computeNumSignBitsForTargetInstr(GISelValueTracking &Analysis, Register R, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all bits from only some vector eleme...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue unwrapAddress(SDValue N) const
void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::S(ADD|SUB)O.
SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand VP_BITREVERSE nodes.
SDValue expandABS(SDNode *N, SelectionDAG &DAG, bool IsNegative=false) const
Expand ABS nodes.
SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_* into an explicit calculation.
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
SDValue expandVPCTTZElements(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ_ELTS/VP_CTTZ_ELTS_ZERO_UNDEF nodes.
SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, bool IsAfterLegalTypes, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::SDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual const char * getTargetNodeName(unsigned Opcode) const
This method returns the name of a target specific DAG node.
bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand float to UINT conversion.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
virtual bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded vector elements, returning true on success...
bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const
Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
SDValue expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimumnum/fmaximumnum into multiple comparison with selects.
void forceExpandMultiply(SelectionDAG &DAG, const SDLoc &dl, bool Signed, SDValue &Lo, SDValue &Hi, SDValue LHS, SDValue RHS, SDValue HiLHS=SDValue(), SDValue HiRHS=SDValue()) const
Calculate the product twice the width of LHS and RHS.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
virtual bool isTypeDesirableForOp(unsigned, EVT VT) const
Return true if the target has native support for the specified value type and it is 'desirable' to us...
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index, const SDNodeFlags PtrArithFlags=SDNodeFlags()) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
virtual const char * LowerXConstraint(EVT ConstraintVT) const
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand CTPOP nodes.
virtual void computeKnownBitsForTargetInstr(GISelValueTracking &Analysis, Register R, KnownBits &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, bool IsAfterLegalTypes, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::UDIV node expressing a divide by constant, return a DAG expression to select that will ...
SDValue expandVectorNaryOpBySplitting(SDNode *Node, SelectionDAG &DAG) const
~TargetLowering() override
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand BSWAP nodes.
SDValue expandFMINIMUM_FMAXIMUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimum/fmaximum into multiple comparison with selects.
SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op, unsigned NumBitsPerElt) const
Expand CTTZ via Table Lookup.
bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, SDValue LL=SDValue(), SDValue LH=SDValue()) const
Attempt to expand an n-bit div/rem/divrem by constant using a n/2-bit urem by constant and other arit...
virtual void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool isPositionIndependent() const
std::pair< StringRef, TargetLowering::ConstraintType > ConstraintPair
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, const DenormalMode &Mode) const
Return a target-dependent comparison result if the input operand is suitable for use with a square ro...
ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const
Given an OpInfo with list of constraints codes as strings, return a sorted Vector of pairs of constra...
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const
Expand float(f32) to SINT(i64) conversion.
virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, SDValue N1, MutableArrayRef< int > Mask, SelectionDAG &DAG) const
Tries to build a legal vector shuffle using the provided parameters or equivalent variations.
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op: at this point, we know that only the DemandedBits bits of the result of Op are ever used downstream.
virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const
Return true if it is profitable to combine an XOR of a logical shift to create a logical shift of NOT...
TargetLowering(const TargetLowering &)=delete
virtual bool shouldSimplifyDemandedVectorElts(SDValue Op, const TargetLoweringOpt &TLO) const
Return true if the target supports simplifying demanded vector elements by converting them to undefs.
bool isConstFalseVal(SDValue N) const
Return if the N is a constant or constant vector equal to the false value from getBooleanContents().
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which ma...
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
virtual bool shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const
For most targets, an LLVM type must be broken down into multiple smaller types.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI, const SDLoc &dl) const
Try to simplify a setcc built with the specified operands and cc.
SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const
Expand funnel shift.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, SDValue Mask, SDValue EVL, bool &NeedInvert, const SDLoc &dl, SDValue &Chain, bool IsSignaling=false) const
Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC on the current target.
bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const
Return if N is a True value when extended to VT.
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &DemandedBits, TargetLoweringOpt &TLO) const
Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
bool isConstTrueVal(SDValue N) const
Return if the N is a constant or constant vector equal to the true value from getBooleanContents().
SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTPOP nodes.
SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, SDValue LHS, SDValue RHS, unsigned Scale, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]DIVFIX[SAT].
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const
Expand a vector VECTOR_COMPRESS into a sequence of extract element, store temporarily,...
virtual const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const
Try to convert the fminnum/fmaxnum to a compare/select sequence.
SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const
Expand rotations.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index, const SDNodeFlags PtrArithFlags=SDNodeFlags()) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
virtual void computeKnownFPClassForTargetInstr(GISelValueTracking &Analysis, Register R, KnownFPClass &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
SDValue expandCMP(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]CMP.
void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi, SelectionDAG &DAG) const
Expand shift-by-parts.
virtual bool isKnownNeverNaNForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false, returns true if Op is known to never be any NaN; if SNaN is true, returns true if Op is known to never be a signaling NaN.
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
This method will be invoked for all target nodes and for any target-independent nodes that the target...
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT].
SDValue getInboundsVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][MIN|MAX].
SDValue expandVectorFindLastActive(SDNode *N, SelectionDAG &DAG) const
Expand VECTOR_FIND_LAST_ACTIVE nodes.
SDValue expandPartialReduceMLA(SDNode *Node, SelectionDAG &DAG) const
Expands PARTIAL_REDUCE_S/UMLA nodes to a series of simpler operations, consisting of zext/sext,...
void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::U(ADD|SUB)O.
virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
SDValue scalarizeExtractedVectorLoad(EVT ResultVT, const SDLoc &DL, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad, SelectionDAG &DAG) const
Replace an extraction of a load with a narrowed load.
virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SREM lowering for power-of-2 denominators.
bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand UINT(i64) to double(f64) conversion.
bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS, SDValue RHS, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes, respectively,...
SDValue expandAVG(SDNode *N, SelectionDAG &DAG) const
Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes.
Primary interface to the complete machine description for the target machine.
bool isPositionIndependent() const
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
iterator_range< regclass_iterator > regclasses() const
virtual StringRef getRegAsmName(MCRegister Reg) const
Return the assembly name for Reg.
bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const
Return true if the given TargetRegisterClass has the ValueType T.
TargetSubtargetInfo - Generic base class for all target subtargets.
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isIntegerTy() const
True if this is an instance of IntegerType.
LLVM_ABI const fltSemantics & getFltSemantics() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ SSUBO
Same for subtraction.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
LLVM_ABI bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
LLVM_ABI NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
LLVM_ABI bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Hook for matching ConstantSDNode predicate.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LLVM_ABI NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P)
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
FPClassTest invertFPClassTestIfSimpler(FPClassTest Test, bool UseFCmp)
Evaluates if the specified FP class test is better performed as the inverse (i.e.
LLVM_ABI bool isOneOrOneSplatFP(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant floating-point value, or a splatted vector of a constant float...
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
LLVM_ABI void reportFatalInternalError(Error Err)
Report a fatal error that indicates a bug in LLVM.
LLVM_ABI ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
auto find_if_not(R &&Range, UnaryPredicate P)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
To bit_cast(const From &from) noexcept
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ Sub
Subtraction of integers.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
FunctionAddr VTableAddr Next
DWARFExpression::Operation Op
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
LLVM_ABI bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
APFloat neg(APFloat X)
Returns the negated value of the argument.
unsigned Log2(Align A)
Returns the log2 of the alignment.
@ Increment
Incrementally increasing token ID.
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
EVT getHalfSizedIntegerVT(LLVMContext &Context) const
Finds the smallest simple value type that is greater than or equal to half the width of this EVT.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
LLVM_ABI const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
bool isUnknown() const
Returns true if we don't know any bits.
void setAllConflict()
Make all bits known to be both zero and one.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
static LLVM_ABI std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
KnownBits reverseBits() const
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
bool isSignUnknown() const
Returns true if we don't know the sign bit.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static LLVM_ABI std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static LLVM_ABI KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
static LLVM_ABI std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static LLVM_ABI std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the extended bits.
static LLVM_ABI std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
LLVM_ABI unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static bool hasVectorMaskArgument(RTLIB::LibcallImpl Impl)
Returns true if the function has a vector mask argument, which is assumed to be the last argument.
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasNoUnsignedWrap() const
bool hasNoSignedWrap() const
void setNoSignedWrap(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Magic data for optimising signed division by a constant.
unsigned ShiftAmount
shift amount
static LLVM_ABI SignedDivisionByConstantInfo get(const APInt &D)
Calculate the magic numbers required to implement a signed integer division by a constant as a sequence of multiplies, adds and shifts.
This contains information for each constraint that we are lowering.
std::string ConstraintCode
This contains the actual string for the code, like "m".
LLVM_ABI unsigned getMatchedOperand() const
If this is an input matching constraint, this method returns the output operand it matches.
LLVM_ABI bool isMatchingInputConstraint() const
Return true if this is an input operand that is a matching constraint like "4".
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setIsPostTypeLegalization(bool Value=true)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)
bool isBeforeLegalizeOps() const
LLVM_ABI void AddToWorklist(SDNode *N)
bool isCalledByLegalizer() const
bool isBeforeLegalize() const
LLVM_ABI void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setIsPostTypeLegalization(bool Value=true)
ArrayRef< EVT > OpsVTBeforeSoften
bool IsPostTypeLegalization
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT)
ArrayRef< Type * > OpsTypeOverrides
MakeLibCallOptions & setIsSigned(bool Value=true)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)
bool LegalOperations() const
Magic data for optimising unsigned division by a constant.
unsigned PreShift
pre-shift amount
static LLVM_ABI UnsignedDivisionByConstantInfo get(const APInt &D, unsigned LeadingZeros=0, bool AllowEvenDivisorOptimization=true)
Calculate the magic numbers required to implement an unsigned integer division by a constant as a sequence of multiplies, adds and shifts.
unsigned PostShift
post-shift amount