58 if (
F.getFnAttribute(
"disable-tail-calls").getValueAsBool())
64 AttrBuilder CallerAttrs(
F.getContext(),
F.getAttributes().getRetAttrs());
65 for (
const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
66 Attribute::DereferenceableOrNull, Attribute::NoAlias,
67 Attribute::NonNull, Attribute::NoUndef})
74 if (CallerAttrs.
contains(Attribute::ZExt) ||
75 CallerAttrs.
contains(Attribute::SExt))
86 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
102 Register ArgReg = cast<RegisterSDNode>(
Value->getOperand(1))->getReg();
103 if (
MRI.getLiveInPhysReg(ArgReg) != Reg)
113 IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
114 IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
115 IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
116 IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
117 IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
118 IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
119 IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
120 IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
121 IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
122 IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
123 IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
124 IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
125 Alignment = Call->getParamStackAlign(ArgIdx);
128 "multiple ABI attributes?");
144std::pair<SDValue, SDValue>
154 Args.reserve(Ops.
size());
157 for (
unsigned i = 0; i < Ops.
size(); ++i) {
160 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.
getContext());
163 Entry.IsZExt = !Entry.IsSExt;
167 Entry.IsSExt = Entry.IsZExt =
false;
169 Args.push_back(Entry);
172 if (LC == RTLIB::UNKNOWN_LIBCALL)
180 bool zeroExtend = !signExtend;
184 signExtend = zeroExtend =
false;
195 return LowerCallTo(CLI);
199 std::vector<EVT> &MemOps,
unsigned Limit,
const MemOp &
Op,
unsigned DstAS,
201 if (Limit != ~
unsigned(0) &&
Op.isMemcpyWithFixedDstAlign() &&
202 Op.getSrcAlign() <
Op.getDstAlign())
207 if (VT == MVT::Other) {
212 if (
Op.isFixedDstAlign())
230 unsigned NumMemOps = 0;
234 while (VTSize >
Size) {
245 else if (NewVT == MVT::i64 &&
257 if (NewVT == MVT::i8)
266 if (NumMemOps &&
Op.allowOverlap() && NewVTSize <
Size &&
268 VT, DstAS,
Op.isFixedDstAlign() ?
Op.getDstAlign() :
Align(1),
278 if (++NumMemOps > Limit)
281 MemOps.push_back(VT);
296 return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
306 bool IsSignaling)
const {
311 assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
312 &&
"Unsupported setcc type!");
315 RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
316 bool ShouldInvertCC =
false;
320 LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
321 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
322 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
326 LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
327 (VT == MVT::f64) ? RTLIB::UNE_F64 :
328 (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
332 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
333 (VT == MVT::f64) ? RTLIB::OGE_F64 :
334 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
338 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
339 (VT == MVT::f64) ? RTLIB::OLT_F64 :
340 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
344 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
345 (VT == MVT::f64) ? RTLIB::OLE_F64 :
346 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
350 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
351 (VT == MVT::f64) ? RTLIB::OGT_F64 :
352 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
355 ShouldInvertCC =
true;
358 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
359 (VT == MVT::f64) ? RTLIB::UO_F64 :
360 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
364 ShouldInvertCC =
true;
367 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
368 (VT == MVT::f64) ? RTLIB::UO_F64 :
369 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
370 LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
371 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
372 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
376 ShouldInvertCC =
true;
379 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
380 (VT == MVT::f64) ? RTLIB::OGE_F64 :
381 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
384 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
385 (VT == MVT::f64) ? RTLIB::OGT_F64 :
386 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
389 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
390 (VT == MVT::f64) ? RTLIB::OLE_F64 :
391 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
394 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
395 (VT == MVT::f64) ? RTLIB::OLT_F64 :
396 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
404 SDValue Ops[2] = {NewLHS, NewRHS};
409 auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
414 if (ShouldInvertCC) {
416 CCCode = getSetCCInverse(CCCode, RetVT);
419 if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
426 auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
429 CCCode = getSetCCInverse(CCCode, RetVT);
430 NewLHS = DAG.
getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
444 if (!isPositionIndependent())
458 unsigned JTEncoding = getJumpTableEncoding();
494 if (!
TM.shouldAssumeDSOLocal(GV))
498 if (isPositionIndependent())
514 const APInt &DemandedElts,
517 unsigned Opcode =
Op.getOpcode();
525 if (targetShrinkDemandedConstant(
Op,
DemandedBits, DemandedElts, TLO))
535 auto *Op1C = dyn_cast<ConstantSDNode>(
Op.getOperand(1));
536 if (!Op1C || Op1C->isOpaque())
540 const APInt &
C = Op1C->getAPIntValue();
545 EVT VT =
Op.getValueType();
561 EVT VT =
Op.getValueType();
576 "ShrinkDemandedOp only supports binary operators!");
577 assert(
Op.getNode()->getNumValues() == 1 &&
578 "ShrinkDemandedOp only supports nodes with one result!");
580 EVT VT =
Op.getValueType();
590 if (!
Op.getNode()->hasOneUse())
603 Op.getOpcode(), dl, SmallVT,
606 assert(DemandedSize <= SmallVTBits &&
"Narrowed below demanded bits?");
621 bool Simplified = SimplifyDemandedBits(
Op,
DemandedBits, Known, TLO);
630 const APInt &DemandedElts,
650 bool AssumeSingleUse)
const {
651 EVT VT =
Op.getValueType();
667 EVT VT =
Op.getValueType();
685 switch (
Op.getOpcode()) {
691 EVT SrcVT = Src.getValueType();
692 EVT DstVT =
Op.getValueType();
698 if (NumSrcEltBits == NumDstEltBits)
699 if (
SDValue V = SimplifyMultipleUseDemandedBits(
703 if (SrcVT.
isVector() && (NumDstEltBits % NumSrcEltBits) == 0) {
704 unsigned Scale = NumDstEltBits / NumSrcEltBits;
708 for (
unsigned i = 0; i != Scale; ++i) {
709 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
710 unsigned BitOffset = EltOffset * NumSrcEltBits;
713 DemandedSrcBits |= Sub;
714 for (
unsigned j = 0; j != NumElts; ++j)
716 DemandedSrcElts.
setBit((j * Scale) + i);
720 if (
SDValue V = SimplifyMultipleUseDemandedBits(
721 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
726 if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) {
727 unsigned Scale = NumSrcEltBits / NumDstEltBits;
731 for (
unsigned i = 0; i != NumElts; ++i)
732 if (DemandedElts[i]) {
733 unsigned Offset = (i % Scale) * NumDstEltBits;
735 DemandedSrcElts.
setBit(i / Scale);
738 if (
SDValue V = SimplifyMultipleUseDemandedBits(
739 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
753 return Op.getOperand(0);
755 return Op.getOperand(1);
766 return Op.getOperand(0);
768 return Op.getOperand(1);
778 return Op.getOperand(0);
780 return Op.getOperand(1);
786 if (
const APInt *MaxSA =
789 unsigned ShAmt = MaxSA->getZExtValue();
790 unsigned NumSignBits =
793 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
822 EVT ExVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
829 if (NumSignBits >= (
BitWidth - ExBits + 1))
842 EVT SrcVT = Src.getValueType();
843 EVT DstVT =
Op.getValueType();
844 if (IsLE && DemandedElts == 1 &&
857 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
860 !DemandedElts[CIdx->getZExtValue()])
874 if (DemandedSubElts == 0)
884 bool AllUndef =
true, IdentityLHS =
true, IdentityRHS =
true;
885 for (
unsigned i = 0; i != NumElts; ++i) {
886 int M = ShuffleMask[i];
887 if (M < 0 || !DemandedElts[i])
890 IdentityLHS &= (M == (int)i);
891 IdentityRHS &= ((M - NumElts) == i);
897 return Op.getOperand(0);
899 return Op.getOperand(1);
909 if (
SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
919 unsigned Depth)
const {
920 EVT VT =
Op.getValueType();
927 return SimplifyMultipleUseDemandedBits(
Op,
DemandedBits, DemandedElts, DAG,
933 unsigned Depth)
const {
935 return SimplifyMultipleUseDemandedBits(
Op,
DemandedBits, DemandedElts, DAG,
944 const APInt &DemandedElts,
947 "SRL or SRA node is required here!");
950 if (!N1C || !N1C->
isOne())
996 unsigned ShiftOpc =
Op.getOpcode();
997 bool IsSigned =
false;
1001 unsigned NumSigned = std::min(NumSignedA, NumSignedB) - 1;
1006 unsigned NumZero = std::min(NumZeroA, NumZeroB);
1012 if (NumZero >= 2 && NumSigned < NumZero) {
1017 if (NumSigned >= 1) {
1025 if (NumZero >= 1 && NumSigned < NumZero) {
1045 EVT VT =
Op.getValueType();
1057 Add.getOperand(1)) &&
1082 unsigned Depth,
bool AssumeSingleUse)
const {
1085 "Mask size mismatches value type size!");
1090 EVT VT =
Op.getValueType();
1092 unsigned NumElts = OriginalDemandedElts.
getBitWidth();
1094 "Unexpected vector size");
1097 APInt DemandedElts = OriginalDemandedElts;
1117 cast<ConstantFPSDNode>(
Op)->getValueAPF().bitcastToAPInt());
1122 bool HasMultiUse =
false;
1123 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse()) {
1132 }
else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
1141 switch (
Op.getOpcode()) {
1145 if (!DemandedElts[0])
1150 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
1152 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO,
Depth + 1))
1157 if (DemandedElts == 1)
1170 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO,
Depth + 1))
1179 auto *LD = cast<LoadSDNode>(
Op);
1180 if (getTargetConstantFromLoad(LD)) {
1186 EVT MemVT = LD->getMemoryVT();
1198 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
1203 APInt DemandedVecElts(DemandedElts);
1205 unsigned Idx = CIdx->getZExtValue();
1209 if (!DemandedElts[
Idx])
1216 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO,
Depth + 1))
1222 if (SimplifyDemandedBits(Vec,
DemandedBits, DemandedVecElts, KnownVec, TLO,
1226 if (!!DemandedVecElts)
1241 APInt DemandedSrcElts = DemandedElts;
1245 if (SimplifyDemandedBits(Sub,
DemandedBits, DemandedSubElts, KnownSub, TLO,
1248 if (SimplifyDemandedBits(Src,
DemandedBits, DemandedSrcElts, KnownSrc, TLO,
1254 if (!!DemandedSubElts)
1256 if (!!DemandedSrcElts)
1262 SDValue NewSub = SimplifyMultipleUseDemandedBits(
1264 SDValue NewSrc = SimplifyMultipleUseDemandedBits(
1266 if (NewSub || NewSrc) {
1267 NewSub = NewSub ? NewSub : Sub;
1268 NewSrc = NewSrc ? NewSrc : Src;
1281 if (Src.getValueType().isScalableVector())
1284 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
1287 if (SimplifyDemandedBits(Src,
DemandedBits, DemandedSrcElts, Known, TLO,
1293 SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
1308 EVT SubVT =
Op.getOperand(0).getValueType();
1311 for (
unsigned i = 0; i != NumSubVecs; ++i) {
1312 APInt DemandedSubElts =
1313 DemandedElts.
extractBits(NumSubElts, i * NumSubElts);
1314 if (SimplifyDemandedBits(
Op.getOperand(i),
DemandedBits, DemandedSubElts,
1315 Known2, TLO,
Depth + 1))
1318 if (!!DemandedSubElts)
1328 APInt DemandedLHS, DemandedRHS;
1333 if (!!DemandedLHS || !!DemandedRHS) {
1339 if (!!DemandedLHS) {
1340 if (SimplifyDemandedBits(Op0,
DemandedBits, DemandedLHS, Known2, TLO,
1345 if (!!DemandedRHS) {
1346 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedRHS, Known2, TLO,
1353 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1355 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1357 if (DemandedOp0 || DemandedOp1) {
1358 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1359 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1394 LHSKnown.
One == ~RHSC->getAPIntValue()) {
1406 unsigned NumSubElts =
1423 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1428 Known2, TLO,
Depth + 1))
1451 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1453 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1455 if (DemandedOp0 || DemandedOp1) {
1456 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1457 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1470 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1472 if (Flags.hasDisjoint()) {
1473 Flags.setDisjoint(
false);
1474 Op->setFlags(Flags);
1479 if (SimplifyDemandedBits(Op0, ~Known.
One &
DemandedBits, DemandedElts,
1480 Known2, TLO,
Depth + 1)) {
1481 if (Flags.hasDisjoint()) {
1482 Flags.setDisjoint(
false);
1483 Op->setFlags(Flags);
1504 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1506 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1508 if (DemandedOp0 || DemandedOp1) {
1509 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1510 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1521 for (
int I = 0;
I != 2; ++
I) {
1524 SDValue Alt =
Op.getOperand(1 -
I).getOperand(0);
1525 SDValue C2 =
Op.getOperand(1 -
I).getOperand(1);
1527 for (
int J = 0; J != 2; ++J) {
1550 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1554 if (SimplifyDemandedBits(Op0,
DemandedBits, DemandedElts, Known2, TLO,
1582 if (
C->getAPIntValue() == Known2.
One) {
1591 if (!
C->isAllOnes() &&
DemandedBits.isSubsetOf(
C->getAPIntValue())) {
1603 if (ShiftC->getAPIntValue().ult(
BitWidth)) {
1604 uint64_t ShiftAmt = ShiftC->getZExtValue();
1607 : Ones.
lshr(ShiftAmt);
1625 if (!
C || !
C->isAllOnes())
1631 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1633 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1635 if (DemandedOp0 || DemandedOp1) {
1636 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1637 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1647 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1648 Known, TLO,
Depth + 1))
1650 if (SimplifyDemandedBits(
Op.getOperand(1),
DemandedBits, DemandedElts,
1651 Known2, TLO,
Depth + 1))
1664 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1665 Known, TLO,
Depth + 1))
1667 if (SimplifyDemandedBits(
Op.getOperand(1),
DemandedBits, DemandedElts,
1668 Known2, TLO,
Depth + 1))
1677 if (SimplifyDemandedBits(
Op.getOperand(3),
DemandedBits, DemandedElts,
1678 Known, TLO,
Depth + 1))
1680 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1681 Known2, TLO,
Depth + 1))
1726 if (
const APInt *SA =
1728 unsigned ShAmt = SA->getZExtValue();
1738 if (
const APInt *SA2 =
1740 unsigned C1 = SA2->getZExtValue();
1742 int Diff = ShAmt - C1;
1761 if (ShAmt < InnerBits &&
DemandedBits.getActiveBits() <= InnerBits &&
1762 isTypeDesirableForOp(
ISD::SHL, InnerVT)) {
1778 if (
const APInt *SA2 =
1780 unsigned InnerShAmt = SA2->getZExtValue();
1781 if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
1783 (InnerBits - InnerShAmt + ShAmt) &&
1797 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
1800 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
1803 Flags.setNoSignedWrap(
false);
1804 Flags.setNoUnsignedWrap(
false);
1805 Op->setFlags(Flags);
1810 Known.
Zero <<= ShAmt;
1811 Known.
One <<= ShAmt;
1817 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1818 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
1839 isTypeDesirableForOp(
ISD::SHL, HalfVT) &&
1848 Flags.setNoSignedWrap(IsNSW);
1849 Flags.setNoUnsignedWrap(IsNUW);
1854 NewShiftAmt, Flags);
1867 if (SimplifyDemandedBits(Op0, DemandedFromOp, DemandedElts, Known, TLO,
1870 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
1873 Flags.setNoSignedWrap(
false);
1874 Flags.setNoUnsignedWrap(
false);
1875 Op->setFlags(Flags);
1885 if (
const APInt *MaxSA =
1887 unsigned ShAmt = MaxSA->getZExtValue();
1888 unsigned NumSignBits =
1891 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
1903 DemandedElts,
Depth + 1))
1906 if (
const APInt *SA =
1908 unsigned ShAmt = SA->getZExtValue();
1918 if (
const APInt *SA2 =
1920 unsigned C1 = SA2->getZExtValue();
1922 int Diff = ShAmt - C1;
1938 if (
Op->getFlags().hasExact())
1947 isTypeDesirableForOp(
ISD::SRL, HalfVT) &&
1963 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
1974 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1975 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2009 DemandedElts,
Depth + 1))
2012 if (
const APInt *SA =
2014 unsigned ShAmt = SA->getZExtValue();
2021 if (
const APInt *InnerSA =
2023 unsigned LowBits =
BitWidth - ShAmt;
2029 if (*InnerSA == ShAmt) {
2039 unsigned NumSignBits =
2041 if (NumSignBits > ShAmt)
2051 if (
Op->getFlags().hasExact())
2059 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
2071 Flags.setExact(
Op->getFlags().hasExact());
2089 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2090 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2107 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2112 if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1,
DemandedBits, DemandedElts,
2113 Known, TLO,
Depth + 1))
2122 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
2125 if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,
2138 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2139 Op0, Demanded0, DemandedElts, TLO.
DAG,
Depth + 1);
2140 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2141 Op1, Demanded1, DemandedElts, TLO.
DAG,
Depth + 1);
2142 if (DemandedOp0 || DemandedOp1) {
2143 DemandedOp0 = DemandedOp0 ? DemandedOp0 : Op0;
2144 DemandedOp1 = DemandedOp1 ? DemandedOp1 : Op1;
2155 if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
2156 Known2, TLO,
Depth + 1))
2172 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2178 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
2188 DemandedBits.countr_zero() >= (IsROTL ? Amt : RevAmt)) {
2193 DemandedBits.countl_zero() >= (IsROTL ? RevAmt : Amt)) {
2202 if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO,
2212 unsigned Opc =
Op.getOpcode();
2219 unsigned NumSignBits =
2223 if (NumSignBits >= NumDemandedUpperBits)
2264 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
2290 unsigned ShiftAmount = NLZ > NTZ ? NLZ - NTZ : NTZ - NLZ;
2298 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
2318 EVT ExVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
2323 unsigned MinSignedBits =
2325 bool AlreadySignExtended = ExVTBits >= MinSignedBits;
2328 if (!AlreadySignExtended) {
2346 InputDemandedBits.
setBit(ExVTBits - 1);
2348 if (SimplifyDemandedBits(Op0, InputDemandedBits, DemandedElts, Known, TLO,
2357 if (Known.
Zero[ExVTBits - 1])
2361 if (Known.
One[ExVTBits - 1]) {
2371 EVT HalfVT =
Op.getOperand(0).getValueType();
2379 if (SimplifyDemandedBits(
Op.getOperand(0), MaskLo, KnownLo, TLO,
Depth + 1))
2382 if (SimplifyDemandedBits(
Op.getOperand(1), MaskHi, KnownHi, TLO,
Depth + 1))
2385 Known = KnownHi.
concat(KnownLo);
2394 EVT SrcVT = Src.getValueType();
2403 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2415 APInt InDemandedElts = DemandedElts.
zext(InElts);
2416 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2418 if (Flags.hasNonNeg()) {
2419 Flags.setNonNeg(
false);
2420 Op->setFlags(Flags);
2429 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2430 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2440 EVT SrcVT = Src.getValueType();
2445 APInt InDemandedElts = DemandedElts.
zext(InElts);
2450 InDemandedBits.
setBit(InBits - 1);
2456 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2471 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2487 Flags.setNonNeg(
true);
2493 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2494 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2504 EVT SrcVT = Src.getValueType();
2511 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2516 APInt InDemandedElts = DemandedElts.
zext(InElts);
2517 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2525 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2526 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2535 unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
2537 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO,
2543 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2544 Src, TruncMask, DemandedElts, TLO.
DAG,
Depth + 1))
2549 switch (Src.getOpcode()) {
2560 if (Src.getNode()->hasOneUse()) {
2561 const APInt *ShAmtC =
2592 EVT ZVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
2594 if (SimplifyDemandedBits(
Op.getOperand(0), ~InMask |
DemandedBits, Known,
2599 Known.
Zero |= ~InMask;
2600 Known.
One &= (~Known.Zero);
2606 ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
2607 unsigned EltBitWidth = Src.getScalarValueSizeInBits();
2615 if (
auto *CIdx = dyn_cast<ConstantSDNode>(
Idx))
2616 if (CIdx->getAPIntValue().ult(NumSrcElts))
2623 DemandedSrcBits = DemandedSrcBits.
trunc(EltBitWidth);
2625 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO,
2631 if (
SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
2632 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2648 EVT SrcVT = Src.getValueType();
2658 if ((OpVTLegal || i32Legal) && VT.
isSimple() && SrcVT != MVT::f16 &&
2659 SrcVT != MVT::f128) {
2661 EVT Ty = OpVTLegal ? VT : MVT::i32;
2665 unsigned OpVTSizeInBits =
Op.getValueSizeInBits();
2666 if (!OpVTLegal && OpVTSizeInBits > 32)
2668 unsigned ShVal =
Op.getValueSizeInBits() - 1;
2678 unsigned Scale =
BitWidth / NumSrcEltBits;
2682 for (
unsigned i = 0; i != Scale; ++i) {
2683 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
2684 unsigned BitOffset = EltOffset * NumSrcEltBits;
2687 DemandedSrcBits |= Sub;
2688 for (
unsigned j = 0; j != NumElts; ++j)
2689 if (DemandedElts[j])
2690 DemandedSrcElts.
setBit((j * Scale) + i);
2694 APInt KnownSrcUndef, KnownSrcZero;
2695 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2696 KnownSrcZero, TLO,
Depth + 1))
2700 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2701 KnownSrcBits, TLO,
Depth + 1))
2703 }
else if (IsLE && (NumSrcEltBits %
BitWidth) == 0) {
2705 unsigned Scale = NumSrcEltBits /
BitWidth;
2709 for (
unsigned i = 0; i != NumElts; ++i)
2710 if (DemandedElts[i]) {
2713 DemandedSrcElts.
setBit(i / Scale);
2717 APInt KnownSrcUndef, KnownSrcZero;
2718 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2719 KnownSrcZero, TLO,
Depth + 1))
2724 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2725 KnownSrcBits, TLO,
Depth + 1))
2730 if (
SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
2731 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2753 if (
C &&
C->getAPIntValue().countr_zero() == CTZ) {
2772 SDValue Op0 =
Op.getOperand(0), Op1 =
Op.getOperand(1);
2777 if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, KnownOp0, TLO,
2779 SimplifyDemandedBits(Op1, LoMask, DemandedElts, KnownOp1, TLO,
2783 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
2786 Flags.setNoSignedWrap(
false);
2787 Flags.setNoUnsignedWrap(
false);
2788 Op->setFlags(Flags);
2800 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2801 Op0, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2802 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2803 Op1, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2804 if (DemandedOp0 || DemandedOp1) {
2805 Flags.setNoSignedWrap(
false);
2806 Flags.setNoUnsignedWrap(
false);
2807 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
2808 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
2810 TLO.
DAG.
getNode(
Op.getOpcode(), dl, VT, Op0, Op1, Flags);
2822 if (
C && !
C->isAllOnes() && !
C->isOne() &&
2823 (
C->getAPIntValue() | HighMask).isAllOnes()) {
2827 Flags.setNoSignedWrap(
false);
2828 Flags.setNoUnsignedWrap(
false);
2836 auto getShiftLeftAmt = [&HighMask](
SDValue Mul) ->
unsigned {
2863 if (
unsigned ShAmt = getShiftLeftAmt(Op0))
2866 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
2867 return foldMul(
ISD::SUB, Op1.getOperand(0), Op0, ShAmt);
2871 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
2872 return foldMul(
ISD::ADD, Op1.getOperand(0), Op0, ShAmt);
2880 Op.getOpcode() ==
ISD::ADD, Flags.hasNoSignedWrap(),
2881 Flags.hasNoUnsignedWrap(), KnownOp0, KnownOp1);
2891 if (
Op.getValueType().isScalableVector())
2893 if (SimplifyDemandedBitsForTargetNode(
Op,
DemandedBits, DemandedElts,
2906 if (!isTargetCanonicalConstantNode(
Op) &&
2935 const APInt &DemandedElts,
2941 APInt KnownUndef, KnownZero;
2943 SimplifyDemandedVectorElts(
Op, DemandedElts, KnownUndef, KnownZero, TLO);
2955 const APInt &UndefOp0,
2956 const APInt &UndefOp1) {
2959 "Vector binop only");
2964 UndefOp1.
getBitWidth() == NumElts &&
"Bad type for undef analysis");
2966 auto getUndefOrConstantElt = [&](
SDValue V,
unsigned Index,
2967 const APInt &UndefVals) {
2968 if (UndefVals[
Index])
2971 if (
auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
2975 auto *
C = dyn_cast<ConstantSDNode>(Elt);
2976 if (isa<ConstantFPSDNode>(Elt) || Elt.
isUndef() || (
C && !
C->isOpaque()))
2984 for (
unsigned i = 0; i != NumElts; ++i) {
3003 bool AssumeSingleUse)
const {
3004 EVT VT =
Op.getValueType();
3005 unsigned Opcode =
Op.getOpcode();
3006 APInt DemandedElts = OriginalDemandedElts;
3021 "Mask size mismatches value type element count!");
3030 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse())
3034 if (DemandedElts == 0) {
3049 auto SimplifyDemandedVectorEltsBinOp = [&](
SDValue Op0,
SDValue Op1) {
3050 SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts,
3052 SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts,
3054 if (NewOp0 || NewOp1) {
3057 NewOp1 ? NewOp1 : Op1,
Op->getFlags());
3065 if (!DemandedElts[0]) {
3073 EVT SrcVT = Src.getValueType();
3085 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3095 EVT SrcVT = Src.getValueType();
3104 if (NumSrcElts == NumElts)
3105 return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
3106 KnownZero, TLO,
Depth + 1);
3108 APInt SrcDemandedElts, SrcZero, SrcUndef;
3112 if ((NumElts % NumSrcElts) == 0) {
3113 unsigned Scale = NumElts / NumSrcElts;
3115 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3125 for (
unsigned i = 0; i != NumElts; ++i)
3126 if (DemandedElts[i]) {
3127 unsigned Ofs = (i % Scale) * EltSizeInBits;
3128 SrcDemandedBits.
setBits(Ofs, Ofs + EltSizeInBits);
3132 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known,
3140 for (
unsigned SubElt = 0; SubElt != Scale; ++SubElt) {
3144 for (
unsigned SrcElt = 0; SrcElt != NumSrcElts; ++SrcElt) {
3145 unsigned Elt = Scale * SrcElt + SubElt;
3146 if (DemandedElts[Elt])
3154 for (
unsigned i = 0; i != NumSrcElts; ++i) {
3155 if (SrcDemandedElts[i]) {
3157 KnownZero.
setBits(i * Scale, (i + 1) * Scale);
3159 KnownUndef.
setBits(i * Scale, (i + 1) * Scale);
3167 if ((NumSrcElts % NumElts) == 0) {
3168 unsigned Scale = NumSrcElts / NumElts;
3170 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3176 for (
unsigned i = 0; i != NumElts; ++i) {
3177 if (DemandedElts[i]) {
3192 [&](
SDValue Elt) { return Op.getOperand(0) != Elt; })) {
3194 bool Updated =
false;
3195 for (
unsigned i = 0; i != NumElts; ++i) {
3196 if (!DemandedElts[i] && !Ops[i].
isUndef()) {
3206 for (
unsigned i = 0; i != NumElts; ++i) {
3208 if (
SrcOp.isUndef()) {
3210 }
else if (EltSizeInBits ==
SrcOp.getScalarValueSizeInBits() &&
3218 EVT SubVT =
Op.getOperand(0).getValueType();
3221 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3224 APInt SubUndef, SubZero;
3225 if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
3228 KnownUndef.
insertBits(SubUndef, i * NumSubElts);
3229 KnownZero.
insertBits(SubZero, i * NumSubElts);
3234 bool FoundNewSub =
false;
3236 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3239 SDValue NewSubOp = SimplifyMultipleUseDemandedVectorElts(
3240 SubOp, SubElts, TLO.
DAG,
Depth + 1);
3241 DemandedSubOps.
push_back(NewSubOp ? NewSubOp : SubOp);
3242 FoundNewSub = NewSubOp ?
true : FoundNewSub;
3260 APInt DemandedSrcElts = DemandedElts;
3263 APInt SubUndef, SubZero;
3264 if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO,
3269 if (!DemandedSrcElts && !Src.isUndef())
3274 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero,
3282 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
3283 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3284 SDValue NewSub = SimplifyMultipleUseDemandedVectorElts(
3285 Sub, DemandedSubElts, TLO.
DAG,
Depth + 1);
3286 if (NewSrc || NewSub) {
3287 NewSrc = NewSrc ? NewSrc : Src;
3288 NewSub = NewSub ? NewSub : Sub;
3290 NewSub,
Op.getOperand(2));
3299 if (Src.getValueType().isScalableVector())
3302 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3305 APInt SrcUndef, SrcZero;
3306 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
3314 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
3315 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3327 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
3331 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
3332 unsigned Idx = CIdx->getZExtValue();
3333 if (!DemandedElts[
Idx])
3336 APInt DemandedVecElts(DemandedElts);
3338 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
3339 KnownZero, TLO,
Depth + 1))
3348 APInt VecUndef, VecZero;
3349 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,
3362 APInt UndefSel, ZeroSel;
3363 if (SimplifyDemandedVectorElts(Sel, DemandedElts, UndefSel, ZeroSel, TLO,
3368 APInt DemandedLHS(DemandedElts);
3369 APInt DemandedRHS(DemandedElts);
3370 APInt UndefLHS, ZeroLHS;
3371 APInt UndefRHS, ZeroRHS;
3372 if (SimplifyDemandedVectorElts(
LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,
3375 if (SimplifyDemandedVectorElts(
RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,
3379 KnownUndef = UndefLHS & UndefRHS;
3380 KnownZero = ZeroLHS & ZeroRHS;
3384 APInt DemandedSel = DemandedElts & ~KnownZero;
3385 if (DemandedSel != DemandedElts)
3386 if (SimplifyDemandedVectorElts(Sel, DemandedSel, UndefSel, ZeroSel, TLO,
3398 APInt DemandedLHS(NumElts, 0);
3399 APInt DemandedRHS(NumElts, 0);
3400 for (
unsigned i = 0; i != NumElts; ++i) {
3401 int M = ShuffleMask[i];
3402 if (M < 0 || !DemandedElts[i])
3404 assert(0 <= M && M < (
int)(2 * NumElts) &&
"Shuffle index out of range");
3405 if (M < (
int)NumElts)
3408 DemandedRHS.
setBit(M - NumElts);
3412 APInt UndefLHS, ZeroLHS;
3413 APInt UndefRHS, ZeroRHS;
3414 if (SimplifyDemandedVectorElts(
LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,
3417 if (SimplifyDemandedVectorElts(
RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,
3422 bool Updated =
false;
3423 bool IdentityLHS =
true, IdentityRHS =
true;
3425 for (
unsigned i = 0; i != NumElts; ++i) {
3426 int &M = NewMask[i];
3429 if (!DemandedElts[i] || (M < (
int)NumElts && UndefLHS[M]) ||
3430 (M >= (
int)NumElts && UndefRHS[M - NumElts])) {
3434 IdentityLHS &= (M < 0) || (M == (
int)i);
3435 IdentityRHS &= (M < 0) || ((M - NumElts) == i);
3440 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.
LegalOps) {
3442 buildLegalVectorShuffle(VT,
DL,
LHS,
RHS, NewMask, TLO.
DAG);
3448 for (
unsigned i = 0; i != NumElts; ++i) {
3449 int M = ShuffleMask[i];
3452 }
else if (M < (
int)NumElts) {
3458 if (UndefRHS[M - NumElts])
3460 if (ZeroRHS[M - NumElts])
3469 APInt SrcUndef, SrcZero;
3471 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3472 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts);
3473 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
3480 Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
3481 DemandedSrcElts == 1) {
3494 if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() ==
ISD::AND &&
3495 Op->isOnlyUserOf(Src.getNode()) &&
3496 Op.getValueSizeInBits() == Src.getValueSizeInBits()) {
3498 EVT SrcVT = Src.getValueType();
3505 ISD::AND,
DL, SrcVT, {Src.getOperand(1), Mask})) {
3519 if (Op0 == Op1 &&
Op->isOnlyUserOf(Op0.
getNode())) {
3520 APInt UndefLHS, ZeroLHS;
3521 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3538 APInt UndefRHS, ZeroRHS;
3539 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
3542 APInt UndefLHS, ZeroLHS;
3543 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3547 KnownZero = ZeroLHS & ZeroRHS;
3553 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3565 APInt UndefRHS, ZeroRHS;
3566 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
3569 APInt UndefLHS, ZeroLHS;
3570 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3574 KnownZero = ZeroLHS;
3575 KnownUndef = UndefLHS & UndefRHS;
3580 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3591 APInt SrcUndef, SrcZero;
3592 if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO,
3597 APInt DemandedElts0 = DemandedElts & ~SrcZero;
3598 if (SimplifyDemandedVectorElts(Op0, DemandedElts0, KnownUndef, KnownZero,
3602 KnownUndef &= DemandedElts0;
3603 KnownZero &= DemandedElts0;
3608 if (DemandedElts.
isSubsetOf(SrcZero | KnownZero | SrcUndef | KnownUndef))
3615 KnownZero |= SrcZero;
3616 KnownUndef &= SrcUndef;
3617 KnownUndef &= ~KnownZero;
3621 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3628 if (SimplifyDemandedVectorElts(
Op.getOperand(0), DemandedElts, KnownUndef,
3629 KnownZero, TLO,
Depth + 1))
3641 if (SimplifyDemandedVectorEltsForTargetNode(
Op, DemandedElts, KnownUndef,
3642 KnownZero, TLO,
Depth))
3647 if (SimplifyDemandedBits(
Op,
DemandedBits, OriginalDemandedElts, Known,
3648 TLO,
Depth, AssumeSingleUse))
3654 assert((KnownUndef & KnownZero) == 0 &&
"Elements flagged as undef AND zero");
3668 const APInt &DemandedElts,
3670 unsigned Depth)
const {
3675 "Should use MaskedValueIsZero if you don't know whether Op"
3676 " is a target node!");
3683 unsigned Depth)
const {
3695 unsigned Depth)
const {
3704 unsigned Depth)
const {
3709 "Should use ComputeNumSignBits if you don't know whether Op"
3710 " is a target node!");
3727 "Should use SimplifyDemandedVectorElts if you don't know whether Op"
3728 " is a target node!");
3739 "Should use SimplifyDemandedBits if you don't know whether Op"
3740 " is a target node!");
3741 computeKnownBitsForTargetNode(
Op, Known, DemandedElts, TLO.
DAG,
Depth);
3753 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
3754 " is a target node!");
3787 "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op"
3788 " is a target node!");
3792 return !canCreateUndefOrPoisonForTargetNode(
Op, DemandedElts, DAG,
PoisonOnly,
3795 return DAG.isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly,
3807 "Should use canCreateUndefOrPoison if you don't know whether Op"
3808 " is a target node!");
3816 unsigned Depth)
const {
3821 "Should use isKnownNeverNaN if you don't know whether Op"
3822 " is a target node!");
3827 const APInt &DemandedElts,
3830 unsigned Depth)
const {
3835 "Should use isSplatValue if you don't know whether Op"
3836 " is a target node!");
3851 CVal = CN->getAPIntValue();
3852 EltWidth =
N.getValueType().getScalarSizeInBits();
3859 CVal = CVal.
trunc(EltWidth);
3865 return CVal.
isOne();
3907 return (
N->isOne() && !SExt) || (SExt && (
N->getValueType(0) != MVT::i1));
3910 return N->isAllOnes() && SExt;
3919 DAGCombinerInfo &DCI)
const {
3947 auto *AndC = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
3948 if (AndC &&
isNullConstant(N1) && AndC->getAPIntValue().isPowerOf2() &&
3951 AndC->getAPIntValue().getActiveBits());
3978 if (isXAndYEqZeroPreferableToXAndYEqY(
Cond, OpVT) &&
3986 if (DCI.isBeforeLegalizeOps() ||
4020SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
4025 if (!(C1 = dyn_cast<ConstantSDNode>(N1)))
4034 if (!(C01 = dyn_cast<ConstantSDNode>(N0->
getOperand(1))))
4038 EVT XVT =
X.getValueType();
4062 auto checkConstants = [&
I1, &I01]() ->
bool {
4067 if (checkConstants()) {
4075 if (!checkConstants())
4081 const unsigned KeptBits =
I1.logBase2();
4082 const unsigned KeptBitsMinusOne = I01.
logBase2();
4085 if (KeptBits != (KeptBitsMinusOne + 1))
4100 return DAG.
getSetCC(
DL, SCCVT, SExtInReg,
X, NewCond);
4104SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
4106 DAGCombinerInfo &DCI,
const SDLoc &
DL)
const {
4108 "Should be a comparison with 0.");
4110 "Valid only for [in]equality comparisons.");
4112 unsigned NewShiftOpcode;
4123 unsigned OldShiftOpcode =
V.getOpcode();
4124 switch (OldShiftOpcode) {
4136 C =
V.getOperand(0);
4141 Y =
V.getOperand(1);
4145 return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
4146 X, XC,
CC,
Y, OldShiftOpcode, NewShiftOpcode, DAG);
4163 EVT VT =
X.getValueType();
4178 DAGCombinerInfo &DCI)
const {
4181 "Unexpected binop");
4210 if (!DCI.isCalledByLegalizer())
4211 DCI.AddToWorklist(YShl1.
getNode());
4226 if (CTPOP.getOpcode() !=
ISD::CTPOP || !CTPOP.hasOneUse())
4229 EVT CTVT = CTPOP.getValueType();
4230 SDValue CTOp = CTPOP.getOperand(0);
4250 for (
unsigned i = 0; i <
Passes; i++) {
4299 auto getRotateSource = [](
SDValue X) {
4301 return X.getOperand(0);
4308 if (
SDValue R = getRotateSource(N0))
4341 if (!C1 || !C1->
isZero())
4350 if (!ShAmtC || ShAmtC->getAPIntValue().uge(
BitWidth))
4354 unsigned ShAmt = ShAmtC->getZExtValue();
4363 if (
Or.getOperand(0) ==
Other) {
4364 X =
Or.getOperand(0);
4365 Y =
Or.getOperand(1);
4368 if (
Or.getOperand(1) ==
Other) {
4369 X =
Or.getOperand(1);
4370 Y =
Or.getOperand(0);
4380 if (matchOr(F0, F1)) {
4387 if (matchOr(F1, F0)) {
4403 const SDLoc &dl)
const {
4413 bool N0ConstOrSplat =
4415 bool N1ConstOrSplat =
4423 if (N0ConstOrSplat && !N1ConstOrSplat &&
4426 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4432 if (!N0ConstOrSplat && !N1ConstOrSplat &&
4437 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4446 const APInt &C1 = N1C->getAPIntValue();
4466 return DAG.
getNode(LogicOp, dl, VT, IsXZero, IsYZero);
4496 if (
auto *N1C = dyn_cast<ConstantSDNode>(N1.
getNode())) {
4497 const APInt &C1 = N1C->getAPIntValue();
4512 if (
auto *
C = dyn_cast<ConstantSDNode>(N0->
getOperand(1)))
4513 if ((
C->getAPIntValue()+1).isPowerOf2()) {
4514 MinBits =
C->getAPIntValue().countr_one();
4522 }
else if (
auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
4525 MinBits = LN0->getMemoryVT().getSizeInBits();
4529 MinBits = LN0->getMemoryVT().getSizeInBits();
4540 MinBits >= ReqdBits) {
4542 if (isTypeDesirableForOp(
ISD::SETCC, MinVT)) {
4545 if (MinBits == 1 && C1 == 1)
4564 if (TopSetCC.
getValueType() == MVT::i1 && VT == MVT::i1 &&
4577 cast<CondCodeSDNode>(TopSetCC.
getOperand(2))->get(),
4598 unsigned bestWidth = 0, bestOffset = 0;
4601 unsigned maskWidth = origWidth;
4607 for (
unsigned width = origWidth / 2; width>=8; width /= 2) {
4609 for (
unsigned offset=0; offset<origWidth/width; offset++) {
4610 if (Mask.isSubsetOf(newMask)) {
4612 bestOffset = (
uint64_t)offset * (width/8);
4614 bestOffset = (origWidth/width - offset - 1) * (width/8);
4615 bestMask = Mask.lshr(offset * (width/8) * 8);
4628 if (bestOffset != 0)
4713 ExtDstTy != ExtSrcTy &&
"Unexpected types!");
4720 return DAG.
getSetCC(dl, VT, ZextOp,
4722 }
else if ((N1C->isZero() || N1C->isOne()) &&
4769 return DAG.
getSetCC(dl, VT, Val, N1,
4772 }
else if (N1C->isOne()) {
4809 cast<VTSDNode>(Op0.
getOperand(1))->getVT() == MVT::i1)
4833 N1C && N1C->isAllOnes()) {
4840 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1,
Cond, DCI, dl))
4847 const APInt &C1 = N1C->getAPIntValue();
4849 APInt MinVal, MaxVal;
4871 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
4891 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
4939 if (
SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift(
4940 VT, N0, N1,
Cond, DCI, dl))
4947 bool CmpZero = N1C->isZero();
4948 bool CmpNegOne = N1C->isAllOnes();
4949 if ((CmpZero || CmpNegOne) && N0.
hasOneUse()) {
4952 unsigned EltBits = V.getScalarValueSizeInBits();
4953 if (V.getOpcode() !=
ISD::OR || (EltBits % 2) != 0)
4960 isa<ConstantSDNode>(
RHS.getOperand(1)) &&
4961 RHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
4964 Hi =
RHS.getOperand(0);
4968 isa<ConstantSDNode>(
LHS.getOperand(1)) &&
4969 LHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
4972 Hi =
LHS.getOperand(0);
4980 unsigned HalfBits = EltBits / 2;
4991 if (IsConcat(N0,
Lo,
Hi))
4992 return MergeConcat(
Lo,
Hi);
5029 if (
auto *N1C = dyn_cast<ConstantSDNode>(N1.
getNode())) {
5031 const APInt &C1 = N1C->getAPIntValue();
5043 if (
auto *AndRHS = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5046 unsigned ShCt = AndRHS->getAPIntValue().logBase2();
5047 if (AndRHS->getAPIntValue().isPowerOf2() &&
5055 }
else if (
Cond ==
ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
5076 if (
auto *AndRHS = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5077 const APInt &AndRHSC = AndRHS->getAPIntValue();
5116 return DAG.
getSetCC(dl, VT, Shift, CmpRHS, NewCond);
5122 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) {
5123 auto *CFP = cast<ConstantFPSDNode>(N1);
5124 assert(!CFP->getValueAPF().isNaN() &&
"Unexpected NaN value");
5145 !
isFPImmLegal(CFP->getValueAPF(), CFP->getValueType(0))) {
5164 if (CFP->getValueAPF().isInfinity()) {
5165 bool IsNegInf = CFP->getValueAPF().isNegative();
5176 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5185 "Integer types should be handled by FoldSetCC");
5191 if (UOF ==
unsigned(EqTrue))
5196 if (NewCond !=
Cond &&
5199 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5206 if ((isSignedIntSetCC(
Cond) || isUnsignedIntSetCC(
Cond)) &&
5243 bool LegalRHSImm =
false;
5245 if (
auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
5246 if (
auto *LHSR = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5251 DAG.
getConstant(RHSC->getAPIntValue() - LHSR->getAPIntValue(),
5259 DAG.
getConstant(LHSR->getAPIntValue() ^ RHSC->getAPIntValue(),
5265 if (
auto *SUBC = dyn_cast<ConstantSDNode>(N0.
getOperand(0)))
5269 DAG.
getConstant(SUBC->getAPIntValue() - RHSC->getAPIntValue(),
5274 if (RHSC->getValueType(0).getSizeInBits() <= 64)
5283 if (
SDValue V = foldSetCCWithBinOp(VT, N0, N1,
Cond, dl, DCI))
5289 if (
SDValue V = foldSetCCWithBinOp(VT, N1, N0,
Cond, dl, DCI))
5292 if (
SDValue V = foldSetCCWithAnd(VT, N0, N1,
Cond, dl, DCI))
5303 if (
SDValue Folded = buildUREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5306 if (
SDValue Folded = buildSREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5319 N0 = DAG.
getNOT(dl, Temp, OpVT);
5328 Temp = DAG.
getNOT(dl, N0, OpVT);
5335 Temp = DAG.
getNOT(dl, N1, OpVT);
5342 Temp = DAG.
getNOT(dl, N0, OpVT);
5349 Temp = DAG.
getNOT(dl, N1, OpVT);
5358 N0 = DAG.
getNode(ExtendCode, dl, VT, N0);
5374 if (
auto *GASD = dyn_cast<GlobalAddressSDNode>(
N)) {
5375 GA = GASD->getGlobal();
5376 Offset += GASD->getOffset();
5384 if (
auto *V = dyn_cast<ConstantSDNode>(N2)) {
5385 Offset += V->getSExtValue();
5389 if (
auto *V = dyn_cast<ConstantSDNode>(N1)) {
5390 Offset += V->getSExtValue();
5411 unsigned S = Constraint.
size();
5414 switch (Constraint[0]) {
5417 return C_RegisterClass;
5445 if (S > 1 && Constraint[0] ==
'{' && Constraint[S - 1] ==
'}') {
5446 if (S == 8 && Constraint.
substr(1, 6) ==
"memory")
5474 std::vector<SDValue> &Ops,
5477 if (Constraint.
size() > 1)
5480 char ConstraintLetter = Constraint[0];
5481 switch (ConstraintLetter) {
5497 if ((
C = dyn_cast<ConstantSDNode>(
Op)) && ConstraintLetter !=
's') {
5501 bool IsBool =
C->getConstantIntValue()->getBitWidth() == 1;
5511 if (ConstraintLetter !=
'n') {
5512 if (
const auto *GA = dyn_cast<GlobalAddressSDNode>(
Op)) {
5514 GA->getValueType(0),
5515 Offset + GA->getOffset()));
5518 if (
const auto *BA = dyn_cast<BlockAddressSDNode>(
Op)) {
5520 BA->getBlockAddress(), BA->getValueType(0),
5521 Offset + BA->getOffset(), BA->getTargetFlags()));
5524 if (isa<BasicBlockSDNode>(
Op)) {
5529 const unsigned OpCode =
Op.getOpcode();
5531 if ((
C = dyn_cast<ConstantSDNode>(
Op.getOperand(0))))
5532 Op =
Op.getOperand(1);
5535 (
C = dyn_cast<ConstantSDNode>(
Op.getOperand(1))))
5536 Op =
Op.getOperand(0);
5553std::pair<unsigned, const TargetRegisterClass *>
5559 assert(*(Constraint.
end() - 1) ==
'}' &&
"Not a brace enclosed constraint?");
5564 std::pair<unsigned, const TargetRegisterClass *> R =
5576 std::pair<unsigned, const TargetRegisterClass *> S =
5577 std::make_pair(PR, RC);
5599 assert(!ConstraintCode.empty() &&
"No known constraint!");
5600 return isdigit(
static_cast<unsigned char>(ConstraintCode[0]));
5606 assert(!ConstraintCode.empty() &&
"No known constraint!");
5607 return atoi(ConstraintCode.c_str());
5621 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
5622 unsigned maCount = 0;
5628 unsigned LabelNo = 0;
5631 ConstraintOperands.emplace_back(std::move(CI));
5641 switch (OpInfo.
Type) {
5651 assert(!Call.getType()->isVoidTy() &&
"Bad inline asm!");
5652 if (
StructType *STy = dyn_cast<StructType>(Call.getType())) {
5656 assert(ResNo == 0 &&
"Asm only has one result!");
5666 OpInfo.
CallOperandVal = cast<CallBrInst>(&Call)->getIndirectDest(LabelNo);
5677 OpTy = Call.getParamElementType(ArgNo);
5678 assert(OpTy &&
"Indirect operand must have elementtype attribute");
5682 if (
StructType *STy = dyn_cast<StructType>(OpTy))
5683 if (STy->getNumElements() == 1)
5684 OpTy = STy->getElementType(0);
5689 unsigned BitSize =
DL.getTypeSizeInBits(OpTy);
5710 if (!ConstraintOperands.empty()) {
5712 unsigned bestMAIndex = 0;
5713 int bestWeight = -1;
5719 for (maIndex = 0; maIndex < maCount; ++maIndex) {
5721 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
5722 cIndex != eIndex; ++cIndex) {
5743 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
5748 weightSum += weight;
5751 if (weightSum > bestWeight) {
5752 bestWeight = weightSum;
5753 bestMAIndex = maIndex;
5760 cInfo.selectAlternative(bestMAIndex);
5765 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
5766 cIndex != eIndex; ++cIndex) {
5777 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
5780 std::pair<unsigned, const TargetRegisterClass *> InputRC =
5785 (MatchRC.second != InputRC.second)) {
5787 " with a matching output constraint of"
5788 " incompatible type!");
5794 return ConstraintOperands;
5829 if (maIndex >= (
int)
info.multipleAlternatives.size())
5830 rCodes = &
info.Codes;
5832 rCodes = &
info.multipleAlternatives[maIndex].Codes;
5836 for (
const std::string &rCode : *rCodes) {
5838 getSingleConstraintMatchWeight(
info, rCode.c_str());
5839 if (weight > BestWeight)
5840 BestWeight = weight;
5853 Value *CallOperandVal =
info.CallOperandVal;
5856 if (!CallOperandVal)
5859 switch (*constraint) {
5862 if (isa<ConstantInt>(CallOperandVal))
5863 weight = CW_Constant;
5866 if (isa<GlobalValue>(CallOperandVal))
5867 weight = CW_Constant;
5871 if (isa<ConstantFP>(CallOperandVal))
5872 weight = CW_Constant;
5885 weight = CW_Register;
5889 weight = CW_Default;
5923 Ret.reserve(OpInfo.
Codes.size());
5938 Ret.emplace_back(Code, CType);
5943 return getConstraintPiority(a.second) > getConstraintPiority(b.second);
5957 "need immediate or other");
5962 std::vector<SDValue> ResultOps;
5964 return !ResultOps.empty();
5972 assert(!OpInfo.
Codes.empty() &&
"Must have at least one constraint");
5975 if (OpInfo.
Codes.size() == 1) {
5983 unsigned BestIdx = 0;
5984 for (
const unsigned E =
G.size();
5991 if (BestIdx + 1 == E) {
6007 if (isa<ConstantInt>(v) || isa<Function>(v)) {
6011 if (isa<BasicBlock>(v) || isa<BlockAddress>(v)) {
6018 if (
const char *Repl = LowerXConstraint(OpInfo.
ConstraintVT)) {
6032 EVT VT =
N->getValueType(0);
6037 bool UseSRA =
false;
6043 APInt Divisor =
C->getAPIntValue();
6051 APInt Factor = Divisor;
6052 while ((t = Divisor * Factor) != 1)
6069 "Expected matchUnaryPredicate to return one element for scalable "
6074 assert(isa<ConstantSDNode>(Op1) &&
"Expected a constant");
6076 Factor = Factors[0];
6085 Flags.setExact(
true);
6126 EVT VT =
N->getValueType(0);
6162 bool IsAfterLegalization,
6165 EVT VT =
N->getValueType(0);
6191 if (
N->getFlags().hasExact())
6200 const APInt &Divisor =
C->getAPIntValue();
6202 int NumeratorFactor = 0;
6213 NumeratorFactor = 1;
6216 NumeratorFactor = -1;
6233 SDValue MagicFactor, Factor, Shift, ShiftMask;
6241 Shifts.
size() == 1 && ShiftMasks.
size() == 1 &&
6242 "Expected matchUnaryPredicate to return one element for scalable "
6249 assert(isa<ConstantSDNode>(N1) &&
"Expected a constant");
6250 MagicFactor = MagicFactors[0];
6251 Factor = Factors[0];
6253 ShiftMask = ShiftMasks[0];
6294 SDValue Q = GetMULHS(N0, MagicFactor);
6324 bool IsAfterLegalization,
6327 EVT VT =
N->getValueType(0);
6358 unsigned LeadingZeros = 0;
6359 if (!VT.
isVector() && isa<ConstantSDNode>(N1)) {
6367 bool UseNPQ =
false, UsePreShift =
false, UsePostShift =
false;
6373 const APInt& Divisor =
C->getAPIntValue();
6375 SDValue PreShift, MagicFactor, NPQFactor, PostShift;
6379 if (Divisor.
isOne()) {
6380 PreShift = PostShift = DAG.
getUNDEF(ShSVT);
6381 MagicFactor = NPQFactor = DAG.
getUNDEF(SVT);
6389 "We shouldn't generate an undefined shift!");
6391 "We shouldn't generate an undefined shift!");
6393 "Unexpected pre-shift");
6400 UseNPQ |= magics.
IsAdd;
6401 UsePreShift |= magics.
PreShift != 0;
6416 SDValue PreShift, PostShift, MagicFactor, NPQFactor;
6424 NPQFactors.
size() == 1 && PostShifts.
size() == 1 &&
6425 "Expected matchUnaryPredicate to return one for scalable vectors");
6431 assert(isa<ConstantSDNode>(N1) &&
"Expected a constant");
6432 PreShift = PreShifts[0];
6433 MagicFactor = MagicFactors[0];
6434 PostShift = PostShifts[0];
6481 Q = GetMULHU(Q, MagicFactor);
6494 NPQ = GetMULHU(NPQ, NPQFactor);
6513 return DAG.
getSelect(dl, VT, IsOne, N0, Q);
6522 std::function<
bool(
SDValue)> Predicate,
6527 if (SplatValue != Values.
end()) {
6530 return Value == *SplatValue || Predicate(
Value);
6532 Replacement = *SplatValue;
6536 if (!AlternativeReplacement)
6539 Replacement = AlternativeReplacement;
6541 std::replace_if(Values.
begin(), Values.
end(), Predicate, Replacement);
6552 DAGCombinerInfo &DCI,
6555 if (
SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
6558 DCI.AddToWorklist(
N);
6566TargetLowering::prepareUREMEqFold(
EVT SETCCVT,
SDValue REMNode,
6568 DAGCombinerInfo &DCI,
const SDLoc &
DL,
6576 "Only applicable for (in)equality comparisons.");
6589 bool ComparingWithAllZeros =
true;
6590 bool AllComparisonsWithNonZerosAreTautological =
true;
6591 bool HadTautologicalLanes =
false;
6592 bool AllLanesAreTautological =
true;
6593 bool HadEvenDivisor =
false;
6594 bool AllDivisorsArePowerOfTwo =
true;
6595 bool HadTautologicalInvertedLanes =
false;
6604 const APInt &
Cmp = CCmp->getAPIntValue();
6606 ComparingWithAllZeros &=
Cmp.isZero();
6612 bool TautologicalInvertedLane =
D.ule(Cmp);
6613 HadTautologicalInvertedLanes |= TautologicalInvertedLane;
6618 bool TautologicalLane =
D.isOne() || TautologicalInvertedLane;
6619 HadTautologicalLanes |= TautologicalLane;
6620 AllLanesAreTautological &= TautologicalLane;
6626 AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
6629 unsigned K =
D.countr_zero();
6630 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
6634 HadEvenDivisor |= (
K != 0);
6637 AllDivisorsArePowerOfTwo &= D0.
isOne();
6641 unsigned W =
D.getBitWidth();
6645 assert(!
P.isZero() &&
"No multiplicative inverse!");
6646 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
6659 "We are expecting that K is always less than all-ones for ShSVT");
6662 if (TautologicalLane) {
6686 if (AllLanesAreTautological)
6691 if (AllDivisorsArePowerOfTwo)
6696 if (HadTautologicalLanes) {
6711 "Expected matchBinaryPredicate to return one element for "
6722 if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
6726 "Expecting that the types on LHS and RHS of comparisons match.");
6736 if (HadEvenDivisor) {
6749 if (!HadTautologicalInvertedLanes)
6755 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
6762 SDValue TautologicalInvertedChannels =
6772 DL, SETCCVT, SETCCVT);
6774 Replacement, NewCC);
6782 TautologicalInvertedChannels);
6795 DAGCombinerInfo &DCI,
6798 if (
SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
6800 assert(Built.
size() <= 7 &&
"Max size prediction failed.");
6802 DCI.AddToWorklist(
N);
6810TargetLowering::prepareSREMEqFold(
EVT SETCCVT,
SDValue REMNode,
6812 DAGCombinerInfo &DCI,
const SDLoc &
DL,
6837 "Only applicable for (in)equality comparisons.");
6853 if (!CompTarget || !CompTarget->
isZero())
6856 bool HadIntMinDivisor =
false;
6857 bool HadOneDivisor =
false;
6858 bool AllDivisorsAreOnes =
true;
6859 bool HadEvenDivisor =
false;
6860 bool NeedToApplyOffset =
false;
6861 bool AllDivisorsArePowerOfTwo =
true;
6876 HadIntMinDivisor |=
D.isMinSignedValue();
6879 HadOneDivisor |=
D.isOne();
6880 AllDivisorsAreOnes &=
D.isOne();
6883 unsigned K =
D.countr_zero();
6884 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
6887 if (!
D.isMinSignedValue()) {
6890 HadEvenDivisor |= (
K != 0);
6895 AllDivisorsArePowerOfTwo &= D0.
isOne();
6899 unsigned W =
D.getBitWidth();
6903 assert(!
P.isZero() &&
"No multiplicative inverse!");
6904 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
6910 if (!
D.isMinSignedValue()) {
6913 NeedToApplyOffset |=
A != 0;
6920 "We are expecting that A is always less than all-ones for SVT");
6922 "We are expecting that K is always less than all-ones for ShSVT");
6960 if (AllDivisorsAreOnes)
6965 if (AllDivisorsArePowerOfTwo)
6968 SDValue PVal, AVal, KVal, QVal;
6970 if (HadOneDivisor) {
6990 QAmts.
size() == 1 &&
6991 "Expected matchUnaryPredicate to return one element for scalable "
6998 assert(isa<ConstantSDNode>(
D) &&
"Expected a constant");
7009 if (NeedToApplyOffset) {
7021 if (HadEvenDivisor) {
7036 if (!HadIntMinDivisor)
7042 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
7077 MaskedIsZero, Fold);
7084 if (!isa<ConstantSDNode>(
Op.getOperand(0))) {
7086 "be a constant integer");
7096 EVT VT =
Op.getValueType();
7119 bool LegalOps,
bool OptForSize,
7121 unsigned Depth)
const {
7123 if (
Op.getOpcode() ==
ISD::FNEG ||
Op.getOpcode() == ISD::VP_FNEG) {
7125 return Op.getOperand(0);
7136 EVT VT =
Op.getValueType();
7137 unsigned Opcode =
Op.getOpcode();
7147 auto RemoveDeadNode = [&](
SDValue N) {
7148 if (
N &&
N.getNode()->use_empty())
7157 std::list<HandleSDNode> Handles;
7168 if (LegalOps && !IsOpLegal)
7171 APFloat V = cast<ConstantFPSDNode>(
Op)->getValueAPF();
7185 return !N.isUndef() && !isa<ConstantFPSDNode>(N);
7193 return N.isUndef() ||
7198 if (LegalOps && !IsOpLegal)
7207 APFloat V = cast<ConstantFPSDNode>(
C)->getValueAPF();
7215 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7226 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7229 Handles.emplace_back(NegX);
7234 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7240 if (NegX && (CostX <= CostY)) {
7244 RemoveDeadNode(NegY);
7253 RemoveDeadNode(NegX);
7260 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7282 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7285 Handles.emplace_back(NegX);
7290 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7296 if (NegX && (CostX <= CostY)) {
7300 RemoveDeadNode(NegY);
7306 if (
C->isExactlyValue(2.0) &&
Op.getOpcode() ==
ISD::FMUL)
7314 RemoveDeadNode(NegX);
7321 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7324 SDValue X =
Op.getOperand(0),
Y =
Op.getOperand(1), Z =
Op.getOperand(2);
7327 getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ,
Depth);
7333 Handles.emplace_back(NegZ);
7338 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7341 Handles.emplace_back(NegX);
7346 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7352 if (NegX && (CostX <= CostY)) {
7353 Cost = std::min(CostX, CostZ);
7356 RemoveDeadNode(NegY);
7362 Cost = std::min(CostY, CostZ);
7365 RemoveDeadNode(NegX);
7373 if (
SDValue NegV = getNegatedExpression(
Op.getOperand(0), DAG, LegalOps,
7375 return DAG.
getNode(Opcode,
DL, VT, NegV);
7378 if (
SDValue NegV = getNegatedExpression(
Op.getOperand(0), DAG, LegalOps,
7389 getNegatedExpression(
LHS, DAG, LegalOps, OptForSize, CostLHS,
Depth);
7391 RemoveDeadNode(NegLHS);
7396 Handles.emplace_back(NegLHS);
7401 getNegatedExpression(
RHS, DAG, LegalOps, OptForSize, CostRHS,
Depth);
7409 RemoveDeadNode(NegLHS);
7410 RemoveDeadNode(NegRHS);
7414 Cost = std::min(CostLHS, CostRHS);
7415 return DAG.
getSelect(
DL, VT,
Op.getOperand(0), NegLHS, NegRHS);
7444 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
7457 if ((
Signed && HasSMUL_LOHI) || (!
Signed && HasUMUL_LOHI)) {
7485 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false)) {
7486 Result.push_back(
Lo);
7487 Result.push_back(
Hi);
7490 Result.push_back(Zero);
7491 Result.push_back(Zero);
7502 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
true)) {
7503 Result.push_back(
Lo);
7504 Result.push_back(
Hi);
7509 unsigned ShiftAmount = OuterBitSize - InnerBitSize;
7524 if (!MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false))
7527 Result.push_back(
Lo);
7534 Result.push_back(
Hi);
7547 if (!MakeMUL_LOHI(LL, RH,
Lo,
Hi,
false))
7554 if (!MakeMUL_LOHI(LH, RL,
Lo,
Hi,
false))
7606 bool Ok = expandMUL_LOHI(
N->getOpcode(),
N->getValueType(0),
SDLoc(
N),
7607 N->getOperand(0),
N->getOperand(1), Result, HiLoVT,
7608 DAG, Kind, LL, LH, RL, RH);
7610 assert(Result.size() == 2);
7642 unsigned Opcode =
N->getOpcode();
7643 EVT VT =
N->getValueType(0);
7650 "Unexpected opcode");
7652 auto *CN = dyn_cast<ConstantSDNode>(
N->getOperand(1));
7656 APInt Divisor = CN->getAPIntValue();
7664 if (Divisor.
uge(HalfMaxPlus1))
7682 unsigned TrailingZeros = 0;
7696 if (HalfMaxPlus1.
urem(Divisor).
isOne()) {
7697 assert(!LL == !LH &&
"Expected both input halves or no input halves!");
7699 std::tie(LL, LH) = DAG.
SplitScalar(
N->getOperand(0), dl, HiLoVT, HiLoVT);
7703 if (TrailingZeros) {
7774 std::tie(QuotL, QuotH) = DAG.
SplitScalar(Quotient, dl, HiLoVT, HiLoVT);
7775 Result.push_back(QuotL);
7776 Result.push_back(QuotH);
7782 if (TrailingZeros) {
7788 Result.push_back(RemL);
7804 EVT VT =
Node->getValueType(0);
7814 bool IsFSHL =
Node->getOpcode() == ISD::VP_FSHL;
7817 EVT ShVT = Z.getValueType();
7823 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
7824 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitWidthC, ShAmt, Mask, VL);
7825 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, IsFSHL ? ShAmt : InvShAmt, Mask,
7827 ShY = DAG.
getNode(ISD::VP_LSHR,
DL, VT,
Y, IsFSHL ? InvShAmt : ShAmt, Mask,
7835 ShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, Z, BitMask, Mask, VL);
7839 InvShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, NotZ, BitMask, Mask, VL);
7842 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
7843 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitMask, ShAmt, Mask, VL);
7848 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, ShAmt, Mask, VL);
7850 ShY = DAG.
getNode(ISD::VP_LSHR,
DL, VT, ShY1, InvShAmt, Mask, VL);
7853 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT, ShX1, InvShAmt, Mask, VL);
7854 ShY = DAG.
getNode(ISD::VP_LSHR,
DL, VT,
Y, ShAmt, Mask, VL);
7857 return DAG.
getNode(ISD::VP_OR,
DL, VT, ShX, ShY, Mask, VL);
7862 if (Node->isVPOpcode())
7865 EVT VT = Node->getValueType(0);
7875 SDValue Z = Node->getOperand(2);
7878 bool IsFSHL = Node->getOpcode() ==
ISD::FSHL;
7881 EVT ShVT = Z.getValueType();
7951 EVT VT = Node->getValueType(0);
7953 bool IsLeft = Node->getOpcode() ==
ISD::ROTL;
7954 SDValue Op0 = Node->getOperand(0);
7955 SDValue Op1 = Node->getOperand(1);
7966 return DAG.
getNode(RevRot,
DL, VT, Op0, Sub);
7969 if (!AllowVectorOps && VT.
isVector() &&
7987 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
7989 HsVal = DAG.
getNode(HsOpc,
DL, VT, Op0, HsAmt);
7995 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8006 assert(Node->getNumOperands() == 3 &&
"Not a double-shift!");
8007 EVT VT = Node->getValueType(0);
8013 SDValue ShOpLo = Node->getOperand(0);
8014 SDValue ShOpHi = Node->getOperand(1);
8015 SDValue ShAmt = Node->getOperand(2);
8058 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
8059 SDValue Src = Node->getOperand(OpNo);
8060 EVT SrcVT = Src.getValueType();
8061 EVT DstVT = Node->getValueType(0);
8065 if (SrcVT != MVT::f32 || DstVT != MVT::i64)
8068 if (Node->isStrictFPOpcode())
8131 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
8132 SDValue Src = Node->getOperand(OpNo);
8134 EVT SrcVT = Src.getValueType();
8135 EVT DstVT = Node->getValueType(0);
8156 if (Node->isStrictFPOpcode()) {
8158 { Node->getOperand(0), Src });
8159 Chain = Result.getValue(1);
8173 if (Node->isStrictFPOpcode()) {
8175 Node->getOperand(0),
true);
8181 bool Strict = Node->isStrictFPOpcode() ||
8200 if (Node->isStrictFPOpcode()) {
8202 { Chain, Src, FltOfs });
8224 Result = DAG.
getSelect(dl, DstVT, Sel, True, False);
8235 if (Node->isStrictFPOpcode())
8238 SDValue Src = Node->getOperand(0);
8239 EVT SrcVT = Src.getValueType();
8240 EVT DstVT = Node->getValueType(0);
8263 llvm::bit_cast<double>(UINT64_C(0x4530000000100000)), dl, DstVT);
8283 unsigned Opcode = Node->getOpcode();
8288 if (Node->getFlags().hasNoNaNs()) {
8290 SDValue Op1 = Node->getOperand(0);
8291 SDValue Op2 = Node->getOperand(1);
8296 Flags.setNoSignedZeros(
true);
8309 EVT VT = Node->getValueType(0);
8313 "Expanding fminnum/fmaxnum for scalable vectors is undefined.");
8316 SDValue Quiet0 = Node->getOperand(0);
8317 SDValue Quiet1 = Node->getOperand(1);
8319 if (!Node->getFlags().hasNoNaNs()) {
8332 return DAG.
getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags());
8338 if ((Node->getFlags().hasNoNaNs() ||
8341 (Node->getFlags().hasNoSignedZeros() ||
8344 unsigned IEEE2018Op =
8347 return DAG.
getNode(IEEE2018Op, dl, VT, Node->getOperand(0),
8348 Node->getOperand(1), Node->getFlags());
8351 if (
SDValue SelCC = createSelectForFMINNUM_FMAXNUM(Node, DAG))
8365 bool IsOrdered = NanTest ==
fcNone;
8366 bool IsUnordered = NanTest ==
fcNan;
8369 if (!IsOrdered && !IsUnordered)
8370 return std::nullopt;
8372 if (OrderedMask ==
fcZero &&
8378 return std::nullopt;
8385 EVT OperandVT =
Op.getValueType();
8396 if (OperandVT == MVT::ppcf128) {
8399 OperandVT = MVT::f64;
8404 bool IsInverted =
false;
8407 Test = InvertedCheck;
8414 bool IsF80 = (ScalarFloatVT == MVT::f80);
8418 if (Flags.hasNoFPExcept() &&
8423 if (std::optional<bool> IsCmp0 =
8426 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode,
8433 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode);
8468 const unsigned ExplicitIntBitInF80 = 63;
8469 APInt ExpMask = Inf;
8471 ExpMask.
clearBit(ExplicitIntBitInF80);
8485 const auto appendResult = [&](
SDValue PartialRes) {
8495 const auto getIntBitIsSet = [&]() ->
SDValue {
8496 if (!IntBitIsSetV) {
8497 APInt IntBitMask(BitSize, 0);
8498 IntBitMask.
setBit(ExplicitIntBitInF80);
8503 return IntBitIsSetV;
8524 Test &= ~fcPosFinite;
8529 Test &= ~fcNegFinite;
8531 appendResult(PartialRes);
8540 appendResult(ExpIsZero);
8550 else if (PartialCheck ==
fcZero)
8554 appendResult(PartialRes);
8567 appendResult(PartialRes);
8570 if (
unsigned PartialCheck =
Test &
fcInf) {
8573 else if (PartialCheck ==
fcInf)
8580 appendResult(PartialRes);
8583 if (
unsigned PartialCheck =
Test &
fcNan) {
8584 APInt InfWithQnanBit = Inf | QNaNBitMask;
8586 if (PartialCheck ==
fcNan) {
8599 }
else if (PartialCheck ==
fcQNan) {
8611 appendResult(PartialRes);
8616 APInt ExpLSB = ExpMask & ~(ExpMask.
shl(1));
8619 APInt ExpLimit = ExpMask - ExpLSB;
8632 appendResult(PartialRes);
8655 EVT VT = Node->getValueType(0);
8662 if (!(Len <= 128 && Len % 8 == 0))
8722 EVT VT = Node->getValueType(0);
8725 SDValue Mask = Node->getOperand(1);
8726 SDValue VL = Node->getOperand(2);
8731 if (!(Len <= 128 && Len % 8 == 0))
8743 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5;
8746 Tmp1 = DAG.
getNode(ISD::VP_AND, dl, VT,
8750 Op = DAG.
getNode(ISD::VP_SUB, dl, VT,
Op, Tmp1, Mask, VL);
8753 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op, Mask33, Mask, VL);
8754 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT,
8758 Op = DAG.
getNode(ISD::VP_ADD, dl, VT, Tmp2, Tmp3, Mask, VL);
8763 Tmp5 = DAG.
getNode(ISD::VP_ADD, dl, VT,
Op, Tmp4, Mask, VL);
8764 Op = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp5, Mask0F, Mask, VL);
8772 return DAG.
getNode(ISD::VP_LSHR, dl, VT,
8773 DAG.
getNode(ISD::VP_MUL, dl, VT,
Op, Mask01, Mask, VL),
8779 EVT VT = Node->getValueType(0);
8818 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
8829 EVT VT = Node->getValueType(0);
8832 SDValue Mask = Node->getOperand(1);
8833 SDValue VL = Node->getOperand(2);
8843 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
8846 DAG.
getNode(ISD::VP_LSHR, dl, VT,
Op, Tmp, Mask, VL), Mask,
8851 return DAG.
getNode(ISD::VP_CTPOP, dl, VT,
Op, Mask, VL);
8860 :
APInt(64, 0x0218A392CD3D5DBFULL);
8874 for (
unsigned i = 0; i <
BitWidth; i++) {
8900 EVT VT = Node->getValueType(0);
8934 if (
SDValue V = CTTZTableLookup(Node, DAG, dl, VT,
Op, NumBitsPerElt))
8956 SDValue Mask = Node->getOperand(1);
8957 SDValue VL = Node->getOperand(2);
8959 EVT VT = Node->getValueType(0);
8966 SDValue Tmp = DAG.
getNode(ISD::VP_AND, dl, VT, Not, MinusOne, Mask, VL);
8967 return DAG.
getNode(ISD::VP_CTPOP, dl, VT, Tmp, Mask, VL);
8971 bool IsNegative)
const {
8973 EVT VT =
N->getValueType(0);
9026 EVT VT =
N->getValueType(0);
9058 EVT VT =
N->getValueType(0);
9065 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
9116 EVT VT =
N->getValueType(0);
9125 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
9134 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp1, Tmp2, Mask, EVL);
9144 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9148 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
9149 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
9150 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
9154 Tmp7 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9158 Tmp6 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9159 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
9162 Tmp5 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9163 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
9168 Tmp4 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp4,
9169 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
9172 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp3,
9173 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
9176 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9180 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp7, Mask, EVL);
9181 Tmp6 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp6, Tmp5, Mask, EVL);
9182 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
9183 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
9184 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp6, Mask, EVL);
9185 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
9186 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp4, Mask, EVL);
9192 EVT VT =
N->getValueType(0);
9235 for (
unsigned I = 0, J = Sz-1;
I < Sz; ++
I, --J) {
9252 assert(
N->getOpcode() == ISD::VP_BITREVERSE);
9255 EVT VT =
N->getValueType(0);
9274 Tmp = (Sz > 8 ? DAG.
getNode(ISD::VP_BSWAP, dl, VT,
Op, Mask, EVL) :
Op);
9279 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9285 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9290 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9296 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9301 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9307 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9313std::pair<SDValue, SDValue>
9317 SDValue Chain = LD->getChain();
9318 SDValue BasePTR = LD->getBasePtr();
9319 EVT SrcVT = LD->getMemoryVT();
9320 EVT DstVT = LD->getValueType(0);
9352 LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(),
9353 LD->getMemOperand()->getFlags(), LD->getAAInfo());
9356 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9357 unsigned ShiftIntoIdx =
9369 Scalar = DAG.
getNode(ExtendOp, SL, DstEltVT, Scalar);
9376 return std::make_pair(
Value, Load.getValue(1));
9385 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9387 DAG.
getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
9388 LD->getPointerInfo().getWithOffset(
Idx * Stride),
9389 SrcEltVT, LD->getOriginalAlign(),
9390 LD->getMemOperand()->getFlags(), LD->getAAInfo());
9401 return std::make_pair(
Value, NewChain);
9408 SDValue Chain = ST->getChain();
9409 SDValue BasePtr = ST->getBasePtr();
9411 EVT StVT = ST->getMemoryVT();
9437 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9442 unsigned ShiftIntoIdx =
9451 return DAG.
getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
9452 ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
9458 assert(Stride &&
"Zero stride!");
9462 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9471 Chain, SL, Elt,
Ptr, ST->getPointerInfo().getWithOffset(
Idx * Stride),
9472 MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
9481std::pair<SDValue, SDValue>
9484 "unaligned indexed loads not implemented!");
9485 SDValue Chain = LD->getChain();
9487 EVT VT = LD->getValueType(0);
9488 EVT LoadedVT = LD->getMemoryVT();
9498 return scalarizeVectorLoad(LD, DAG);
9504 LD->getMemOperand());
9510 return std::make_pair(Result, newLoad.
getValue(1));
9518 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
9522 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.
getNode())->getIndex();
9527 EVT PtrVT =
Ptr.getValueType();
9528 EVT StackPtrVT = StackPtr.getValueType();
9534 for (
unsigned i = 1; i < NumRegs; i++) {
9537 RegVT, dl, Chain,
Ptr, LD->getPointerInfo().getWithOffset(
Offset),
9538 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
9542 Load.getValue(1), dl, Load, StackPtr,
9553 8 * (LoadedBytes -
Offset));
9556 LD->getPointerInfo().getWithOffset(
Offset), MemVT,
9557 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
9563 Load.getValue(1), dl, Load, StackPtr,
9570 Load = DAG.
getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
9575 return std::make_pair(Load, TF);
9579 "Unaligned load of unsupported type.");
9588 Align Alignment = LD->getOriginalAlign();
9589 unsigned IncrementSize = NumBits / 8;
9600 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
9605 LD->getPointerInfo().getWithOffset(IncrementSize),
9606 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
9609 Hi = DAG.
getExtLoad(HiExtType, dl, VT, Chain,
Ptr, LD->getPointerInfo(),
9610 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
9615 LD->getPointerInfo().getWithOffset(IncrementSize),
9616 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
9628 return std::make_pair(Result, TF);
9634 "unaligned indexed stores not implemented!");
9635 SDValue Chain = ST->getChain();
9639 Align Alignment = ST->getOriginalAlign();
9641 EVT StoreMemVT = ST->getMemoryVT();
9657 Result = DAG.
getStore(Chain, dl, Result,
Ptr, ST->getPointerInfo(),
9658 Alignment, ST->getMemOperand()->getFlags());
9666 EVT PtrVT =
Ptr.getValueType();
9669 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
9673 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
9677 Chain, dl, Val, StackPtr,
9680 EVT StackPtrVT = StackPtr.getValueType();
9688 for (
unsigned i = 1; i < NumRegs; i++) {
9691 RegVT, dl, Store, StackPtr,
9695 ST->getPointerInfo().getWithOffset(
Offset),
9696 ST->getOriginalAlign(),
9697 ST->getMemOperand()->getFlags()));
9717 ST->getPointerInfo().getWithOffset(
Offset), LoadMemVT,
9718 ST->getOriginalAlign(),
9719 ST->getMemOperand()->getFlags(), ST->getAAInfo()));
9726 "Unaligned store of unknown type.");
9730 unsigned IncrementSize = NumBits / 8;
9739 if (
auto *
C = dyn_cast<ConstantSDNode>(
Lo);
C && !
C->isOpaque())
9750 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
9751 ST->getMemOperand()->getFlags());
9756 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
9757 ST->getMemOperand()->getFlags(), ST->getAAInfo());
9768 bool IsCompressedMemory)
const {
9770 EVT AddrVT =
Addr.getValueType();
9771 EVT MaskVT = Mask.getValueType();
9773 "Incompatible types of Data and Mask");
9774 if (IsCompressedMemory) {
9777 "Cannot currently handle compressed memory with scalable vectors");
9783 MaskIntVT = MVT::i32;
9807 "Cannot index a scalable vector within a fixed-width vector");
9811 EVT IdxVT =
Idx.getValueType();
9817 if (
auto *IdxCst = dyn_cast<ConstantSDNode>(
Idx))
9818 if (IdxCst->getZExtValue() + (NumSubElts - 1) < NElts)
9832 unsigned MaxIndex = NumSubElts < NElts ? NElts - NumSubElts : 0;
9840 return getVectorSubVecPointer(
9859 "Converting bits to bytes lost precision");
9861 "Sub-vector must be a vector with matching element type");
9890 std::string NameString = (
"__emutls_v." + GA->
getGlobal()->
getName()).str();
9894 assert(EmuTlsVar &&
"Cannot find EmuTlsVar ");
9896 Entry.Ty = VoidPtrType;
9897 Args.push_back(Entry);
9904 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
9913 "Emulated TLS must have zero offset in GlobalAddressSDNode");
9914 return CallResult.first;
9925 EVT VT =
Op.getOperand(0).getValueType();
9927 if (VT.
bitsLT(MVT::i32)) {
9941 SDValue Op0 = Node->getOperand(0);
9942 SDValue Op1 = Node->getOperand(1);
9945 unsigned Opcode = Node->getOpcode();
9987 {Op0, Op1, DAG.getCondCode(CC)})) {
9994 {Op0, Op1, DAG.getCondCode(CC)})) {
10022 unsigned Opcode = Node->getOpcode();
10025 EVT VT =
LHS.getValueType();
10028 assert(VT ==
RHS.getValueType() &&
"Expected operands to be the same type");
10044 unsigned OverflowOp;
10059 llvm_unreachable(
"Expected method to receive signed or unsigned saturation "
10060 "addition or subtraction node.");
10068 unsigned BitWidth =
LHS.getScalarValueSizeInBits();
10071 SDValue SumDiff = Result.getValue(0);
10072 SDValue Overflow = Result.getValue(1);
10094 return DAG.
getSelect(dl, VT, Overflow, Zero, SumDiff);
10114 if (LHSIsNonNegative || RHSIsNonNegative) {
10116 return DAG.
getSelect(dl, VT, Overflow, SatMax, SumDiff);
10122 if (LHSIsNegative || RHSIsNegative) {
10124 return DAG.
getSelect(dl, VT, Overflow, SatMin, SumDiff);
10134 return DAG.
getSelect(dl, VT, Overflow, Result, SumDiff);
10138 unsigned Opcode = Node->getOpcode();
10142 EVT VT =
LHS.getValueType();
10147 "Expected a SHLSAT opcode");
10148 assert(VT ==
RHS.getValueType() &&
"Expected operands to be the same type");
10186 if (WideVT == MVT::i16)
10187 LC = RTLIB::MUL_I16;
10188 else if (WideVT == MVT::i32)
10189 LC = RTLIB::MUL_I32;
10190 else if (WideVT == MVT::i64)
10191 LC = RTLIB::MUL_I64;
10192 else if (WideVT == MVT::i128)
10193 LC = RTLIB::MUL_I128;
10202 unsigned HalfBits = Bits >> 1;
10241 if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.
getDataLayout())) {
10246 SDValue Args[] = {LL, LH, RL, RH};
10247 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
10249 SDValue Args[] = {LH, LL, RH, RL};
10250 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
10253 "Ret value is a collection of constituent nodes holding result.");
10256 Lo = Ret.getOperand(0);
10257 Hi = Ret.getOperand(1);
10259 Lo = Ret.getOperand(1);
10260 Hi = Ret.getOperand(0);
10269 EVT VT =
LHS.getValueType();
10270 assert(
RHS.getValueType() == VT &&
"Mismatching operand types");
10289 forceExpandWideMUL(DAG, dl,
Signed, WideVT,
LHS, HiLHS,
RHS, HiRHS,
Lo,
Hi);
10298 "Expected a fixed point multiplication opcode");
10303 EVT VT =
LHS.getValueType();
10304 unsigned Scale = Node->getConstantOperandVal(2);
10320 SDValue Product = Result.getValue(0);
10321 SDValue Overflow = Result.getValue(1);
10332 Result = DAG.
getSelect(dl, VT, ProdNeg, SatMin, SatMax);
10333 return DAG.
getSelect(dl, VT, Overflow, Result, Product);
10337 SDValue Product = Result.getValue(0);
10338 SDValue Overflow = Result.getValue(1);
10342 return DAG.
getSelect(dl, VT, Overflow, SatMax, Product);
10347 "Expected scale to be less than the number of bits if signed or at "
10348 "most the number of bits if unsigned.");
10350 "Expected both operands to be the same type");
10358 Lo = Result.getValue(0);
10359 Hi = Result.getValue(1);
10369 if (Scale == VTSize)
10415 return DAG.
getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
10440 "Expected a fixed point division opcode");
10442 EVT VT =
LHS.getValueType();
10464 if (LHSLead + RHSTrail < Scale + (
unsigned)(Saturating &&
Signed))
10467 unsigned LHSShift = std::min(LHSLead, Scale);
10468 unsigned RHSShift = Scale - LHSShift;
10525 bool IsAdd = Node->getOpcode() ==
ISD::UADDO;
10531 SDValue NodeCarry = DAG.
getNode(OpcCarry, dl, Node->getVTList(),
10532 { LHS, RHS, CarryIn });
10541 EVT ResultType = Node->getValueType(1);
10552 DAG.
getSetCC(dl, SetCCType, Result,
10571 bool IsAdd = Node->getOpcode() ==
ISD::SADDO;
10576 EVT ResultType = Node->getValueType(1);
10602 DAG.
getNode(
ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
10603 ResultType, ResultType);
10609 EVT VT = Node->getValueType(0);
10617 const APInt &
C = RHSC->getAPIntValue();
10619 if (
C.isPowerOf2()) {
10621 bool UseArithShift =
isSigned && !
C.isMinSignedValue();
10624 Overflow = DAG.
getSetCC(dl, SetCCVT,
10626 dl, VT, Result, ShiftAmt),
10639 static const unsigned Ops[2][3] =
10662 forceExpandWideMUL(DAG, dl,
isSigned,
LHS,
RHS, BottomHalf, TopHalf);
10665 Result = BottomHalf;
10672 Overflow = DAG.
getSetCC(dl, SetCCVT, TopHalf,
10677 EVT RType = Node->getValueType(1);
10682 "Unexpected result type for S/UMULO legalization");
10690 EVT VT =
Op.getValueType();
10694 "Expanding reductions for scalable vectors is undefined.");
10705 Op = DAG.
getNode(BaseOpcode, dl, HalfVT,
Lo,
Hi, Node->getFlags());
10717 for (
unsigned i = 1; i < NumElts; i++)
10718 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());
10721 if (EltVT != Node->getValueType(0))
10728 SDValue AccOp = Node->getOperand(0);
10729 SDValue VecOp = Node->getOperand(1);
10737 "Expanding reductions for scalable vectors is undefined.");
10747 for (
unsigned i = 0; i < NumElts; i++)
10748 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags);
10755 EVT VT = Node->getValueType(0);
10760 SDValue Dividend = Node->getOperand(0);
10761 SDValue Divisor = Node->getOperand(1);
10764 Result = DAG.
getNode(DivRemOpc, dl, VTs, Dividend, Divisor).
getValue(1);
10769 SDValue Divide = DAG.
getNode(DivOpc, dl, VT, Dividend, Divisor);
10781 SDValue Src = Node->getOperand(0);
10784 EVT SrcVT = Src.getValueType();
10785 EVT DstVT = Node->getValueType(0);
10787 EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
10790 assert(SatWidth <= DstWidth &&
10791 "Expected saturation width smaller than result width");
10795 APInt MinInt, MaxInt;
10806 if (SrcVT == MVT::f16 || SrcVT == MVT::bf16) {
10808 SrcVT = Src.getValueType();
10829 if (AreExactFloatBounds && MinMaxLegal) {
10838 dl, DstVT, Clamped);
10850 return DAG.
getSelect(dl, DstVT, IsNan, ZeroInt, FpToInt);
10889 EVT OperandVT =
Op.getValueType();
10911 AbsWide = DAG.
getBitcast(OperandVT, ClearedSign);
10934 KeepNarrow = DAG.
getNode(
ISD::OR, dl, WideSetCCVT, KeepNarrow, AlreadyOdd);
10943 SDValue Adjust = DAG.
getSelect(dl, ResultIntVT, NarrowIsRd, One, NegativeOne);
10945 Op = DAG.
getSelect(dl, ResultIntVT, KeepNarrow, NarrowBits, Adjusted);
10957 EVT VT = Node->getValueType(0);
10960 if (Node->getConstantOperandVal(1) == 1) {
10963 EVT OperandVT =
Op.getValueType();
10975 EVT I32 = F32.changeTypeToInteger();
10976 Op = expandRoundInexactToOdd(F32,
Op, dl, DAG);
11001 EVT I16 = I32.isVector() ? I32.changeVectorElementType(MVT::i16) : MVT::i16;
11011 assert(Node->getValueType(0).isScalableVector() &&
11012 "Fixed length vector types expected to use SHUFFLE_VECTOR!");
11014 EVT VT = Node->getValueType(0);
11015 SDValue V1 = Node->getOperand(0);
11016 SDValue V2 = Node->getOperand(1);
11017 int64_t Imm = cast<ConstantSDNode>(Node->getOperand(2))->getSExtValue();
11036 EVT PtrVT = StackPtr.getValueType();
11038 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
11053 StackPtr = getVectorElementPointer(DAG, StackPtr, VT, Node->getOperand(2));
11055 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr,
11078 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr2,
11085 SDValue EVL,
bool &NeedInvert,
11087 bool IsSignaling)
const {
11089 MVT OpVT =
LHS.getSimpleValueType();
11091 NeedInvert =
false;
11092 assert(!EVL == !Mask &&
"VP Mask and EVL must either both be set or unset");
11093 bool IsNonVP = !EVL;
11108 bool NeedSwap =
false;
11109 InvCC = getSetCCInverse(CCCode, OpVT);
11137 "If SETUE is expanded, SETOEQ or SETUNE must be legal!");
11142 "If SETO is expanded, SETOEQ must be legal!");
11160 NeedInvert = ((
unsigned)CCCode & 0x8U);
11201 SetCC1 = DAG.
getSetCC(dl, VT,
LHS,
RHS, CC1, Chain, IsSignaling);
11202 SetCC2 = DAG.
getSetCC(dl, VT,
LHS,
RHS, CC2, Chain, IsSignaling);
11210 SetCC1 = DAG.
getSetCC(dl, VT,
LHS,
LHS, CC1, Chain, IsSignaling);
11211 SetCC2 = DAG.
getSetCC(dl, VT,
RHS,
RHS, CC2, Chain, IsSignaling);
11221 LHS = DAG.
getNode(Opc, dl, VT, SetCC1, SetCC2);
11225 Opc = Opc ==
ISD::OR ? ISD::VP_OR : ISD::VP_AND;
11226 LHS = DAG.
getNode(Opc, dl, VT, SetCC1, SetCC2, Mask, EVL);
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu AMDGPU Register Bank Select
block Block Frequency Analysis
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
static bool isUndef(ArrayRef< int > Mask)
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, const APInt &Demanded)
Check to see if the specified operand of the specified instruction is a constant integer.
static bool isNonZeroModBitWidthOrUndef(const MachineRegisterInfo &MRI, Register Reg, unsigned BW)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
unsigned const TargetRegisterInfo * TRI
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const char LLVMTargetMachineRef TM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static SDValue foldSetCCWithFunnelShift(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static bool lowerImmediateIfPossible(TargetLowering::ConstraintPair &P, SDValue Op, SelectionDAG *DAG, const TargetLowering &TLI)
If we have an immediate, see if we can lower it.
static SDValue expandVPFunnelShift(SDNode *Node, SelectionDAG &DAG)
static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, const APInt &UndefOp0, const APInt &UndefOp1)
Given a vector binary operation and known undefined elements for each input operand,...
static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, SDValue Idx, EVT VecVT, const SDLoc &dl, ElementCount SubEC)
static unsigned getConstraintPiority(TargetLowering::ConstraintType CT)
Return a number indicating our preference for chosing a type of constraint over another,...
static std::optional< bool > isFCmpEqualZero(FPClassTest Test, const fltSemantics &Semantics, const MachineFunction &MF)
Returns a true value if if this FPClassTest can be performed with an ordered fcmp to 0,...
static void turnVectorIntoSplatVector(MutableArrayRef< SDValue > Values, std::function< bool(SDValue)> Predicate, SDValue AlternativeReplacement=SDValue())
If all values in Values that don't match the predicate are same 'splat' value, then replace all value...
static bool canExpandVectorCTPOP(const TargetLowering &TLI, EVT VT)
static SDValue foldSetCCWithRotate(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue combineShiftToAVG(SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, const APInt &DemandedBits, const APInt &DemandedElts, unsigned Depth)
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created)
Given an exact SDIV by a constant, create a multiplication with the multiplicative inverse of the con...
static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT, SDValue N0, const APInt &C1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
This file describes how to lower LLVM code to machine code.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT, SelectionDAG &DAG)
Scalarize a vector store, bitcasting to TargetVT to determine the scalar type.
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Class for arbitrary precision integers.
APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
APInt multiplicativeInverse(const APInt &modulo) const
Computes the multiplicative inverse of this APInt for a given modulo.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
void setSignBit()
Set the sign bit to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
void clearAllBits()
Set every bit to 0.
APInt reverseBits() const
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
void negate()
Negate this APInt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned getSignificantBits() const
Get the minimum bit size for this signed APInt.
unsigned countLeadingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
void setAllBits()
Set every bit to 1.
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
int64_t getSExtValue() const
Get sign extended value.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void setBitVal(unsigned BitPosition, bool BitValue)
Set a given bit to a given value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool hasAttributes() const
Return true if the builder has IR-level attributes.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
AttrBuilder & removeAttribute(Attribute::AttrKind Val)
Remove an attribute from the builder.
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This class represents a function call, abstracting a target machine's calling convention.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
const APInt & getAPIntValue() const
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
AttributeList getAttributes() const
Return the attribute list for this Function.
int64_t getOffset() const
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
std::vector< std::string > ConstraintCodeVector
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
Wrapper class representing physical registers. Should be passed by value.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setAdjustsStack(bool V)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
Function & getFunction()
Return the LLVM function that this machine code represents.
@ EK_GPRel32BlockAddress
EK_GPRel32BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Align getOriginalAlign() const
Returns alignment and volatility of the memory access.
bool isSimple() const
Returns true if the memory operation is neither atomic or volatile.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
const GlobalVariable * getNamedGlobal(StringRef Name) const
Return the global variable in the module with the specified name, of arbitrary type.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Class to represent pointers.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
static SDNodeIterator end(const SDNode *N)
static SDNodeIterator begin(const SDNode *N)
Represents one node in the SelectionDAG.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
bool willNotOverflowAdd(bool IsSigned, SDValue N0, SDValue N1) const
Determine if the result of the addition of 2 nodes can never overflow.
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
bool isKnownNeverSNaN(SDValue Op, unsigned Depth=0) const
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const APInt * getValidMaximumShiftAmountConstant(SDValue V, const APInt &DemandedElts) const
If a SHL/SRA/SRL node V has constant shift amounts that are all less than the element bit-width of th...
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDNode * isConstantIntBuildVectorOrConstantInt(SDValue N) const
Test whether the given value is a constant int or similar node.
SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
bool shouldOptForSize() const
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
const APInt * getValidShiftAmountConstant(SDValue V, const APInt &DemandedElts) const
If a SHL/SRA/SRL node V has a constant or splat constant shift amount that is less than the element b...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
bool isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
static const fltSemantics & EVTToAPFloatSemantics(EVT VT)
Returns an APFloat semantics tag appropriate for the given type.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
bool isKnownNeverZeroFloat(SDValue Op) const
Test whether the given floating point SDValue is known to never be positive or negative zero.
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
bool isKnownNeverNaN(SDValue Op, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL, bool LegalTypes=true)
SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Mask, SDValue EVL)
Helper function to make it easier to build VP_SETCCs if you just have an ISD::CondCode instead of an ...
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Class to represent struct types.
void setAttributes(const CallBase *Call, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
virtual bool isShuffleMaskLegal(ArrayRef< int >, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
virtual bool shouldRemoveRedundantExtend(SDValue Op) const
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual bool isSafeMemOpType(MVT) const
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
const TargetMachine & getTargetMachine() const
virtual bool isCtpopFast(EVT VT) const
Return true if ctpop instruction is fast.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &) const
Returns the target specific optimal type for load and store operations as a result of memset,...
LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some oth...
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual bool isFPImmLegal(const APFloat &, EVT, bool ForCodeSize=false) const
Returns true if the target can instruction select the specified FP immediate natively.
virtual MVT::SimpleValueType getCmpLibcallReturnType() const
Return the ValueType for comparison libcalls.
virtual bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const
Should we transform the IR-optimal check for whether given truncation down into KeptBits would be trun...
bool isLegalRC(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC) const
Return true if the value types that can be represented by the specified register class are all legal.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
virtual bool shouldExtendTypeInLibCall(EVT Type) const
Returns true if arguments should be extended in lib calls.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const
Return true if creating a shift of the type by the given amount is not profitable.
virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
Returns the type for the shift amount of a shift opcode.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
MVT getSimpleValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the MVT corresponding to this LLVM type. See getValueType.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal on this target.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual bool isNarrowingProfitable(EVT SrcVT, EVT DestVT) const
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Return true if it is profitable to reduce a load to a smaller type.
virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a cust...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool hasAndNotCompare(SDValue Y) const
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
virtual bool isCtlzFast() const
Return true if ctlz instruction is fast.
virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT, bool IsSigned) const
Return true if it is more correct/profitable to use strict FP_TO_INT conversion operations - canonica...
NegatibleCost
Enum that specifies when a float negation is beneficial.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const
Get the CondCode that's to be used to test the result of the comparison libcall against zero.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal or custom on this target.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
MulExpansionKind
Enum that specifies when a multiplication should be expanded.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT.
SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Build sdiv by power-of-2 with conditional move instructions Ref: "Hacker's Delight" by Henry Warren 1...
virtual ConstraintWeight getMultipleConstraintMatchWeight(AsmOperandInfo &info, int maIndex) const
Examine constraint type and operand type and determine a weight value.
SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]MULO.
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL into two nodes.
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
virtual bool isUsedByReturnOnly(SDNode *, SDValue &) const
Return true if result of the specified node is used by a return node only.
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand VP_BSWAP nodes.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand CTLZ/CTLZ_ZERO_UNDEF nodes.
SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand BITREVERSE nodes.
SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand CTTZ/CTTZ_ZERO_UNDEF nodes.
virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value, SDValue Addr, int JTI, SelectionDAG &DAG) const
Expands target specific indirect branch for the case of JumpTable expansion.
SDValue expandABD(SDNode *N, SelectionDAG &DAG) const
Expand ABDS/ABDU nodes.
virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine the known alignment for the pointer value R.
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]SHLSAT.
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const
Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all bits from only some vector eleme...
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::S(ADD|SUB)O.
SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand VP_BITREVERSE nodes.
SDValue expandABS(SDNode *N, SelectionDAG &DAG, bool IsNegative=false) const
Expand ABS nodes.
SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_* into an explicit calculation.
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
virtual const char * getTargetNodeName(unsigned Opcode) const
This method returns the name of a target specific DAG node.
bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand float to UINT conversion.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
virtual bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded vector elements, returning true on success...
bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const
Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
virtual const char * LowerXConstraint(EVT ConstraintVT) const
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand CTPOP nodes.
SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand BSWAP nodes.
SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op, unsigned NumBitsPerElt) const
Expand CTTZ via Table Lookup.
virtual bool isKnownNeverNaNForTargetNode(SDValue Op, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false,.
bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, SDValue LL=SDValue(), SDValue LH=SDValue()) const
Attempt to expand an n-bit div/rem/divrem by constant using a n/2-bit urem by constant and other arit...
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
virtual void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool isPositionIndependent() const
std::pair< StringRef, TargetLowering::ConstraintType > ConstraintPair
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, const DenormalMode &Mode) const
Return a target-dependent comparison result if the input operand is suitable for use with a square ro...
ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const
Given an OpInfo with list of constraints codes as strings, return a sorted Vector of pairs of constra...
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const
Expand float(f32) to SINT(i64) conversion.
virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, SDValue N1, MutableArrayRef< int > Mask, SelectionDAG &DAG) const
Tries to build a legal vector shuffle using the provided parameters or equivalent variations.
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed, EVT WideVT, const SDValue LL, const SDValue LH, const SDValue RL, const SDValue RH, SDValue &Lo, SDValue &Hi) const
forceExpandWideMUL - Unconditionally expand a MUL into either a libcall or brute force via a wide mul...
virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const
Return true if it is profitable to combine an XOR of a logical shift to create a logical shift of NOT...
TargetLowering(const TargetLowering &)=delete
virtual bool shouldSimplifyDemandedVectorElts(SDValue Op, const TargetLoweringOpt &TLO) const
Return true if the target supports simplifying demanded vector elements by converting them to undefs.
bool isConstFalseVal(SDValue N) const
Return if the N is a constant or constant vector equal to the false value from getBooleanContents().
SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::UDIV node expressing a divide by constant, return a DAG expression to select that will ...
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which ma...
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI, const SDLoc &dl) const
Try to simplify a setcc built with the specified operands and cc.
SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const
Expand funnel shift.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, SDValue Mask, SDValue EVL, bool &NeedInvert, const SDLoc &dl, SDValue &Chain, bool IsSignaling=false) const
Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC on the current target.
bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const
Return if N is a True value when extended to VT.
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &DemandedBits, TargetLoweringOpt &TLO) const
Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
bool isConstTrueVal(SDValue N) const
Return if the N is a constant or constant vector equal to the true value from getBooleanContents().
SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTPOP nodes.
SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, SDValue LHS, SDValue RHS, unsigned Scale, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]DIVFIX[SAT].
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
virtual const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const
Try to convert the fminnum/fmaxnum to a compare/select sequence.
SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const
Expand rotations.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi, SelectionDAG &DAG) const
Expand shift-by-parts.
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
This method will be invoked for all target nodes and for any target-independent nodes that the target...
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT].
SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][MIN|MAX].
virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis, Register R, KnownBits &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::U(ADD|SUB)O.
SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::SDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SREM lowering for power-of-2 denominators.
bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand UINT(i64) to double(f64) conversion.
bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS, SDValue RHS, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes, respectively,...
Primary interface to the complete machine description for the target machine.
bool isPositionIndependent() const
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
iterator_range< regclass_iterator > regclasses() const
virtual StringRef getRegAsmName(MCRegister Reg) const
Return the assembly name for Reg.
bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const
Return true if the given TargetRegisterClass has the ValueType T.
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
const fltSemantics & getFltSemantics() const
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isIntegerTy() const
True if this is an instance of IntegerType.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
StringRef getName() const
Return a constant reference to the value's name.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimum or maximum on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false)
Hook for matching ConstantSDNode predicate.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
FPClassTest invertFPClassTestIfSimpler(FPClassTest Test)
Evaluates if the specified FP class test is better performed as the inverse (i.e.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
auto find_if_not(R &&Range, UnaryPredicate P)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
@ Mod
The access may modify the value stored in memory.
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
constexpr unsigned BitWidth
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
APFloat neg(APFloat X)
Returns the negated value of the argument.
unsigned Log2(Align A)
Returns the log2 of the alignment.
uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the largest uint64_t less than or equal to Value and is Skew mod Align.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
opStatus
IEEE-754R 7: Default exception handling.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
EVT getHalfSizedIntegerVT(LLVMContext &Context) const
Finds the smallest simple value type that is greater than or equal to half the width of this EVT.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
int MatchingInput
MatchingInput - If this is not -1, this is an output constraint where an input constraint is required...
ConstraintCodeVector Codes
Code - The constraint code, either the register name (in braces) or the constraint letter/number.
SubConstraintInfoVector multipleAlternatives
multipleAlternatives - If there are multiple alternative constraints, this array will contain them.
bool isIndirect
isIndirect - True if this operand is an indirect operand.
bool hasMatchingInput() const
hasMatchingInput - Return true if this is an output constraint that has a matching input constraint.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
bool isUnknown() const
Returns true if we don't know any bits.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
bool hasConflict() const
Returns true if there is conflicting information.
static std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
static std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
static std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
static std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasNoUnsignedWrap() const
bool hasNoSignedWrap() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Magic data for optimising signed division by a constant.
unsigned ShiftAmount
shift amount
static SignedDivisionByConstantInfo get(const APInt &D)
Calculate the magic numbers required to implement a signed integer division by a constant as a sequence of multiplies, adds and shifts.
This contains information for each constraint that we are lowering.
MVT ConstraintVT
The ValueType for the operand value.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g. Register, RegisterClass, Memory, Other, Unknown.
std::string ConstraintCode
This contains the actual string for the code, like "m".
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand number.
unsigned getMatchedOperand() const
If this is an input matching constraint, this method returns the output operand it matches.
bool isMatchingInputConstraint() const
Return true if this is an input operand that is a matching constraint like "4".
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setIsPostTypeLegalization(bool Value=true)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)
bool isBeforeLegalizeOps() const
void AddToWorklist(SDNode *N)
bool isCalledByLegalizer() const
bool isBeforeLegalize() const
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setIsPostTypeLegalization(bool Value=true)
ArrayRef< EVT > OpsVTBeforeSoften
bool IsPostTypeLegalization
MakeLibCallOptions & setSExt(bool Value=true)
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT, bool Value=true)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetLowering to its clients.
bool CombineTo(SDValue O, SDValue N)
bool LegalOperations() const
Magic data for optimising unsigned division by a constant.
unsigned PreShift
pre-shift amount
static UnsignedDivisionByConstantInfo get(const APInt &D, unsigned LeadingZeros=0, bool AllowEvenDivisorOptimization=true)
Calculate the magic numbers required to implement an unsigned integer division by a constant as a sequence of multiplies, adds and shifts.
unsigned PostShift
post-shift amount