if (F.getFnAttribute("disable-tail-calls").getValueAsBool())

AttrBuilder CallerAttrs(F.getContext(), F.getAttributes().getRetAttrs());
for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                         Attribute::DereferenceableOrNull, Attribute::NoAlias,
                         Attribute::NonNull, Attribute::NoUndef,
                         Attribute::Range, Attribute::NoFPClass})

if (CallerAttrs.contains(Attribute::ZExt) ||
    CallerAttrs.contains(Attribute::SExt))

for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

Register ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
if (MRI.getLiveInPhysReg(ArgReg) != Reg)
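// ArgListEntry::setAttributes: mirror the call-site parameter attributes
// into the argument entry so calling-convention lowering sees the same ABI
// hints (extension, sret/byval/inalloca, nest, swift* parameters) the IR
// carried.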
IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
IsNoExt = Call->paramHasAttr(ArgIdx, Attribute::NoExt);
IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
Alignment = Call->getParamStackAlign(ArgIdx);

       "multiple ABI attributes?");
std::pair<SDValue, SDValue>

Args.reserve(Ops.size());

for (unsigned i = 0; i < Ops.size(); ++i) {

Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());

Entry.IsZExt = !Entry.IsSExt;

Entry.IsSExt = Entry.IsZExt = false;

Args.push_back(Entry);

if (LC == RTLIB::UNKNOWN_LIBCALL)

bool zeroExtend = !signExtend;

signExtend = zeroExtend = false;

return LowerCallTo(CLI);
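// TargetLowering::findOptimalMemOpLowering: pick a sequence of value types
// for an inline memcpy/memset expansion, starting from the widest legal type
// and shrinking as the remaining size drops; a final overlapping store is
// allowed when the MemOp permits it.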
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,

if (Limit != ~unsigned(0) && Op.isMemcpyWithFixedDstAlign() &&
    Op.getSrcAlign() < Op.getDstAlign())

if (VT == MVT::Other) {

VT = MVT::LAST_INTEGER_VALUETYPE;
if (Op.isFixedDstAlign())

MVT LVT = MVT::LAST_INTEGER_VALUETYPE;

unsigned NumMemOps = 0;

while (VTSize > Size) {

else if (NewVT == MVT::i64 &&

if (NewVT == MVT::i8)

if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
        VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),

if (++NumMemOps > Limit)

MemOps.push_back(VT);
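// TargetLowering::softenSetCCOperands: for soft-float targets, FP compares
// become calls to compiler-rt routines (e.g. SETOLT on f64 -> __ltdf2, whose
// integer result is then compared against zero). LC1 is the primary call;
// LC2, when set, is a second call whose result is merged in (e.g. SETUEQ is
// decomposed as UO || OEQ).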
return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,

                           bool IsSignaling) const {

assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 ||
        VT == MVT::ppcf128) &&
       "Unsupported setcc type!");

RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
bool ShouldInvertCC = false;
LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
      (VT == MVT::f64) ? RTLIB::OEQ_F64 :
      (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
      (VT == MVT::f64) ? RTLIB::UNE_F64 :
      (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
      (VT == MVT::f64) ? RTLIB::OGE_F64 :
      (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
      (VT == MVT::f64) ? RTLIB::OLT_F64 :
      (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
      (VT == MVT::f64) ? RTLIB::OLE_F64 :
      (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
      (VT == MVT::f64) ? RTLIB::OGT_F64 :
      (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;

ShouldInvertCC = true;

LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
      (VT == MVT::f64) ? RTLIB::UO_F64 :
      (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;

ShouldInvertCC = true;

LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
      (VT == MVT::f64) ? RTLIB::UO_F64 :
      (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
      (VT == MVT::f64) ? RTLIB::OEQ_F64 :
      (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;

ShouldInvertCC = true;

LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
      (VT == MVT::f64) ? RTLIB::OGE_F64 :
      (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
      (VT == MVT::f64) ? RTLIB::OGT_F64 :
      (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
      (VT == MVT::f64) ? RTLIB::OLE_F64 :
      (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;

LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
      (VT == MVT::f64) ? RTLIB::OLT_F64 :
      (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
SDValue Ops[2] = {NewLHS, NewRHS};

auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);

if (ShouldInvertCC) {

CCCode = getSetCCInverse(CCCode, RetVT);

if (LC2 == RTLIB::UNKNOWN_LIBCALL) {

auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);

CCCode = getSetCCInverse(CCCode, RetVT);
NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
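// Jump-table and address lowering: the jump-table encoding and the choice
// between direct and GOT-relative references appear to hinge on position
// independence and on whether the global is known to be DSO-local.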
if (!isPositionIndependent())

unsigned JTEncoding = getJumpTableEncoding();

if (!TM.shouldAssumeDSOLocal(GV))

if (isPositionIndependent())
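// ShrinkDemandedConstant / ShrinkDemandedOp: when only part of a binop's
// constant operand is demanded, try a simpler constant, or narrow the whole
// operation to a smaller type, so the value is cheaper to materialize.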
                                          const APInt &DemandedElts,

unsigned Opcode = Op.getOpcode();

if (targetShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))

auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
if (!Op1C || Op1C->isOpaque())

const APInt &C = Op1C->getAPIntValue();

EVT VT = Op.getValueType();

EVT VT = Op.getValueType();

       "ShrinkDemandedOp only supports binary operators!");
assert(Op.getNode()->getNumValues() == 1 &&
       "ShrinkDemandedOp only supports nodes with one result!");

EVT VT = Op.getValueType();

       Op.getOperand(1).getValueType().getScalarSizeInBits() == BitWidth &&
       "ShrinkDemandedOp only supports operands that have the same size!");

if (!Op.getNode()->hasOneUse())

    Op.getOpcode(), dl, SmallVT,

assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");

bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO);
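// SimplifyMultipleUseDemandedBits: return an existing SDValue equivalent to
// Op for the demanded bits/elements without creating new nodes. Unlike
// SimplifyDemandedBits it never rewrites Op itself, so it is safe to use on
// values with multiple uses.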
                                      const APInt &DemandedElts,

                                      bool AssumeSingleUse) const {
EVT VT = Op.getValueType();

EVT VT = Op.getValueType();

switch (Op.getOpcode()) {

EVT SrcVT = Src.getValueType();
EVT DstVT = Op.getValueType();

if (NumSrcEltBits == NumDstEltBits)
  if (SDValue V = SimplifyMultipleUseDemandedBits(

if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0) {
  unsigned Scale = NumDstEltBits / NumSrcEltBits;

  for (unsigned i = 0; i != Scale; ++i) {
    unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
    unsigned BitOffset = EltOffset * NumSrcEltBits;

    DemandedSrcBits |= Sub;
    for (unsigned j = 0; j != NumElts; ++j)

        DemandedSrcElts.setBit((j * Scale) + i);

  if (SDValue V = SimplifyMultipleUseDemandedBits(
          Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))

if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) {
  unsigned Scale = NumSrcEltBits / NumDstEltBits;

  for (unsigned i = 0; i != NumElts; ++i)
    if (DemandedElts[i]) {
      unsigned Offset = (i % Scale) * NumDstEltBits;

      DemandedSrcElts.setBit(i / Scale);

  if (SDValue V = SimplifyMultipleUseDemandedBits(
          Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))

return Op.getOperand(0);

return Op.getOperand(1);

return Op.getOperand(0);

return Op.getOperand(1);

return Op.getOperand(0);

return Op.getOperand(1);

return Op.getOperand(0);

return Op.getOperand(1);

if (std::optional<uint64_t> MaxSA =

  unsigned ShAmt = *MaxSA;
  unsigned NumSignBits =

  if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))

if (std::optional<uint64_t> MaxSA =

  unsigned ShAmt = *MaxSA;

  unsigned NumSignBits =

EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

if (NumSignBits >= (BitWidth - ExBits + 1))

EVT SrcVT = Src.getValueType();
EVT DstVT = Op.getValueType();
if (IsLE && DemandedElts == 1 &&

auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));

    !DemandedElts[CIdx->getZExtValue()])

if (DemandedSubElts == 0)

bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
for (unsigned i = 0; i != NumElts; ++i) {
  int M = ShuffleMask[i];
  if (M < 0 || !DemandedElts[i])

  IdentityLHS &= (M == (int)i);
  IdentityRHS &= ((M - NumElts) == i);

return Op.getOperand(0);

return Op.getOperand(1);

if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(

                                    unsigned Depth) const {
EVT VT = Op.getValueType();

return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,

                                    unsigned Depth) const {

return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
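// combineShiftToAVG: recognize (sr[la] (add a, b), 1) as a fixed-point
// averaging operation and, when the known sign/zero bits show the addition
// cannot wrap, rewrite it to the AVGFLOOR*/AVGCEIL* nodes the target
// supports.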
992 "SRL or SRA node is required here!");
995 if (!N1C || !N1C->
isOne())
1042 unsigned ShiftOpc =
Op.getOpcode();
1043 bool IsSigned =
false;
1047 unsigned NumSigned = std::min(NumSignedA, NumSignedB) - 1;
1052 unsigned NumZero = std::min(NumZeroA, NumZeroB);
1058 if (NumZero >= 2 && NumSigned < NumZero) {
1063 if (NumSigned >= 1) {
1071 if (NumZero >= 1 && NumSigned < NumZero) {
1091 EVT VT =
Op.getValueType();
1105 Add.getOperand(1)) &&
1116 (isa<ConstantSDNode>(ExtOpA) || isa<ConstantSDNode>(ExtOpB)))
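// TargetLowering::SimplifyDemandedBits: the main demanded-bits driver. Given
// the bits and vector lanes a user actually needs, recursively shrink or
// rewrite Op, accumulating the KnownBits that survive; returns true if the
// DAG was changed.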
                                          unsigned Depth,
                                          bool AssumeSingleUse) const {

       "Mask size mismatches value type size!");

EVT VT = Op.getValueType();

unsigned NumElts = OriginalDemandedElts.getBitWidth();

       "Unexpected vector size");

APInt DemandedElts = OriginalDemandedElts;

    cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt());

bool HasMultiUse = false;
if (!AssumeSingleUse && !Op.getNode()->hasOneUse()) {

} else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
switch (Op.getOpcode()) {

if (!DemandedElts[0])

unsigned SrcBitWidth = Src.getScalarValueSizeInBits();

if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))

if (DemandedElts == 1)

if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))

auto *LD = cast<LoadSDNode>(Op);
if (getTargetConstantFromLoad(LD)) {

EVT MemVT = LD->getMemoryVT();

auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));

APInt DemandedVecElts(DemandedElts);

unsigned Idx = CIdx->getZExtValue();

if (!DemandedElts[Idx])

if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))

if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,

if (!!DemandedVecElts)
APInt DemandedSrcElts = DemandedElts;

if (SimplifyDemandedBits(Sub, DemandedBits, DemandedSubElts, KnownSub, TLO,

if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, KnownSrc, TLO,

if (!!DemandedSubElts)

if (!!DemandedSrcElts)

SDValue NewSub = SimplifyMultipleUseDemandedBits(

SDValue NewSrc = SimplifyMultipleUseDemandedBits(

if (NewSub || NewSrc) {
  NewSub = NewSub ? NewSub : Sub;
  NewSrc = NewSrc ? NewSrc : Src;

if (Src.getValueType().isScalableVector())

unsigned NumSrcElts = Src.getValueType().getVectorNumElements();

if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO,

SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
EVT SubVT = Op.getOperand(0).getValueType();

for (unsigned i = 0; i != NumSubVecs; ++i) {
  APInt DemandedSubElts =
      DemandedElts.extractBits(NumSubElts, i * NumSubElts);
  if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
                           Known2, TLO, Depth + 1))

  if (!!DemandedSubElts)

APInt DemandedLHS, DemandedRHS;

if (!!DemandedLHS || !!DemandedRHS) {

  if (!!DemandedLHS) {
    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,

  if (!!DemandedRHS) {
    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,

SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(

SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(

if (DemandedOp0 || DemandedOp1) {
  Op0 = DemandedOp0 ? DemandedOp0 : Op0;
  Op1 = DemandedOp1 ? DemandedOp1 : Op1;
    LHSKnown.One == ~RHSC->getAPIntValue()) {

unsigned NumSubElts =

if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,

                         Known2, TLO, Depth + 1))

SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(

SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(

if (DemandedOp0 || DemandedOp1) {
  Op0 = DemandedOp0 ? DemandedOp0 : Op0;
  Op1 = DemandedOp1 ? DemandedOp1 : Op1;

if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,

if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
                         Known2, TLO, Depth + 1)) {

SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(

SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(

if (DemandedOp0 || DemandedOp1) {
  Op0 = DemandedOp0 ? DemandedOp0 : Op0;
  Op1 = DemandedOp1 ? DemandedOp1 : Op1;
for (int I = 0; I != 2; ++I) {

  SDValue Alt = Op.getOperand(1 - I).getOperand(0);
  SDValue C2 = Op.getOperand(1 - I).getOperand(1);

  for (int J = 0; J != 2; ++J) {

if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,

if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,

if (C->getAPIntValue() == Known2.One) {

if (!C->isAllOnes() && DemandedBits.isSubsetOf(C->getAPIntValue())) {

if (ShiftC->getAPIntValue().ult(BitWidth)) {
  uint64_t ShiftAmt = ShiftC->getZExtValue();

      : Ones.lshr(ShiftAmt);

    isDesirableToCommuteXorWithShift(Op.getNode())) {

if (!C || !C->isAllOnes())

SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(

SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(

if (DemandedOp0 || DemandedOp1) {
  Op0 = DemandedOp0 ? DemandedOp0 : Op0;
  Op1 = DemandedOp1 ? DemandedOp1 : Op1;
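// SELECT / SELECT_CC / SETCC: both value arms see the same demanded mask, so
// each is simplified independently and the surviving known bits are the
// intersection of the two arms.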
if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, DemandedElts,
                         Known, TLO, Depth + 1))

if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, DemandedElts,
                         Known2, TLO, Depth + 1))

if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, DemandedElts,
                         Known, TLO, Depth + 1))

if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, DemandedElts,
                         Known2, TLO, Depth + 1))

if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, DemandedElts,
                         Known, TLO, Depth + 1))

if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, DemandedElts,
                         Known2, TLO, Depth + 1))
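// Shift handling (SHL/SRL/SRA): with a known shift amount the demanded mask
// can be pre-shifted into the source operand, adjacent shifts by constants
// can be merged, and a shift may be narrowed to half width when the upper
// bits are not demanded.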
if (std::optional<uint64_t> KnownSA =

  unsigned ShAmt = *KnownSA;

  if (std::optional<uint64_t> InnerSA =

    unsigned C1 = *InnerSA;

    int Diff = ShAmt - C1;

  if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits &&
      isTypeDesirableForOp(ISD::SHL, InnerVT)) {

          InnerOp, DemandedElts, Depth + 2)) {
    unsigned InnerShAmt = *SA2;
    if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&

        (InnerBits - InnerShAmt + ShAmt) &&

  if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,

  Known.Zero <<= ShAmt;
  Known.One <<= ShAmt;

  SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
      Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);

    Op.getNode()->hasOneUse()) {

      isTypeDesirableForOp(ISD::SHL, SmallVT) &&

    assert(DemandedSize <= SmallVTBits &&
           "Narrowed below demanded bits?");

    isTypeDesirableForOp(ISD::SHL, HalfVT) &&

  Flags.setNoSignedWrap(IsNSW);
  Flags.setNoUnsignedWrap(IsNUW);

      NewShiftAmt, Flags);
if (SimplifyDemandedBits(Op0, DemandedFromOp, DemandedElts, Known, TLO,

if (std::optional<uint64_t> MaxSA =

  unsigned ShAmt = *MaxSA;
  unsigned NumSignBits =

  if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))

if (std::optional<uint64_t> KnownSA =

  unsigned ShAmt = *KnownSA;

  if (std::optional<uint64_t> InnerSA =

    unsigned C1 = *InnerSA;

    int Diff = ShAmt - C1;

  if (std::optional<uint64_t> InnerSA =

    unsigned C1 = *InnerSA;

    unsigned Combined = std::min(C1 + ShAmt, BitWidth - 1);

  if (Op->getFlags().hasExact())

      isTypeDesirableForOp(ISD::SRL, HalfVT) &&

  if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,

  SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
      Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);

if (std::optional<uint64_t> MaxSA =

  unsigned ShAmt = *MaxSA;

  unsigned NumSignBits =

                         DemandedElts, Depth + 1))
if (std::optional<uint64_t> KnownSA =

  unsigned ShAmt = *KnownSA;

  if (std::optional<uint64_t> InnerSA =

    unsigned LowBits = BitWidth - ShAmt;

    if (*InnerSA == ShAmt) {

  unsigned NumSignBits =

  if (NumSignBits > ShAmt)

  if (Op->getFlags().hasExact())

  if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,

    Flags.setExact(Op->getFlags().hasExact());

  SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
      Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);

                         DemandedElts, Depth + 1))
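// Funnel shifts and rotates: a constant amount splits the demanded bits
// between the two inputs; a rotate by a constant can also be converted to
// the opposite direction when that folds the demanded mask better.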
unsigned Amt = SA->getAPIntValue().urem(BitWidth);

if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits, DemandedElts,
                         Known, TLO, Depth + 1))

if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,

if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,

SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
    Op0, Demanded0, DemandedElts, TLO.DAG, Depth + 1);
SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
    Op1, Demanded1, DemandedElts, TLO.DAG, Depth + 1);
if (DemandedOp0 || DemandedOp1) {
  DemandedOp0 = DemandedOp0 ? DemandedOp0 : Op0;
  DemandedOp1 = DemandedOp1 ? DemandedOp1 : Op1;

if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
                         Known2, TLO, Depth + 1))

unsigned Amt = SA->getAPIntValue().urem(BitWidth);

if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,

    DemandedBits.countr_zero() >= (IsROTL ? Amt : RevAmt)) {

    DemandedBits.countl_zero() >= (IsROTL ? RevAmt : Amt)) {

if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO,

unsigned Opc = Op.getOpcode();

unsigned NumSignBits =

if (NumSignBits >= NumDemandedUpperBits)
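// BSWAP / BITREVERSE just permute the demanded bits into the source mask
// (BSWAP can even degrade to a single shift when only one byte survives);
// SIGN_EXTEND_INREG disappears when the input already has enough sign bits.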
if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,

unsigned ShiftAmount = NLZ > NTZ ? NLZ - NTZ : NTZ - NLZ;

if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,

EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

unsigned MinSignedBits =

bool AlreadySignExtended = ExVTBits >= MinSignedBits;

if (!AlreadySignExtended) {

InputDemandedBits.setBit(ExVTBits - 1);

if (SimplifyDemandedBits(Op0, InputDemandedBits, DemandedElts, Known, TLO,

if (Known.Zero[ExVTBits - 1])

if (Known.One[ExVTBits - 1]) {

EVT HalfVT = Op.getOperand(0).getValueType();

if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1))

if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1))

Known = KnownHi.concat(KnownLo);
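// ZERO/SIGN/ANY_EXTEND (and the *_VECTOR_INREG forms): demand only the low
// source bits, widening the known bits afterwards; TRUNCATE demands the
// truncated mask zero-extended back to the source width.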
EVT SrcVT = Src.getValueType();

if (IsLE && IsVecInReg && DemandedElts == 1 &&

APInt InDemandedElts = DemandedElts.zext(InElts);
if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,

if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
        Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))

EVT SrcVT = Src.getValueType();

APInt InDemandedElts = DemandedElts.zext(InElts);

InDemandedBits.setBit(InBits - 1);

if (IsLE && IsVecInReg && DemandedElts == 1 &&

if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,

if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
        Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))

EVT SrcVT = Src.getValueType();

if (IsLE && IsVecInReg && DemandedElts == 1 &&

APInt InDemandedElts = DemandedElts.zext(InElts);
if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,

if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
        Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))

unsigned OperandBitWidth = Src.getScalarValueSizeInBits();

if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO,

if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
        Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1))
switch (Src.getOpcode()) {

if (Src.getNode()->hasOneUse()) {

std::optional<uint64_t> ShAmtC =

if (!ShAmtC || *ShAmtC >= BitWidth)

EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known,

Known.Zero |= ~InMask;
Known.One &= (~Known.Zero);

ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
unsigned EltBitWidth = Src.getScalarValueSizeInBits();

if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx))
  if (CIdx->getAPIntValue().ult(NumSrcElts))

DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth);

if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO,

if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
        Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) {
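// BITCAST: map the demanded bits and lanes across the element-size change
// (endianness decides which sub-element corresponds to which bit range) and
// recurse on the source in its own geometry.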
EVT SrcVT = Src.getValueType();

if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 &&
    SrcVT != MVT::f128) {

  EVT Ty = OpVTLegal ? VT : MVT::i32;

  unsigned OpVTSizeInBits = Op.getValueSizeInBits();
  if (!OpVTLegal && OpVTSizeInBits > 32)

  unsigned ShVal = Op.getValueSizeInBits() - 1;

unsigned Scale = BitWidth / NumSrcEltBits;

for (unsigned i = 0; i != Scale; ++i) {
  unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
  unsigned BitOffset = EltOffset * NumSrcEltBits;

  DemandedSrcBits |= Sub;
  for (unsigned j = 0; j != NumElts; ++j)
    if (DemandedElts[j])
      DemandedSrcElts.setBit((j * Scale) + i);

APInt KnownSrcUndef, KnownSrcZero;
if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
                               KnownSrcZero, TLO, Depth + 1))

if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
                         KnownSrcBits, TLO, Depth + 1))

} else if (IsLE && (NumSrcEltBits % BitWidth) == 0) {
  unsigned Scale = NumSrcEltBits / BitWidth;

  for (unsigned i = 0; i != NumElts; ++i)
    if (DemandedElts[i]) {

      DemandedSrcElts.setBit(i / Scale);

  APInt KnownSrcUndef, KnownSrcZero;
  if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
                                 KnownSrcZero, TLO, Depth + 1))

  if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
                           KnownSrcBits, TLO, Depth + 1))

if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
        Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) {

if (C && C->getAPIntValue().countr_zero() == CTZ) {
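// ADD/SUB/MUL: only bits at or below the highest demanded bit can influence
// the result, so the operands are simplified against that low mask; a
// multiply by a near-power-of-two constant may be rewritten as shift plus
// add/sub.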
SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);

auto GetDemandedBitsLHSMask = [&](APInt Demanded,

if (SimplifyDemandedBits(Op1, LoMask, DemandedElts, KnownOp1, TLO,

    SimplifyDemandedBits(Op0, GetDemandedBitsLHSMask(LoMask, KnownOp1),
                         DemandedElts, KnownOp0, TLO, Depth + 1) ||

SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
    Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1);
SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
    Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1);
if (DemandedOp0 || DemandedOp1) {
  Op0 = DemandedOp0 ? DemandedOp0 : Op0;
  Op1 = DemandedOp1 ? DemandedOp1 : Op1;

if (C && !C->isAllOnes() && !C->isOne() &&
    (C->getAPIntValue() | HighMask).isAllOnes()) {

auto getShiftLeftAmt = [&HighMask](SDValue Mul) -> unsigned {

if (unsigned ShAmt = getShiftLeftAmt(Op0))

if (unsigned ShAmt = getShiftLeftAmt(Op1))
  return foldMul(ISD::SUB, Op1.getOperand(0), Op0, ShAmt);

if (unsigned ShAmt = getShiftLeftAmt(Op1))
  return foldMul(ISD::ADD, Op1.getOperand(0), Op0, ShAmt);

    Op.getOpcode() == ISD::ADD, Flags.hasNoSignedWrap(),
    Flags.hasNoUnsignedWrap(), KnownOp0, KnownOp1);
if (Op.getValueType().isScalableVector())

if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts,

if (!isTargetCanonicalConstantNode(Op) &&

if (auto *C = dyn_cast<ConstantSDNode>(Op))
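// TargetLowering::SimplifyDemandedVectorElts: the per-lane analogue of
// SimplifyDemandedBits. It tracks which lanes are demanded and reports lanes
// known to be undef or zero, dropping whole sub-operations whose lanes are
// never used.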
                                            const APInt &DemandedElts,

APInt KnownUndef, KnownZero;

SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO);

                                  const APInt &UndefOp0,
                                  const APInt &UndefOp1) {

       "Vector binop only");

       UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis");
auto getUndefOrConstantElt = [&](SDValue V, unsigned Index,
                                 const APInt &UndefVals) {
  if (UndefVals[Index])

  if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {

    auto *C = dyn_cast<ConstantSDNode>(Elt);
    if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque()))

for (unsigned i = 0; i != NumElts; ++i) {
                                                 bool AssumeSingleUse) const {
EVT VT = Op.getValueType();
unsigned Opcode = Op.getOpcode();
APInt DemandedElts = OriginalDemandedElts;

if (!shouldSimplifyDemandedVectorElts(Op, TLO))

       "Mask size mismatches value type element count!");

if (!AssumeSingleUse && !Op.getNode()->hasOneUse())

if (DemandedElts == 0) {

auto SimplifyDemandedVectorEltsBinOp = [&](SDValue Op0, SDValue Op1) {
  SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts,

  SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts,

  if (NewOp0 || NewOp1) {

                               NewOp1 ? NewOp1 : Op1, Op->getFlags());
if (!DemandedElts[0]) {

EVT SrcVT = Src.getValueType();

if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,

EVT SrcVT = Src.getValueType();

if (NumSrcElts == NumElts)
  return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
                                    KnownZero, TLO, Depth + 1);

APInt SrcDemandedElts, SrcZero, SrcUndef;

if ((NumElts % NumSrcElts) == 0) {
  unsigned Scale = NumElts / NumSrcElts;

  if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,

  for (unsigned i = 0; i != NumElts; ++i)
    if (DemandedElts[i]) {
      unsigned Ofs = (i % Scale) * EltSizeInBits;
      SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits);

  if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known,

  for (unsigned SubElt = 0; SubElt != Scale; ++SubElt) {

    for (unsigned SrcElt = 0; SrcElt != NumSrcElts; ++SrcElt) {
      unsigned Elt = Scale * SrcElt + SubElt;
      if (DemandedElts[Elt])

  for (unsigned i = 0; i != NumSrcElts; ++i) {
    if (SrcDemandedElts[i]) {

      KnownZero.setBits(i * Scale, (i + 1) * Scale);

      KnownUndef.setBits(i * Scale, (i + 1) * Scale);

if ((NumSrcElts % NumElts) == 0) {
  unsigned Scale = NumSrcElts / NumElts;

  if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,

  for (unsigned i = 0; i != NumElts; ++i) {
    if (DemandedElts[i]) {
    [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) {

bool Updated = false;
for (unsigned i = 0; i != NumElts; ++i) {
  if (!DemandedElts[i] && !Ops[i].isUndef()) {

for (unsigned i = 0; i != NumElts; ++i) {

  if (SrcOp.isUndef()) {

  } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() &&

EVT SubVT = Op.getOperand(0).getValueType();

for (unsigned i = 0; i != NumSubVecs; ++i) {

  APInt SubUndef, SubZero;
  if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,

  KnownUndef.insertBits(SubUndef, i * NumSubElts);
  KnownZero.insertBits(SubZero, i * NumSubElts);

bool FoundNewSub = false;

for (unsigned i = 0; i != NumSubVecs; ++i) {

  SDValue NewSubOp = SimplifyMultipleUseDemandedVectorElts(
      SubOp, SubElts, TLO.DAG, Depth + 1);
  DemandedSubOps.push_back(NewSubOp ? NewSubOp : SubOp);
  FoundNewSub = NewSubOp ? true : FoundNewSub;
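// INSERT_SUBVECTOR / EXTRACT_SUBVECTOR: demanded lanes are split between the
// base vector and the inserted subvector, and whichever part contributes no
// demanded lanes can be replaced or dropped outright.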
APInt DemandedSrcElts = DemandedElts;

APInt SubUndef, SubZero;
if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO,

if (!DemandedSrcElts && !Src.isUndef())

if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero,

SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
    Src, DemandedSrcElts, TLO.DAG, Depth + 1);
SDValue NewSub = SimplifyMultipleUseDemandedVectorElts(
    Sub, DemandedSubElts, TLO.DAG, Depth + 1);
if (NewSrc || NewSub) {
  NewSrc = NewSrc ? NewSrc : Src;
  NewSub = NewSub ? NewSub : Sub;

                NewSub, Op.getOperand(2));

if (Src.getValueType().isScalableVector())

unsigned NumSrcElts = Src.getValueType().getVectorNumElements();

APInt SrcUndef, SrcZero;
if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,

SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
    Src, DemandedSrcElts, TLO.DAG, Depth + 1);
auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));

if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
  unsigned Idx = CIdx->getZExtValue();
  if (!DemandedElts[Idx])

  APInt DemandedVecElts(DemandedElts);

  if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
                                 KnownZero, TLO, Depth + 1))

APInt VecUndef, VecZero;
if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,

APInt UndefSel, ZeroSel;
if (SimplifyDemandedVectorElts(Sel, DemandedElts, UndefSel, ZeroSel, TLO,

APInt DemandedLHS(DemandedElts);
APInt DemandedRHS(DemandedElts);
APInt UndefLHS, ZeroLHS;
APInt UndefRHS, ZeroRHS;
if (SimplifyDemandedVectorElts(LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,

if (SimplifyDemandedVectorElts(RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,

KnownUndef = UndefLHS & UndefRHS;
KnownZero = ZeroLHS & ZeroRHS;

APInt DemandedSel = DemandedElts & ~KnownZero;
if (DemandedSel != DemandedElts)
  if (SimplifyDemandedVectorElts(Sel, DemandedSel, UndefSel, ZeroSel, TLO,
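// VECTOR_SHUFFLE: route each demanded output lane to the LHS or RHS input it
// reads, simplify both inputs, then turn mask entries whose source lane is
// undef into sentinels and rebuild the shuffle if anything changed.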
APInt DemandedLHS(NumElts, 0);
APInt DemandedRHS(NumElts, 0);
for (unsigned i = 0; i != NumElts; ++i) {
  int M = ShuffleMask[i];
  if (M < 0 || !DemandedElts[i])

  assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
  if (M < (int)NumElts)

    DemandedRHS.setBit(M - NumElts);

APInt UndefLHS, ZeroLHS;
APInt UndefRHS, ZeroRHS;
if (SimplifyDemandedVectorElts(LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,

if (SimplifyDemandedVectorElts(RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,

bool Updated = false;
bool IdentityLHS = true, IdentityRHS = true;

for (unsigned i = 0; i != NumElts; ++i) {
  int &M = NewMask[i];

  if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) ||
      (M >= (int)NumElts && UndefRHS[M - NumElts])) {

  IdentityLHS &= (M < 0) || (M == (int)i);
  IdentityRHS &= (M < 0) || ((M - NumElts) == i);

if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) {

  buildLegalVectorShuffle(VT, DL, LHS, RHS, NewMask, TLO.DAG);

for (unsigned i = 0; i != NumElts; ++i) {
  int M = ShuffleMask[i];

  } else if (M < (int)NumElts) {

    if (UndefRHS[M - NumElts])

    if (ZeroRHS[M - NumElts])
APInt SrcUndef, SrcZero;

unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts);
if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,

    Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
    DemandedSrcElts == 1) {

if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() == ISD::AND &&
    Op->isOnlyUserOf(Src.getNode()) &&
    Op.getValueSizeInBits() == Src.getValueSizeInBits()) {

  EVT SrcVT = Src.getValueType();

          ISD::AND, DL, SrcVT, {Src.getOperand(1), Mask})) {
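// Generic vector binops: a result lane is dead if it is dead in both inputs,
// so the zero/undef lane sets are combined accordingly; for shift-like ops
// only the value operand's zero lanes appear to carry over.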
if (Op0 == Op1 && Op->isOnlyUserOf(Op0.getNode())) {
  APInt UndefLHS, ZeroLHS;
  if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,

APInt UndefRHS, ZeroRHS;
if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,

APInt UndefLHS, ZeroLHS;
if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,

KnownZero = ZeroLHS & ZeroRHS;

if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))

APInt UndefRHS, ZeroRHS;
if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,

APInt UndefLHS, ZeroLHS;
if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,

KnownZero = ZeroLHS;
KnownUndef = UndefLHS & UndefRHS;

if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))

APInt SrcUndef, SrcZero;
if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO,

APInt DemandedElts0 = DemandedElts & ~SrcZero;
if (SimplifyDemandedVectorElts(Op0, DemandedElts0, KnownUndef, KnownZero,

KnownUndef &= DemandedElts0;
KnownZero &= DemandedElts0;

if (DemandedElts.isSubsetOf(SrcZero | KnownZero | SrcUndef | KnownUndef))

KnownZero |= SrcZero;
KnownUndef &= SrcUndef;
KnownUndef &= ~KnownZero;

if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef,
                               KnownZero, TLO, Depth + 1))

if (SDValue NewOp = SimplifyMultipleUseDemandedVectorElts(
        Op.getOperand(0), DemandedElts, TLO.DAG, Depth + 1))

if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef,
                               KnownZero, TLO, Depth + 1))

if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef,
                                            KnownZero, TLO, Depth))

if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known,
                         TLO, Depth, AssumeSingleUse))

assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero");
                                                   const APInt &DemandedElts,

                                                   unsigned Depth) const {

         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

                                                  unsigned Depth) const {

                                                  unsigned Depth) const {

                                                  unsigned Depth) const {

         "Should use ComputeNumSignBits if you don't know whether Op"
         " is a target node!");

         "Should use SimplifyDemandedVectorElts if you don't know whether Op"
         " is a target node!");

         "Should use SimplifyDemandedBits if you don't know whether Op"
         " is a target node!");
  computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth);

         "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
         " is a target node!");

         "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op"
         " is a target node!");

  return !canCreateUndefOrPoisonForTargetNode(Op, DemandedElts, DAG,
                                              PoisonOnly,

  return DAG.isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly,

         "Should use canCreateUndefOrPoison if you don't know whether Op"
         " is a target node!");

                                        unsigned Depth) const {

         "Should use isKnownNeverNaN if you don't know whether Op"
         " is a target node!");

                                     const APInt &DemandedElts,

                                     unsigned Depth) const {

         "Should use isSplatValue if you don't know whether Op"
         " is a target node!");
CVal = CN->getAPIntValue();
EltWidth = N.getValueType().getScalarSizeInBits();

CVal = CVal.trunc(EltWidth);

return CVal.isOne();

return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));

return N->isAllOnes() && SExt;
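// TargetLowering::SimplifySetCC and its helpers: canonicalize and fold setcc
// patterns, e.g. turning (X & pow2) ==/!= 0 tests into shifts and
// recognizing sign-extension range checks.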
                                        DAGCombinerInfo &DCI) const {

auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (AndC && isNullConstant(N1) && AndC->getAPIntValue().isPowerOf2() &&

                        AndC->getAPIntValue().getActiveBits());

if (isXAndYEqZeroPreferableToXAndYEqY(Cond, OpVT) &&

if (DCI.isBeforeLegalizeOps() ||

SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(

if (!(C1 = dyn_cast<ConstantSDNode>(N1)))

if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1))))

EVT XVT = X.getValueType();

auto checkConstants = [&I1, &I01]() -> bool {

if (checkConstants()) {

if (!checkConstants())

const unsigned KeptBits = I1.logBase2();
const unsigned KeptBitsMinusOne = I01.logBase2();

if (KeptBits != (KeptBitsMinusOne + 1))

return DAG.getSetCC(DL, SCCVT, SExtInReg, X, NewCond);
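// optimizeSetCCByHoistingAndByConstFromLogicalShift rewrites tests of a
// shifted value masked by a constant so the constant is shifted instead of
// X; foldSetCCWithBinOp and the CTPOP fragment below fold comparisons of a
// binop or a population count against simple constants.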
SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(

    DAGCombinerInfo &DCI, const SDLoc &DL) const {

       "Should be a comparison with 0.");

       "Valid only for [in]equality comparisons.");

unsigned NewShiftOpcode;

unsigned OldShiftOpcode = V.getOpcode();
switch (OldShiftOpcode) {

C = V.getOperand(0);

Y = V.getOperand(1);

    X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG);

EVT VT = X.getValueType();

                                         DAGCombinerInfo &DCI) const {

       "Unexpected binop");

if (!DCI.isCalledByLegalizer())
  DCI.AddToWorklist(YShl1.getNode());

if (CTPOP.getOpcode() != ISD::CTPOP || !CTPOP.hasOneUse())

EVT CTVT = CTPOP.getValueType();
SDValue CTOp = CTPOP.getOperand(0);

for (unsigned i = 0; i < Passes; i++) {