58 if (
F.getFnAttribute(
"disable-tail-calls").getValueAsBool())
64 AttrBuilder CallerAttrs(
F.getContext(),
F.getAttributes().getRetAttrs());
65 for (
const auto &Attr :
66 {Attribute::Alignment, Attribute::Dereferenceable,
67 Attribute::DereferenceableOrNull, Attribute::NoAlias,
68 Attribute::NonNull, Attribute::NoUndef, Attribute::Range})
75 if (CallerAttrs.
contains(Attribute::ZExt) ||
76 CallerAttrs.
contains(Attribute::SExt))
87 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
103 Register ArgReg = cast<RegisterSDNode>(
Value->getOperand(1))->getReg();
104 if (
MRI.getLiveInPhysReg(ArgReg) != Reg)
114 IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
115 IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
116 IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
117 IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
118 IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
119 IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
120 IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
121 IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
122 IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
123 IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
124 IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
125 IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
126 Alignment = Call->getParamStackAlign(ArgIdx);
129 "multiple ABI attributes?");
145std::pair<SDValue, SDValue>
155 Args.reserve(Ops.
size());
158 for (
unsigned i = 0; i < Ops.
size(); ++i) {
161 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.
getContext());
164 Entry.IsZExt = !Entry.IsSExt;
168 Entry.IsSExt = Entry.IsZExt =
false;
170 Args.push_back(Entry);
173 if (LC == RTLIB::UNKNOWN_LIBCALL)
181 bool zeroExtend = !signExtend;
185 signExtend = zeroExtend =
false;
196 return LowerCallTo(CLI);
200 std::vector<EVT> &MemOps,
unsigned Limit,
const MemOp &
Op,
unsigned DstAS,
202 if (Limit != ~
unsigned(0) &&
Op.isMemcpyWithFixedDstAlign() &&
203 Op.getSrcAlign() <
Op.getDstAlign())
208 if (VT == MVT::Other) {
212 VT = MVT::LAST_INTEGER_VALUETYPE;
213 if (
Op.isFixedDstAlign())
220 MVT LVT = MVT::LAST_INTEGER_VALUETYPE;
231 unsigned NumMemOps = 0;
235 while (VTSize >
Size) {
246 else if (NewVT == MVT::i64 &&
258 if (NewVT == MVT::i8)
267 if (NumMemOps &&
Op.allowOverlap() && NewVTSize <
Size &&
269 VT, DstAS,
Op.isFixedDstAlign() ?
Op.getDstAlign() :
Align(1),
279 if (++NumMemOps > Limit)
282 MemOps.push_back(VT);
297 return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
307 bool IsSignaling)
const {
312 assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
313 &&
"Unsupported setcc type!");
316 RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
317 bool ShouldInvertCC =
false;
321 LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
322 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
323 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
327 LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
328 (VT == MVT::f64) ? RTLIB::UNE_F64 :
329 (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
333 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
334 (VT == MVT::f64) ? RTLIB::OGE_F64 :
335 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
339 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
340 (VT == MVT::f64) ? RTLIB::OLT_F64 :
341 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
345 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
346 (VT == MVT::f64) ? RTLIB::OLE_F64 :
347 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
351 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
352 (VT == MVT::f64) ? RTLIB::OGT_F64 :
353 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
356 ShouldInvertCC =
true;
359 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
360 (VT == MVT::f64) ? RTLIB::UO_F64 :
361 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
365 ShouldInvertCC =
true;
368 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
369 (VT == MVT::f64) ? RTLIB::UO_F64 :
370 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
371 LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
372 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
373 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
377 ShouldInvertCC =
true;
380 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
381 (VT == MVT::f64) ? RTLIB::OGE_F64 :
382 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
385 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
386 (VT == MVT::f64) ? RTLIB::OGT_F64 :
387 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
390 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
391 (VT == MVT::f64) ? RTLIB::OLE_F64 :
392 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
395 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
396 (VT == MVT::f64) ? RTLIB::OLT_F64 :
397 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
405 SDValue Ops[2] = {NewLHS, NewRHS};
410 auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
415 if (ShouldInvertCC) {
417 CCCode = getSetCCInverse(CCCode, RetVT);
420 if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
427 auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
430 CCCode = getSetCCInverse(CCCode, RetVT);
431 NewLHS = DAG.
getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
445 if (!isPositionIndependent())
459 unsigned JTEncoding = getJumpTableEncoding();
495 if (!
TM.shouldAssumeDSOLocal(GV))
499 if (isPositionIndependent())
515 const APInt &DemandedElts,
518 unsigned Opcode =
Op.getOpcode();
526 if (targetShrinkDemandedConstant(
Op,
DemandedBits, DemandedElts, TLO))
536 auto *Op1C = dyn_cast<ConstantSDNode>(
Op.getOperand(1));
537 if (!Op1C || Op1C->isOpaque())
541 const APInt &
C = Op1C->getAPIntValue();
546 EVT VT =
Op.getValueType();
563 EVT VT =
Op.getValueType();
578 "ShrinkDemandedOp only supports binary operators!");
579 assert(
Op.getNode()->getNumValues() == 1 &&
580 "ShrinkDemandedOp only supports nodes with one result!");
582 EVT VT =
Op.getValueType();
591 Op.getOperand(1).getValueType().getScalarSizeInBits() ==
BitWidth &&
592 "ShrinkDemandedOp only supports operands that have the same size!");
596 if (!
Op.getNode()->hasOneUse())
609 Op.getOpcode(), dl, SmallVT,
612 assert(DemandedSize <= SmallVTBits &&
"Narrowed below demanded bits?");
627 bool Simplified = SimplifyDemandedBits(
Op,
DemandedBits, Known, TLO);
636 const APInt &DemandedElts,
656 bool AssumeSingleUse)
const {
657 EVT VT =
Op.getValueType();
673 EVT VT =
Op.getValueType();
691 switch (
Op.getOpcode()) {
697 EVT SrcVT = Src.getValueType();
698 EVT DstVT =
Op.getValueType();
704 if (NumSrcEltBits == NumDstEltBits)
705 if (
SDValue V = SimplifyMultipleUseDemandedBits(
709 if (SrcVT.
isVector() && (NumDstEltBits % NumSrcEltBits) == 0) {
710 unsigned Scale = NumDstEltBits / NumSrcEltBits;
714 for (
unsigned i = 0; i != Scale; ++i) {
715 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
716 unsigned BitOffset = EltOffset * NumSrcEltBits;
719 DemandedSrcBits |= Sub;
720 for (
unsigned j = 0; j != NumElts; ++j)
722 DemandedSrcElts.
setBit((j * Scale) + i);
726 if (
SDValue V = SimplifyMultipleUseDemandedBits(
727 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
732 if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) {
733 unsigned Scale = NumSrcEltBits / NumDstEltBits;
737 for (
unsigned i = 0; i != NumElts; ++i)
738 if (DemandedElts[i]) {
739 unsigned Offset = (i % Scale) * NumDstEltBits;
741 DemandedSrcElts.
setBit(i / Scale);
744 if (
SDValue V = SimplifyMultipleUseDemandedBits(
745 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
766 return Op.getOperand(0);
768 return Op.getOperand(1);
779 return Op.getOperand(0);
781 return Op.getOperand(1);
791 return Op.getOperand(0);
793 return Op.getOperand(1);
799 if (std::optional<uint64_t> MaxSA =
802 unsigned ShAmt = *MaxSA;
803 unsigned NumSignBits =
806 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
835 EVT ExVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
842 if (NumSignBits >= (
BitWidth - ExBits + 1))
855 EVT SrcVT = Src.getValueType();
856 EVT DstVT =
Op.getValueType();
857 if (IsLE && DemandedElts == 1 &&
870 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
873 !DemandedElts[CIdx->getZExtValue()])
887 if (DemandedSubElts == 0)
897 bool AllUndef =
true, IdentityLHS =
true, IdentityRHS =
true;
898 for (
unsigned i = 0; i != NumElts; ++i) {
899 int M = ShuffleMask[i];
900 if (M < 0 || !DemandedElts[i])
903 IdentityLHS &= (M == (int)i);
904 IdentityRHS &= ((M - NumElts) == i);
910 return Op.getOperand(0);
912 return Op.getOperand(1);
922 if (
SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
932 unsigned Depth)
const {
933 EVT VT =
Op.getValueType();
940 return SimplifyMultipleUseDemandedBits(
Op,
DemandedBits, DemandedElts, DAG,
946 unsigned Depth)
const {
948 return SimplifyMultipleUseDemandedBits(
Op,
DemandedBits, DemandedElts, DAG,
960 "SRL or SRA node is required here!");
963 if (!N1C || !N1C->
isOne())
1010 unsigned ShiftOpc =
Op.getOpcode();
1011 bool IsSigned =
false;
1015 unsigned NumSigned = std::min(NumSignedA, NumSignedB) - 1;
1020 unsigned NumZero = std::min(NumZeroA, NumZeroB);
1026 if (NumZero >= 2 && NumSigned < NumZero) {
1031 if (NumSigned >= 1) {
1039 if (NumZero >= 1 && NumSigned < NumZero) {
1059 EVT VT =
Op.getValueType();
1073 Add.getOperand(1)) &&
1084 (isa<ConstantSDNode>(ExtOpA) || isa<ConstantSDNode>(ExtOpB)))
1104 unsigned Depth,
bool AssumeSingleUse)
const {
1107 "Mask size mismatches value type size!");
1112 EVT VT =
Op.getValueType();
1114 unsigned NumElts = OriginalDemandedElts.
getBitWidth();
1116 "Unexpected vector size");
1119 APInt DemandedElts = OriginalDemandedElts;
1139 cast<ConstantFPSDNode>(
Op)->getValueAPF().bitcastToAPInt());
1144 bool HasMultiUse =
false;
1145 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse()) {
1154 }
else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
1163 switch (
Op.getOpcode()) {
1167 if (!DemandedElts[0])
1172 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
1174 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO,
Depth + 1))
1179 if (DemandedElts == 1)
1192 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO,
Depth + 1))
1201 auto *LD = cast<LoadSDNode>(
Op);
1202 if (getTargetConstantFromLoad(LD)) {
1208 EVT MemVT = LD->getMemoryVT();
1220 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
1225 APInt DemandedVecElts(DemandedElts);
1227 unsigned Idx = CIdx->getZExtValue();
1231 if (!DemandedElts[
Idx])
1238 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO,
Depth + 1))
1244 if (SimplifyDemandedBits(Vec,
DemandedBits, DemandedVecElts, KnownVec, TLO,
1248 if (!!DemandedVecElts)
1263 APInt DemandedSrcElts = DemandedElts;
1267 if (SimplifyDemandedBits(Sub,
DemandedBits, DemandedSubElts, KnownSub, TLO,
1270 if (SimplifyDemandedBits(Src,
DemandedBits, DemandedSrcElts, KnownSrc, TLO,
1276 if (!!DemandedSubElts)
1278 if (!!DemandedSrcElts)
1284 SDValue NewSub = SimplifyMultipleUseDemandedBits(
1286 SDValue NewSrc = SimplifyMultipleUseDemandedBits(
1288 if (NewSub || NewSrc) {
1289 NewSub = NewSub ? NewSub : Sub;
1290 NewSrc = NewSrc ? NewSrc : Src;
1303 if (Src.getValueType().isScalableVector())
1306 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
1309 if (SimplifyDemandedBits(Src,
DemandedBits, DemandedSrcElts, Known, TLO,
1315 SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
1330 EVT SubVT =
Op.getOperand(0).getValueType();
1333 for (
unsigned i = 0; i != NumSubVecs; ++i) {
1334 APInt DemandedSubElts =
1335 DemandedElts.
extractBits(NumSubElts, i * NumSubElts);
1336 if (SimplifyDemandedBits(
Op.getOperand(i),
DemandedBits, DemandedSubElts,
1337 Known2, TLO,
Depth + 1))
1340 if (!!DemandedSubElts)
1350 APInt DemandedLHS, DemandedRHS;
1355 if (!!DemandedLHS || !!DemandedRHS) {
1361 if (!!DemandedLHS) {
1362 if (SimplifyDemandedBits(Op0,
DemandedBits, DemandedLHS, Known2, TLO,
1367 if (!!DemandedRHS) {
1368 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedRHS, Known2, TLO,
1375 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1377 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1379 if (DemandedOp0 || DemandedOp1) {
1380 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1381 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1416 LHSKnown.
One == ~RHSC->getAPIntValue()) {
1428 unsigned NumSubElts =
1445 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1449 Known2, TLO,
Depth + 1))
1471 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1473 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1475 if (DemandedOp0 || DemandedOp1) {
1476 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1477 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1490 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1492 if (Flags.hasDisjoint()) {
1493 Flags.setDisjoint(
false);
1494 Op->setFlags(Flags);
1499 if (SimplifyDemandedBits(Op0, ~Known.
One &
DemandedBits, DemandedElts,
1500 Known2, TLO,
Depth + 1)) {
1501 if (Flags.hasDisjoint()) {
1502 Flags.setDisjoint(
false);
1503 Op->setFlags(Flags);
1523 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1525 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1527 if (DemandedOp0 || DemandedOp1) {
1528 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1529 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1540 for (
int I = 0;
I != 2; ++
I) {
1543 SDValue Alt =
Op.getOperand(1 -
I).getOperand(0);
1544 SDValue C2 =
Op.getOperand(1 -
I).getOperand(1);
1546 for (
int J = 0; J != 2; ++J) {
1569 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1572 if (SimplifyDemandedBits(Op0,
DemandedBits, DemandedElts, Known2, TLO,
1599 if (
C->getAPIntValue() == Known2.
One) {
1608 if (!
C->isAllOnes() &&
DemandedBits.isSubsetOf(
C->getAPIntValue())) {
1620 if (ShiftC->getAPIntValue().ult(
BitWidth)) {
1621 uint64_t ShiftAmt = ShiftC->getZExtValue();
1624 : Ones.
lshr(ShiftAmt);
1642 if (!
C || !
C->isAllOnes())
1648 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1650 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1652 if (DemandedOp0 || DemandedOp1) {
1653 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1654 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1664 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1665 Known, TLO,
Depth + 1))
1667 if (SimplifyDemandedBits(
Op.getOperand(1),
DemandedBits, DemandedElts,
1668 Known2, TLO,
Depth + 1))
1679 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1680 Known, TLO,
Depth + 1))
1682 if (SimplifyDemandedBits(
Op.getOperand(1),
DemandedBits, DemandedElts,
1683 Known2, TLO,
Depth + 1))
1690 if (SimplifyDemandedBits(
Op.getOperand(3),
DemandedBits, DemandedElts,
1691 Known, TLO,
Depth + 1))
1693 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1694 Known2, TLO,
Depth + 1))
1737 if (std::optional<uint64_t> KnownSA =
1739 unsigned ShAmt = *KnownSA;
1749 if (std::optional<uint64_t> InnerSA =
1751 unsigned C1 = *InnerSA;
1753 int Diff = ShAmt - C1;
1772 if (ShAmt < InnerBits &&
DemandedBits.getActiveBits() <= InnerBits &&
1773 isTypeDesirableForOp(
ISD::SHL, InnerVT)) {
1790 InnerOp, DemandedElts,
Depth + 2)) {
1791 unsigned InnerShAmt = *SA2;
1792 if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
1794 (InnerBits - InnerShAmt + ShAmt) &&
1808 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
1811 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
1814 Flags.setNoSignedWrap(
false);
1815 Flags.setNoUnsignedWrap(
false);
1816 Op->setFlags(Flags);
1820 Known.
Zero <<= ShAmt;
1821 Known.
One <<= ShAmt;
1827 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1828 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
1839 Op.getNode()->hasOneUse()) {
1847 isTypeDesirableForOp(
ISD::SHL, SmallVT) &&
1850 assert(DemandedSize <= SmallVTBits &&
1851 "Narrowed below demanded bits?");
1871 isTypeDesirableForOp(
ISD::SHL, HalfVT) &&
1880 Flags.setNoSignedWrap(IsNSW);
1881 Flags.setNoUnsignedWrap(IsNUW);
1886 NewShiftAmt, Flags);
1899 if (SimplifyDemandedBits(Op0, DemandedFromOp, DemandedElts, Known, TLO,
1902 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
1905 Flags.setNoSignedWrap(
false);
1906 Flags.setNoUnsignedWrap(
false);
1907 Op->setFlags(Flags);
1917 if (std::optional<uint64_t> MaxSA =
1919 unsigned ShAmt = *MaxSA;
1920 unsigned NumSignBits =
1923 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
1933 if (std::optional<uint64_t> KnownSA =
1935 unsigned ShAmt = *KnownSA;
1945 if (std::optional<uint64_t> InnerSA =
1947 unsigned C1 = *InnerSA;
1949 int Diff = ShAmt - C1;
1965 if (
Op->getFlags().hasExact())
1974 isTypeDesirableForOp(
ISD::SRL, HalfVT) &&
1990 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
2000 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2001 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2015 DemandedElts,
Depth + 1))
2039 if (std::optional<uint64_t> KnownSA =
2041 unsigned ShAmt = *KnownSA;
2048 if (std::optional<uint64_t> InnerSA =
2050 unsigned LowBits =
BitWidth - ShAmt;
2056 if (*InnerSA == ShAmt) {
2066 unsigned NumSignBits =
2068 if (NumSignBits > ShAmt)
2078 if (
Op->getFlags().hasExact())
2086 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
2097 Flags.setExact(
Op->getFlags().hasExact());
2115 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2116 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2126 DemandedElts,
Depth + 1))
2139 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2144 if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1,
DemandedBits, DemandedElts,
2145 Known, TLO,
Depth + 1))
2154 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
2157 if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,
2170 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2171 Op0, Demanded0, DemandedElts, TLO.
DAG,
Depth + 1);
2172 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2173 Op1, Demanded1, DemandedElts, TLO.
DAG,
Depth + 1);
2174 if (DemandedOp0 || DemandedOp1) {
2175 DemandedOp0 = DemandedOp0 ? DemandedOp0 : Op0;
2176 DemandedOp1 = DemandedOp1 ? DemandedOp1 : Op1;
2187 if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
2188 Known2, TLO,
Depth + 1))
2204 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2210 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
2220 DemandedBits.countr_zero() >= (IsROTL ? Amt : RevAmt)) {
2225 DemandedBits.countl_zero() >= (IsROTL ? RevAmt : Amt)) {
2234 if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO,
2244 unsigned Opc =
Op.getOpcode();
2251 unsigned NumSignBits =
2255 if (NumSignBits >= NumDemandedUpperBits)
2296 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
2322 unsigned ShiftAmount = NLZ > NTZ ? NLZ - NTZ : NTZ - NLZ;
2330 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
2350 EVT ExVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
2355 unsigned MinSignedBits =
2357 bool AlreadySignExtended = ExVTBits >= MinSignedBits;
2360 if (!AlreadySignExtended) {
2378 InputDemandedBits.
setBit(ExVTBits - 1);
2380 if (SimplifyDemandedBits(Op0, InputDemandedBits, DemandedElts, Known, TLO,
2388 if (Known.
Zero[ExVTBits - 1])
2392 if (Known.
One[ExVTBits - 1]) {
2402 EVT HalfVT =
Op.getOperand(0).getValueType();
2410 if (SimplifyDemandedBits(
Op.getOperand(0), MaskLo, KnownLo, TLO,
Depth + 1))
2413 if (SimplifyDemandedBits(
Op.getOperand(1), MaskHi, KnownHi, TLO,
Depth + 1))
2416 Known = KnownHi.
concat(KnownLo);
2425 EVT SrcVT = Src.getValueType();
2434 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2446 APInt InDemandedElts = DemandedElts.
zext(InElts);
2447 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2449 if (Flags.hasNonNeg()) {
2450 Flags.setNonNeg(
false);
2451 Op->setFlags(Flags);
2459 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2460 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2470 EVT SrcVT = Src.getValueType();
2475 APInt InDemandedElts = DemandedElts.
zext(InElts);
2480 InDemandedBits.
setBit(InBits - 1);
2486 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2501 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2516 Flags.setNonNeg(
true);
2522 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2523 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2533 EVT SrcVT = Src.getValueType();
2540 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2545 APInt InDemandedElts = DemandedElts.
zext(InElts);
2546 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2553 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2554 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2563 unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
2565 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO,
2571 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2572 Src, TruncMask, DemandedElts, TLO.
DAG,
Depth + 1))
2577 switch (Src.getOpcode()) {
2588 if (Src.getNode()->hasOneUse()) {
2600 std::optional<uint64_t> ShAmtC =
2602 if (!ShAmtC || *ShAmtC >=
BitWidth)
2628 EVT ZVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
2630 if (SimplifyDemandedBits(
Op.getOperand(0), ~InMask |
DemandedBits, Known,
2634 Known.
Zero |= ~InMask;
2635 Known.
One &= (~Known.Zero);
2641 ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
2642 unsigned EltBitWidth = Src.getScalarValueSizeInBits();
2650 if (
auto *CIdx = dyn_cast<ConstantSDNode>(
Idx))
2651 if (CIdx->getAPIntValue().ult(NumSrcElts))
2658 DemandedSrcBits = DemandedSrcBits.
trunc(EltBitWidth);
2660 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO,
2666 if (
SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
2667 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2683 EVT SrcVT = Src.getValueType();
2693 if ((OpVTLegal || i32Legal) && VT.
isSimple() && SrcVT != MVT::f16 &&
2694 SrcVT != MVT::f128) {
2696 EVT Ty = OpVTLegal ? VT : MVT::i32;
2700 unsigned OpVTSizeInBits =
Op.getValueSizeInBits();
2701 if (!OpVTLegal && OpVTSizeInBits > 32)
2703 unsigned ShVal =
Op.getValueSizeInBits() - 1;
2713 unsigned Scale =
BitWidth / NumSrcEltBits;
2717 for (
unsigned i = 0; i != Scale; ++i) {
2718 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
2719 unsigned BitOffset = EltOffset * NumSrcEltBits;
2722 DemandedSrcBits |= Sub;
2723 for (
unsigned j = 0; j != NumElts; ++j)
2724 if (DemandedElts[j])
2725 DemandedSrcElts.
setBit((j * Scale) + i);
2729 APInt KnownSrcUndef, KnownSrcZero;
2730 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2731 KnownSrcZero, TLO,
Depth + 1))
2735 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2736 KnownSrcBits, TLO,
Depth + 1))
2738 }
else if (IsLE && (NumSrcEltBits %
BitWidth) == 0) {
2740 unsigned Scale = NumSrcEltBits /
BitWidth;
2744 for (
unsigned i = 0; i != NumElts; ++i)
2745 if (DemandedElts[i]) {
2748 DemandedSrcElts.
setBit(i / Scale);
2752 APInt KnownSrcUndef, KnownSrcZero;
2753 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2754 KnownSrcZero, TLO,
Depth + 1))
2759 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2760 KnownSrcBits, TLO,
Depth + 1))
2765 if (
SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
2766 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2788 if (
C &&
C->getAPIntValue().countr_zero() == CTZ) {
2807 SDValue Op0 =
Op.getOperand(0), Op1 =
Op.getOperand(1);
2812 auto GetDemandedBitsLHSMask = [&](
APInt Demanded,
2818 if (SimplifyDemandedBits(Op1, LoMask, DemandedElts, KnownOp1, TLO,
2820 SimplifyDemandedBits(Op0, GetDemandedBitsLHSMask(LoMask, KnownOp1),
2821 DemandedElts, KnownOp0, TLO,
Depth + 1) ||
2824 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
2827 Flags.setNoSignedWrap(
false);
2828 Flags.setNoUnsignedWrap(
false);
2829 Op->setFlags(Flags);
2841 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2842 Op0, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2843 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2844 Op1, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2845 if (DemandedOp0 || DemandedOp1) {
2846 Flags.setNoSignedWrap(
false);
2847 Flags.setNoUnsignedWrap(
false);
2848 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
2849 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
2851 TLO.
DAG.
getNode(
Op.getOpcode(), dl, VT, Op0, Op1, Flags);
2863 if (
C && !
C->isAllOnes() && !
C->isOne() &&
2864 (
C->getAPIntValue() | HighMask).isAllOnes()) {
2868 Flags.setNoSignedWrap(
false);
2869 Flags.setNoUnsignedWrap(
false);
2877 auto getShiftLeftAmt = [&HighMask](
SDValue Mul) ->
unsigned {
2904 if (
unsigned ShAmt = getShiftLeftAmt(Op0))
2907 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
2908 return foldMul(
ISD::SUB, Op1.getOperand(0), Op0, ShAmt);
2912 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
2913 return foldMul(
ISD::ADD, Op1.getOperand(0), Op0, ShAmt);
2921 Op.getOpcode() ==
ISD::ADD, Flags.hasNoSignedWrap(),
2922 Flags.hasNoUnsignedWrap(), KnownOp0, KnownOp1);
2932 if (
Op.getValueType().isScalableVector())
2934 if (SimplifyDemandedBitsForTargetNode(
Op,
DemandedBits, DemandedElts,
2947 if (!isTargetCanonicalConstantNode(
Op) &&
2953 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op))
2976 const APInt &DemandedElts,
2982 APInt KnownUndef, KnownZero;
2984 SimplifyDemandedVectorElts(
Op, DemandedElts, KnownUndef, KnownZero, TLO);
2996 const APInt &UndefOp0,
2997 const APInt &UndefOp1) {
3000 "Vector binop only");
3005 UndefOp1.
getBitWidth() == NumElts &&
"Bad type for undef analysis");
3007 auto getUndefOrConstantElt = [&](
SDValue V,
unsigned Index,
3008 const APInt &UndefVals) {
3009 if (UndefVals[
Index])
3012 if (
auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
3016 auto *
C = dyn_cast<ConstantSDNode>(Elt);
3017 if (isa<ConstantFPSDNode>(Elt) || Elt.
isUndef() || (
C && !
C->isOpaque()))
3025 for (
unsigned i = 0; i != NumElts; ++i) {
3044 bool AssumeSingleUse)
const {
3045 EVT VT =
Op.getValueType();
3046 unsigned Opcode =
Op.getOpcode();
3047 APInt DemandedElts = OriginalDemandedElts;
3062 "Mask size mismatches value type element count!");
3071 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse())
3075 if (DemandedElts == 0) {
3090 auto SimplifyDemandedVectorEltsBinOp = [&](
SDValue Op0,
SDValue Op1) {
3091 SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts,
3093 SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts,
3095 if (NewOp0 || NewOp1) {
3098 NewOp1 ? NewOp1 : Op1,
Op->getFlags());
3106 if (!DemandedElts[0]) {
3114 EVT SrcVT = Src.getValueType();
3126 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3136 EVT SrcVT = Src.getValueType();
3145 if (NumSrcElts == NumElts)
3146 return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
3147 KnownZero, TLO,
Depth + 1);
3149 APInt SrcDemandedElts, SrcZero, SrcUndef;
3153 if ((NumElts % NumSrcElts) == 0) {
3154 unsigned Scale = NumElts / NumSrcElts;
3156 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3166 for (
unsigned i = 0; i != NumElts; ++i)
3167 if (DemandedElts[i]) {
3168 unsigned Ofs = (i % Scale) * EltSizeInBits;
3169 SrcDemandedBits.
setBits(Ofs, Ofs + EltSizeInBits);
3173 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known,
3181 for (
unsigned SubElt = 0; SubElt != Scale; ++SubElt) {
3185 for (
unsigned SrcElt = 0; SrcElt != NumSrcElts; ++SrcElt) {
3186 unsigned Elt = Scale * SrcElt + SubElt;
3187 if (DemandedElts[Elt])
3195 for (
unsigned i = 0; i != NumSrcElts; ++i) {
3196 if (SrcDemandedElts[i]) {
3198 KnownZero.
setBits(i * Scale, (i + 1) * Scale);
3200 KnownUndef.
setBits(i * Scale, (i + 1) * Scale);
3208 if ((NumSrcElts % NumElts) == 0) {
3209 unsigned Scale = NumSrcElts / NumElts;
3211 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3217 for (
unsigned i = 0; i != NumElts; ++i) {
3218 if (DemandedElts[i]) {
3247 [&](
SDValue Elt) { return Op.getOperand(0) != Elt; })) {
3249 bool Updated =
false;
3250 for (
unsigned i = 0; i != NumElts; ++i) {
3251 if (!DemandedElts[i] && !Ops[i].
isUndef()) {
3261 for (
unsigned i = 0; i != NumElts; ++i) {
3263 if (
SrcOp.isUndef()) {
3265 }
else if (EltSizeInBits ==
SrcOp.getScalarValueSizeInBits() &&
3273 EVT SubVT =
Op.getOperand(0).getValueType();
3276 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3279 APInt SubUndef, SubZero;
3280 if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
3283 KnownUndef.
insertBits(SubUndef, i * NumSubElts);
3284 KnownZero.
insertBits(SubZero, i * NumSubElts);
3289 bool FoundNewSub =
false;
3291 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3294 SDValue NewSubOp = SimplifyMultipleUseDemandedVectorElts(
3295 SubOp, SubElts, TLO.
DAG,
Depth + 1);
3296 DemandedSubOps.
push_back(NewSubOp ? NewSubOp : SubOp);
3297 FoundNewSub = NewSubOp ?
true : FoundNewSub;
3315 APInt DemandedSrcElts = DemandedElts;
3318 APInt SubUndef, SubZero;
3319 if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO,
3324 if (!DemandedSrcElts && !Src.isUndef())
3329 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero,
3337 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
3338 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3339 SDValue NewSub = SimplifyMultipleUseDemandedVectorElts(
3340 Sub, DemandedSubElts, TLO.
DAG,
Depth + 1);
3341 if (NewSrc || NewSub) {
3342 NewSrc = NewSrc ? NewSrc : Src;
3343 NewSub = NewSub ? NewSub : Sub;
3345 NewSub,
Op.getOperand(2));
3354 if (Src.getValueType().isScalableVector())
3357 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3360 APInt SrcUndef, SrcZero;
3361 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
3369 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
3370 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3382 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
3386 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
3387 unsigned Idx = CIdx->getZExtValue();
3388 if (!DemandedElts[
Idx])
3391 APInt DemandedVecElts(DemandedElts);
3393 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
3394 KnownZero, TLO,
Depth + 1))
3403 APInt VecUndef, VecZero;
3404 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,
3417 APInt UndefSel, ZeroSel;
3418 if (SimplifyDemandedVectorElts(Sel, DemandedElts, UndefSel, ZeroSel, TLO,
3423 APInt DemandedLHS(DemandedElts);
3424 APInt DemandedRHS(DemandedElts);
3425 APInt UndefLHS, ZeroLHS;
3426 APInt UndefRHS, ZeroRHS;
3427 if (SimplifyDemandedVectorElts(
LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,
3430 if (SimplifyDemandedVectorElts(
RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,
3434 KnownUndef = UndefLHS & UndefRHS;
3435 KnownZero = ZeroLHS & ZeroRHS;
3439 APInt DemandedSel = DemandedElts & ~KnownZero;
3440 if (DemandedSel != DemandedElts)
3441 if (SimplifyDemandedVectorElts(Sel, DemandedSel, UndefSel, ZeroSel, TLO,
3453 APInt DemandedLHS(NumElts, 0);
3454 APInt DemandedRHS(NumElts, 0);
3455 for (
unsigned i = 0; i != NumElts; ++i) {
3456 int M = ShuffleMask[i];
3457 if (M < 0 || !DemandedElts[i])
3459 assert(0 <= M && M < (
int)(2 * NumElts) &&
"Shuffle index out of range");
3460 if (M < (
int)NumElts)
3463 DemandedRHS.
setBit(M - NumElts);
3467 APInt UndefLHS, ZeroLHS;
3468 APInt UndefRHS, ZeroRHS;
3469 if (SimplifyDemandedVectorElts(
LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,
3472 if (SimplifyDemandedVectorElts(
RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,
3477 bool Updated =
false;
3478 bool IdentityLHS =
true, IdentityRHS =
true;
3480 for (
unsigned i = 0; i != NumElts; ++i) {
3481 int &M = NewMask[i];
3484 if (!DemandedElts[i] || (M < (
int)NumElts && UndefLHS[M]) ||
3485 (M >= (
int)NumElts && UndefRHS[M - NumElts])) {
3489 IdentityLHS &= (M < 0) || (M == (
int)i);
3490 IdentityRHS &= (M < 0) || ((M - NumElts) == i);
3495 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.
LegalOps) {
3497 buildLegalVectorShuffle(VT,
DL,
LHS,
RHS, NewMask, TLO.
DAG);
3503 for (
unsigned i = 0; i != NumElts; ++i) {
3504 int M = ShuffleMask[i];
3507 }
else if (M < (
int)NumElts) {
3513 if (UndefRHS[M - NumElts])
3515 if (ZeroRHS[M - NumElts])
3524 APInt SrcUndef, SrcZero;
3526 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3527 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts);
3528 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
3535 Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
3536 DemandedSrcElts == 1) {
3549 if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() ==
ISD::AND &&
3550 Op->isOnlyUserOf(Src.getNode()) &&
3551 Op.getValueSizeInBits() == Src.getValueSizeInBits()) {
3553 EVT SrcVT = Src.getValueType();
3560 ISD::AND,
DL, SrcVT, {Src.getOperand(1), Mask})) {
3574 if (Op0 == Op1 &&
Op->isOnlyUserOf(Op0.
getNode())) {
3575 APInt UndefLHS, ZeroLHS;
3576 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3597 APInt UndefRHS, ZeroRHS;
3598 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
3601 APInt UndefLHS, ZeroLHS;
3602 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3606 KnownZero = ZeroLHS & ZeroRHS;
3612 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3624 APInt UndefRHS, ZeroRHS;
3625 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
3628 APInt UndefLHS, ZeroLHS;
3629 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3633 KnownZero = ZeroLHS;
3634 KnownUndef = UndefLHS & UndefRHS;
3639 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3650 APInt SrcUndef, SrcZero;
3651 if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO,
3656 APInt DemandedElts0 = DemandedElts & ~SrcZero;
3657 if (SimplifyDemandedVectorElts(Op0, DemandedElts0, KnownUndef, KnownZero,
3661 KnownUndef &= DemandedElts0;
3662 KnownZero &= DemandedElts0;
3667 if (DemandedElts.
isSubsetOf(SrcZero | KnownZero | SrcUndef | KnownUndef))
3674 KnownZero |= SrcZero;
3675 KnownUndef &= SrcUndef;
3676 KnownUndef &= ~KnownZero;
3680 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3687 if (SimplifyDemandedVectorElts(
Op.getOperand(0), DemandedElts, KnownUndef,
3688 KnownZero, TLO,
Depth + 1))
3700 if (SimplifyDemandedVectorEltsForTargetNode(
Op, DemandedElts, KnownUndef,
3701 KnownZero, TLO,
Depth))
3706 if (SimplifyDemandedBits(
Op,
DemandedBits, OriginalDemandedElts, Known,
3707 TLO,
Depth, AssumeSingleUse))
3713 assert((KnownUndef & KnownZero) == 0 &&
"Elements flagged as undef AND zero");
3727 const APInt &DemandedElts,
3729 unsigned Depth)
const {
3734 "Should use MaskedValueIsZero if you don't know whether Op"
3735 " is a target node!");
3742 unsigned Depth)
const {
3754 unsigned Depth)
const {
3763 unsigned Depth)
const {
3768 "Should use ComputeNumSignBits if you don't know whether Op"
3769 " is a target node!");
3786 "Should use SimplifyDemandedVectorElts if you don't know whether Op"
3787 " is a target node!");
3798 "Should use SimplifyDemandedBits if you don't know whether Op"
3799 " is a target node!");
3800 computeKnownBitsForTargetNode(
Op, Known, DemandedElts, TLO.
DAG,
Depth);
3812 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
3813 " is a target node!");
3846 "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op"
3847 " is a target node!");
3851 return !canCreateUndefOrPoisonForTargetNode(
Op, DemandedElts, DAG,
PoisonOnly,
3854 return DAG.isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly,
3866 "Should use canCreateUndefOrPoison if you don't know whether Op"
3867 " is a target node!");
3875 unsigned Depth)
const {
3880 "Should use isKnownNeverNaN if you don't know whether Op"
3881 " is a target node!");
3886 const APInt &DemandedElts,
3889 unsigned Depth)
const {
3894 "Should use isSplatValue if you don't know whether Op"
3895 " is a target node!");
3910 CVal = CN->getAPIntValue();
3911 EltWidth =
N.getValueType().getScalarSizeInBits();
3918 CVal = CVal.
trunc(EltWidth);
3924 return CVal.
isOne();
3966 return (
N->isOne() && !SExt) || (SExt && (
N->getValueType(0) != MVT::i1));
3969 return N->isAllOnes() && SExt;
3978 DAGCombinerInfo &DCI)
const {
4006 auto *AndC = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
4007 if (AndC &&
isNullConstant(N1) && AndC->getAPIntValue().isPowerOf2() &&
4010 AndC->getAPIntValue().getActiveBits());
4037 if (isXAndYEqZeroPreferableToXAndYEqY(
Cond, OpVT) &&
4045 if (DCI.isBeforeLegalizeOps() ||
4079SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
4084 if (!(C1 = dyn_cast<ConstantSDNode>(N1)))
4093 if (!(C01 = dyn_cast<ConstantSDNode>(N0->
getOperand(1))))
4097 EVT XVT =
X.getValueType();
4121 auto checkConstants = [&
I1, &I01]() ->
bool {
4126 if (checkConstants()) {
4134 if (!checkConstants())
4140 const unsigned KeptBits =
I1.logBase2();
4141 const unsigned KeptBitsMinusOne = I01.
logBase2();
4144 if (KeptBits != (KeptBitsMinusOne + 1))
4159 return DAG.
getSetCC(
DL, SCCVT, SExtInReg,
X, NewCond);
4163SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
4165 DAGCombinerInfo &DCI,
const SDLoc &
DL)
const {
4167 "Should be a comparison with 0.");
4169 "Valid only for [in]equality comparisons.");
4171 unsigned NewShiftOpcode;
4182 unsigned OldShiftOpcode =
V.getOpcode();
4183 switch (OldShiftOpcode) {
4195 C =
V.getOperand(0);
4200 Y =
V.getOperand(1);
4204 return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
4205 X, XC,
CC,
Y, OldShiftOpcode, NewShiftOpcode, DAG);
4222 EVT VT =
X.getValueType();
4237 DAGCombinerInfo &DCI)
const {
4240 "Unexpected binop");
4268 if (!DCI.isCalledByLegalizer())
4269 DCI.AddToWorklist(YShl1.
getNode());
4284 if (CTPOP.getOpcode() !=
ISD::CTPOP || !CTPOP.hasOneUse())
4287 EVT CTVT = CTPOP.getValueType();
4288 SDValue CTOp = CTPOP.getOperand(0);
4308 for (
unsigned i = 0; i <
Passes; i++) {
4357 auto getRotateSource = [](
SDValue X) {
4359 return X.getOperand(0);
4366 if (
SDValue R = getRotateSource(N0))
4399 if (!C1 || !C1->
isZero())
4408 if (!ShAmtC || ShAmtC->getAPIntValue().uge(
BitWidth))
4412 unsigned ShAmt = ShAmtC->getZExtValue();
4421 if (
Or.getOperand(0) ==
Other) {
4422 X =
Or.getOperand(0);
4423 Y =
Or.getOperand(1);
4426 if (
Or.getOperand(1) ==
Other) {
4427 X =
Or.getOperand(1);
4428 Y =
Or.getOperand(0);
4438 if (matchOr(F0, F1)) {
4445 if (matchOr(F1, F0)) {
4461 const SDLoc &dl)
const {
4471 bool N0ConstOrSplat =
4473 bool N1ConstOrSplat =
4481 if (N0ConstOrSplat && !N1ConstOrSplat &&
4484 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4490 if (!N0ConstOrSplat && !N1ConstOrSplat &&
4495 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4504 const APInt &C1 = N1C->getAPIntValue();
4524 return DAG.
getNode(LogicOp, dl, VT, IsXZero, IsYZero);
4554 if (
auto *N1C = dyn_cast<ConstantSDNode>(N1.
getNode())) {
4555 const APInt &C1 = N1C->getAPIntValue();
4570 if (
auto *
C = dyn_cast<ConstantSDNode>(N0->
getOperand(1)))
4571 if ((
C->getAPIntValue()+1).isPowerOf2()) {
4572 MinBits =
C->getAPIntValue().countr_one();
4580 }
else if (
auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
4583 MinBits = LN0->getMemoryVT().getSizeInBits();
4587 MinBits = LN0->getMemoryVT().getSizeInBits();
4598 MinBits >= ReqdBits) {
4600 if (isTypeDesirableForOp(
ISD::SETCC, MinVT)) {
4603 if (MinBits == 1 && C1 == 1)
4622 if (TopSetCC.
getValueType() == MVT::i1 && VT == MVT::i1 &&
4635 cast<CondCodeSDNode>(TopSetCC.
getOperand(2))->get(),
4654 auto *Lod = cast<LoadSDNode>(N0.
getOperand(0));
4656 unsigned bestWidth = 0, bestOffset = 0;
4657 if (Lod->isSimple() && Lod->isUnindexed() &&
4658 (Lod->getMemoryVT().isByteSized() ||
4660 unsigned memWidth = Lod->getMemoryVT().getStoreSizeInBits();
4662 unsigned maskWidth = origWidth;
4666 origWidth = Lod->getMemoryVT().getSizeInBits();
4670 for (
unsigned width = 8; width < origWidth; width *= 2) {
4677 unsigned maxOffset = origWidth - width;
4678 for (
unsigned offset = 0; offset <= maxOffset; offset += 8) {
4679 if (Mask.isSubsetOf(newMask)) {
4680 unsigned ptrOffset =
4682 unsigned IsFast = 0;
4685 *DAG.
getContext(), Layout, newVT, Lod->getAddressSpace(),
4686 NewAlign, Lod->getMemOperand()->getFlags(), &IsFast) &&
4688 bestOffset = ptrOffset / 8;
4689 bestMask = Mask.lshr(offset);
4703 if (bestOffset != 0)
4707 Lod->getPointerInfo().getWithOffset(bestOffset),
4708 Lod->getOriginalAlign());
4785 ExtDstTy != ExtSrcTy &&
"Unexpected types!");
4792 return DAG.
getSetCC(dl, VT, ZextOp,
4794 }
else if ((N1C->isZero() || N1C->isOne()) &&
4841 return DAG.
getSetCC(dl, VT, Val, N1,
4844 }
else if (N1C->isOne()) {
4881 cast<VTSDNode>(Op0.
getOperand(1))->getVT() == MVT::i1)
4905 N1C && N1C->isAllOnes()) {
4912 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1,
Cond, DCI, dl))
4919 const APInt &C1 = N1C->getAPIntValue();
4921 APInt MinVal, MaxVal;
4943 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
4963 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
5011 if (
SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift(
5012 VT, N0, N1,
Cond, DCI, dl))
5019 bool CmpZero = N1C->isZero();
5020 bool CmpNegOne = N1C->isAllOnes();
5021 if ((CmpZero || CmpNegOne) && N0.
hasOneUse()) {
5024 unsigned EltBits = V.getScalarValueSizeInBits();
5025 if (V.getOpcode() !=
ISD::OR || (EltBits % 2) != 0)
5032 isa<ConstantSDNode>(
RHS.getOperand(1)) &&
5033 RHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
5036 Hi =
RHS.getOperand(0);
5040 isa<ConstantSDNode>(
LHS.getOperand(1)) &&
5041 LHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
5044 Hi =
LHS.getOperand(0);
5052 unsigned HalfBits = EltBits / 2;
5063 if (IsConcat(N0,
Lo,
Hi))
5064 return MergeConcat(
Lo,
Hi);
5101 if (
auto *N1C = dyn_cast<ConstantSDNode>(N1.
getNode())) {
5103 const APInt &C1 = N1C->getAPIntValue();
5115 if (
auto *AndRHS = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5118 unsigned ShCt = AndRHS->getAPIntValue().logBase2();
5119 if (AndRHS->getAPIntValue().isPowerOf2() &&
5126 }
else if (
Cond ==
ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
5146 if (
auto *AndRHS = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5147 const APInt &AndRHSC = AndRHS->getAPIntValue();
5184 return DAG.
getSetCC(dl, VT, Shift, CmpRHS, NewCond);
5190 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) {
5191 auto *CFP = cast<ConstantFPSDNode>(N1);
5192 assert(!CFP->getValueAPF().isNaN() &&
"Unexpected NaN value");
5213 !
isFPImmLegal(CFP->getValueAPF(), CFP->getValueType(0))) {
5232 if (CFP->getValueAPF().isInfinity()) {
5233 bool IsNegInf = CFP->getValueAPF().isNegative();
5244 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5253 "Integer types should be handled by FoldSetCC");
5259 if (UOF ==
unsigned(EqTrue))
5264 if (NewCond !=
Cond &&
5267 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5274 if ((isSignedIntSetCC(
Cond) || isUnsignedIntSetCC(
Cond)) &&
5311 bool LegalRHSImm =
false;
5313 if (
auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
5314 if (
auto *LHSR = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5319 DAG.
getConstant(RHSC->getAPIntValue() - LHSR->getAPIntValue(),
5327 DAG.
getConstant(LHSR->getAPIntValue() ^ RHSC->getAPIntValue(),
5333 if (
auto *SUBC = dyn_cast<ConstantSDNode>(N0.
getOperand(0)))
5337 DAG.
getConstant(SUBC->getAPIntValue() - RHSC->getAPIntValue(),
5342 if (RHSC->getValueType(0).getSizeInBits() <= 64)
5351 if (
SDValue V = foldSetCCWithBinOp(VT, N0, N1,
Cond, dl, DCI))
5357 if (
SDValue V = foldSetCCWithBinOp(VT, N1, N0,
Cond, dl, DCI))
5360 if (
SDValue V = foldSetCCWithAnd(VT, N0, N1,
Cond, dl, DCI))
5371 if (
SDValue Folded = buildUREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5374 if (
SDValue Folded = buildSREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5387 N0 = DAG.
getNOT(dl, Temp, OpVT);
5396 Temp = DAG.
getNOT(dl, N0, OpVT);
5403 Temp = DAG.
getNOT(dl, N1, OpVT);
5410 Temp = DAG.
getNOT(dl, N0, OpVT);
5417 Temp = DAG.
getNOT(dl, N1, OpVT);
5426 N0 = DAG.
getNode(ExtendCode, dl, VT, N0);
5442 if (
auto *GASD = dyn_cast<GlobalAddressSDNode>(
N)) {
5443 GA = GASD->getGlobal();
5444 Offset += GASD->getOffset();
5452 if (
auto *V = dyn_cast<ConstantSDNode>(N2)) {
5453 Offset += V->getSExtValue();
5457 if (
auto *V = dyn_cast<ConstantSDNode>(N1)) {
5458 Offset += V->getSExtValue();
5479 unsigned S = Constraint.
size();
5482 switch (Constraint[0]) {
5485 return C_RegisterClass;
5513 if (S > 1 && Constraint[0] ==
'{' && Constraint[S - 1] ==
'}') {
5514 if (S == 8 && Constraint.
substr(1, 6) ==
"memory")
5542 std::vector<SDValue> &Ops,
5545 if (Constraint.
size() > 1)
5548 char ConstraintLetter = Constraint[0];
5549 switch (ConstraintLetter) {
5565 if ((
C = dyn_cast<ConstantSDNode>(
Op)) && ConstraintLetter !=
's') {
5569 bool IsBool =
C->getConstantIntValue()->getBitWidth() == 1;
5579 if (ConstraintLetter !=
'n') {
5580 if (
const auto *GA = dyn_cast<GlobalAddressSDNode>(
Op)) {
5582 GA->getValueType(0),
5583 Offset + GA->getOffset()));
5586 if (
const auto *BA = dyn_cast<BlockAddressSDNode>(
Op)) {
5588 BA->getBlockAddress(), BA->getValueType(0),
5589 Offset + BA->getOffset(), BA->getTargetFlags()));
5592 if (isa<BasicBlockSDNode>(
Op)) {
5597 const unsigned OpCode =
Op.getOpcode();
5599 if ((
C = dyn_cast<ConstantSDNode>(
Op.getOperand(0))))
5600 Op =
Op.getOperand(1);
5603 (
C = dyn_cast<ConstantSDNode>(
Op.getOperand(1))))
5604 Op =
Op.getOperand(0);
5621std::pair<unsigned, const TargetRegisterClass *>
5627 assert(*(Constraint.
end() - 1) ==
'}' &&
"Not a brace enclosed constraint?");
5632 std::pair<unsigned, const TargetRegisterClass *> R =
5644 std::pair<unsigned, const TargetRegisterClass *> S =
5645 std::make_pair(PR, RC);
5667 assert(!ConstraintCode.empty() &&
"No known constraint!");
5668 return isdigit(
static_cast<unsigned char>(ConstraintCode[0]));
5674 assert(!ConstraintCode.empty() &&
"No known constraint!");
5675 return atoi(ConstraintCode.c_str());
5689 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
5690 unsigned maCount = 0;
5696 unsigned LabelNo = 0;
5699 ConstraintOperands.emplace_back(std::move(CI));
5709 switch (OpInfo.
Type) {
5719 assert(!Call.getType()->isVoidTy() &&
"Bad inline asm!");
5720 if (
auto *STy = dyn_cast<StructType>(Call.getType())) {
5724 assert(ResNo == 0 &&
"Asm only has one result!");
5734 OpInfo.
CallOperandVal = cast<CallBrInst>(&Call)->getIndirectDest(LabelNo);
5745 OpTy = Call.getParamElementType(ArgNo);
5746 assert(OpTy &&
"Indirect operand must have elementtype attribute");
5750 if (
StructType *STy = dyn_cast<StructType>(OpTy))
5751 if (STy->getNumElements() == 1)
5752 OpTy = STy->getElementType(0);
5757 unsigned BitSize =
DL.getTypeSizeInBits(OpTy);
5778 if (!ConstraintOperands.empty()) {
5780 unsigned bestMAIndex = 0;
5781 int bestWeight = -1;
5787 for (maIndex = 0; maIndex < maCount; ++maIndex) {
5789 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
5790 cIndex != eIndex; ++cIndex) {
5811 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
5816 weightSum += weight;
5819 if (weightSum > bestWeight) {
5820 bestWeight = weightSum;
5821 bestMAIndex = maIndex;
5828 cInfo.selectAlternative(bestMAIndex);
5833 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
5834 cIndex != eIndex; ++cIndex) {
5845 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
5848 std::pair<unsigned, const TargetRegisterClass *> InputRC =
5853 (MatchRC.second != InputRC.second)) {
5855 " with a matching output constraint of"
5856 " incompatible type!");
5862 return ConstraintOperands;
5897 if (maIndex >= (
int)
info.multipleAlternatives.size())
5898 rCodes = &
info.Codes;
5900 rCodes = &
info.multipleAlternatives[maIndex].Codes;
5904 for (
const std::string &rCode : *rCodes) {
5906 getSingleConstraintMatchWeight(
info, rCode.c_str());
5907 if (weight > BestWeight)
5908 BestWeight = weight;
5921 Value *CallOperandVal =
info.CallOperandVal;
5924 if (!CallOperandVal)
5927 switch (*constraint) {
5930 if (isa<ConstantInt>(CallOperandVal))
5931 weight = CW_Constant;
5934 if (isa<GlobalValue>(CallOperandVal))
5935 weight = CW_Constant;
5939 if (isa<ConstantFP>(CallOperandVal))
5940 weight = CW_Constant;
5953 weight = CW_Register;
5957 weight = CW_Default;
5991 Ret.reserve(OpInfo.
Codes.size());
6006 Ret.emplace_back(Code, CType);
6011 return getConstraintPiority(a.second) > getConstraintPiority(b.second);
6025 "need immediate or other");
6030 std::vector<SDValue> ResultOps;
6032 return !ResultOps.empty();
6040 assert(!OpInfo.
Codes.empty() &&
"Must have at least one constraint");
6043 if (OpInfo.
Codes.size() == 1) {
6051 unsigned BestIdx = 0;
6052 for (
const unsigned E =
G.size();
6059 if (BestIdx + 1 == E) {
6075 if (isa<ConstantInt>(v) || isa<Function>(v)) {
6079 if (isa<BasicBlock>(v) || isa<BlockAddress>(v)) {
6086 if (
const char *Repl = LowerXConstraint(OpInfo.
ConstraintVT)) {
6101 EVT VT =
N->getValueType(0);
6106 bool UseSRA =
false;
6112 APInt Divisor =
C->getAPIntValue();
6134 "Expected matchUnaryPredicate to return one element for scalable "
6139 assert(isa<ConstantSDNode>(Op1) &&
"Expected a constant");
6141 Factor = Factors[0];
6147 Flags.setExact(
true);
6161 EVT VT =
N->getValueType(0);
6166 bool UseSRL =
false;
6172 APInt Divisor =
C->getAPIntValue();
6197 "Expected matchUnaryPredicate to return one element for scalable "
6202 assert(isa<ConstantSDNode>(Op1) &&
"Expected a constant");
6204 Factor = Factors[0];
6210 Flags.setExact(
true);
6251 EVT VT =
N->getValueType(0);
6287 bool IsAfterLegalization,
6290 EVT VT =
N->getValueType(0);
6316 if (
N->getFlags().hasExact())
6325 const APInt &Divisor =
C->getAPIntValue();
6327 int NumeratorFactor = 0;
6338 NumeratorFactor = 1;
6341 NumeratorFactor = -1;
6358 SDValue MagicFactor, Factor, Shift, ShiftMask;
6366 Shifts.
size() == 1 && ShiftMasks.
size() == 1 &&
6367 "Expected matchUnaryPredicate to return one element for scalable "
6374 assert(isa<ConstantSDNode>(N1) &&
"Expected a constant");
6375 MagicFactor = MagicFactors[0];
6376 Factor = Factors[0];
6378 ShiftMask = ShiftMasks[0];
6419 SDValue Q = GetMULHS(N0, MagicFactor);
6449 bool IsAfterLegalization,
6452 EVT VT =
N->getValueType(0);
6478 if (
N->getFlags().hasExact())
6488 bool UseNPQ =
false, UsePreShift =
false, UsePostShift =
false;
6494 const APInt& Divisor =
C->getAPIntValue();
6496 SDValue PreShift, MagicFactor, NPQFactor, PostShift;
6500 if (Divisor.
isOne()) {
6501 PreShift = PostShift = DAG.
getUNDEF(ShSVT);
6502 MagicFactor = NPQFactor = DAG.
getUNDEF(SVT);
6506 Divisor, std::min(KnownLeadingZeros, Divisor.
countl_zero()));
6511 "We shouldn't generate an undefined shift!");
6513 "We shouldn't generate an undefined shift!");
6515 "Unexpected pre-shift");
6522 UseNPQ |= magics.
IsAdd;
6523 UsePreShift |= magics.
PreShift != 0;
6538 SDValue PreShift, PostShift, MagicFactor, NPQFactor;
6546 NPQFactors.
size() == 1 && PostShifts.
size() == 1 &&
6547 "Expected matchUnaryPredicate to return one for scalable vectors");
6553 assert(isa<ConstantSDNode>(N1) &&
"Expected a constant");
6554 PreShift = PreShifts[0];
6555 MagicFactor = MagicFactors[0];
6556 PostShift = PostShifts[0];
6603 Q = GetMULHU(Q, MagicFactor);
6616 NPQ = GetMULHU(NPQ, NPQFactor);
6635 return DAG.
getSelect(dl, VT, IsOne, N0, Q);
6644 std::function<
bool(
SDValue)> Predicate,
6649 if (SplatValue != Values.
end()) {
6652 return Value == *SplatValue || Predicate(
Value);
6654 Replacement = *SplatValue;
6658 if (!AlternativeReplacement)
6661 Replacement = AlternativeReplacement;
6663 std::replace_if(Values.
begin(), Values.
end(), Predicate, Replacement);
6674 DAGCombinerInfo &DCI,
6677 if (
SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
6680 DCI.AddToWorklist(
N);
6688TargetLowering::prepareUREMEqFold(
EVT SETCCVT,
SDValue REMNode,
6690 DAGCombinerInfo &DCI,
const SDLoc &
DL,
6698 "Only applicable for (in)equality comparisons.");
6711 bool ComparingWithAllZeros =
true;
6712 bool AllComparisonsWithNonZerosAreTautological =
true;
6713 bool HadTautologicalLanes =
false;
6714 bool AllLanesAreTautological =
true;
6715 bool HadEvenDivisor =
false;
6716 bool AllDivisorsArePowerOfTwo =
true;
6717 bool HadTautologicalInvertedLanes =
false;
6726 const APInt &
Cmp = CCmp->getAPIntValue();
6728 ComparingWithAllZeros &=
Cmp.isZero();
6734 bool TautologicalInvertedLane =
D.ule(Cmp);
6735 HadTautologicalInvertedLanes |= TautologicalInvertedLane;
6740 bool TautologicalLane =
D.isOne() || TautologicalInvertedLane;
6741 HadTautologicalLanes |= TautologicalLane;
6742 AllLanesAreTautological &= TautologicalLane;
6748 AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
6751 unsigned K =
D.countr_zero();
6752 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
6756 HadEvenDivisor |= (
K != 0);
6759 AllDivisorsArePowerOfTwo &= D0.
isOne();
6763 unsigned W =
D.getBitWidth();
6765 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
6778 "We are expecting that K is always less than all-ones for ShSVT");
6781 if (TautologicalLane) {
6805 if (AllLanesAreTautological)
6810 if (AllDivisorsArePowerOfTwo)
6815 if (HadTautologicalLanes) {
6830 "Expected matchBinaryPredicate to return one element for "
6841 if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
6845 "Expecting that the types on LHS and RHS of comparisons match.");
6855 if (HadEvenDivisor) {
6868 if (!HadTautologicalInvertedLanes)
6874 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
6881 SDValue TautologicalInvertedChannels =
6891 DL, SETCCVT, SETCCVT);
6893 Replacement, NewCC);
6901 TautologicalInvertedChannels);
6914 DAGCombinerInfo &DCI,
6917 if (
SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
6919 assert(Built.
size() <= 7 &&
"Max size prediction failed.");
6921 DCI.AddToWorklist(
N);
6929TargetLowering::prepareSREMEqFold(
EVT SETCCVT,
SDValue REMNode,
6931 DAGCombinerInfo &DCI,
const SDLoc &
DL,
6956 "Only applicable for (in)equality comparisons.");
6972 if (!CompTarget || !CompTarget->
isZero())
6975 bool HadIntMinDivisor =
false;
6976 bool HadOneDivisor =
false;
6977 bool AllDivisorsAreOnes =
true;
6978 bool HadEvenDivisor =
false;
6979 bool NeedToApplyOffset =
false;
6980 bool AllDivisorsArePowerOfTwo =
true;
6995 HadIntMinDivisor |=
D.isMinSignedValue();
6998 HadOneDivisor |=
D.isOne();
6999 AllDivisorsAreOnes &=
D.isOne();
7002 unsigned K =
D.countr_zero();
7003 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
7006 if (!
D.isMinSignedValue()) {
7009 HadEvenDivisor |= (
K != 0);
7014 AllDivisorsArePowerOfTwo &= D0.
isOne();
7018 unsigned W =
D.getBitWidth();
7020 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
7026 if (!
D.isMinSignedValue()) {
7029 NeedToApplyOffset |=
A != 0;
7036 "We are expecting that A is always less than all-ones for SVT");
7038 "We are expecting that K is always less than all-ones for ShSVT");
7076 if (AllDivisorsAreOnes)
7081 if (AllDivisorsArePowerOfTwo)
7084 SDValue PVal, AVal, KVal, QVal;
7086 if (HadOneDivisor) {
7106 QAmts.
size() == 1 &&
7107 "Expected matchUnaryPredicate to return one element for scalable "
7114 assert(isa<ConstantSDNode>(
D) &&
"Expected a constant");
7125 if (NeedToApplyOffset) {
7137 if (HadEvenDivisor) {
7152 if (!HadIntMinDivisor)
7158 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
7193 MaskedIsZero, Fold);
7200 if (!isa<ConstantSDNode>(
Op.getOperand(0))) {
7202 "be a constant integer");
7212 EVT VT =
Op.getValueType();
7235 bool LegalOps,
bool OptForSize,
7237 unsigned Depth)
const {
7239 if (
Op.getOpcode() ==
ISD::FNEG ||
Op.getOpcode() == ISD::VP_FNEG) {
7241 return Op.getOperand(0);
7252 EVT VT =
Op.getValueType();
7253 unsigned Opcode =
Op.getOpcode();
7263 auto RemoveDeadNode = [&](
SDValue N) {
7264 if (
N &&
N.getNode()->use_empty())
7273 std::list<HandleSDNode> Handles;
7284 if (LegalOps && !IsOpLegal)
7287 APFloat V = cast<ConstantFPSDNode>(
Op)->getValueAPF();
7301 return !N.isUndef() && !isa<ConstantFPSDNode>(N);
7309 return N.isUndef() ||
7314 if (LegalOps && !IsOpLegal)
7323 APFloat V = cast<ConstantFPSDNode>(
C)->getValueAPF();
7331 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7342 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7345 Handles.emplace_back(NegX);
7350 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7356 if (NegX && (CostX <= CostY)) {
7360 RemoveDeadNode(NegY);
7369 RemoveDeadNode(NegX);
7376 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7398 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7401 Handles.emplace_back(NegX);
7406 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7412 if (NegX && (CostX <= CostY)) {
7416 RemoveDeadNode(NegY);
7422 if (
C->isExactlyValue(2.0) &&
Op.getOpcode() ==
ISD::FMUL)
7430 RemoveDeadNode(NegX);
7437 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7440 SDValue X =
Op.getOperand(0),
Y =
Op.getOperand(1), Z =
Op.getOperand(2);
7443 getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ,
Depth);
7449 Handles.emplace_back(NegZ);
7454 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7457 Handles.emplace_back(NegX);
7462 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7468 if (NegX && (CostX <= CostY)) {
7469 Cost = std::min(CostX, CostZ);
7472 RemoveDeadNode(NegY);
7478 Cost = std::min(CostY, CostZ);
7481 RemoveDeadNode(NegX);
7489 if (
SDValue NegV = getNegatedExpression(
Op.getOperand(0), DAG, LegalOps,
7491 return DAG.
getNode(Opcode,
DL, VT, NegV);
7494 if (
SDValue NegV = getNegatedExpression(
Op.getOperand(0), DAG, LegalOps,
7505 getNegatedExpression(
LHS, DAG, LegalOps, OptForSize, CostLHS,
Depth);
7507 RemoveDeadNode(NegLHS);
7512 Handles.emplace_back(NegLHS);
7517 getNegatedExpression(
RHS, DAG, LegalOps, OptForSize, CostRHS,
Depth);
7525 RemoveDeadNode(NegLHS);
7526 RemoveDeadNode(NegRHS);
7530 Cost = std::min(CostLHS, CostRHS);
7531 return DAG.
getSelect(
DL, VT,
Op.getOperand(0), NegLHS, NegRHS);
7560 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
7573 if ((
Signed && HasSMUL_LOHI) || (!
Signed && HasUMUL_LOHI)) {
7601 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false)) {
7602 Result.push_back(
Lo);
7603 Result.push_back(
Hi);
7606 Result.push_back(Zero);
7607 Result.push_back(Zero);
7618 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
true)) {
7619 Result.push_back(
Lo);
7620 Result.push_back(
Hi);
7625 unsigned ShiftAmount = OuterBitSize - InnerBitSize;
7640 if (!MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false))
7643 Result.push_back(
Lo);
7650 Result.push_back(
Hi);
7663 if (!MakeMUL_LOHI(LL, RH,
Lo,
Hi,
false))
7670 if (!MakeMUL_LOHI(LH, RL,
Lo,
Hi,
false))
7722 bool Ok = expandMUL_LOHI(
N->getOpcode(),
N->getValueType(0),
SDLoc(
N),
7723 N->getOperand(0),
N->getOperand(1), Result, HiLoVT,
7724 DAG, Kind, LL, LH, RL, RH);
7726 assert(Result.size() == 2);
7758 unsigned Opcode =
N->getOpcode();
7759 EVT VT =
N->getValueType(0);
7766 "Unexpected opcode");
7768 auto *CN = dyn_cast<ConstantSDNode>(
N->getOperand(1));
7772 APInt Divisor = CN->getAPIntValue();
7780 if (Divisor.
uge(HalfMaxPlus1))
7798 unsigned TrailingZeros = 0;
7812 if (HalfMaxPlus1.
urem(Divisor).
isOne()) {
7813 assert(!LL == !LH &&
"Expected both input halves or no input halves!");
7815 std::tie(LL, LH) = DAG.
SplitScalar(
N->getOperand(0), dl, HiLoVT, HiLoVT);
7819 if (TrailingZeros) {
7887 std::tie(QuotL, QuotH) = DAG.
SplitScalar(Quotient, dl, HiLoVT, HiLoVT);
7888 Result.push_back(QuotL);
7889 Result.push_back(QuotH);
7895 if (TrailingZeros) {
7901 Result.push_back(RemL);
7917 EVT VT =
Node->getValueType(0);
7927 bool IsFSHL =
Node->getOpcode() == ISD::VP_FSHL;
7930 EVT ShVT = Z.getValueType();
7936 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
7937 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitWidthC, ShAmt, Mask, VL);
7938 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, IsFSHL ? ShAmt : InvShAmt, Mask,
7940 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT,
Y, IsFSHL ? InvShAmt : ShAmt, Mask,
7948 ShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, Z, BitMask, Mask, VL);
7952 InvShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, NotZ, BitMask, Mask, VL);
7955 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
7956 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitMask, ShAmt, Mask, VL);
7961 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, ShAmt, Mask, VL);
7963 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT, ShY1, InvShAmt, Mask, VL);
7966 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT, ShX1, InvShAmt, Mask, VL);
7967 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT,
Y, ShAmt, Mask, VL);
7970 return DAG.
getNode(ISD::VP_OR,
DL, VT, ShX, ShY, Mask, VL);
7975 if (Node->isVPOpcode())
7978 EVT VT = Node->getValueType(0);
7988 SDValue Z = Node->getOperand(2);
7991 bool IsFSHL = Node->getOpcode() ==
ISD::FSHL;
7994 EVT ShVT = Z.getValueType();
8064 EVT VT = Node->getValueType(0);
8066 bool IsLeft = Node->getOpcode() ==
ISD::ROTL;
8067 SDValue Op0 = Node->getOperand(0);
8068 SDValue Op1 = Node->getOperand(1);
8079 return DAG.
getNode(RevRot,
DL, VT, Op0, Sub);
8082 if (!AllowVectorOps && VT.
isVector() &&
8100 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8102 HsVal = DAG.
getNode(HsOpc,
DL, VT, Op0, HsAmt);
8108 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8119 assert(Node->getNumOperands() == 3 &&
"Not a double-shift!");
8120 EVT VT = Node->getValueType(0);
8126 SDValue ShOpLo = Node->getOperand(0);
8127 SDValue ShOpHi = Node->getOperand(1);
8128 SDValue ShAmt = Node->getOperand(2);
8171 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
8172 SDValue Src = Node->getOperand(OpNo);
8173 EVT SrcVT = Src.getValueType();
8174 EVT DstVT = Node->getValueType(0);
8178 if (SrcVT != MVT::f32 || DstVT != MVT::i64)
8181 if (Node->isStrictFPOpcode())
8244 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
8245 SDValue Src = Node->getOperand(OpNo);
8247 EVT SrcVT = Src.getValueType();
8248 EVT DstVT = Node->getValueType(0);
8269 if (Node->isStrictFPOpcode()) {
8271 { Node->getOperand(0), Src });
8272 Chain = Result.getValue(1);
8286 if (Node->isStrictFPOpcode()) {
8288 Node->getOperand(0),
true);
8294 bool Strict = Node->isStrictFPOpcode() ||
8313 if (Node->isStrictFPOpcode()) {
8315 { Chain, Src, FltOfs });
8337 Result = DAG.
getSelect(dl, DstVT, Sel, True, False);
8348 if (Node->isStrictFPOpcode())
8351 SDValue Src = Node->getOperand(0);
8352 EVT SrcVT = Src.getValueType();
8353 EVT DstVT = Node->getValueType(0);
8376 llvm::bit_cast<double>(UINT64_C(0x4530000000100000)), dl, DstVT);
8396 unsigned Opcode = Node->getOpcode();
8401 if (Node->getFlags().hasNoNaNs()) {
8403 SDValue Op1 = Node->getOperand(0);
8404 SDValue Op2 = Node->getOperand(1);
8409 Flags.setNoSignedZeros(
true);
8422 EVT VT = Node->getValueType(0);
8426 "Expanding fminnum/fmaxnum for scalable vectors is undefined.");
8429 SDValue Quiet0 = Node->getOperand(0);
8430 SDValue Quiet1 = Node->getOperand(1);
8432 if (!Node->getFlags().hasNoNaNs()) {
8445 return DAG.
getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags());
8451 if ((Node->getFlags().hasNoNaNs() ||
8454 (Node->getFlags().hasNoSignedZeros() ||
8457 unsigned IEEE2018Op =
8460 return DAG.
getNode(IEEE2018Op, dl, VT, Node->getOperand(0),
8461 Node->getOperand(1), Node->getFlags());
8464 if (
SDValue SelCC = createSelectForFMINNUM_FMAXNUM(Node, DAG))
8475 unsigned Opc =
N->getOpcode();
8476 EVT VT =
N->getValueType(0);
8489 bool MinMaxMustRespectOrderedZero =
false;
8493 MinMaxMustRespectOrderedZero =
true;
8507 if (!
N->getFlags().hasNoNaNs() &&
8516 if (!MinMaxMustRespectOrderedZero && !
N->getFlags().hasNoSignedZeros() &&
8542 bool IsOrdered = NanTest ==
fcNone;
8543 bool IsUnordered = NanTest ==
fcNan;
8546 if (!IsOrdered && !IsUnordered)
8547 return std::nullopt;
8549 if (OrderedMask ==
fcZero &&
8555 return std::nullopt;
8562 EVT OperandVT =
Op.getValueType();
8573 if (OperandVT == MVT::ppcf128) {
8576 OperandVT = MVT::f64;
8581 bool IsInverted =
false;
8584 Test = InvertedCheck;
8591 bool IsF80 = (ScalarFloatVT == MVT::f80);
8595 if (Flags.hasNoFPExcept() &&
8600 if (std::optional<bool> IsCmp0 =
8603 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode,
8610 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode);
8645 const unsigned ExplicitIntBitInF80 = 63;
8646 APInt ExpMask = Inf;
8648 ExpMask.
clearBit(ExplicitIntBitInF80);
8662 const auto appendResult = [&](
SDValue PartialRes) {
8672 const auto getIntBitIsSet = [&]() ->
SDValue {
8673 if (!IntBitIsSetV) {
8674 APInt IntBitMask(BitSize, 0);
8675 IntBitMask.
setBit(ExplicitIntBitInF80);
8680 return IntBitIsSetV;
8701 Test &= ~fcPosFinite;
8706 Test &= ~fcNegFinite;
8708 appendResult(PartialRes);
8717 appendResult(ExpIsZero);
8727 else if (PartialCheck ==
fcZero)
8731 appendResult(PartialRes);
8744 appendResult(PartialRes);
8747 if (
unsigned PartialCheck =
Test &
fcInf) {
8750 else if (PartialCheck ==
fcInf)
8757 appendResult(PartialRes);
8760 if (
unsigned PartialCheck =
Test &
fcNan) {
8761 APInt InfWithQnanBit = Inf | QNaNBitMask;
8763 if (PartialCheck ==
fcNan) {
8776 }
else if (PartialCheck ==
fcQNan) {
8788 appendResult(PartialRes);
8793 APInt ExpLSB = ExpMask & ~(ExpMask.
shl(1));
8796 APInt ExpLimit = ExpMask - ExpLSB;
8809 appendResult(PartialRes);
8832 EVT VT = Node->getValueType(0);
8839 if (!(Len <= 128 && Len % 8 == 0))
8898 for (
unsigned Shift = 8; Shift < Len; Shift *= 2) {
8909 EVT VT = Node->getValueType(0);
8912 SDValue Mask = Node->getOperand(1);
8913 SDValue VL = Node->getOperand(2);
8918 if (!(Len <= 128 && Len % 8 == 0))
8930 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5;
8933 Tmp1 = DAG.
getNode(ISD::VP_AND, dl, VT,
8937 Op = DAG.
getNode(ISD::VP_SUB, dl, VT,
Op, Tmp1, Mask, VL);
8940 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op, Mask33, Mask, VL);
8941 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT,
8945 Op = DAG.
getNode(ISD::VP_ADD, dl, VT, Tmp2, Tmp3, Mask, VL);
8950 Tmp5 = DAG.
getNode(ISD::VP_ADD, dl, VT,
Op, Tmp4, Mask, VL);
8951 Op = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp5, Mask0F, Mask, VL);
8962 V = DAG.
getNode(ISD::VP_MUL, dl, VT,
Op, Mask01, Mask, VL);
8965 for (
unsigned Shift = 8; Shift < Len; Shift *= 2) {
8967 V = DAG.
getNode(ISD::VP_ADD, dl, VT, V,
8968 DAG.
getNode(ISD::VP_SHL, dl, VT, V, ShiftC, Mask, VL),
8978 EVT VT = Node->getValueType(0);
9017 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
9028 EVT VT = Node->getValueType(0);
9031 SDValue Mask = Node->getOperand(1);
9032 SDValue VL = Node->getOperand(2);
9042 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
9045 DAG.
getNode(ISD::VP_SRL, dl, VT,
Op, Tmp, Mask, VL), Mask,
9050 return DAG.
getNode(ISD::VP_CTPOP, dl, VT,
Op, Mask, VL);
9059 :
APInt(64, 0x0218A392CD3D5DBFULL);
9073 for (
unsigned i = 0; i <
BitWidth; i++) {
9099 EVT VT = Node->getValueType(0);
9133 if (
SDValue V = CTTZTableLookup(Node, DAG, dl, VT,
Op, NumBitsPerElt))
9155 SDValue Mask = Node->getOperand(1);
9156 SDValue VL = Node->getOperand(2);
9158 EVT VT = Node->getValueType(0);
9165 SDValue Tmp = DAG.
getNode(ISD::VP_AND, dl, VT, Not, MinusOne, Mask, VL);
9166 return DAG.
getNode(ISD::VP_CTPOP, dl, VT, Tmp, Mask, VL);
9180 EVT SrcVT = Source.getValueType();
9181 EVT ResVT =
N->getValueType(0);
9190 Source = DAG.
getNode(ISD::VP_SETCC,
DL, SrcVT, Source, AllZero,
9198 DAG.
getNode(ISD::VP_SELECT,
DL, ResVecVT, Source, StepVec,
Splat, EVL);
9199 return DAG.
getNode(ISD::VP_REDUCE_UMIN,
DL, ResVT, ExtEVL,
Select, Mask, EVL);
9203 bool IsNegative)
const {
9205 EVT VT =
N->getValueType(0);
9259 EVT VT =
N->getValueType(0);
9301 EVT VT =
N->getValueType(0);
9305 unsigned Opc =
N->getOpcode();
9314 "Unknown AVG node");
9326 return DAG.
getNode(ShiftOpc, dl, VT, Sum,
9358 return DAG.
getNode(SumOpc, dl, VT, Sign, Shift);
9363 EVT VT =
N->getValueType(0);
9370 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
9421 EVT VT =
N->getValueType(0);
9430 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
9439 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp1, Tmp2, Mask, EVL);
9449 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9453 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
9454 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
9455 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
9459 Tmp7 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9463 Tmp6 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9464 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
9467 Tmp5 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9468 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
9473 Tmp4 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp4,
9474 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
9477 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp3,
9478 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
9481 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9485 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp7, Mask, EVL);
9486 Tmp6 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp6, Tmp5, Mask, EVL);
9487 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
9488 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
9489 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp6, Mask, EVL);
9490 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
9491 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp4, Mask, EVL);
9497 EVT VT =
N->getValueType(0);
9540 for (
unsigned I = 0, J = Sz-1;
I < Sz; ++
I, --J) {
9557 assert(
N->getOpcode() == ISD::VP_BITREVERSE);
9560 EVT VT =
N->getValueType(0);
9579 Tmp = (Sz > 8 ? DAG.
getNode(ISD::VP_BSWAP, dl, VT,
Op, Mask, EVL) :
Op);
9584 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9590 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9595 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9601 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9606 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9612 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9618std::pair<SDValue, SDValue>
9622 SDValue Chain = LD->getChain();
9623 SDValue BasePTR = LD->getBasePtr();
9624 EVT SrcVT = LD->getMemoryVT();
9625 EVT DstVT = LD->getValueType(0);
9657 LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(),
9658 LD->getMemOperand()->getFlags(), LD->getAAInfo());
9661 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9662 unsigned ShiftIntoIdx =
9673 Scalar = DAG.
getNode(ExtendOp, SL, DstEltVT, Scalar);
9680 return std::make_pair(
Value, Load.getValue(1));
9689 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9691 DAG.
getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
9692 LD->getPointerInfo().getWithOffset(
Idx * Stride),
9693 SrcEltVT, LD->getOriginalAlign(),
9694 LD->getMemOperand()->getFlags(), LD->getAAInfo());
9705 return std::make_pair(
Value, NewChain);
9712 SDValue Chain = ST->getChain();
9713 SDValue BasePtr = ST->getBasePtr();
9715 EVT StVT = ST->getMemoryVT();
9741 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9746 unsigned ShiftIntoIdx =
9755 return DAG.
getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
9756 ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
9762 assert(Stride &&
"Zero stride!");
9766 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
9775 Chain, SL, Elt,
Ptr, ST->getPointerInfo().getWithOffset(
Idx * Stride),
9776 MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
9785std::pair<SDValue, SDValue>
9788 "unaligned indexed loads not implemented!");
9789 SDValue Chain = LD->getChain();
9791 EVT VT = LD->getValueType(0);
9792 EVT LoadedVT = LD->getMemoryVT();
9802 return scalarizeVectorLoad(LD, DAG);
9808 LD->getMemOperand());
9814 return std::make_pair(Result, newLoad.
getValue(1));
9822 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
9826 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.
getNode())->getIndex();
9831 EVT PtrVT =
Ptr.getValueType();
9832 EVT StackPtrVT = StackPtr.getValueType();
9838 for (
unsigned i = 1; i < NumRegs; i++) {
9841 RegVT, dl, Chain,
Ptr, LD->getPointerInfo().getWithOffset(
Offset),
9842 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
9846 Load.getValue(1), dl, Load, StackPtr,
9857 8 * (LoadedBytes -
Offset));
9860 LD->getPointerInfo().getWithOffset(
Offset), MemVT,
9861 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
9867 Load.getValue(1), dl, Load, StackPtr,
9874 Load = DAG.
getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
9879 return std::make_pair(Load, TF);
9883 "Unaligned load of unsupported type.");
9892 Align Alignment = LD->getOriginalAlign();
9893 unsigned IncrementSize = NumBits / 8;
9904 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
9909 LD->getPointerInfo().getWithOffset(IncrementSize),
9910 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
9913 Hi = DAG.
getExtLoad(HiExtType, dl, VT, Chain,
Ptr, LD->getPointerInfo(),
9914 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
9919 LD->getPointerInfo().getWithOffset(IncrementSize),
9920 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
9932 return std::make_pair(Result, TF);
9938 "unaligned indexed stores not implemented!");
9939 SDValue Chain = ST->getChain();
9943 Align Alignment = ST->getOriginalAlign();
9945 EVT StoreMemVT = ST->getMemoryVT();
9961 Result = DAG.
getStore(Chain, dl, Result,
Ptr, ST->getPointerInfo(),
9962 Alignment, ST->getMemOperand()->getFlags());
9970 EVT PtrVT =
Ptr.getValueType();
9973 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
9977 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
9981 Chain, dl, Val, StackPtr,
9984 EVT StackPtrVT = StackPtr.getValueType();
9992 for (
unsigned i = 1; i < NumRegs; i++) {
9995 RegVT, dl, Store, StackPtr,
9999 ST->getPointerInfo().getWithOffset(
Offset),
10000 ST->getOriginalAlign(),
10001 ST->getMemOperand()->getFlags()));
10021 ST->getPointerInfo().getWithOffset(
Offset), LoadMemVT,
10022 ST->getOriginalAlign(),
10023 ST->getMemOperand()->getFlags(), ST->getAAInfo()));
10030 "Unaligned store of unknown type.");
10034 unsigned IncrementSize = NumBits / 8;
10043 if (
auto *
C = dyn_cast<ConstantSDNode>(
Lo);
C && !
C->isOpaque())
10054 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
10055 ST->getMemOperand()->getFlags());
10060 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
10061 ST->getMemOperand()->getFlags(), ST->getAAInfo());
10072 bool IsCompressedMemory)
const {
10074 EVT AddrVT =
Addr.getValueType();
10075 EVT MaskVT = Mask.getValueType();
10077 "Incompatible types of Data and Mask");
10078 if (IsCompressedMemory) {
10081 "Cannot currently handle compressed memory with scalable vectors");
10087 MaskIntVT = MVT::i32;
10111 "Cannot index a scalable vector within a fixed-width vector");
10115 EVT IdxVT =
Idx.getValueType();
10121 if (
auto *IdxCst = dyn_cast<ConstantSDNode>(
Idx))
10122 if (IdxCst->getZExtValue() + (NumSubElts - 1) < NElts)
10136 unsigned MaxIndex = NumSubElts < NElts ? NElts - NumSubElts : 0;
10144 return getVectorSubVecPointer(
10145 DAG, VecPtr, VecVT,
10163 "Converting bits to bytes lost precision");
10165 "Sub-vector must be a vector with matching element type");
10194 std::string NameString = (
"__emutls_v." + GA->
getGlobal()->
getName()).str();
10198 assert(EmuTlsVar &&
"Cannot find EmuTlsVar ");
10200 Entry.Ty = VoidPtrType;
10201 Args.push_back(Entry);
10208 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
10217 "Emulated TLS must have zero offset in GlobalAddressSDNode");
10218 return CallResult.first;
10229 EVT VT =
Op.getOperand(0).getValueType();
10231 if (VT.
bitsLT(MVT::i32)) {
10245 SDValue Op0 = Node->getOperand(0);
10246 SDValue Op1 = Node->getOperand(1);
10249 unsigned Opcode = Node->getOpcode();
10291 {Op0, Op1, DAG.getCondCode(CC)})) {
10298 {Op0, Op1, DAG.getCondCode(CC)})) {
10326 unsigned Opcode = Node->getOpcode();
10329 EVT VT =
LHS.getValueType();
10332 assert(VT ==
RHS.getValueType() &&
"Expected operands to be the same type");
10348 unsigned OverflowOp;
10363 llvm_unreachable(
"Expected method to receive signed or unsigned saturation "
10364 "addition or subtraction node.");
10372 unsigned BitWidth =
LHS.getScalarValueSizeInBits();
10375 SDValue SumDiff = Result.getValue(0);
10376 SDValue Overflow = Result.getValue(1);
10398 return DAG.
getSelect(dl, VT, Overflow, Zero, SumDiff);
10418 if (LHSIsNonNegative || RHSIsNonNegative) {
10420 return DAG.
getSelect(dl, VT, Overflow, SatMax, SumDiff);
10426 if (LHSIsNegative || RHSIsNegative) {
10428 return DAG.
getSelect(dl, VT, Overflow, SatMin, SumDiff);
10438 return DAG.
getSelect(dl, VT, Overflow, Result, SumDiff);
10442 unsigned Opcode = Node->getOpcode();
10445 EVT VT =
LHS.getValueType();
10446 EVT ResVT = Node->getValueType(0);
10477 unsigned Opcode = Node->getOpcode();
10481 EVT VT =
LHS.getValueType();
10486 "Expected a SHLSAT opcode");
10487 assert(VT ==
RHS.getValueType() &&
"Expected operands to be the same type");
10525 if (WideVT == MVT::i16)
10526 LC = RTLIB::MUL_I16;
10527 else if (WideVT == MVT::i32)
10528 LC = RTLIB::MUL_I32;
10529 else if (WideVT == MVT::i64)
10530 LC = RTLIB::MUL_I64;
10531 else if (WideVT == MVT::i128)
10532 LC = RTLIB::MUL_I128;
10541 unsigned HalfBits = Bits >> 1;
10580 if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.
getDataLayout())) {
10585 SDValue Args[] = {LL, LH, RL, RH};
10586 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
10588 SDValue Args[] = {LH, LL, RH, RL};
10589 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
10592 "Ret value is a collection of constituent nodes holding result.");
10595 Lo = Ret.getOperand(0);
10596 Hi = Ret.getOperand(1);
10598 Lo = Ret.getOperand(1);
10599 Hi = Ret.getOperand(0);
10608 EVT VT =
LHS.getValueType();
10609 assert(
RHS.getValueType() == VT &&
"Mismatching operand types");
10628 forceExpandWideMUL(DAG, dl,
Signed, WideVT,
LHS, HiLHS,
RHS, HiRHS,
Lo,
Hi);
10637 "Expected a fixed point multiplication opcode");
10642 EVT VT =
LHS.getValueType();
10643 unsigned Scale = Node->getConstantOperandVal(2);
10659 SDValue Product = Result.getValue(0);
10660 SDValue Overflow = Result.getValue(1);
10671 Result = DAG.
getSelect(dl, VT, ProdNeg, SatMin, SatMax);
10672 return DAG.
getSelect(dl, VT, Overflow, Result, Product);
10676 SDValue Product = Result.getValue(0);
10677 SDValue Overflow = Result.getValue(1);
10681 return DAG.
getSelect(dl, VT, Overflow, SatMax, Product);
10686 "Expected scale to be less than the number of bits if signed or at "
10687 "most the number of bits if unsigned.");
10689 "Expected both operands to be the same type");
10698 Lo = Result.getValue(0);
10699 Hi = Result.getValue(1);
10720 if (Scale == VTSize)
10766 return DAG.
getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
10791 "Expected a fixed point division opcode");
10793 EVT VT =
LHS.getValueType();
10815 if (LHSLead + RHSTrail < Scale + (
unsigned)(Saturating &&
Signed))
10818 unsigned LHSShift = std::min(LHSLead, Scale);
10819 unsigned RHSShift = Scale - LHSShift;
10876 bool IsAdd = Node->getOpcode() ==
ISD::UADDO;
10882 SDValue NodeCarry = DAG.
getNode(OpcCarry, dl, Node->getVTList(),
10883 { LHS, RHS, CarryIn });
10892 EVT ResultType = Node->getValueType(1);
10903 DAG.
getSetCC(dl, SetCCType, Result,
10922 bool IsAdd = Node->getOpcode() ==
ISD::SADDO;
10927 EVT ResultType = Node->getValueType(1);
10953 DAG.
getNode(
ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
10954 ResultType, ResultType);
10960 EVT VT = Node->getValueType(0);
10968 const APInt &
C = RHSC->getAPIntValue();
10970 if (
C.isPowerOf2()) {
10972 bool UseArithShift =
isSigned && !
C.isMinSignedValue();
10975 Overflow = DAG.
getSetCC(dl, SetCCVT,
10977 dl, VT, Result, ShiftAmt),
10990 static const unsigned Ops[2][3] =
11013 forceExpandWideMUL(DAG, dl,
isSigned,
LHS,
RHS, BottomHalf, TopHalf);
11016 Result = BottomHalf;
11023 Overflow = DAG.
getSetCC(dl, SetCCVT, TopHalf,
11028 EVT RType = Node->getValueType(1);
11033 "Unexpected result type for S/UMULO legalization");
11041 EVT VT =
Op.getValueType();
11045 "Expanding reductions for scalable vectors is undefined.");
11056 Op = DAG.
getNode(BaseOpcode, dl, HalfVT,
Lo,
Hi, Node->getFlags());
11068 for (
unsigned i = 1; i < NumElts; i++)
11069 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());
11072 if (EltVT != Node->getValueType(0))
11079 SDValue AccOp = Node->getOperand(0);
11080 SDValue VecOp = Node->getOperand(1);
11088 "Expanding reductions for scalable vectors is undefined.");
11098 for (
unsigned i = 0; i < NumElts; i++)
11099 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags);
11106 EVT VT = Node->getValueType(0);
11111 SDValue Dividend = Node->getOperand(0);
11112 SDValue Divisor = Node->getOperand(1);
11115 Result = DAG.
getNode(DivRemOpc, dl, VTs, Dividend, Divisor).
getValue(1);
11120 SDValue Divide = DAG.
getNode(DivOpc, dl, VT, Dividend, Divisor);
11132 SDValue Src = Node->getOperand(0);
11135 EVT SrcVT = Src.getValueType();
11136 EVT DstVT = Node->getValueType(0);
11138 EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
11141 assert(SatWidth <= DstWidth &&
11142 "Expected saturation width smaller than result width");
11146 APInt MinInt, MaxInt;
11157 if (SrcVT == MVT::f16 || SrcVT == MVT::bf16) {
11159 SrcVT = Src.getValueType();
11180 if (AreExactFloatBounds && MinMaxLegal) {
11189 dl, DstVT, Clamped);
11201 return DAG.
getSelect(dl, DstVT, IsNan, ZeroInt, FpToInt);
11240 EVT OperandVT =
Op.getValueType();
11262 AbsWide = DAG.
getBitcast(OperandVT, ClearedSign);
11285 KeepNarrow = DAG.
getNode(
ISD::OR, dl, WideSetCCVT, KeepNarrow, AlreadyOdd);
11294 SDValue Adjust = DAG.
getSelect(dl, ResultIntVT, NarrowIsRd, One, NegativeOne);
11296 Op = DAG.
getSelect(dl, ResultIntVT, KeepNarrow, NarrowBits, Adjusted);
11308 EVT VT = Node->getValueType(0);
11311 if (Node->getConstantOperandVal(1) == 1) {
11314 EVT OperandVT =
Op.getValueType();
11326 EVT I32 =
F32.changeTypeToInteger();
11327 Op = expandRoundInexactToOdd(
F32,
Op, dl, DAG);
11352 EVT I16 = I32.isVector() ? I32.changeVectorElementType(MVT::i16) : MVT::i16;
11362 assert(Node->getValueType(0).isScalableVector() &&
11363 "Fixed length vector types expected to use SHUFFLE_VECTOR!");
11365 EVT VT = Node->getValueType(0);
11366 SDValue V1 = Node->getOperand(0);
11367 SDValue V2 = Node->getOperand(1);
11368 int64_t Imm = cast<ConstantSDNode>(Node->getOperand(2))->getSExtValue();
11387 EVT PtrVT = StackPtr.getValueType();
11389 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
11404 StackPtr = getVectorElementPointer(DAG, StackPtr, VT, Node->getOperand(2));
11406 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr,
11429 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr2,
11436 SDValue Vec = Node->getOperand(0);
11437 SDValue Mask = Node->getOperand(1);
11438 SDValue Passthru = Node->getOperand(2);
11442 EVT MaskVT = Mask.getValueType();
11451 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
11459 bool HasPassthru = !Passthru.
isUndef();
11465 Chain = DAG.
getStore(Chain,
DL, Passthru, StackPtr, PtrInfo);
11468 APInt PassthruSplatVal;
11469 bool IsSplatPassthru =
11472 if (IsSplatPassthru) {
11476 LastWriteVal = DAG.
getConstant(PassthruSplatVal,
DL, ScalarVT);
11477 }
else if (HasPassthru) {
11487 getVectorElementPointer(DAG, StackPtr, VecVT, Popcount);
11489 ScalarVT,
DL, Chain, LastElmtPtr,
11495 for (
unsigned I = 0;
I < NumElms;
I++) {
11499 SDValue OutPtr = getVectorElementPointer(DAG, StackPtr, VecVT, OutPos);
11501 Chain,
DL, ValI, OutPtr,
11514 if (HasPassthru &&
I == NumElms - 1) {
11520 OutPtr = getVectorElementPointer(DAG, StackPtr, VecVT, OutPos);
11525 DAG.
getSelect(
DL, ScalarVT, AllLanesSelected, ValI, LastWriteVal);
11527 Chain,
DL, LastWriteVal, OutPtr,
11532 return DAG.
getLoad(VecVT,
DL, Chain, StackPtr, PtrInfo);
11538 SDValue EVL,
bool &NeedInvert,
11540 bool IsSignaling)
const {
11542 MVT OpVT =
LHS.getSimpleValueType();
11544 NeedInvert =
false;
11545 assert(!EVL == !Mask &&
"VP Mask and EVL must either both be set or unset");
11546 bool IsNonVP = !EVL;
11561 bool NeedSwap =
false;
11562 InvCC = getSetCCInverse(CCCode, OpVT);
11590 "If SETUE is expanded, SETOEQ or SETUNE must be legal!");
11595 "If SETO is expanded, SETOEQ must be legal!");
11613 NeedInvert = ((
unsigned)CCCode & 0x8U);
11654 SetCC1 = DAG.
getSetCC(dl, VT,
LHS,
RHS, CC1, Chain, IsSignaling);
11655 SetCC2 = DAG.
getSetCC(dl, VT,
LHS,
RHS, CC2, Chain, IsSignaling);
11663 SetCC1 = DAG.
getSetCC(dl, VT,
LHS,
LHS, CC1, Chain, IsSignaling);
11664 SetCC2 = DAG.
getSetCC(dl, VT,
RHS,
RHS, CC2, Chain, IsSignaling);
11674 LHS = DAG.
getNode(Opc, dl, VT, SetCC1, SetCC2);
11678 Opc = Opc ==
ISD::OR ? ISD::VP_OR : ISD::VP_AND;
11679 LHS = DAG.
getNode(Opc, dl, VT, SetCC1, SetCC2, Mask, EVL);
unsigned const MachineRegisterInfo * MRI
amdgpu AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
block Block Frequency Analysis
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
static bool isUndef(ArrayRef< int > Mask)
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, const APInt &Demanded)
Check to see if the specified operand of the specified instruction is a constant integer.
static bool isNonZeroModBitWidthOrUndef(const MachineRegisterInfo &MRI, Register Reg, unsigned BW)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
unsigned const TargetRegisterInfo * TRI
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const char LLVMTargetMachineRef TM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static SDValue foldSetCCWithFunnelShift(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static bool lowerImmediateIfPossible(TargetLowering::ConstraintPair &P, SDValue Op, SelectionDAG *DAG, const TargetLowering &TLI)
If we have an immediate, see if we can lower it.
static SDValue expandVPFunnelShift(SDNode *Node, SelectionDAG &DAG)
static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, const APInt &UndefOp0, const APInt &UndefOp1)
Given a vector binary operation and known undefined elements for each input operand,...
static SDValue BuildExactUDIV(const TargetLowering &TLI, SDNode *N, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created)
Given an exact UDIV by a constant, create a multiplication with the multiplicative inverse of the con...
static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, SDValue Idx, EVT VecVT, const SDLoc &dl, ElementCount SubEC)
static unsigned getConstraintPiority(TargetLowering::ConstraintType CT)
Return a number indicating our preference for choosing a type of constraint over another,...
static std::optional< bool > isFCmpEqualZero(FPClassTest Test, const fltSemantics &Semantics, const MachineFunction &MF)
Returns a true value if this FPClassTest can be performed with an ordered fcmp to 0,...
static void turnVectorIntoSplatVector(MutableArrayRef< SDValue > Values, std::function< bool(SDValue)> Predicate, SDValue AlternativeReplacement=SDValue())
If all values in Values that don't match the predicate are the same 'splat' value, then replace all values...
static bool canExpandVectorCTPOP(const TargetLowering &TLI, EVT VT)
static SDValue foldSetCCWithRotate(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created)
Given an exact SDIV by a constant, create a multiplication with the multiplicative inverse of the con...
static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT, SDValue N0, const APInt &C1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue combineShiftToAVG(SDValue Op, TargetLowering::TargetLoweringOpt &TLO, const TargetLowering &TLI, const APInt &DemandedBits, const APInt &DemandedElts, unsigned Depth)
This file describes how to lower LLVM code to machine code.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT, SelectionDAG &DAG)
Scalarize a vector store, bitcasting to TargetVT to determine the scalar type.
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
void setSignBit()
Set the sign bit to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
void clearAllBits()
Set every bit to 0.
APInt reverseBits() const
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
void negate()
Negate this APInt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned getSignificantBits() const
Get the minimum bit size for this signed APInt.
unsigned countLeadingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
void setAllBits()
Set every bit to 1.
APInt multiplicativeInverse() const
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void clearHighBits(unsigned hiBits)
Set top hiBits bits to 0.
int64_t getSExtValue() const
Get sign extended value.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void setBitVal(unsigned BitPosition, bool BitValue)
Set a given bit to a given value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool hasAttributes() const
Return true if the builder has IR-level attributes.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
AttrBuilder & removeAttribute(Attribute::AttrKind Val)
Remove an attribute from the builder.
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This class represents a function call, abstracting a target machine's calling convention.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
ConstantFP - Floating Point Values [float, double].
const APInt & getAPIntValue() const
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
AttributeList getAttributes() const
Return the attribute list for this Function.
int64_t getOffset() const
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
std::vector< std::string > ConstraintCodeVector
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
Wrapper class representing physical registers. Should be passed by value.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setAdjustsStack(bool V)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
Function & getFunction()
Return the LLVM function that this machine code represents.
@ EK_GPRel32BlockAddress
EK_GPRel32BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
const GlobalVariable * getNamedGlobal(StringRef Name) const
Return the global variable in the module with the specified name, of arbitrary type.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Class to represent pointers.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
static SDNodeIterator end(const SDNode *N)
static SDNodeIterator begin(const SDNode *N)
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
bool willNotOverflowAdd(bool IsSigned, SDValue N0, SDValue N1) const
Determine if the result of the addition of 2 nodes can never overflow.
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
bool isKnownNeverSNaN(SDValue Op, unsigned Depth=0) const
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDNode * isConstantIntBuildVectorOrConstantInt(SDValue N) const
Test whether the given value is a constant int or similar node.
SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
bool shouldOptForSize() const
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly=false, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
bool isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
static const fltSemantics & EVTToAPFloatSemantics(EVT VT)
Returns an APFloat semantics tag appropriate for the given type.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
bool isKnownNeverZeroFloat(SDValue Op) const
Test whether the given floating point SDValue is known to never be positive or negative zero.
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
bool isKnownNeverNaN(SDValue Op, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
std::optional< uint64_t > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
std::optional< uint64_t > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shi...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Mask, SDValue EVL)
Helper function to make it easier to build VP_SETCCs if you just have an ISD::CondCode instead of an ...
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Class to represent struct types.
void setAttributes(const CallBase *Call, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
virtual bool isShuffleMaskLegal(ArrayRef< int >, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
virtual bool shouldRemoveRedundantExtend(SDValue Op) const
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual bool isSafeMemOpType(MVT) const
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
const TargetMachine & getTargetMachine() const
virtual bool isCtpopFast(EVT VT) const
Return true if ctpop instruction is fast.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const
Indicates if any padding is guaranteed to go at the most significant bits when storing the type to me...
virtual EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &) const
Returns the target specific optimal type for load and store operations as a result of memset,...
LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some oth...
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual bool isFPImmLegal(const APFloat &, EVT, bool ForCodeSize=false) const
Returns true if the target can instruction select the specified FP immediate natively.
virtual MVT::SimpleValueType getCmpLibcallReturnType() const
Return the ValueType for comparison libcalls.
virtual bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const
Should we transform the IR-optimal check for whether given truncation down into KeptBits would be trun...
bool isLegalRC(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC) const
Return true if the value types that can be represented by the specified register class are all legal.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual bool shouldExtendTypeInLibCall(EVT Type) const
Returns true if arguments should be extended in lib calls.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const
Return true if creating a shift of the type by the given amount is not profitable.
virtual bool shouldExpandCmpUsingSelects() const
Should we expand [US]CMP nodes using two selects and two compares, or by doing arithmetic on boolean ...
virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
MVT getSimpleValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the MVT corresponding to this LLVM type. See getValueType.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal on this target.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual bool isNarrowingProfitable(EVT SrcVT, EVT DestVT) const
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Return true if it is profitable to reduce a load to a smaller type.
virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a cust...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
virtual bool hasAndNotCompare(SDValue Y) const
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
virtual bool isCtlzFast() const
Return true if ctlz instruction is fast.
virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT, bool IsSigned) const
Return true if it is more correct/profitable to use strict FP_TO_INT conversion operations - canonica...
NegatibleCost
Enum that specifies when a float negation is beneficial.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const
Get the CondCode that's to be used to test the result of the comparison libcall against zero.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal or custom on this target.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
MulExpansionKind
Enum that specifies when a multiplication should be expanded.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT.
SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Build sdiv by power-of-2 with conditional move instructions Ref: "Hacker's Delight" by Henry Warren 1...
virtual ConstraintWeight getMultipleConstraintMatchWeight(AsmOperandInfo &info, int maIndex) const
Examine constraint type and operand type and determine a weight value.
SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]MULO.
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL into two nodes.
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
virtual bool isUsedByReturnOnly(SDNode *, SDValue &) const
Return true if result of the specified node is used by a return node only.
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand VP_BSWAP nodes.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand CTLZ/CTLZ_ZERO_UNDEF nodes.
SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand BITREVERSE nodes.
SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand CTTZ/CTTZ_ZERO_UNDEF nodes.
virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value, SDValue Addr, int JTI, SelectionDAG &DAG) const
Expands target specific indirect branch for the case of JumpTable expansion.
SDValue expandABD(SDNode *N, SelectionDAG &DAG) const
Expand ABDS/ABDU nodes.
virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine the known alignment for the pointer value R.
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]SHLSAT.
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const
Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all bits from only some vector eleme...
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::S(ADD|SUB)O.
SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand VP_BITREVERSE nodes.
SDValue expandABS(SDNode *N, SelectionDAG &DAG, bool IsNegative=false) const
Expand ABS nodes.
SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_* into an explicit calculation.
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
SDValue expandVPCTTZElements(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ_ELTS/VP_CTTZ_ELTS_ZERO_UNDEF nodes.
virtual const char * getTargetNodeName(unsigned Opcode) const
This method returns the name of a target specific DAG node.
bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand float to UINT conversion.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
virtual bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded vector elements, returning true on success...
bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const
Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
virtual const char * LowerXConstraint(EVT ConstraintVT) const
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand CTPOP nodes.
SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand BSWAP nodes.
SDValue expandFMINIMUM_FMAXIMUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimum/fmaximum into multiple comparison with selects.
SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op, unsigned NumBitsPerElt) const
Expand CTTZ via Table Lookup.
virtual bool isKnownNeverNaNForTargetNode(SDValue Op, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false, test whether Op is known never to be any NaN; if SNaN is true, test whether Op is known never to be a signaling NaN.
bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, SDValue LL=SDValue(), SDValue LH=SDValue()) const
Attempt to expand an n-bit div/rem/divrem by constant using a n/2-bit urem by constant and other arit...
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
virtual void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool isPositionIndependent() const
std::pair< StringRef, TargetLowering::ConstraintType > ConstraintPair
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, const DenormalMode &Mode) const
Return a target-dependent comparison result if the input operand is suitable for use with a square ro...
ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const
Given an OpInfo with list of constraints codes as strings, return a sorted Vector of pairs of constra...
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const
Expand float(f32) to SINT(i64) conversion.
virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, SDValue N1, MutableArrayRef< int > Mask, SelectionDAG &DAG) const
Tries to build a legal vector shuffle using the provided parameters or equivalent variations.
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. "{edx}"), return the register number and register class for the register.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed, EVT WideVT, const SDValue LL, const SDValue LH, const SDValue RL, const SDValue RH, SDValue &Lo, SDValue &Hi) const
forceExpandWideMUL - Unconditionally expand a MUL into either a libcall or brute force via a wide mul...
virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const
Return true if it is profitable to combine an XOR of a logical shift to create a logical shift of NOT...
TargetLowering(const TargetLowering &)=delete
virtual bool shouldSimplifyDemandedVectorElts(SDValue Op, const TargetLoweringOpt &TLO) const
Return true if the target supports simplifying demanded vector elements by converting them to undefs.
bool isConstFalseVal(SDValue N) const
Return if the N is a constant or constant vector equal to the false value from getBooleanContents().
SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::UDIV node expressing a divide by constant, return a DAG expression to select that will ...
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which ma...
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI, const SDLoc &dl) const
Try to simplify a setcc built with the specified operands and cc.
SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const
Expand funnel shift.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, SDValue Mask, SDValue EVL, bool &NeedInvert, const SDLoc &dl, SDValue &Chain, bool IsSignaling=false) const
Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC on the current target.
bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const
Return if N is a True value when extended to VT.
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &DemandedBits, TargetLoweringOpt &TLO) const
Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
bool isConstTrueVal(SDValue N) const
Return if the N is a constant or constant vector equal to the true value from getBooleanContents().
SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTPOP nodes.
SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, SDValue LHS, SDValue RHS, unsigned Scale, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]DIVFIX[SAT].
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const
Expand a vector VECTOR_COMPRESS into a sequence of extract element, store temporarily,...
virtual const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const
Try to convert the fminnum/fmaxnum to a compare/select sequence.
SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const
Expand rotations.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
SDValue expandCMP(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]CMP.
void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi, SelectionDAG &DAG) const
Expand shift-by-parts.
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
This method will be invoked for all target nodes and for any target-independent nodes that the target...
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT].
SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][MIN|MAX].
virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis, Register R, KnownBits &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::U(ADD|SUB)O.
SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::SDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SREM lowering for power-of-2 denominators.
bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand UINT(i64) to double(f64) conversion.
bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS, SDValue RHS, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes, respectively,...
SDValue expandAVG(SDNode *N, SelectionDAG &DAG) const
Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes.
Primary interface to the complete machine description for the target machine.
bool isPositionIndependent() const
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
iterator_range< regclass_iterator > regclasses() const
virtual StringRef getRegAsmName(MCRegister Reg) const
Return the assembly name for Reg.
bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const
Return true if the given TargetRegisterClass has the ValueType T.
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
const fltSemantics & getFltSemantics() const
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isIntegerTy() const
True if this is an instance of IntegerType.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
StringRef getName() const
Return a constant reference to the value's name.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false)
Hook for matching ConstantSDNode predicate.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
FPClassTest invertFPClassTestIfSimpler(FPClassTest Test)
Evaluates if the specified FP class test is better performed as the inverse (i.e.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
auto find_if_not(R &&Range, UnaryPredicate P)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
constexpr unsigned BitWidth
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
APFloat neg(APFloat X)
Returns the negated value of the argument.
unsigned Log2(Align A)
Returns the log2 of the alignment.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
opStatus
IEEE-754R 7: Default exception handling.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
EVT getHalfSizedIntegerVT(LLVMContext &Context) const
Finds the smallest simple value type that is greater than or equal to half the width of this EVT.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
int MatchingInput
MatchingInput - If this is not -1, this is an output constraint where an input constraint is required...
ConstraintCodeVector Codes
Code - The constraint code, either the register name (in braces) or the constraint letter/number.
SubConstraintInfoVector multipleAlternatives
multipleAlternatives - If there are multiple alternative constraints, this array will contain them.
bool isIndirect
isIndirect - True if this operand is an indirect operand.
bool hasMatchingInput() const
hasMatchingInput - Return true if this is an output constraint that has a matching input constraint.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
bool isUnknown() const
Returns true if we don't know any bits.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
static std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding or subtracting (per the Add flag) LHS and RHS.
static std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the extended bits.
static std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
static std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasNoUnsignedWrap() const
bool hasNoSignedWrap() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Magic data for optimising signed division by a constant.
unsigned ShiftAmount
shift amount
static SignedDivisionByConstantInfo get(const APInt &D)
Calculate the magic numbers required to implement a signed integer division by a constant as a sequence of multiplies, adds and shifts.
This contains information for each constraint that we are lowering.
MVT ConstraintVT
The ValueType for the operand value.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g. Register, RegisterClass, Memory, Other, or Unknown.
std::string ConstraintCode
This contains the actual string for the code, like "m".
Value * CallOperandVal
If this is the result output operand or a clobber, this is null; otherwise it is the incoming operand to the inline-asm call. This gets modified as the asm is processed.
unsigned getMatchedOperand() const
If this is an input matching constraint, this method returns the output operand it matches.
bool isMatchingInputConstraint() const
Return true if this is an input operand that is a matching constraint like "4".
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setIsPostTypeLegalization(bool Value=true)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)
bool isBeforeLegalizeOps() const
void AddToWorklist(SDNode *N)
bool isCalledByLegalizer() const
bool isBeforeLegalize() const
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setIsPostTypeLegalization(bool Value=true)
ArrayRef< EVT > OpsVTBeforeSoften
bool IsPostTypeLegalization
MakeLibCallOptions & setSExt(bool Value=true)
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT, bool Value=true)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)
bool LegalOperations() const
Magic data for optimising unsigned division by a constant.
unsigned PreShift
pre-shift amount
static UnsignedDivisionByConstantInfo get(const APInt &D, unsigned LeadingZeros=0, bool AllowEvenDivisorOptimization=true)
Calculate the magic numbers required to implement an unsigned integer division by a constant as a sequence of multiplies, adds and shifts.
unsigned PostShift
post-shift amount