#include "llvm/IR/IntrinsicsLoongArch.h"

#define DEBUG_TYPE "loongarch-isel-lowering"

    cl::desc("Maximum number of instructions used (including code sequence "
             "to generate the value and moving the value to FPR) when "
             "materializing floating-point immediates (default = 3)"),
               "Materialize FP immediate within 2 instructions"),
               "Materialize FP immediate within 3 instructions"),
               "Materialize FP immediate within 4 instructions"),
               "Materialize FP immediate within 5 instructions"),
               "Materialize FP immediate within 6 instructions "
               "(behaves same as 5 on loongarch64)")));
    cl::desc("Trap on integer division by zero."),
  MVT GRLenVT = Subtarget.getGRLenVT();
  if (Subtarget.hasBasicF())
  if (Subtarget.hasBasicD())
      MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64};
      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64};
  if (Subtarget.hasExtLSX())
  if (Subtarget.hasExtLASX())
    for (MVT VT : LASXVTs)

  if (Subtarget.is64Bit()) {
  if (!Subtarget.is64Bit()) {
  if (Subtarget.hasBasicD())
  if (Subtarget.hasBasicF()) {
    if (Subtarget.is64Bit())
  if (!Subtarget.hasBasicD()) {
    if (Subtarget.is64Bit()) {
  if (Subtarget.hasBasicD()) {
    if (Subtarget.is64Bit())

  if (Subtarget.hasExtLSX()) {
    for (MVT VT : LSXVTs) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
    for (MVT VT : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
    for (MVT VT : {MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v4f32, MVT::v2f64}) {
         {MVT::v16i8, MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v8i16, MVT::v4i16,
          MVT::v2i16, MVT::v4i32, MVT::v2i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16})
    for (MVT VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64, MVT::v16i32, MVT::v8i64,

  if (Subtarget.hasExtLASX()) {
    for (MVT VT : LASXVTs) {
    for (MVT VT : {MVT::v4i64, MVT::v8i32, MVT::v16i16, MVT::v32i8}) {
    for (MVT VT : {MVT::v32i8, MVT::v16i16, MVT::v8i32})
    for (MVT VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64})
    for (MVT VT : {MVT::v8i32, MVT::v4i32, MVT::v4i64}) {
    for (MVT VT : {MVT::v8f32, MVT::v4f64}) {
    for (MVT VT : {MVT::v4i64, MVT::v8i32, MVT::v16i16}) {
         {MVT::v2i64, MVT::v4i32, MVT::v4i64, MVT::v8i16, MVT::v8i32}) {

  if (Subtarget.hasBasicF()) {
  if (Subtarget.hasExtLSX()) {
  if (Subtarget.hasExtLASX()) {
  if (Subtarget.hasLAMCAS())
  if (Subtarget.hasSCQ()) {
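// LowerOperation dispatch: every opcode marked Custom above is routed to a
// dedicated lower* helper below.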
  switch (Op.getOpcode()) {
    return lowerATOMIC_FENCE(Op, DAG);
    return lowerEH_DWARF_CFA(Op, DAG);
    return lowerGlobalAddress(Op, DAG);
    return lowerGlobalTLSAddress(Op, DAG);
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
    return lowerINTRINSIC_VOID(Op, DAG);
    return lowerBlockAddress(Op, DAG);
    return lowerJumpTable(Op, DAG);
    return lowerShiftLeftParts(Op, DAG);
    return lowerShiftRightParts(Op, DAG, true);
    return lowerShiftRightParts(Op, DAG, false);
    return lowerConstantPool(Op, DAG);
    return lowerFP_TO_SINT(Op, DAG);
    return lowerBITCAST(Op, DAG);
    return lowerUINT_TO_FP(Op, DAG);
    return lowerSINT_TO_FP(Op, DAG);
    return lowerVASTART(Op, DAG);
    return lowerFRAMEADDR(Op, DAG);
    return lowerRETURNADDR(Op, DAG);
    return lowerWRITE_REGISTER(Op, DAG);
    return lowerINSERT_VECTOR_ELT(Op, DAG);
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
    return lowerBUILD_VECTOR(Op, DAG);
    return lowerCONCAT_VECTORS(Op, DAG);
    return lowerVECTOR_SHUFFLE(Op, DAG);
    return lowerBITREVERSE(Op, DAG);
    return lowerSCALAR_TO_VECTOR(Op, DAG);
    return lowerPREFETCH(Op, DAG);
    return lowerSELECT(Op, DAG);
    return lowerBRCOND(Op, DAG);
    return lowerFP_TO_FP16(Op, DAG);
    return lowerFP16_TO_FP(Op, DAG);
    return lowerFP_TO_BF16(Op, DAG);
    return lowerBF16_TO_FP(Op, DAG);
    return lowerVECREDUCE_ADD(Op, DAG);
    return lowerRotate(Op, DAG);
    return lowerVECREDUCE(Op, DAG);
    return lowerConstantFP(Op, DAG);
    return lowerSETCC(Op, DAG);
    return lowerFP_ROUND(Op, DAG);
    return lowerFP_EXTEND(Op, DAG);
    return lowerSIGN_EXTEND_VECTOR_INREG(Op, DAG);
  EVT VT = V.getValueType();
    return V.getOperand(0);
      (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
      Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
    if (!V->isOnlyUserOf(SplatValue.getNode()))
      Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
      V.getOperand(0).hasOneUse() && V.getOperand(1).hasOneUse()) {
        (N->getOpcode() == LoongArchISD::VPACKEV) ||
        (N->getOpcode() == LoongArchISD::VPERMI)) &&
  if (Opcode0 != Opcode1)
  if (Opcode0 != ISD::FP_ROUND && Opcode0 != LoongArchISD::VFCVT)
  EVT VT = N.getValueType();
  if (Subtarget.hasExtLASX() && VT.is256BitVector() && SVT0 == MVT::v4f32 &&
      SSVT0 == MVT::v4f64) {
  if ((N->getOpcode() == LoongArchISD::VPACKEV ||
       N->getOpcode() == LoongArchISD::VPERMI) &&
      Opcode0 == LoongArchISD::VFCVT) {
  if (!Subtarget.hasExtLSX() || SVT0 != MVT::v4f32 || SSVT0 != MVT::v2f64)
  if (N->getOpcode() == LoongArchISD::VPACKEV &&
      (VT == MVT::v2i64 || VT == MVT::v2f64)) {
  if (N->getOpcode() == LoongArchISD::VPERMI && VT == MVT::v4f32) {
  MVT VT = Op.getSimpleValueType();
  MVT SVT = In.getSimpleValueType();
  if (VT == MVT::v4f32 && SVT == MVT::v4f64) {
  EVT VT = Op.getValueType();
  EVT SVT = Src.getValueType();
      VT == MVT::v2f64 && SVT == MVT::v2f32 && Subtarget.hasExtLSX();
      VT == MVT::v4f64 && SVT == MVT::v4f32 && Subtarget.hasExtLASX();
  if (!V2F32ToV2F64 && !V4F32ToV4F64)
  const uint64_t Imm = Op.getConstantOperandVal(1);
  if (SDValue V = CheckVecHighPart(Src)) {
           "Unexpected wide vector");
    Opcode = LoongArchISD::VFCVTH;
    Opcode = LoongArchISD::VFCVTL;
                      DAG.getUNDEF(WideOpVT), Src, ZeroIdx);
  return DAG.getNode(Opcode, DL, VT, VFCVTOp);
  SmallVector<int, 8> Mask = {0, 1, 4, 5, 2, 3, 6, 7};
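// lowerConstantFP: build the bit pattern in integer registers with a
// load-immediate sequence (Seq), then move it to an FPR; on LA32 an f64
// needs its lo/hi halves moved separately (MOVGR2FR_D_LO_HI).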
  EVT VT = Op.getValueType();
  assert((VT == MVT::f32 && Subtarget.hasBasicF()) ||
         (VT == MVT::f64 && Subtarget.hasBasicD()));
  int InsNum = Seq.size() + ((VT == MVT::f64 && !Subtarget.is64Bit()) ? 2 : 1);
  if (Subtarget.is64Bit())
    return DAG.getNode(Subtarget.is64Bit() ? LoongArchISD::MOVGR2FR_W_LA64
                                           : LoongArchISD::MOVGR2FR_W,
  if (Subtarget.is64Bit()) {
    return DAG.getNode(LoongArchISD::MOVGR2FR_D, DL, VT, NewVal);
  return DAG.getNode(LoongArchISD::MOVGR2FR_D_LO_HI, DL, VT, Lo, Hi);
  EVT ResultVT = Op.getValueType();
  EVT OperandVT = Op.getOperand(0).getValueType();
  if (ResultVT == SetCCResultVT)
  assert(Op.getOperand(0).getValueType() == Op.getOperand(1).getValueType() &&
         "SETCC operands must have the same type!");
                       Op.getOperand(1), Op.getOperand(2));
  if (ResultVT.bitsGT(SetCCResultVT))
  else if (ResultVT.bitsLT(SetCCResultVT))
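// SIGN_EXTEND_VECTOR_INREG: the low source elements are widened by
// interleaving them with a mask vector via VILVL (presumably carrying the
// sign bits), yielding the destination element type.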
SDValue LoongArchTargetLowering::lowerSIGN_EXTEND_VECTOR_INREG(
  MVT SrcVT = Src.getSimpleValueType();
  MVT DstVT = Op.getSimpleValueType();
      DAG.getNode(LoongArchISD::VILVL, DL, SrcVT, Mask, Src);
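// VECREDUCE_ADD: log2(NumEles) rounds of widening horizontal adds (VHADDW),
// doubling the element width each round; 256-bit LASX vectors additionally
// fold the high 128-bit half into the low half.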
  MVT OpVT = Op.getSimpleValueType();
  unsigned LegalVecSize = 128;
  bool isLASX256Vector =
  if (isLASX256Vector) {
  for (unsigned i = 1; i < NumEles; i *= 2, EleBits *= 2) {
    Val = DAG.getNode(LoongArchISD::VHADDW, DL, VecTy, Val, Val);
  if (isLASX256Vector) {
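// Generic VECREDUCE (min/max/and/or/..., dispatched via Opcode): halve the
// element count each iteration by combining the vector with a shuffled copy
// of itself.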
  MVT OpVT = Op.getSimpleValueType();
  MVT GRLenVT = Subtarget.getGRLenVT();
  for (int i = NumEles; i > 1; i /= 2) {
    Val = DAG.getNode(Opcode, DL, VecTy, Tmp, Val);
  unsigned IsData = Op.getConstantOperandVal(4);
    return Op.getOperand(0);
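// Rotates: a constant-splat amount that is a multiple of the element width is
// a no-op; otherwise the amount is reduced modulo the element width (urem)
// before the rotate node is emitted.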
  MVT VT = Op.getSimpleValueType();
  unsigned Opcode = Op.getOpcode();
  auto checkCstSplat = [](SDValue V, APInt &CstSplatValue) {
    CstSplatValue = C->getAPIntValue();
  APInt CstSplatValue;
  bool IsCstSplat = checkCstSplat(Amt, CstSplatValue);
  if (IsCstSplat && CstSplatValue.urem(EltSizeInBits) == 0)
  return DAG.getNode(Opcode, DL, VT, R, Urem);
  if (LHS == LHS2 && RHS == RHS2) {
  } else if (LHS == RHS2 && RHS == LHS2) {
  return std::nullopt;
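// Select combines: constant arms related by bitwise-not or differing by one,
// and binops whose operand is a select with a constant arm, are folded into
// cheaper sequences.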
  MVT VT = N->getSimpleValueType(0);
  if (~TrueVal == FalseVal) {
  unsigned SelOpNo = 0;
  unsigned ConstSelOpNo = 1;
  unsigned OtherSelOpNo = 2;
  if (!ConstSelOpNode || ConstSelOpNode->isOpaque())
  if (!ConstBinOpNode || ConstBinOpNode->isOpaque())
  SDValue NewConstOps[2] = {ConstSelOp, ConstBinOp};
    std::swap(NewConstOps[0], NewConstOps[1]);
  SDValue NewNonConstOps[2] = {OtherSelOp, ConstBinOp};
    std::swap(NewNonConstOps[0], NewNonConstOps[1]);
  SDValue NewT = (ConstSelOpNo == 1) ? NewConstOp : NewNonConstOp;
  SDValue NewF = (ConstSelOpNo == 1) ? NewNonConstOp : NewConstOp;
    ShAmt = LHS.getValueSizeInBits() - 1 - Log2_64(Mask);
    int64_t C = RHSC->getSExtValue();
  MVT VT = Op.getSimpleValueType();
  MVT GRLenVT = Subtarget.getGRLenVT();
  if (Op.hasOneUse()) {
    unsigned UseOpc = Op->user_begin()->getOpcode();
      SDNode *BinOp = *Op->user_begin();
        return lowerSELECT(NewSel, DAG);
    return DAG.getNode(LoongArchISD::SELECT_CC, DL, VT, Ops);
    if (TrueVal - 1 == FalseVal)
    if (TrueVal + 1 == FalseVal)
      RHS == TrueV && LHS == FalseV) {
  return DAG.getNode(LoongArchISD::SELECT_CC, DL, VT, Ops);
  MVT GRLenVT = Subtarget.getGRLenVT();
    return DAG.getNode(LoongArchISD::BR_CC, DL, Op.getValueType(),
                       Op.getOperand(0), LHS, RHS, TargetCC,
  return DAG.getNode(LoongArchISD::BRCOND, DL, Op.getValueType(),
                     Op.getOperand(0), CondV, Op.getOperand(2));
  return DAG.getNode(LoongArchISD::BR_CC, DL, Op.getValueType(),
LoongArchTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
  MVT OpVT = Op.getSimpleValueType();
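// BITREVERSE: reverse the bits inside each widened chunk with a BITREV_8B (or
// element-width BITREV_*) node, then restore element order with a vector
// shuffle whose mask reverses the elements within each chunk.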
  EVT ResTy = Op->getValueType(0);
  if (!Subtarget.is64Bit() && (ResTy == MVT::v16i8 || ResTy == MVT::v32i8))
  for (unsigned int i = 0; i < NewEltNum; i++) {
    unsigned RevOp = (ResTy == MVT::v16i8 || ResTy == MVT::v32i8)
                         ? (unsigned)LoongArchISD::BITREV_8B
  for (unsigned int i = 0; i < NewEltNum; i++)
    for (int j = OrigEltNum / NewEltNum - 1; j >= 0; j--)
      Mask.push_back(j + (OrigEltNum / NewEltNum) * i);
  if (EltBits > 32 || EltBits == 1)
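// Shuffle-as-shift matching: CheckZeros verifies that the positions shifted
// in are zeroable, and MatchShift checks the surviving elements form a
// contiguous run; element shifts use VSLLI/VSRLI, whole-register byte shifts
// use VBSLL/VBSRL.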
                                int MaskOffset, const APInt &Zeroable) {
  int Size = Mask.size();
  unsigned SizeInBits = Size * ScalarSizeInBits;

  auto CheckZeros = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i < Size; i += Scale)
      for (int j = 0; j < Shift; ++j)
        if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
    for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
      if (!(Mask[i] == -1 || Mask[i] == Low))

  auto MatchShift = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i != Size; i += Scale) {
      unsigned Pos = Left ? i + Shift : i;
      unsigned Low = Left ? i : i + Shift;
      unsigned Len = Scale - Shift;
    int ShiftEltBits = ScalarSizeInBits * Scale;
    bool ByteShift = ShiftEltBits > 64;
    Opcode = Left ? (ByteShift ? LoongArchISD::VBSLL : LoongArchISD::VSLLI)
                  : (ByteShift ? LoongArchISD::VBSRL : LoongArchISD::VSRLI);
    int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
    Scale = ByteShift ? Scale / 2 : Scale;
    return (int)ShiftAmt;

  unsigned MaxWidth = 128;
  for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
    for (int Shift = 1; Shift != Scale; ++Shift)
      for (bool Left : {true, false})
        if (CheckZeros(Shift, Scale, Left)) {
          int ShiftAmt = MatchShift(Shift, Scale, Left);
                                      const APInt &Zeroable) {
  int Size = Mask.size();
      Mask, Size, Zeroable);
         "Illegal integer vector type");
template <typename ValType>
                          unsigned CheckStride,
                          ValType ExpectedIndex, unsigned ExpectedIndexStride) {
    if (*I != -1 && *I != ExpectedIndex)
    ExpectedIndex += ExpectedIndexStride;
    for (unsigned n = 0; n < CheckStride && I != End; ++n, ++I)
  int Size = Mask.size();
  int ScalarSizeInBits = VectorSizeInBits / Size;
  assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
  (void)ScalarSizeInBits;

  for (int i = 0; i < Size; ++i) {
    if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
  RepeatedMask.assign(LaneSize, -1);
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i) {
    assert(Mask[i] == -1 || Mask[i] >= 0);
    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
        Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
    if (RepeatedMask[i % LaneSize] < 0)
      RepeatedMask[i % LaneSize] = LocalM;
    else if (RepeatedMask[i % LaneSize] != LocalM)
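// Byte-rotation matching: every defined mask element must imply the same
// rotation amount and a consistent Lo/Hi source assignment; the result is
// scaled to bytes (Rotation * Scale).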
  int NumElts = RepeatedMask.size();
  int Scale = 16 / NumElts;

  for (int i = 0; i < NumElts; ++i) {
    int M = RepeatedMask[i];
    assert((M == -1 || (0 <= M && M < (2 * NumElts))) &&
           "Unexpected mask index.");
    int StartIdx = i - (M % NumElts);
    int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
      Rotation = CandidateRotation;
    else if (Rotation != CandidateRotation)
    SDValue MaskV = M < NumElts ? V1 : V2;
    else if (TargetV != MaskV)
  assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((Lo || Hi) && "Failed to find a rotated input vector!");
  return Rotation * Scale;
  if (ByteRotation <= 0)
  int LoByteShift = 16 - ByteRotation;
  int HiByteShift = ByteRotation;
                                 const APInt &Zeroable) {
    for (int i = 0; i < NumElements; i++) {
      if (i % Scale != 0) {
      SDValue V = M < NumElements ? V1 : V2;
      M = M % NumElements;
        Offset = M - (i / Scale);
        if (Offset % (NumElements / Scale))
      } else if (InputV != V)
      if (M != (Offset + (i / Scale)))
    unsigned VilVLoHi = LoongArchISD::VILVL;
    if (Offset >= (NumElements / 2)) {
      VilVLoHi = LoongArchISD::VILVH;
      Offset -= (NumElements / 2);
      InputV = DAG.getNode(VilVLoHi, DL, InputVT, Ext, InputV);
  } while (Scale > 1);
  for (int NumExtElements = Bits / 64; NumExtElements < NumElements;
       NumExtElements *= 2) {
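// Splat shuffles: a mask that replicates a single element of V1 lowers to
// VREPLVEI with the element index as an immediate.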
  int SplatIndex = -1;
  for (const auto &M : Mask) {
  if (SplatIndex == -1)

  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
    return DAG.getNode(LoongArchISD::VREPLVEI, DL, VT, V1,
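// VSHUF4I: every group of four consecutive elements must pick from its group
// with one shared pattern, packed into an 8-bit immediate (2 bits per
// element, built from SubMask).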
  unsigned SubVecSize = 4;
  if (VT == MVT::v2f64 || VT == MVT::v2i64)
  int SubMask[4] = {-1, -1, -1, -1};
  for (unsigned i = 0; i < SubVecSize; ++i) {
    for (unsigned j = i; j < Mask.size(); j += SubVecSize) {
      M -= 4 * (j / SubVecSize);
      if (M < 0 || M >= 4)
      if (SubMask[i] == -1)
      else if (M != -1 && M != SubMask[i])
  for (int i = SubVecSize - 1; i >= 0; --i) {
  if (VT == MVT::v2f64 || VT == MVT::v2i64)
    return DAG.getNode(LoongArchISD::VSHUF4I_D, DL, VT, V1, V2,
  return DAG.getNode(LoongArchISD::VSHUF4I, DL, VT, V1,
  if (VT != MVT::v16i8 && VT != MVT::v8i16 && VT != MVT::v32i8 &&
  for (int i = 0; i < WidenNumElts; ++i)
    WidenMask[i] = WidenNumElts - 1 - i;
  return DAG.getNode(LoongArchISD::VSHUF4I, DL, VT,
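// The matchers below recognize the classic LSX patterns: VPACKEV/VPACKOD take
// the even/odd elements of both sources, VILVL/VILVH interleave the low/high
// halves, and VPICKEV/VPICKOD concatenate the even/odd elements.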
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;
    return DAG.getNode(LoongArchISD::VPACKEV, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;
    return DAG.getNode(LoongArchISD::VPACKOD, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;
    return DAG.getNode(LoongArchISD::VILVH, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;
    return DAG.getNode(LoongArchISD::VILVL, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;
    return DAG.getNode(LoongArchISD::VPICKEV, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;
    return DAG.getNode(LoongArchISD::VPICKOD, DL, VT, V2, V1);
  if (Mask.size() != NumElts)

  auto tryLowerToExtrAndIns = [&](unsigned Base) -> SDValue {
    for (unsigned i = 0; i < NumElts; ++i) {
      if (Mask[i] != int(Base + i)) {
    int DiffMask = Mask[DiffPos];
    if (DiffMask < 0 || DiffMask >= int(2 * NumElts))
    if (unsigned(DiffMask) < NumElts) {
      SrcIdx = unsigned(DiffMask) - NumElts;
  if (SDValue Result = tryLowerToExtrAndIns(0))
  return tryLowerToExtrAndIns(NumElts);
                                unsigned &MaskImm) {
  unsigned MaskSize = Mask.size();

  auto isValid = [&](int M, int Off) {
    return (M == -1) || (M >= Off && M < Off + 4);

  auto buildImm = [&](int MLo, int MHi, unsigned Off, unsigned I) {
    auto immPart = [&](int M, unsigned Off) {
      return (M == -1 ? 0 : (M - Off)) & 0x3;
    MaskImm |= immPart(MLo, Off) << (I * 2);
    MaskImm |= immPart(MHi, Off) << ((I + 1) * 2);

  for (unsigned i = 0; i < 4; i += 2) {
    int MHi = Mask[i + 1];
    if (MaskSize == 8) {
      int M2Lo = Mask[i + 4];
      int M2Hi = Mask[i + 5];
      if (M2Lo != MLo + 4 || M2Hi != MHi + 4)
      buildImm(MLo, MHi, 0, i);
      buildImm(MLo, MHi, MaskSize, i);
  if ((VT != MVT::v4i32 && VT != MVT::v4f32) ||
  unsigned MaskImm = 0;
  return DAG.getNode(LoongArchISD::VPERMI, DL, VT, SrcVec[1], SrcVec[0],
  return DAG.getNode(LoongArchISD::VSHUF, DL, VT, MaskVec, V2, V1);
2590 "Vector type is unsupported for lsx!");
2592 "Two operands have different types!");
2594 "Unexpected mask size for shuffle!");
2595 assert(Mask.size() % 2 == 0 &&
"Expected even mask size.");
2597 APInt KnownUndef, KnownZero;
2599 APInt Zeroable = KnownUndef | KnownZero;
  int SplatIndex = -1;
  for (const auto &M : Mask) {
  if (SplatIndex == -1)

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  int HalfSize = Mask.size() / 2;
  if (SplatIndex >= HalfSize)

  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
    return DAG.getNode(LoongArchISD::VREPLVEI, DL, VT, V1,
  if (Mask.size() == 4) {
    unsigned MaskImm = 0;
    for (int i = 1; i >= 0; --i) {
      int MHi = Mask[i + 2];
      if (!(MLo == -1 || (MLo >= 0 && MLo <= 1) || (MLo >= 4 && MLo <= 5)) ||
          !(MHi == -1 || (MHi >= 2 && MHi <= 3) || (MHi >= 6 && MHi <= 7)))
      if (MHi != -1 && MLo != -1 && MHi != MLo + 2)
        MaskImm |= ((MLo <= 1) ? MLo : (MLo - 2)) & 0x3;
        MaskImm |= ((MHi <= 3) ? (MHi - 2) : (MHi - 4)) & 0x3;
    return DAG.getNode(LoongArchISD::VSHUF4I_D, DL, VT, V1, V2,
  unsigned MaskSize = Mask.size();
  if (VT == MVT::v8i32 || VT == MVT::v8f32) {
    unsigned MaskImm = 0;
    return DAG.getNode(LoongArchISD::VPERMI, DL, VT, SrcVec[1], SrcVec[0],
  if (VT == MVT::v4i64 || VT == MVT::v4f64) {
    unsigned MaskImm = 0;
    for (unsigned i = 0; i < MaskSize; ++i) {
      if (Mask[i] >= (int)MaskSize)
      MaskImm |= Mask[i] << (i * 2);
    return DAG.getNode(LoongArchISD::XVPERMI, DL, VT, V1,
  if (Mask.size() != 8 || (VT != MVT::v8i32 && VT != MVT::v8f32))
  unsigned HalfSize = NumElts / 2;
  bool FrontLo = true, FrontHi = true;
  bool BackLo = true, BackHi = true;

  auto inRange = [](int val, int low, int high) {
    return (val == -1) || (val >= low && val < high);

  for (unsigned i = 0; i < HalfSize; ++i) {
    int Fronti = Mask[i];
    int Backi = Mask[i + HalfSize];
    FrontLo &= inRange(Fronti, 0, HalfSize);
    FrontHi &= inRange(Fronti, HalfSize, NumElts);
    BackLo &= inRange(Backi, 0, HalfSize);
    BackHi &= inRange(Backi, HalfSize, NumElts);
  if ((FrontLo || FrontHi) && (BackLo || BackHi))
  for (unsigned i = 0; i < NumElts; ++i)
  return DAG.getNode(LoongArchISD::XVPERM, DL, VT, V1, MaskVec);
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  unsigned LeftSize = HalfSize / 2;
  SDValue OriV1 = V1, OriV2 = V2;
                         Mask.size() + HalfSize - LeftSize, 1) &&
                         Mask.size() + HalfSize + LeftSize, 1))
                         Mask.size() + HalfSize - LeftSize, 1) &&
                         Mask.size() + HalfSize + LeftSize, 1))
    return DAG.getNode(LoongArchISD::VILVH, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;
                         Mask.size() + HalfSize, 1))
                         Mask.size() + HalfSize, 1))
    return DAG.getNode(LoongArchISD::VILVL, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;
    return DAG.getNode(LoongArchISD::VPICKEV, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;
    return DAG.getNode(LoongArchISD::VPICKOD, DL, VT, V2, V1);
  int HalfSize = NumElts / 2;
  if ((int)Mask.size() != NumElts)

  auto tryLowerToExtrAndIns = [&](int Base) -> SDValue {
    for (int i = 0; i < NumElts; ++i) {
      if (Mask[i] != Base + i) {
        if (DiffPos.size() > 2)
    if (DiffPos.size() == 1) {
      if (DiffPos[0] < HalfSize && Mask[DiffPos[0] + HalfSize] == -1)
        DiffPos.push_back(DiffPos[0] + HalfSize);
      else if (DiffPos[0] >= HalfSize && Mask[DiffPos[0] - HalfSize] == -1)
        DiffPos.insert(DiffPos.begin(), DiffPos[0] - HalfSize);
    if (DiffPos.size() != 2 || DiffPos[1] != DiffPos[0] + HalfSize)
    int DiffMaskLo = Mask[DiffPos[0]];
    int DiffMaskHi = Mask[DiffPos[1]];
    DiffMaskLo = DiffMaskLo == -1 ? DiffMaskHi - HalfSize : DiffMaskLo;
    DiffMaskHi = DiffMaskHi == -1 ? DiffMaskLo + HalfSize : DiffMaskHi;
    if (!(DiffMaskLo >= 0 && DiffMaskLo < HalfSize) &&
        !(DiffMaskLo >= NumElts && DiffMaskLo < NumElts + HalfSize))
    if (!(DiffMaskHi >= HalfSize && DiffMaskHi < NumElts) &&
        !(DiffMaskHi >= NumElts + HalfSize && DiffMaskHi < 2 * NumElts))
    if (DiffMaskHi != DiffMaskLo + HalfSize)
    SDValue SrcVec = (DiffMaskLo < HalfSize) ? V1 : V2;
        (DiffMaskLo < HalfSize) ? DiffMaskLo : (DiffMaskLo - NumElts);
  if (SDValue Result = tryLowerToExtrAndIns(0))
  return tryLowerToExtrAndIns(NumElts);
  if (VT != MVT::v8i32 && VT != MVT::v8f32 && VT != MVT::v4i64 &&
  int MaskSize = Mask.size();

  auto checkReplaceOne = [&](int Base, int Replaced) -> int {
    for (int i = 0; i < MaskSize; ++i) {
      if (Mask[i] == Base + i || Mask[i] == -1)
      if (Mask[i] != Replaced)
  int Idx = checkReplaceOne(0, MaskSize);
    return DAG.getNode(LoongArchISD::XVINSVE0, DL, VT, V1, V2,
  Idx = checkReplaceOne(MaskSize, 0);
    return DAG.getNode(LoongArchISD::XVINSVE0, DL, VT, V2, V1,
  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + HalfSize;
  const auto &End = Mask.end();
  for (auto it = Begin; it < Mid; it++) {
    else if ((*it >= 0 && *it < HalfSize) ||
             (*it >= MaskSize && *it < MaskSize + HalfSize)) {
      int M = *it < HalfSize ? *it : *it - HalfSize;
  assert((int)MaskAlloc.size() == HalfSize && "xvshuf convert failed!");
  for (auto it = Mid; it < End; it++) {
    else if ((*it >= HalfSize && *it < MaskSize) ||
             (*it >= MaskSize + HalfSize && *it < MaskSize * 2)) {
      int M = *it < MaskSize ? *it - HalfSize : *it - MaskSize;
  assert((int)MaskAlloc.size() == MaskSize && "xvshuf convert failed!");
  return DAG.getNode(LoongArchISD::VSHUF, DL, VT, MaskVec, V2, V1);
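// 256-bit shuffles: classify each half of the mask as reading the high or low
// 128-bit lane (HighLaneTy/LowLaneTy); XVPERMI first swaps or duplicates
// lanes so that a lane-local shuffle can finish the job, and the mask is
// rebased accordingly.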
  enum HalfMaskType { HighLaneTy, LowLaneTy, None };
  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;
  HalfMaskType preMask = None, postMask = None;

  if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
    preMask = HighLaneTy;
  else if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
    preMask = LowLaneTy;

  if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
        return M < 0 || (M >= HalfSize && M < MaskSize) ||
               (M >= MaskSize + HalfSize && M < MaskSize * 2);
    postMask = LowLaneTy;
  else if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
             return M < 0 || (M >= 0 && M < HalfSize) ||
                    (M >= MaskSize && M < MaskSize + HalfSize);
    postMask = HighLaneTy;

  if (preMask == HighLaneTy && postMask == LowLaneTy) {
  if (preMask == LowLaneTy && postMask == HighLaneTy) {
    V1 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V1,
    V2 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V2,
    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
  } else if (preMask == LowLaneTy && postMask == LowLaneTy) {
    V1 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V1,
    V2 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V2,
    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
  } else if (preMask == HighLaneTy && postMask == HighLaneTy) {
    V1 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V1,
    V2 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V2,
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
  int Size = Mask.size();
  int LaneSize = Size / 2;

  bool LaneCrossing[2] = {false, false};
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
      LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
  if (!LaneCrossing[0] && !LaneCrossing[1])

  InLaneMask.assign(Mask.begin(), Mask.end());
  for (int i = 0; i < Size; ++i) {
    int &M = InLaneMask[i];
    if (((M % Size) / LaneSize) != (i / LaneSize))
      M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
                               DAG.getUNDEF(MVT::v4i64), {2, 3, 0, 1});
3329 "Vector type is unsupported for lasx!");
3331 "Two operands have different types!");
3333 "Unexpected mask size for shuffle!");
3334 assert(Mask.size() % 2 == 0 &&
"Expected even mask size.");
3335 assert(Mask.size() >= 4 &&
"Mask size is less than 4.");
3337 APInt KnownUndef, KnownZero;
3339 APInt Zeroable = KnownUndef | KnownZero;
  ArrayRef<int> OrigMask = SVOp->getMask();
  MVT VT = Op.getSimpleValueType();
  bool V1IsUndef = V1.isUndef();
  bool V2IsUndef = V2.isUndef();
  if (V1IsUndef && V2IsUndef)
      any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
    SmallVector<int, 8> NewMask(OrigMask);
    for (int &M : NewMask)
      if (M >= NumElements)
  int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
  (void)MaskUpperLimit;
         [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
         "Out of bounds shuffle index");
  std::tie(Res, Chain) =
      makeLibCall(DAG, LC, MVT::f32, Op0, CallOptions, DL, Chain);
  if (Subtarget.is64Bit())
    return DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Res);
  SDValue Arg = Subtarget.is64Bit() ? DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64,
  std::tie(Res, Chain) = makeLibCall(DAG, RTLIB::FPEXT_F16_F32, MVT::f32, Arg,
                                     CallOptions, DL, Chain);
  assert(Subtarget.hasBasicF() && "Unexpected custom legalization");
      makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
  if (Subtarget.is64Bit())
    return DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Res);
  assert(Subtarget.hasBasicF() && "Unexpected custom legalization");
  MVT VT = Op.getSimpleValueType();
  SDValue Res = Subtarget.is64Bit() ? DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64,
3550 "Unsupported vector type for broadcast.");
3553 bool IsIdeneity =
true;
3555 for (
int i = 0; i !=
NumOps; i++) {
3557 if (
Op.getOpcode() !=
ISD::LOAD || (IdentitySrc &&
Op != IdentitySrc)) {
3569 auto ExtType = LN->getExtensionType();
3574 assert(LN->isUnindexed() &&
"Unexpected indexed load.");
3579 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
3597 for (
unsigned i = 1; i <
Ops.size(); ++i) {
3611 EVT ResTy,
unsigned first) {
3614 assert(first + NumElts <= Node->getSimpleValueType(0).getVectorNumElements());
3617 Node->op_begin() + first + NumElts);
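// BUILD_VECTOR: constant splats of 8/16/32/64 bits are rebuilt as a splat of
// an appropriately sized integer vector (ViaVecTy) and bitcast back; repeated
// element sequences are broadcast with VREPLVEI/XVREPLVE0[Q]; other cases
// presumably fall back to per-element insertion.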
  MVT VT = Node->getSimpleValueType(0);
  EVT ResTy = Op->getValueType(0);
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool UseSameConstant = true;
  if ((!Subtarget.hasExtLSX() || !Is128Vec) &&
      (!Subtarget.hasExtLASX() || !Is256Vec))
  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
      SplatBitSize <= 64) {
    if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
    if (SplatBitSize == 64 && !Subtarget.is64Bit()) {
      if ((Is128Vec && ResTy == MVT::v4i32) ||
          (Is256Vec && ResTy == MVT::v8i32))
    switch (SplatBitSize) {
      ViaVecTy = Is128Vec ? MVT::v16i8 : MVT::v32i8;
      ViaVecTy = Is128Vec ? MVT::v8i16 : MVT::v16i16;
      ViaVecTy = Is128Vec ? MVT::v4i32 : MVT::v8i32;
      ViaVecTy = Is128Vec ? MVT::v2i64 : MVT::v4i64;
    if (ViaVecTy != ResTy)
    for (unsigned i = 0; i < NumElts; ++i) {
        ConstantValue = Opi;
      else if (ConstantValue != Opi)
        UseSameConstant = false;
    if (IsConstant && UseSameConstant && ResTy != MVT::v2f64) {
      for (unsigned i = 0; i < NumElts; ++i) {
    BitVector UndefElements;
    if (Node->getRepeatedSequence(Sequence, &UndefElements) &&
        UndefElements.count() == 0) {
      EVT FillTy = Is256Vec
          fillVector(Sequence, DAG, DL, Subtarget, FillVec, FillTy);
      unsigned SplatLen = NumElts / SeqLen;
      if (SplatEltTy == MVT::i128)
        SplatTy = MVT::v4i64;
          DAG.getNode((SplatEltTy == MVT::i128) ? LoongArchISD::XVREPLVE0Q
                                                : LoongArchISD::XVREPLVE0,
                      DL, SplatTy, SrcVec);
        SplatVec = DAG.getNode(LoongArchISD::VREPLVEI, DL, SplatTy, SrcVec,
  if (ResTy == MVT::v8i32 || ResTy == MVT::v8f32 || ResTy == MVT::v4i64 ||
      ResTy == MVT::v4f64) {
    unsigned NonUndefCount = 0;
    for (unsigned i = NumElts / 2; i < NumElts; ++i) {
      if (!Node->getOperand(i).isUndef()) {
        if (NonUndefCount > 1)
    if (NonUndefCount == 1)
                                 VecTy, NumElts / 2);
  MVT ResVT = Op.getSimpleValueType();
  unsigned NumFreezeUndef = 0;
  unsigned NumZero = 0;
  unsigned NumNonZero = 0;
  unsigned NonZeros = 0;
  SmallSet<SDValue, 4> Undefs;
  for (unsigned i = 0; i != NumOperands; ++i) {
    assert(i < sizeof(NonZeros) * CHAR_BIT);
  if (NumNonZero > 2) {
                     Ops.slice(0, NumOperands / 2));
                     Ops.slice(NumOperands / 2));
  MVT SubVT = Op.getOperand(0).getSimpleValueType();
  for (unsigned i = 0; i != NumOperands; ++i) {
    if ((NonZeros & (1 << i)) == 0)
LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
  MVT EltVT = Op.getSimpleValueType();
  MVT GRLenVT = Subtarget.getGRLenVT();
          ? DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64, DL, MVT::f32, Idx)
        DAG.getBitcast((VecTy == MVT::v4f64) ? MVT::v4i64 : VecTy, IdxVec);
        DAG.getNode(LoongArchISD::VSHUF, DL, VecTy, MaskVec, TmpVec, Vec);
        DAG.getNode(LoongArchISD::XVPERM, DL, VecTy, Vec, SplatIdx);
LoongArchTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
  MVT VT = Op.getSimpleValueType();
  if (!Subtarget.is64Bit() && IdxTy == MVT::i64) {
  for (unsigned i = 0; i < NumElts; ++i) {
  for (unsigned i = 0; i < NumElts; ++i) {
  for (unsigned i = 0; i < NumElts; ++i)
  if (Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i32) {
            "On LA64, only 64-bit registers can be written.");
    return Op.getOperand(0);
  if (!Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i64) {
            "On LA32, only 32-bit registers can be written.");
    return Op.getOperand(0);
4029 "be a constant integer");
4035 Register FrameReg = Subtarget.getRegisterInfo()->getFrameRegister(MF);
4036 EVT VT =
Op.getValueType();
4039 unsigned Depth =
Op.getConstantOperandVal(0);
4040 int GRLenInBytes = Subtarget.getGRLen() / 8;
4043 int Offset = -(GRLenInBytes * 2);
4055 if (
Op.getConstantOperandVal(0) != 0) {
4057 "return address can only be determined for the current frame");
4063 MVT GRLenVT = Subtarget.getGRLenVT();
4075 auto Size = Subtarget.getGRLen() / 8;
4083 auto *FuncInfo = MF.
getInfo<LoongArchMachineFunctionInfo>();
4093 MachinePointerInfo(SV));
  assert(Subtarget.is64Bit() && Subtarget.hasBasicF() &&
         !Subtarget.hasBasicD() && "unexpected target features");
  if (C && C->getZExtValue() < UINT64_C(0xFFFFFFFF))
  if (Op0->getOpcode() == LoongArchISD::BSTRPICK &&
  EVT RetVT = Op.getValueType();
  std::tie(Result, Chain) =
  assert(Subtarget.is64Bit() && Subtarget.hasBasicF() &&
         !Subtarget.hasBasicD() && "unexpected target features");
  EVT RetVT = Op.getValueType();
  std::tie(Result, Chain) =
  EVT VT = Op.getValueType();
  if (Op.getValueType() == MVT::f32 && Op0VT == MVT::i32 &&
      Subtarget.is64Bit() && Subtarget.hasBasicF()) {
    return DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64, DL, MVT::f32, NewOp0);
  if (VT == MVT::f64 && Op0VT == MVT::i64 && !Subtarget.is64Bit()) {
    return DAG.getNode(LoongArchISD::BUILD_PAIR_F64, DL, MVT::f64, Lo, Hi);
  if (Op.getValueSizeInBits() > 32 && Subtarget.hasBasicF() &&
      !Subtarget.hasBasicD()) {
    return DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Dst);
                                  N->getOffset(), Flags);

template <class NodeTy>
                                         bool IsLocal) const {
  assert(Subtarget.is64Bit() && "Large code model requires LA64");
  assert(N->getOffset() == 0 && "unexpected offset in global node");
  const GlobalValue *GV = N->getGlobal();
                                     unsigned Opc, bool UseGOT,
  MVT GRLenVT = Subtarget.getGRLenVT();
  if (Opc == LoongArch::PseudoLA_TLS_LE && !Large)
  Args.emplace_back(Load, CallTy);
  TargetLowering::CallLoweringInfo CLI(DAG);
  const GlobalValue *GV = N->getGlobal();

LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
  assert((!Large || Subtarget.is64Bit()) && "Large code model requires LA64");
  assert(N->getOffset() == 0 && "unexpected offset in global node");
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_GD_LARGE
                                   : LoongArch::PseudoLA_TLS_GD,
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_LD_LARGE
                                   : LoongArch::PseudoLA_TLS_LD,
    return getStaticTLSAddr(N, DAG,
                            Large ? LoongArch::PseudoLA_TLS_IE_LARGE
                                  : LoongArch::PseudoLA_TLS_IE,
    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE,
    return getTLSDescAddr(N, DAG,
                          Large ? LoongArch::PseudoLA_TLS_DESC_LARGE
                                : LoongArch::PseudoLA_TLS_DESC,
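// The template helper below validates that an intrinsic immediate fits in N
// bits (signed or unsigned) and otherwise emits an "argument out of range"
// diagnostic.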
template <unsigned N>
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");
LoongArchTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
  switch (Op.getConstantOperandVal(0)) {
  case Intrinsic::thread_pointer: {
  case Intrinsic::loongarch_lsx_vpickve2gr_d:
  case Intrinsic::loongarch_lsx_vpickve2gr_du:
  case Intrinsic::loongarch_lsx_vreplvei_d:
  case Intrinsic::loongarch_lasx_xvrepl128vei_d:
  case Intrinsic::loongarch_lsx_vreplvei_w:
  case Intrinsic::loongarch_lasx_xvrepl128vei_w:
  case Intrinsic::loongarch_lasx_xvpickve2gr_d:
  case Intrinsic::loongarch_lasx_xvpickve2gr_du:
  case Intrinsic::loongarch_lasx_xvpickve_d:
  case Intrinsic::loongarch_lasx_xvpickve_d_f:
  case Intrinsic::loongarch_lasx_xvinsve0_d:
  case Intrinsic::loongarch_lsx_vsat_b:
  case Intrinsic::loongarch_lsx_vsat_bu:
  case Intrinsic::loongarch_lsx_vrotri_b:
  case Intrinsic::loongarch_lsx_vsllwil_h_b:
  case Intrinsic::loongarch_lsx_vsllwil_hu_bu:
  case Intrinsic::loongarch_lsx_vsrlri_b:
  case Intrinsic::loongarch_lsx_vsrari_b:
  case Intrinsic::loongarch_lsx_vreplvei_h:
  case Intrinsic::loongarch_lasx_xvsat_b:
  case Intrinsic::loongarch_lasx_xvsat_bu:
  case Intrinsic::loongarch_lasx_xvrotri_b:
  case Intrinsic::loongarch_lasx_xvsllwil_h_b:
  case Intrinsic::loongarch_lasx_xvsllwil_hu_bu:
  case Intrinsic::loongarch_lasx_xvsrlri_b:
  case Intrinsic::loongarch_lasx_xvsrari_b:
  case Intrinsic::loongarch_lasx_xvrepl128vei_h:
  case Intrinsic::loongarch_lasx_xvpickve_w:
  case Intrinsic::loongarch_lasx_xvpickve_w_f:
  case Intrinsic::loongarch_lasx_xvinsve0_w:
  case Intrinsic::loongarch_lsx_vsat_h:
  case Intrinsic::loongarch_lsx_vsat_hu:
  case Intrinsic::loongarch_lsx_vrotri_h:
  case Intrinsic::loongarch_lsx_vsllwil_w_h:
  case Intrinsic::loongarch_lsx_vsllwil_wu_hu:
  case Intrinsic::loongarch_lsx_vsrlri_h:
  case Intrinsic::loongarch_lsx_vsrari_h:
  case Intrinsic::loongarch_lsx_vreplvei_b:
  case Intrinsic::loongarch_lasx_xvsat_h:
  case Intrinsic::loongarch_lasx_xvsat_hu:
  case Intrinsic::loongarch_lasx_xvrotri_h:
  case Intrinsic::loongarch_lasx_xvsllwil_w_h:
  case Intrinsic::loongarch_lasx_xvsllwil_wu_hu:
  case Intrinsic::loongarch_lasx_xvsrlri_h:
  case Intrinsic::loongarch_lasx_xvsrari_h:
  case Intrinsic::loongarch_lasx_xvrepl128vei_b:
  case Intrinsic::loongarch_lsx_vsrlni_b_h:
  case Intrinsic::loongarch_lsx_vsrani_b_h:
  case Intrinsic::loongarch_lsx_vsrlrni_b_h:
  case Intrinsic::loongarch_lsx_vsrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_b_h:
  case Intrinsic::loongarch_lsx_vssrani_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_bu_h:
  case Intrinsic::loongarch_lsx_vssrani_bu_h:
  case Intrinsic::loongarch_lsx_vssrlrni_b_h:
  case Intrinsic::loongarch_lsx_vssrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlrni_bu_h:
  case Intrinsic::loongarch_lsx_vssrarni_bu_h:
  case Intrinsic::loongarch_lasx_xvsrlni_b_h:
  case Intrinsic::loongarch_lasx_xvsrani_b_h:
  case Intrinsic::loongarch_lasx_xvsrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvsrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_b_h:
  case Intrinsic::loongarch_lasx_xvssrani_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrani_bu_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvssrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrarni_bu_h:
  case Intrinsic::loongarch_lsx_vsat_w:
  case Intrinsic::loongarch_lsx_vsat_wu:
  case Intrinsic::loongarch_lsx_vrotri_w:
  case Intrinsic::loongarch_lsx_vsllwil_d_w:
  case Intrinsic::loongarch_lsx_vsllwil_du_wu:
  case Intrinsic::loongarch_lsx_vsrlri_w:
  case Intrinsic::loongarch_lsx_vsrari_w:
  case Intrinsic::loongarch_lsx_vslei_bu:
  case Intrinsic::loongarch_lsx_vslei_hu:
  case Intrinsic::loongarch_lsx_vslei_wu:
  case Intrinsic::loongarch_lsx_vslei_du:
  case Intrinsic::loongarch_lsx_vslti_bu:
  case Intrinsic::loongarch_lsx_vslti_hu:
  case Intrinsic::loongarch_lsx_vslti_wu:
  case Intrinsic::loongarch_lsx_vslti_du:
  case Intrinsic::loongarch_lsx_vbsll_v:
  case Intrinsic::loongarch_lsx_vbsrl_v:
  case Intrinsic::loongarch_lasx_xvsat_w:
  case Intrinsic::loongarch_lasx_xvsat_wu:
  case Intrinsic::loongarch_lasx_xvrotri_w:
  case Intrinsic::loongarch_lasx_xvsllwil_d_w:
  case Intrinsic::loongarch_lasx_xvsllwil_du_wu:
  case Intrinsic::loongarch_lasx_xvsrlri_w:
  case Intrinsic::loongarch_lasx_xvsrari_w:
  case Intrinsic::loongarch_lasx_xvslei_bu:
  case Intrinsic::loongarch_lasx_xvslei_hu:
  case Intrinsic::loongarch_lasx_xvslei_wu:
  case Intrinsic::loongarch_lasx_xvslei_du:
  case Intrinsic::loongarch_lasx_xvslti_bu:
  case Intrinsic::loongarch_lasx_xvslti_hu:
  case Intrinsic::loongarch_lasx_xvslti_wu:
  case Intrinsic::loongarch_lasx_xvslti_du:
  case Intrinsic::loongarch_lasx_xvbsll_v:
  case Intrinsic::loongarch_lasx_xvbsrl_v:
  case Intrinsic::loongarch_lsx_vseqi_b:
  case Intrinsic::loongarch_lsx_vseqi_h:
  case Intrinsic::loongarch_lsx_vseqi_w:
  case Intrinsic::loongarch_lsx_vseqi_d:
  case Intrinsic::loongarch_lsx_vslei_b:
  case Intrinsic::loongarch_lsx_vslei_h:
  case Intrinsic::loongarch_lsx_vslei_w:
  case Intrinsic::loongarch_lsx_vslei_d:
  case Intrinsic::loongarch_lsx_vslti_b:
  case Intrinsic::loongarch_lsx_vslti_h:
  case Intrinsic::loongarch_lsx_vslti_w:
  case Intrinsic::loongarch_lsx_vslti_d:
  case Intrinsic::loongarch_lasx_xvseqi_b:
  case Intrinsic::loongarch_lasx_xvseqi_h:
  case Intrinsic::loongarch_lasx_xvseqi_w:
  case Intrinsic::loongarch_lasx_xvseqi_d:
  case Intrinsic::loongarch_lasx_xvslei_b:
  case Intrinsic::loongarch_lasx_xvslei_h:
  case Intrinsic::loongarch_lasx_xvslei_w:
  case Intrinsic::loongarch_lasx_xvslei_d:
  case Intrinsic::loongarch_lasx_xvslti_b:
  case Intrinsic::loongarch_lasx_xvslti_h:
  case Intrinsic::loongarch_lasx_xvslti_w:
  case Intrinsic::loongarch_lasx_xvslti_d:
  case Intrinsic::loongarch_lsx_vsrlni_h_w:
  case Intrinsic::loongarch_lsx_vsrani_h_w:
  case Intrinsic::loongarch_lsx_vsrlrni_h_w:
  case Intrinsic::loongarch_lsx_vsrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_h_w:
  case Intrinsic::loongarch_lsx_vssrani_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_hu_w:
  case Intrinsic::loongarch_lsx_vssrani_hu_w:
  case Intrinsic::loongarch_lsx_vssrlrni_h_w:
  case Intrinsic::loongarch_lsx_vssrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlrni_hu_w:
  case Intrinsic::loongarch_lsx_vssrarni_hu_w:
  case Intrinsic::loongarch_lsx_vfrstpi_b:
  case Intrinsic::loongarch_lsx_vfrstpi_h:
  case Intrinsic::loongarch_lasx_xvsrlni_h_w:
  case Intrinsic::loongarch_lasx_xvsrani_h_w:
  case Intrinsic::loongarch_lasx_xvsrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvsrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_h_w:
  case Intrinsic::loongarch_lasx_xvssrani_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrani_hu_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvssrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrarni_hu_w:
  case Intrinsic::loongarch_lasx_xvfrstpi_b:
  case Intrinsic::loongarch_lasx_xvfrstpi_h:
  case Intrinsic::loongarch_lsx_vsat_d:
  case Intrinsic::loongarch_lsx_vsat_du:
  case Intrinsic::loongarch_lsx_vrotri_d:
  case Intrinsic::loongarch_lsx_vsrlri_d:
  case Intrinsic::loongarch_lsx_vsrari_d:
  case Intrinsic::loongarch_lasx_xvsat_d:
  case Intrinsic::loongarch_lasx_xvsat_du:
  case Intrinsic::loongarch_lasx_xvrotri_d:
  case Intrinsic::loongarch_lasx_xvsrlri_d:
  case Intrinsic::loongarch_lasx_xvsrari_d:
  case Intrinsic::loongarch_lsx_vsrlni_w_d:
  case Intrinsic::loongarch_lsx_vsrani_w_d:
  case Intrinsic::loongarch_lsx_vsrlrni_w_d:
  case Intrinsic::loongarch_lsx_vsrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_w_d:
  case Intrinsic::loongarch_lsx_vssrani_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_wu_d:
  case Intrinsic::loongarch_lsx_vssrani_wu_d:
  case Intrinsic::loongarch_lsx_vssrlrni_w_d:
  case Intrinsic::loongarch_lsx_vssrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlrni_wu_d:
  case Intrinsic::loongarch_lsx_vssrarni_wu_d:
  case Intrinsic::loongarch_lasx_xvsrlni_w_d:
  case Intrinsic::loongarch_lasx_xvsrani_w_d:
  case Intrinsic::loongarch_lasx_xvsrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvsrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_w_d:
  case Intrinsic::loongarch_lasx_xvssrani_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrani_wu_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvssrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrarni_wu_d:
  case Intrinsic::loongarch_lsx_vsrlni_d_q:
  case Intrinsic::loongarch_lsx_vsrani_d_q:
  case Intrinsic::loongarch_lsx_vsrlrni_d_q:
  case Intrinsic::loongarch_lsx_vsrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_d_q:
  case Intrinsic::loongarch_lsx_vssrani_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_du_q:
  case Intrinsic::loongarch_lsx_vssrani_du_q:
  case Intrinsic::loongarch_lsx_vssrlrni_d_q:
  case Intrinsic::loongarch_lsx_vssrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlrni_du_q:
  case Intrinsic::loongarch_lsx_vssrarni_du_q:
  case Intrinsic::loongarch_lasx_xvsrlni_d_q:
  case Intrinsic::loongarch_lasx_xvsrani_d_q:
  case Intrinsic::loongarch_lasx_xvsrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvsrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_d_q:
  case Intrinsic::loongarch_lasx_xvssrani_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_du_q:
  case Intrinsic::loongarch_lasx_xvssrani_du_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvssrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_du_q:
  case Intrinsic::loongarch_lasx_xvssrarni_du_q:
  case Intrinsic::loongarch_lsx_vnori_b:
  case Intrinsic::loongarch_lsx_vshuf4i_b:
  case Intrinsic::loongarch_lsx_vshuf4i_h:
  case Intrinsic::loongarch_lsx_vshuf4i_w:
  case Intrinsic::loongarch_lasx_xvnori_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_h:
  case Intrinsic::loongarch_lasx_xvshuf4i_w:
  case Intrinsic::loongarch_lasx_xvpermi_d:
  case Intrinsic::loongarch_lsx_vshuf4i_d:
  case Intrinsic::loongarch_lsx_vpermi_w:
  case Intrinsic::loongarch_lsx_vbitseli_b:
  case Intrinsic::loongarch_lsx_vextrins_b:
  case Intrinsic::loongarch_lsx_vextrins_h:
  case Intrinsic::loongarch_lsx_vextrins_w:
  case Intrinsic::loongarch_lsx_vextrins_d:
  case Intrinsic::loongarch_lasx_xvshuf4i_d:
  case Intrinsic::loongarch_lasx_xvpermi_w:
  case Intrinsic::loongarch_lasx_xvpermi_q:
  case Intrinsic::loongarch_lasx_xvbitseli_b:
  case Intrinsic::loongarch_lasx_xvextrins_b:
  case Intrinsic::loongarch_lasx_xvextrins_h:
  case Intrinsic::loongarch_lasx_xvextrins_w:
  case Intrinsic::loongarch_lasx_xvextrins_d:
  case Intrinsic::loongarch_lsx_vrepli_b:
  case Intrinsic::loongarch_lsx_vrepli_h:
  case Intrinsic::loongarch_lsx_vrepli_w:
  case Intrinsic::loongarch_lsx_vrepli_d:
  case Intrinsic::loongarch_lasx_xvrepli_b:
  case Intrinsic::loongarch_lasx_xvrepli_h:
  case Intrinsic::loongarch_lasx_xvrepli_w:
  case Intrinsic::loongarch_lasx_xvrepli_d:
  case Intrinsic::loongarch_lsx_vldi:
  case Intrinsic::loongarch_lasx_xvldi:
LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
  MVT GRLenVT = Subtarget.getGRLenVT();
  EVT VT = Op.getValueType();
  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";
  switch (Op.getConstantOperandVal(1)) {
  case Intrinsic::loongarch_crc_w_b_w:
  case Intrinsic::loongarch_crc_w_h_w:
  case Intrinsic::loongarch_crc_w_w_w:
  case Intrinsic::loongarch_crc_w_d_w:
  case Intrinsic::loongarch_crcc_w_b_w:
  case Intrinsic::loongarch_crcc_w_h_w:
  case Intrinsic::loongarch_crcc_w_w_w:
  case Intrinsic::loongarch_crcc_w_d_w:
  case Intrinsic::loongarch_csrrd_w:
  case Intrinsic::loongarch_csrrd_d: {
    unsigned Imm = Op.getConstantOperandVal(2);
               : DAG.getNode(LoongArchISD::CSRRD, DL, {GRLenVT, MVT::Other},
  case Intrinsic::loongarch_csrwr_w:
  case Intrinsic::loongarch_csrwr_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
               : DAG.getNode(LoongArchISD::CSRWR, DL, {GRLenVT, MVT::Other},
                             {Chain, Op.getOperand(2),
  case Intrinsic::loongarch_csrxchg_w:
  case Intrinsic::loongarch_csrxchg_d: {
    unsigned Imm = Op.getConstantOperandVal(4);
               : DAG.getNode(LoongArchISD::CSRXCHG, DL, {GRLenVT, MVT::Other},
                             {Chain, Op.getOperand(2), Op.getOperand(3),
  case Intrinsic::loongarch_iocsrrd_d: {
        LoongArchISD::IOCSRRD_D, DL, {GRLenVT, MVT::Other},
#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    return DAG.getNode(LoongArchISD::NODE, DL, {GRLenVT, MVT::Other},          \
                       {Chain, Op.getOperand(2)});                             \
  case Intrinsic::loongarch_cpucfg: {
    return DAG.getNode(LoongArchISD::CPUCFG, DL, {GRLenVT, MVT::Other},
                       {Chain, Op.getOperand(2)});
  case Intrinsic::loongarch_lddir_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
  case Intrinsic::loongarch_movfcsr2gr: {
    if (!Subtarget.hasBasicF())
    unsigned Imm = Op.getConstantOperandVal(2);
               : DAG.getNode(LoongArchISD::MOVFCSR2GR, DL, {VT, MVT::Other},
  case Intrinsic::loongarch_lsx_vld:
  case Intrinsic::loongarch_lsx_vldrepl_b:
  case Intrinsic::loongarch_lasx_xvld:
  case Intrinsic::loongarch_lasx_xvldrepl_b:
  case Intrinsic::loongarch_lsx_vldrepl_h:
  case Intrinsic::loongarch_lasx_xvldrepl_h:
                   Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lsx_vldrepl_w:
  case Intrinsic::loongarch_lasx_xvldrepl_w:
                   Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lsx_vldrepl_d:
  case Intrinsic::loongarch_lasx_xvldrepl_d:
                   Op, "argument out of range or not a multiple of 8", DAG)
    return Op.getOperand(0);
  MVT GRLenVT = Subtarget.getGRLenVT();
  uint64_t IntrinsicEnum = Op.getConstantOperandVal(1);
  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqLA32 = "requires loongarch32";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";
  switch (IntrinsicEnum) {
  case Intrinsic::loongarch_cacop_d:
  case Intrinsic::loongarch_cacop_w: {
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_d && !Subtarget.is64Bit())
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
  case Intrinsic::loongarch_dbar: {
  case Intrinsic::loongarch_ibar: {
  case Intrinsic::loongarch_break: {
  case Intrinsic::loongarch_movgr2fcsr: {
    if (!Subtarget.hasBasicF())
  case Intrinsic::loongarch_syscall: {
#define IOCSRWR_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue Op3 = Op.getOperand(3);                                            \
    return Subtarget.is64Bit()                                                 \
               ? DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain,        \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),  \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op3))  \
               : DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, Op2,   \
  case Intrinsic::loongarch_iocsrwr_d: {
    return !Subtarget.is64Bit()
#define ASRT_LE_GT_CASE(NAME)                                                  \
  case Intrinsic::loongarch_##NAME: {                                          \
    return !Subtarget.is64Bit()                                                \
               ? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG)           \
#undef ASRT_LE_GT_CASE
  case Intrinsic::loongarch_ldpte_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !Subtarget.is64Bit()
  case Intrinsic::loongarch_lsx_vst:
  case Intrinsic::loongarch_lasx_xvst:
  case Intrinsic::loongarch_lasx_xvstelm_b:
  case Intrinsic::loongarch_lsx_vstelm_b:
  case Intrinsic::loongarch_lasx_xvstelm_h:
                   Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lsx_vstelm_h:
                   Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lasx_xvstelm_w:
                   Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lsx_vstelm_w:
                   Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lasx_xvstelm_d:
                   Op, "argument out of range or not a multiple of 8", DAG)
  case Intrinsic::loongarch_lsx_vstelm_d:
                   Op, "argument out of range or not a multiple of 8", DAG)
  EVT VT = Lo.getValueType();
  EVT VT = Lo.getValueType();
    return LoongArchISD::DIV_W;
    return LoongArchISD::DIV_WU;
    return LoongArchISD::MOD_W;
    return LoongArchISD::MOD_WU;
    return LoongArchISD::SLL_W;
    return LoongArchISD::SRA_W;
    return LoongArchISD::SRL_W;
    return LoongArchISD::ROTR_W;
    return LoongArchISD::CTZ_W;
    return LoongArchISD::CLZ_W;
    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0);
    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
                                                StringRef ErrorMsg,
                                                bool WithChain = true) {
    Results.push_back(N->getOperand(0));

template <unsigned N>
  const StringRef ErrorMsgOOR = "argument out of range";
  unsigned Imm = Node->getConstantOperandVal(2);
  switch (N->getConstantOperandVal(0)) {
  case Intrinsic::loongarch_lsx_vpickve2gr_b:
                                     LoongArchISD::VPICK_SEXT_ELT);
  case Intrinsic::loongarch_lsx_vpickve2gr_h:
  case Intrinsic::loongarch_lasx_xvpickve2gr_w:
                                     LoongArchISD::VPICK_SEXT_ELT);
  case Intrinsic::loongarch_lsx_vpickve2gr_w:
                                     LoongArchISD::VPICK_SEXT_ELT);
  case Intrinsic::loongarch_lsx_vpickve2gr_bu:
                                     LoongArchISD::VPICK_ZEXT_ELT);
  case Intrinsic::loongarch_lsx_vpickve2gr_hu:
  case Intrinsic::loongarch_lasx_xvpickve2gr_wu:
                                     LoongArchISD::VPICK_ZEXT_ELT);
  case Intrinsic::loongarch_lsx_vpickve2gr_wu:
                                     LoongArchISD::VPICK_ZEXT_ELT);
  case Intrinsic::loongarch_lsx_bz_b:
  case Intrinsic::loongarch_lsx_bz_h:
  case Intrinsic::loongarch_lsx_bz_w:
  case Intrinsic::loongarch_lsx_bz_d:
  case Intrinsic::loongarch_lasx_xbz_b:
  case Intrinsic::loongarch_lasx_xbz_h:
  case Intrinsic::loongarch_lasx_xbz_w:
  case Intrinsic::loongarch_lasx_xbz_d:
                                  LoongArchISD::VALL_ZERO);
  case Intrinsic::loongarch_lsx_bz_v:
  case Intrinsic::loongarch_lasx_xbz_v:
                                  LoongArchISD::VANY_ZERO);
  case Intrinsic::loongarch_lsx_bnz_b:
  case Intrinsic::loongarch_lsx_bnz_h:
  case Intrinsic::loongarch_lsx_bnz_w:
  case Intrinsic::loongarch_lsx_bnz_d:
  case Intrinsic::loongarch_lasx_xbnz_b:
  case Intrinsic::loongarch_lasx_xbnz_h:
  case Intrinsic::loongarch_lasx_xbnz_w:
  case Intrinsic::loongarch_lasx_xbnz_d:
                                  LoongArchISD::VALL_NONZERO);
  case Intrinsic::loongarch_lsx_bnz_v:
  case Intrinsic::loongarch_lasx_xbnz_v:
                                  LoongArchISD::VANY_NONZERO);
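// i128 ATOMIC_CMP_SWAP: split the compare and new values into i64 halves and
// select the acquire or plain PseudoCmpXchg128 pseudo from the merged memory
// ordering.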
  assert(N->getValueType(0) == MVT::i128 &&
         "AtomicCmpSwap on types less than 128 should be legal");
  switch (MemOp->getMergedOrdering()) {
    Opcode = LoongArch::PseudoCmpXchg128Acquire;
    Opcode = LoongArch::PseudoCmpXchg128;
  auto CmpVal = DAG.SplitScalar(N->getOperand(2), DL, MVT::i64, MVT::i64);
  auto NewVal = DAG.SplitScalar(N->getOperand(3), DL, MVT::i64, MVT::i64);
  SDValue Ops[] = {N->getOperand(1), CmpVal.first,  CmpVal.second,
                   NewVal.first,    NewVal.second, N->getOperand(0)};
      Opcode, SDLoc(N), DAG.getVTList(MVT::i64, MVT::i64, MVT::i64, MVT::Other),
5392 EVT VT =
N->getValueType(0);
5393 switch (
N->getOpcode()) {
5398 assert(
N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5399 "Unexpected custom legalisation");
5406 assert(VT == MVT::i32 && Subtarget.is64Bit() &&
5407 "Unexpected custom legalisation");
5409 Subtarget.hasDiv32() && VT == MVT::i32
5416 assert(VT == MVT::i32 && Subtarget.is64Bit() &&
5417 "Unexpected custom legalisation");
5425 assert(VT == MVT::i32 && Subtarget.is64Bit() &&
5426 "Unexpected custom legalisation");
5433  MVT VT = N->getSimpleValueType(0);
5434 assert(VT == MVT::v2f32 && Subtarget.hasExtLSX() &&
5435 "Unexpected custom legalisation");
5437 "Unexpected type action!");
5442 Ld->getPointerInfo(), Ld->getBaseAlign(),
5443 Ld->getMemOperand()->getFlags());
5454 assert(VT == MVT::i32 && Subtarget.is64Bit() &&
5455 "Unexpected custom legalisation");
5462 if (Src.getValueType() == MVT::f16)
5473 EVT OpVT = Src.getValueType();
5477 std::tie(Result, Chain) =
5484 EVT SrcVT = Src.getValueType();
5485 if (VT == MVT::i32 && SrcVT == MVT::f32 && Subtarget.is64Bit() &&
5486 Subtarget.hasBasicF()) {
5488        DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Src);
5490    } else if (VT == MVT::i64 && SrcVT == MVT::f64 && !Subtarget.is64Bit()) {
5492        DAG.getVTList(MVT::i32, MVT::i32), Src);
5500 assert(VT == MVT::i32 && Subtarget.is64Bit() &&
5501 "Unexpected custom legalisation");
5504  TLI.expandFP_TO_UINT(N, Tmp1, Tmp2, DAG);
5509 assert(VT == MVT::v2f32 && Subtarget.hasExtLSX() &&
5510 "Unexpected custom legalisation");
5516 if (OpVT == MVT::v2f64) {
5526 assert((VT == MVT::i16 || VT == MVT::i32) &&
5527 "Unexpected custom legalization");
5528 MVT GRLenVT = Subtarget.getGRLenVT();
5535    Tmp = DAG.getNode(LoongArchISD::REVB_2H, DL, GRLenVT, NewSrc);
5540    Tmp = DAG.getNode(LoongArchISD::REVB_2W, DL, GRLenVT, NewSrc);
5548 assert((VT == MVT::i8 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
5549 "Unexpected custom legalization");
5550 MVT GRLenVT = Subtarget.getGRLenVT();
5557    Tmp = DAG.getNode(LoongArchISD::BITREV_4B, DL, GRLenVT, NewSrc);
5560    Tmp = DAG.getNode(LoongArchISD::BITREV_W, DL, GRLenVT, NewSrc);
5568 assert(VT == MVT::i32 && Subtarget.is64Bit() &&
5569 "Unexpected custom legalisation");
5576 MVT GRLenVT = Subtarget.getGRLenVT();
5577  const StringRef ErrorMsgOOR = "argument out of range";
5578  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
5579  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";
5581  switch (N->getConstantOperandVal(1)) {
5584  case Intrinsic::loongarch_movfcsr2gr: {
5585    if (!Subtarget.hasBasicF()) {
5595        LoongArchISD::MOVFCSR2GR, SDLoc(N), {MVT::i64, MVT::Other},
5602#define CRC_CASE_EXT_BINARYOP(NAME, NODE) \
5603 case Intrinsic::loongarch_##NAME: { \
5604 SDValue NODE = DAG.getNode( \
5605 LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
5606 {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2), \
5607 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))}); \
5608 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0))); \
5609 Results.push_back(NODE.getValue(1)); \
5618#undef CRC_CASE_EXT_BINARYOP
5620#define CRC_CASE_EXT_UNARYOP(NAME, NODE) \
5621 case Intrinsic::loongarch_##NAME: { \
5622 SDValue NODE = DAG.getNode( \
5623 LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
5625 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))}); \
5626 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0))); \
5627 Results.push_back(NODE.getValue(1)); \
5632#undef CRC_CASE_EXT_UNARYOP
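// Shared shape of the CRC_CASE macros: i32 operands are ANY_EXTENDed to
// i64, the 64-bit-only CRC node is emitted with the chain threaded through,
// and the i64 result is truncated back to the original type. The binary
// form extends two data operands; the unary form extends only one.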
5633#define CSR_CASE(ID) \
5634 case Intrinsic::loongarch_##ID: { \
5635 if (!Subtarget.is64Bit()) \
5636 emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqLA64); \
5644 case Intrinsic::loongarch_csrrd_w: {
5651        DAG.getNode(LoongArchISD::CSRRD, DL, {GRLenVT, MVT::Other},
5658  case Intrinsic::loongarch_csrwr_w: {
5659    unsigned Imm = N->getConstantOperandVal(3);
5665        DAG.getNode(LoongArchISD::CSRWR, DL, {GRLenVT, MVT::Other},
5673  case Intrinsic::loongarch_csrxchg_w: {
5674    unsigned Imm = N->getConstantOperandVal(4);
5680        LoongArchISD::CSRXCHG, DL, {GRLenVT, MVT::Other},
5689#define IOCSRRD_CASE(NAME, NODE) \
5690 case Intrinsic::loongarch_##NAME: { \
5691 SDValue IOCSRRDResults = \
5692 DAG.getNode(LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
5693 {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2)}); \
5694 Results.push_back( \
5695 DAG.getNode(ISD::TRUNCATE, DL, VT, IOCSRRDResults.getValue(0))); \
5696 Results.push_back(IOCSRRDResults.getValue(1)); \
5703 case Intrinsic::loongarch_cpucfg: {
5705        DAG.getNode(LoongArchISD::CPUCFG, DL, {GRLenVT, MVT::Other},
5712 case Intrinsic::loongarch_lddir_d: {
5713 if (!Subtarget.is64Bit()) {
5723 if (Subtarget.is64Bit())
5725 "On LA64, only 64-bit registers can be read.");
5728 "On LA32, only 32-bit registers can be read.");
5730  Results.push_back(N->getOperand(0));
5741 OpVT == MVT::f64 ? RTLIB::LROUND_F64 : RTLIB::LROUND_F32;
5754  MVT VT = N->getSimpleValueType(0);
5760  EVT InVT = In.getValueType();
5771  for (unsigned I = 0; I < MinElts; ++I)
5772    TruncMask[I] = Scale * I;
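// Truncation is expressed as a shuffle: with Scale = (input elements /
// MinElts), mask entry I selects element Scale * I, i.e. the low
// sub-element of each widened lane.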
5774 unsigned WidenNumElts = 128 / In.getScalarValueSizeInBits();
5775 MVT SVT = In.getSimpleValueType().getScalarType();
5781 "Illegal vector type in truncation");
5793 if (!Subtarget.hasExtLSX() || Subtarget.hasExtLASX())
5796  EVT DstVT = N->getValueType(0);
5798 MVT SrcVT = Src.getSimpleValueType();
5812 unsigned WidenSrcElts = 128 / SrcEltBits;
5819 unsigned FirstStageEltBits = 128 / NumElts;
5823 SrcVT = FirstStageVT;
5824 SrcEltBits = FirstStageEltBits;
5831 while (SrcEltBits < DstEltBits) {
5832 unsigned NextEltBits = SrcEltBits * 2;
5835 unsigned NextEltsPerBlock = CurEltsPerBlock / 2;
5853 Blocks = std::move(NextBlocks);
5854 SrcVT = NextBlockVT;
5855 SrcEltBits = NextEltBits;
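// The widening loop doubles the element width each iteration
// (SrcEltBits -> NextEltBits) while halving the elements per 128-bit block,
// so every intermediate extension step stays a legal LSX operation.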
5867  assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDN");
5869  MVT VT = N->getSimpleValueType(0);
5888  return DAG.getNode(LoongArchISD::VANDN, DL, VT, X, Y);
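// The combine folds (and (xor X, -1), Y) into one VANDN node, the LSX/LASX
// "and-not" operation, saving the explicit vector NOT.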
5892 unsigned MinSizeInBits) {
5900 unsigned SplatBitSize;
5903 return Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
5904 HasAnyUndefs, MinSizeInBits,
5914  EVT VT = N->getValueType(0);
5980  if (And.getOperand(0) == X) {
6009 if (ShiftVal != (SplatVal + 1))
6018 : LoongArchISD::VSRAR,
6028  SDValue FirstOperand = N->getOperand(0);
6029  SDValue SecondOperand = N->getOperand(1);
6030  unsigned FirstOperandOpc = FirstOperand.getOpcode();
6031  EVT ValTy = N->getValueType(0);
6034 unsigned SMIdx, SMLen;
6043 if (!Subtarget.has32S())
6099 NewOperand = FirstOperand;
6102 msb = lsb + SMLen - 1;
6106  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL || lsb == 0)
6119 if (!Subtarget.has32S())
6131  SDValue FirstOperand = N->getOperand(0);
6133  EVT ValTy = N->getValueType(0);
6136 unsigned MaskIdx, MaskLen;
6151 if (MaskIdx <= Shamt && Shamt <= MaskIdx + MaskLen - 1)
6152    return DAG.getNode(LoongArchISD::BSTRPICK, DL, ValTy,
6167 switch (Src.getOpcode()) {
6170    return Src.getOperand(0).getValueSizeInBits() == Size;
6180 return Src.getOperand(0).getScalarValueSizeInBits() == 1 &&
6193 switch (Src.getOpcode()) {
6203      Src.getOpcode(), DL, SExtVT,
6209 DL, SExtVT, Src.getOperand(0),
6221  EVT VT = N->getValueType(0);
6223  EVT SrcVT = Src.getValueType();
6225  if (Src.getOpcode() != ISD::SETCC || !Src.hasOneUse())
6230 EVT CmpVT = Src.getOperand(0).getValueType();
6235 else if (Subtarget.has32S() && Subtarget.hasExtLASX() &&
6248 Opc = UseLASX ? LoongArchISD::XVMSKEQZ : LoongArchISD::VMSKEQZ;
6253 Opc = UseLASX ? LoongArchISD::XVMSKGEZ : LoongArchISD::VMSKGEZ;
6258 Opc = UseLASX ? LoongArchISD::XVMSKGEZ : LoongArchISD::VMSKGEZ;
6263 (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
6265 Opc = UseLASX ? LoongArchISD::XVMSKLTZ : LoongArchISD::VMSKLTZ;
6270 (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
6272 Opc = UseLASX ? LoongArchISD::XVMSKLTZ : LoongArchISD::VMSKLTZ;
6277 Opc = UseLASX ? LoongArchISD::XVMSKNEZ : LoongArchISD::VMSKNEZ;
6294  EVT VT = N->getValueType(0);
6296  EVT SrcVT = Src.getValueType();
6313  bool UseLASX = false;
6314  bool PropagateSExt = false;
6316  if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse()) {
6317 EVT CmpVT = Src.getOperand(0).getValueType();
6326 SExtVT = MVT::v2i64;
6329 SExtVT = MVT::v4i32;
6331 SExtVT = MVT::v4i64;
6333      PropagateSExt = true;
6337 SExtVT = MVT::v8i16;
6339 SExtVT = MVT::v8i32;
6341      PropagateSExt = true;
6345 SExtVT = MVT::v16i8;
6347 SExtVT = MVT::v16i16;
6349      PropagateSExt = true;
6353 SExtVT = MVT::v32i8;
6361 if (!Subtarget.has32S() || !Subtarget.hasExtLASX()) {
6362 if (Src.getSimpleValueType() == MVT::v32i8) {
6370  } else if (UseLASX) {
6376 Opc = UseLASX ? LoongArchISD::XVMSKLTZ : LoongArchISD::VMSKLTZ;
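// These movemask-style combines funnel a one-use vector SETCC into an
// [X]VMSK* node: the compare condition picks the variant (LTZ for sign-bit
// tests, EQZ/GEZ/NEZ otherwise) and UseLASX selects between the 128-bit and
// 256-bit forms for the SExtVT chosen above.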
6389  EVT ValTy = N->getValueType(0);
6390  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
6394  unsigned MaskIdx0, MaskLen0, MaskIdx1, MaskLen1;
6396  bool SwapAndRetried = false;
6399 if (!Subtarget.has32S())
6405 if (ValBits != 32 && ValBits != 64)
6420 MaskIdx0 == MaskIdx1 && MaskLen0 == MaskLen1 &&
6423 (MaskIdx0 + MaskLen0 <= ValBits)) {
6444 MaskLen0 == MaskLen1 && MaskIdx1 == 0 &&
6445 (MaskIdx0 + MaskLen0 <= ValBits)) {
6462 (MaskIdx0 + MaskLen0 <= 64) &&
6470 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
6471 : (MaskIdx0 + MaskLen0 - 1),
6487 (MaskIdx0 + MaskLen0 <= ValBits)) {
6510      DAG.getConstant(ValBits == 32 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
6511                                    : (MaskIdx0 + MaskLen0 - 1),
6526 unsigned MaskIdx, MaskLen;
6527  if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::AND &&
6536    return DAG.getNode(LoongArchISD::BSTRINS, DL, ValTy, N0,
6554      N1.getOperand(0).getOpcode() == ISD::SHL &&
6560    return DAG.getNode(LoongArchISD::BSTRINS, DL, ValTy, N0,
6568  if (!SwapAndRetried) {
6570    SwapAndRetried = true;
6574  SwapAndRetried = false;
6591    return DAG.getNode(LoongArchISD::BSTRINS, DL, ValTy, N0,
6600  if (!SwapAndRetried) {
6602    SwapAndRetried = true;
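// Each pattern above matches an OR of masked/shifted values that writes a
// contiguous bit-field and rewrites it as a single BSTRINS. Since the
// patterns assume a fixed operand order, the combine swaps N0/N1 and
// retries once (SwapAndRetried) before giving up.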
6612 switch (V.getNode()->getOpcode()) {
6624  if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {
6632  if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {
6709  SDNode *AndNode = N->getOperand(0).getNode();
6717  SDValue CmpInputValue = N->getOperand(1);
6726  if (!CN || !CN->isZero())
6728  AndInputValue1 = AndInputValue1.getOperand(0);
6732  if (AndInputValue2 != CmpInputValue)
6765      TruncInputValue1, TruncInputValue2);
6767      DAG.getSetCC(SDLoc(N), N->getValueType(0), NewAnd, TruncInputValue2, CC);
6780  if (Src.getOpcode() != LoongArchISD::REVB_2W)
6783  return DAG.getNode(LoongArchISD::BITREV_4B, SDLoc(N), N->getValueType(0),
6808      LHS.getOperand(0).getValueType() == Subtarget.getGRLenVT()) {
6836  ShAmt = LHS.getValueSizeInBits() - 1 - ShAmt;
6869  return DAG.getNode(LoongArchISD::BR_CC, DL, N->getValueType(0),
6870                     N->getOperand(0), LHS, RHS, CC, N->getOperand(4));
6886  EVT VT = N->getValueType(0);
6889  if (TrueV == FalseV)
6920  return DAG.getNode(LoongArchISD::SELECT_CC, DL, N->getValueType(0),
6921                     {LHS, RHS, CC, TrueV, FalseV});
6926 template <unsigned N>
6930  bool IsSigned = false) {
6934  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
6935      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
6937  ": argument out of range.");
6943 template <unsigned N>
6947  EVT ResTy = Node->getValueType(0);
6951  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
6952      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
6954 ": argument out of range.");
6959 IsSigned ? CImm->getSExtValue() : CImm->getZExtValue(), IsSigned),
6965  EVT ResTy = Node->getValueType(0);
6973  EVT ResTy = Node->getValueType(0);
6982 template <unsigned N>
6985  EVT ResTy = Node->getValueType(0);
6990  ": argument out of range.");
7000 template <unsigned N>
7003  EVT ResTy = Node->getValueType(0);
7008  ": argument out of range.");
7017 template <unsigned N>
7020  EVT ResTy = Node->getValueType(0);
7025  ": argument out of range.");
7034 template <unsigned W>
7037  unsigned Imm = N->getConstantOperandVal(2);
7039  const StringRef ErrorMsg = "argument out of range";
7041    return DAG.getUNDEF(N->getValueType(0));
7047  return DAG.getNode(ResOp, DL, N->getValueType(0), Vec, Idx, EltVT);
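// The template parameter of these helpers is the width of the immediate
// field: each one checks that the constant intrinsic operand fits in N
// signed or unsigned bits and emits the "argument out of range" diagnostic
// instead of silently producing an unencodable instruction.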
7055  switch (N->getConstantOperandVal(0)) {
7058 case Intrinsic::loongarch_lsx_vadd_b:
7059 case Intrinsic::loongarch_lsx_vadd_h:
7060 case Intrinsic::loongarch_lsx_vadd_w:
7061 case Intrinsic::loongarch_lsx_vadd_d:
7062 case Intrinsic::loongarch_lasx_xvadd_b:
7063 case Intrinsic::loongarch_lasx_xvadd_h:
7064 case Intrinsic::loongarch_lasx_xvadd_w:
7065 case Intrinsic::loongarch_lasx_xvadd_d:
7068 case Intrinsic::loongarch_lsx_vaddi_bu:
7069 case Intrinsic::loongarch_lsx_vaddi_hu:
7070 case Intrinsic::loongarch_lsx_vaddi_wu:
7071 case Intrinsic::loongarch_lsx_vaddi_du:
7072 case Intrinsic::loongarch_lasx_xvaddi_bu:
7073 case Intrinsic::loongarch_lasx_xvaddi_hu:
7074 case Intrinsic::loongarch_lasx_xvaddi_wu:
7075 case Intrinsic::loongarch_lasx_xvaddi_du:
7078 case Intrinsic::loongarch_lsx_vsub_b:
7079 case Intrinsic::loongarch_lsx_vsub_h:
7080 case Intrinsic::loongarch_lsx_vsub_w:
7081 case Intrinsic::loongarch_lsx_vsub_d:
7082 case Intrinsic::loongarch_lasx_xvsub_b:
7083 case Intrinsic::loongarch_lasx_xvsub_h:
7084 case Intrinsic::loongarch_lasx_xvsub_w:
7085 case Intrinsic::loongarch_lasx_xvsub_d:
7088 case Intrinsic::loongarch_lsx_vsubi_bu:
7089 case Intrinsic::loongarch_lsx_vsubi_hu:
7090 case Intrinsic::loongarch_lsx_vsubi_wu:
7091 case Intrinsic::loongarch_lsx_vsubi_du:
7092 case Intrinsic::loongarch_lasx_xvsubi_bu:
7093 case Intrinsic::loongarch_lasx_xvsubi_hu:
7094 case Intrinsic::loongarch_lasx_xvsubi_wu:
7095 case Intrinsic::loongarch_lasx_xvsubi_du:
7098 case Intrinsic::loongarch_lsx_vneg_b:
7099 case Intrinsic::loongarch_lsx_vneg_h:
7100 case Intrinsic::loongarch_lsx_vneg_w:
7101 case Intrinsic::loongarch_lsx_vneg_d:
7102 case Intrinsic::loongarch_lasx_xvneg_b:
7103 case Intrinsic::loongarch_lasx_xvneg_h:
7104 case Intrinsic::loongarch_lasx_xvneg_w:
7105 case Intrinsic::loongarch_lasx_xvneg_d:
7109      APInt(N->getValueType(0).getScalarType().getSizeInBits(), 0,
7111      SDLoc(N), N->getValueType(0)),
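// vneg has no dedicated node: it is lowered as a subtraction from an
// all-zero splat of the matching element type, which instruction selection
// then turns into the hardware vneg/vsub form.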
7113 case Intrinsic::loongarch_lsx_vmax_b:
7114 case Intrinsic::loongarch_lsx_vmax_h:
7115 case Intrinsic::loongarch_lsx_vmax_w:
7116 case Intrinsic::loongarch_lsx_vmax_d:
7117 case Intrinsic::loongarch_lasx_xvmax_b:
7118 case Intrinsic::loongarch_lasx_xvmax_h:
7119 case Intrinsic::loongarch_lasx_xvmax_w:
7120 case Intrinsic::loongarch_lasx_xvmax_d:
7123 case Intrinsic::loongarch_lsx_vmax_bu:
7124 case Intrinsic::loongarch_lsx_vmax_hu:
7125 case Intrinsic::loongarch_lsx_vmax_wu:
7126 case Intrinsic::loongarch_lsx_vmax_du:
7127 case Intrinsic::loongarch_lasx_xvmax_bu:
7128 case Intrinsic::loongarch_lasx_xvmax_hu:
7129 case Intrinsic::loongarch_lasx_xvmax_wu:
7130 case Intrinsic::loongarch_lasx_xvmax_du:
7133 case Intrinsic::loongarch_lsx_vmaxi_b:
7134 case Intrinsic::loongarch_lsx_vmaxi_h:
7135 case Intrinsic::loongarch_lsx_vmaxi_w:
7136 case Intrinsic::loongarch_lsx_vmaxi_d:
7137 case Intrinsic::loongarch_lasx_xvmaxi_b:
7138 case Intrinsic::loongarch_lasx_xvmaxi_h:
7139 case Intrinsic::loongarch_lasx_xvmaxi_w:
7140 case Intrinsic::loongarch_lasx_xvmaxi_d:
7143 case Intrinsic::loongarch_lsx_vmaxi_bu:
7144 case Intrinsic::loongarch_lsx_vmaxi_hu:
7145 case Intrinsic::loongarch_lsx_vmaxi_wu:
7146 case Intrinsic::loongarch_lsx_vmaxi_du:
7147 case Intrinsic::loongarch_lasx_xvmaxi_bu:
7148 case Intrinsic::loongarch_lasx_xvmaxi_hu:
7149 case Intrinsic::loongarch_lasx_xvmaxi_wu:
7150 case Intrinsic::loongarch_lasx_xvmaxi_du:
7153 case Intrinsic::loongarch_lsx_vmin_b:
7154 case Intrinsic::loongarch_lsx_vmin_h:
7155 case Intrinsic::loongarch_lsx_vmin_w:
7156 case Intrinsic::loongarch_lsx_vmin_d:
7157 case Intrinsic::loongarch_lasx_xvmin_b:
7158 case Intrinsic::loongarch_lasx_xvmin_h:
7159 case Intrinsic::loongarch_lasx_xvmin_w:
7160 case Intrinsic::loongarch_lasx_xvmin_d:
7163 case Intrinsic::loongarch_lsx_vmin_bu:
7164 case Intrinsic::loongarch_lsx_vmin_hu:
7165 case Intrinsic::loongarch_lsx_vmin_wu:
7166 case Intrinsic::loongarch_lsx_vmin_du:
7167 case Intrinsic::loongarch_lasx_xvmin_bu:
7168 case Intrinsic::loongarch_lasx_xvmin_hu:
7169 case Intrinsic::loongarch_lasx_xvmin_wu:
7170 case Intrinsic::loongarch_lasx_xvmin_du:
7173 case Intrinsic::loongarch_lsx_vmini_b:
7174 case Intrinsic::loongarch_lsx_vmini_h:
7175 case Intrinsic::loongarch_lsx_vmini_w:
7176 case Intrinsic::loongarch_lsx_vmini_d:
7177 case Intrinsic::loongarch_lasx_xvmini_b:
7178 case Intrinsic::loongarch_lasx_xvmini_h:
7179 case Intrinsic::loongarch_lasx_xvmini_w:
7180 case Intrinsic::loongarch_lasx_xvmini_d:
7183 case Intrinsic::loongarch_lsx_vmini_bu:
7184 case Intrinsic::loongarch_lsx_vmini_hu:
7185 case Intrinsic::loongarch_lsx_vmini_wu:
7186 case Intrinsic::loongarch_lsx_vmini_du:
7187 case Intrinsic::loongarch_lasx_xvmini_bu:
7188 case Intrinsic::loongarch_lasx_xvmini_hu:
7189 case Intrinsic::loongarch_lasx_xvmini_wu:
7190 case Intrinsic::loongarch_lasx_xvmini_du:
7193 case Intrinsic::loongarch_lsx_vmul_b:
7194 case Intrinsic::loongarch_lsx_vmul_h:
7195 case Intrinsic::loongarch_lsx_vmul_w:
7196 case Intrinsic::loongarch_lsx_vmul_d:
7197 case Intrinsic::loongarch_lasx_xvmul_b:
7198 case Intrinsic::loongarch_lasx_xvmul_h:
7199 case Intrinsic::loongarch_lasx_xvmul_w:
7200 case Intrinsic::loongarch_lasx_xvmul_d:
7203 case Intrinsic::loongarch_lsx_vmadd_b:
7204 case Intrinsic::loongarch_lsx_vmadd_h:
7205 case Intrinsic::loongarch_lsx_vmadd_w:
7206 case Intrinsic::loongarch_lsx_vmadd_d:
7207 case Intrinsic::loongarch_lasx_xvmadd_b:
7208 case Intrinsic::loongarch_lasx_xvmadd_h:
7209 case Intrinsic::loongarch_lasx_xvmadd_w:
7210 case Intrinsic::loongarch_lasx_xvmadd_d: {
7211    EVT ResTy = N->getValueType(0);
7216 case Intrinsic::loongarch_lsx_vmsub_b:
7217 case Intrinsic::loongarch_lsx_vmsub_h:
7218 case Intrinsic::loongarch_lsx_vmsub_w:
7219 case Intrinsic::loongarch_lsx_vmsub_d:
7220 case Intrinsic::loongarch_lasx_xvmsub_b:
7221 case Intrinsic::loongarch_lasx_xvmsub_h:
7222 case Intrinsic::loongarch_lasx_xvmsub_w:
7223 case Intrinsic::loongarch_lasx_xvmsub_d: {
7224    EVT ResTy = N->getValueType(0);
7229 case Intrinsic::loongarch_lsx_vdiv_b:
7230 case Intrinsic::loongarch_lsx_vdiv_h:
7231 case Intrinsic::loongarch_lsx_vdiv_w:
7232 case Intrinsic::loongarch_lsx_vdiv_d:
7233 case Intrinsic::loongarch_lasx_xvdiv_b:
7234 case Intrinsic::loongarch_lasx_xvdiv_h:
7235 case Intrinsic::loongarch_lasx_xvdiv_w:
7236 case Intrinsic::loongarch_lasx_xvdiv_d:
7239 case Intrinsic::loongarch_lsx_vdiv_bu:
7240 case Intrinsic::loongarch_lsx_vdiv_hu:
7241 case Intrinsic::loongarch_lsx_vdiv_wu:
7242 case Intrinsic::loongarch_lsx_vdiv_du:
7243 case Intrinsic::loongarch_lasx_xvdiv_bu:
7244 case Intrinsic::loongarch_lasx_xvdiv_hu:
7245 case Intrinsic::loongarch_lasx_xvdiv_wu:
7246 case Intrinsic::loongarch_lasx_xvdiv_du:
7249 case Intrinsic::loongarch_lsx_vmod_b:
7250 case Intrinsic::loongarch_lsx_vmod_h:
7251 case Intrinsic::loongarch_lsx_vmod_w:
7252 case Intrinsic::loongarch_lsx_vmod_d:
7253 case Intrinsic::loongarch_lasx_xvmod_b:
7254 case Intrinsic::loongarch_lasx_xvmod_h:
7255 case Intrinsic::loongarch_lasx_xvmod_w:
7256 case Intrinsic::loongarch_lasx_xvmod_d:
7259 case Intrinsic::loongarch_lsx_vmod_bu:
7260 case Intrinsic::loongarch_lsx_vmod_hu:
7261 case Intrinsic::loongarch_lsx_vmod_wu:
7262 case Intrinsic::loongarch_lsx_vmod_du:
7263 case Intrinsic::loongarch_lasx_xvmod_bu:
7264 case Intrinsic::loongarch_lasx_xvmod_hu:
7265 case Intrinsic::loongarch_lasx_xvmod_wu:
7266 case Intrinsic::loongarch_lasx_xvmod_du:
7269 case Intrinsic::loongarch_lsx_vand_v:
7270 case Intrinsic::loongarch_lasx_xvand_v:
7273 case Intrinsic::loongarch_lsx_vor_v:
7274 case Intrinsic::loongarch_lasx_xvor_v:
7277 case Intrinsic::loongarch_lsx_vxor_v:
7278 case Intrinsic::loongarch_lasx_xvxor_v:
7281 case Intrinsic::loongarch_lsx_vnor_v:
7282 case Intrinsic::loongarch_lasx_xvnor_v: {
7287 case Intrinsic::loongarch_lsx_vandi_b:
7288 case Intrinsic::loongarch_lasx_xvandi_b:
7291 case Intrinsic::loongarch_lsx_vori_b:
7292 case Intrinsic::loongarch_lasx_xvori_b:
7295 case Intrinsic::loongarch_lsx_vxori_b:
7296 case Intrinsic::loongarch_lasx_xvxori_b:
7299 case Intrinsic::loongarch_lsx_vsll_b:
7300 case Intrinsic::loongarch_lsx_vsll_h:
7301 case Intrinsic::loongarch_lsx_vsll_w:
7302 case Intrinsic::loongarch_lsx_vsll_d:
7303 case Intrinsic::loongarch_lasx_xvsll_b:
7304 case Intrinsic::loongarch_lasx_xvsll_h:
7305 case Intrinsic::loongarch_lasx_xvsll_w:
7306 case Intrinsic::loongarch_lasx_xvsll_d:
7309 case Intrinsic::loongarch_lsx_vslli_b:
7310 case Intrinsic::loongarch_lasx_xvslli_b:
7313 case Intrinsic::loongarch_lsx_vslli_h:
7314 case Intrinsic::loongarch_lasx_xvslli_h:
7317 case Intrinsic::loongarch_lsx_vslli_w:
7318 case Intrinsic::loongarch_lasx_xvslli_w:
7321 case Intrinsic::loongarch_lsx_vslli_d:
7322 case Intrinsic::loongarch_lasx_xvslli_d:
7325 case Intrinsic::loongarch_lsx_vsrl_b:
7326 case Intrinsic::loongarch_lsx_vsrl_h:
7327 case Intrinsic::loongarch_lsx_vsrl_w:
7328 case Intrinsic::loongarch_lsx_vsrl_d:
7329 case Intrinsic::loongarch_lasx_xvsrl_b:
7330 case Intrinsic::loongarch_lasx_xvsrl_h:
7331 case Intrinsic::loongarch_lasx_xvsrl_w:
7332 case Intrinsic::loongarch_lasx_xvsrl_d:
7335 case Intrinsic::loongarch_lsx_vsrli_b:
7336 case Intrinsic::loongarch_lasx_xvsrli_b:
7339 case Intrinsic::loongarch_lsx_vsrli_h:
7340 case Intrinsic::loongarch_lasx_xvsrli_h:
7343 case Intrinsic::loongarch_lsx_vsrli_w:
7344 case Intrinsic::loongarch_lasx_xvsrli_w:
7347 case Intrinsic::loongarch_lsx_vsrli_d:
7348 case Intrinsic::loongarch_lasx_xvsrli_d:
7351 case Intrinsic::loongarch_lsx_vsra_b:
7352 case Intrinsic::loongarch_lsx_vsra_h:
7353 case Intrinsic::loongarch_lsx_vsra_w:
7354 case Intrinsic::loongarch_lsx_vsra_d:
7355 case Intrinsic::loongarch_lasx_xvsra_b:
7356 case Intrinsic::loongarch_lasx_xvsra_h:
7357 case Intrinsic::loongarch_lasx_xvsra_w:
7358 case Intrinsic::loongarch_lasx_xvsra_d:
7361 case Intrinsic::loongarch_lsx_vsrai_b:
7362 case Intrinsic::loongarch_lasx_xvsrai_b:
7365 case Intrinsic::loongarch_lsx_vsrai_h:
7366 case Intrinsic::loongarch_lasx_xvsrai_h:
7369 case Intrinsic::loongarch_lsx_vsrai_w:
7370 case Intrinsic::loongarch_lasx_xvsrai_w:
7373 case Intrinsic::loongarch_lsx_vsrai_d:
7374 case Intrinsic::loongarch_lasx_xvsrai_d:
7377 case Intrinsic::loongarch_lsx_vclz_b:
7378 case Intrinsic::loongarch_lsx_vclz_h:
7379 case Intrinsic::loongarch_lsx_vclz_w:
7380 case Intrinsic::loongarch_lsx_vclz_d:
7381 case Intrinsic::loongarch_lasx_xvclz_b:
7382 case Intrinsic::loongarch_lasx_xvclz_h:
7383 case Intrinsic::loongarch_lasx_xvclz_w:
7384 case Intrinsic::loongarch_lasx_xvclz_d:
7386 case Intrinsic::loongarch_lsx_vpcnt_b:
7387 case Intrinsic::loongarch_lsx_vpcnt_h:
7388 case Intrinsic::loongarch_lsx_vpcnt_w:
7389 case Intrinsic::loongarch_lsx_vpcnt_d:
7390 case Intrinsic::loongarch_lasx_xvpcnt_b:
7391 case Intrinsic::loongarch_lasx_xvpcnt_h:
7392 case Intrinsic::loongarch_lasx_xvpcnt_w:
7393 case Intrinsic::loongarch_lasx_xvpcnt_d:
7395 case Intrinsic::loongarch_lsx_vbitclr_b:
7396 case Intrinsic::loongarch_lsx_vbitclr_h:
7397 case Intrinsic::loongarch_lsx_vbitclr_w:
7398 case Intrinsic::loongarch_lsx_vbitclr_d:
7399 case Intrinsic::loongarch_lasx_xvbitclr_b:
7400 case Intrinsic::loongarch_lasx_xvbitclr_h:
7401 case Intrinsic::loongarch_lasx_xvbitclr_w:
7402 case Intrinsic::loongarch_lasx_xvbitclr_d:
7404 case Intrinsic::loongarch_lsx_vbitclri_b:
7405 case Intrinsic::loongarch_lasx_xvbitclri_b:
7407 case Intrinsic::loongarch_lsx_vbitclri_h:
7408 case Intrinsic::loongarch_lasx_xvbitclri_h:
7410 case Intrinsic::loongarch_lsx_vbitclri_w:
7411 case Intrinsic::loongarch_lasx_xvbitclri_w:
7413 case Intrinsic::loongarch_lsx_vbitclri_d:
7414 case Intrinsic::loongarch_lasx_xvbitclri_d:
7416 case Intrinsic::loongarch_lsx_vbitset_b:
7417 case Intrinsic::loongarch_lsx_vbitset_h:
7418 case Intrinsic::loongarch_lsx_vbitset_w:
7419 case Intrinsic::loongarch_lsx_vbitset_d:
7420 case Intrinsic::loongarch_lasx_xvbitset_b:
7421 case Intrinsic::loongarch_lasx_xvbitset_h:
7422 case Intrinsic::loongarch_lasx_xvbitset_w:
7423 case Intrinsic::loongarch_lasx_xvbitset_d: {
7424    EVT VecTy = N->getValueType(0);
7430 case Intrinsic::loongarch_lsx_vbitseti_b:
7431 case Intrinsic::loongarch_lasx_xvbitseti_b:
7433 case Intrinsic::loongarch_lsx_vbitseti_h:
7434 case Intrinsic::loongarch_lasx_xvbitseti_h:
7436 case Intrinsic::loongarch_lsx_vbitseti_w:
7437 case Intrinsic::loongarch_lasx_xvbitseti_w:
7439 case Intrinsic::loongarch_lsx_vbitseti_d:
7440 case Intrinsic::loongarch_lasx_xvbitseti_d:
7442 case Intrinsic::loongarch_lsx_vbitrev_b:
7443 case Intrinsic::loongarch_lsx_vbitrev_h:
7444 case Intrinsic::loongarch_lsx_vbitrev_w:
7445 case Intrinsic::loongarch_lsx_vbitrev_d:
7446 case Intrinsic::loongarch_lasx_xvbitrev_b:
7447 case Intrinsic::loongarch_lasx_xvbitrev_h:
7448 case Intrinsic::loongarch_lasx_xvbitrev_w:
7449 case Intrinsic::loongarch_lasx_xvbitrev_d: {
7450    EVT VecTy = N->getValueType(0);
7456 case Intrinsic::loongarch_lsx_vbitrevi_b:
7457 case Intrinsic::loongarch_lasx_xvbitrevi_b:
7459 case Intrinsic::loongarch_lsx_vbitrevi_h:
7460 case Intrinsic::loongarch_lasx_xvbitrevi_h:
7462 case Intrinsic::loongarch_lsx_vbitrevi_w:
7463 case Intrinsic::loongarch_lasx_xvbitrevi_w:
7465 case Intrinsic::loongarch_lsx_vbitrevi_d:
7466 case Intrinsic::loongarch_lasx_xvbitrevi_d:
7468 case Intrinsic::loongarch_lsx_vfadd_s:
7469 case Intrinsic::loongarch_lsx_vfadd_d:
7470 case Intrinsic::loongarch_lasx_xvfadd_s:
7471 case Intrinsic::loongarch_lasx_xvfadd_d:
7474 case Intrinsic::loongarch_lsx_vfsub_s:
7475 case Intrinsic::loongarch_lsx_vfsub_d:
7476 case Intrinsic::loongarch_lasx_xvfsub_s:
7477 case Intrinsic::loongarch_lasx_xvfsub_d:
7480 case Intrinsic::loongarch_lsx_vfmul_s:
7481 case Intrinsic::loongarch_lsx_vfmul_d:
7482 case Intrinsic::loongarch_lasx_xvfmul_s:
7483 case Intrinsic::loongarch_lasx_xvfmul_d:
7486 case Intrinsic::loongarch_lsx_vfdiv_s:
7487 case Intrinsic::loongarch_lsx_vfdiv_d:
7488 case Intrinsic::loongarch_lasx_xvfdiv_s:
7489 case Intrinsic::loongarch_lasx_xvfdiv_d:
7492 case Intrinsic::loongarch_lsx_vfmadd_s:
7493 case Intrinsic::loongarch_lsx_vfmadd_d:
7494 case Intrinsic::loongarch_lasx_xvfmadd_s:
7495 case Intrinsic::loongarch_lasx_xvfmadd_d:
7497      N->getOperand(2), N->getOperand(3));
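// vfmadd maps directly onto the generic FMA node with its three vector
// operands passed through; no LoongArch-specific node is required.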
7498 case Intrinsic::loongarch_lsx_vinsgr2vr_b:
7500      N->getOperand(1), N->getOperand(2),
7502 case Intrinsic::loongarch_lsx_vinsgr2vr_h:
7503 case Intrinsic::loongarch_lasx_xvinsgr2vr_w:
7505      N->getOperand(1), N->getOperand(2),
7507 case Intrinsic::loongarch_lsx_vinsgr2vr_w:
7508 case Intrinsic::loongarch_lasx_xvinsgr2vr_d:
7510      N->getOperand(1), N->getOperand(2),
7512 case Intrinsic::loongarch_lsx_vinsgr2vr_d:
7514      N->getOperand(1), N->getOperand(2),
7516 case Intrinsic::loongarch_lsx_vreplgr2vr_b:
7517 case Intrinsic::loongarch_lsx_vreplgr2vr_h:
7518 case Intrinsic::loongarch_lsx_vreplgr2vr_w:
7519 case Intrinsic::loongarch_lsx_vreplgr2vr_d:
7520 case Intrinsic::loongarch_lasx_xvreplgr2vr_b:
7521 case Intrinsic::loongarch_lasx_xvreplgr2vr_h:
7522 case Intrinsic::loongarch_lasx_xvreplgr2vr_w:
7523 case Intrinsic::loongarch_lasx_xvreplgr2vr_d:
7524    return DAG.getNode(LoongArchISD::VREPLGR2VR, DL, N->getValueType(0),
7527 case Intrinsic::loongarch_lsx_vreplve_b:
7528 case Intrinsic::loongarch_lsx_vreplve_h:
7529 case Intrinsic::loongarch_lsx_vreplve_w:
7530 case Intrinsic::loongarch_lsx_vreplve_d:
7531 case Intrinsic::loongarch_lasx_xvreplve_b:
7532 case Intrinsic::loongarch_lasx_xvreplve_h:
7533 case Intrinsic::loongarch_lasx_xvreplve_w:
7534 case Intrinsic::loongarch_lasx_xvreplve_d:
7535    return DAG.getNode(LoongArchISD::VREPLVE, DL, N->getValueType(0),
7539 case Intrinsic::loongarch_lsx_vpickve2gr_b:
7543 case Intrinsic::loongarch_lsx_vpickve2gr_h:
7544 case Intrinsic::loongarch_lasx_xvpickve2gr_w:
7548 case Intrinsic::loongarch_lsx_vpickve2gr_w:
7552 case Intrinsic::loongarch_lsx_vpickve2gr_bu:
7556 case Intrinsic::loongarch_lsx_vpickve2gr_hu:
7557 case Intrinsic::loongarch_lasx_xvpickve2gr_wu:
7561 case Intrinsic::loongarch_lsx_vpickve2gr_wu:
7565 case Intrinsic::loongarch_lsx_bz_b:
7566 case Intrinsic::loongarch_lsx_bz_h:
7567 case Intrinsic::loongarch_lsx_bz_w:
7568 case Intrinsic::loongarch_lsx_bz_d:
7569 case Intrinsic::loongarch_lasx_xbz_b:
7570 case Intrinsic::loongarch_lasx_xbz_h:
7571 case Intrinsic::loongarch_lasx_xbz_w:
7572 case Intrinsic::loongarch_lasx_xbz_d:
7574    return DAG.getNode(LoongArchISD::VALL_ZERO, DL, N->getValueType(0),
7577 case Intrinsic::loongarch_lsx_bz_v:
7578 case Intrinsic::loongarch_lasx_xbz_v:
7580    return DAG.getNode(LoongArchISD::VANY_ZERO, DL, N->getValueType(0),
7583 case Intrinsic::loongarch_lsx_bnz_b:
7584 case Intrinsic::loongarch_lsx_bnz_h:
7585 case Intrinsic::loongarch_lsx_bnz_w:
7586 case Intrinsic::loongarch_lsx_bnz_d:
7587 case Intrinsic::loongarch_lasx_xbnz_b:
7588 case Intrinsic::loongarch_lasx_xbnz_h:
7589 case Intrinsic::loongarch_lasx_xbnz_w:
7590 case Intrinsic::loongarch_lasx_xbnz_d:
7592    return DAG.getNode(LoongArchISD::VALL_NONZERO, DL, N->getValueType(0),
7595 case Intrinsic::loongarch_lsx_bnz_v:
7596 case Intrinsic::loongarch_lasx_xbnz_v:
7598    return DAG.getNode(LoongArchISD::VANY_NONZERO, DL, N->getValueType(0),
7601 case Intrinsic::loongarch_lasx_concat_128_s:
7602 case Intrinsic::loongarch_lasx_concat_128_d:
7603 case Intrinsic::loongarch_lasx_concat_128:
7605      N->getOperand(1), N->getOperand(2));
7617  if (Op0.getOpcode() == LoongArchISD::MOVFR2GR_S_LA64)
7629  if (Op0->getOpcode() == LoongArchISD::MOVGR2FR_W_LA64) {
7631  "Unexpected value type!");
7640  MVT VT = N->getSimpleValueType(0);
7661  if (Op0->getOpcode() == LoongArchISD::BUILD_PAIR_F64)
7674  APInt V = C->getValueAPF().bitcastToAPInt();
7689  MVT VT = N->getSimpleValueType(0);
7750  EVT VT = N->getValueType(0);
7752 if (VT != MVT::f32 && VT != MVT::f64)
7754 if (VT == MVT::f32 && !Subtarget.hasBasicF())
7756 if (VT == MVT::f64 && !Subtarget.hasBasicD())
7779  return DAG.getNode(LoongArchISD::SITOF, SDLoc(N), VT, Load);
7836      Subtarget.hasExtLASX() && N1.hasOneUse())
7848  return DAG.getNode(N.getOpcode(), DL, VT, N0, N1);
7858  EVT VT = N.getValueType();
7875  switch (N.getOpcode()) {
7891  EVT VT = N->getValueType(0);
7906  EVT VT = N->getValueType(0);
7908  if (VT.isVector() && N->getNumOperands() == 2)
7921  EVT VT = N->getValueType(0);
7933  SDValue TrueVal = N->getOperand(1);
7934  SDValue FalseVal = N->getOperand(2);
7968  if (FalseVal.getOpcode() != ISD::ADD)
7971  SDValue Add0 = FalseVal.getOperand(0);
7972  SDValue Add1 = FalseVal.getOperand(1);
8042      : LoongArchISD::VSRAR,
8049  switch (N->getOpcode()) {
8070 case LoongArchISD::BITREV_W:
8072 case LoongArchISD::BR_CC:
8074 case LoongArchISD::SELECT_CC:
8078 case LoongArchISD::MOVGR2FR_W_LA64:
8080 case LoongArchISD::MOVFR2GR_S_LA64:
8082 case LoongArchISD::VMSKLTZ:
8083 case LoongArchISD::XVMSKLTZ:
8085 case LoongArchISD::SPLIT_PAIR_F64:
8087 case LoongArchISD::VANDN:
8093 case LoongArchISD::VPACKEV:
8094 case LoongArchISD::VPERMI:
8120  MF->insert(It, BreakMBB);
8124  SinkMBB->splice(SinkMBB->end(), MBB, std::next(MI.getIterator()), MBB->end());
8125  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
8137  MBB->addSuccessor(BreakMBB);
8138  MBB->addSuccessor(SinkMBB);
8144 BreakMBB->addSuccessor(SinkMBB);
8156  switch (MI.getOpcode()) {
8159 case LoongArch::PseudoVBZ:
8160 CondOpc = LoongArch::VSETEQZ_V;
8162 case LoongArch::PseudoVBZ_B:
8163 CondOpc = LoongArch::VSETANYEQZ_B;
8165 case LoongArch::PseudoVBZ_H:
8166 CondOpc = LoongArch::VSETANYEQZ_H;
8168 case LoongArch::PseudoVBZ_W:
8169 CondOpc = LoongArch::VSETANYEQZ_W;
8171 case LoongArch::PseudoVBZ_D:
8172 CondOpc = LoongArch::VSETANYEQZ_D;
8174 case LoongArch::PseudoVBNZ:
8175 CondOpc = LoongArch::VSETNEZ_V;
8177 case LoongArch::PseudoVBNZ_B:
8178 CondOpc = LoongArch::VSETALLNEZ_B;
8180 case LoongArch::PseudoVBNZ_H:
8181 CondOpc = LoongArch::VSETALLNEZ_H;
8183 case LoongArch::PseudoVBNZ_W:
8184 CondOpc = LoongArch::VSETALLNEZ_W;
8186 case LoongArch::PseudoVBNZ_D:
8187 CondOpc = LoongArch::VSETALLNEZ_D;
8189 case LoongArch::PseudoXVBZ:
8190 CondOpc = LoongArch::XVSETEQZ_V;
8192 case LoongArch::PseudoXVBZ_B:
8193 CondOpc = LoongArch::XVSETANYEQZ_B;
8195 case LoongArch::PseudoXVBZ_H:
8196 CondOpc = LoongArch::XVSETANYEQZ_H;
8198 case LoongArch::PseudoXVBZ_W:
8199 CondOpc = LoongArch::XVSETANYEQZ_W;
8201 case LoongArch::PseudoXVBZ_D:
8202 CondOpc = LoongArch::XVSETANYEQZ_D;
8204 case LoongArch::PseudoXVBNZ:
8205 CondOpc = LoongArch::XVSETNEZ_V;
8207 case LoongArch::PseudoXVBNZ_B:
8208 CondOpc = LoongArch::XVSETALLNEZ_B;
8210 case LoongArch::PseudoXVBNZ_H:
8211 CondOpc = LoongArch::XVSETALLNEZ_H;
8213 case LoongArch::PseudoXVBNZ_W:
8214 CondOpc = LoongArch::XVSETALLNEZ_W;
8216 case LoongArch::PseudoXVBNZ_D:
8217 CondOpc = LoongArch::XVSETALLNEZ_D;
8232 F->insert(It, FalseBB);
8233 F->insert(It, TrueBB);
8234 F->insert(It, SinkBB);
8237  SinkBB->splice(SinkBB->end(), BB, std::next(MI.getIterator()), BB->end());
8266      MI.getOperand(0).getReg())
8273  MI.eraseFromParent();
8281  unsigned BroadcastOp;
8283  switch (MI.getOpcode()) {
8286 case LoongArch::PseudoXVINSGR2VR_B:
8288 BroadcastOp = LoongArch::XVREPLGR2VR_B;
8289 InsOp = LoongArch::XVEXTRINS_B;
8291 case LoongArch::PseudoXVINSGR2VR_H:
8293 BroadcastOp = LoongArch::XVREPLGR2VR_H;
8294 InsOp = LoongArch::XVEXTRINS_H;
8306  unsigned Idx = MI.getOperand(3).getImm();
8314      .addReg(XSrc, {}, LoongArch::sub_128);
8316      TII->get(HalfSize == 8 ? LoongArch::VINSGR2VR_H
8317                             : LoongArch::VINSGR2VR_B),
8325      .addImm(LoongArch::sub_128);
8332  BuildMI(*BB, MI, DL, TII->get(LoongArch::XVPERMI_Q), ScratchReg2)
8335      .addImm(Idx >= HalfSize ? 48 : 18);
8340      .addImm((Idx >= HalfSize ? Idx - HalfSize : Idx) * 17);
8343 MI.eraseFromParent();
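// PseudoXVINSGR2VR_{B,H} has no single LASX instruction, so the expansion
// operates on 128-bit halves: the scalar is inserted with VINSGR2VR into
// the relevant half and XVPERMI_Q / XVEXTRINS recombine the halves. The
// 48/18 and Idx * 17 immediates appear to encode the half selection and the
// within-half source/destination element pair.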
8350 assert(Subtarget.hasExtLSX());
8358 unsigned BroadcastOp, CTOp, PickOp;
8359  switch (MI.getOpcode()) {
8362 case LoongArch::PseudoCTPOP_B:
8363 BroadcastOp = LoongArch::VREPLGR2VR_B;
8364 CTOp = LoongArch::VPCNT_B;
8365 PickOp = LoongArch::VPICKVE2GR_B;
8367 case LoongArch::PseudoCTPOP_H:
8368 case LoongArch::PseudoCTPOP_H_LA32:
8369 BroadcastOp = LoongArch::VREPLGR2VR_H;
8370 CTOp = LoongArch::VPCNT_H;
8371 PickOp = LoongArch::VPICKVE2GR_H;
8373 case LoongArch::PseudoCTPOP_W:
8374 case LoongArch::PseudoCTPOP_W_LA32:
8375 BroadcastOp = LoongArch::VREPLGR2VR_W;
8376 CTOp = LoongArch::VPCNT_W;
8377 PickOp = LoongArch::VPICKVE2GR_W;
8379 case LoongArch::PseudoCTPOP_D:
8380 BroadcastOp = LoongArch::VREPLGR2VR_D;
8381 CTOp = LoongArch::VPCNT_D;
8382 PickOp = LoongArch::VPICKVE2GR_D;
8392 MI.eraseFromParent();
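// Scalar ctpop is routed through the vector unit: broadcast the GPR with
// VREPLGR2VR, count bits per element with VPCNT, then copy element 0 back
// with VPICKVE2GR; the per-width pseudos pick matching B/H/W/D opcodes.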
8406 unsigned EleBits = 8;
8407 unsigned NotOpc = 0;
8410  switch (MI.getOpcode()) {
8413 case LoongArch::PseudoVMSKLTZ_B:
8414 MskOpc = LoongArch::VMSKLTZ_B;
8416 case LoongArch::PseudoVMSKLTZ_H:
8417 MskOpc = LoongArch::VMSKLTZ_H;
8420 case LoongArch::PseudoVMSKLTZ_W:
8421 MskOpc = LoongArch::VMSKLTZ_W;
8424 case LoongArch::PseudoVMSKLTZ_D:
8425 MskOpc = LoongArch::VMSKLTZ_D;
8428 case LoongArch::PseudoVMSKGEZ_B:
8429 MskOpc = LoongArch::VMSKGEZ_B;
8431 case LoongArch::PseudoVMSKEQZ_B:
8432 MskOpc = LoongArch::VMSKNZ_B;
8433 NotOpc = LoongArch::VNOR_V;
8435 case LoongArch::PseudoVMSKNEZ_B:
8436 MskOpc = LoongArch::VMSKNZ_B;
8438 case LoongArch::PseudoXVMSKLTZ_B:
8439 MskOpc = LoongArch::XVMSKLTZ_B;
8440 RC = &LoongArch::LASX256RegClass;
8442 case LoongArch::PseudoXVMSKLTZ_H:
8443 MskOpc = LoongArch::XVMSKLTZ_H;
8444 RC = &LoongArch::LASX256RegClass;
8447 case LoongArch::PseudoXVMSKLTZ_W:
8448 MskOpc = LoongArch::XVMSKLTZ_W;
8449 RC = &LoongArch::LASX256RegClass;
8452 case LoongArch::PseudoXVMSKLTZ_D:
8453 MskOpc = LoongArch::XVMSKLTZ_D;
8454 RC = &LoongArch::LASX256RegClass;
8457 case LoongArch::PseudoXVMSKGEZ_B:
8458 MskOpc = LoongArch::XVMSKGEZ_B;
8459 RC = &LoongArch::LASX256RegClass;
8461 case LoongArch::PseudoXVMSKEQZ_B:
8462 MskOpc = LoongArch::XVMSKNZ_B;
8463 NotOpc = LoongArch::XVNOR_V;
8464 RC = &LoongArch::LASX256RegClass;
8466 case LoongArch::PseudoXVMSKNEZ_B:
8467 MskOpc = LoongArch::XVMSKNZ_B;
8468 RC = &LoongArch::LASX256RegClass;
8483  if (TRI->getRegSizeInBits(*RC) > 128) {
8493      TII->get(Subtarget.is64Bit() ? LoongArch::BSTRINS_D
8494                                   : LoongArch::BSTRINS_W),
8498      .addImm(256 / EleBits - 1)
8506 MI.eraseFromParent();
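// The [X]VMSK pseudos compress a vector test into a scalar bit-mask: MskOpc
// yields one bit per element, NotOpc (vector NOR) inverts the mask for the
// EQZ forms, and for 256-bit sources the two half-masks are merged into one
// GPR with BSTRINS.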
8513  assert(MI.getOpcode() == LoongArch::SplitPairF64Pseudo &&
8514         "Unexpected instruction");
8526  MI.eraseFromParent();
8533  assert(MI.getOpcode() == LoongArch::BuildPairF64Pseudo &&
8534         "Unexpected instruction");
8550 MI.eraseFromParent();
8555  switch (MI.getOpcode()) {
8558 case LoongArch::Select_GPR_Using_CC_GPR:
8594  if (MI.getOperand(2).isReg())
8595    RHS = MI.getOperand(2).getReg();
8596  auto CC = static_cast<unsigned>(MI.getOperand(3).getImm());
8600  SelectDests.insert(MI.getOperand(0).getReg());
8604       SequenceMBBI != E; ++SequenceMBBI) {
8605    if (SequenceMBBI->isDebugInstr())
8608    if (SequenceMBBI->getOperand(1).getReg() != LHS ||
8609        !SequenceMBBI->getOperand(2).isReg() ||
8610        SequenceMBBI->getOperand(2).getReg() != RHS ||
8611        SequenceMBBI->getOperand(3).getImm() != CC ||
8612        SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
8613        SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
8615    LastSelectPseudo = &*SequenceMBBI;
8617    SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
8620    if (SequenceMBBI->hasUnmodeledSideEffects() ||
8621        SequenceMBBI->mayLoadOrStore() ||
8622        SequenceMBBI->usesCustomInsertionHook())
8625    return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
8640  F->insert(I, IfFalseMBB);
8641  F->insert(I, TailMBB);
8644  unsigned CallFrameSize = TII.getCallFrameSizeAt(*LastSelectPseudo);
8650  TailMBB->push_back(DebugInstr->removeFromParent());
8654  TailMBB->splice(TailMBB->end(), HeadMBB,
8664  if (MI.getOperand(2).isImm())
8676  auto SelectMBBI = MI.getIterator();
8677  auto SelectEnd = std::next(LastSelectPseudo->getIterator());
8679  while (SelectMBBI != SelectEnd) {
8680    auto Next = std::next(SelectMBBI);
8684    TII.get(LoongArch::PHI), SelectMBBI->getOperand(0).getReg())
8685        .addReg(SelectMBBI->getOperand(4).getReg())
8687        .addReg(SelectMBBI->getOperand(5).getReg())
8694 F->getProperties().resetNoPHIs();
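// The inserter converts a run of adjacent select pseudos that share one
// condition into a single diamond: HeadMBB branches around IfFalseMBB, the
// tail block receives the remaining instructions, and every select becomes
// a PHI merging its true/false operands, so the whole sequence costs one
// branch. resetNoPHIs() records that PHIs now exist in the function.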
8700  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
8703  switch (MI.getOpcode()) {
8706 case LoongArch::DIV_W:
8707 case LoongArch::DIV_WU:
8708 case LoongArch::MOD_W:
8709 case LoongArch::MOD_WU:
8710 case LoongArch::DIV_D:
8711 case LoongArch::DIV_DU:
8712 case LoongArch::MOD_D:
8713 case LoongArch::MOD_DU:
8716 case LoongArch::WRFCSR: {
8718      LoongArch::FCSR0 + MI.getOperand(0).getImm())
8719      .addReg(MI.getOperand(1).getReg());
8720    MI.eraseFromParent();
8723  case LoongArch::RDFCSR: {
8724    MachineInstr *ReadFCSR =
8726        MI.getOperand(0).getReg())
8727        .addReg(LoongArch::FCSR0 + MI.getOperand(1).getImm());
8729 MI.eraseFromParent();
8732 case LoongArch::Select_GPR_Using_CC_GPR:
8734 case LoongArch::BuildPairF64Pseudo:
8736 case LoongArch::SplitPairF64Pseudo:
8738 case LoongArch::PseudoVBZ:
8739 case LoongArch::PseudoVBZ_B:
8740 case LoongArch::PseudoVBZ_H:
8741 case LoongArch::PseudoVBZ_W:
8742 case LoongArch::PseudoVBZ_D:
8743 case LoongArch::PseudoVBNZ:
8744 case LoongArch::PseudoVBNZ_B:
8745 case LoongArch::PseudoVBNZ_H:
8746 case LoongArch::PseudoVBNZ_W:
8747 case LoongArch::PseudoVBNZ_D:
8748 case LoongArch::PseudoXVBZ:
8749 case LoongArch::PseudoXVBZ_B:
8750 case LoongArch::PseudoXVBZ_H:
8751 case LoongArch::PseudoXVBZ_W:
8752 case LoongArch::PseudoXVBZ_D:
8753 case LoongArch::PseudoXVBNZ:
8754 case LoongArch::PseudoXVBNZ_B:
8755 case LoongArch::PseudoXVBNZ_H:
8756 case LoongArch::PseudoXVBNZ_W:
8757 case LoongArch::PseudoXVBNZ_D:
8759 case LoongArch::PseudoXVINSGR2VR_B:
8760 case LoongArch::PseudoXVINSGR2VR_H:
8762 case LoongArch::PseudoCTPOP_B:
8763 case LoongArch::PseudoCTPOP_H:
8764 case LoongArch::PseudoCTPOP_W:
8765 case LoongArch::PseudoCTPOP_D:
8766 case LoongArch::PseudoCTPOP_H_LA32:
8767 case LoongArch::PseudoCTPOP_W_LA32:
8769 case LoongArch::PseudoVMSKLTZ_B:
8770 case LoongArch::PseudoVMSKLTZ_H:
8771 case LoongArch::PseudoVMSKLTZ_W:
8772 case LoongArch::PseudoVMSKLTZ_D:
8773 case LoongArch::PseudoVMSKGEZ_B:
8774 case LoongArch::PseudoVMSKEQZ_B:
8775 case LoongArch::PseudoVMSKNEZ_B:
8776 case LoongArch::PseudoXVMSKLTZ_B:
8777 case LoongArch::PseudoXVMSKLTZ_H:
8778 case LoongArch::PseudoXVMSKLTZ_W:
8779 case LoongArch::PseudoXVMSKLTZ_D:
8780 case LoongArch::PseudoXVMSKGEZ_B:
8781 case LoongArch::PseudoXVMSKEQZ_B:
8782 case LoongArch::PseudoXVMSKNEZ_B:
8784 case TargetOpcode::STATEPOINT:
8790  MI.addOperand(*MI.getMF(),
8792      LoongArch::R1, true,
8795  if (!Subtarget.is64Bit())
8803  unsigned *Fast) const {
8804 if (!Subtarget.hasUAL())
8822 LoongArch::R7, LoongArch::R8, LoongArch::R9,
8823 LoongArch::R10, LoongArch::R11};
8838 LoongArch::R23, LoongArch::R24, LoongArch::R25, LoongArch::R26,
8839 LoongArch::R27, LoongArch::R28, LoongArch::R29, LoongArch::R30,
8840 LoongArch::R4, LoongArch::R5, LoongArch::R6, LoongArch::R7,
8841 LoongArch::R8, LoongArch::R9, LoongArch::R10, LoongArch::R11,
8842 LoongArch::R12, LoongArch::R13, LoongArch::R14, LoongArch::R15,
8843 LoongArch::R16, LoongArch::R17, LoongArch::R18, LoongArch::R19,
8849 LoongArch::F3, LoongArch::F4, LoongArch::F5,
8850 LoongArch::F6, LoongArch::F7};
8853 LoongArch::F0_64, LoongArch::F1_64, LoongArch::F2_64, LoongArch::F3_64,
8854 LoongArch::F4_64, LoongArch::F5_64, LoongArch::F6_64, LoongArch::F7_64};
8857 LoongArch::VR3, LoongArch::VR4, LoongArch::VR5,
8858 LoongArch::VR6, LoongArch::VR7};
8861 LoongArch::XR3, LoongArch::XR4, LoongArch::XR5,
8862 LoongArch::XR6, LoongArch::XR7};
8865 switch (State.getCallingConv()) {
8867 if (!State.isVarArg())
8871    return State.AllocateReg(ArgGPRs);
8879  unsigned ValNo2, MVT ValVT2, MVT LocVT2,
8881  unsigned GRLenInBytes = GRLen / 8;
8892      State.AllocateStack(GRLenInBytes, StackAlign),
8895      ValNo2, ValVT2, State.AllocateStack(GRLenInBytes, Align(GRLenInBytes)),
8906      ValNo2, ValVT2, State.AllocateStack(GRLenInBytes, Align(GRLenInBytes)),
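// Helper for arguments split into two GRLen pieces: the first half may
// still be passed in a register while the second goes to a GRLen-aligned
// stack slot, matching the ABI rule for 2*GRLen-sized aggregates.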
8914  unsigned ValNo, MVT ValVT,
8917  unsigned GRLen = DL.getLargestLegalIntTypeSizeInBits();
8918  assert((GRLen == 32 || GRLen == 64) && "Unsupported GRLen");
8919  MVT GRLenVT = GRLen == 32 ? MVT::i32 : MVT::i64;
8924  if (IsRet && ValNo > 1)
8928  bool UseGPRForFloat = true;
8938  UseGPRForFloat = ArgFlags.isVarArg();
8951  unsigned TwoGRLenInBytes = (2 * GRLen) / 8;
8954      DL.getTypeAllocSize(OrigTy) == TwoGRLenInBytes) {
8955  unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
8957  if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
8963      State.getPendingArgFlags();
8966      "PendingLocs and PendingArgFlags out of sync");
8970  UseGPRForFloat = true;
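// Values of exactly 2*GRLen bits must start in an even-numbered GPR: when
// the first free register has an odd index it is skipped, so the value
// occupies an aligned register pair (or a register plus a stack slot).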
8972  if (UseGPRForFloat && ValVT == MVT::f32) {
8975  } else if (UseGPRForFloat && GRLen == 64 && ValVT == MVT::f64) {
8978  } else if (UseGPRForFloat && GRLen == 32 && ValVT == MVT::f64) {
8981    assert(PendingLocs.empty() && "Can't lower f64 if it is split");
9023      PendingLocs.size() <= 2) {
9024    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
9029    PendingLocs.clear();
9030    PendingArgFlags.clear();
9037  unsigned StoreSizeBytes = GRLen / 8;
9040  if (ValVT == MVT::f32 && !UseGPRForFloat) {
9042  } else if (ValVT == MVT::f64 && !UseGPRForFloat) {
9046    UseGPRForFloat = false;
9047    StoreSizeBytes = 16;
9048    StackAlign = Align(16);
9051    UseGPRForFloat = false;
9052    StoreSizeBytes = 32;
9053    StackAlign = Align(32);
9059      Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
9063  if (!PendingLocs.empty()) {
9065    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
9066    for (auto &It : PendingLocs) {
9068      It.convertToReg(Reg);
9073    PendingLocs.clear();
9074    PendingArgFlags.clear();
9077  assert((!UseGPRForFloat || LocVT == GRLenVT) &&
9078         "Expected a GRLenVT at this stage");
9095 void LoongArchTargetLowering::analyzeInputArgs(
9098     LoongArchCCAssignFn Fn) const {
9100  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
9101    MVT ArgVT = Ins[i].VT;
9102    Type *ArgTy = nullptr;
9104      ArgTy = FType->getReturnType();
9105    else if (Ins[i].isOrigArg())
9106      ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
9110        CCInfo, IsRet, ArgTy)) {
9111      LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " << ArgVT
9118 void LoongArchTargetLowering::analyzeOutputArgs(
9121     CallLoweringInfo *CLI, LoongArchCCAssignFn Fn) const {
9122  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
9123    MVT ArgVT = Outs[i].VT;
9124    Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
9128        CCInfo, IsRet, OrigTy)) {
9129      LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " << ArgVT
9148  Val = DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64, DL, MVT::f32, Val);
9170  if (In.isOrigArg()) {
9175    if ((BitWidth <= 32 && In.Flags.isSExt()) ||
9176        (BitWidth < 32 && In.Flags.isZExt())) {
9226  Register LoVReg = RegInfo.createVirtualRegister(&LoongArch::GPRRegClass);
9239  Register HiVReg = RegInfo.createVirtualRegister(&LoongArch::GPRRegClass);
9243  return DAG.getNode(LoongArchISD::BUILD_PAIR_F64, DL, MVT::f64, Lo, Hi);
9257  Val = DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Val);
9269 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9273 LoongArch::R23, LoongArch::R24, LoongArch::R25,
9274 LoongArch::R26, LoongArch::R27, LoongArch::R28,
9275 LoongArch::R29, LoongArch::R30, LoongArch::R31};
9282 if (LocVT == MVT::f32) {
9285 static const MCPhysReg FPR32List[] = {LoongArch::F24, LoongArch::F25,
9286 LoongArch::F26, LoongArch::F27};
9293 if (LocVT == MVT::f64) {
9296 static const MCPhysReg FPR64List[] = {LoongArch::F28_64, LoongArch::F29_64,
9297 LoongArch::F30_64, LoongArch::F31_64};
9328 "GHC calling convention requires the F and D extensions");
9332 MVT GRLenVT = Subtarget.getGRLenVT();
9333 unsigned GRLenInBytes = Subtarget.getGRLen() / 8;
9335 std::vector<SDValue> OutChains;
9344  analyzeInputArgs(MF, CCInfo, Ins, false, CC_LoongArch);
9346  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
9363  unsigned ArgIndex = Ins[InsIdx].OrigArgIndex;
9364  unsigned ArgPartOffset = Ins[InsIdx].PartOffset;
9365  assert(ArgPartOffset == 0);
9366  while (i + 1 != e && Ins[InsIdx + 1].OrigArgIndex == ArgIndex) {
9368    unsigned PartOffset = Ins[InsIdx + 1].PartOffset - ArgPartOffset;
9392  int VaArgOffset, VarArgsSaveSize;
9396  if (ArgRegs.size() == Idx) {
9398    VarArgsSaveSize = 0;
9400    VarArgsSaveSize = GRLenInBytes * (ArgRegs.size() - Idx);
9401    VaArgOffset = -VarArgsSaveSize;
9407  LoongArchFI->setVarArgsFrameIndex(FI);
9415  VarArgsSaveSize += GRLenInBytes;
9420  for (unsigned I = Idx; I < ArgRegs.size();
9421       ++I, VaArgOffset += GRLenInBytes) {
9422    const Register Reg = RegInfo.createVirtualRegister(RC);
9423    RegInfo.addLiveIn(ArgRegs[I], Reg);
9431        ->setValue((Value *)nullptr);
9432    OutChains.push_back(Store);
9434 LoongArchFI->setVarArgsSaveSize(VarArgsSaveSize);
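// In varargs functions the still-unallocated argument GPRs are spilled into
// a save area just below the frame (VaArgOffset is negative) so va_arg can
// walk register- and stack-passed arguments uniformly; the spill stores are
// collected in OutChains and tokenised together afterwards.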
9439 if (!OutChains.empty()) {
9440 OutChains.push_back(Chain);
9455  if (N->getNumValues() != 1)
9457  if (!N->hasNUsesOfValue(1, 0))
9460  SDNode *Copy = *N->user_begin();
9466  if (Copy->getGluedNode())
9470  bool HasRet = false;
9472  if (Node->getOpcode() != LoongArchISD::RET)
9480 Chain = Copy->getOperand(0);
9485 bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
9489  auto CalleeCC = CLI.CallConv;
9490  auto &Outs = CLI.Outs;
9492  auto CallerCC = Caller.getCallingConv();
9499  for (auto &VA : ArgLocs)
9505  auto IsCallerStructRet = Caller.hasStructRetAttr();
9506  auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
9507  if (IsCallerStructRet || IsCalleeStructRet)
9511  for (auto &Arg : Outs)
9512    if (Arg.Flags.isByVal())
9517  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
9518  if (CalleeCC != CallerCC) {
9519    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
9520    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
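// Tail-call eligibility is conservative: every argument must fit in
// registers, neither caller nor callee may use sret, no byval arguments are
// allowed, and with differing conventions the callee's preserved-register
// mask must cover the caller's (regmaskSubsetEqual).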
9546 MVT GRLenVT = Subtarget.getGRLenVT();
9558  analyzeOutputArgs(MF, ArgCCInfo, Outs, false, &CLI, CC_LoongArch);
9562 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
9568 "site marked musttail");
9575  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
9577    if (!Flags.isByVal())
9581    unsigned Size = Flags.getByValSize();
9582    Align Alignment = Flags.getNonZeroByValAlign();
9589    Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
9591                          false, nullptr, std::nullopt,
9603  for (unsigned i = 0, j = 0, e = ArgLocs.size(), OutIdx = 0; i != e;
9606    SDValue ArgValue = OutVals[OutIdx];
9614        DAG.getNode(LoongArchISD::SPLIT_PAIR_F64, DL,
9615                    DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
9627    if (!StackPtr.getNode())
9639    RegsToPass.push_back(std::make_pair(RegHigh, Hi));
9654 unsigned ArgIndex = Outs[OutIdx].OrigArgIndex;
9655 unsigned ArgPartOffset = Outs[OutIdx].PartOffset;
9656 assert(ArgPartOffset == 0);
9661 while (i + 1 != e && Outs[OutIdx + 1].OrigArgIndex == ArgIndex) {
9662 SDValue PartValue = OutVals[OutIdx + 1];
9663 unsigned PartOffset = Outs[OutIdx + 1].PartOffset - ArgPartOffset;
9678    for (const auto &Part : Parts) {
9679      SDValue PartValue = Part.first;
9680      SDValue PartOffset = Part.second;
9687    ArgValue = SpillSlot;
9693    if (Flags.isByVal())
9694      ArgValue = ByValArgs[j++];
9701    assert(!IsTailCall && "Tail call not allowed if stack is used "
9702                          "for passing parameters");
9705    if (!StackPtr.getNode())
9718  if (!MemOpChains.empty())
9724  for (auto &Reg : RegsToPass) {
9725    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
9747 Ops.push_back(Chain);
9748 Ops.push_back(Callee);
9752  for (auto &Reg : RegsToPass)
9753    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
9758  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
9759  assert(Mask && "Missing call preserved mask for calling convention");
9765 Ops.push_back(Glue);
9774 Op = IsTailCall ? LoongArchISD::TAIL : LoongArchISD::CALL;
9777 Op = IsTailCall ? LoongArchISD::TAIL_MEDIUM : LoongArchISD::CALL_MEDIUM;
9780    assert(Subtarget.is64Bit() && "Large code model requires LA64");
9781    Op = IsTailCall ? LoongArchISD::TAIL_LARGE : LoongArchISD::CALL_LARGE;
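// The call opcode is chosen per code model: plain CALL/TAIL for small, and
// the *_MEDIUM / *_LARGE variants when the callee may sit outside
// direct-branch range and the address needs a longer materialisation
// sequence (large mode additionally requires LA64).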
9803  analyzeInputArgs(MF, RetCCInfo, Ins, true, CC_LoongArch);
9806  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
9807    auto &VA = RVLocs[i];
9815    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
9816      assert(VA.needsCustom());
9821      RetValue = DAG.getNode(LoongArchISD::BUILD_PAIR_F64, DL, MVT::f64,
9822                             RetValue, RetValue2);
9835     const Type *RetTy) const {
9837  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
9839  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
9843        Outs[i].Flags, CCInfo, true, nullptr))
9869  for (unsigned i = 0, e = RVLocs.size(), OutIdx = 0; i < e; ++i, ++OutIdx) {
9870    SDValue Val = OutVals[OutIdx];
9879        DAG.getVTList(MVT::i32, MVT::i32), Val);
9883    Register RegHi = RVLocs[++i].getLocReg();
9908  return DAG.getNode(LoongArchISD::RET, DL, MVT::Other, RetOps);
9916     const APInt &SplatValue, const unsigned SplatBitSize) const {
9919  if (SplatBitSize == 16 && !(V & 0x00FF)) {
9921    RequiredImm = (0b10101 << 8) | (V >> 8);
9922    return {true, RequiredImm};
9923  } else if (SplatBitSize == 32) {
9925    if (!(V & 0xFFFF00FF)) {
9926      RequiredImm = (0b10001 << 8) | (V >> 8);
9927      return {true, RequiredImm};
9930    if (!(V & 0xFF00FFFF)) {
9931      RequiredImm = (0b10010 << 8) | (V >> 16);
9932      return {true, RequiredImm};
9935    if (!(V & 0x00FFFFFF)) {
9936      RequiredImm = (0b10011 << 8) | (V >> 24);
9937      return {true, RequiredImm};
9940    if ((V & 0xFFFF00FF) == 0xFF) {
9941      RequiredImm = (0b10110 << 8) | (V >> 8);
9942      return {true, RequiredImm};
9945    if ((V & 0xFF00FFFF) == 0xFFFF) {
9946      RequiredImm = (0b10111 << 8) | (V >> 16);
9947      return {true, RequiredImm};
9950    if ((V & 0x7E07FFFF) == 0x3E000000 || (V & 0x7E07FFFF) == 0x40000000) {
9952        (0b11010 << 8) | (((V >> 24) & 0xC0) ^ 0x40) | ((V >> 19) & 0x3F);
9953      return {true, RequiredImm};
9955  } else if (SplatBitSize == 64) {
9957    if ((V & 0xFFFFFFFF7E07FFFFULL) == 0x3E000000ULL ||
9958        (V & 0xFFFFFFFF7E07FFFFULL) == 0x40000000ULL) {
9960        (0b11011 << 8) | (((V >> 24) & 0xC0) ^ 0x40) | ((V >> 19) & 0x3F);
9961      return {true, RequiredImm};
9964    if ((V & 0x7FC0FFFFFFFFFFFFULL) == 0x4000000000000000ULL ||
9965        (V & 0x7FC0FFFFFFFFFFFFULL) == 0x3FC0000000000000ULL) {
9967        (0b11100 << 8) | (((V >> 56) & 0xC0) ^ 0x40) | ((V >> 48) & 0x3F);
9968      return {true, RequiredImm};
9971    auto sameBitsPreByte = [](uint64_t x) -> std::pair<bool, uint8_t> {
9973      for (int i = 0; i < 8; ++i) {
9975        if (byte == 0 || byte == 0xFF)
9976          res |= ((byte & 1) << i);
9983    auto [IsSame, Suffix] = sameBitsPreByte(V);
9985      RequiredImm = (0b11001 << 8) | Suffix;
9986      return {true, RequiredImm};
9989  return {false, RequiredImm};
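// The vldi immediate returned here is 13 bits, (mode << 8) | payload: the
// five mode bits (0b10001 ... 0b11100 above) say how the 8-bit payload is
// expanded, e.g. which byte of each element it fills, whether surrounding
// bits are zeros or ones, a packed sign/exponent fragment for the FP
// patterns, or (mode 0b11001) one payload bit replicated across each byte.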
9994 if (!Subtarget.hasExtLSX())
9997 if (VT == MVT::f32) {
9998 uint64_t masked = Imm.bitcastToAPInt().getZExtValue() & 0x7e07ffff;
9999 return (masked == 0x3e000000 || masked == 0x40000000);
10002 if (VT == MVT::f64) {
10003 uint64_t masked = Imm.bitcastToAPInt().getZExtValue() & 0x7fc0ffffffffffff;
10004 return (masked == 0x3fc0000000000000 || masked == 0x4000000000000000);
10010 bool LoongArchTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
10011                                            bool ForCodeSize) const {
10013  if (VT == MVT::f32 && !Subtarget.hasBasicF())
10015  if (VT == MVT::f64 && !Subtarget.hasBasicD())
10017  return (Imm.isZero() || Imm.isExactlyValue(1.0) || isFPImmVLDILegal(Imm, VT));
10028bool LoongArchTargetLowering::shouldInsertFencesForAtomic(
10038  Type *Ty = I->getOperand(0)->getType();
10040 unsigned Size = Ty->getIntegerBitWidth();
10060 unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
10065 if (Subtarget.hasExtLASX())
10067 else if (Subtarget.hasExtLSX())
10074  EVT VT = Y.getValueType();
10077  return Subtarget.hasExtLSX() && VT.isInteger();
10088 case Intrinsic::loongarch_masked_atomicrmw_xchg_i32:
10089 case Intrinsic::loongarch_masked_atomicrmw_add_i32:
10090 case Intrinsic::loongarch_masked_atomicrmw_sub_i32:
10091 case Intrinsic::loongarch_masked_atomicrmw_nand_i32: {
10094 Info.memVT = MVT::i32;
10095  Info.ptrVal = I.getArgOperand(0);
10097  Info.align = Align(4);
10116 "Unable to expand");
10117 unsigned MinWordSize = 4;
10129  Value *AlignedAddr = Builder.CreateIntrinsic(
10130      Intrinsic::ptrmask, {PtrTy, IntTy},
10131      {Addr, ConstantInt::get(IntTy, ~(uint64_t)(MinWordSize - 1))}, nullptr,
10134  Value *AddrInt = Builder.CreatePtrToInt(Addr, IntTy);
10135  Value *PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
10136  Value *ShiftAmt = Builder.CreateShl(PtrLSB, 3);
10137  ShiftAmt = Builder.CreateTrunc(ShiftAmt, WordType, "ShiftAmt");
10138  Value *Mask = Builder.CreateShl(
10139      ConstantInt::get(WordType,
10140                       (1 << (DL.getTypeStoreSize(ValueType) * 8)) - 1),
10142  Value *Inv_Mask = Builder.CreateNot(Mask, "Inv_Mask");
10143  Value *ValOperand_Shifted =
10144      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), WordType),
10145                        ShiftAmt, "ValOperand_Shifted");
10148  NewOperand = Builder.CreateOr(ValOperand_Shifted, Inv_Mask, "AndOperand");
10150  NewOperand = ValOperand_Shifted;
10153  Builder.CreateAtomicRMW(Op, AlignedAddr, NewOperand, Align(MinWordSize),
10156  Value *Shift = Builder.CreateLShr(NewAI, ShiftAmt, "shifted");
10157  Value *Trunc = Builder.CreateTrunc(Shift, ValueType, "extracted");
10158  Value *FinalOldResult = Builder.CreateBitCast(Trunc, ValueType);
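// Classic part-word atomic expansion: llvm.ptrmask aligns the address down
// to a 4-byte word, the byte offset times 8 becomes the shift amount, and
// the narrow value is zero-extended and shifted into position. For 'and'
// the bits outside the field are set to ones via Inv_Mask so neighbouring
// bytes survive; the old value is recovered by shifting back and truncating.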
10177 if (Subtarget.hasLAM_BH() && Subtarget.is64Bit() &&
10185 if (Subtarget.hasLAMCAS()) {
10207 return Intrinsic::loongarch_masked_atomicrmw_xchg_i64;
10209 return Intrinsic::loongarch_masked_atomicrmw_add_i64;
10211 return Intrinsic::loongarch_masked_atomicrmw_sub_i64;
10213 return Intrinsic::loongarch_masked_atomicrmw_nand_i64;
10215 return Intrinsic::loongarch_masked_atomicrmw_umax_i64;
10217 return Intrinsic::loongarch_masked_atomicrmw_umin_i64;
10219 return Intrinsic::loongarch_masked_atomicrmw_max_i64;
10221 return Intrinsic::loongarch_masked_atomicrmw_min_i64;
10231 return Intrinsic::loongarch_masked_atomicrmw_xchg_i32;
10233 return Intrinsic::loongarch_masked_atomicrmw_add_i32;
10235 return Intrinsic::loongarch_masked_atomicrmw_sub_i32;
10237 return Intrinsic::loongarch_masked_atomicrmw_nand_i32;
10239 return Intrinsic::loongarch_masked_atomicrmw_umax_i32;
10241 return Intrinsic::loongarch_masked_atomicrmw_umin_i32;
10243 return Intrinsic::loongarch_masked_atomicrmw_max_i32;
10245 return Intrinsic::loongarch_masked_atomicrmw_min_i32;
10257 if (Subtarget.hasLAMCAS())
10269 unsigned GRLen = Subtarget.getGRLen();
10271  Value *FailureOrdering =
10272      Builder.getIntN(Subtarget.getGRLen(), static_cast<uint64_t>(FailOrd));
10273 Intrinsic::ID CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i32;
10275 CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i64;
10276 CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
10277 NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
10278 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
10281 Value *Result = Builder.CreateIntrinsic(
10282 CmpXchgIntrID, Tys, {AlignedAddr, CmpVal, NewVal, Mask, FailureOrdering});
10284 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
10300      Builder.CreateNot(Mask, "Inv_Mask"),
10307 unsigned GRLen = Subtarget.getGRLen();
10316 Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
10317 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
10318 ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
10331 unsigned ValWidth =
10334 Builder.CreateSub(Builder.getIntN(GRLen, GRLen - ValWidth), ShiftAmt);
10335 Result = Builder.CreateCall(LlwOpScwLoop,
10336 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
10339 Builder.CreateCall(LlwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
10343 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
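// === Aside (added sketch): why SextShamt is (GRLen - ValWidth) - ShiftAmt.
// Shifting the loaded word left by that amount puts the narrow field's sign
// bit at the top; an arithmetic shift right by the same amount then returns
// the field to its position with its sign extended, so a plain signed compare
// works inside the LL/SC loop. Illustrative; two's-complement (C++20) shift
// semantics assumed.
#include <cstdint>

inline int64_t sextFieldInPlace(int64_t Word, unsigned ShiftAmt,
                                unsigned ValWidth) {
  const unsigned GRLen = 64;
  const unsigned SextShamt = GRLen - ValWidth - ShiftAmt;
  return int64_t(uint64_t(Word) << SextShamt) >> SextShamt;
}

// e.g. the i8 field 0x80 at bit offset 8: sextFieldInPlace(0x8000, 8, 8)
// yields 0xFFFFFFFFFFFF8000 -- negative, as a signed min/max must see it.
// === end aside ===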
Register LoongArchTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  // $a0 carries the exception address on landing-pad entry.
  return LoongArch::R4;
}

Register LoongArchTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // $a1 carries the exception selector (typeid).
  return LoongArch::R5;
}
  // (getEstimateRefinementSteps: f64 estimates need two Newton-Raphson steps,
  // f32 one.)
  int RefinementSteps = VT.getScalarType() == MVT::f64 ? 2 : 1;
  return RefinementSteps;
  // ...
  assert(Subtarget.hasFrecipe() &&
         "Reciprocal estimate queried on unsupported target");
  // (isSupportedReciprocalEstimateType: the type must be backed by the
  // matching FP or SIMD feature.)
  return Subtarget.hasBasicD();
  // ...
  return Subtarget.hasExtLSX();
  // ...
  return Subtarget.hasExtLASX();
SDValue LoongArchTargetLowering::getSqrtEstimate(SDValue Operand,
                                                 SelectionDAG &DAG, int Enabled,
                                                 int &RefinementSteps,
                                                 bool &UseOneConstNR,
                                                 bool Reciprocal) const {
  assert(/* ... */ "Enabled should never be Disabled here");
  if (!Subtarget.hasFrecipe())
    return SDValue();
  // ...
  UseOneConstNR = false;
  // ...
  if (Reciprocal || RefinementSteps > 0)
    // ...
}

SDValue LoongArchTargetLowering::getRecipEstimate(SDValue Operand,
                                                  SelectionDAG &DAG, int Enabled,
                                                  int &RefinementSteps) const {
  assert(/* ... */ "Enabled should never be Disabled here");
  if (!Subtarget.hasFrecipe())
    return SDValue();
  // ...
  return DAG.getNode(LoongArchISD::FRECIPE, DL, VT, Operand);
}
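// === Aside (added sketch): what a "refinement step" buys. frecipe.{s,d}
// returns a low-precision estimate of 1/a; each Newton-Raphson iteration
// roughly doubles the number of correct bits, which is why the code above
// uses one step for f32 and two for f64. Standalone demo (the initial guess
// stands in for the hardware estimate):
#include <cstdio>

inline double refineRecip(double x, double a) {
  return x * (2.0 - a * x); // one Newton-Raphson step for f(x) = 1/x - a
}

int main() {
  const double a = 3.0;
  double x = 0.3; // crude estimate of 1/3
  for (int step = 1; step <= 2; ++step) {
    x = refineRecip(x, a);
    std::printf("step %d: %.17g\n", step, x); // 0.33, then 0.3333
  }
}
// === end aside ===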
LoongArchTargetLowering::ConstraintType
LoongArchTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    // ...
    }
  }
  if (Constraint == "ZC" || Constraint == "ZB")
    return C_Memory;
  // ...
}

// (getInlineAsmMemConstraint:)
  return StringSwitch<InlineAsm::ConstraintCode>(ConstraintCode)
      // ...
std::pair<unsigned, const TargetRegisterClass *>
LoongArchTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      // ...
      return std::make_pair(0U, &LoongArch::GPRRegClass);
    case 'q':
      return std::make_pair(0U, &LoongArch::GPRNoR0R1RegClass);
    case 'f':
      // Hand out the narrowest FP or vector class that can hold VT.
      if (Subtarget.hasBasicF() && VT == MVT::f32)
        return std::make_pair(0U, &LoongArch::FPR32RegClass);
      if (Subtarget.hasBasicD() && VT == MVT::f64)
        return std::make_pair(0U, &LoongArch::FPR64RegClass);
      if (Subtarget.hasExtLSX() &&
          TRI->isTypeLegalForClass(LoongArch::LSX128RegClass, VT))
        return std::make_pair(0U, &LoongArch::LSX128RegClass);
      if (Subtarget.hasExtLASX() &&
          TRI->isTypeLegalForClass(LoongArch::LASX256RegClass, VT))
        return std::make_pair(0U, &LoongArch::LASX256RegClass);
      break;
    // ...
    }
  }
  // Explicit register names like "{$f10}":
  // ...
  bool IsFP = Constraint[2] == 'f';
  std::pair<StringRef, StringRef> Temp = Constraint.split('$');
  std::pair<unsigned, const TargetRegisterClass *> R;
  // ...
  unsigned RegNo = R.first;
  if (LoongArch::F0 <= RegNo && RegNo <= LoongArch::F31) {
    // Prefer the aliased 64-bit register when double support is available.
    if (Subtarget.hasBasicD() && (VT == MVT::f64 || VT == MVT::Other)) {
      unsigned DReg = RegNo - LoongArch::F0 + LoongArch::F0_64;
      return std::make_pair(DReg, &LoongArch::FPR64RegClass);
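// === Aside (added usage sketch, LoongArch-only): how the constraints above
// surface in user code. "f" requests an FP register from the classes chosen
// above; GCC/Clang extended asm syntax. Not from this file.
inline double fusedMulAdd(double a, double b, double c) {
  double d;
  asm("fmadd.d %0, %1, %2, %3" : "=f"(d) : "f"(a), "f"(b), "f"(c));
  return d; // computes a * b + c in one FP instruction
}
// === end aside ===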
void LoongArchTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'l':
      // Validate & create a signed 16-bit immediate operand.
      uint64_t CVal = C->getSExtValue();
      // ...
        Ops.push_back(DAG.getSignedTargetConstant(CVal, SDLoc(Op),
                                                  Subtarget.getGRLenVT()));
    case 'I':
      // Validate & create a signed 12-bit immediate operand.
      uint64_t CVal = C->getSExtValue();
      // ...
        Ops.push_back(DAG.getSignedTargetConstant(CVal, SDLoc(Op),
                                                  Subtarget.getGRLenVT()));
    case 'J':
      // Integer zero only.
      if (C->getZExtValue() == 0)
        // ...
    case 'K':
      // Validate & create an unsigned 12-bit immediate operand.
      uint64_t CVal = C->getZExtValue();
      // ...
#define GET_REGISTER_MATCHER
#include "LoongArchGenAsmMatcher.inc"
// ...
  // (getRegisterByName: resolve names like "$r2" via the generated matcher;
  // only registers the subtarget reserves may be requested this way.)
  std::string NewRegName = Name.second.str();
  // ...
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg))
    // ...
  // (decomposeMulByConstant: report multiplications that are better done as
  // shifts plus adds/subs or alsl than as a multiply.)
  const APInt &Imm = ConstNode->getAPIntValue();
  // x * (2^N +/- 1) and its negations: one shift plus one add/sub.
  if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
      (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
    return true;
  // x * (2^N + 2/4/8/16): folds to alsl plus a shift.
  if (ConstNode->hasOneUse() &&
      ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
       (Imm - 8).isPowerOf2() || (Imm - 16).isPowerOf2()))
    return true;
  // Otherwise, for immediates outside the single-instruction range, peel off
  // trailing zeros and examine the remaining odd part.
  if (ConstNode->hasOneUse() && !(Imm.sge(-2048) && Imm.sle(4095))) {
    unsigned Shifts = Imm.countr_zero();
    // ...
    APInt ImmPop = Imm.ashr(Shifts);
    if (ImmPop == 3 || ImmPop == 5 || ImmPop == 9 || ImmPop == 17)
      return true;
    // The odd part +/- one shifted bit being a power of two still beats
    // materializing the constant.
    APInt ImmSmall = APInt(Imm.getBitWidth(), 1ULL << Shifts, true);
    if ((Imm - ImmSmall).isPowerOf2() || (Imm + ImmSmall).isPowerOf2() ||
        (ImmSmall - Imm).isPowerOf2())
      return true;
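// === Aside (added sketch): the rewrites those checks stand for, spelled out
// on plain integers; (x << k) + x is a single alsl.d on LoongArch. None of
// this is from the source file.
#include <cstdint>

constexpr int64_t mul9(int64_t x) { return (x << 3) + x; }  // Imm - 1 = 2^3
constexpr int64_t mul7(int64_t x) { return (x << 3) - x; }  // Imm + 1 = 2^3
constexpr int64_t mul72(int64_t x) { return mul9(x) << 3; } // 72 = 9 << 3

static_assert(mul9(12345) == 12345 * 9, "shift-add rewrite");
static_assert(mul7(12345) == 12345 * 7, "shift-sub rewrite");
static_assert(mul72(12345) == 12345 * 72, "peel zeros, match odd part 9");
// === end aside ===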
bool LoongArchTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                    const AddrMode &AM,
                                                    Type *Ty, unsigned AS,
                                                    Instruction *I) const {
  // LoongArch addressing is base + simm12 offset, or base + unscaled index
  // register.
  // ...
  switch (AM.Scale) {
  // ...
  // (isZExtFree: i8/i16 extending loads can use ld.bu/ld.hu, so the zero
  // extension is free.)
  EVT MemVT = LD->getMemoryVT();
  if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
      // ...

  // (isSExtCheaperThanZExt: LA64 keeps i32 values sign-extended in registers.)
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;

  // (hasAndNot:)
  if (Y.getValueType().isVector())
    // ...
bool LoongArchTargetLowering::shouldSignExtendTypeInLibCall(Type *Ty,
                                                            bool IsSigned) const {
  // LA64 passes 32-bit integer libcall arguments sign-extended.
  if (Subtarget.is64Bit() && Ty->isIntegerTy(32))
    return true;
  // ...
}

bool LoongArchTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  if (Subtarget.isSoftFPABI() && (Type.isFloatingPoint() && !Type.isVector() &&
                                  Type.getSizeInBits() < Subtarget.getGRLen()))
    // ...
bool LoongArchTargetLowering::shouldAlignPointerArgs(CallInst *CI,
                                                     unsigned &MinSize,
                                                     Align &PrefAlign) const {
  // ...
  if (Subtarget.is64Bit()) {
    // ...
    PrefAlign = Align(8);
  } else {
    // ...
    PrefAlign = Align(4);
  }
bool LoongArchTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.has_value();
  // For the ABI, f16/bf16 scalars travel in the low 16 bits of an f32
  // register (see the aside below).
  if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
      PartVT == MVT::f32) {
    // ...

SDValue LoongArchTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.has_value();
  if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
      PartVT == MVT::f32) {
    // ...
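// === Aside (added sketch): the f16-in-f32 convention above, modeled on raw
// bits -- the half's pattern travels in the low 16 bits of the 32-bit
// container and the high bits are unspecified (anyext). Illustrative only.
#include <cstdint>

constexpr uint32_t packHalfIntoF32Bits(uint16_t HalfBits) {
  return HalfBits; // bitcast f16 -> i16, anyext i16 -> i32 (-> bitcast f32)
}

constexpr uint16_t unpackHalfFromF32Bits(uint32_t F32Bits) {
  return uint16_t(F32Bits); // truncate i32 -> i16, bitcast i16 -> f16
}

static_assert(unpackHalfFromF32Bits(packHalfIntoF32Bits(0x3C00)) == 0x3C00,
              "0x3C00 is half 1.0; the round-trip is lossless");
// === end aside ===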
  // (getRegisterTypeForCallingConv: an f16 argument travels in an f32
  // register when basic FP is available.)
  if (VT == MVT::f16 && Subtarget.hasBasicF())
    return MVT::f32;
  // ...

unsigned LoongArchTargetLowering::getNumRegistersForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {
  if (VT == MVT::f16 && Subtarget.hasBasicF())
    return 1;
  // ...
  // (computeKnownBitsForTargetNode:)
  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  // ...
  case LoongArchISD::VPICK_ZEXT_ELT: {
    // The picked element is zero-extended, so the high bits are known zero.
    // ...
  }
  // ...
  }

bool LoongArchTargetLowering::SimplifyDemandedBitsForTargetNode(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  // ...
  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  // ...
  case LoongArchISD::VMSKLTZ:
  case LoongArchISD::XVMSKLTZ: {
    MVT SrcVT = Src.getSimpleValueType();
    // ...
    // Only the low NumElts result bits carry data (one per source element);
    // if none of them are demanded the node is dead. See the aside below.
    if (OriginalDemandedBits.countr_zero() >= NumElts)
      // ...
    APInt KnownUndef, KnownZero;
    // ...
    // Each result bit is the sign (top) bit of one source element.
    if (KnownSrc.One[SrcBits - 1])
      // ...
    else if (KnownSrc.Zero[SrcBits - 1])
      // ...
    if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
            Src, DemandedSrcBits, DemandedElts, TLO.DAG, Depth + 1))
      // ...
  }
  // ...
  }
  return TargetLowering::SimplifyDemandedBitsForTargetNode(
      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
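// === Aside (added sketch): a scalar model of [x]vmskltz -- bit i of the
// result is the sign bit of element i and every higher bit is zero, which is
// exactly why only the low NumElts demanded bits matter above. Illustrative.
#include <cstdint>

inline uint32_t vmskltzModel(const int8_t *Elts, unsigned NumElts) {
  uint32_t Mask = 0;
  for (unsigned i = 0; i != NumElts; ++i)
    Mask |= uint32_t(Elts[i] < 0) << i; // element sign bit -> result bit i
  return Mask;
}
// === end aside ===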
bool LoongArchTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                      unsigned Index) const {
  // ...
}

bool LoongArchTargetLowering::isExtractVecEltCheap(EVT VT,
                                                   unsigned Index) const {
  // Extracting lane 0 of an f32/f64 vector is free: the scalar already sits
  // in the low bits of the vector register.
  EVT EltVT = VT.getVectorElementType();
  return (EltVT == MVT::f32 || EltVT == MVT::f64) && Index == 0;
}
static MCRegister MatchRegisterName(StringRef Name)
static bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType)
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performSELECT_CCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static SDValue performSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static MCRegister MatchRegisterAltName(StringRef Name)
Maps from the set of all alternative registernames to a register number.
Function Alias Analysis Results
static uint64_t getConstant(const Value *IndexValue)
static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static MachineBasicBlock * emitSelectPseudo(MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode)
static SDValue unpackFromRegLoc(const CSKYSubtarget &Subtarget, SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static SDValue performINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
const MCPhysReg ArgFPR32s[]
static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Dispatching routine to lower various 128-bit LoongArch vector shuffles.
static SDValue lowerVECTOR_SHUFFLE_XVSHUF4I(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into XVSHUF4I (if possible).
static SDValue lowerVECTOR_SHUFFLE_VPICKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPICKEV (if possible).
static SDValue combineSelectToBinOp(SDNode *N, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_XVPICKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPICKOD (if possible).
static SDValue unpackF64OnLA32DSoftABI(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const CCValAssign &HiVA, const SDLoc &DL)
static bool fitsRegularPattern(typename SmallVectorImpl< ValType >::const_iterator Begin, unsigned CheckStride, typename SmallVectorImpl< ValType >::const_iterator End, ValType ExpectedIndex, unsigned ExpectedIndexStride)
Determine whether a range fits a regular pattern of values.
static SDValue lowerVECTOR_SHUFFLE_IsReverse(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE whose result is the reversed source vector.
static SDValue PromoteMaskArithmetic(SDValue N, const SDLoc &DL, EVT VT, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned Depth)
static SDValue emitIntrinsicErrorMessage(SDValue Op, StringRef ErrorMsg, SelectionDAG &DAG)
static cl::opt< bool > ZeroDivCheck("loongarch-check-zero-division", cl::Hidden, cl::desc("Trap on integer division by zero."), cl::init(false))
static SDValue lowerVECTOR_SHUFFLE_XVPERMI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into XVPERMI (if possible).
static SDValue lowerVECTOR_SHUFFLE_VSHUF(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into VSHUF.
static int getEstimateRefinementSteps(EVT VT, const LoongArchSubtarget &Subtarget)
static bool isSupportedReciprocalEstimateType(EVT VT, const LoongArchSubtarget &Subtarget)
static void emitErrorAndReplaceIntrinsicResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, StringRef ErrorMsg, bool WithChain=true)
static SDValue lowerVECTOR_SHUFFLEAsByteRotate(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE as byte rotate (if possible).
static SDValue checkIntrinsicImmArg(SDValue Op, unsigned ImmOp, SelectionDAG &DAG, bool IsSigned=false)
static SDValue lowerVECTOR_SHUFFLE_XVINSVE0(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into XVINSVE0 (if possible).
static SDValue performMOVFR2GR_SCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_VILVH(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VILVH (if possible).
static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI, unsigned ValNo, MVT ValVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsRet, Type *OrigTy)
static SDValue performVSELECTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
static SDValue performSPLIT_PAIR_F64Combine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue performBITCASTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static MachineBasicBlock * emitSplitPairF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVectorBitSetImm(SDNode *Node, SelectionDAG &DAG)
static SDValue performSETCC_BITCASTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue performEXTENDCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_XVPACKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPACKOD (if possible).
static bool buildVPERMIInfo(ArrayRef< int > Mask, SDValue V1, SDValue V2, SmallVectorImpl< SDValue > &SrcVec, unsigned &MaskImm)
static std::optional< bool > matchSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue Val)
static SDValue combineAndNotIntoVANDN(SDNode *N, const SDLoc &DL, SelectionDAG &DAG)
Try to fold: (and (xor X, -1), Y) -> (vandn X, Y).
static SDValue lowerBUILD_VECTORAsBroadCastLoad(BuildVectorSDNode *BVOp, const SDLoc &DL, SelectionDAG &DAG)
#define CRC_CASE_EXT_BINARYOP(NAME, NODE)
static SDValue lowerVectorBitRevImm(SDNode *Node, SelectionDAG &DAG)
static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size, unsigned Depth)
static bool isConstantSplatVector(SDValue N, APInt &SplatValue, unsigned MinSizeInBits)
static SDValue lowerVECTOR_SHUFFLEAsShift(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, const APInt &Zeroable)
Lower VECTOR_SHUFFLE as shift (if possible).
static SDValue lowerVECTOR_SHUFFLE_VSHUF4I(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into VSHUF4I (if possible).
static SDValue truncateVecElts(SDNode *Node, SelectionDAG &DAG)
static bool CC_LoongArch_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static MachineBasicBlock * insertDivByZeroTrap(MachineInstr &MI, MachineBasicBlock *MBB)
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG)
static SDValue lowerVECTOR_SHUFFLE_VEXTRINS(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into VEXTRINS (if possible).
static SDValue lowerVectorBitClear(SDNode *Node, SelectionDAG &DAG)
static SDValue lowerVECTOR_SHUFFLE_VPACKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPACKEV (if possible).
static MachineBasicBlock * emitPseudoVMSKCOND(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue performSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue performVANDNCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
Do target-specific dag combines on LoongArchISD::VANDN nodes.
static void replaceVPICKVE2GRResults(SDNode *Node, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp)
static SDValue lowerVECTOR_SHUFFLEAsZeroOrAnyExtend(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const APInt &Zeroable)
Lower VECTOR_SHUFFLE as ZERO_EXTEND Or ANY_EXTEND (if possible).
static SDValue legalizeIntrinsicImmArg(SDNode *Node, unsigned ImmOp, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, bool IsSigned=false)
static cl::opt< MaterializeFPImm > MaterializeFPImmInsNum("loongarch-materialize-float-imm", cl::Hidden, cl::desc("Maximum number of instructions used (including code sequence " "to generate the value and moving the value to FPR) when " "materializing floating-point immediates (default = 3)"), cl::init(MaterializeFPImm3Ins), cl::values(clEnumValN(NoMaterializeFPImm, "0", "Use constant pool"), clEnumValN(MaterializeFPImm2Ins, "2", "Materialize FP immediate within 2 instructions"), clEnumValN(MaterializeFPImm3Ins, "3", "Materialize FP immediate within 3 instructions"), clEnumValN(MaterializeFPImm4Ins, "4", "Materialize FP immediate within 4 instructions"), clEnumValN(MaterializeFPImm5Ins, "5", "Materialize FP immediate within 5 instructions"), clEnumValN(MaterializeFPImm6Ins, "6", "Materialize FP immediate within 6 instructions " "(behaves same as 5 on loongarch64)")))
static SDValue emitIntrinsicWithChainErrorMessage(SDValue Op, StringRef ErrorMsg, SelectionDAG &DAG)
static bool CC_LoongArchAssign2GRLen(unsigned GRLen, CCState &State, CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, MVT ValVT2, MVT LocVT2, ISD::ArgFlagsTy ArgFlags2)
static unsigned getLoongArchWOpcode(unsigned Opcode)
const MCPhysReg ArgFPR64s[]
static MachineBasicBlock * emitPseudoCTPOP(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue performMOVGR2FR_WCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
#define IOCSRWR_CASE(NAME, NODE)
#define CRC_CASE_EXT_UNARYOP(NAME, NODE)
static SDValue lowerVECTOR_SHUFFLE_VPACKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPACKOD (if possible).
static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT, SDValue Src, const SDLoc &DL)
static SDValue isNOT(SDValue V, SelectionDAG &DAG)
static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Dispatching routine to lower various 256-bit LoongArch vector shuffles.
static SDValue lowerVECTOR_SHUFFLE_VREPLVEI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into VREPLVEI (if possible).
static MachineBasicBlock * emitPseudoXVINSGR2VR(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
const MCPhysReg PreserveNoneArgGPRs[]
static void fillVector(ArrayRef< SDValue > Ops, SelectionDAG &DAG, SDLoc DL, const LoongArchSubtarget &Subtarget, SDValue &Vector, EVT ResTy)
static SDValue fillSubVectorFromBuildVector(BuildVectorSDNode *Node, SelectionDAG &DAG, SDLoc DL, const LoongArchSubtarget &Subtarget, EVT ResTy, unsigned first)
static bool isSelectPseudo(MachineInstr &MI)
static SDValue foldBinOpIntoSelectIfProfitable(SDNode *BO, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue lowerVectorSplatImm(SDNode *Node, unsigned ImmOp, SelectionDAG &DAG, bool IsSigned=false)
const MCPhysReg ArgGPRs[]
static SDValue lowerVECTOR_SHUFFLE_XVPERM(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into XVPERM (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVILVL(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVILVL (if possible).
static SDValue lowerVECTOR_SHUFFLE_VPERMI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into VPERMI (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVEXTRINS(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into XVEXTRINS (if possible).
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, int NumOp, unsigned ExtOpc=ISD::ANY_EXTEND)
static void replaceVecCondBranchResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp)
#define ASRT_LE_GT_CASE(NAME)
static SDValue lowerVECTOR_SHUFFLE_XVPACKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPACKEV (if possible).
static SDValue performBR_CCCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static void computeZeroableShuffleElements(ArrayRef< int > Mask, SDValue V1, SDValue V2, APInt &KnownUndef, APInt &KnownZero)
Compute whether each element of a shuffle is zeroable.
static SDValue combineFP_ROUND(SDValue N, const SDLoc &DL, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue performCONCAT_VECTORSCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue widenShuffleMask(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
static MachineBasicBlock * emitVecCondBranchPseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static bool canonicalizeShuffleVectorByLane(const SDLoc &DL, MutableArrayRef< int > Mask, MVT VT, SDValue &V1, SDValue &V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Shuffle vectors by lane to generate more optimized instructions.
static SDValue lowerVECTOR_SHUFFLE_XVILVH(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVILVH (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVSHUF(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVSHUF (if possible).
static void replaceCMP_XCHG_128Results(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static SDValue lowerVectorPickVE2GR(SDNode *N, SelectionDAG &DAG, unsigned ResOp)
static SDValue performBITREV_WCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
#define IOCSRRD_CASE(NAME, NODE)
static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2, ArrayRef< int > Mask)
Attempts to match vector shuffle as byte rotation.
static SDValue lowerVECTOR_SHUFFLE_XVPICKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPICKEV (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVREPLVEI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into XVREPLVEI (if possible).
static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode, unsigned ScalarSizeInBits, ArrayRef< int > Mask, int MaskOffset, const APInt &Zeroable)
Attempts to match a shuffle mask against the VBSLL, VBSRL, VSLLI and VSRLI instruction.
static SDValue lowerVECTOR_SHUFFLE_VILVL(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VILVL (if possible).
static SDValue lowerVectorBitClearImm(SDNode *Node, SelectionDAG &DAG)
static MachineBasicBlock * emitBuildPairF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLEAsLanePermuteAndShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE as lane permute and then shuffle (if possible).
static SDValue performVMSKLTZCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static void replaceINTRINSIC_WO_CHAINResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_VPICKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPICKOD (if possible).
static Intrinsic::ID getIntrinsicForMaskedAtomicRMWBinOp(unsigned GRLen, AtomicRMWInst::BinOp BinOp)
static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS, ISD::CondCode &CC, SelectionDAG &DAG)
static Register allocateArgGPR(CCState &State)
static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT, ArrayRef< int > Mask, SmallVectorImpl< int > &RepeatedMask)
Test whether a shuffle mask is equivalent within each sub-lane.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static CodeModel::Model getCodeModel(const PPCSubtarget &S, const TargetMachine &TM, const MachineOperand &MO)
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
This file defines the SmallSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static bool inRange(const MCExpr *Expr, int64_t MinValue, int64_t MaxValue, bool AllowSymbol=false)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static bool isSequentialOrUndefInRange(ArrayRef< int > Mask, unsigned Pos, unsigned Size, int Low, int Step=1)
Return true if every element in Mask, beginning from position Pos and ending in Pos + Size,...
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
unsigned countr_zero() const
Count the number of trailing zero bits.
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
int64_t getSExtValue() const
Get sign extended value.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
This class represents an incoming formal argument to a Function.
Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
Get the array size.
An instruction that atomically checks whether a specified value is in a memory location,...
Value * getCompareOperand()
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
an instruction that atomically reads a memory location, combines it with another value,...
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ USubCond
Subtract only if no unsigned overflow.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
Value * getPointerOperand()
bool isFloatingPointOperation() const
BinOp getOperation() const
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
LLVM Basic Block Representation.
bool test(unsigned Idx) const
Returns true if bit Idx is set.
size_type count() const
Returns the number of bits which are set.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
LLVM_ABI void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
LLVM_ABI void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
CCValAssign - Represent assignment of one arg/retval to a location.
static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT, LocInfo HTP, unsigned ExtraInfo=0)
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP)
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
int64_t getLocMemOffset() const
unsigned getValNo() const
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
This class represents a function call, abstracting a target machine's calling convention.
const APFloat & getValueAPF() const
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
uint64_t getZExtValue() const
int64_t getSExtValue() const
This is an important base class in LLVM.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
The size in bits of the pointer representation in a given address space.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Argument * getArg(unsigned i) const
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Common base class shared among various IRBuilders.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Class to represent integer types.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
LoongArchMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private Lo...
void addSExt32Register(Register Reg)
const LoongArchRegisterInfo * getRegisterInfo() const override
const LoongArchInstrInfo * getInstrInfo() const override
unsigned getGRLen() const
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override
Return true if result of the specified node is used by a return node only.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
TargetLowering::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(const AtomicCmpXchgInst *CI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const override
Perform a masked cmpxchg using a target-specific intrinsic.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
std::pair< bool, uint64_t > isImmVLDILegalForMode1(const APInt &SplatValue, const unsigned SplatBitSize) const
Check if a constant splat can be generated using [x]vldi, where imm[12] is 1.
void getTgtMemIntrinsic(SmallVectorImpl< IntrinsicInfo > &Infos, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
bool isExtractVecEltCheap(EVT VT, unsigned Index) const override
Return true if extraction of a scalar element from the given vector type at the given index is cheap.
LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Determine if the target supports unaligned memory accesses.
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, Align &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override
Perform a masked atomicrmw using a target-specific intrinsic.
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool signExtendConstant(const ConstantInt *CI) const override
Return true if this constant should be sign extended when promoting to a larger type.
TargetLowering::AtomicExpansionKind shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const override
Returns true if arguments should be sign-extended in lib calls.
bool shouldScalarizeBinop(SDValue VecOp) const override
Try to convert an extract element of a vector binary operation into an extract element followed by a ...
bool isFPImmVLDILegal(const APFloat &Imm, EVT VT) const
bool shouldExtendTypeInLibCall(EVT Type) const override
Returns true if arguments should be extended in lib calls.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
void emitExpandAtomicRMW(AtomicRMWInst *AI) const override
Perform a atomicrmw expansion using a target-specific way.
ISD::NodeType getExtendForAtomicCmpSwapArg() const override
Returns how the platform's atomic compare and swap expects its comparison value to be extended (ZERO_...
LoongArchTargetLowering(const TargetMachine &TM, const LoongArchSubtarget &STI)
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool hasAndNotCompare(SDValue Y) const override
Return true if the target should transform: (X & Y) == Y ---> (~X & Y) == 0 (X & Y) !...
SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps) const override
Return a reciprocal estimate value for the input operand.
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, const MachineFunction &MF) const override
Returns if it's reasonable to merge stores to MemVT size.
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
Wrapper class representing physical registers. Should be passed by value.
bool hasFeature(unsigned Feature) const
static MVT getFloatingPointVT(unsigned BitWidth)
bool is128BitVector() const
Return true if this is a 128-bit vector type.
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool is256BitVector() const
Return true if this is a 256-bit vector type.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
MVT getDoubleNumVectorElementsVT() const
MVT getHalfNumVectorElementsVT() const
Return a VT for a vector type with the same element type but half the number of elements.
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
MVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
void push_back(MachineInstr *MI)
void setCallFrameSize(unsigned N)
Set the call frame size on entry to this basic block.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
bool isImplicitDef() const
LLVM_ABI void collectDebugValues(SmallVectorImpl< MachineInstr * > &DbgValues)
Scan instructions immediately following MI and collect any matching DBG_VALUEs.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI MachineInstrBundleIterator< MachineInstr > eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value,.
MachineOperand class - Representation of each machine instruction operand.
void setIsKill(bool Val=true)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
MachineMemOperand * getMemOperand() const
Return the unique MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Represent a mutable reference to an array (0 or more elements consecutively in memory),...
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
LLVM_ABI bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
size_t use_size() const
Return the number of uses of this node.
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
bool isSafeToSpeculativelyExecute(unsigned Opcode) const
Some opcodes may create immediate undefined behavior when used with some values (integer division-by-...
SDValue getExtractSubvector(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Return the VT typed sub-vector of Vec at Idx.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getInsertSubvector(const SDLoc &DL, SDValue Vec, SDValue SubVec, unsigned Idx)
Insert SubVec at the Idx element of Vec.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false, SDNodeFlags Flags={})
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
LLVM_ABI SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
LLVM_ABI std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
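getStore pairs with the getLoad entry above; a sketch assuming Chain, Ptr, and Ptr2 are in scope (the MachinePointerInfo and Align values are placeholders):

    // Hypothetical: load an i64 and store it elsewhere, threading the
    // load's output chain (result value #1) into the store.
    SDValue Ld = DAG.getLoad(MVT::i64, DL, Chain, Ptr, MachinePointerInfo());
    SDValue St = DAG.getStore(Ld.getValue(1), DL, Ld, Ptr2,
                              MachinePointerInfo(), Align(8));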
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node that starts a new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI SDValue WidenVector(const SDValue &N, const SDLoc &DL)
Widen the vector up to the next power of two using INSERT_SUBVECTOR.
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
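Most of the typed helpers in this list ultimately funnel into getNode; a minimal sketch assuming A and B are i64 operands in scope:

    SDValue Sum = DAG.getNode(ISD::ADD, DL, MVT::i64, A, B);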
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
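A one-line sketch under the same scope assumptions as the getBuildVector example above:

    // Hypothetical: splat the constant 7 across all four i32 lanes.
    SDValue Splat = DAG.getSplatBuildVector(
        MVT::v4i32, DL, DAG.getConstant(7, DL, MVT::i32));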
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI SDValue getCondCode(ISD::CondCode Cond)
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
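A hedged sketch of the two bit analyses, assuming N0 is an i64 SDValue; a combine might use this shape to prove a narrowing is safe:

    KnownBits Known = DAG.computeKnownBits(N0);  // per-bit zero/one analysis
    // Hypothetical: are the upper 32 bits provably zero?
    APInt HighBits = APInt::getHighBitsSet(64, 32);
    bool IsZExt = DAG.MaskedValueIsZero(N0, HighBits);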
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
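A sketch assuming Vec is a v4i32 value in scope; the mask below is exactly the shape isReverseMask recognizes (a lane index of -1 would mean undef):

    int Mask[4] = {3, 2, 1, 0};
    SDValue Rev = DAG.getVectorShuffle(MVT::v4i32, DL, Vec,
                                       DAG.getUNDEF(MVT::v4i32), Mask);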
ArrayRef< int > getMask() const
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
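A hedged sketch of the insert/count idiom, assuming Mask is an ArrayRef<int> of shuffle lanes:

    // Hypothetical: reject masks that reference any source lane twice.
    SmallSet<int, 8> Seen;
    for (int M : Mask)
      if (M >= 0 && !Seen.insert(M).second)
        return false; // insert().second is false if M was already present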
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
void reserve(size_type N)
typename SuperClass::const_iterator const_iterator
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr size_t size() const
Get the string size.
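A small sketch of these StringRef members; the feature string is made up for illustration:

    StringRef Features = "lsx,lasx";
    auto [Head, Tail] = Features.split(',');  // "lsx" and "lasx"
    bool IsVec = Head.starts_with("ls");      // true
    size_t Len = Head.size();                 // 3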
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
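A hedged sketch of the constructor idiom this hook serves, as it would appear inside a target's TargetLowering constructor (the opcode/type pairs are chosen for illustration, not taken from this target's actual settings):

    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); // route to custom lowering
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);          // legalizer expands it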
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to ...
void setMaxBytesForAlignment(unsigned MaxBytes)
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
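A typical guard shape in a DAG combine, assuming TLI is the TargetLowering and VT the result type (a sketch, not code from this file):

    // Hypothetical: only form a VSELECT the target declared Legal or Custom.
    if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
      return SDValue();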
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
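A sketch of the two memory-legality hooks together, again in constructor position and with purely illustrative types:

    // Hypothetically: extending i32 loads into i64 are supported natively,
    // while an f64->f32 truncating store must be expanded.
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Legal);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);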
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
std::vector< ArgListEntry > ArgListTy
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
TargetLowering(const TargetLowering &)=delete
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
Primary interface to the complete machine description for the target machine.
bool useTLSDESC() const
Returns true if this target uses TLS Descriptors.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getIntegerBitWidth() const
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
This class is used to represent EVT's, which are used to parameterize some operations.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ PreserveMost
Used for runtime calls that preserve most registers.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ PreserveNone
Used for runtime calls that preserve no general registers.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI bool isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are ~0 ...
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ BR_JT
BR_JT - Jumptable branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values, following IEEE-754 definition...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isExtVecInRegOpcode(unsigned Opcode)
LLVM_ABI bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
bool isBitwiseLogicOp(unsigned Opcode)
Whether this is bitwise logic opcode.
LLVM_ABI bool isFreezeUndef(const SDNode *N)
Return true if the specified node is FREEZE(UNDEF).
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
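A worked sketch of the two condition-code transforms:

    ISD::CondCode CC = ISD::SETLT;
    ISD::CondCode Inv = ISD::getSetCCInverse(CC, MVT::i64);  // SETGE: !(x < y)
    ISD::CondCode Swap = ISD::getSetCCSwappedOperands(CC);   // SETGT: (y > x)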
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LLVM_ABI bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
LLVM_ABI NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
bool isIntEqualitySetCC(CondCode Code)
Return true if this is a setcc instruction that performs an equality comparison when used with intege...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > OverloadTys={})
Look up the Function declaration of the intrinsic id in the Module M.
ABI getTargetABI(StringRef ABIName)
InstSeq generateInstSeq(int64_t Val)
LLVM_ABI Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Sequence
A sequence of states that a pointer may go through in which an objc_retain and objc_release are actua...
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
constexpr RegState getKillRegState(bool B)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isIntOrFPConstant(SDValue V)
Return true if V is either an integer or FP constant.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
LLVM_ABI bool widenShuffleMaskElts(int Scale, ArrayRef< int > Mask, SmallVectorImpl< int > &ScaledMask)
Try to transform a shuffle mask by replacing elements with the scaled index for an equivalent mask of...
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, or -1 if the value is zero.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
AtomicOrdering
Atomic ordering for LLVM's memory model.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
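A worked example of the immediate-classification predicates above, the kind of check an isel pattern predicate performs (values verified by hand):

    int64_t Imm = 0x0FF0;                    // 4080
    bool Simm12 = isInt<12>(Imm);            // false: 4080 > 2047
    bool Uimm12 = isUInt<12>(Imm);           // true:  4080 < 4096
    bool ShMask = isShiftedMask_64(Imm);     // true:  ones in bits 4..11
    bool Sh12x4 = isShiftedInt<12, 2>(Imm);  // true:  4080 == 1020 << 2
    unsigned Lg = Log2_64(Imm);              // 11 (floor log base 2)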
constexpr unsigned BitWidth
std::string join_items(Sep Separator, Args &&... Items)
Joins the strings in the parameter pack Items, adding Separator between the elements....
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
PointerUnion< const Value *, const PseudoSourceValue * > ValueType
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool is256BitVector() const
Return true if this is a 256-bit vector type.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
Align getNonZeroOrigAlign() const
unsigned getBitWidth() const
Get the bit width of this value.
void resetAll()
Resets the known state of all bits.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const
LLVM_ABI SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)