#include "llvm/IR/IntrinsicsLoongArch.h"

#define DEBUG_TYPE "loongarch-isel-lowering"

    cl::desc("Trap on integer division by zero."),

  if (Subtarget.hasBasicF())
  if (Subtarget.hasBasicD())

  static const MVT::SimpleValueType LSXVTs[] = {
      MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64};
  static const MVT::SimpleValueType LASXVTs[] = {
      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64};

  if (Subtarget.hasExtLSX())
  if (Subtarget.hasExtLASX())
    for (MVT VT : LASXVTs)
  if (Subtarget.hasBasicF()) {
  if (!Subtarget.hasBasicD()) {
  if (Subtarget.hasBasicD()) {

  if (Subtarget.hasExtLSX()) {
    for (MVT VT : LSXVTs) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v4f32, MVT::v2f64}) {

  if (Subtarget.hasExtLASX()) {
    for (MVT VT : LASXVTs) {
    for (MVT VT : {MVT::v4i64, MVT::v8i32, MVT::v16i16, MVT::v32i8}) {
    for (MVT VT : {MVT::v8i32, MVT::v4i32, MVT::v4i64}) {
    for (MVT VT : {MVT::v8f32, MVT::v4f64}) {

  if (Subtarget.hasExtLSX())
  switch (Op.getOpcode()) {
  case ISD::ATOMIC_FENCE:
    return lowerATOMIC_FENCE(Op, DAG);
  case ISD::EH_DWARF_CFA:
    return lowerEH_DWARF_CFA(Op, DAG);
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN:
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID:
    return lowerINTRINSIC_VOID(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::JumpTable:
    return lowerJumpTable(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, /*IsSRA=*/true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, /*IsSRA=*/false);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:
    return lowerFP_TO_SINT(Op, DAG);
  case ISD::BITCAST:
    return lowerBITCAST(Op, DAG);
  case ISD::UINT_TO_FP:
    return lowerUINT_TO_FP(Op, DAG);
  case ISD::SINT_TO_FP:
    return lowerSINT_TO_FP(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::WRITE_REGISTER:
    return lowerWRITE_REGISTER(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::BUILD_VECTOR:
    return lowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return lowerVECTOR_SHUFFLE(Op, DAG);
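// Illustrative note (not from the original source): LowerOperation is the
// hook SelectionDAG invokes for every node the constructor marked Custom.
// For example, an ISD::SRA_PARTS node (a double-GRLen arithmetic shift) is
// routed to lowerShiftRightParts(Op, DAG, true), which decomposes it into
// hi/lo operations on native registers.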
template <typename ValType>
static bool
fitsRegularPattern(typename SmallVectorImpl<ValType>::const_iterator Begin,
                   unsigned CheckStride,
                   typename SmallVectorImpl<ValType>::const_iterator End,
                   ValType ExpectedIndex, unsigned ExpectedIndexStride) {
    if (*I != -1 && *I != ExpectedIndex)
      return false;
    ExpectedIndex += ExpectedIndexStride;

    // Incrementing past End is undefined behaviour so we must increment one
    // step at a time and check for End at each step.
    for (unsigned n = 0; n < CheckStride && I != End; ++n, ++I)
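// Example (illustrative): for a v4i32 mask <0, 4, 1, 5>,
// fitsRegularPattern<int>(Begin, 2, End, 0, 1) walks positions 0 and 2
// (stride 2) and checks them against the sequence 0, 1 — a match, as is the
// odd-position check against Mask.size() + 0, 1; together they identify the
// low-half interleave matched by the VILVL lowering below. Undef (-1)
// positions always match.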
  for (const auto &M : Mask) {
  if (SplatIndex == -1)

  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
  if (fitsRegularPattern<int>(Mask.begin(), 1, Mask.end(), SplatIndex, 0)) {
    APInt Imm(64, SplatIndex);
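// Example (illustrative): the v4i32 mask <2, 2, 2, 2> is a splat of element
// 2; it fits the pattern (stride 1, expected index 2, index stride 0) and is
// lowered to a single VREPLVEI with immediate 2.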
  int SubMask[4] = {-1, -1, -1, -1};
  for (unsigned i = 0; i < 4; ++i) {
    for (unsigned j = i; j < Mask.size(); j += 4) {
      if (Idx < 0 || Idx >= 4)
      if (SubMask[i] == -1)
      else if (Idx != -1 && Idx != SubMask[i])

  for (int i = 3; i >= 0; --i) {
    int Idx = SubMask[i];
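// Example (illustrative, assuming the standard vshuf4i encoding of two bits
// per element): mask <3, 2, 1, 0> collapses to SubMask = {3, 2, 1, 0}, which
// the loop above packs little-endian into the 8-bit immediate 0b00011011
// (0x1B), i.e. "reverse every 4-element group".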
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, 0, 2))
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size(), 2))

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 2))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size(), 2))
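// Example (illustrative): for v8i16, mask <0, 8, 2, 10, 4, 12, 6, 14> places
// the even-numbered elements of V1 in the even result lanes (0, 2, 4, 6) and
// the even-numbered elements of V2 in the odd result lanes (Mask.size() + 0,
// 2, 4, 6), which is exactly the VPACKEV pattern.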
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, 1, 2))
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size() + 1, 2))

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 1, 2))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size() + 1, 2))
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, HalfSize, 1))
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size() + HalfSize, 1))

  if (fitsRegularPattern<int>(Begin + 1, 2, End, HalfSize, 1))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size() + HalfSize,
                                   1))
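// Example (illustrative): for v8i16 (HalfSize = 4), mask
// <4, 12, 5, 13, 6, 14, 7, 15> interleaves the high halves of V1 and V2
// element by element — even lanes count up from HalfSize, odd lanes from
// Mask.size() + HalfSize — matching VILVH.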
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, 0, 1))
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size(), 1))

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 1))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size(), 1))
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, Mid, 0, 2))
  else if (fitsRegularPattern<int>(Begin, 1, Mid, Mask.size(), 2))

  if (fitsRegularPattern<int>(Mid, 1, End, 0, 2))
  else if (fitsRegularPattern<int>(Mid, 1, End, Mask.size(), 2))
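// Example (illustrative): for v8i16, mask <0, 2, 4, 6, 8, 10, 12, 14> puts
// the even elements of V1 in the low half of the result and the even
// elements of V2 in the high half, matching VPICKEV.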
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, Mid, 1, 2))
  else if (fitsRegularPattern<int>(Begin, 1, Mid, Mask.size() + 1, 2))

  if (fitsRegularPattern<int>(Mid, 1, End, 1, 2))
  else if (fitsRegularPattern<int>(Mid, 1, End, Mask.size() + 1, 2))
835 "Vector type is unsupported for lsx!");
837 "Two operands have different types!");
839 "Unexpected mask size for shuffle!");
840 assert(Mask.size() % 2 == 0 &&
"Expected even mask size.");
  for (const auto &M : Mask) {
  if (SplatIndex == -1)

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;

  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
  if (fitsRegularPattern<int>(Begin, 1, End - HalfSize, SplatIndex, 0) &&
      fitsRegularPattern<int>(Begin + HalfSize, 1, End, SplatIndex + HalfSize,
                              0)) {
    APInt Imm(64, SplatIndex);
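// Example (illustrative): XVREPL128VEI replicates within each 128-bit lane,
// so on v8i32 (HalfSize = 4) the mask <1, 1, 1, 1, 5, 5, 5, 5> matches:
// positions 0..3 splat element 1 of the low lane and positions 4..7 splat
// element SplatIndex + HalfSize = 5 of the high lane.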
  if (Mask.size() <= 4)

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  unsigned LeftSize = HalfSize / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, HalfSize - LeftSize,
                              1) &&
      fitsRegularPattern<int>(Begin + HalfSize, 2, End, HalfSize + LeftSize, 1))
  else if (fitsRegularPattern<int>(Begin, 2, End - HalfSize,
                                   Mask.size() + HalfSize - LeftSize, 1) &&
           fitsRegularPattern<int>(Begin + HalfSize, 2, End,
                                   Mask.size() + HalfSize + LeftSize, 1))

  if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize,
                              HalfSize - LeftSize, 1) &&
      fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End,
                              HalfSize + LeftSize, 1))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize,
                                   Mask.size() + HalfSize - LeftSize, 1) &&
           fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End,
                                   Mask.size() + HalfSize + LeftSize, 1))
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, 0, 1) &&
      fitsRegularPattern<int>(Begin + HalfSize, 2, End, HalfSize, 1))
  else if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, Mask.size(), 1) &&
           fitsRegularPattern<int>(Begin + HalfSize, 2, End,
                                   Mask.size() + HalfSize, 1))

  if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize, 0, 1) &&
      fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End, HalfSize, 1))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize, Mask.size(),
                                   1) &&
           fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End,
                                   Mask.size() + HalfSize, 1))
  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, LeftMid, 0, 2) &&
      fitsRegularPattern<int>(Mid, 1, RightMid, HalfSize, 2))
  else if (fitsRegularPattern<int>(Begin, 1, LeftMid, Mask.size(), 2) &&
           fitsRegularPattern<int>(Mid, 1, RightMid, Mask.size() + HalfSize, 2))

  if (fitsRegularPattern<int>(LeftMid, 1, Mid, 0, 2) &&
      fitsRegularPattern<int>(RightMid, 1, End, HalfSize, 2))
  else if (fitsRegularPattern<int>(LeftMid, 1, Mid, Mask.size(), 2) &&
           fitsRegularPattern<int>(RightMid, 1, End, Mask.size() + HalfSize, 2))
  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, LeftMid, 1, 2) &&
      fitsRegularPattern<int>(Mid, 1, RightMid, HalfSize + 1, 2))
  else if (fitsRegularPattern<int>(Begin, 1, LeftMid, Mask.size() + 1, 2) &&
           fitsRegularPattern<int>(Mid, 1, RightMid, Mask.size() + HalfSize + 1,
                                   2))

  if (fitsRegularPattern<int>(LeftMid, 1, Mid, 1, 2) &&
      fitsRegularPattern<int>(RightMid, 1, End, HalfSize + 1, 2))
  else if (fitsRegularPattern<int>(LeftMid, 1, Mid, Mask.size() + 1, 2) &&
           fitsRegularPattern<int>(RightMid, 1, End, Mask.size() + HalfSize + 1,
                                   2))
  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + HalfSize;
  const auto &End = Mask.end();

  for (auto it = Begin; it < Mid; it++) {
    else if ((*it >= 0 && *it < HalfSize) ||
             (*it >= MaskSize && *it <= MaskSize + HalfSize)) {
      int M = *it < HalfSize ? *it : *it - HalfSize;
  assert((int)MaskAlloc.size() == HalfSize && "xvshuf convert failed!");

  for (auto it = Mid; it < End; it++) {
    else if ((*it >= HalfSize && *it < MaskSize) ||
             (*it >= MaskSize + HalfSize && *it < MaskSize * 2)) {
      int M = *it < MaskSize ? *it - HalfSize : *it - MaskSize;
  assert((int)MaskAlloc.size() == MaskSize && "xvshuf convert failed!");
  enum HalfMaskType { HighLaneTy, LowLaneTy, None };

  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;

  HalfMaskType preMask = None, postMask = None;

  if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
      }))
    preMask = HighLaneTy;
  else if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
           }))
    preMask = LowLaneTy;

  if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
      }))
    postMask = HighLaneTy;
  else if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
           }))
    postMask = LowLaneTy;

  if (preMask == HighLaneTy && postMask == LowLaneTy) {
  if (preMask == LowLaneTy && postMask == HighLaneTy) {
    if (!V2.isUndef()) {

    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
  } else if (preMask == LowLaneTy && postMask == LowLaneTy) {
    if (!V2.isUndef()) {

    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
  } else if (preMask == HighLaneTy && postMask == HighLaneTy) {
    if (!V2.isUndef()) {

    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
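// Illustrative note: once the operands have been re-laned (e.g. via an
// xvpermi.q-style cross-lane permute), the loops above rebase the mask
// indices by +/- HalfSize so that each half of the mask references the lane
// that is now actually in place; only then can the per-lane matchers above
// succeed on the canonicalized shuffle.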
1260 "Vector type is unsupported for lasx!");
1262 "Two operands have different types!");
1264 "Unexpected mask size for shuffle!");
1265 assert(Mask.size() % 2 == 0 &&
"Expected even mask size.");
1266 assert(Mask.size() >= 4 &&
"Mask size is less than 4.");
  MVT VT = Op.getSimpleValueType();

  bool V1IsUndef = V1.isUndef();
  bool V2IsUndef = V2.isUndef();
  if (V1IsUndef && V2IsUndef)

  if (V2IsUndef &&
      any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
    for (int &M : NewMask)
      if (M >= NumElements)

  int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
  (void)MaskUpperLimit;
  assert(all_of(OrigMask,
                [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
         "Out of bounds shuffle index");
  if (isa<ConstantSDNode>(Op))
  if (isa<ConstantFPSDNode>(Op))
  EVT ResTy = Op->getValueType(0);

  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;

  if ((!Subtarget.hasExtLSX() || !Is128Vec) &&
      (!Subtarget.hasExtLASX() || !Is256Vec))

  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
                            /*MinSplatBits=*/8) &&
      SplatBitSize <= 64) {
    if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
        SplatBitSize != 64)

    switch (SplatBitSize) {
    case 8:
      ViaVecTy = Is128Vec ? MVT::v16i8 : MVT::v32i8;
      break;
    case 16:
      ViaVecTy = Is128Vec ? MVT::v8i16 : MVT::v16i16;
      break;
    case 32:
      ViaVecTy = Is128Vec ? MVT::v4i32 : MVT::v8i32;
      break;
    case 64:
      ViaVecTy = Is128Vec ? MVT::v2i64 : MVT::v4i64;
      break;

    if (ViaVecTy != ResTy)
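// Example (illustrative): a v16i8 build_vector of sixteen 42s is recognized
// by isConstantSplat with SplatBitSize == 8; the splat is re-materialized as
// a constant on the matching "via" vector type selected above and bitcast
// back when that type differs from the requested ResTy.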
  EVT ResTy = Node->getValueType(0);

  for (unsigned i = 0; i < NumElts; ++i) {
                     Node->getOperand(i),
SDValue
LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT VecTy = Op->getOperand(0)->getValueType(0);

  if (isa<ConstantSDNode>(Idx) &&
      (EltTy == MVT::i32 || EltTy == MVT::i64 || EltTy == MVT::f32 ||
       EltTy == MVT::f64 || Idx->getAsZExtVal() < NumElts / 2))
SDValue
LoongArchTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                SelectionDAG &DAG) const {
  if (isa<ConstantSDNode>(Op->getOperand(2)))
  if (Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i32) {
    DAG.getContext()->emitError(
        "On LA64, only 64-bit registers can be written.");
    return Op.getOperand(0);
  }

  if (!Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i64) {
    DAG.getContext()->emitError(
        "On LA32, only 32-bit registers can be written.");
    return Op.getOperand(0);
  }
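// Illustrative note: ISD::WRITE_REGISTER originates from the
// llvm.write_register intrinsic; writing an i32 value on LA64 (or an i64
// value on LA32) is a programming error, so the node is folded to its chain
// operand after the diagnostic instead of crashing instruction selection.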
  if (!isa<ConstantSDNode>(Op.getOperand(0))) {
    DAG.getContext()->emitError("argument to '__builtin_frame_address' must "
                                "be a constant integer");

  EVT VT = Op.getValueType();
  unsigned Depth = Op.getConstantOperandVal(0);
  int GRLenInBytes = Subtarget.getGRLen() / 8;

    int Offset = -(GRLenInBytes * 2);

  if (Op.getConstantOperandVal(0) != 0) {
    DAG.getContext()->emitError(
        "return address can only be determined for the current frame");

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  assert(Subtarget.is64Bit() && Subtarget.hasBasicF() &&
         !Subtarget.hasBasicD() && "unexpected target features");

  auto *C = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
  if (C && C->getZExtValue() < UINT64_C(0xFFFFFFFF))

      dyn_cast<VTSDNode>(Op0.getOperand(1))->getVT().bitsLT(MVT::i32))

  EVT RetVT = Op.getValueType();
  MakeLibCallOptions CallOptions;
  CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
  std::tie(Result, Chain) =

  assert(Subtarget.is64Bit() && Subtarget.hasBasicF() &&
         !Subtarget.hasBasicD() && "unexpected target features");

      dyn_cast<VTSDNode>(Op0.getOperand(1))->getVT().bitsLE(MVT::i32))

  EVT RetVT = Op.getValueType();
  MakeLibCallOptions CallOptions;
  CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
  std::tie(Result, Chain) =

  if (Op.getValueType() == MVT::f32 && Op0.getValueType() == MVT::i32 &&
      Subtarget.is64Bit() && Subtarget.hasBasicF()) {

  if (Op.getValueSizeInBits() > 32 && Subtarget.hasBasicF() &&
      !Subtarget.hasBasicD()) {

                       N->getOffset(), Flags);
template <class NodeTy>
SDValue LoongArchTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                         CodeModel::Model M,
                                         bool IsLocal) const {
    assert(Subtarget.is64Bit() && "Large code model requires LA64");
  return getAddr(cast<BlockAddressSDNode>(Op), DAG,
                 DAG.getTarget().getCodeModel());

  return getAddr(cast<JumpTableSDNode>(Op), DAG,
                 DAG.getTarget().getCodeModel());

  return getAddr(cast<ConstantPoolSDNode>(Op), DAG,
                 DAG.getTarget().getCodeModel());
  assert(N->getOffset() == 0 && "unexpected offset in global node");

  if (GV->isDSOLocal() && isa<GlobalVariable>(GV)) {
    if (auto GCM = dyn_cast<GlobalVariable>(GV)->getCodeModel())

                                                  unsigned Opc, bool UseGOT,

  Args.push_back(Entry);
SDValue
LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert((!Large || Subtarget.is64Bit()) && "Large code model requires LA64");

  assert(N->getOffset() == 0 && "unexpected offset in global node");

  case TLSModel::GeneralDynamic:
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_GD_LARGE
                                   : LoongArch::PseudoLA_TLS_GD,

  case TLSModel::LocalDynamic:
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_LD_LARGE
                                   : LoongArch::PseudoLA_TLS_LD,

  case TLSModel::InitialExec:
    return getStaticTLSAddr(N, DAG,
                            Large ? LoongArch::PseudoLA_TLS_IE_LARGE
                                  : LoongArch::PseudoLA_TLS_IE,

  case TLSModel::LocalExec:
    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE,

    return getTLSDescAddr(N, DAG,
                          Large ? LoongArch::PseudoLA_TLS_DESC_PC_LARGE
                                : LoongArch::PseudoLA_TLS_DESC_PC,
template <unsigned N>
static SDValue checkIntrinsicImmArg(SDValue Op, unsigned ImmOp,
                                    SelectionDAG &DAG, bool IsSigned = false) {
  auto *CImm = cast<ConstantSDNode>(Op->getOperand(ImmOp));
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
    DAG.getContext()->emitError(Op->getOperationName(0) +
                                ": argument out of range.");
SDValue
LoongArchTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                 SelectionDAG &DAG) const {
  switch (Op.getConstantOperandVal(0)) {
  case Intrinsic::thread_pointer: {
  case Intrinsic::loongarch_lsx_vpickve2gr_d:
  case Intrinsic::loongarch_lsx_vpickve2gr_du:
  case Intrinsic::loongarch_lsx_vreplvei_d:
  case Intrinsic::loongarch_lasx_xvrepl128vei_d:
    return checkIntrinsicImmArg<1>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vreplvei_w:
  case Intrinsic::loongarch_lasx_xvrepl128vei_w:
  case Intrinsic::loongarch_lasx_xvpickve2gr_d:
  case Intrinsic::loongarch_lasx_xvpickve2gr_du:
  case Intrinsic::loongarch_lasx_xvpickve_d:
  case Intrinsic::loongarch_lasx_xvpickve_d_f:
    return checkIntrinsicImmArg<2>(Op, 2, DAG);
  case Intrinsic::loongarch_lasx_xvinsve0_d:
    return checkIntrinsicImmArg<2>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_b:
  case Intrinsic::loongarch_lsx_vsat_bu:
  case Intrinsic::loongarch_lsx_vrotri_b:
  case Intrinsic::loongarch_lsx_vsllwil_h_b:
  case Intrinsic::loongarch_lsx_vsllwil_hu_bu:
  case Intrinsic::loongarch_lsx_vsrlri_b:
  case Intrinsic::loongarch_lsx_vsrari_b:
  case Intrinsic::loongarch_lsx_vreplvei_h:
  case Intrinsic::loongarch_lasx_xvsat_b:
  case Intrinsic::loongarch_lasx_xvsat_bu:
  case Intrinsic::loongarch_lasx_xvrotri_b:
  case Intrinsic::loongarch_lasx_xvsllwil_h_b:
  case Intrinsic::loongarch_lasx_xvsllwil_hu_bu:
  case Intrinsic::loongarch_lasx_xvsrlri_b:
  case Intrinsic::loongarch_lasx_xvsrari_b:
  case Intrinsic::loongarch_lasx_xvrepl128vei_h:
  case Intrinsic::loongarch_lasx_xvpickve_w:
  case Intrinsic::loongarch_lasx_xvpickve_w_f:
    return checkIntrinsicImmArg<3>(Op, 2, DAG);
  case Intrinsic::loongarch_lasx_xvinsve0_w:
    return checkIntrinsicImmArg<3>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_h:
  case Intrinsic::loongarch_lsx_vsat_hu:
  case Intrinsic::loongarch_lsx_vrotri_h:
  case Intrinsic::loongarch_lsx_vsllwil_w_h:
  case Intrinsic::loongarch_lsx_vsllwil_wu_hu:
  case Intrinsic::loongarch_lsx_vsrlri_h:
  case Intrinsic::loongarch_lsx_vsrari_h:
  case Intrinsic::loongarch_lsx_vreplvei_b:
  case Intrinsic::loongarch_lasx_xvsat_h:
  case Intrinsic::loongarch_lasx_xvsat_hu:
  case Intrinsic::loongarch_lasx_xvrotri_h:
  case Intrinsic::loongarch_lasx_xvsllwil_w_h:
  case Intrinsic::loongarch_lasx_xvsllwil_wu_hu:
  case Intrinsic::loongarch_lasx_xvsrlri_h:
  case Intrinsic::loongarch_lasx_xvsrari_h:
  case Intrinsic::loongarch_lasx_xvrepl128vei_b:
    return checkIntrinsicImmArg<4>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_b_h:
  case Intrinsic::loongarch_lsx_vsrani_b_h:
  case Intrinsic::loongarch_lsx_vsrlrni_b_h:
  case Intrinsic::loongarch_lsx_vsrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_b_h:
  case Intrinsic::loongarch_lsx_vssrani_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_bu_h:
  case Intrinsic::loongarch_lsx_vssrani_bu_h:
  case Intrinsic::loongarch_lsx_vssrlrni_b_h:
  case Intrinsic::loongarch_lsx_vssrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlrni_bu_h:
  case Intrinsic::loongarch_lsx_vssrarni_bu_h:
  case Intrinsic::loongarch_lasx_xvsrlni_b_h:
  case Intrinsic::loongarch_lasx_xvsrani_b_h:
  case Intrinsic::loongarch_lasx_xvsrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvsrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_b_h:
  case Intrinsic::loongarch_lasx_xvssrani_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrani_bu_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvssrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrarni_bu_h:
    return checkIntrinsicImmArg<4>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_w:
  case Intrinsic::loongarch_lsx_vsat_wu:
  case Intrinsic::loongarch_lsx_vrotri_w:
  case Intrinsic::loongarch_lsx_vsllwil_d_w:
  case Intrinsic::loongarch_lsx_vsllwil_du_wu:
  case Intrinsic::loongarch_lsx_vsrlri_w:
  case Intrinsic::loongarch_lsx_vsrari_w:
  case Intrinsic::loongarch_lsx_vslei_bu:
  case Intrinsic::loongarch_lsx_vslei_hu:
  case Intrinsic::loongarch_lsx_vslei_wu:
  case Intrinsic::loongarch_lsx_vslei_du:
  case Intrinsic::loongarch_lsx_vslti_bu:
  case Intrinsic::loongarch_lsx_vslti_hu:
  case Intrinsic::loongarch_lsx_vslti_wu:
  case Intrinsic::loongarch_lsx_vslti_du:
  case Intrinsic::loongarch_lsx_vbsll_v:
  case Intrinsic::loongarch_lsx_vbsrl_v:
  case Intrinsic::loongarch_lasx_xvsat_w:
  case Intrinsic::loongarch_lasx_xvsat_wu:
  case Intrinsic::loongarch_lasx_xvrotri_w:
  case Intrinsic::loongarch_lasx_xvsllwil_d_w:
  case Intrinsic::loongarch_lasx_xvsllwil_du_wu:
  case Intrinsic::loongarch_lasx_xvsrlri_w:
  case Intrinsic::loongarch_lasx_xvsrari_w:
  case Intrinsic::loongarch_lasx_xvslei_bu:
  case Intrinsic::loongarch_lasx_xvslei_hu:
  case Intrinsic::loongarch_lasx_xvslei_wu:
  case Intrinsic::loongarch_lasx_xvslei_du:
  case Intrinsic::loongarch_lasx_xvslti_bu:
  case Intrinsic::loongarch_lasx_xvslti_hu:
  case Intrinsic::loongarch_lasx_xvslti_wu:
  case Intrinsic::loongarch_lasx_xvslti_du:
  case Intrinsic::loongarch_lasx_xvbsll_v:
  case Intrinsic::loongarch_lasx_xvbsrl_v:
    return checkIntrinsicImmArg<5>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vseqi_b:
  case Intrinsic::loongarch_lsx_vseqi_h:
  case Intrinsic::loongarch_lsx_vseqi_w:
  case Intrinsic::loongarch_lsx_vseqi_d:
  case Intrinsic::loongarch_lsx_vslei_b:
  case Intrinsic::loongarch_lsx_vslei_h:
  case Intrinsic::loongarch_lsx_vslei_w:
  case Intrinsic::loongarch_lsx_vslei_d:
  case Intrinsic::loongarch_lsx_vslti_b:
  case Intrinsic::loongarch_lsx_vslti_h:
  case Intrinsic::loongarch_lsx_vslti_w:
  case Intrinsic::loongarch_lsx_vslti_d:
  case Intrinsic::loongarch_lasx_xvseqi_b:
  case Intrinsic::loongarch_lasx_xvseqi_h:
  case Intrinsic::loongarch_lasx_xvseqi_w:
  case Intrinsic::loongarch_lasx_xvseqi_d:
  case Intrinsic::loongarch_lasx_xvslei_b:
  case Intrinsic::loongarch_lasx_xvslei_h:
  case Intrinsic::loongarch_lasx_xvslei_w:
  case Intrinsic::loongarch_lasx_xvslei_d:
  case Intrinsic::loongarch_lasx_xvslti_b:
  case Intrinsic::loongarch_lasx_xvslti_h:
  case Intrinsic::loongarch_lasx_xvslti_w:
  case Intrinsic::loongarch_lasx_xvslti_d:
    return checkIntrinsicImmArg<5>(Op, 2, DAG, /*IsSigned=*/true);
  case Intrinsic::loongarch_lsx_vsrlni_h_w:
  case Intrinsic::loongarch_lsx_vsrani_h_w:
  case Intrinsic::loongarch_lsx_vsrlrni_h_w:
  case Intrinsic::loongarch_lsx_vsrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_h_w:
  case Intrinsic::loongarch_lsx_vssrani_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_hu_w:
  case Intrinsic::loongarch_lsx_vssrani_hu_w:
  case Intrinsic::loongarch_lsx_vssrlrni_h_w:
  case Intrinsic::loongarch_lsx_vssrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlrni_hu_w:
  case Intrinsic::loongarch_lsx_vssrarni_hu_w:
  case Intrinsic::loongarch_lsx_vfrstpi_b:
  case Intrinsic::loongarch_lsx_vfrstpi_h:
  case Intrinsic::loongarch_lasx_xvsrlni_h_w:
  case Intrinsic::loongarch_lasx_xvsrani_h_w:
  case Intrinsic::loongarch_lasx_xvsrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvsrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_h_w:
  case Intrinsic::loongarch_lasx_xvssrani_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrani_hu_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvssrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrarni_hu_w:
  case Intrinsic::loongarch_lasx_xvfrstpi_b:
  case Intrinsic::loongarch_lasx_xvfrstpi_h:
    return checkIntrinsicImmArg<5>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_d:
  case Intrinsic::loongarch_lsx_vsat_du:
  case Intrinsic::loongarch_lsx_vrotri_d:
  case Intrinsic::loongarch_lsx_vsrlri_d:
  case Intrinsic::loongarch_lsx_vsrari_d:
  case Intrinsic::loongarch_lasx_xvsat_d:
  case Intrinsic::loongarch_lasx_xvsat_du:
  case Intrinsic::loongarch_lasx_xvrotri_d:
  case Intrinsic::loongarch_lasx_xvsrlri_d:
  case Intrinsic::loongarch_lasx_xvsrari_d:
    return checkIntrinsicImmArg<6>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_w_d:
  case Intrinsic::loongarch_lsx_vsrani_w_d:
  case Intrinsic::loongarch_lsx_vsrlrni_w_d:
  case Intrinsic::loongarch_lsx_vsrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_w_d:
  case Intrinsic::loongarch_lsx_vssrani_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_wu_d:
  case Intrinsic::loongarch_lsx_vssrani_wu_d:
  case Intrinsic::loongarch_lsx_vssrlrni_w_d:
  case Intrinsic::loongarch_lsx_vssrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlrni_wu_d:
  case Intrinsic::loongarch_lsx_vssrarni_wu_d:
  case Intrinsic::loongarch_lasx_xvsrlni_w_d:
  case Intrinsic::loongarch_lasx_xvsrani_w_d:
  case Intrinsic::loongarch_lasx_xvsrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvsrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_w_d:
  case Intrinsic::loongarch_lasx_xvssrani_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrani_wu_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvssrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrarni_wu_d:
    return checkIntrinsicImmArg<6>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_d_q:
  case Intrinsic::loongarch_lsx_vsrani_d_q:
  case Intrinsic::loongarch_lsx_vsrlrni_d_q:
  case Intrinsic::loongarch_lsx_vsrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_d_q:
  case Intrinsic::loongarch_lsx_vssrani_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_du_q:
  case Intrinsic::loongarch_lsx_vssrani_du_q:
  case Intrinsic::loongarch_lsx_vssrlrni_d_q:
  case Intrinsic::loongarch_lsx_vssrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlrni_du_q:
  case Intrinsic::loongarch_lsx_vssrarni_du_q:
  case Intrinsic::loongarch_lasx_xvsrlni_d_q:
  case Intrinsic::loongarch_lasx_xvsrani_d_q:
  case Intrinsic::loongarch_lasx_xvsrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvsrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_d_q:
  case Intrinsic::loongarch_lasx_xvssrani_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_du_q:
  case Intrinsic::loongarch_lasx_xvssrani_du_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvssrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_du_q:
  case Intrinsic::loongarch_lasx_xvssrarni_du_q:
    return checkIntrinsicImmArg<7>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vnori_b:
  case Intrinsic::loongarch_lsx_vshuf4i_b:
  case Intrinsic::loongarch_lsx_vshuf4i_h:
  case Intrinsic::loongarch_lsx_vshuf4i_w:
  case Intrinsic::loongarch_lasx_xvnori_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_h:
  case Intrinsic::loongarch_lasx_xvshuf4i_w:
  case Intrinsic::loongarch_lasx_xvpermi_d:
    return checkIntrinsicImmArg<8>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vshuf4i_d:
  case Intrinsic::loongarch_lsx_vpermi_w:
  case Intrinsic::loongarch_lsx_vbitseli_b:
  case Intrinsic::loongarch_lsx_vextrins_b:
  case Intrinsic::loongarch_lsx_vextrins_h:
  case Intrinsic::loongarch_lsx_vextrins_w:
  case Intrinsic::loongarch_lsx_vextrins_d:
  case Intrinsic::loongarch_lasx_xvshuf4i_d:
  case Intrinsic::loongarch_lasx_xvpermi_w:
  case Intrinsic::loongarch_lasx_xvpermi_q:
  case Intrinsic::loongarch_lasx_xvbitseli_b:
  case Intrinsic::loongarch_lasx_xvextrins_b:
  case Intrinsic::loongarch_lasx_xvextrins_h:
  case Intrinsic::loongarch_lasx_xvextrins_w:
  case Intrinsic::loongarch_lasx_xvextrins_d:
    return checkIntrinsicImmArg<8>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vrepli_b:
  case Intrinsic::loongarch_lsx_vrepli_h:
  case Intrinsic::loongarch_lsx_vrepli_w:
  case Intrinsic::loongarch_lsx_vrepli_d:
  case Intrinsic::loongarch_lasx_xvrepli_b:
  case Intrinsic::loongarch_lasx_xvrepli_h:
  case Intrinsic::loongarch_lasx_xvrepli_w:
  case Intrinsic::loongarch_lasx_xvrepli_d:
    return checkIntrinsicImmArg<10>(Op, 1, DAG, /*IsSigned=*/true);
  case Intrinsic::loongarch_lsx_vldi:
  case Intrinsic::loongarch_lasx_xvldi:
    return checkIntrinsicImmArg<13>(Op, 1, DAG, /*IsSigned=*/true);
SDValue
LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                                SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (Op.getConstantOperandVal(1)) {
  case Intrinsic::loongarch_crc_w_b_w:
  case Intrinsic::loongarch_crc_w_h_w:
  case Intrinsic::loongarch_crc_w_w_w:
  case Intrinsic::loongarch_crc_w_d_w:
  case Intrinsic::loongarch_crcc_w_b_w:
  case Intrinsic::loongarch_crcc_w_h_w:
  case Intrinsic::loongarch_crcc_w_w_w:
  case Intrinsic::loongarch_crcc_w_d_w:
  case Intrinsic::loongarch_csrrd_w:
  case Intrinsic::loongarch_csrrd_d: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<14>(Imm)
  case Intrinsic::loongarch_csrwr_w:
  case Intrinsic::loongarch_csrwr_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !isUInt<14>(Imm)
                          {Chain, Op.getOperand(2),
  case Intrinsic::loongarch_csrxchg_w:
  case Intrinsic::loongarch_csrxchg_d: {
    unsigned Imm = Op.getConstantOperandVal(4);
    return !isUInt<14>(Imm)
                          {Chain, Op.getOperand(2), Op.getOperand(3),
  case Intrinsic::loongarch_iocsrrd_d: {

#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    return DAG.getNode(LoongArchISD::NODE, DL, {GRLenVT, MVT::Other},          \
                       {Chain, Op.getOperand(2)});                             \
  }
  case Intrinsic::loongarch_cpucfg: {
                       {Chain, Op.getOperand(2)});
  case Intrinsic::loongarch_lddir_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !isUInt<8>(Imm)
  case Intrinsic::loongarch_movfcsr2gr: {
    if (!Subtarget.hasBasicF())
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<2>(Imm)
  case Intrinsic::loongarch_lsx_vld:
  case Intrinsic::loongarch_lsx_vldrepl_b:
  case Intrinsic::loongarch_lasx_xvld:
  case Intrinsic::loongarch_lasx_xvldrepl_b:
    return !isInt<12>(cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
  case Intrinsic::loongarch_lsx_vldrepl_h:
  case Intrinsic::loongarch_lasx_xvldrepl_h:
    return !isShiftedInt<11, 1>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(
                     Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lsx_vldrepl_w:
  case Intrinsic::loongarch_lasx_xvldrepl_w:
    return !isShiftedInt<10, 2>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(
                     Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lsx_vldrepl_d:
  case Intrinsic::loongarch_lasx_xvldrepl_d:
    return !isShiftedInt<9, 3>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(
                     Op, "argument out of range or not a multiple of 8", DAG)
    return Op.getOperand(0);

  uint64_t IntrinsicEnum = Op.getConstantOperandVal(1);

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqLA32 = "requires loongarch32";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (IntrinsicEnum) {
  case Intrinsic::loongarch_cacop_d:
  case Intrinsic::loongarch_cacop_w: {
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_d && !Subtarget.is64Bit())
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
    int Imm2 = cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue();
    if (!isUInt<5>(Imm1) || !isInt<12>(Imm2))
  case Intrinsic::loongarch_dbar: {
    return !isUInt<15>(Imm)
  case Intrinsic::loongarch_ibar: {
    return !isUInt<15>(Imm)
  case Intrinsic::loongarch_break: {
    return !isUInt<15>(Imm)
  case Intrinsic::loongarch_movgr2fcsr: {
    if (!Subtarget.hasBasicF())
    return !isUInt<2>(Imm)
  case Intrinsic::loongarch_syscall: {
    return !isUInt<15>(Imm)

#define IOCSRWR_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue Op3 = Op.getOperand(3);                                            \
    return Subtarget.is64Bit()                                                 \
               ? DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain,        \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),  \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op3))  \
               : DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, Op2,   \
  case Intrinsic::loongarch_iocsrwr_d: {

#define ASRT_LE_GT_CASE(NAME)                                                  \
  case Intrinsic::loongarch_##NAME: {                                          \
    return !Subtarget.is64Bit()                                                \
               ? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG)           \
#undef ASRT_LE_GT_CASE
  case Intrinsic::loongarch_ldpte_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
  case Intrinsic::loongarch_lsx_vst:
  case Intrinsic::loongarch_lasx_xvst:
    return !isInt<12>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue())
  case Intrinsic::loongarch_lasx_xvstelm_b:
    return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<5>(Op.getConstantOperandVal(5)))
  case Intrinsic::loongarch_lsx_vstelm_b:
    return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<4>(Op.getConstantOperandVal(5)))
  case Intrinsic::loongarch_lasx_xvstelm_h:
    return (!isShiftedInt<8, 1>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<4>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lsx_vstelm_h:
    return (!isShiftedInt<8, 1>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<3>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lasx_xvstelm_w:
    return (!isShiftedInt<8, 2>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<3>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lsx_vstelm_w:
    return (!isShiftedInt<8, 2>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<2>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lasx_xvstelm_d:
    return (!isShiftedInt<8, 3>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<2>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 8", DAG)
  case Intrinsic::loongarch_lsx_vstelm_d:
    return (!isShiftedInt<8, 3>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<1>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 8", DAG)
  EVT VT = Lo.getValueType();

  EVT VT = Lo.getValueType();

    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0);

    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));

    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
    StringRef ErrorMsg, bool WithChain = true) {

  Results.push_back(N->getOperand(0));
template <unsigned N>
static void replaceVPICKVE2GRResults(SDNode *Node,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG,
                                     const LoongArchSubtarget &Subtarget,
                                     unsigned ResOp) {
  const StringRef ErrorMsgOOR = "argument out of range";
  unsigned Imm = Node->getConstantOperandVal(2);
  if (!isUInt<N>(Imm)) {
  switch (N->getConstantOperandVal(0)) {
  case Intrinsic::loongarch_lsx_vpickve2gr_b:
    replaceVPICKVE2GRResults<4>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_h:
  case Intrinsic::loongarch_lasx_xvpickve2gr_w:
    replaceVPICKVE2GRResults<3>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_w:
    replaceVPICKVE2GRResults<2>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_bu:
    replaceVPICKVE2GRResults<4>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_hu:
  case Intrinsic::loongarch_lasx_xvpickve2gr_wu:
    replaceVPICKVE2GRResults<3>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_wu:
    replaceVPICKVE2GRResults<2>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_bz_b:
  case Intrinsic::loongarch_lsx_bz_h:
  case Intrinsic::loongarch_lsx_bz_w:
  case Intrinsic::loongarch_lsx_bz_d:
  case Intrinsic::loongarch_lasx_xbz_b:
  case Intrinsic::loongarch_lasx_xbz_h:
  case Intrinsic::loongarch_lasx_xbz_w:
  case Intrinsic::loongarch_lasx_xbz_d:
  case Intrinsic::loongarch_lsx_bz_v:
  case Intrinsic::loongarch_lasx_xbz_v:
  case Intrinsic::loongarch_lsx_bnz_b:
  case Intrinsic::loongarch_lsx_bnz_h:
  case Intrinsic::loongarch_lsx_bnz_w:
  case Intrinsic::loongarch_lsx_bnz_d:
  case Intrinsic::loongarch_lasx_xbnz_b:
  case Intrinsic::loongarch_lasx_xbnz_h:
  case Intrinsic::loongarch_lasx_xbnz_w:
  case Intrinsic::loongarch_lasx_xbnz_d:
  case Intrinsic::loongarch_lsx_bnz_v:
  case Intrinsic::loongarch_lasx_xbnz_v:
  EVT VT = N->getValueType(0);
  switch (N->getOpcode()) {
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");

    EVT OpVT = Src.getValueType();
    std::tie(Result, Chain) =

    EVT SrcVT = Src.getValueType();
    if (VT == MVT::i32 && SrcVT == MVT::f32 && Subtarget.is64Bit() &&
        Subtarget.hasBasicF()) {
           "Unexpected custom legalisation");
    TLI.expandFP_TO_UINT(N, Tmp1, Tmp2, DAG);

    assert((VT == MVT::i16 || VT == MVT::i32) &&
           "Unexpected custom legalization");

    assert((VT == MVT::i8 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
           "Unexpected custom legalization");
           "Unexpected custom legalisation");
    const StringRef ErrorMsgOOR = "argument out of range";
    const StringRef ErrorMsgReqLA64 = "requires loongarch64";
    const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

    switch (N->getConstantOperandVal(1)) {
    case Intrinsic::loongarch_movfcsr2gr: {
      if (!Subtarget.hasBasicF()) {
      if (!isUInt<2>(Imm)) {

#define CRC_CASE_EXT_BINARYOP(NAME, NODE)                                      \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue NODE = DAG.getNode(                                                \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},                        \
        {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),               \
         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))});       \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0)));   \
    Results.push_back(NODE.getValue(1));                                       \
#undef CRC_CASE_EXT_BINARYOP

#define CRC_CASE_EXT_UNARYOP(NAME, NODE)                                       \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue NODE = DAG.getNode(                                                \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},                        \
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))});        \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0)));   \
    Results.push_back(NODE.getValue(1));                                       \
#undef CRC_CASE_EXT_UNARYOP

#define CSR_CASE(ID)                                                           \
  case Intrinsic::loongarch_##ID: {                                            \
    if (!Subtarget.is64Bit())                                                  \
      emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqLA64);   \
    case Intrinsic::loongarch_csrrd_w: {
      if (!isUInt<14>(Imm)) {
    case Intrinsic::loongarch_csrwr_w: {
      unsigned Imm = N->getConstantOperandVal(3);
      if (!isUInt<14>(Imm)) {
    case Intrinsic::loongarch_csrxchg_w: {
      unsigned Imm = N->getConstantOperandVal(4);
      if (!isUInt<14>(Imm)) {

#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue IOCSRRDResults =                                                   \
        DAG.getNode(LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},            \
                    {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2)}); \
    Results.push_back(                                                         \
        DAG.getNode(ISD::TRUNCATE, DL, VT, IOCSRRDResults.getValue(0)));       \
    Results.push_back(IOCSRRDResults.getValue(1));                             \
    case Intrinsic::loongarch_cpucfg: {
    case Intrinsic::loongarch_lddir_d: {

               "On LA64, only 64-bit registers can be read.");
               "On LA32, only 32-bit registers can be read.");
    Results.push_back(N->getOperand(0));
  SDValue FirstOperand = N->getOperand(0);
  SDValue SecondOperand = N->getOperand(1);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);

  unsigned SMIdx, SMLen;

  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)) ||

  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))

    NewOperand = FirstOperand;

  msb = lsb + SMLen - 1;

  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL || lsb == 0)
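// Example (illustrative): (and (srl $x, 8), 0xff) has a shifted-mask second
// operand (SMIdx = 0, SMLen = 8) and a shift amount of lsb = 8, so
// msb = lsb + SMLen - 1 = 15 and the whole expression can be selected as a
// single BSTRPICK $x, 15, 8, extracting bits [15:8].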
  SDValue FirstOperand = N->getOperand(0);
  EVT ValTy = N->getValueType(0);

  unsigned MaskIdx, MaskLen;

      !(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||

  if (!(CN = dyn_cast<ConstantSDNode>(N->getOperand(1))))

  if (MaskIdx <= Shamt && Shamt <= MaskIdx + MaskLen - 1)
  EVT ValTy = N->getValueType(0);
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);

  unsigned MaskIdx0, MaskLen0, MaskIdx1, MaskLen1;

  bool SwapAndRetried = false;

  if (ValBits != 32 && ValBits != 64)

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      MaskIdx0 == MaskIdx1 && MaskLen0 == MaskLen1 &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      MaskLen0 == MaskLen1 && MaskIdx1 == 0 &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= 64) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1->getOperand(1))) &&
                        ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                        : (MaskIdx0 + MaskLen0 - 1),

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      MaskIdx0 == 0 && (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1)) &&
        DAG.getConstant(ValBits == 32 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                                      : (MaskIdx0 + MaskLen0 - 1),

  unsigned MaskIdx, MaskLen;
  if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::AND &&
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      MaskIdx == 0 && (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&

      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      N1.getOperand(0).getOpcode() == ISD::SHL &&
      (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&

  if (!SwapAndRetried) {
    SwapAndRetried = true;

  SwapAndRetried = false;

      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&

  if (!SwapAndRetried) {
    SwapAndRetried = true;
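// Example (illustrative): with ValBits = 32,
//   (or (and $a, 0xffff00ff), (shl (and $b, 0xff), 8))
// inserts the low byte of $b into bits [15:8] of $a; the patterns above
// recognize the complementary masks and the shift amount and fold the whole
// expression into a single BSTRINS $a, $b, 15, 8. The swap-and-retry flags
// let the same matchers run with the OR operands commuted.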
  switch (V.getNode()->getOpcode()) {
    LoadSDNode *LoadNode = cast<LoadSDNode>(V.getNode());

    VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
    if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {

    VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
    if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {

  SDNode *AndNode = N->getOperand(0).getNode();

  SDValue CmpInputValue = N->getOperand(1);

    CN = dyn_cast<ConstantSDNode>(CmpInputValue);
    AndInputValue1 = AndInputValue1.getOperand(0);

  if (AndInputValue2 != CmpInputValue)

                                 TruncInputValue1, TruncInputValue2);
template <unsigned N>
                                       bool IsSigned = false) {
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(ImmOp));

  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                                ": argument out of range.");

template <unsigned N>
static SDValue lowerVectorSplatImm(SDNode *Node, unsigned ImmOp,
                                   SelectionDAG &DAG, bool IsSigned = false) {
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(ImmOp));

  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                                ": argument out of range.");

  return DAG.getConstant(
      APInt(ResTy.getScalarType().getSizeInBits(),
            IsSigned ? CImm->getSExtValue() : CImm->getZExtValue(), IsSigned),
      SDLoc(Node), ResTy);

  EVT ResTy = Node->getValueType(0);

  EVT ResTy = Node->getValueType(0);

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));

  if (!isUInt<N>(CImm->getZExtValue())) {
                                ": argument out of range.");

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));

  if (!isUInt<N>(CImm->getZExtValue())) {
                                ": argument out of range.");

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));

  if (!isUInt<N>(CImm->getZExtValue())) {
                                ": argument out of range.");
3741 switch (
N->getConstantOperandVal(0)) {
3744 case Intrinsic::loongarch_lsx_vadd_b:
3745 case Intrinsic::loongarch_lsx_vadd_h:
3746 case Intrinsic::loongarch_lsx_vadd_w:
3747 case Intrinsic::loongarch_lsx_vadd_d:
3748 case Intrinsic::loongarch_lasx_xvadd_b:
3749 case Intrinsic::loongarch_lasx_xvadd_h:
3750 case Intrinsic::loongarch_lasx_xvadd_w:
3751 case Intrinsic::loongarch_lasx_xvadd_d:
3754 case Intrinsic::loongarch_lsx_vaddi_bu:
3755 case Intrinsic::loongarch_lsx_vaddi_hu:
3756 case Intrinsic::loongarch_lsx_vaddi_wu:
3757 case Intrinsic::loongarch_lsx_vaddi_du:
3758 case Intrinsic::loongarch_lasx_xvaddi_bu:
3759 case Intrinsic::loongarch_lasx_xvaddi_hu:
3760 case Intrinsic::loongarch_lasx_xvaddi_wu:
3761 case Intrinsic::loongarch_lasx_xvaddi_du:
3763 lowerVectorSplatImm<5>(
N, 2, DAG));
3764 case Intrinsic::loongarch_lsx_vsub_b:
3765 case Intrinsic::loongarch_lsx_vsub_h:
3766 case Intrinsic::loongarch_lsx_vsub_w:
3767 case Intrinsic::loongarch_lsx_vsub_d:
3768 case Intrinsic::loongarch_lasx_xvsub_b:
3769 case Intrinsic::loongarch_lasx_xvsub_h:
3770 case Intrinsic::loongarch_lasx_xvsub_w:
3771 case Intrinsic::loongarch_lasx_xvsub_d:
3774 case Intrinsic::loongarch_lsx_vsubi_bu:
3775 case Intrinsic::loongarch_lsx_vsubi_hu:
3776 case Intrinsic::loongarch_lsx_vsubi_wu:
3777 case Intrinsic::loongarch_lsx_vsubi_du:
3778 case Intrinsic::loongarch_lasx_xvsubi_bu:
3779 case Intrinsic::loongarch_lasx_xvsubi_hu:
3780 case Intrinsic::loongarch_lasx_xvsubi_wu:
3781 case Intrinsic::loongarch_lasx_xvsubi_du:
3783 lowerVectorSplatImm<5>(
N, 2, DAG));
3784 case Intrinsic::loongarch_lsx_vneg_b:
3785 case Intrinsic::loongarch_lsx_vneg_h:
3786 case Intrinsic::loongarch_lsx_vneg_w:
3787 case Intrinsic::loongarch_lsx_vneg_d:
3788 case Intrinsic::loongarch_lasx_xvneg_b:
3789 case Intrinsic::loongarch_lasx_xvneg_h:
3790 case Intrinsic::loongarch_lasx_xvneg_w:
3791 case Intrinsic::loongarch_lasx_xvneg_d:
3795 APInt(
N->getValueType(0).getScalarType().getSizeInBits(), 0,
3797 SDLoc(
N),
N->getValueType(0)),
3799 case Intrinsic::loongarch_lsx_vmax_b:
3800 case Intrinsic::loongarch_lsx_vmax_h:
3801 case Intrinsic::loongarch_lsx_vmax_w:
3802 case Intrinsic::loongarch_lsx_vmax_d:
3803 case Intrinsic::loongarch_lasx_xvmax_b:
3804 case Intrinsic::loongarch_lasx_xvmax_h:
3805 case Intrinsic::loongarch_lasx_xvmax_w:
3806 case Intrinsic::loongarch_lasx_xvmax_d:
3809 case Intrinsic::loongarch_lsx_vmax_bu:
3810 case Intrinsic::loongarch_lsx_vmax_hu:
3811 case Intrinsic::loongarch_lsx_vmax_wu:
3812 case Intrinsic::loongarch_lsx_vmax_du:
3813 case Intrinsic::loongarch_lasx_xvmax_bu:
3814 case Intrinsic::loongarch_lasx_xvmax_hu:
3815 case Intrinsic::loongarch_lasx_xvmax_wu:
3816 case Intrinsic::loongarch_lasx_xvmax_du:
3819 case Intrinsic::loongarch_lsx_vmaxi_b:
3820 case Intrinsic::loongarch_lsx_vmaxi_h:
3821 case Intrinsic::loongarch_lsx_vmaxi_w:
3822 case Intrinsic::loongarch_lsx_vmaxi_d:
3823 case Intrinsic::loongarch_lasx_xvmaxi_b:
3824 case Intrinsic::loongarch_lasx_xvmaxi_h:
3825 case Intrinsic::loongarch_lasx_xvmaxi_w:
3826 case Intrinsic::loongarch_lasx_xvmaxi_d:
3828 lowerVectorSplatImm<5>(
N, 2, DAG,
true));
3829 case Intrinsic::loongarch_lsx_vmaxi_bu:
3830 case Intrinsic::loongarch_lsx_vmaxi_hu:
3831 case Intrinsic::loongarch_lsx_vmaxi_wu:
3832 case Intrinsic::loongarch_lsx_vmaxi_du:
3833 case Intrinsic::loongarch_lasx_xvmaxi_bu:
3834 case Intrinsic::loongarch_lasx_xvmaxi_hu:
3835 case Intrinsic::loongarch_lasx_xvmaxi_wu:
3836 case Intrinsic::loongarch_lasx_xvmaxi_du:
3838 lowerVectorSplatImm<5>(
N, 2, DAG));
3839 case Intrinsic::loongarch_lsx_vmin_b:
3840 case Intrinsic::loongarch_lsx_vmin_h:
3841 case Intrinsic::loongarch_lsx_vmin_w:
3842 case Intrinsic::loongarch_lsx_vmin_d:
3843 case Intrinsic::loongarch_lasx_xvmin_b:
3844 case Intrinsic::loongarch_lasx_xvmin_h:
3845 case Intrinsic::loongarch_lasx_xvmin_w:
3846 case Intrinsic::loongarch_lasx_xvmin_d:
3849 case Intrinsic::loongarch_lsx_vmin_bu:
3850 case Intrinsic::loongarch_lsx_vmin_hu:
3851 case Intrinsic::loongarch_lsx_vmin_wu:
3852 case Intrinsic::loongarch_lsx_vmin_du:
3853 case Intrinsic::loongarch_lasx_xvmin_bu:
3854 case Intrinsic::loongarch_lasx_xvmin_hu:
3855 case Intrinsic::loongarch_lasx_xvmin_wu:
3856 case Intrinsic::loongarch_lasx_xvmin_du:
3859 case Intrinsic::loongarch_lsx_vmini_b:
3860 case Intrinsic::loongarch_lsx_vmini_h:
3861 case Intrinsic::loongarch_lsx_vmini_w:
3862 case Intrinsic::loongarch_lsx_vmini_d:
3863 case Intrinsic::loongarch_lasx_xvmini_b:
3864 case Intrinsic::loongarch_lasx_xvmini_h:
3865 case Intrinsic::loongarch_lasx_xvmini_w:
3866 case Intrinsic::loongarch_lasx_xvmini_d:
3868 lowerVectorSplatImm<5>(
N, 2, DAG,
true));
3869 case Intrinsic::loongarch_lsx_vmini_bu:
3870 case Intrinsic::loongarch_lsx_vmini_hu:
3871 case Intrinsic::loongarch_lsx_vmini_wu:
3872 case Intrinsic::loongarch_lsx_vmini_du:
3873 case Intrinsic::loongarch_lasx_xvmini_bu:
3874 case Intrinsic::loongarch_lasx_xvmini_hu:
3875 case Intrinsic::loongarch_lasx_xvmini_wu:
3876 case Intrinsic::loongarch_lasx_xvmini_du:
3878 lowerVectorSplatImm<5>(
N, 2, DAG));
3879 case Intrinsic::loongarch_lsx_vmul_b:
3880 case Intrinsic::loongarch_lsx_vmul_h:
3881 case Intrinsic::loongarch_lsx_vmul_w:
3882 case Intrinsic::loongarch_lsx_vmul_d:
3883 case Intrinsic::loongarch_lasx_xvmul_b:
3884 case Intrinsic::loongarch_lasx_xvmul_h:
3885 case Intrinsic::loongarch_lasx_xvmul_w:
3886 case Intrinsic::loongarch_lasx_xvmul_d:
3889 case Intrinsic::loongarch_lsx_vmadd_b:
3890 case Intrinsic::loongarch_lsx_vmadd_h:
3891 case Intrinsic::loongarch_lsx_vmadd_w:
3892 case Intrinsic::loongarch_lsx_vmadd_d:
3893 case Intrinsic::loongarch_lasx_xvmadd_b:
3894 case Intrinsic::loongarch_lasx_xvmadd_h:
3895 case Intrinsic::loongarch_lasx_xvmadd_w:
3896 case Intrinsic::loongarch_lasx_xvmadd_d: {
3897 EVT ResTy =
N->getValueType(0);
3902 case Intrinsic::loongarch_lsx_vmsub_b:
3903 case Intrinsic::loongarch_lsx_vmsub_h:
3904 case Intrinsic::loongarch_lsx_vmsub_w:
3905 case Intrinsic::loongarch_lsx_vmsub_d:
3906 case Intrinsic::loongarch_lasx_xvmsub_b:
3907 case Intrinsic::loongarch_lasx_xvmsub_h:
3908 case Intrinsic::loongarch_lasx_xvmsub_w:
3909 case Intrinsic::loongarch_lasx_xvmsub_d: {
3910 EVT ResTy =
N->getValueType(0);
3915 case Intrinsic::loongarch_lsx_vdiv_b:
3916 case Intrinsic::loongarch_lsx_vdiv_h:
3917 case Intrinsic::loongarch_lsx_vdiv_w:
3918 case Intrinsic::loongarch_lsx_vdiv_d:
3919 case Intrinsic::loongarch_lasx_xvdiv_b:
3920 case Intrinsic::loongarch_lasx_xvdiv_h:
3921 case Intrinsic::loongarch_lasx_xvdiv_w:
3922 case Intrinsic::loongarch_lasx_xvdiv_d:
3925 case Intrinsic::loongarch_lsx_vdiv_bu:
3926 case Intrinsic::loongarch_lsx_vdiv_hu:
3927 case Intrinsic::loongarch_lsx_vdiv_wu:
3928 case Intrinsic::loongarch_lsx_vdiv_du:
3929 case Intrinsic::loongarch_lasx_xvdiv_bu:
3930 case Intrinsic::loongarch_lasx_xvdiv_hu:
3931 case Intrinsic::loongarch_lasx_xvdiv_wu:
3932 case Intrinsic::loongarch_lasx_xvdiv_du:
3935 case Intrinsic::loongarch_lsx_vmod_b:
3936 case Intrinsic::loongarch_lsx_vmod_h:
3937 case Intrinsic::loongarch_lsx_vmod_w:
3938 case Intrinsic::loongarch_lsx_vmod_d:
3939 case Intrinsic::loongarch_lasx_xvmod_b:
3940 case Intrinsic::loongarch_lasx_xvmod_h:
3941 case Intrinsic::loongarch_lasx_xvmod_w:
3942 case Intrinsic::loongarch_lasx_xvmod_d:
3945 case Intrinsic::loongarch_lsx_vmod_bu:
3946 case Intrinsic::loongarch_lsx_vmod_hu:
3947 case Intrinsic::loongarch_lsx_vmod_wu:
3948 case Intrinsic::loongarch_lsx_vmod_du:
3949 case Intrinsic::loongarch_lasx_xvmod_bu:
3950 case Intrinsic::loongarch_lasx_xvmod_hu:
3951 case Intrinsic::loongarch_lasx_xvmod_wu:
3952 case Intrinsic::loongarch_lasx_xvmod_du:
3955 case Intrinsic::loongarch_lsx_vand_v:
3956 case Intrinsic::loongarch_lasx_xvand_v:
3959 case Intrinsic::loongarch_lsx_vor_v:
3960 case Intrinsic::loongarch_lasx_xvor_v:
3963 case Intrinsic::loongarch_lsx_vxor_v:
3964 case Intrinsic::loongarch_lasx_xvxor_v:
3967 case Intrinsic::loongarch_lsx_vnor_v:
3968 case Intrinsic::loongarch_lasx_xvnor_v: {
3973 case Intrinsic::loongarch_lsx_vandi_b:
3974 case Intrinsic::loongarch_lasx_xvandi_b:
3976 lowerVectorSplatImm<8>(
N, 2, DAG));
3977 case Intrinsic::loongarch_lsx_vori_b:
3978 case Intrinsic::loongarch_lasx_xvori_b:
3980 lowerVectorSplatImm<8>(
N, 2, DAG));
3981 case Intrinsic::loongarch_lsx_vxori_b:
3982 case Intrinsic::loongarch_lasx_xvxori_b:
3984 lowerVectorSplatImm<8>(
N, 2, DAG));
3985 case Intrinsic::loongarch_lsx_vsll_b:
3986 case Intrinsic::loongarch_lsx_vsll_h:
3987 case Intrinsic::loongarch_lsx_vsll_w:
3988 case Intrinsic::loongarch_lsx_vsll_d:
3989 case Intrinsic::loongarch_lasx_xvsll_b:
3990 case Intrinsic::loongarch_lasx_xvsll_h:
3991 case Intrinsic::loongarch_lasx_xvsll_w:
3992 case Intrinsic::loongarch_lasx_xvsll_d:
3995 case Intrinsic::loongarch_lsx_vslli_b:
3996 case Intrinsic::loongarch_lasx_xvslli_b:
3998 lowerVectorSplatImm<3>(
N, 2, DAG));
3999 case Intrinsic::loongarch_lsx_vslli_h:
4000 case Intrinsic::loongarch_lasx_xvslli_h:
4002 lowerVectorSplatImm<4>(
N, 2, DAG));
4003 case Intrinsic::loongarch_lsx_vslli_w:
4004 case Intrinsic::loongarch_lasx_xvslli_w:
4006 lowerVectorSplatImm<5>(
N, 2, DAG));
4007 case Intrinsic::loongarch_lsx_vslli_d:
4008 case Intrinsic::loongarch_lasx_xvslli_d:
4010 lowerVectorSplatImm<6>(
N, 2, DAG));
4011 case Intrinsic::loongarch_lsx_vsrl_b:
4012 case Intrinsic::loongarch_lsx_vsrl_h:
4013 case Intrinsic::loongarch_lsx_vsrl_w:
4014 case Intrinsic::loongarch_lsx_vsrl_d:
4015 case Intrinsic::loongarch_lasx_xvsrl_b:
4016 case Intrinsic::loongarch_lasx_xvsrl_h:
4017 case Intrinsic::loongarch_lasx_xvsrl_w:
4018 case Intrinsic::loongarch_lasx_xvsrl_d:
  case Intrinsic::loongarch_lsx_vsrli_b:
  case Intrinsic::loongarch_lasx_xvsrli_b:
    return DAG.getNode(ISD::SRL, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<3>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_h:
  case Intrinsic::loongarch_lasx_xvsrli_h:
    return DAG.getNode(ISD::SRL, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<4>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_w:
  case Intrinsic::loongarch_lasx_xvsrli_w:
    return DAG.getNode(ISD::SRL, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_d:
  case Intrinsic::loongarch_lasx_xvsrli_d:
    return DAG.getNode(ISD::SRL, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<6>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsra_b:
  case Intrinsic::loongarch_lsx_vsra_h:
  case Intrinsic::loongarch_lsx_vsra_w:
  case Intrinsic::loongarch_lsx_vsra_d:
  case Intrinsic::loongarch_lasx_xvsra_b:
  case Intrinsic::loongarch_lasx_xvsra_h:
  case Intrinsic::loongarch_lasx_xvsra_w:
  case Intrinsic::loongarch_lasx_xvsra_d:
    return DAG.getNode(ISD::SRA, DL, N->getValueType(0), N->getOperand(1),
                       truncateVecElts(N, DAG));
  case Intrinsic::loongarch_lsx_vsrai_b:
  case Intrinsic::loongarch_lasx_xvsrai_b:
    return DAG.getNode(ISD::SRA, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<3>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_h:
  case Intrinsic::loongarch_lasx_xvsrai_h:
    return DAG.getNode(ISD::SRA, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<4>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_w:
  case Intrinsic::loongarch_lasx_xvsrai_w:
    return DAG.getNode(ISD::SRA, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_d:
  case Intrinsic::loongarch_lasx_xvsrai_d:
    return DAG.getNode(ISD::SRA, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<6>(N, 2, DAG));
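  // Unary bit-counting intrinsics map onto the generic CTLZ/CTPOP nodes.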
  case Intrinsic::loongarch_lsx_vclz_b:
  case Intrinsic::loongarch_lsx_vclz_h:
  case Intrinsic::loongarch_lsx_vclz_w:
  case Intrinsic::loongarch_lsx_vclz_d:
  case Intrinsic::loongarch_lasx_xvclz_b:
  case Intrinsic::loongarch_lasx_xvclz_h:
  case Intrinsic::loongarch_lasx_xvclz_w:
  case Intrinsic::loongarch_lasx_xvclz_d:
    return DAG.getNode(ISD::CTLZ, DL, N->getValueType(0), N->getOperand(1));
  case Intrinsic::loongarch_lsx_vpcnt_b:
  case Intrinsic::loongarch_lsx_vpcnt_h:
  case Intrinsic::loongarch_lsx_vpcnt_w:
  case Intrinsic::loongarch_lsx_vpcnt_d:
  case Intrinsic::loongarch_lasx_xvpcnt_b:
  case Intrinsic::loongarch_lasx_xvpcnt_h:
  case Intrinsic::loongarch_lasx_xvpcnt_w:
  case Intrinsic::loongarch_lasx_xvpcnt_d:
    return DAG.getNode(ISD::CTPOP, DL, N->getValueType(0), N->getOperand(1));
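  // vbitclr{,i} clears one bit per element, selected modulo the element
  // width.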
  case Intrinsic::loongarch_lsx_vbitclr_b:
  case Intrinsic::loongarch_lsx_vbitclr_h:
  case Intrinsic::loongarch_lsx_vbitclr_w:
  case Intrinsic::loongarch_lsx_vbitclr_d:
  case Intrinsic::loongarch_lasx_xvbitclr_b:
  case Intrinsic::loongarch_lasx_xvbitclr_h:
  case Intrinsic::loongarch_lasx_xvbitclr_w:
  case Intrinsic::loongarch_lasx_xvbitclr_d:
    return lowerVectorBitClear(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_b:
  case Intrinsic::loongarch_lasx_xvbitclri_b:
    return lowerVectorBitClearImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_h:
  case Intrinsic::loongarch_lasx_xvbitclri_h:
    return lowerVectorBitClearImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_w:
  case Intrinsic::loongarch_lasx_xvbitclri_w:
    return lowerVectorBitClearImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_d:
  case Intrinsic::loongarch_lasx_xvbitclri_d:
    return lowerVectorBitClearImm<6>(N, DAG);
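  // vbitset sets the selected bit by ORing the element with a shifted one.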
  case Intrinsic::loongarch_lsx_vbitset_b:
  case Intrinsic::loongarch_lsx_vbitset_h:
  case Intrinsic::loongarch_lsx_vbitset_w:
  case Intrinsic::loongarch_lsx_vbitset_d:
  case Intrinsic::loongarch_lasx_xvbitset_b:
  case Intrinsic::loongarch_lasx_xvbitset_h:
  case Intrinsic::loongarch_lasx_xvbitset_w:
  case Intrinsic::loongarch_lasx_xvbitset_d: {
    EVT VecTy = N->getValueType(0);
    SDValue One = DAG.getConstant(1, DL, VecTy);
    return DAG.getNode(
        ISD::OR, DL, VecTy, N->getOperand(1),
        DAG.getNode(ISD::SHL, DL, VecTy, One, truncateVecElts(N, DAG)));
  }
  case Intrinsic::loongarch_lsx_vbitseti_b:
  case Intrinsic::loongarch_lasx_xvbitseti_b:
    return lowerVectorBitSetImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_h:
  case Intrinsic::loongarch_lasx_xvbitseti_h:
    return lowerVectorBitSetImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_w:
  case Intrinsic::loongarch_lasx_xvbitseti_w:
    return lowerVectorBitSetImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_d:
  case Intrinsic::loongarch_lasx_xvbitseti_d:
    return lowerVectorBitSetImm<6>(N, DAG);
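  // vbitrev flips the selected bit by XORing the element with a shifted one.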
  case Intrinsic::loongarch_lsx_vbitrev_b:
  case Intrinsic::loongarch_lsx_vbitrev_h:
  case Intrinsic::loongarch_lsx_vbitrev_w:
  case Intrinsic::loongarch_lsx_vbitrev_d:
  case Intrinsic::loongarch_lasx_xvbitrev_b:
  case Intrinsic::loongarch_lasx_xvbitrev_h:
  case Intrinsic::loongarch_lasx_xvbitrev_w:
  case Intrinsic::loongarch_lasx_xvbitrev_d: {
    EVT VecTy = N->getValueType(0);
    SDValue One = DAG.getConstant(1, DL, VecTy);
    return DAG.getNode(
        ISD::XOR, DL, VecTy, N->getOperand(1),
        DAG.getNode(ISD::SHL, DL, VecTy, One, truncateVecElts(N, DAG)));
  }
  case Intrinsic::loongarch_lsx_vbitrevi_b:
  case Intrinsic::loongarch_lasx_xvbitrevi_b:
    return lowerVectorBitRevImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_h:
  case Intrinsic::loongarch_lasx_xvbitrevi_h:
    return lowerVectorBitRevImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_w:
  case Intrinsic::loongarch_lasx_xvbitrevi_w:
    return lowerVectorBitRevImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_d:
  case Intrinsic::loongarch_lasx_xvbitrevi_d:
    return lowerVectorBitRevImm<6>(N, DAG);
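  // Floating-point arithmetic intrinsics map onto the generic FP nodes;
  // vfmadd becomes a single fused ISD::FMA.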
  case Intrinsic::loongarch_lsx_vfadd_s:
  case Intrinsic::loongarch_lsx_vfadd_d:
  case Intrinsic::loongarch_lasx_xvfadd_s:
  case Intrinsic::loongarch_lasx_xvfadd_d:
    return DAG.getNode(ISD::FADD, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfsub_s:
  case Intrinsic::loongarch_lsx_vfsub_d:
  case Intrinsic::loongarch_lasx_xvfsub_s:
  case Intrinsic::loongarch_lasx_xvfsub_d:
    return DAG.getNode(ISD::FSUB, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfmul_s:
  case Intrinsic::loongarch_lsx_vfmul_d:
  case Intrinsic::loongarch_lasx_xvfmul_s:
  case Intrinsic::loongarch_lasx_xvfmul_d:
    return DAG.getNode(ISD::FMUL, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfdiv_s:
  case Intrinsic::loongarch_lsx_vfdiv_d:
  case Intrinsic::loongarch_lasx_xvfdiv_s:
  case Intrinsic::loongarch_lasx_xvfdiv_d:
    return DAG.getNode(ISD::FDIV, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfmadd_s:
  case Intrinsic::loongarch_lsx_vfmadd_d:
  case Intrinsic::loongarch_lasx_xvfmadd_s:
  case Intrinsic::loongarch_lasx_xvfmadd_d:
    return DAG.getNode(ISD::FMA, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2), N->getOperand(3));
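  // vinsgr2vr inserts a GPR into the lane named by an immediate; the
  // immediate width (4/3/2/1 bits) matches the lane count of each type.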
  case Intrinsic::loongarch_lsx_vinsgr2vr_b:
    return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<4>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vinsgr2vr_h:
  case Intrinsic::loongarch_lasx_xvinsgr2vr_w:
    return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<3>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vinsgr2vr_w:
  case Intrinsic::loongarch_lasx_xvinsgr2vr_d:
    return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<2>(N, 3, DAG, Subtarget));
  case Intrinsic::loongarch_lsx_vinsgr2vr_d:
    return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       legalizeIntrinsicImmArg<1>(N, 3, DAG, Subtarget));
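  // vreplgr2vr broadcasts a GPR to every element of the result vector.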
  case Intrinsic::loongarch_lsx_vreplgr2vr_b:
  case Intrinsic::loongarch_lsx_vreplgr2vr_h:
  case Intrinsic::loongarch_lsx_vreplgr2vr_w:
  case Intrinsic::loongarch_lsx_vreplgr2vr_d:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_b:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_h:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_w:
  case Intrinsic::loongarch_lasx_xvreplgr2vr_d: {
    EVT ResTy = N->getValueType(0);
    SDValue Splat =
        DAG.getNode(ISD::SPLAT_VECTOR, DL, ResTy, N->getOperand(1));
    return Splat;
  }
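  // vreplve broadcasts the element picked by a runtime index, which has
  // no generic ISD equivalent and so keeps a target-specific node.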
  case Intrinsic::loongarch_lsx_vreplve_b:
  case Intrinsic::loongarch_lsx_vreplve_h:
  case Intrinsic::loongarch_lsx_vreplve_w:
  case Intrinsic::loongarch_lsx_vreplve_d:
  case Intrinsic::loongarch_lasx_xvreplve_b:
  case Intrinsic::loongarch_lasx_xvreplve_h:
  case Intrinsic::loongarch_lasx_xvreplve_w:
  case Intrinsic::loongarch_lasx_xvreplve_d:
    return DAG.getNode(LoongArchISD::VREPLVE, DL, N->getValueType(0),
                       N->getOperand(1), N->getOperand(2));
  }
  return SDValue();
}
SDValue LoongArchTargetLowering::PerformDAGCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
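
  // Divide-by-zero trap expansion: MBB is split so that BreakMBB holds the
  // trapping break instruction and SinkMBB the original fall-through code.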
  MF->insert(It, BreakMBB);
  MF->insert(It, SinkMBB);

  // Transfer the remainder of MBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->end(), MBB, std::next(MI.getIterator()),
                  MBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);