#include "llvm/IR/IntrinsicsLoongArch.h"

#define DEBUG_TYPE "loongarch-isel-lowering"

static cl::opt<bool> ZeroDivCheck("loongarch-check-zero-division", cl::Hidden,
                                  cl::desc("Trap on integer division by zero."),
                                  cl::init(false));
  if (Subtarget.hasBasicF())
    addRegisterClass(MVT::f32, &LoongArch::FPR32RegClass);
  if (Subtarget.hasBasicD())
    addRegisterClass(MVT::f64, &LoongArch::FPR64RegClass);

  static const MVT::SimpleValueType LSXVTs[] = {
      MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64};
  static const MVT::SimpleValueType LASXVTs[] = {
      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64};

  if (Subtarget.hasExtLSX())
    for (MVT VT : LSXVTs)
      addRegisterClass(VT, &LoongArch::LSX128RegClass);

  if (Subtarget.hasExtLASX())
    for (MVT VT : LASXVTs)
      addRegisterClass(VT, &LoongArch::LASX256RegClass);
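  // Note: LSX provides 128-bit vector registers and LASX widens them to 256
  // bits; the two type lists above are the legal vector types for each
  // register class.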
  if (Subtarget.hasBasicF()) {

  if (!Subtarget.hasBasicD()) {

  if (Subtarget.hasBasicD()) {

  if (Subtarget.hasExtLSX()) {

    for (MVT VT : LSXVTs) {

    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {

    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32})

    for (MVT VT : {MVT::v8i16, MVT::v4i32, MVT::v2i64})

    for (MVT VT : {MVT::v4i32, MVT::v2i64}) {

    for (MVT VT : {MVT::v4f32, MVT::v2f64}) {
  if (Subtarget.hasExtLASX()) {
    for (MVT VT : LASXVTs) {

    for (MVT VT : {MVT::v4i64, MVT::v8i32, MVT::v16i16, MVT::v32i8}) {

    for (MVT VT : {MVT::v32i8, MVT::v16i16, MVT::v8i32})

    for (MVT VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64})

    for (MVT VT : {MVT::v8i32, MVT::v4i32, MVT::v4i64}) {

    for (MVT VT : {MVT::v8f32, MVT::v4f64}) {

  if (Subtarget.hasExtLSX())

  if (Subtarget.hasLAMCAS())
SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
                                                SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::ATOMIC_FENCE:
    return lowerATOMIC_FENCE(Op, DAG);
  case ISD::EH_DWARF_CFA:
    return lowerEH_DWARF_CFA(Op, DAG);
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN:
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID:
    return lowerINTRINSIC_VOID(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::JumpTable:
    return lowerJumpTable(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, /*IsSRA=*/true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, /*IsSRA=*/false);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:
    return lowerFP_TO_SINT(Op, DAG);
  case ISD::BITCAST:
    return lowerBITCAST(Op, DAG);
  case ISD::UINT_TO_FP:
    return lowerUINT_TO_FP(Op, DAG);
  case ISD::SINT_TO_FP:
    return lowerSINT_TO_FP(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::WRITE_REGISTER:
    return lowerWRITE_REGISTER(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::BUILD_VECTOR:
    return lowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return lowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::BITREVERSE:
    return lowerBITREVERSE(Op, DAG);
  }
  return SDValue();
}
  EVT ResTy = Op->getValueType(0);

  for (unsigned int i = 0; i < NewEltNum; i++) {

    SDValue RevOp = DAG.getNode((ResTy == MVT::v16i8 || ResTy == MVT::v32i8)

  for (unsigned int i = 0; i < NewEltNum; i++)
    for (int j = OrigEltNum / NewEltNum - 1; j >= 0; j--)
      Mask.push_back(j + (OrigEltNum / NewEltNum) * i);
/// Determine whether a range fits a regular pattern of values.
template <typename ValType>
static bool
fitsRegularPattern(typename SmallVectorImpl<ValType>::const_iterator Begin,
                   unsigned CheckStride,
                   typename SmallVectorImpl<ValType>::const_iterator End,
                   ValType ExpectedIndex, unsigned ExpectedIndexStride) {
  auto &I = Begin;

  while (I != End) {
    if (*I != -1 && *I != ExpectedIndex)
      return false;
    ExpectedIndex += ExpectedIndexStride;

    // Incrementing past End is undefined behaviour, so increment one step at
    // a time and check for End at each step.
    for (unsigned n = 0; n < CheckStride && I != End; ++n, ++I)
      ; // Empty loop body.
  }
  return true;
}
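// Worked example for fitsRegularPattern: with a v4i32 mask <0, 4, 2, 6>, the
// even positions (Begin, CheckStride = 2) must read 0, 2, ... and the odd
// positions (Begin + 1) must read Mask.size(), Mask.size() + 2, ...; undef
// entries (-1) match any expected index.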
  int SplatIndex = -1;
  for (const auto &M : Mask) {
    if (M != -1) {
      SplatIndex = M;
      break;
    }
  }

  if (SplatIndex == -1)
    return DAG.getUNDEF(VT);

  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
  if (fitsRegularPattern<int>(Mask.begin(), 1, Mask.end(), SplatIndex, 0)) {
    APInt Imm(64, SplatIndex);
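  // A mask whose defined elements all repeat one index is a splat, so it can
  // be emitted as a single [x]vreplvei.{b/h/w/d} with that index as the
  // immediate.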
  int SubMask[4] = {-1, -1, -1, -1};
  for (unsigned i = 0; i < 4; ++i) {
    for (unsigned j = i; j < Mask.size(); j += 4) {
      int Idx = Mask[j];

      // Convert from a vector index to an index inside the 4-element
      // subvector; give up if the element lies outside the subvector.
      if (Idx != -1) {
        Idx -= 4 * (j / 4);
        if (Idx < 0 || Idx >= 4)
          return SDValue();
      }

      // Adopt the first defined index seen for this position...
      if (SubMask[i] == -1)
        SubMask[i] = Idx;
      // ...and reject the mask if any later defined index disagrees.
      else if (Idx != -1 && Idx != SubMask[i])
        return SDValue();
    }
  }

  // Calculate the immediate, replacing any remaining undefs with zero.
  APInt Imm(64, 0);
  for (int i = 3; i >= 0; --i) {
    int Idx = SubMask[i];
    if (Idx == -1)
      Idx = 0;
    Imm <<= 2;
    Imm |= Idx & 0x3;
  }
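  // vshuf4i shuffles inside each group of four elements with a 2-bit index
  // per position, so the whole mask has to collapse into the single 8-bit
  // immediate assembled by this loop.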
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, 0, 2))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size(), 2))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 2))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size(), 2))
    V2 = OriV2;
  else
    return SDValue();
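  // Example: the v4i32 mask <0, 4, 2, 6> takes the even elements of both
  // sources, interleaved, and is selected as a single vpackev.w.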
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, 1, 2))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size() + 1, 2))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 1, 2))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size() + 1, 2))
    V2 = OriV2;
  else
    return SDValue();
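  // Example: the v4i32 mask <1, 5, 3, 7> takes the odd elements of both
  // sources and is selected as vpackod.w.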
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, HalfSize, 1))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size() + HalfSize, 1))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Begin + 1, 2, End, HalfSize, 1))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size() + HalfSize,
                                   1))
    V2 = OriV2;
  else
    return SDValue();
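  // Example: the v4i32 mask <2, 6, 3, 7> interleaves the high halves of the
  // two sources, which is vilvh.w.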
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, 0, 1))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size(), 1))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 1))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size(), 1))
    V2 = OriV2;
  else
    return SDValue();
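  // Example: the v4i32 mask <0, 4, 1, 5> interleaves the low halves of the
  // two sources, which is vilvl.w.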
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, Mid, 0, 2))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 1, Mid, Mask.size(), 2))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Mid, 1, End, 0, 2))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Mid, 1, End, Mask.size(), 2))
    V2 = OriV2;
  else
    return SDValue();
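  // Example: the v4i32 mask <0, 2, 4, 6> puts the even elements of one
  // source in the low half and the even elements of the other in the high
  // half, matching vpickev.w.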
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, Mid, 1, 2))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 1, Mid, Mask.size() + 1, 2))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Mid, 1, End, 1, 2))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Mid, 1, End, Mask.size() + 1, 2))
    V2 = OriV2;
  else
    return SDValue();
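  // Example: the v4i32 mask <1, 3, 5, 7> picks the odd elements of both
  // sources, matching vpickod.w. The 128-bit shuffle lowering tries these
  // single-instruction patterns (vreplvei, vshuf4i, vpackev/od, vilvh/l,
  // vpickev/od) in turn before falling back to a generic vshuf.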
  assert(VT.is128BitVector() && "Vector type is unsupported for lsx!");
  assert(V1.getSimpleValueType() == V2.getSimpleValueType() &&
         "Two operands have different types!");
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Unexpected mask size for shuffle!");
  assert(Mask.size() % 2 == 0 && "Expected even mask size.");
  int SplatIndex = -1;
  for (const auto &M : Mask) {
    if (M != -1) {
      SplatIndex = M;
      break;
    }
  }

  if (SplatIndex == -1)
    return DAG.getUNDEF(VT);

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;

  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
  if (fitsRegularPattern<int>(Begin, 1, End - HalfSize, SplatIndex, 0) &&
      fitsRegularPattern<int>(Begin + HalfSize, 1, End, SplatIndex + HalfSize,
                              0)) {
    APInt Imm(64, SplatIndex);
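  // xvrepl128vei splats an element within each 128-bit lane independently,
  // so the low half of the mask must splat SplatIndex and the high half must
  // splat SplatIndex + HalfSize.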
  // When the mask size is 4 or fewer, lower-cost instructions can be used.
  if (Mask.size() <= 4)
    return SDValue();
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  unsigned LeftSize = HalfSize / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, HalfSize - LeftSize,
                              1) &&
      fitsRegularPattern<int>(Begin + HalfSize, 2, End, HalfSize + LeftSize, 1))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 2, End - HalfSize,
                                   Mask.size() + HalfSize - LeftSize, 1) &&
           fitsRegularPattern<int>(Begin + HalfSize, 2, End,
                                   Mask.size() + HalfSize + LeftSize, 1))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize, HalfSize - LeftSize,
                              1) &&
      fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End, HalfSize + LeftSize,
                              1))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize,
                                   Mask.size() + HalfSize - LeftSize, 1) &&
           fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End,
                                   Mask.size() + HalfSize + LeftSize, 1))
    V2 = OriV2;
  else
    return SDValue();
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, 0, 1) &&
      fitsRegularPattern<int>(Begin + HalfSize, 2, End, HalfSize, 1))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, Mask.size(), 1) &&
           fitsRegularPattern<int>(Begin + HalfSize, 2, End,
                                   Mask.size() + HalfSize, 1))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize, 0, 1) &&
      fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End, HalfSize, 1))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize, Mask.size(),
                                   1) &&
           fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End,
                                   Mask.size() + HalfSize, 1))
    V2 = OriV2;
  else
    return SDValue();
  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, LeftMid, 0, 2) &&
      fitsRegularPattern<int>(Mid, 1, RightMid, HalfSize, 2))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 1, LeftMid, Mask.size(), 2) &&
           fitsRegularPattern<int>(Mid, 1, RightMid, Mask.size() + HalfSize, 2))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(LeftMid, 1, Mid, 0, 2) &&
      fitsRegularPattern<int>(RightMid, 1, End, HalfSize, 2))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(LeftMid, 1, Mid, Mask.size(), 2) &&
           fitsRegularPattern<int>(RightMid, 1, End, Mask.size() + HalfSize, 2))
    V2 = OriV2;
  else
    return SDValue();
  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, LeftMid, 1, 2) &&
      fitsRegularPattern<int>(Mid, 1, RightMid, HalfSize + 1, 2))
    V1 = OriV1;
  else if (fitsRegularPattern<int>(Begin, 1, LeftMid, Mask.size() + 1, 2) &&
           fitsRegularPattern<int>(Mid, 1, RightMid, Mask.size() + HalfSize + 1,
                                   2))
    V1 = OriV2;
  else
    return SDValue();

  if (fitsRegularPattern<int>(LeftMid, 1, Mid, 1, 2) &&
      fitsRegularPattern<int>(RightMid, 1, End, HalfSize + 1, 2))
    V2 = OriV1;
  else if (fitsRegularPattern<int>(LeftMid, 1, Mid, Mask.size() + 1, 2) &&
           fitsRegularPattern<int>(RightMid, 1, End, Mask.size() + HalfSize + 1,
                                   2))
    V2 = OriV2;
  else
    return SDValue();
  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + HalfSize;
  const auto &End = Mask.end();

  // xvshuf selects elements within each 128-bit lane, so the mask indices
  // are rewritten lane-relative here; an index that would cross a lane
  // boundary makes the conversion fail.
  for (auto it = Begin; it < Mid; it++) {
    if (*it < 0) // UNDEF
      MaskAlloc.push_back(DAG.getTargetConstant(0, DL, MVT::i64));
    else if ((*it >= 0 && *it < HalfSize) ||
             (*it >= MaskSize && *it <= MaskSize + HalfSize)) {
      int M = *it < HalfSize ? *it : *it - HalfSize;
      MaskAlloc.push_back(DAG.getTargetConstant(M, DL, MVT::i64));
    } else
      return SDValue();
  }
  assert((int)MaskAlloc.size() == HalfSize && "xvshuf convert failed!");

  for (auto it = Mid; it < End; it++) {
    if (*it < 0) // UNDEF
      MaskAlloc.push_back(DAG.getTargetConstant(0, DL, MVT::i64));
    else if ((*it >= HalfSize && *it < MaskSize) ||
             (*it >= MaskSize + HalfSize && *it < MaskSize * 2)) {
      int M = *it < MaskSize ? *it - HalfSize : *it - MaskSize;
      MaskAlloc.push_back(DAG.getTargetConstant(M, DL, MVT::i64));
    } else
      return SDValue();
  }
  assert((int)MaskAlloc.size() == MaskSize && "xvshuf convert failed!");
  enum HalfMaskType { HighLaneTy, LowLaneTy, None };

  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;

  HalfMaskType preMask = None, postMask = None;

  if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
      }))
    preMask = HighLaneTy;
  else if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
           }))
    preMask = LowLaneTy;

  if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
      }))
    postMask = HighLaneTy;
  else if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
           }))
    postMask = LowLaneTy;

  // This order already matches the instructions' lane layout; nothing to do.
  if (preMask == HighLaneTy && postMask == LowLaneTy) {
    return;
  }
  if (preMask == LowLaneTy && postMask == HighLaneTy) {

    if (!V2.isUndef()) {

    }

    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
    }
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
    }
  } else if (preMask == LowLaneTy && postMask == LowLaneTy) {

    if (!V2.isUndef()) {

    }

    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
    }
  } else if (preMask == HighLaneTy && postMask == HighLaneTy) {

    if (!V2.isUndef()) {

    }

    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
    }
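  // LASX instructions shuffle within 128-bit lanes, so when both halves of
  // the mask read from the "wrong" lane, the sources are lane-swapped first
  // and the indices rebased as above; the per-lane matchers can then apply.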
  assert(VT.is256BitVector() && "Vector type is unsupported for lasx!");
  assert(V1.getSimpleValueType() == V2.getSimpleValueType() &&
         "Two operands have different types!");
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Unexpected mask size for shuffle!");
  assert(Mask.size() % 2 == 0 && "Expected even mask size.");
  assert(Mask.size() >= 4 && "Mask size is less than 4.");
  MVT VT = Op.getSimpleValueType();

  bool V1IsUndef = V1.isUndef();
  bool V2IsUndef = V2.isUndef();
  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);

  // Canonicalize mask entries that reference the undef second operand.
  if (V2IsUndef &&
      any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
    SmallVector<int, 8> NewMask(OrigMask);
    for (int &M : NewMask)
      if (M >= NumElements)
        M = -1;
    return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
  }

  int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
  (void)MaskUpperLimit;
  assert(llvm::all_of(OrigMask,
                      [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
         "Out of bounds shuffle index");
  if (isa<ConstantSDNode>(Op))
    return true;
  if (isa<ConstantFPSDNode>(Op))
    return true;
  EVT ResTy = Op->getValueType(0);

  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;

  if ((!Subtarget.hasExtLSX() || !Is128Vec) &&
      (!Subtarget.hasExtLASX() || !Is256Vec))
    return SDValue();

  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
                            /*MinSplatBits=*/8) &&
      SplatBitSize <= 64) {
    // We can only handle 8, 16, 32, or 64-bit elements.
    if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
        SplatBitSize != 64)
      return SDValue();

    EVT ViaVecTy;

    switch (SplatBitSize) {
    default:
      return SDValue();
    case 8:
      ViaVecTy = Is128Vec ? MVT::v16i8 : MVT::v32i8;
      break;
    case 16:
      ViaVecTy = Is128Vec ? MVT::v8i16 : MVT::v16i16;
      break;
    case 32:
      ViaVecTy = Is128Vec ? MVT::v4i32 : MVT::v8i32;
      break;
    case 64:
      ViaVecTy = Is128Vec ? MVT::v2i64 : MVT::v4i64;
      break;
    }

    if (ViaVecTy != ResTy)
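    // The splat is materialized on the integer vector type matching
    // SplatBitSize (where a single [x]vldi / [x]vrepli immediate can encode
    // it) and bitcast back to the requested type when the two differ.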
  EVT ResTy = Node->getValueType(0);

  // Non-splat build_vectors are lowered as a chain of element inserts.
  for (unsigned i = 0; i < NumElts; ++i) {
    Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector,
                         Node->getOperand(i),
SDValue
LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT VecTy = Op->getOperand(0)->getValueType(0);
  SDValue Idx = Op->getOperand(1);
  EVT EltTy = VecTy.getVectorElementType();
  unsigned NumElts = VecTy.getVectorNumElements();

  if (isa<ConstantSDNode>(Idx) &&
      (EltTy == MVT::i32 || EltTy == MVT::i64 || EltTy == MVT::f32 ||
       EltTy == MVT::f64 || Idx->getAsZExtVal() < NumElts / 2))
    return Op;

  return SDValue();
}
SDValue
LoongArchTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                SelectionDAG &DAG) const {
  if (isa<ConstantSDNode>(Op->getOperand(2)))
    return Op;

  return SDValue();
}
  if (Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i32) {
    DAG.getContext()->emitError(
        "On LA64, only 64-bit registers can be written.");
    return Op.getOperand(0);
  }

  if (!Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i64) {
    DAG.getContext()->emitError(
        "On LA32, only 32-bit registers can be written.");
    return Op.getOperand(0);
  }

  if (!isa<ConstantSDNode>(Op.getOperand(0))) {
    DAG.getContext()->emitError("argument to '__builtin_frame_address' must "
                                "be a constant integer");
  EVT VT = Op.getValueType();

  unsigned Depth = Op.getConstantOperandVal(0);
  int GRLenInBytes = Subtarget.getGRLen() / 8;

  while (Depth--) {
    int Offset = -(GRLenInBytes * 2);
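  // Each enclosing frame is walked through its frame record: the return
  // address and the previous frame pointer are stored at the top of the
  // frame, so the caller's frame pointer lives at -2 * GRLenInBytes.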
  if (Op.getConstantOperandVal(0) != 0) {
    DAG.getContext()->emitError(
        "return address can only be determined for the current frame");
    return SDValue();
  }

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  assert(Subtarget.is64Bit() && Subtarget.hasBasicF() &&
         !Subtarget.hasBasicD() && "unexpected target features");

  if (Op0->getOpcode() == ISD::AND) {
    auto *C = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
    if (C && C->getZExtValue() < UINT64_C(0xFFFFFFFF))
      return Op;
  }

  if (Op0.getOpcode() == ISD::AssertZext &&
      dyn_cast<VTSDNode>(Op0.getOperand(1))->getVT().bitsLT(MVT::i32))
    return Op;

  EVT OpVT = Op0.getValueType();
  EVT RetVT = Op.getValueType();
  RTLIB::Libcall LC = RTLIB::getUINTTOFP(OpVT, RetVT);
  MakeLibCallOptions CallOptions;
  CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
  SDValue Chain = SDValue();
  SDValue Result;
  std::tie(Result, Chain) =
      makeLibCall(DAG, LC, Op.getValueType(), Op0, CallOptions, DL, Chain);
  assert(Subtarget.is64Bit() && Subtarget.hasBasicF() &&
         !Subtarget.hasBasicD() && "unexpected target features");

  if (Op0.getOpcode() == ISD::AssertSext &&
      dyn_cast<VTSDNode>(Op0.getOperand(1))->getVT().bitsLE(MVT::i32))
    return Op;

  EVT OpVT = Op0.getValueType();
  EVT RetVT = Op.getValueType();
  RTLIB::Libcall LC = RTLIB::getSINTTOFP(OpVT, RetVT);
  MakeLibCallOptions CallOptions;
  CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
  SDValue Chain = SDValue();
  SDValue Result;
  std::tie(Result, Chain) =
      makeLibCall(DAG, LC, Op.getValueType(), Op0, CallOptions, DL, Chain);
  if (Op.getValueType() == MVT::f32 && Op0.getValueType() == MVT::i32 &&
      Subtarget.is64Bit() && Subtarget.hasBasicF()) {

  if (Op.getValueSizeInBits() > 32 && Subtarget.hasBasicF() &&
      !Subtarget.hasBasicD()) {
                             N->getOffset(), Flags);

template <class NodeTy>
SDValue LoongArchTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                         CodeModel::Model M,
                                         bool IsLocal) const {

    // Under the small/medium code models the address is formed with a
    // PC-relative pcalau12i-based pair, while the large code model
    // materializes a full 64-bit address and is therefore LA64-only.
    assert(Subtarget.is64Bit() && "Large code model requires LA64");
  return getAddr(cast<BlockAddressSDNode>(Op), DAG,
                 DAG.getTarget().getCodeModel());

  return getAddr(cast<JumpTableSDNode>(Op), DAG,
                 DAG.getTarget().getCodeModel());

  return getAddr(cast<ConstantPoolSDNode>(Op), DAG,
                 DAG.getTarget().getCodeModel());

  assert(N->getOffset() == 0 && "unexpected offset in global node");

  if (GV->isDSOLocal() && isa<GlobalVariable>(GV)) {
    if (auto GCM = dyn_cast<GlobalVariable>(GV)->getCodeModel())
                                                 unsigned Opc, bool UseGOT,

  if (Opc == LoongArch::PseudoLA_TLS_LE && !Large)

  Args.push_back(Entry);
SDValue
LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                               SelectionDAG &DAG) const {

  assert((!Large || Subtarget.is64Bit()) && "Large code model requires LA64");

  assert(N->getOffset() == 0 && "unexpected offset in global node");

  case TLSModel::GeneralDynamic:
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_GD_LARGE
                                   : LoongArch::PseudoLA_TLS_GD,
                             Large);
  case TLSModel::LocalDynamic:
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_LD_LARGE
                                   : LoongArch::PseudoLA_TLS_LD,
                             Large);
  case TLSModel::InitialExec:
    return getStaticTLSAddr(N, DAG,
                            Large ? LoongArch::PseudoLA_TLS_IE_LARGE
                                  : LoongArch::PseudoLA_TLS_IE,
                            /*UseGOT=*/true, Large);
  case TLSModel::LocalExec:
    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE,
                            /*UseGOT=*/false);

  return getTLSDescAddr(N, DAG,
                        Large ? LoongArch::PseudoLA_TLS_DESC_LARGE
                              : LoongArch::PseudoLA_TLS_DESC,
                        Large);
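  // Each TLS access model maps onto one pseudo: general-dynamic ->
  // PseudoLA_TLS_GD, local-dynamic -> PseudoLA_TLS_LD, initial-exec ->
  // PseudoLA_TLS_IE, local-exec -> PseudoLA_TLS_LE; the *_LARGE variants
  // serve the large code model, and TLSDESC-enabled targets use
  // PseudoLA_TLS_DESC instead of the dynamic pseudos.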
template <unsigned N>
static SDValue checkIntrinsicImmArg(SDValue Op, unsigned ImmOp,
                                    SelectionDAG &DAG, bool IsSigned = false) {
  auto *CImm = cast<ConstantSDNode>(Op->getOperand(ImmOp));
  // Check the ImmArg: e.g. vreplvei.d selects one of two 64-bit elements, so
  // its index is validated with checkIntrinsicImmArg<1>, while vsat.w takes
  // a 5-bit saturation amount and uses checkIntrinsicImmArg<5>.
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
    DAG.getContext()->emitError(Op->getOperationName(0) +
                                ": argument out of range.");
    return DAG.getNode(ISD::UNDEF, SDLoc(Op), Op.getValueType());
  }
  return SDValue();
}
SDValue
LoongArchTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                 SelectionDAG &DAG) const {
  switch (Op.getConstantOperandVal(0)) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
  case Intrinsic::loongarch_lsx_vpickve2gr_d:
  case Intrinsic::loongarch_lsx_vpickve2gr_du:
  case Intrinsic::loongarch_lsx_vreplvei_d:
  case Intrinsic::loongarch_lasx_xvrepl128vei_d:
    return checkIntrinsicImmArg<1>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vreplvei_w:
  case Intrinsic::loongarch_lasx_xvrepl128vei_w:
  case Intrinsic::loongarch_lasx_xvpickve2gr_d:
  case Intrinsic::loongarch_lasx_xvpickve2gr_du:
  case Intrinsic::loongarch_lasx_xvpickve_d:
  case Intrinsic::loongarch_lasx_xvpickve_d_f:
    return checkIntrinsicImmArg<2>(Op, 2, DAG);
  case Intrinsic::loongarch_lasx_xvinsve0_d:
    return checkIntrinsicImmArg<2>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_b:
  case Intrinsic::loongarch_lsx_vsat_bu:
  case Intrinsic::loongarch_lsx_vrotri_b:
  case Intrinsic::loongarch_lsx_vsllwil_h_b:
  case Intrinsic::loongarch_lsx_vsllwil_hu_bu:
  case Intrinsic::loongarch_lsx_vsrlri_b:
  case Intrinsic::loongarch_lsx_vsrari_b:
  case Intrinsic::loongarch_lsx_vreplvei_h:
  case Intrinsic::loongarch_lasx_xvsat_b:
  case Intrinsic::loongarch_lasx_xvsat_bu:
  case Intrinsic::loongarch_lasx_xvrotri_b:
  case Intrinsic::loongarch_lasx_xvsllwil_h_b:
  case Intrinsic::loongarch_lasx_xvsllwil_hu_bu:
  case Intrinsic::loongarch_lasx_xvsrlri_b:
  case Intrinsic::loongarch_lasx_xvsrari_b:
  case Intrinsic::loongarch_lasx_xvrepl128vei_h:
  case Intrinsic::loongarch_lasx_xvpickve_w:
  case Intrinsic::loongarch_lasx_xvpickve_w_f:
    return checkIntrinsicImmArg<3>(Op, 2, DAG);
  case Intrinsic::loongarch_lasx_xvinsve0_w:
    return checkIntrinsicImmArg<3>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_h:
  case Intrinsic::loongarch_lsx_vsat_hu:
  case Intrinsic::loongarch_lsx_vrotri_h:
  case Intrinsic::loongarch_lsx_vsllwil_w_h:
  case Intrinsic::loongarch_lsx_vsllwil_wu_hu:
  case Intrinsic::loongarch_lsx_vsrlri_h:
  case Intrinsic::loongarch_lsx_vsrari_h:
  case Intrinsic::loongarch_lsx_vreplvei_b:
  case Intrinsic::loongarch_lasx_xvsat_h:
  case Intrinsic::loongarch_lasx_xvsat_hu:
  case Intrinsic::loongarch_lasx_xvrotri_h:
  case Intrinsic::loongarch_lasx_xvsllwil_w_h:
  case Intrinsic::loongarch_lasx_xvsllwil_wu_hu:
  case Intrinsic::loongarch_lasx_xvsrlri_h:
  case Intrinsic::loongarch_lasx_xvsrari_h:
  case Intrinsic::loongarch_lasx_xvrepl128vei_b:
    return checkIntrinsicImmArg<4>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_b_h:
  case Intrinsic::loongarch_lsx_vsrani_b_h:
  case Intrinsic::loongarch_lsx_vsrlrni_b_h:
  case Intrinsic::loongarch_lsx_vsrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_b_h:
  case Intrinsic::loongarch_lsx_vssrani_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_bu_h:
  case Intrinsic::loongarch_lsx_vssrani_bu_h:
  case Intrinsic::loongarch_lsx_vssrlrni_b_h:
  case Intrinsic::loongarch_lsx_vssrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlrni_bu_h:
  case Intrinsic::loongarch_lsx_vssrarni_bu_h:
  case Intrinsic::loongarch_lasx_xvsrlni_b_h:
  case Intrinsic::loongarch_lasx_xvsrani_b_h:
  case Intrinsic::loongarch_lasx_xvsrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvsrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_b_h:
  case Intrinsic::loongarch_lasx_xvssrani_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrani_bu_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvssrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrarni_bu_h:
    return checkIntrinsicImmArg<4>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_w:
  case Intrinsic::loongarch_lsx_vsat_wu:
  case Intrinsic::loongarch_lsx_vrotri_w:
  case Intrinsic::loongarch_lsx_vsllwil_d_w:
  case Intrinsic::loongarch_lsx_vsllwil_du_wu:
  case Intrinsic::loongarch_lsx_vsrlri_w:
  case Intrinsic::loongarch_lsx_vsrari_w:
  case Intrinsic::loongarch_lsx_vslei_bu:
  case Intrinsic::loongarch_lsx_vslei_hu:
  case Intrinsic::loongarch_lsx_vslei_wu:
  case Intrinsic::loongarch_lsx_vslei_du:
  case Intrinsic::loongarch_lsx_vslti_bu:
  case Intrinsic::loongarch_lsx_vslti_hu:
  case Intrinsic::loongarch_lsx_vslti_wu:
  case Intrinsic::loongarch_lsx_vslti_du:
  case Intrinsic::loongarch_lsx_vbsll_v:
  case Intrinsic::loongarch_lsx_vbsrl_v:
  case Intrinsic::loongarch_lasx_xvsat_w:
  case Intrinsic::loongarch_lasx_xvsat_wu:
  case Intrinsic::loongarch_lasx_xvrotri_w:
  case Intrinsic::loongarch_lasx_xvsllwil_d_w:
  case Intrinsic::loongarch_lasx_xvsllwil_du_wu:
  case Intrinsic::loongarch_lasx_xvsrlri_w:
  case Intrinsic::loongarch_lasx_xvsrari_w:
  case Intrinsic::loongarch_lasx_xvslei_bu:
  case Intrinsic::loongarch_lasx_xvslei_hu:
  case Intrinsic::loongarch_lasx_xvslei_wu:
  case Intrinsic::loongarch_lasx_xvslei_du:
  case Intrinsic::loongarch_lasx_xvslti_bu:
  case Intrinsic::loongarch_lasx_xvslti_hu:
  case Intrinsic::loongarch_lasx_xvslti_wu:
  case Intrinsic::loongarch_lasx_xvslti_du:
  case Intrinsic::loongarch_lasx_xvbsll_v:
  case Intrinsic::loongarch_lasx_xvbsrl_v:
    return checkIntrinsicImmArg<5>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vseqi_b:
  case Intrinsic::loongarch_lsx_vseqi_h:
  case Intrinsic::loongarch_lsx_vseqi_w:
  case Intrinsic::loongarch_lsx_vseqi_d:
  case Intrinsic::loongarch_lsx_vslei_b:
  case Intrinsic::loongarch_lsx_vslei_h:
  case Intrinsic::loongarch_lsx_vslei_w:
  case Intrinsic::loongarch_lsx_vslei_d:
  case Intrinsic::loongarch_lsx_vslti_b:
  case Intrinsic::loongarch_lsx_vslti_h:
  case Intrinsic::loongarch_lsx_vslti_w:
  case Intrinsic::loongarch_lsx_vslti_d:
  case Intrinsic::loongarch_lasx_xvseqi_b:
  case Intrinsic::loongarch_lasx_xvseqi_h:
  case Intrinsic::loongarch_lasx_xvseqi_w:
  case Intrinsic::loongarch_lasx_xvseqi_d:
  case Intrinsic::loongarch_lasx_xvslei_b:
  case Intrinsic::loongarch_lasx_xvslei_h:
  case Intrinsic::loongarch_lasx_xvslei_w:
  case Intrinsic::loongarch_lasx_xvslei_d:
  case Intrinsic::loongarch_lasx_xvslti_b:
  case Intrinsic::loongarch_lasx_xvslti_h:
  case Intrinsic::loongarch_lasx_xvslti_w:
  case Intrinsic::loongarch_lasx_xvslti_d:
    return checkIntrinsicImmArg<5>(Op, 2, DAG, /*IsSigned=*/true);
  case Intrinsic::loongarch_lsx_vsrlni_h_w:
  case Intrinsic::loongarch_lsx_vsrani_h_w:
  case Intrinsic::loongarch_lsx_vsrlrni_h_w:
  case Intrinsic::loongarch_lsx_vsrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_h_w:
  case Intrinsic::loongarch_lsx_vssrani_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_hu_w:
  case Intrinsic::loongarch_lsx_vssrani_hu_w:
  case Intrinsic::loongarch_lsx_vssrlrni_h_w:
  case Intrinsic::loongarch_lsx_vssrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlrni_hu_w:
  case Intrinsic::loongarch_lsx_vssrarni_hu_w:
  case Intrinsic::loongarch_lsx_vfrstpi_b:
  case Intrinsic::loongarch_lsx_vfrstpi_h:
  case Intrinsic::loongarch_lasx_xvsrlni_h_w:
  case Intrinsic::loongarch_lasx_xvsrani_h_w:
  case Intrinsic::loongarch_lasx_xvsrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvsrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_h_w:
  case Intrinsic::loongarch_lasx_xvssrani_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrani_hu_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvssrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrarni_hu_w:
  case Intrinsic::loongarch_lasx_xvfrstpi_b:
  case Intrinsic::loongarch_lasx_xvfrstpi_h:
    return checkIntrinsicImmArg<5>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_d:
  case Intrinsic::loongarch_lsx_vsat_du:
  case Intrinsic::loongarch_lsx_vrotri_d:
  case Intrinsic::loongarch_lsx_vsrlri_d:
  case Intrinsic::loongarch_lsx_vsrari_d:
  case Intrinsic::loongarch_lasx_xvsat_d:
  case Intrinsic::loongarch_lasx_xvsat_du:
  case Intrinsic::loongarch_lasx_xvrotri_d:
  case Intrinsic::loongarch_lasx_xvsrlri_d:
  case Intrinsic::loongarch_lasx_xvsrari_d:
    return checkIntrinsicImmArg<6>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_w_d:
  case Intrinsic::loongarch_lsx_vsrani_w_d:
  case Intrinsic::loongarch_lsx_vsrlrni_w_d:
  case Intrinsic::loongarch_lsx_vsrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_w_d:
  case Intrinsic::loongarch_lsx_vssrani_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_wu_d:
  case Intrinsic::loongarch_lsx_vssrani_wu_d:
  case Intrinsic::loongarch_lsx_vssrlrni_w_d:
  case Intrinsic::loongarch_lsx_vssrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlrni_wu_d:
  case Intrinsic::loongarch_lsx_vssrarni_wu_d:
  case Intrinsic::loongarch_lasx_xvsrlni_w_d:
  case Intrinsic::loongarch_lasx_xvsrani_w_d:
  case Intrinsic::loongarch_lasx_xvsrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvsrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_w_d:
  case Intrinsic::loongarch_lasx_xvssrani_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrani_wu_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvssrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrarni_wu_d:
    return checkIntrinsicImmArg<6>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_d_q:
  case Intrinsic::loongarch_lsx_vsrani_d_q:
  case Intrinsic::loongarch_lsx_vsrlrni_d_q:
  case Intrinsic::loongarch_lsx_vsrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_d_q:
  case Intrinsic::loongarch_lsx_vssrani_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_du_q:
  case Intrinsic::loongarch_lsx_vssrani_du_q:
  case Intrinsic::loongarch_lsx_vssrlrni_d_q:
  case Intrinsic::loongarch_lsx_vssrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlrni_du_q:
  case Intrinsic::loongarch_lsx_vssrarni_du_q:
  case Intrinsic::loongarch_lasx_xvsrlni_d_q:
  case Intrinsic::loongarch_lasx_xvsrani_d_q:
  case Intrinsic::loongarch_lasx_xvsrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvsrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_d_q:
  case Intrinsic::loongarch_lasx_xvssrani_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_du_q:
  case Intrinsic::loongarch_lasx_xvssrani_du_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvssrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_du_q:
  case Intrinsic::loongarch_lasx_xvssrarni_du_q:
    return checkIntrinsicImmArg<7>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vnori_b:
  case Intrinsic::loongarch_lsx_vshuf4i_b:
  case Intrinsic::loongarch_lsx_vshuf4i_h:
  case Intrinsic::loongarch_lsx_vshuf4i_w:
  case Intrinsic::loongarch_lasx_xvnori_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_h:
  case Intrinsic::loongarch_lasx_xvshuf4i_w:
  case Intrinsic::loongarch_lasx_xvpermi_d:
    return checkIntrinsicImmArg<8>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vshuf4i_d:
  case Intrinsic::loongarch_lsx_vpermi_w:
  case Intrinsic::loongarch_lsx_vbitseli_b:
  case Intrinsic::loongarch_lsx_vextrins_b:
  case Intrinsic::loongarch_lsx_vextrins_h:
  case Intrinsic::loongarch_lsx_vextrins_w:
  case Intrinsic::loongarch_lsx_vextrins_d:
  case Intrinsic::loongarch_lasx_xvshuf4i_d:
  case Intrinsic::loongarch_lasx_xvpermi_w:
  case Intrinsic::loongarch_lasx_xvpermi_q:
  case Intrinsic::loongarch_lasx_xvbitseli_b:
  case Intrinsic::loongarch_lasx_xvextrins_b:
  case Intrinsic::loongarch_lasx_xvextrins_h:
  case Intrinsic::loongarch_lasx_xvextrins_w:
  case Intrinsic::loongarch_lasx_xvextrins_d:
    return checkIntrinsicImmArg<8>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vrepli_b:
  case Intrinsic::loongarch_lsx_vrepli_h:
  case Intrinsic::loongarch_lsx_vrepli_w:
  case Intrinsic::loongarch_lsx_vrepli_d:
  case Intrinsic::loongarch_lasx_xvrepli_b:
  case Intrinsic::loongarch_lasx_xvrepli_h:
  case Intrinsic::loongarch_lasx_xvrepli_w:
  case Intrinsic::loongarch_lasx_xvrepli_d:
    return checkIntrinsicImmArg<10>(Op, 1, DAG, /*IsSigned=*/true);
  case Intrinsic::loongarch_lsx_vldi:
  case Intrinsic::loongarch_lasx_xvldi:
    return checkIntrinsicImmArg<13>(Op, 1, DAG, /*IsSigned=*/true);
SDValue
LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT GRLenVT = Subtarget.getGRLenVT();
  EVT VT = Op.getValueType();
  SDValue Chain = Op.getOperand(0);
  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (Op.getConstantOperandVal(1)) {
  default:
    return Op;
  case Intrinsic::loongarch_crc_w_b_w:
  case Intrinsic::loongarch_crc_w_h_w:
  case Intrinsic::loongarch_crc_w_w_w:
  case Intrinsic::loongarch_crc_w_d_w:
  case Intrinsic::loongarch_crcc_w_b_w:
  case Intrinsic::loongarch_crcc_w_h_w:
  case Intrinsic::loongarch_crcc_w_w_w:
  case Intrinsic::loongarch_crcc_w_d_w:
    return emitIntrinsicWithChainErrorMessage(Op, ErrorMsgReqLA64, DAG);
  case Intrinsic::loongarch_csrrd_w:
  case Intrinsic::loongarch_csrrd_d: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<14>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::CSRRD, DL, {GRLenVT, MVT::Other},
                             {Chain, DAG.getConstant(Imm, DL, GRLenVT)});
  }
  case Intrinsic::loongarch_csrwr_w:
  case Intrinsic::loongarch_csrwr_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !isUInt<14>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::CSRWR, DL, {GRLenVT, MVT::Other},
                             {Chain, Op.getOperand(2),
                              DAG.getConstant(Imm, DL, GRLenVT)});
  }
  case Intrinsic::loongarch_csrxchg_w:
  case Intrinsic::loongarch_csrxchg_d: {
    unsigned Imm = Op.getConstantOperandVal(4);
    return !isUInt<14>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::CSRXCHG, DL, {GRLenVT, MVT::Other},
                             {Chain, Op.getOperand(2), Op.getOperand(3),
                              DAG.getConstant(Imm, DL, GRLenVT)});
  }
  case Intrinsic::loongarch_iocsrrd_d: {

#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    return DAG.getNode(LoongArchISD::NODE, DL, {GRLenVT, MVT::Other},          \
                       {Chain, Op.getOperand(2)});                             \
  }

  case Intrinsic::loongarch_cpucfg: {
    return DAG.getNode(LoongArchISD::CPUCFG, DL, {GRLenVT, MVT::Other},
                       {Chain, Op.getOperand(2)});
  }
  case Intrinsic::loongarch_lddir_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !isUInt<8>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : Op;
  }
  case Intrinsic::loongarch_movfcsr2gr: {
    if (!Subtarget.hasBasicF())
      return emitIntrinsicWithChainErrorMessage(Op, ErrorMsgReqF, DAG);
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<2>(Imm)
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::MOVFCSR2GR, DL, {VT, MVT::Other},
                             {Chain, DAG.getConstant(Imm, DL, GRLenVT)});
  }
  case Intrinsic::loongarch_lsx_vld:
  case Intrinsic::loongarch_lsx_vldrepl_b:
  case Intrinsic::loongarch_lasx_xvld:
  case Intrinsic::loongarch_lasx_xvldrepl_b:
    return !isInt<12>(cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vldrepl_h:
  case Intrinsic::loongarch_lasx_xvldrepl_h:
    return !isShiftedInt<11, 1>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(
                     Op, "argument out of range or not a multiple of 2", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vldrepl_w:
  case Intrinsic::loongarch_lasx_xvldrepl_w:
    return !isShiftedInt<10, 2>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(
                     Op, "argument out of range or not a multiple of 4", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vldrepl_d:
  case Intrinsic::loongarch_lasx_xvldrepl_d:
    return !isShiftedInt<9, 3>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
               ? emitIntrinsicWithChainErrorMessage(
                     Op, "argument out of range or not a multiple of 8", DAG)
               : SDValue();
    return Op.getOperand(0);

  uint64_t IntrinsicEnum = Op.getConstantOperandVal(1);
  SDValue Chain = Op.getOperand(0);
  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqLA32 = "requires loongarch32";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";
  switch (IntrinsicEnum) {
  default:
    // TODO: Add more intrinsics.
    return SDValue();
  case Intrinsic::loongarch_cacop_d:
  case Intrinsic::loongarch_cacop_w: {
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_d && !Subtarget.is64Bit())
      return emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG);
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
      return emitIntrinsicErrorMessage(Op, ErrorMsgReqLA32, DAG);
    // call void @llvm.loongarch.cacop.[d/w](uimm5, rj, simm12)
    unsigned Imm1 = Op.getConstantOperandVal(2);
    int Imm2 = cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue();
    if (!isUInt<5>(Imm1) || !isInt<12>(Imm2))
      return emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG);
    return Op;
  }
  case Intrinsic::loongarch_dbar: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<15>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::DBAR, DL, MVT::Other, Chain,
                             DAG.getConstant(Imm, DL, GRLenVT));
  }
  case Intrinsic::loongarch_ibar: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<15>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::IBAR, DL, MVT::Other, Chain,
                             DAG.getConstant(Imm, DL, GRLenVT));
  }
  case Intrinsic::loongarch_break: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<15>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::BREAK, DL, MVT::Other, Chain,
                             DAG.getConstant(Imm, DL, GRLenVT));
  }
  case Intrinsic::loongarch_movgr2fcsr: {
    if (!Subtarget.hasBasicF())
      return emitIntrinsicErrorMessage(Op, ErrorMsgReqF, DAG);
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<2>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::MOVGR2FCSR, DL, MVT::Other, Chain,
                             DAG.getConstant(Imm, DL, GRLenVT),
                             DAG.getNode(ISD::ANY_EXTEND, DL, GRLenVT,
                                         Op.getOperand(3)));
  }
  case Intrinsic::loongarch_syscall: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<15>(Imm)
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : DAG.getNode(LoongArchISD::SYSCALL, DL, MVT::Other, Chain,
                             DAG.getConstant(Imm, DL, GRLenVT));
  }
#define IOCSRWR_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue Op3 = Op.getOperand(3);                                            \
    return Subtarget.is64Bit()                                                 \
               ? DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain,        \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),  \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op3))  \
               : DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, Op2,   \
                             Op3);                                             \
  }
  IOCSRWR_CASE(iocsrwr_b, IOCSRWR_B);
  IOCSRWR_CASE(iocsrwr_h, IOCSRWR_H);
  IOCSRWR_CASE(iocsrwr_w, IOCSRWR_W);
#undef IOCSRWR_CASE
  case Intrinsic::loongarch_iocsrwr_d: {

#define ASRT_LE_GT_CASE(NAME)                                                  \
  case Intrinsic::loongarch_##NAME: {                                          \
    return !Subtarget.is64Bit()                                                \
               ? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG)           \
               : Op;                                                           \
  }
  ASRT_LE_GT_CASE(asrtle_d)
  ASRT_LE_GT_CASE(asrtgt_d)
#undef ASRT_LE_GT_CASE
  case Intrinsic::loongarch_ldpte_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !Subtarget.is64Bit()
               ? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG)
           : !isUInt<8>(Imm) ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
                             : Op;
  }
  case Intrinsic::loongarch_lsx_vst:
  case Intrinsic::loongarch_lasx_xvst:
    return !isInt<12>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue())
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : SDValue();
  case Intrinsic::loongarch_lasx_xvstelm_b:
    return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<5>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vstelm_b:
    return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<4>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
               : SDValue();
  case Intrinsic::loongarch_lasx_xvstelm_h:
    return (!isShiftedInt<8, 1>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<4>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 2", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vstelm_h:
    return (!isShiftedInt<8, 1>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<3>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 2", DAG)
               : SDValue();
  case Intrinsic::loongarch_lasx_xvstelm_w:
    return (!isShiftedInt<8, 2>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<3>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 4", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vstelm_w:
    return (!isShiftedInt<8, 2>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<2>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 4", DAG)
               : SDValue();
  case Intrinsic::loongarch_lasx_xvstelm_d:
    return (!isShiftedInt<8, 3>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<2>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 8", DAG)
               : SDValue();
  case Intrinsic::loongarch_lsx_vstelm_d:
    return (!isShiftedInt<8, 3>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<1>(Op.getConstantOperandVal(5)))
               ? emitIntrinsicErrorMessage(
                     Op, "argument out of range or not a multiple of 8", DAG)
               : SDValue();
  EVT VT = Lo.getValueType();

  EVT VT = Lo.getValueType();
    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0);

    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));

    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
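    // i32 operations on LA64 are legalized by widening: extend the operands
    // to i64, evaluate with the corresponding 64-bit "W" node (WOpcode), and
    // truncate the result back to i32.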
static void emitErrorAndReplaceIntrinsicResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG,
    StringRef ErrorMsg, bool WithChain = true) {
  DAG.getContext()->emitError(N->getOperationName(0) + ": " + ErrorMsg + ".");
  Results.push_back(DAG.getUNDEF(N->getValueType(0)));
  if (!WithChain)
    return;
  Results.push_back(N->getOperand(0));
}

template <unsigned N>
static void
replaceVPICKVE2GRResults(SDNode *Node, SmallVectorImpl<SDValue> &Results,
                         SelectionDAG &DAG, const LoongArchSubtarget &Subtarget,
                         unsigned ResOp) {
  const StringRef ErrorMsgOOR = "argument out of range";
  unsigned Imm = Node->getConstantOperandVal(2);
  if (!isUInt<N>(Imm)) {
  switch (N->getConstantOperandVal(0)) {
  default:
    llvm_unreachable("Unexpected Intrinsic.");
  case Intrinsic::loongarch_lsx_vpickve2gr_b:
    replaceVPICKVE2GRResults<4>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_SEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_h:
  case Intrinsic::loongarch_lasx_xvpickve2gr_w:
    replaceVPICKVE2GRResults<3>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_SEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_w:
    replaceVPICKVE2GRResults<2>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_SEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_bu:
    replaceVPICKVE2GRResults<4>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_ZEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_hu:
  case Intrinsic::loongarch_lasx_xvpickve2gr_wu:
    replaceVPICKVE2GRResults<3>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_ZEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_vpickve2gr_wu:
    replaceVPICKVE2GRResults<2>(N, Results, DAG, Subtarget,
                                LoongArchISD::VPICK_ZEXT_ELT);
    break;
  case Intrinsic::loongarch_lsx_bz_b:
  case Intrinsic::loongarch_lsx_bz_h:
  case Intrinsic::loongarch_lsx_bz_w:
  case Intrinsic::loongarch_lsx_bz_d:
  case Intrinsic::loongarch_lasx_xbz_b:
  case Intrinsic::loongarch_lasx_xbz_h:
  case Intrinsic::loongarch_lasx_xbz_w:
  case Intrinsic::loongarch_lasx_xbz_d:

  case Intrinsic::loongarch_lsx_bz_v:
  case Intrinsic::loongarch_lasx_xbz_v:

  case Intrinsic::loongarch_lsx_bnz_b:
  case Intrinsic::loongarch_lsx_bnz_h:
  case Intrinsic::loongarch_lsx_bnz_w:
  case Intrinsic::loongarch_lsx_bnz_d:
  case Intrinsic::loongarch_lasx_xbnz_b:
  case Intrinsic::loongarch_lasx_xbnz_h:
  case Intrinsic::loongarch_lasx_xbnz_w:
  case Intrinsic::loongarch_lasx_xbnz_d:

  case Intrinsic::loongarch_lsx_bnz_v:
  case Intrinsic::loongarch_lasx_xbnz_v:
  EVT VT = N->getValueType(0);
  switch (N->getOpcode()) {

           "Unexpected custom legalisation");

           "Unexpected custom legalisation");
                    Subtarget.hasDiv32() && VT == MVT::i32

           "Unexpected custom legalisation");

           "Unexpected custom legalisation");

           "Unexpected custom legalisation");
    if (Src.getValueType() == MVT::f16)

    EVT OpVT = Src.getValueType();

    std::tie(Result, Chain) =

    EVT SrcVT = Src.getValueType();
    if (VT == MVT::i32 && SrcVT == MVT::f32 && Subtarget.is64Bit() &&
        Subtarget.hasBasicF()) {

           "Unexpected custom legalisation");

    TLI.expandFP_TO_UINT(N, Tmp1, Tmp2, DAG);

    assert((VT == MVT::i16 || VT == MVT::i32) &&
           "Unexpected custom legalization");

    assert((VT == MVT::i8 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
           "Unexpected custom legalization");

           "Unexpected custom legalisation");
  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (N->getConstantOperandVal(1)) {
  default:
    llvm_unreachable("Unexpected Intrinsic.");
  case Intrinsic::loongarch_movfcsr2gr: {
    if (!Subtarget.hasBasicF()) {
      emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqF);
      return;
    }
    unsigned Imm = N->getConstantOperandVal(2);
    if (!isUInt<2>(Imm)) {
      emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
      return;
    }

#define CRC_CASE_EXT_BINARYOP(NAME, NODE)                                      \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue NODE = DAG.getNode(                                                \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},                        \
        {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2),               \
         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))});       \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0)));   \
    Results.push_back(NODE.getValue(1));                                       \
    break;                                                                     \
  }
  CRC_CASE_EXT_BINARYOP(crc_w_b_w, CRC_W_B_W)
  CRC_CASE_EXT_BINARYOP(crc_w_h_w, CRC_W_H_W)
  CRC_CASE_EXT_BINARYOP(crc_w_w_w, CRC_W_W_W)
  CRC_CASE_EXT_BINARYOP(crcc_w_b_w, CRCC_W_B_W)
  CRC_CASE_EXT_BINARYOP(crcc_w_h_w, CRCC_W_H_W)
  CRC_CASE_EXT_BINARYOP(crcc_w_w_w, CRCC_W_W_W)
#undef CRC_CASE_EXT_BINARYOP

#define CRC_CASE_EXT_UNARYOP(NAME, NODE)                                       \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue NODE = DAG.getNode(                                                \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},                        \
        {Chain, Op2,                                                           \
         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))});       \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0)));   \
    Results.push_back(NODE.getValue(1));                                       \
    break;                                                                     \
  }
  CRC_CASE_EXT_UNARYOP(crc_w_d_w, CRC_W_D_W)
  CRC_CASE_EXT_UNARYOP(crcc_w_d_w, CRCC_W_D_W)
#undef CRC_CASE_EXT_UNARYOP

#define CSR_CASE(ID)                                                           \
  case Intrinsic::loongarch_##ID: {                                            \
    if (!Subtarget.is64Bit())                                                  \
      emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqLA64);   \
    break;                                                                     \
  }
  CSR_CASE(csrrd_d);
  CSR_CASE(csrwr_d);
  CSR_CASE(csrxchg_d);
  CSR_CASE(iocsrrd_d);
#undef CSR_CASE
  case Intrinsic::loongarch_csrrd_w: {
    unsigned Imm = N->getConstantOperandVal(2);
    if (!isUInt<14>(Imm)) {
      emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
      return;
    }

  case Intrinsic::loongarch_csrwr_w: {
    unsigned Imm = N->getConstantOperandVal(3);
    if (!isUInt<14>(Imm)) {
      emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
      return;
    }

  case Intrinsic::loongarch_csrxchg_w: {
    unsigned Imm = N->getConstantOperandVal(4);
    if (!isUInt<14>(Imm)) {
      emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
      return;
    }

#define IOCSRRD_CASE(NAME, NODE)                                               \
  case Intrinsic::loongarch_##NAME: {                                          \
    SDValue IOCSRRDResults =                                                   \
        DAG.getNode(LoongArchISD::NODE, DL, {MVT::i64, MVT::Other},            \
                    {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2)}); \
    Results.push_back(                                                         \
        DAG.getNode(ISD::TRUNCATE, DL, VT, IOCSRRDResults.getValue(0)));       \
    Results.push_back(IOCSRRDResults.getValue(1));                             \
    break;                                                                     \
  }
  IOCSRRD_CASE(iocsrrd_b, IOCSRRD_B);
  IOCSRRD_CASE(iocsrrd_h, IOCSRRD_H);
  IOCSRRD_CASE(iocsrrd_w, IOCSRRD_W);
#undef IOCSRRD_CASE
  case Intrinsic::loongarch_cpucfg: {

  case Intrinsic::loongarch_lddir_d: {

        "On LA64, only 64-bit registers can be read.");

        "On LA32, only 32-bit registers can be read.");

    Results.push_back(N->getOperand(0));

        OpVT == MVT::f64 ? RTLIB::LROUND_F64 : RTLIB::LROUND_F32;
  SDValue FirstOperand = N->getOperand(0);
  SDValue SecondOperand = N->getOperand(1);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);

  unsigned SMIdx, SMLen;

  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)) ||

  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))

    NewOperand = FirstOperand;

  msb = lsb + SMLen - 1;

  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL || lsb == 0)
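  // This combine folds an AND with a shifted mask (optionally fed by a shift
  // of the source) into a single BSTRPICK, which extracts the bit field
  // [msb, lsb] in one instruction.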
  SDValue FirstOperand = N->getOperand(0);

  EVT ValTy = N->getValueType(0);

  unsigned MaskIdx, MaskLen;

      !(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||

  if (!(CN = dyn_cast<ConstantSDNode>(N->getOperand(1))))

  if (MaskIdx <= Shamt && Shamt <= MaskIdx + MaskLen - 1)
  EVT ValTy = N->getValueType(0);
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);

  unsigned MaskIdx0, MaskLen0, MaskIdx1, MaskLen1;

  bool SwapAndRetried = false;

  if (ValBits != 32 && ValBits != 64)
    return SDValue();

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&

      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&

      MaskIdx0 == MaskIdx1 && MaskLen0 == MaskLen1 &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&

      (MaskIdx0 + MaskLen0 <= ValBits)) {

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&

      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&

      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&

      MaskLen0 == MaskLen1 && MaskIdx1 == 0 &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&

      (MaskIdx0 + MaskLen0 <= 64) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1->getOperand(1))) &&

                            ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                            : (MaskIdx0 + MaskLen0 - 1),

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&

      MaskIdx0 == 0 && (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&

      (MaskIdx0 + MaskLen0 <= ValBits)) {

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&

      (CN1 = dyn_cast<ConstantSDNode>(N1)) &&

          DAG.getConstant(ValBits == 32 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                                        : (MaskIdx0 + MaskLen0 - 1),

  unsigned MaskIdx, MaskLen;
  if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::AND &&
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&

      MaskIdx == 0 && (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&

      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&

      N1.getOperand(0).getOpcode() == ISD::SHL &&
      (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&

  if (!SwapAndRetried) {

    SwapAndRetried = true;

  SwapAndRetried = false;

      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&

  if (!SwapAndRetried) {

    SwapAndRetried = true;
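  // The OR combine matches bit-field insert shapes such as
  // (or (and X, ~mask), (and/shl Y, ...)) and rewrites them to BSTRINS; the
  // SwapAndRetried flag reruns the match once with the operands swapped so
  // the patterns behave commutatively.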
  switch (V.getNode()->getOpcode()) {
  case ISD::LOAD: {
    LoadSDNode *LoadNode = cast<LoadSDNode>(V.getNode());

    VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
    if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {

    VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
    if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {
  SDNode *AndNode = N->getOperand(0).getNode();

  SDValue CmpInputValue = N->getOperand(1);

    CN = dyn_cast<ConstantSDNode>(CmpInputValue);

    AndInputValue1 = AndInputValue1.getOperand(0);

  if (AndInputValue2 != CmpInputValue)
    return SDValue();

                               TruncInputValue1, TruncInputValue2);
template <unsigned N>
static SDValue legalizeIntrinsicImmArg(SDNode *Node, unsigned ImmOp,
                                       SelectionDAG &DAG,
                                       const LoongArchSubtarget &Subtarget,
                                       bool IsSigned = false) {
  SDLoc DL(Node);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(ImmOp));
  // Check the ImmArg.
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
    DAG.getContext()->emitError(Node->getOperationName(0) +
                                ": argument out of range.");
    return DAG.getNode(ISD::UNDEF, DL, Subtarget.getGRLenVT());
  }
  return DAG.getConstant(CImm->getZExtValue(), DL, Subtarget.getGRLenVT());
}
template <unsigned N>
static SDValue lowerVectorSplatImm(SDNode *Node, unsigned ImmOp,
                                   SelectionDAG &DAG, bool IsSigned = false) {
  SDLoc DL(Node);
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(ImmOp));

  // Check the ImmArg.
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
    DAG.getContext()->emitError(Node->getOperationName(0) +
                                ": argument out of range.");
    return DAG.getNode(ISD::UNDEF, DL, ResTy);
  }

  return DAG.getConstant(
      APInt(ResTy.getScalarType().getSizeInBits(),
            IsSigned ? CImm->getSExtValue() : CImm->getZExtValue(), IsSigned),
      DL, ResTy);
}
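// Example: vaddi.wu with immediate 3 is rewritten by the combines below into
// a generic ISD::ADD whose second operand is the v4i32 splat <3, 3, 3, 3>
// built here.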
  EVT ResTy = Node->getValueType(0);

  EVT ResTy = Node->getValueType(0);
template <unsigned N>
static SDValue lowerVectorBitClearImm(SDNode *Node, SelectionDAG &DAG) {
  SDLoc DL(Node);
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  // Check the unsigned ImmArg.
  if (!isUInt<N>(CImm->getZExtValue())) {
    DAG.getContext()->emitError(Node->getOperationName(0) +
                                ": argument out of range.");
    return DAG.getNode(ISD::UNDEF, DL, ResTy);
  }

  // vbitclri.{b/h/w/d} imm: AND with the splat of ~(1 << imm).
  APInt BitImm = APInt(ResTy.getScalarSizeInBits(), 1) << CImm->getAPIntValue();
  SDValue Mask = DAG.getConstant(~BitImm, DL, ResTy);

  return DAG.getNode(ISD::AND, DL, ResTy, Node->getOperand(1), Mask);
}

template <unsigned N>
static SDValue lowerVectorBitSetImm(SDNode *Node, SelectionDAG &DAG) {
  SDLoc DL(Node);
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  // Check the unsigned ImmArg.
  if (!isUInt<N>(CImm->getZExtValue())) {
    DAG.getContext()->emitError(Node->getOperationName(0) +
                                ": argument out of range.");
    return DAG.getNode(ISD::UNDEF, DL, ResTy);
  }

  // vbitseti.{b/h/w/d} imm: OR with the splat of (1 << imm).
  APInt BitImm = APInt(ResTy.getScalarSizeInBits(), 1) << CImm->getAPIntValue();
  return DAG.getNode(ISD::OR, DL, ResTy, Node->getOperand(1),
                     DAG.getConstant(BitImm, DL, ResTy));
}

template <unsigned N>
static SDValue lowerVectorBitRevImm(SDNode *Node, SelectionDAG &DAG) {
  SDLoc DL(Node);
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  // Check the unsigned ImmArg.
  if (!isUInt<N>(CImm->getZExtValue())) {
    DAG.getContext()->emitError(Node->getOperationName(0) +
                                ": argument out of range.");
    return DAG.getNode(ISD::UNDEF, DL, ResTy);
  }

  // vbitrevi.{b/h/w/d} imm: XOR with the splat of (1 << imm).
  APInt BitImm = APInt(ResTy.getScalarSizeInBits(), 1) << CImm->getAPIntValue();
  return DAG.getNode(ISD::XOR, DL, ResTy, Node->getOperand(1),
                     DAG.getConstant(BitImm, DL, ResTy));
}
  switch (N->getConstantOperandVal(0)) {
  default:
    break;
  case Intrinsic::loongarch_lsx_vadd_b:
  case Intrinsic::loongarch_lsx_vadd_h:
  case Intrinsic::loongarch_lsx_vadd_w:
  case Intrinsic::loongarch_lsx_vadd_d:
  case Intrinsic::loongarch_lasx_xvadd_b:
  case Intrinsic::loongarch_lasx_xvadd_h:
  case Intrinsic::loongarch_lasx_xvadd_w:
  case Intrinsic::loongarch_lasx_xvadd_d:
    return DAG.getNode(ISD::ADD, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vaddi_bu:
  case Intrinsic::loongarch_lsx_vaddi_hu:
  case Intrinsic::loongarch_lsx_vaddi_wu:
  case Intrinsic::loongarch_lsx_vaddi_du:
  case Intrinsic::loongarch_lasx_xvaddi_bu:
  case Intrinsic::loongarch_lasx_xvaddi_hu:
  case Intrinsic::loongarch_lasx_xvaddi_wu:
  case Intrinsic::loongarch_lasx_xvaddi_du:
    return DAG.getNode(ISD::ADD, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<5>(N, 2, DAG));
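  // Rewriting LSX/LASX arithmetic intrinsics into generic ISD opcodes (as
  // for vadd/vaddi above) lets the generic DAG combiner and instruction
  // selection treat them like ordinary vector operations.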
  case Intrinsic::loongarch_lsx_vsub_b:
  case Intrinsic::loongarch_lsx_vsub_h:
  case Intrinsic::loongarch_lsx_vsub_w:
  case Intrinsic::loongarch_lsx_vsub_d:
  case Intrinsic::loongarch_lasx_xvsub_b:
  case Intrinsic::loongarch_lasx_xvsub_h:
  case Intrinsic::loongarch_lasx_xvsub_w:
  case Intrinsic::loongarch_lasx_xvsub_d:
    return DAG.getNode(ISD::SUB, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vsubi_bu:
  case Intrinsic::loongarch_lsx_vsubi_hu:
  case Intrinsic::loongarch_lsx_vsubi_wu:
  case Intrinsic::loongarch_lsx_vsubi_du:
  case Intrinsic::loongarch_lasx_xvsubi_bu:
  case Intrinsic::loongarch_lasx_xvsubi_hu:
  case Intrinsic::loongarch_lasx_xvsubi_wu:
  case Intrinsic::loongarch_lasx_xvsubi_du:
    return DAG.getNode(ISD::SUB, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vneg_b:
  case Intrinsic::loongarch_lsx_vneg_h:
  case Intrinsic::loongarch_lsx_vneg_w:
  case Intrinsic::loongarch_lsx_vneg_d:
  case Intrinsic::loongarch_lasx_xvneg_b:
  case Intrinsic::loongarch_lasx_xvneg_h:
  case Intrinsic::loongarch_lasx_xvneg_w:
  case Intrinsic::loongarch_lasx_xvneg_d:
    return DAG.getNode(
        ISD::SUB, DL, N->getValueType(0),
        DAG.getConstant(
            APInt(N->getValueType(0).getScalarType().getSizeInBits(), 0,
                  /*isSigned=*/true),
            SDLoc(N), N->getValueType(0)),
        N->getOperand(1));
  case Intrinsic::loongarch_lsx_vmax_b:
  case Intrinsic::loongarch_lsx_vmax_h:
  case Intrinsic::loongarch_lsx_vmax_w:
  case Intrinsic::loongarch_lsx_vmax_d:
  case Intrinsic::loongarch_lasx_xvmax_b:
  case Intrinsic::loongarch_lasx_xvmax_h:
  case Intrinsic::loongarch_lasx_xvmax_w:
  case Intrinsic::loongarch_lasx_xvmax_d:
    return DAG.getNode(ISD::SMAX, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmax_bu:
  case Intrinsic::loongarch_lsx_vmax_hu:
  case Intrinsic::loongarch_lsx_vmax_wu:
  case Intrinsic::loongarch_lsx_vmax_du:
  case Intrinsic::loongarch_lasx_xvmax_bu:
  case Intrinsic::loongarch_lasx_xvmax_hu:
  case Intrinsic::loongarch_lasx_xvmax_wu:
  case Intrinsic::loongarch_lasx_xvmax_du:
    return DAG.getNode(ISD::UMAX, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmaxi_b:
  case Intrinsic::loongarch_lsx_vmaxi_h:
  case Intrinsic::loongarch_lsx_vmaxi_w:
  case Intrinsic::loongarch_lsx_vmaxi_d:
  case Intrinsic::loongarch_lasx_xvmaxi_b:
  case Intrinsic::loongarch_lasx_xvmaxi_h:
  case Intrinsic::loongarch_lasx_xvmaxi_w:
  case Intrinsic::loongarch_lasx_xvmaxi_d:
    return DAG.getNode(ISD::SMAX, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<5>(N, 2, DAG, /*IsSigned=*/true));
  case Intrinsic::loongarch_lsx_vmaxi_bu:
  case Intrinsic::loongarch_lsx_vmaxi_hu:
  case Intrinsic::loongarch_lsx_vmaxi_wu:
  case Intrinsic::loongarch_lsx_vmaxi_du:
  case Intrinsic::loongarch_lasx_xvmaxi_bu:
  case Intrinsic::loongarch_lasx_xvmaxi_hu:
  case Intrinsic::loongarch_lasx_xvmaxi_wu:
  case Intrinsic::loongarch_lasx_xvmaxi_du:
    return DAG.getNode(ISD::UMAX, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vmin_b:
  case Intrinsic::loongarch_lsx_vmin_h:
  case Intrinsic::loongarch_lsx_vmin_w:
  case Intrinsic::loongarch_lsx_vmin_d:
  case Intrinsic::loongarch_lasx_xvmin_b:
  case Intrinsic::loongarch_lasx_xvmin_h:
  case Intrinsic::loongarch_lasx_xvmin_w:
  case Intrinsic::loongarch_lasx_xvmin_d:
    return DAG.getNode(ISD::SMIN, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmin_bu:
  case Intrinsic::loongarch_lsx_vmin_hu:
  case Intrinsic::loongarch_lsx_vmin_wu:
  case Intrinsic::loongarch_lsx_vmin_du:
  case Intrinsic::loongarch_lasx_xvmin_bu:
  case Intrinsic::loongarch_lasx_xvmin_hu:
  case Intrinsic::loongarch_lasx_xvmin_wu:
  case Intrinsic::loongarch_lasx_xvmin_du:
    return DAG.getNode(ISD::UMIN, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmini_b:
  case Intrinsic::loongarch_lsx_vmini_h:
  case Intrinsic::loongarch_lsx_vmini_w:
  case Intrinsic::loongarch_lsx_vmini_d:
  case Intrinsic::loongarch_lasx_xvmini_b:
  case Intrinsic::loongarch_lasx_xvmini_h:
  case Intrinsic::loongarch_lasx_xvmini_w:
  case Intrinsic::loongarch_lasx_xvmini_d:
    return DAG.getNode(ISD::SMIN, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<5>(N, 2, DAG, /*IsSigned=*/true));
  case Intrinsic::loongarch_lsx_vmini_bu:
  case Intrinsic::loongarch_lsx_vmini_hu:
  case Intrinsic::loongarch_lsx_vmini_wu:
  case Intrinsic::loongarch_lsx_vmini_du:
  case Intrinsic::loongarch_lasx_xvmini_bu:
  case Intrinsic::loongarch_lasx_xvmini_hu:
  case Intrinsic::loongarch_lasx_xvmini_wu:
  case Intrinsic::loongarch_lasx_xvmini_du:
    return DAG.getNode(ISD::UMIN, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vmul_b:
  case Intrinsic::loongarch_lsx_vmul_h:
  case Intrinsic::loongarch_lsx_vmul_w:
  case Intrinsic::loongarch_lsx_vmul_d:
  case Intrinsic::loongarch_lasx_xvmul_b:
  case Intrinsic::loongarch_lasx_xvmul_h:
  case Intrinsic::loongarch_lasx_xvmul_w:
  case Intrinsic::loongarch_lasx_xvmul_d:
    return DAG.getNode(ISD::MUL, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmadd_b:
  case Intrinsic::loongarch_lsx_vmadd_h:
  case Intrinsic::loongarch_lsx_vmadd_w:
  case Intrinsic::loongarch_lsx_vmadd_d:
  case Intrinsic::loongarch_lasx_xvmadd_b:
  case Intrinsic::loongarch_lasx_xvmadd_h:
  case Intrinsic::loongarch_lasx_xvmadd_w:
  case Intrinsic::loongarch_lasx_xvmadd_d: {
    EVT ResTy = N->getValueType(0);
    return DAG.getNode(ISD::ADD, SDLoc(N), ResTy, N->getOperand(1),
                       DAG.getNode(ISD::MUL, SDLoc(N), ResTy, N->getOperand(2),
                                   N->getOperand(3)));
  }
  case Intrinsic::loongarch_lsx_vmsub_b:
  case Intrinsic::loongarch_lsx_vmsub_h:
  case Intrinsic::loongarch_lsx_vmsub_w:
  case Intrinsic::loongarch_lsx_vmsub_d:
  case Intrinsic::loongarch_lasx_xvmsub_b:
  case Intrinsic::loongarch_lasx_xvmsub_h:
  case Intrinsic::loongarch_lasx_xvmsub_w:
  case Intrinsic::loongarch_lasx_xvmsub_d: {
    EVT ResTy = N->getValueType(0);
    return DAG.getNode(ISD::SUB, SDLoc(N), ResTy, N->getOperand(1),
                       DAG.getNode(ISD::MUL, SDLoc(N), ResTy, N->getOperand(2),
                                   N->getOperand(3)));
  }
  case Intrinsic::loongarch_lsx_vdiv_b:
  case Intrinsic::loongarch_lsx_vdiv_h:
  case Intrinsic::loongarch_lsx_vdiv_w:
  case Intrinsic::loongarch_lsx_vdiv_d:
  case Intrinsic::loongarch_lasx_xvdiv_b:
  case Intrinsic::loongarch_lasx_xvdiv_h:
  case Intrinsic::loongarch_lasx_xvdiv_w:
  case Intrinsic::loongarch_lasx_xvdiv_d:
    return DAG.getNode(ISD::SDIV, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vdiv_bu:
  case Intrinsic::loongarch_lsx_vdiv_hu:
  case Intrinsic::loongarch_lsx_vdiv_wu:
  case Intrinsic::loongarch_lsx_vdiv_du:
  case Intrinsic::loongarch_lasx_xvdiv_bu:
  case Intrinsic::loongarch_lasx_xvdiv_hu:
  case Intrinsic::loongarch_lasx_xvdiv_wu:
  case Intrinsic::loongarch_lasx_xvdiv_du:
    return DAG.getNode(ISD::UDIV, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmod_b:
  case Intrinsic::loongarch_lsx_vmod_h:
  case Intrinsic::loongarch_lsx_vmod_w:
  case Intrinsic::loongarch_lsx_vmod_d:
  case Intrinsic::loongarch_lasx_xvmod_b:
  case Intrinsic::loongarch_lasx_xvmod_h:
  case Intrinsic::loongarch_lasx_xvmod_w:
  case Intrinsic::loongarch_lasx_xvmod_d:
    return DAG.getNode(ISD::SREM, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vmod_bu:
  case Intrinsic::loongarch_lsx_vmod_hu:
  case Intrinsic::loongarch_lsx_vmod_wu:
  case Intrinsic::loongarch_lsx_vmod_du:
  case Intrinsic::loongarch_lasx_xvmod_bu:
  case Intrinsic::loongarch_lasx_xvmod_hu:
  case Intrinsic::loongarch_lasx_xvmod_wu:
  case Intrinsic::loongarch_lasx_xvmod_du:
    return DAG.getNode(ISD::UREM, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vand_v:
  case Intrinsic::loongarch_lasx_xvand_v:
    return DAG.getNode(ISD::AND, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vor_v:
  case Intrinsic::loongarch_lasx_xvor_v:
    return DAG.getNode(ISD::OR, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vxor_v:
  case Intrinsic::loongarch_lasx_xvxor_v:
    return DAG.getNode(ISD::XOR, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vnor_v:
  case Intrinsic::loongarch_lasx_xvnor_v: {
    SDValue Res = DAG.getNode(ISD::OR, DL, N->getValueType(0),
                              N->getOperand(1), N->getOperand(2));
    return DAG.getNOT(DL, Res, Res->getValueType(0));
  }
  case Intrinsic::loongarch_lsx_vandi_b:
  case Intrinsic::loongarch_lasx_xvandi_b:
    return DAG.getNode(ISD::AND, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<8>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vori_b:
  case Intrinsic::loongarch_lasx_xvori_b:
    return DAG.getNode(ISD::OR, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<8>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vxori_b:
  case Intrinsic::loongarch_lasx_xvxori_b:
    return DAG.getNode(ISD::XOR, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<8>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsll_b:
  case Intrinsic::loongarch_lsx_vsll_h:
  case Intrinsic::loongarch_lsx_vsll_w:
  case Intrinsic::loongarch_lsx_vsll_d:
  case Intrinsic::loongarch_lasx_xvsll_b:
  case Intrinsic::loongarch_lasx_xvsll_h:
  case Intrinsic::loongarch_lasx_xvsll_w:
  case Intrinsic::loongarch_lasx_xvsll_d:
    return DAG.getNode(ISD::SHL, DL, N->getValueType(0), N->getOperand(1),
                       truncateVecElts(N, DAG));
  case Intrinsic::loongarch_lsx_vslli_b:
  case Intrinsic::loongarch_lasx_xvslli_b:
    return DAG.getNode(ISD::SHL, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<3>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vslli_h:
  case Intrinsic::loongarch_lasx_xvslli_h:
    return DAG.getNode(ISD::SHL, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<4>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vslli_w:
  case Intrinsic::loongarch_lasx_xvslli_w:
    return DAG.getNode(ISD::SHL, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vslli_d:
  case Intrinsic::loongarch_lasx_xvslli_d:
    return DAG.getNode(ISD::SHL, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<6>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrl_b:
  case Intrinsic::loongarch_lsx_vsrl_h:
  case Intrinsic::loongarch_lsx_vsrl_w:
  case Intrinsic::loongarch_lsx_vsrl_d:
  case Intrinsic::loongarch_lasx_xvsrl_b:
  case Intrinsic::loongarch_lasx_xvsrl_h:
  case Intrinsic::loongarch_lasx_xvsrl_w:
  case Intrinsic::loongarch_lasx_xvsrl_d:
    return DAG.getNode(ISD::SRL, DL, N->getValueType(0), N->getOperand(1),
                       truncateVecElts(N, DAG));
  case Intrinsic::loongarch_lsx_vsrli_b:
  case Intrinsic::loongarch_lasx_xvsrli_b:
    return DAG.getNode(ISD::SRL, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<3>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_h:
  case Intrinsic::loongarch_lasx_xvsrli_h:
    return DAG.getNode(ISD::SRL, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<4>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_w:
  case Intrinsic::loongarch_lasx_xvsrli_w:
    return DAG.getNode(ISD::SRL, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrli_d:
  case Intrinsic::loongarch_lasx_xvsrli_d:
    return DAG.getNode(ISD::SRL, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<6>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsra_b:
  case Intrinsic::loongarch_lsx_vsra_h:
  case Intrinsic::loongarch_lsx_vsra_w:
  case Intrinsic::loongarch_lsx_vsra_d:
  case Intrinsic::loongarch_lasx_xvsra_b:
  case Intrinsic::loongarch_lasx_xvsra_h:
  case Intrinsic::loongarch_lasx_xvsra_w:
  case Intrinsic::loongarch_lasx_xvsra_d:
    return DAG.getNode(ISD::SRA, DL, N->getValueType(0), N->getOperand(1),
                       truncateVecElts(N, DAG));
  case Intrinsic::loongarch_lsx_vsrai_b:
  case Intrinsic::loongarch_lasx_xvsrai_b:
    return DAG.getNode(ISD::SRA, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<3>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_h:
  case Intrinsic::loongarch_lasx_xvsrai_h:
    return DAG.getNode(ISD::SRA, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<4>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_w:
  case Intrinsic::loongarch_lasx_xvsrai_w:
    return DAG.getNode(ISD::SRA, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<5>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vsrai_d:
  case Intrinsic::loongarch_lasx_xvsrai_d:
    return DAG.getNode(ISD::SRA, DL, N->getValueType(0), N->getOperand(1),
                       lowerVectorSplatImm<6>(N, 2, DAG));
  case Intrinsic::loongarch_lsx_vclz_b:
  case Intrinsic::loongarch_lsx_vclz_h:
  case Intrinsic::loongarch_lsx_vclz_w:
  case Intrinsic::loongarch_lsx_vclz_d:
  case Intrinsic::loongarch_lasx_xvclz_b:
  case Intrinsic::loongarch_lasx_xvclz_h:
  case Intrinsic::loongarch_lasx_xvclz_w:
  case Intrinsic::loongarch_lasx_xvclz_d:
    return DAG.getNode(ISD::CTLZ, DL, N->getValueType(0), N->getOperand(1));
  case Intrinsic::loongarch_lsx_vpcnt_b:
  case Intrinsic::loongarch_lsx_vpcnt_h:
  case Intrinsic::loongarch_lsx_vpcnt_w:
  case Intrinsic::loongarch_lsx_vpcnt_d:
  case Intrinsic::loongarch_lasx_xvpcnt_b:
  case Intrinsic::loongarch_lasx_xvpcnt_h:
  case Intrinsic::loongarch_lasx_xvpcnt_w:
  case Intrinsic::loongarch_lasx_xvpcnt_d:
    return DAG.getNode(ISD::CTPOP, DL, N->getValueType(0), N->getOperand(1));
  case Intrinsic::loongarch_lsx_vbitclr_b:
  case Intrinsic::loongarch_lsx_vbitclr_h:
  case Intrinsic::loongarch_lsx_vbitclr_w:
  case Intrinsic::loongarch_lsx_vbitclr_d:
  case Intrinsic::loongarch_lasx_xvbitclr_b:
  case Intrinsic::loongarch_lasx_xvbitclr_h:
  case Intrinsic::loongarch_lasx_xvbitclr_w:
  case Intrinsic::loongarch_lasx_xvbitclr_d:
    return lowerVectorBitClear(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_b:
  case Intrinsic::loongarch_lasx_xvbitclri_b:
    return lowerVectorBitClearImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_h:
  case Intrinsic::loongarch_lasx_xvbitclri_h:
    return lowerVectorBitClearImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_w:
  case Intrinsic::loongarch_lasx_xvbitclri_w:
    return lowerVectorBitClearImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitclri_d:
  case Intrinsic::loongarch_lasx_xvbitclri_d:
    return lowerVectorBitClearImm<6>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitset_b:
  case Intrinsic::loongarch_lsx_vbitset_h:
  case Intrinsic::loongarch_lsx_vbitset_w:
  case Intrinsic::loongarch_lsx_vbitset_d:
  case Intrinsic::loongarch_lasx_xvbitset_b:
  case Intrinsic::loongarch_lasx_xvbitset_h:
  case Intrinsic::loongarch_lasx_xvbitset_w:
  case Intrinsic::loongarch_lasx_xvbitset_d: {
    EVT VecTy = N->getValueType(0);
    SDValue One = DAG.getConstant(1, DL, VecTy);
    return DAG.getNode(
        ISD::OR, DL, VecTy, N->getOperand(1),
        DAG.getNode(ISD::SHL, DL, VecTy, One, truncateVecElts(N, DAG)));
  }
  case Intrinsic::loongarch_lsx_vbitseti_b:
  case Intrinsic::loongarch_lasx_xvbitseti_b:
    return lowerVectorBitSetImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_h:
  case Intrinsic::loongarch_lasx_xvbitseti_h:
    return lowerVectorBitSetImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_w:
  case Intrinsic::loongarch_lasx_xvbitseti_w:
    return lowerVectorBitSetImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitseti_d:
  case Intrinsic::loongarch_lasx_xvbitseti_d:
    return lowerVectorBitSetImm<6>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrev_b:
  case Intrinsic::loongarch_lsx_vbitrev_h:
  case Intrinsic::loongarch_lsx_vbitrev_w:
  case Intrinsic::loongarch_lsx_vbitrev_d:
  case Intrinsic::loongarch_lasx_xvbitrev_b:
  case Intrinsic::loongarch_lasx_xvbitrev_h:
  case Intrinsic::loongarch_lasx_xvbitrev_w:
  case Intrinsic::loongarch_lasx_xvbitrev_d: {
    EVT VecTy = N->getValueType(0);
    SDValue One = DAG.getConstant(1, DL, VecTy);
    return DAG.getNode(
        ISD::XOR, DL, VecTy, N->getOperand(1),
        DAG.getNode(ISD::SHL, DL, VecTy, One, truncateVecElts(N, DAG)));
  }
  case Intrinsic::loongarch_lsx_vbitrevi_b:
  case Intrinsic::loongarch_lasx_xvbitrevi_b:
    return lowerVectorBitRevImm<3>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_h:
  case Intrinsic::loongarch_lasx_xvbitrevi_h:
    return lowerVectorBitRevImm<4>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_w:
  case Intrinsic::loongarch_lasx_xvbitrevi_w:
    return lowerVectorBitRevImm<5>(N, DAG);
  case Intrinsic::loongarch_lsx_vbitrevi_d:
  case Intrinsic::loongarch_lasx_xvbitrevi_d:
    return lowerVectorBitRevImm<6>(N, DAG);
  case Intrinsic::loongarch_lsx_vfadd_s:
  case Intrinsic::loongarch_lsx_vfadd_d:
  case Intrinsic::loongarch_lasx_xvfadd_s:
  case Intrinsic::loongarch_lasx_xvfadd_d:
    return DAG.getNode(ISD::FADD, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfsub_s:
  case Intrinsic::loongarch_lsx_vfsub_d:
  case Intrinsic::loongarch_lasx_xvfsub_s:
  case Intrinsic::loongarch_lasx_xvfsub_d:
    return DAG.getNode(ISD::FSUB, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfmul_s:
  case Intrinsic::loongarch_lsx_vfmul_d:
  case Intrinsic::loongarch_lasx_xvfmul_s:
  case Intrinsic::loongarch_lasx_xvfmul_d:
    return DAG.getNode(ISD::FMUL, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));
  case Intrinsic::loongarch_lsx_vfdiv_s:
  case Intrinsic::loongarch_lsx_vfdiv_d:
  case Intrinsic::loongarch_lasx_xvfdiv_s:
  case Intrinsic::loongarch_lasx_xvfdiv_d:
    return DAG.getNode(ISD::FDIV, DL, N->getValueType(0), N->getOperand(1),
                       N->getOperand(2));