#include "llvm/IR/IntrinsicsLoongArch.h"

#define DEBUG_TYPE "loongarch-isel-lowering"

    cl::desc(
        "Maximum number of instructions used (including code sequence "
        "to generate the value and moving the value to FPR) when "
        "materializing floating-point immediates (default = 3)"),
               "Materialize FP immediate within 2 instructions"),
               "Materialize FP immediate within 3 instructions"),
               "Materialize FP immediate within 4 instructions"),
               "Materialize FP immediate within 5 instructions"),
               "Materialize FP immediate within 6 instructions "
               "(behaves same as 5 on loongarch64)")));

    cl::desc("Trap on integer division by zero."),
  MVT GRLenVT = Subtarget.getGRLenVT();

  if (Subtarget.hasBasicF())
  if (Subtarget.hasBasicD())

      MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64};
      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64};

  if (Subtarget.hasExtLSX())

  if (Subtarget.hasExtLASX())
    for (MVT VT : LASXVTs)
  if (Subtarget.is64Bit()) {
  if (!Subtarget.is64Bit()) {
  if (Subtarget.hasBasicD())
  if (Subtarget.hasBasicF()) {
  if (Subtarget.is64Bit())
  if (!Subtarget.hasBasicD()) {
  if (Subtarget.is64Bit()) {
  if (Subtarget.hasBasicD()) {
  if (Subtarget.is64Bit())
  if (Subtarget.hasExtLSX()) {
    for (MVT VT : LSXVTs) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
    for (MVT VT : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
    for (MVT VT : {MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v4f32, MVT::v2f64}) {
         {MVT::v16i8, MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v8i16, MVT::v4i16,
          MVT::v2i16, MVT::v4i32, MVT::v2i32, MVT::v2i64}) {
  if (Subtarget.hasExtLASX()) {
    for (MVT VT : LASXVTs) {
    for (MVT VT : {MVT::v4i64, MVT::v8i32, MVT::v16i16, MVT::v32i8}) {
    for (MVT VT : {MVT::v32i8, MVT::v16i16, MVT::v8i32})
    for (MVT VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64})
    for (MVT VT : {MVT::v8i32, MVT::v4i32, MVT::v4i64}) {
    for (MVT VT : {MVT::v8f32, MVT::v4f64}) {

  if (Subtarget.hasBasicF()) {
  if (Subtarget.hasExtLSX()) {
  if (Subtarget.hasLAMCAS())
  if (Subtarget.hasSCQ()) {
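// LowerOperation: dispatch each custom-lowered SDNode opcode to its dedicated
// lower* helper below.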
  switch (Op.getOpcode()) {
    return lowerATOMIC_FENCE(Op, DAG);
    return lowerEH_DWARF_CFA(Op, DAG);
    return lowerGlobalAddress(Op, DAG);
    return lowerGlobalTLSAddress(Op, DAG);
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
    return lowerINTRINSIC_VOID(Op, DAG);
    return lowerBlockAddress(Op, DAG);
    return lowerJumpTable(Op, DAG);
    return lowerShiftLeftParts(Op, DAG);
    return lowerShiftRightParts(Op, DAG, true);
    return lowerShiftRightParts(Op, DAG, false);
    return lowerConstantPool(Op, DAG);
    return lowerFP_TO_SINT(Op, DAG);
    return lowerBITCAST(Op, DAG);
    return lowerUINT_TO_FP(Op, DAG);
    return lowerSINT_TO_FP(Op, DAG);
    return lowerVASTART(Op, DAG);
    return lowerFRAMEADDR(Op, DAG);
    return lowerRETURNADDR(Op, DAG);
    return lowerWRITE_REGISTER(Op, DAG);
    return lowerINSERT_VECTOR_ELT(Op, DAG);
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
    return lowerBUILD_VECTOR(Op, DAG);
    return lowerCONCAT_VECTORS(Op, DAG);
    return lowerVECTOR_SHUFFLE(Op, DAG);
    return lowerBITREVERSE(Op, DAG);
    return lowerSCALAR_TO_VECTOR(Op, DAG);
    return lowerPREFETCH(Op, DAG);
    return lowerSELECT(Op, DAG);
    return lowerBRCOND(Op, DAG);
    return lowerFP_TO_FP16(Op, DAG);
    return lowerFP16_TO_FP(Op, DAG);
    return lowerFP_TO_BF16(Op, DAG);
    return lowerBF16_TO_FP(Op, DAG);
    return lowerVECREDUCE_ADD(Op, DAG);
    return lowerRotate(Op, DAG);
    return lowerVECREDUCE(Op, DAG);
    return lowerConstantFP(Op, DAG);
    return lowerSETCC(Op, DAG);
  EVT VT = V.getValueType();

    return V.getOperand(0);

      (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
    Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);

    if (!V->isOnlyUserOf(SplatValue.getNode()))
    Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);

      V.getOperand(0).hasOneUse() && V.getOperand(1).hasOneUse()) {
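// lowerConstantFP: materialize an FP immediate by first building it in a GPR
// and then moving it to an FPR (MOVGR2FR_W / MOVGR2FR_D, or the
// MOVGR2FR_D_LO_HI pair form for f64 on a 32-bit target).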
  EVT VT = Op.getValueType();

  assert((VT == MVT::f32 && Subtarget.hasBasicF()) ||
         (VT == MVT::f64 && Subtarget.hasBasicD()));

  int InsNum = Seq.size() + ((VT == MVT::f64 && !Subtarget.is64Bit()) ? 2 : 1);

    if (Subtarget.is64Bit())
    return DAG.getNode(Subtarget.is64Bit() ? LoongArchISD::MOVGR2FR_W_LA64
                                           : LoongArchISD::MOVGR2FR_W,

  if (Subtarget.is64Bit()) {
    return DAG.getNode(LoongArchISD::MOVGR2FR_D, DL, VT, NewVal);

  return DAG.getNode(LoongArchISD::MOVGR2FR_D_LO_HI, DL, VT, Lo, Hi);
  EVT ResultVT = Op.getValueType();
  EVT OperandVT = Op.getOperand(0).getValueType();

  if (ResultVT == SetCCResultVT)

  assert(Op.getOperand(0).getValueType() == Op.getOperand(1).getValueType() &&
         "SETCC operands must have the same type!");

                            Op.getOperand(1), Op.getOperand(2));

  if (ResultVT.bitsGT(SetCCResultVT))
  else if (ResultVT.bitsLT(SetCCResultVT))
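// lowerVECREDUCE_ADD: sum reduction via repeated pairwise VHADDW, doubling the
// element width on every step; 256-bit LASX vectors additionally combine
// their two 128-bit halves.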
  MVT OpVT = Op.getSimpleValueType();

  unsigned LegalVecSize = 128;
  bool isLASX256Vector =

  if (isLASX256Vector) {

  for (unsigned i = 1; i < NumEles; i *= 2, EleBits *= 2) {
    Val = DAG.getNode(LoongArchISD::VHADDW, DL, VecTy, Val, Val);

  if (isLASX256Vector) {
  MVT OpVT = Op.getSimpleValueType();
  MVT GRLenVT = Subtarget.getGRLenVT();

  for (int i = NumEles; i > 1; i /= 2) {
    Val = DAG.getNode(Opcode, DL, VecTy, Tmp, Val);

  unsigned IsData = Op.getConstantOperandVal(4);
    return Op.getOperand(0);
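// lowerRotate: a constant-splat rotate amount that is a multiple of the
// element width needs no rotation; otherwise the amount is reduced modulo the
// element width (Urem) before the rotate node is emitted.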
  MVT VT = Op.getSimpleValueType();
  unsigned Opcode = Op.getOpcode();

  auto checkCstSplat = [](SDValue V, APInt &CstSplatValue) {
      CstSplatValue = C->getAPIntValue();

  bool IsCstSplat = checkCstSplat(Amt, CstSplatValue);

  if (IsCstSplat && CstSplatValue.urem(EltSizeInBits) == 0)

  return DAG.getNode(Opcode, DL, VT, R, Urem);
  if (LHS == LHS2 && RHS == RHS2) {
  } else if (LHS == RHS2 && RHS == LHS2) {

  MVT VT = N->getSimpleValueType(0);

  if (~TrueVal == FalseVal) {

  unsigned SelOpNo = 0;

  unsigned ConstSelOpNo = 1;
  unsigned OtherSelOpNo = 2;

  if (!ConstSelOpNode || ConstSelOpNode->isOpaque())
  if (!ConstBinOpNode || ConstBinOpNode->isOpaque())

  SDValue NewConstOps[2] = {ConstSelOp, ConstBinOp};
    std::swap(NewConstOps[0], NewConstOps[1]);

  SDValue NewNonConstOps[2] = {OtherSelOp, ConstBinOp};
    std::swap(NewNonConstOps[0], NewNonConstOps[1]);

  SDValue NewT = (ConstSelOpNo == 1) ? NewConstOp : NewNonConstOp;
  SDValue NewF = (ConstSelOpNo == 1) ? NewNonConstOp : NewConstOp;
      ShAmt = LHS.getValueSizeInBits() - 1 - Log2_64(Mask);

    int64_t C = RHSC->getSExtValue();
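// lowerSELECT: a setcc-fed select is folded into LoongArchISD::SELECT_CC; the
// increment/decrement special cases (TrueVal == FalseVal +/- 1) and the
// operand-swapped form (RHS == TrueV, LHS == FalseV) are recognized first.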
  MVT VT = Op.getSimpleValueType();
  MVT GRLenVT = Subtarget.getGRLenVT();

  if (Op.hasOneUse()) {
    unsigned UseOpc = Op->user_begin()->getOpcode();
      SDNode *BinOp = *Op->user_begin();
        return lowerSELECT(NewSel, DAG);

    return DAG.getNode(LoongArchISD::SELECT_CC, DL, VT, Ops);

    if (TrueVal - 1 == FalseVal)
    if (TrueVal + 1 == FalseVal)

      RHS == TrueV && LHS == FalseV) {

  return DAG.getNode(LoongArchISD::SELECT_CC, DL, VT, Ops);
  MVT GRLenVT = Subtarget.getGRLenVT();

    return DAG.getNode(LoongArchISD::BR_CC, DL, Op.getValueType(),
                       Op.getOperand(0), LHS, RHS, TargetCC,

    return DAG.getNode(LoongArchISD::BRCOND, DL, Op.getValueType(),
                       Op.getOperand(0), CondV, Op.getOperand(2));

  return DAG.getNode(LoongArchISD::BR_CC, DL, Op.getValueType(),
LoongArchTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
  MVT OpVT = Op.getSimpleValueType();

  EVT ResTy = Op->getValueType(0);

  if (!Subtarget.is64Bit() && (ResTy == MVT::v16i8 || ResTy == MVT::v32i8))

  for (unsigned int i = 0; i < NewEltNum; i++) {
    unsigned RevOp = (ResTy == MVT::v16i8 || ResTy == MVT::v32i8)
                         ? (unsigned)LoongArchISD::BITREV_8B

  for (unsigned int i = 0; i < NewEltNum; i++)
    for (int j = OrigEltNum / NewEltNum - 1; j >= 0; j--)
      Mask.push_back(j + (OrigEltNum / NewEltNum) * i);

  if (EltBits > 32 || EltBits == 1)
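// Shuffle-as-shift matching: a shuffle whose displaced lanes are known zero
// can be emitted as a whole-register byte shift (VBSLL/VBSRL) or a per-element
// bit shift (VSLLI/VSRLI); all scales and shift amounts are tried.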
                               int MaskOffset, const APInt &Zeroable) {
  int Size = Mask.size();
  unsigned SizeInBits = Size * ScalarSizeInBits;

  auto CheckZeros = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i < Size; i += Scale)
      for (int j = 0; j < Shift; ++j)
        if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])

    for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
      if (!(Mask[i] == -1 || Mask[i] == Low))

  auto MatchShift = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i != Size; i += Scale) {
      unsigned Pos = Left ? i + Shift : i;
      unsigned Low = Left ? i : i + Shift;
      unsigned Len = Scale - Shift;

    int ShiftEltBits = ScalarSizeInBits * Scale;
    bool ByteShift = ShiftEltBits > 64;
    Opcode = Left ? (ByteShift ? LoongArchISD::VBSLL : LoongArchISD::VSLLI)
                  : (ByteShift ? LoongArchISD::VBSRL : LoongArchISD::VSRLI);
    int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
    Scale = ByteShift ? Scale / 2 : Scale;

    return (int)ShiftAmt;

  unsigned MaxWidth = 128;
  for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
    for (int Shift = 1; Shift != Scale; ++Shift)
      for (bool Left : {true, false})
        if (CheckZeros(Shift, Scale, Left)) {
          int ShiftAmt = MatchShift(Shift, Scale, Left);
                                       const APInt &Zeroable) {
  int Size = Mask.size();

                                  Mask, Size, Zeroable);

         "Illegal integer vector type");

template <typename ValType>
                               unsigned CheckStride,
                               ValType ExpectedIndex,
                               unsigned ExpectedIndexStride) {
    if (*I != -1 && *I != ExpectedIndex)
    ExpectedIndex += ExpectedIndexStride;
    for (unsigned n = 0; n < CheckStride && I != End; ++n, ++I)
  int Size = Mask.size();
  int ScalarSizeInBits = VectorSizeInBits / Size;
  assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
  (void)ScalarSizeInBits;

  for (int i = 0; i < Size; ++i) {
    if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
  RepeatedMask.assign(LaneSize, -1);
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i) {
    assert(Mask[i] == -1 || Mask[i] >= 0);
    if ((Mask[i] % Size) / LaneSize != i / LaneSize)

        Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
    if (RepeatedMask[i % LaneSize] < 0)
      RepeatedMask[i % LaneSize] = LocalM;
    else if (RepeatedMask[i % LaneSize] != LocalM)
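// Rotation matching: determine whether the two inputs can be combined by a
// single element rotation; the element rotation is later converted to a byte
// rotation (Rotation * Scale) so it can be emitted as a byte-granular op.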
  int NumElts = RepeatedMask.size();
  int Scale = 16 / NumElts;

  for (int i = 0; i < NumElts; ++i) {
    int M = RepeatedMask[i];
    assert((M == -1 || (0 <= M && M < (2 * NumElts))) &&
           "Unexpected mask index.");

    int StartIdx = i - (M % NumElts);

    int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;

      Rotation = CandidateRotation;
    else if (Rotation != CandidateRotation)

    SDValue MaskV = M < NumElts ? V1 : V2;

    else if (TargetV != MaskV)

  assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((Lo || Hi) && "Failed to find a rotated input vector!");

  return Rotation * Scale;

  if (ByteRotation <= 0)

  int LoByteShift = 16 - ByteRotation;
  int HiByteShift = ByteRotation;
                                      const APInt &Zeroable) {
  for (int i = 0; i < NumElements; i++) {
    if (i % Scale != 0) {

    SDValue V = M < NumElements ? V1 : V2;
    M = M % NumElements;

      Offset = M - (i / Scale);
      if (Offset % (NumElements / Scale))
    } else if (InputV != V)

    if (M != (Offset + (i / Scale)))

    unsigned VilVLoHi = LoongArchISD::VILVL;
    if (Offset >= (NumElements / 2)) {
      VilVLoHi = LoongArchISD::VILVH;
      Offset -= (NumElements / 2);

    InputV = DAG.getNode(VilVLoHi, DL, InputVT, Ext, InputV);
  } while (Scale > 1);

  for (int NumExtElements = Bits / 64; NumExtElements < NumElements;
       NumExtElements *= 2) {
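// Splat shuffles: when every mask element selects the same source lane, the
// shuffle becomes a single VREPLVEI of that lane.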
  int SplatIndex = -1;
  for (const auto &M : Mask) {

  if (SplatIndex == -1)

  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");

  return DAG.getNode(LoongArchISD::VREPLVEI, DL, VT, V1,
  unsigned SubVecSize = 4;
  if (VT == MVT::v2f64 || VT == MVT::v2i64)

  int SubMask[4] = {-1, -1, -1, -1};
  for (unsigned i = 0; i < SubVecSize; ++i) {
    for (unsigned j = i; j < Mask.size(); j += SubVecSize) {

      M -= 4 * (j / SubVecSize);
      if (M < 0 || M >= 4)

      if (SubMask[i] == -1)
      else if (M != -1 && M != SubMask[i])

  for (int i = SubVecSize - 1; i >= 0; --i) {

  if (VT == MVT::v2f64 || VT == MVT::v2i64)
    return DAG.getNode(LoongArchISD::VSHUF4I_D, DL, VT, V1, V2,

  return DAG.getNode(LoongArchISD::VSHUF4I, DL, VT, V1,

  if (VT != MVT::v16i8 && VT != MVT::v8i16 && VT != MVT::v32i8 &&

  for (int i = 0; i < WidenNumElts; ++i)
    WidenMask[i] = WidenNumElts - 1 - i;

  return DAG.getNode(LoongArchISD::VSHUF4I, DL, VT,
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

    return DAG.getNode(LoongArchISD::VPACKEV, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

    return DAG.getNode(LoongArchISD::VPACKOD, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

    return DAG.getNode(LoongArchISD::VILVH, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

    return DAG.getNode(LoongArchISD::VILVL, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

    return DAG.getNode(LoongArchISD::VPICKEV, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

    return DAG.getNode(LoongArchISD::VPICKOD, DL, VT, V2, V1);

  return DAG.getNode(LoongArchISD::VSHUF, DL, VT, MaskVec, V2, V1);

         "Vector type is unsupported for lsx!");
         "Two operands have different types!");
         "Unexpected mask size for shuffle!");
  assert(Mask.size() % 2 == 0 && "Expected even mask size.");

  APInt KnownUndef, KnownZero;
  APInt Zeroable = KnownUndef | KnownZero;
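// The 256-bit (LASX) shuffle helpers below mirror the 128-bit ones but must
// respect the two independent 128-bit lanes: the interleave/pick variants work
// within each lane, while XVPERMI/XVPERM handle cross-lane movement.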
  int SplatIndex = -1;
  for (const auto &M : Mask) {

  if (SplatIndex == -1)

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  int HalfSize = Mask.size() / 2;

  if (SplatIndex >= HalfSize)

  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");

  return DAG.getNode(LoongArchISD::VREPLVEI, DL, VT, V1,

  if (Mask.size() <= 4)

  if (Mask.size() != 4 || (VT != MVT::v4i64 && VT != MVT::v4f64))

  unsigned MaskImm = 0;
  for (unsigned i = 0; i < Mask.size(); ++i) {
    MaskImm |= Mask[i] << (i * 2);

  return DAG.getNode(LoongArchISD::XVPERMI, DL, VT, V1,
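// XVPERM path: used for v8i32/v8f32 shuffles that genuinely cross the 128-bit
// lanes; if each destination half draws from only one source half, this path
// is skipped so that cheaper in-lane shuffles can be used instead.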
  if (Mask.size() != 8 || (VT != MVT::v8i32 && VT != MVT::v8f32))

  unsigned HalfSize = NumElts / 2;
  bool FrontLo = true, FrontHi = true;
  bool BackLo = true, BackHi = true;

  auto inRange = [](int val, int low, int high) {
    return (val == -1) || (val >= low && val < high);

  for (unsigned i = 0; i < HalfSize; ++i) {
    int Fronti = Mask[i];
    int Backi = Mask[i + HalfSize];

    FrontLo &= inRange(Fronti, 0, HalfSize);
    FrontHi &= inRange(Fronti, HalfSize, NumElts);
    BackLo &= inRange(Backi, 0, HalfSize);
    BackHi &= inRange(Backi, HalfSize, NumElts);

  if ((FrontLo || FrontHi) && (BackLo || BackHi))

  for (unsigned i = 0; i < NumElts; ++i)

  return DAG.getNode(LoongArchISD::XVPERM, DL, VT, V1, MaskVec);
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  unsigned LeftSize = HalfSize / 2;
  SDValue OriV1 = V1, OriV2 = V2;

                         Mask.size() + HalfSize - LeftSize, 1) &&
                         Mask.size() + HalfSize + LeftSize, 1))

                         Mask.size() + HalfSize - LeftSize, 1) &&
                         Mask.size() + HalfSize + LeftSize, 1))

    return DAG.getNode(LoongArchISD::VILVH, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

                         Mask.size() + HalfSize, 1))
                         Mask.size() + HalfSize, 1))

    return DAG.getNode(LoongArchISD::VILVL, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

    return DAG.getNode(LoongArchISD::VPICKEV, DL, VT, V2, V1);

  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

    return DAG.getNode(LoongArchISD::VPICKOD, DL, VT, V2, V1);
  if (VT != MVT::v8i32 && VT != MVT::v8f32 && VT != MVT::v4i64 &&

  int MaskSize = Mask.size();

  auto checkReplaceOne = [&](int Base, int Replaced) -> int {
    for (int i = 0; i < MaskSize; ++i) {
      if (Mask[i] == Base + i || Mask[i] == -1)
      if (Mask[i] != Replaced)

  int Idx = checkReplaceOne(0, MaskSize);
    return DAG.getNode(LoongArchISD::XVINSVE0, DL, VT, V1, V2,

  Idx = checkReplaceOne(MaskSize, 0);
    return DAG.getNode(LoongArchISD::XVINSVE0, DL, VT, V2, V1,

  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + HalfSize;
  const auto &End = Mask.end();

  for (auto it = Begin; it < Mid; it++) {
    else if ((*it >= 0 && *it < HalfSize) ||
             (*it >= MaskSize && *it < MaskSize + HalfSize)) {
      int M = *it < HalfSize ? *it : *it - HalfSize;

  assert((int)MaskAlloc.size() == HalfSize && "xvshuf convert failed!");

  for (auto it = Mid; it < End; it++) {
    else if ((*it >= HalfSize && *it < MaskSize) ||
             (*it >= MaskSize + HalfSize && *it < MaskSize * 2)) {
      int M = *it < MaskSize ? *it - HalfSize : *it - MaskSize;

  assert((int)MaskAlloc.size() == MaskSize && "xvshuf convert failed!");

  return DAG.getNode(LoongArchISD::VSHUF, DL, VT, MaskVec, V2, V1);
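// Cross-lane fallback: classify each destination half as sourcing from the
// high or low 128-bit lane, pre-swap lanes with XVPERMI on v4i64 where needed,
// rewrite the mask to be in-lane, and then reuse the in-lane lowering.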
  enum HalfMaskType { HighLaneTy, LowLaneTy, None };

  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;

  HalfMaskType preMask = None, postMask = None;

  if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
    preMask = HighLaneTy;
  else if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
    preMask = LowLaneTy;

  if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
        return M < 0 || (M >= HalfSize && M < MaskSize) ||
               (M >= MaskSize + HalfSize && M < MaskSize * 2);
    postMask = LowLaneTy;
  else if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
             return M < 0 || (M >= 0 && M < HalfSize) ||
                    (M >= MaskSize && M < MaskSize + HalfSize);
    postMask = HighLaneTy;

  if (preMask == HighLaneTy && postMask == LowLaneTy) {
  if (preMask == LowLaneTy && postMask == HighLaneTy) {
    V1 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V1,
    V2 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V2,
    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
  } else if (preMask == LowLaneTy && postMask == LowLaneTy) {
    V1 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V1,
    V2 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V2,
    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
  } else if (preMask == HighLaneTy && postMask == HighLaneTy) {
    V1 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V1,
    V2 = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, V2,
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
  int Size = Mask.size();
  int LaneSize = Size / 2;

  bool LaneCrossing[2] = {false, false};
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
      LaneCrossing[(Mask[i] % Size) / LaneSize] = true;

  if (!LaneCrossing[0] && !LaneCrossing[1])

  InLaneMask.assign(Mask.begin(), Mask.end());
  for (int i = 0; i < Size; ++i) {
    int &M = InLaneMask[i];
    if (((M % Size) / LaneSize) != (i / LaneSize))
      M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;

                   DAG.getUNDEF(MVT::v4i64), {2, 3, 0, 1});

         "Vector type is unsupported for lasx!");
         "Two operands have different types!");
         "Unexpected mask size for shuffle!");
  assert(Mask.size() % 2 == 0 && "Expected even mask size.");
  assert(Mask.size() >= 4 && "Mask size is less than 4.");

  APInt KnownUndef, KnownZero;
  APInt Zeroable = KnownUndef | KnownZero;
  ArrayRef<int> OrigMask = SVOp->getMask();
  MVT VT = Op.getSimpleValueType();

  bool V1IsUndef = V1.isUndef();
  bool V2IsUndef = V2.isUndef();
  if (V1IsUndef && V2IsUndef)

      any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
    SmallVector<int, 8> NewMask(OrigMask);
    for (int &M : NewMask)
      if (M >= NumElements)

  int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
  (void)MaskUpperLimit;
                [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
         "Out of bounds shuffle index");
  std::tie(Res, Chain) =
      makeLibCall(DAG, LC, MVT::f32, Op0, CallOptions, DL, Chain);
  if (Subtarget.is64Bit())
    return DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Res);

  SDValue Arg = Subtarget.is64Bit() ? DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64,

  std::tie(Res, Chain) = makeLibCall(DAG, RTLIB::FPEXT_F16_F32, MVT::f32, Arg,
                                     CallOptions, DL, Chain);

  assert(Subtarget.hasBasicF() && "Unexpected custom legalization");

      makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
  if (Subtarget.is64Bit())
    return DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Res);

  assert(Subtarget.hasBasicF() && "Unexpected custom legalization");
  MVT VT = Op.getSimpleValueType();

  SDValue Res = Subtarget.is64Bit() ? DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64,
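// FP16/BF16 conversions go through f32 libcalls; on LA64 the f32 value is
// shuttled between FPRs and GPRs with MOVFR2GR_S_LA64 / MOVGR2FR_W_LA64.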
3004 "Unsupported vector type for broadcast.");
3007 bool IsIdeneity =
true;
3009 for (
int i = 0; i !=
NumOps; i++) {
3011 if (
Op.getOpcode() !=
ISD::LOAD || (IdentitySrc &&
Op != IdentitySrc)) {
3023 auto ExtType = LN->getExtensionType();
3028 assert(LN->isUnindexed() &&
"Unexpected indexed load.");
3033 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
3051 for (
unsigned i = 1; i <
Ops.size(); ++i) {
3065 EVT ResTy,
unsigned first) {
3068 assert(first + NumElts <= Node->getSimpleValueType(0).getVectorNumElements());
3071 Node->op_begin() + first + NumElts);
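// lowerBUILD_VECTOR outline: constant splats use the immediate-splat path
// (picking a via-type from the splat bit width); a repeated element sequence
// is filled into a 128-bit vector and broadcast with XVREPLVE0/XVREPLVE0Q or
// VREPLVEI; otherwise elements are inserted one at a time, with a mostly-undef
// upper half of a 256-bit vector handled through its 128-bit subvector.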
  MVT VT = Node->getSimpleValueType(0);
  EVT ResTy = Op->getValueType(0);

  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool UseSameConstant = true;

  if ((!Subtarget.hasExtLSX() || !Is128Vec) &&
      (!Subtarget.hasExtLASX() || !Is256Vec))

  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
      SplatBitSize <= 64) {
    if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&

    if (SplatBitSize == 64 && !Subtarget.is64Bit()) {

    if ((Is128Vec && ResTy == MVT::v4i32) ||
        (Is256Vec && ResTy == MVT::v8i32))

    switch (SplatBitSize) {
      ViaVecTy = Is128Vec ? MVT::v16i8 : MVT::v32i8;
      ViaVecTy = Is128Vec ? MVT::v8i16 : MVT::v16i16;
      ViaVecTy = Is128Vec ? MVT::v4i32 : MVT::v8i32;
      ViaVecTy = Is128Vec ? MVT::v2i64 : MVT::v4i64;

    if (ViaVecTy != ResTy)

  for (unsigned i = 0; i < NumElts; ++i) {
      ConstantValue = Opi;
    else if (ConstantValue != Opi)
      UseSameConstant = false;

  if (IsConstant && UseSameConstant && ResTy != MVT::v2f64) {
    for (unsigned i = 0; i < NumElts; ++i) {

    BitVector UndefElements;
    if (Node->getRepeatedSequence(Sequence, &UndefElements) &&
        UndefElements.count() == 0) {

      EVT FillTy = Is256Vec

      fillVector(Sequence, DAG, DL, Subtarget, FillVec, FillTy);

      unsigned SplatLen = NumElts / SeqLen;

      if (SplatEltTy == MVT::i128)
        SplatTy = MVT::v4i64;

            DAG.getNode((SplatEltTy == MVT::i128) ? LoongArchISD::XVREPLVE0Q
                                                  : LoongArchISD::XVREPLVE0,
                        DL, SplatTy, SrcVec);
        SplatVec = DAG.getNode(LoongArchISD::VREPLVEI, DL, SplatTy, SrcVec,

    if (ResTy == MVT::v8i32 || ResTy == MVT::v8f32 || ResTy == MVT::v4i64 ||
        ResTy == MVT::v4f64) {
      unsigned NonUndefCount = 0;
      for (unsigned i = NumElts / 2; i < NumElts; ++i) {
        if (!Node->getOperand(i).isUndef()) {
          if (NonUndefCount > 1)
      if (NonUndefCount == 1)

                                   VecTy, NumElts / 2);
  MVT ResVT = Op.getSimpleValueType();

  unsigned NumFreezeUndef = 0;
  unsigned NumZero = 0;
  unsigned NumNonZero = 0;
  unsigned NonZeros = 0;
  SmallSet<SDValue, 4> Undefs;
  for (unsigned i = 0; i != NumOperands; ++i) {
    assert(i < sizeof(NonZeros) * CHAR_BIT);

  if (NumNonZero > 2) {
                             Ops.slice(0, NumOperands / 2));
                             Ops.slice(NumOperands / 2));

  MVT SubVT = Op.getOperand(0).getSimpleValueType();
  for (unsigned i = 0; i != NumOperands; ++i) {
    if ((NonZeros & (1 << i)) == 0)
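// lowerEXTRACT_VECTOR_ELT: for a non-constant index, the index is splatted
// into a vector and VSHUF (XVPERM for 256-bit types) moves the selected
// element to the front of the vector, from where it is read out.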
LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
  MVT EltVT = Op.getSimpleValueType();
  MVT GRLenVT = Subtarget.getGRLenVT();

          ? DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64, DL, MVT::f32, Idx)

      DAG.getBitcast((VecTy == MVT::v4f64) ? MVT::v4i64 : VecTy, IdxVec);
      DAG.getNode(LoongArchISD::VSHUF, DL, VecTy, MaskVec, TmpVec, Vec);

      DAG.getNode(LoongArchISD::XVPERM, DL, VecTy, Vec, SplatIdx);

LoongArchTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
  MVT VT = Op.getSimpleValueType();

  if (!Subtarget.is64Bit() && IdxTy == MVT::i64) {
    for (unsigned i = 0; i < NumElts; ++i) {
    for (unsigned i = 0; i < NumElts; ++i) {
    for (unsigned i = 0; i < NumElts; ++i)

  if (Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i32) {
           "On LA64, only 64-bit registers can be written.");
    return Op.getOperand(0);

  if (!Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i64) {
           "On LA32, only 32-bit registers can be written.");
    return Op.getOperand(0);
3483 "be a constant integer");
3489 Register FrameReg = Subtarget.getRegisterInfo()->getFrameRegister(MF);
3490 EVT VT =
Op.getValueType();
3493 unsigned Depth =
Op.getConstantOperandVal(0);
3494 int GRLenInBytes = Subtarget.getGRLen() / 8;
3497 int Offset = -(GRLenInBytes * 2);
3509 if (
Op.getConstantOperandVal(0) != 0) {
3511 "return address can only be determined for the current frame");
3517 MVT GRLenVT = Subtarget.getGRLenVT();
3529 auto Size = Subtarget.getGRLen() / 8;
3537 auto *FuncInfo = MF.
getInfo<LoongArchMachineFunctionInfo>();
3547 MachinePointerInfo(SV));
3552 assert(Subtarget.is64Bit() && Subtarget.hasBasicF() &&
3553 !Subtarget.hasBasicD() &&
"unexpected target features");
3559 if (
C &&
C->getZExtValue() < UINT64_C(0xFFFFFFFF))
3563 if (Op0->
getOpcode() == LoongArchISD::BSTRPICK &&
3573 EVT RetVT =
Op.getValueType();
3579 std::tie(Result, Chain) =
3586 assert(Subtarget.is64Bit() && Subtarget.hasBasicF() &&
3587 !Subtarget.hasBasicD() &&
"unexpected target features");
3598 EVT RetVT =
Op.getValueType();
3604 std::tie(Result, Chain) =
  EVT VT = Op.getValueType();

  if (Op.getValueType() == MVT::f32 && Op0VT == MVT::i32 &&
      Subtarget.is64Bit() && Subtarget.hasBasicF()) {
    return DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64, DL, MVT::f32, NewOp0);

  if (VT == MVT::f64 && Op0VT == MVT::i64 && !Subtarget.is64Bit()) {
    return DAG.getNode(LoongArchISD::BUILD_PAIR_F64, DL, MVT::f64, Lo, Hi);

  if (Op.getValueSizeInBits() > 32 && Subtarget.hasBasicF() &&
      !Subtarget.hasBasicD()) {
    return DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Dst);
                                   N->getOffset(), Flags);

template <class NodeTy>
                                         bool IsLocal) const {
  assert(Subtarget.is64Bit() && "Large code model requires LA64");

  assert(N->getOffset() == 0 && "unexpected offset in global node");
  const GlobalValue *GV = N->getGlobal();

                                     unsigned Opc, bool UseGOT,
  MVT GRLenVT = Subtarget.getGRLenVT();

  if (Opc == LoongArch::PseudoLA_TLS_LE && !Large)
  Args.emplace_back(Load, CallTy);

  TargetLowering::CallLoweringInfo CLI(DAG);

  const GlobalValue *GV = N->getGlobal();

LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
  assert((!Large || Subtarget.is64Bit()) && "Large code model requires LA64");

  assert(N->getOffset() == 0 && "unexpected offset in global node");

    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_GD_LARGE
                                   : LoongArch::PseudoLA_TLS_GD,

    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_LD_LARGE
                                   : LoongArch::PseudoLA_TLS_LD,

    return getStaticTLSAddr(N, DAG,
                            Large ? LoongArch::PseudoLA_TLS_IE_LARGE
                                  : LoongArch::PseudoLA_TLS_IE,

    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE,

    return getTLSDescAddr(N, DAG,
                          Large ? LoongArch::PseudoLA_TLS_DESC_LARGE
                                : LoongArch::PseudoLA_TLS_DESC,
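// TLS lowering selects a pseudo per TLS model: GD/LD take the dynamic-call
// path, IE/LE the static path, and TLSDESC its descriptor call; the *_LARGE
// variants are used under the large code model (LA64 only).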
template <unsigned N>
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");

LoongArchTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
  switch (Op.getConstantOperandVal(0)) {
  case Intrinsic::thread_pointer: {
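// The intrinsic cases that follow are grouped by the bit width of their
// immediate operand; each group is validated with checkIntrinsicImmArg<N>,
// which reports "argument out of range" on failure.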
3951 case Intrinsic::loongarch_lsx_vpickve2gr_d:
3952 case Intrinsic::loongarch_lsx_vpickve2gr_du:
3953 case Intrinsic::loongarch_lsx_vreplvei_d:
3954 case Intrinsic::loongarch_lasx_xvrepl128vei_d:
3956 case Intrinsic::loongarch_lsx_vreplvei_w:
3957 case Intrinsic::loongarch_lasx_xvrepl128vei_w:
3958 case Intrinsic::loongarch_lasx_xvpickve2gr_d:
3959 case Intrinsic::loongarch_lasx_xvpickve2gr_du:
3960 case Intrinsic::loongarch_lasx_xvpickve_d:
3961 case Intrinsic::loongarch_lasx_xvpickve_d_f:
3963 case Intrinsic::loongarch_lasx_xvinsve0_d:
3965 case Intrinsic::loongarch_lsx_vsat_b:
3966 case Intrinsic::loongarch_lsx_vsat_bu:
3967 case Intrinsic::loongarch_lsx_vrotri_b:
3968 case Intrinsic::loongarch_lsx_vsllwil_h_b:
3969 case Intrinsic::loongarch_lsx_vsllwil_hu_bu:
3970 case Intrinsic::loongarch_lsx_vsrlri_b:
3971 case Intrinsic::loongarch_lsx_vsrari_b:
3972 case Intrinsic::loongarch_lsx_vreplvei_h:
3973 case Intrinsic::loongarch_lasx_xvsat_b:
3974 case Intrinsic::loongarch_lasx_xvsat_bu:
3975 case Intrinsic::loongarch_lasx_xvrotri_b:
3976 case Intrinsic::loongarch_lasx_xvsllwil_h_b:
3977 case Intrinsic::loongarch_lasx_xvsllwil_hu_bu:
3978 case Intrinsic::loongarch_lasx_xvsrlri_b:
3979 case Intrinsic::loongarch_lasx_xvsrari_b:
3980 case Intrinsic::loongarch_lasx_xvrepl128vei_h:
3981 case Intrinsic::loongarch_lasx_xvpickve_w:
3982 case Intrinsic::loongarch_lasx_xvpickve_w_f:
3984 case Intrinsic::loongarch_lasx_xvinsve0_w:
3986 case Intrinsic::loongarch_lsx_vsat_h:
3987 case Intrinsic::loongarch_lsx_vsat_hu:
3988 case Intrinsic::loongarch_lsx_vrotri_h:
3989 case Intrinsic::loongarch_lsx_vsllwil_w_h:
3990 case Intrinsic::loongarch_lsx_vsllwil_wu_hu:
3991 case Intrinsic::loongarch_lsx_vsrlri_h:
3992 case Intrinsic::loongarch_lsx_vsrari_h:
3993 case Intrinsic::loongarch_lsx_vreplvei_b:
3994 case Intrinsic::loongarch_lasx_xvsat_h:
3995 case Intrinsic::loongarch_lasx_xvsat_hu:
3996 case Intrinsic::loongarch_lasx_xvrotri_h:
3997 case Intrinsic::loongarch_lasx_xvsllwil_w_h:
3998 case Intrinsic::loongarch_lasx_xvsllwil_wu_hu:
3999 case Intrinsic::loongarch_lasx_xvsrlri_h:
4000 case Intrinsic::loongarch_lasx_xvsrari_h:
4001 case Intrinsic::loongarch_lasx_xvrepl128vei_b:
4003 case Intrinsic::loongarch_lsx_vsrlni_b_h:
4004 case Intrinsic::loongarch_lsx_vsrani_b_h:
4005 case Intrinsic::loongarch_lsx_vsrlrni_b_h:
4006 case Intrinsic::loongarch_lsx_vsrarni_b_h:
4007 case Intrinsic::loongarch_lsx_vssrlni_b_h:
4008 case Intrinsic::loongarch_lsx_vssrani_b_h:
4009 case Intrinsic::loongarch_lsx_vssrlni_bu_h:
4010 case Intrinsic::loongarch_lsx_vssrani_bu_h:
4011 case Intrinsic::loongarch_lsx_vssrlrni_b_h:
4012 case Intrinsic::loongarch_lsx_vssrarni_b_h:
4013 case Intrinsic::loongarch_lsx_vssrlrni_bu_h:
4014 case Intrinsic::loongarch_lsx_vssrarni_bu_h:
4015 case Intrinsic::loongarch_lasx_xvsrlni_b_h:
4016 case Intrinsic::loongarch_lasx_xvsrani_b_h:
4017 case Intrinsic::loongarch_lasx_xvsrlrni_b_h:
4018 case Intrinsic::loongarch_lasx_xvsrarni_b_h:
4019 case Intrinsic::loongarch_lasx_xvssrlni_b_h:
4020 case Intrinsic::loongarch_lasx_xvssrani_b_h:
4021 case Intrinsic::loongarch_lasx_xvssrlni_bu_h:
4022 case Intrinsic::loongarch_lasx_xvssrani_bu_h:
4023 case Intrinsic::loongarch_lasx_xvssrlrni_b_h:
4024 case Intrinsic::loongarch_lasx_xvssrarni_b_h:
4025 case Intrinsic::loongarch_lasx_xvssrlrni_bu_h:
4026 case Intrinsic::loongarch_lasx_xvssrarni_bu_h:
4028 case Intrinsic::loongarch_lsx_vsat_w:
4029 case Intrinsic::loongarch_lsx_vsat_wu:
4030 case Intrinsic::loongarch_lsx_vrotri_w:
4031 case Intrinsic::loongarch_lsx_vsllwil_d_w:
4032 case Intrinsic::loongarch_lsx_vsllwil_du_wu:
4033 case Intrinsic::loongarch_lsx_vsrlri_w:
4034 case Intrinsic::loongarch_lsx_vsrari_w:
4035 case Intrinsic::loongarch_lsx_vslei_bu:
4036 case Intrinsic::loongarch_lsx_vslei_hu:
4037 case Intrinsic::loongarch_lsx_vslei_wu:
4038 case Intrinsic::loongarch_lsx_vslei_du:
4039 case Intrinsic::loongarch_lsx_vslti_bu:
4040 case Intrinsic::loongarch_lsx_vslti_hu:
4041 case Intrinsic::loongarch_lsx_vslti_wu:
4042 case Intrinsic::loongarch_lsx_vslti_du:
4043 case Intrinsic::loongarch_lsx_vbsll_v:
4044 case Intrinsic::loongarch_lsx_vbsrl_v:
4045 case Intrinsic::loongarch_lasx_xvsat_w:
4046 case Intrinsic::loongarch_lasx_xvsat_wu:
4047 case Intrinsic::loongarch_lasx_xvrotri_w:
4048 case Intrinsic::loongarch_lasx_xvsllwil_d_w:
4049 case Intrinsic::loongarch_lasx_xvsllwil_du_wu:
4050 case Intrinsic::loongarch_lasx_xvsrlri_w:
4051 case Intrinsic::loongarch_lasx_xvsrari_w:
4052 case Intrinsic::loongarch_lasx_xvslei_bu:
4053 case Intrinsic::loongarch_lasx_xvslei_hu:
4054 case Intrinsic::loongarch_lasx_xvslei_wu:
4055 case Intrinsic::loongarch_lasx_xvslei_du:
4056 case Intrinsic::loongarch_lasx_xvslti_bu:
4057 case Intrinsic::loongarch_lasx_xvslti_hu:
4058 case Intrinsic::loongarch_lasx_xvslti_wu:
4059 case Intrinsic::loongarch_lasx_xvslti_du:
4060 case Intrinsic::loongarch_lasx_xvbsll_v:
4061 case Intrinsic::loongarch_lasx_xvbsrl_v:
4063 case Intrinsic::loongarch_lsx_vseqi_b:
4064 case Intrinsic::loongarch_lsx_vseqi_h:
4065 case Intrinsic::loongarch_lsx_vseqi_w:
4066 case Intrinsic::loongarch_lsx_vseqi_d:
4067 case Intrinsic::loongarch_lsx_vslei_b:
4068 case Intrinsic::loongarch_lsx_vslei_h:
4069 case Intrinsic::loongarch_lsx_vslei_w:
4070 case Intrinsic::loongarch_lsx_vslei_d:
4071 case Intrinsic::loongarch_lsx_vslti_b:
4072 case Intrinsic::loongarch_lsx_vslti_h:
4073 case Intrinsic::loongarch_lsx_vslti_w:
4074 case Intrinsic::loongarch_lsx_vslti_d:
4075 case Intrinsic::loongarch_lasx_xvseqi_b:
4076 case Intrinsic::loongarch_lasx_xvseqi_h:
4077 case Intrinsic::loongarch_lasx_xvseqi_w:
4078 case Intrinsic::loongarch_lasx_xvseqi_d:
4079 case Intrinsic::loongarch_lasx_xvslei_b:
4080 case Intrinsic::loongarch_lasx_xvslei_h:
4081 case Intrinsic::loongarch_lasx_xvslei_w:
4082 case Intrinsic::loongarch_lasx_xvslei_d:
4083 case Intrinsic::loongarch_lasx_xvslti_b:
4084 case Intrinsic::loongarch_lasx_xvslti_h:
4085 case Intrinsic::loongarch_lasx_xvslti_w:
4086 case Intrinsic::loongarch_lasx_xvslti_d:
4088 case Intrinsic::loongarch_lsx_vsrlni_h_w:
4089 case Intrinsic::loongarch_lsx_vsrani_h_w:
4090 case Intrinsic::loongarch_lsx_vsrlrni_h_w:
4091 case Intrinsic::loongarch_lsx_vsrarni_h_w:
4092 case Intrinsic::loongarch_lsx_vssrlni_h_w:
4093 case Intrinsic::loongarch_lsx_vssrani_h_w:
4094 case Intrinsic::loongarch_lsx_vssrlni_hu_w:
4095 case Intrinsic::loongarch_lsx_vssrani_hu_w:
4096 case Intrinsic::loongarch_lsx_vssrlrni_h_w:
4097 case Intrinsic::loongarch_lsx_vssrarni_h_w:
4098 case Intrinsic::loongarch_lsx_vssrlrni_hu_w:
4099 case Intrinsic::loongarch_lsx_vssrarni_hu_w:
4100 case Intrinsic::loongarch_lsx_vfrstpi_b:
4101 case Intrinsic::loongarch_lsx_vfrstpi_h:
4102 case Intrinsic::loongarch_lasx_xvsrlni_h_w:
4103 case Intrinsic::loongarch_lasx_xvsrani_h_w:
4104 case Intrinsic::loongarch_lasx_xvsrlrni_h_w:
4105 case Intrinsic::loongarch_lasx_xvsrarni_h_w:
4106 case Intrinsic::loongarch_lasx_xvssrlni_h_w:
4107 case Intrinsic::loongarch_lasx_xvssrani_h_w:
4108 case Intrinsic::loongarch_lasx_xvssrlni_hu_w:
4109 case Intrinsic::loongarch_lasx_xvssrani_hu_w:
4110 case Intrinsic::loongarch_lasx_xvssrlrni_h_w:
4111 case Intrinsic::loongarch_lasx_xvssrarni_h_w:
4112 case Intrinsic::loongarch_lasx_xvssrlrni_hu_w:
4113 case Intrinsic::loongarch_lasx_xvssrarni_hu_w:
4114 case Intrinsic::loongarch_lasx_xvfrstpi_b:
4115 case Intrinsic::loongarch_lasx_xvfrstpi_h:
4117 case Intrinsic::loongarch_lsx_vsat_d:
4118 case Intrinsic::loongarch_lsx_vsat_du:
4119 case Intrinsic::loongarch_lsx_vrotri_d:
4120 case Intrinsic::loongarch_lsx_vsrlri_d:
4121 case Intrinsic::loongarch_lsx_vsrari_d:
4122 case Intrinsic::loongarch_lasx_xvsat_d:
4123 case Intrinsic::loongarch_lasx_xvsat_du:
4124 case Intrinsic::loongarch_lasx_xvrotri_d:
4125 case Intrinsic::loongarch_lasx_xvsrlri_d:
4126 case Intrinsic::loongarch_lasx_xvsrari_d:
4128 case Intrinsic::loongarch_lsx_vsrlni_w_d:
4129 case Intrinsic::loongarch_lsx_vsrani_w_d:
4130 case Intrinsic::loongarch_lsx_vsrlrni_w_d:
4131 case Intrinsic::loongarch_lsx_vsrarni_w_d:
4132 case Intrinsic::loongarch_lsx_vssrlni_w_d:
4133 case Intrinsic::loongarch_lsx_vssrani_w_d:
4134 case Intrinsic::loongarch_lsx_vssrlni_wu_d:
4135 case Intrinsic::loongarch_lsx_vssrani_wu_d:
4136 case Intrinsic::loongarch_lsx_vssrlrni_w_d:
4137 case Intrinsic::loongarch_lsx_vssrarni_w_d:
4138 case Intrinsic::loongarch_lsx_vssrlrni_wu_d:
4139 case Intrinsic::loongarch_lsx_vssrarni_wu_d:
4140 case Intrinsic::loongarch_lasx_xvsrlni_w_d:
4141 case Intrinsic::loongarch_lasx_xvsrani_w_d:
4142 case Intrinsic::loongarch_lasx_xvsrlrni_w_d:
4143 case Intrinsic::loongarch_lasx_xvsrarni_w_d:
4144 case Intrinsic::loongarch_lasx_xvssrlni_w_d:
4145 case Intrinsic::loongarch_lasx_xvssrani_w_d:
4146 case Intrinsic::loongarch_lasx_xvssrlni_wu_d:
4147 case Intrinsic::loongarch_lasx_xvssrani_wu_d:
4148 case Intrinsic::loongarch_lasx_xvssrlrni_w_d:
4149 case Intrinsic::loongarch_lasx_xvssrarni_w_d:
4150 case Intrinsic::loongarch_lasx_xvssrlrni_wu_d:
4151 case Intrinsic::loongarch_lasx_xvssrarni_wu_d:
4153 case Intrinsic::loongarch_lsx_vsrlni_d_q:
4154 case Intrinsic::loongarch_lsx_vsrani_d_q:
4155 case Intrinsic::loongarch_lsx_vsrlrni_d_q:
4156 case Intrinsic::loongarch_lsx_vsrarni_d_q:
4157 case Intrinsic::loongarch_lsx_vssrlni_d_q:
4158 case Intrinsic::loongarch_lsx_vssrani_d_q:
4159 case Intrinsic::loongarch_lsx_vssrlni_du_q:
4160 case Intrinsic::loongarch_lsx_vssrani_du_q:
4161 case Intrinsic::loongarch_lsx_vssrlrni_d_q:
4162 case Intrinsic::loongarch_lsx_vssrarni_d_q:
4163 case Intrinsic::loongarch_lsx_vssrlrni_du_q:
4164 case Intrinsic::loongarch_lsx_vssrarni_du_q:
4165 case Intrinsic::loongarch_lasx_xvsrlni_d_q:
4166 case Intrinsic::loongarch_lasx_xvsrani_d_q:
4167 case Intrinsic::loongarch_lasx_xvsrlrni_d_q:
4168 case Intrinsic::loongarch_lasx_xvsrarni_d_q:
4169 case Intrinsic::loongarch_lasx_xvssrlni_d_q:
4170 case Intrinsic::loongarch_lasx_xvssrani_d_q:
4171 case Intrinsic::loongarch_lasx_xvssrlni_du_q:
4172 case Intrinsic::loongarch_lasx_xvssrani_du_q:
4173 case Intrinsic::loongarch_lasx_xvssrlrni_d_q:
4174 case Intrinsic::loongarch_lasx_xvssrarni_d_q:
4175 case Intrinsic::loongarch_lasx_xvssrlrni_du_q:
4176 case Intrinsic::loongarch_lasx_xvssrarni_du_q:
4178 case Intrinsic::loongarch_lsx_vnori_b:
4179 case Intrinsic::loongarch_lsx_vshuf4i_b:
4180 case Intrinsic::loongarch_lsx_vshuf4i_h:
4181 case Intrinsic::loongarch_lsx_vshuf4i_w:
4182 case Intrinsic::loongarch_lasx_xvnori_b:
4183 case Intrinsic::loongarch_lasx_xvshuf4i_b:
4184 case Intrinsic::loongarch_lasx_xvshuf4i_h:
4185 case Intrinsic::loongarch_lasx_xvshuf4i_w:
4186 case Intrinsic::loongarch_lasx_xvpermi_d:
4188 case Intrinsic::loongarch_lsx_vshuf4i_d:
4189 case Intrinsic::loongarch_lsx_vpermi_w:
4190 case Intrinsic::loongarch_lsx_vbitseli_b:
4191 case Intrinsic::loongarch_lsx_vextrins_b:
4192 case Intrinsic::loongarch_lsx_vextrins_h:
4193 case Intrinsic::loongarch_lsx_vextrins_w:
4194 case Intrinsic::loongarch_lsx_vextrins_d:
4195 case Intrinsic::loongarch_lasx_xvshuf4i_d:
4196 case Intrinsic::loongarch_lasx_xvpermi_w:
4197 case Intrinsic::loongarch_lasx_xvpermi_q:
4198 case Intrinsic::loongarch_lasx_xvbitseli_b:
4199 case Intrinsic::loongarch_lasx_xvextrins_b:
4200 case Intrinsic::loongarch_lasx_xvextrins_h:
4201 case Intrinsic::loongarch_lasx_xvextrins_w:
4202 case Intrinsic::loongarch_lasx_xvextrins_d:
4204 case Intrinsic::loongarch_lsx_vrepli_b:
4205 case Intrinsic::loongarch_lsx_vrepli_h:
4206 case Intrinsic::loongarch_lsx_vrepli_w:
4207 case Intrinsic::loongarch_lsx_vrepli_d:
4208 case Intrinsic::loongarch_lasx_xvrepli_b:
4209 case Intrinsic::loongarch_lasx_xvrepli_h:
4210 case Intrinsic::loongarch_lasx_xvrepli_w:
4211 case Intrinsic::loongarch_lasx_xvrepli_d:
4213 case Intrinsic::loongarch_lsx_vldi:
4214 case Intrinsic::loongarch_lasx_xvldi:
LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
  MVT GRLenVT = Subtarget.getGRLenVT();
  EVT VT = Op.getValueType();

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (Op.getConstantOperandVal(1)) {
  case Intrinsic::loongarch_crc_w_b_w:
  case Intrinsic::loongarch_crc_w_h_w:
  case Intrinsic::loongarch_crc_w_w_w:
  case Intrinsic::loongarch_crc_w_d_w:
  case Intrinsic::loongarch_crcc_w_b_w:
  case Intrinsic::loongarch_crcc_w_h_w:
  case Intrinsic::loongarch_crcc_w_w_w:
  case Intrinsic::loongarch_crcc_w_d_w:
  case Intrinsic::loongarch_csrrd_w:
  case Intrinsic::loongarch_csrrd_d: {
    unsigned Imm = Op.getConstantOperandVal(2);
               : DAG.getNode(LoongArchISD::CSRRD, DL, {GRLenVT, MVT::Other},
  case Intrinsic::loongarch_csrwr_w:
  case Intrinsic::loongarch_csrwr_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
               : DAG.getNode(LoongArchISD::CSRWR, DL, {GRLenVT, MVT::Other},
                             {Chain, Op.getOperand(2),
  case Intrinsic::loongarch_csrxchg_w:
  case Intrinsic::loongarch_csrxchg_d: {
    unsigned Imm = Op.getConstantOperandVal(4);
               : DAG.getNode(LoongArchISD::CSRXCHG, DL, {GRLenVT, MVT::Other},
                             {Chain, Op.getOperand(2), Op.getOperand(3),
  case Intrinsic::loongarch_iocsrrd_d: {
        LoongArchISD::IOCSRRD_D, DL, {GRLenVT, MVT::Other},
#define IOCSRRD_CASE(NAME, NODE) \
  case Intrinsic::loongarch_##NAME: { \
    return DAG.getNode(LoongArchISD::NODE, DL, {GRLenVT, MVT::Other}, \
                       {Chain, Op.getOperand(2)}); \
  case Intrinsic::loongarch_cpucfg: {
    return DAG.getNode(LoongArchISD::CPUCFG, DL, {GRLenVT, MVT::Other},
                       {Chain, Op.getOperand(2)});
  case Intrinsic::loongarch_lddir_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
  case Intrinsic::loongarch_movfcsr2gr: {
    if (!Subtarget.hasBasicF())
    unsigned Imm = Op.getConstantOperandVal(2);
               : DAG.getNode(LoongArchISD::MOVFCSR2GR, DL, {VT, MVT::Other},
  case Intrinsic::loongarch_lsx_vld:
  case Intrinsic::loongarch_lsx_vldrepl_b:
  case Intrinsic::loongarch_lasx_xvld:
  case Intrinsic::loongarch_lasx_xvldrepl_b:
  case Intrinsic::loongarch_lsx_vldrepl_h:
  case Intrinsic::loongarch_lasx_xvldrepl_h:
                   Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lsx_vldrepl_w:
  case Intrinsic::loongarch_lasx_xvldrepl_w:
                   Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lsx_vldrepl_d:
  case Intrinsic::loongarch_lasx_xvldrepl_d:
                   Op, "argument out of range or not a multiple of 8", DAG)

    return Op.getOperand(0);
  MVT GRLenVT = Subtarget.getGRLenVT();

  uint64_t IntrinsicEnum = Op.getConstantOperandVal(1);

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqLA32 = "requires loongarch32";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (IntrinsicEnum) {
  case Intrinsic::loongarch_cacop_d:
  case Intrinsic::loongarch_cacop_w: {
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_d && !Subtarget.is64Bit())
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
  case Intrinsic::loongarch_dbar: {
  case Intrinsic::loongarch_ibar: {
  case Intrinsic::loongarch_break: {
  case Intrinsic::loongarch_movgr2fcsr: {
    if (!Subtarget.hasBasicF())
  case Intrinsic::loongarch_syscall: {
#define IOCSRWR_CASE(NAME, NODE) \
  case Intrinsic::loongarch_##NAME: { \
    SDValue Op3 = Op.getOperand(3); \
    return Subtarget.is64Bit() \
               ? DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2), \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op3)) \
               : DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, Op2, \
  case Intrinsic::loongarch_iocsrwr_d: {
    return !Subtarget.is64Bit()
#define ASRT_LE_GT_CASE(NAME) \
  case Intrinsic::loongarch_##NAME: { \
    return !Subtarget.is64Bit() \
               ? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG) \
#undef ASRT_LE_GT_CASE
  case Intrinsic::loongarch_ldpte_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !Subtarget.is64Bit()
  case Intrinsic::loongarch_lsx_vst:
  case Intrinsic::loongarch_lasx_xvst:
  case Intrinsic::loongarch_lasx_xvstelm_b:
  case Intrinsic::loongarch_lsx_vstelm_b:
  case Intrinsic::loongarch_lasx_xvstelm_h:
                   Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lsx_vstelm_h:
                   Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lasx_xvstelm_w:
                   Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lsx_vstelm_w:
                   Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lasx_xvstelm_d:
                   Op, "argument out of range or not a multiple of 8", DAG)
  case Intrinsic::loongarch_lsx_vstelm_d:
                   Op, "argument out of range or not a multiple of 8", DAG)
  EVT VT = Lo.getValueType();

  EVT VT = Lo.getValueType();

    return LoongArchISD::DIV_W;
    return LoongArchISD::DIV_WU;
    return LoongArchISD::MOD_W;
    return LoongArchISD::MOD_WU;
    return LoongArchISD::SLL_W;
    return LoongArchISD::SRA_W;
    return LoongArchISD::SRL_W;
    return LoongArchISD::ROTR_W;
    return LoongArchISD::CTZ_W;
    return LoongArchISD::CLZ_W;
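// customLegalizeToWOp: legalize a 32-bit operation on LA64 by extending the
// i32 operands to i64, emitting the *_W node chosen by getLoongArchWOpcode
// above, and truncating the result back to i32.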
    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0);

    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);

                               StringRef ErrorMsg, bool WithChain = true) {
  Results.push_back(N->getOperand(0));

template <unsigned N>
  const StringRef ErrorMsgOOR = "argument out of range";
  unsigned Imm = Node->getConstantOperandVal(2);
  switch (N->getConstantOperandVal(0)) {
  case Intrinsic::loongarch_lsx_vpickve2gr_b:
                             LoongArchISD::VPICK_SEXT_ELT);
  case Intrinsic::loongarch_lsx_vpickve2gr_h:
  case Intrinsic::loongarch_lasx_xvpickve2gr_w:
                             LoongArchISD::VPICK_SEXT_ELT);
  case Intrinsic::loongarch_lsx_vpickve2gr_w:
                             LoongArchISD::VPICK_SEXT_ELT);
  case Intrinsic::loongarch_lsx_vpickve2gr_bu:
                             LoongArchISD::VPICK_ZEXT_ELT);
  case Intrinsic::loongarch_lsx_vpickve2gr_hu:
  case Intrinsic::loongarch_lasx_xvpickve2gr_wu:
                             LoongArchISD::VPICK_ZEXT_ELT);
  case Intrinsic::loongarch_lsx_vpickve2gr_wu:
                             LoongArchISD::VPICK_ZEXT_ELT);
  case Intrinsic::loongarch_lsx_bz_b:
  case Intrinsic::loongarch_lsx_bz_h:
  case Intrinsic::loongarch_lsx_bz_w:
  case Intrinsic::loongarch_lsx_bz_d:
  case Intrinsic::loongarch_lasx_xbz_b:
  case Intrinsic::loongarch_lasx_xbz_h:
  case Intrinsic::loongarch_lasx_xbz_w:
  case Intrinsic::loongarch_lasx_xbz_d:
                          LoongArchISD::VALL_ZERO);
  case Intrinsic::loongarch_lsx_bz_v:
  case Intrinsic::loongarch_lasx_xbz_v:
                          LoongArchISD::VANY_ZERO);
  case Intrinsic::loongarch_lsx_bnz_b:
  case Intrinsic::loongarch_lsx_bnz_h:
  case Intrinsic::loongarch_lsx_bnz_w:
  case Intrinsic::loongarch_lsx_bnz_d:
  case Intrinsic::loongarch_lasx_xbnz_b:
  case Intrinsic::loongarch_lasx_xbnz_h:
  case Intrinsic::loongarch_lasx_xbnz_w:
  case Intrinsic::loongarch_lasx_xbnz_d:
                          LoongArchISD::VALL_NONZERO);
  case Intrinsic::loongarch_lsx_bnz_v:
  case Intrinsic::loongarch_lasx_xbnz_v:
                          LoongArchISD::VANY_NONZERO);
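// i128 ATOMIC_CMP_SWAP expands to the PseudoCmpXchg128 pseudos (the acquire
// variant is chosen from the merged memory ordering); the expected and new
// values are split into i64 halves before the machine node is built.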
  assert(N->getValueType(0) == MVT::i128 &&
         "AtomicCmpSwap on types less than 128 should be legal");

  switch (MemOp->getMergedOrdering()) {
    Opcode = LoongArch::PseudoCmpXchg128Acquire;
    Opcode = LoongArch::PseudoCmpXchg128;

  auto CmpVal = DAG.SplitScalar(N->getOperand(2), DL, MVT::i64, MVT::i64);
  auto NewVal = DAG.SplitScalar(N->getOperand(3), DL, MVT::i64, MVT::i64);
  SDValue Ops[] = {N->getOperand(1), CmpVal.first, CmpVal.second,
                   NewVal.first, NewVal.second, N->getOperand(0)};

      Opcode, SDLoc(N), DAG.getVTList(MVT::i64, MVT::i64, MVT::i64, MVT::Other),
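// ReplaceNodeResults: i32-typed results on LA64 are generally recomputed in
// i64 (via the *_W nodes or the LA64 move helpers) and truncated back; FP
// conversions and byte/bit reversal follow the same extend-operate-truncate
// pattern.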
  EVT VT = N->getValueType(0);
  switch (N->getOpcode()) {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");

    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
        Subtarget.hasDiv32() && VT == MVT::i32

    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");

    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");

    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");

    if (Src.getValueType() == MVT::f16)

    EVT OpVT = Src.getValueType();
    std::tie(Result, Chain) =

    EVT SrcVT = Src.getValueType();
    if (VT == MVT::i32 && SrcVT == MVT::f32 && Subtarget.is64Bit() &&
        Subtarget.hasBasicF()) {
          DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Src);
    } else if (VT == MVT::i64 && SrcVT == MVT::f64 && !Subtarget.is64Bit()) {
                        DAG.getVTList(MVT::i32, MVT::i32), Src);

    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    TLI.expandFP_TO_UINT(N, Tmp1, Tmp2, DAG);

    assert((VT == MVT::i16 || VT == MVT::i32) &&
           "Unexpected custom legalization");
    MVT GRLenVT = Subtarget.getGRLenVT();
      Tmp = DAG.getNode(LoongArchISD::REVB_2H, DL, GRLenVT, NewSrc);
      Tmp = DAG.getNode(LoongArchISD::REVB_2W, DL, GRLenVT, NewSrc);

    assert((VT == MVT::i8 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
           "Unexpected custom legalization");
    MVT GRLenVT = Subtarget.getGRLenVT();
      Tmp = DAG.getNode(LoongArchISD::BITREV_4B, DL, GRLenVT, NewSrc);
      Tmp = DAG.getNode(LoongArchISD::BITREV_W, DL, GRLenVT, NewSrc);

    assert(VT == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");

    MVT GRLenVT = Subtarget.getGRLenVT();
    const StringRef ErrorMsgOOR = "argument out of range";
    const StringRef ErrorMsgReqLA64 = "requires loongarch64";
    const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

    switch (N->getConstantOperandVal(1)) {
    case Intrinsic::loongarch_movfcsr2gr: {
      if (!Subtarget.hasBasicF()) {
          LoongArchISD::MOVFCSR2GR, SDLoc(N), {MVT::i64, MVT::Other},

#define CRC_CASE_EXT_BINARYOP(NAME, NODE) \
  case Intrinsic::loongarch_##NAME: { \
    SDValue NODE = DAG.getNode( \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
        {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2), \
         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))}); \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0))); \
    Results.push_back(NODE.getValue(1)); \
#undef CRC_CASE_EXT_BINARYOP

#define CRC_CASE_EXT_UNARYOP(NAME, NODE) \
  case Intrinsic::loongarch_##NAME: { \
    SDValue NODE = DAG.getNode( \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))}); \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0))); \
    Results.push_back(NODE.getValue(1)); \
#undef CRC_CASE_EXT_UNARYOP
#define CSR_CASE(ID) \
  case Intrinsic::loongarch_##ID: { \
    if (!Subtarget.is64Bit()) \
      emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqLA64); \

    case Intrinsic::loongarch_csrrd_w: {
          DAG.getNode(LoongArchISD::CSRRD, DL, {GRLenVT, MVT::Other},
    case Intrinsic::loongarch_csrwr_w: {
      unsigned Imm = N->getConstantOperandVal(3);
          DAG.getNode(LoongArchISD::CSRWR, DL, {GRLenVT, MVT::Other},
    case Intrinsic::loongarch_csrxchg_w: {
      unsigned Imm = N->getConstantOperandVal(4);
          LoongArchISD::CSRXCHG, DL, {GRLenVT, MVT::Other},

#define IOCSRRD_CASE(NAME, NODE) \
  case Intrinsic::loongarch_##NAME: { \
    SDValue IOCSRRDResults = \
        DAG.getNode(LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
                    {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2)}); \
    Results.push_back( \
        DAG.getNode(ISD::TRUNCATE, DL, VT, IOCSRRDResults.getValue(0))); \
    Results.push_back(IOCSRRDResults.getValue(1)); \

    case Intrinsic::loongarch_cpucfg: {
          DAG.getNode(LoongArchISD::CPUCFG, DL, {GRLenVT, MVT::Other},
    case Intrinsic::loongarch_lddir_d: {
      if (!Subtarget.is64Bit()) {

    if (Subtarget.is64Bit())
              "On LA64, only 64-bit registers can be read.");
              "On LA32, only 32-bit registers can be read.");
    Results.push_back(N->getOperand(0));

        OpVT == MVT::f64 ? RTLIB::LROUND_F64 : RTLIB::LROUND_F32;
  MVT VT = N->getSimpleValueType(0);
  EVT InVT = In.getValueType();

  for (unsigned I = 0; I < MinElts; ++I)
    TruncMask[I] = Scale * I;

  unsigned WidenNumElts = 128 / In.getScalarValueSizeInBits();
  MVT SVT = In.getSimpleValueType().getScalarType();

         "Illegal vector type in truncation");

  assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDN");

  MVT VT = N->getSimpleValueType(0);

  return DAG.getNode(LoongArchISD::VANDN, DL, VT, X, Y);
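// The AND/SRL combines below recognize "shift right, then mask" patterns and
// rewrite them as a single BSTRPICK bit-field extract when the mask is a
// contiguous run of ones that fits inside the value width (requires 32S).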
  SDValue FirstOperand = N->getOperand(0);
  SDValue SecondOperand = N->getOperand(1);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);

  unsigned SMIdx, SMLen;

  if (!Subtarget.has32S())

    if (SMIdx != 0 || lsb + SMLen > ValTy.getSizeInBits())

    if (SMIdx + SMLen > ValTy.getSizeInBits())

    NewOperand = FirstOperand;

  msb = lsb + SMLen - 1;
  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL || lsb == 0)

  if (!Subtarget.has32S())

  SDValue FirstOperand = N->getOperand(0);
  EVT ValTy = N->getValueType(0);

  unsigned MaskIdx, MaskLen;

    if (MaskIdx <= Shamt && Shamt <= MaskIdx + MaskLen - 1)
      return DAG.getNode(LoongArchISD::BSTRPICK, DL, ValTy,
5380 switch (Src.getOpcode()) {
5383 return Src.getOperand(0).getValueSizeInBits() ==
Size;
5393 return Src.getOperand(0).getScalarValueSizeInBits() == 1 &&
5406 switch (Src.getOpcode()) {
5416 Src.getOpcode(),
DL, SExtVT,
5422 DL, SExtVT, Src.getOperand(0),
5434 EVT VT =
N->getValueType(0);
5436 EVT SrcVT = Src.getValueType();
5438 if (Src.getOpcode() !=
ISD::SETCC || !Src.hasOneUse())
5443 EVT CmpVT = Src.getOperand(0).getValueType();
5448 else if (Subtarget.has32S() && Subtarget.hasExtLASX() &&
5461 Opc = UseLASX ? LoongArchISD::XVMSKEQZ : LoongArchISD::VMSKEQZ;
5466 Opc = UseLASX ? LoongArchISD::XVMSKGEZ : LoongArchISD::VMSKGEZ;
5471 Opc = UseLASX ? LoongArchISD::XVMSKGEZ : LoongArchISD::VMSKGEZ;
5476 (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
5478 Opc = UseLASX ? LoongArchISD::XVMSKLTZ : LoongArchISD::VMSKLTZ;
5483 (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
5485 Opc = UseLASX ? LoongArchISD::XVMSKLTZ : LoongArchISD::VMSKLTZ;
5490 Opc = UseLASX ? LoongArchISD::XVMSKNEZ : LoongArchISD::VMSKNEZ;
5507   EVT VT = N->getValueType(0);
5509   EVT SrcVT = Src.getValueType();
5526   bool UseLASX = false;
5527   bool PropagateSExt = false;
5529   if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse()) {
5530 EVT CmpVT = Src.getOperand(0).getValueType();
5539 SExtVT = MVT::v2i64;
5542 SExtVT = MVT::v4i32;
5544 SExtVT = MVT::v4i64;
5546 PropagateSExt =
true;
5550 SExtVT = MVT::v8i16;
5552 SExtVT = MVT::v8i32;
5554 PropagateSExt =
true;
5558 SExtVT = MVT::v16i8;
5560 SExtVT = MVT::v16i16;
5562 PropagateSExt =
true;
5566 SExtVT = MVT::v32i8;
5574 if (!Subtarget.has32S() || !Subtarget.hasExtLASX()) {
5575 if (Src.getSimpleValueType() == MVT::v32i8) {
5583 }
else if (UseLASX) {
5589 Opc = UseLASX ? LoongArchISD::XVMSKLTZ : LoongArchISD::VMSKLTZ;
5602   EVT ValTy = N->getValueType(0);
5603   SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
5606 unsigned ValBits = ValTy.getSizeInBits();
5607 unsigned MaskIdx0, MaskLen0, MaskIdx1, MaskLen1;
5609 bool SwapAndRetried =
false;
5612 if (!Subtarget.has32S())
5618 if (ValBits != 32 && ValBits != 64)
5633 MaskIdx0 == MaskIdx1 && MaskLen0 == MaskLen1 &&
5636 (MaskIdx0 + MaskLen0 <= ValBits)) {
5657 MaskLen0 == MaskLen1 && MaskIdx1 == 0 &&
5658 (MaskIdx0 + MaskLen0 <= ValBits)) {
5675 (MaskIdx0 + MaskLen0 <= 64) &&
5683 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
5684 : (MaskIdx0 + MaskLen0 - 1),
5700 (MaskIdx0 + MaskLen0 <= ValBits)) {
5723         DAG.getConstant(ValBits == 32 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
5724                                       : (MaskIdx0 + MaskLen0 - 1),
5739 unsigned MaskIdx, MaskLen;
5740   if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::AND &&
5749     return DAG.getNode(LoongArchISD::BSTRINS, DL, ValTy, N0,
5767       N1.getOperand(0).getOpcode() == ISD::SHL &&
5773     return DAG.getNode(LoongArchISD::BSTRINS, DL, ValTy, N0,
5781 if (!SwapAndRetried) {
5783     SwapAndRetried = true;
5787   SwapAndRetried = false;
5804     return DAG.getNode(LoongArchISD::BSTRINS, DL, ValTy, N0,
5813 if (!SwapAndRetried) {
5815 SwapAndRetried =
true;
5825 switch (V.getNode()->getOpcode()) {
5837   if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {
5845   if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {
5922   SDNode *AndNode = N->getOperand(0).getNode();
5930   SDValue CmpInputValue = N->getOperand(1);
5939   if (!CN || !CN->isZero())
5941     AndInputValue1 = AndInputValue1.getOperand(0);
5945 if (AndInputValue2 != CmpInputValue)
5978 TruncInputValue1, TruncInputValue2);
5980       DAG.getSetCC(SDLoc(N), N->getValueType(0), NewAnd, TruncInputValue2, CC);
5993 if (Src.getOpcode() != LoongArchISD::REVB_2W)
5996   return DAG.getNode(LoongArchISD::BITREV_4B, SDLoc(N), N->getValueType(0),
6021 LHS.getOperand(0).getValueType() == Subtarget.
getGRLenVT()) {
6049 ShAmt =
LHS.getValueSizeInBits() - 1 - ShAmt;
6082     return DAG.getNode(LoongArchISD::BR_CC, DL, N->getValueType(0),
6083                        N->getOperand(0), LHS, RHS, CC, N->getOperand(4));
6099 EVT VT =
N->getValueType(0);
6102 if (TrueV == FalseV)
6133   return DAG.getNode(LoongArchISD::SELECT_CC, DL, N->getValueType(0),
6134                      {LHS, RHS, CC, TrueV, FalseV});
6139template <unsigned N>
6143                                 bool IsSigned = false) {
6147   if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
6148       (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
6150 ": argument out of range.");
6156template <unsigned N>
6160   EVT ResTy = Node->getValueType(0);
6164   if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
6165       (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
6167 ": argument out of range.");
6172 IsSigned ? CImm->getSExtValue() : CImm->getZExtValue(), IsSigned),
6178 EVT ResTy =
Node->getValueType(0);
6186 EVT ResTy =
Node->getValueType(0);
6195template <
unsigned N>
6198 EVT ResTy =
Node->getValueType(0);
6203 ": argument out of range.");
6213template <
unsigned N>
6216 EVT ResTy =
Node->getValueType(0);
6221 ": argument out of range.");
6230template <
unsigned N>
6233 EVT ResTy =
Node->getValueType(0);
6238 ": argument out of range.");
6247template <unsigned W>
6250   unsigned Imm = N->getConstantOperandVal(2);
6252   const StringRef ErrorMsg = "argument out of range";
6254     return DAG.getUNDEF(N->getValueType(0));
6260   return DAG.getNode(ResOp, DL, N->getValueType(0), Vec, Idx, EltVT);
6268 switch (
N->getConstantOperandVal(0)) {
6271 case Intrinsic::loongarch_lsx_vadd_b:
6272 case Intrinsic::loongarch_lsx_vadd_h:
6273 case Intrinsic::loongarch_lsx_vadd_w:
6274 case Intrinsic::loongarch_lsx_vadd_d:
6275 case Intrinsic::loongarch_lasx_xvadd_b:
6276 case Intrinsic::loongarch_lasx_xvadd_h:
6277 case Intrinsic::loongarch_lasx_xvadd_w:
6278 case Intrinsic::loongarch_lasx_xvadd_d:
6281 case Intrinsic::loongarch_lsx_vaddi_bu:
6282 case Intrinsic::loongarch_lsx_vaddi_hu:
6283 case Intrinsic::loongarch_lsx_vaddi_wu:
6284 case Intrinsic::loongarch_lsx_vaddi_du:
6285 case Intrinsic::loongarch_lasx_xvaddi_bu:
6286 case Intrinsic::loongarch_lasx_xvaddi_hu:
6287 case Intrinsic::loongarch_lasx_xvaddi_wu:
6288 case Intrinsic::loongarch_lasx_xvaddi_du:
6291 case Intrinsic::loongarch_lsx_vsub_b:
6292 case Intrinsic::loongarch_lsx_vsub_h:
6293 case Intrinsic::loongarch_lsx_vsub_w:
6294 case Intrinsic::loongarch_lsx_vsub_d:
6295 case Intrinsic::loongarch_lasx_xvsub_b:
6296 case Intrinsic::loongarch_lasx_xvsub_h:
6297 case Intrinsic::loongarch_lasx_xvsub_w:
6298 case Intrinsic::loongarch_lasx_xvsub_d:
6301 case Intrinsic::loongarch_lsx_vsubi_bu:
6302 case Intrinsic::loongarch_lsx_vsubi_hu:
6303 case Intrinsic::loongarch_lsx_vsubi_wu:
6304 case Intrinsic::loongarch_lsx_vsubi_du:
6305 case Intrinsic::loongarch_lasx_xvsubi_bu:
6306 case Intrinsic::loongarch_lasx_xvsubi_hu:
6307 case Intrinsic::loongarch_lasx_xvsubi_wu:
6308 case Intrinsic::loongarch_lasx_xvsubi_du:
6311 case Intrinsic::loongarch_lsx_vneg_b:
6312 case Intrinsic::loongarch_lsx_vneg_h:
6313 case Intrinsic::loongarch_lsx_vneg_w:
6314 case Intrinsic::loongarch_lsx_vneg_d:
6315 case Intrinsic::loongarch_lasx_xvneg_b:
6316 case Intrinsic::loongarch_lasx_xvneg_h:
6317 case Intrinsic::loongarch_lasx_xvneg_w:
6318 case Intrinsic::loongarch_lasx_xvneg_d:
6322                     APInt(N->getValueType(0).getScalarType().getSizeInBits(), 0,
6324                 SDLoc(N), N->getValueType(0)),
6326 case Intrinsic::loongarch_lsx_vmax_b:
6327 case Intrinsic::loongarch_lsx_vmax_h:
6328 case Intrinsic::loongarch_lsx_vmax_w:
6329 case Intrinsic::loongarch_lsx_vmax_d:
6330 case Intrinsic::loongarch_lasx_xvmax_b:
6331 case Intrinsic::loongarch_lasx_xvmax_h:
6332 case Intrinsic::loongarch_lasx_xvmax_w:
6333 case Intrinsic::loongarch_lasx_xvmax_d:
6336 case Intrinsic::loongarch_lsx_vmax_bu:
6337 case Intrinsic::loongarch_lsx_vmax_hu:
6338 case Intrinsic::loongarch_lsx_vmax_wu:
6339 case Intrinsic::loongarch_lsx_vmax_du:
6340 case Intrinsic::loongarch_lasx_xvmax_bu:
6341 case Intrinsic::loongarch_lasx_xvmax_hu:
6342 case Intrinsic::loongarch_lasx_xvmax_wu:
6343 case Intrinsic::loongarch_lasx_xvmax_du:
6346 case Intrinsic::loongarch_lsx_vmaxi_b:
6347 case Intrinsic::loongarch_lsx_vmaxi_h:
6348 case Intrinsic::loongarch_lsx_vmaxi_w:
6349 case Intrinsic::loongarch_lsx_vmaxi_d:
6350 case Intrinsic::loongarch_lasx_xvmaxi_b:
6351 case Intrinsic::loongarch_lasx_xvmaxi_h:
6352 case Intrinsic::loongarch_lasx_xvmaxi_w:
6353 case Intrinsic::loongarch_lasx_xvmaxi_d:
6356 case Intrinsic::loongarch_lsx_vmaxi_bu:
6357 case Intrinsic::loongarch_lsx_vmaxi_hu:
6358 case Intrinsic::loongarch_lsx_vmaxi_wu:
6359 case Intrinsic::loongarch_lsx_vmaxi_du:
6360 case Intrinsic::loongarch_lasx_xvmaxi_bu:
6361 case Intrinsic::loongarch_lasx_xvmaxi_hu:
6362 case Intrinsic::loongarch_lasx_xvmaxi_wu:
6363 case Intrinsic::loongarch_lasx_xvmaxi_du:
6366 case Intrinsic::loongarch_lsx_vmin_b:
6367 case Intrinsic::loongarch_lsx_vmin_h:
6368 case Intrinsic::loongarch_lsx_vmin_w:
6369 case Intrinsic::loongarch_lsx_vmin_d:
6370 case Intrinsic::loongarch_lasx_xvmin_b:
6371 case Intrinsic::loongarch_lasx_xvmin_h:
6372 case Intrinsic::loongarch_lasx_xvmin_w:
6373 case Intrinsic::loongarch_lasx_xvmin_d:
6376 case Intrinsic::loongarch_lsx_vmin_bu:
6377 case Intrinsic::loongarch_lsx_vmin_hu:
6378 case Intrinsic::loongarch_lsx_vmin_wu:
6379 case Intrinsic::loongarch_lsx_vmin_du:
6380 case Intrinsic::loongarch_lasx_xvmin_bu:
6381 case Intrinsic::loongarch_lasx_xvmin_hu:
6382 case Intrinsic::loongarch_lasx_xvmin_wu:
6383 case Intrinsic::loongarch_lasx_xvmin_du:
6386 case Intrinsic::loongarch_lsx_vmini_b:
6387 case Intrinsic::loongarch_lsx_vmini_h:
6388 case Intrinsic::loongarch_lsx_vmini_w:
6389 case Intrinsic::loongarch_lsx_vmini_d:
6390 case Intrinsic::loongarch_lasx_xvmini_b:
6391 case Intrinsic::loongarch_lasx_xvmini_h:
6392 case Intrinsic::loongarch_lasx_xvmini_w:
6393 case Intrinsic::loongarch_lasx_xvmini_d:
6396 case Intrinsic::loongarch_lsx_vmini_bu:
6397 case Intrinsic::loongarch_lsx_vmini_hu:
6398 case Intrinsic::loongarch_lsx_vmini_wu:
6399 case Intrinsic::loongarch_lsx_vmini_du:
6400 case Intrinsic::loongarch_lasx_xvmini_bu:
6401 case Intrinsic::loongarch_lasx_xvmini_hu:
6402 case Intrinsic::loongarch_lasx_xvmini_wu:
6403 case Intrinsic::loongarch_lasx_xvmini_du:
6406 case Intrinsic::loongarch_lsx_vmul_b:
6407 case Intrinsic::loongarch_lsx_vmul_h:
6408 case Intrinsic::loongarch_lsx_vmul_w:
6409 case Intrinsic::loongarch_lsx_vmul_d:
6410 case Intrinsic::loongarch_lasx_xvmul_b:
6411 case Intrinsic::loongarch_lasx_xvmul_h:
6412 case Intrinsic::loongarch_lasx_xvmul_w:
6413 case Intrinsic::loongarch_lasx_xvmul_d:
6416 case Intrinsic::loongarch_lsx_vmadd_b:
6417 case Intrinsic::loongarch_lsx_vmadd_h:
6418 case Intrinsic::loongarch_lsx_vmadd_w:
6419 case Intrinsic::loongarch_lsx_vmadd_d:
6420 case Intrinsic::loongarch_lasx_xvmadd_b:
6421 case Intrinsic::loongarch_lasx_xvmadd_h:
6422 case Intrinsic::loongarch_lasx_xvmadd_w:
6423 case Intrinsic::loongarch_lasx_xvmadd_d: {
6424 EVT ResTy =
N->getValueType(0);
6429 case Intrinsic::loongarch_lsx_vmsub_b:
6430 case Intrinsic::loongarch_lsx_vmsub_h:
6431 case Intrinsic::loongarch_lsx_vmsub_w:
6432 case Intrinsic::loongarch_lsx_vmsub_d:
6433 case Intrinsic::loongarch_lasx_xvmsub_b:
6434 case Intrinsic::loongarch_lasx_xvmsub_h:
6435 case Intrinsic::loongarch_lasx_xvmsub_w:
6436 case Intrinsic::loongarch_lasx_xvmsub_d: {
6437 EVT ResTy =
N->getValueType(0);
6442 case Intrinsic::loongarch_lsx_vdiv_b:
6443 case Intrinsic::loongarch_lsx_vdiv_h:
6444 case Intrinsic::loongarch_lsx_vdiv_w:
6445 case Intrinsic::loongarch_lsx_vdiv_d:
6446 case Intrinsic::loongarch_lasx_xvdiv_b:
6447 case Intrinsic::loongarch_lasx_xvdiv_h:
6448 case Intrinsic::loongarch_lasx_xvdiv_w:
6449 case Intrinsic::loongarch_lasx_xvdiv_d:
6452 case Intrinsic::loongarch_lsx_vdiv_bu:
6453 case Intrinsic::loongarch_lsx_vdiv_hu:
6454 case Intrinsic::loongarch_lsx_vdiv_wu:
6455 case Intrinsic::loongarch_lsx_vdiv_du:
6456 case Intrinsic::loongarch_lasx_xvdiv_bu:
6457 case Intrinsic::loongarch_lasx_xvdiv_hu:
6458 case Intrinsic::loongarch_lasx_xvdiv_wu:
6459 case Intrinsic::loongarch_lasx_xvdiv_du:
6462 case Intrinsic::loongarch_lsx_vmod_b:
6463 case Intrinsic::loongarch_lsx_vmod_h:
6464 case Intrinsic::loongarch_lsx_vmod_w:
6465 case Intrinsic::loongarch_lsx_vmod_d:
6466 case Intrinsic::loongarch_lasx_xvmod_b:
6467 case Intrinsic::loongarch_lasx_xvmod_h:
6468 case Intrinsic::loongarch_lasx_xvmod_w:
6469 case Intrinsic::loongarch_lasx_xvmod_d:
6472 case Intrinsic::loongarch_lsx_vmod_bu:
6473 case Intrinsic::loongarch_lsx_vmod_hu:
6474 case Intrinsic::loongarch_lsx_vmod_wu:
6475 case Intrinsic::loongarch_lsx_vmod_du:
6476 case Intrinsic::loongarch_lasx_xvmod_bu:
6477 case Intrinsic::loongarch_lasx_xvmod_hu:
6478 case Intrinsic::loongarch_lasx_xvmod_wu:
6479 case Intrinsic::loongarch_lasx_xvmod_du:
6482 case Intrinsic::loongarch_lsx_vand_v:
6483 case Intrinsic::loongarch_lasx_xvand_v:
6486 case Intrinsic::loongarch_lsx_vor_v:
6487 case Intrinsic::loongarch_lasx_xvor_v:
6490 case Intrinsic::loongarch_lsx_vxor_v:
6491 case Intrinsic::loongarch_lasx_xvxor_v:
6494 case Intrinsic::loongarch_lsx_vnor_v:
6495 case Intrinsic::loongarch_lasx_xvnor_v: {
6500 case Intrinsic::loongarch_lsx_vandi_b:
6501 case Intrinsic::loongarch_lasx_xvandi_b:
6504 case Intrinsic::loongarch_lsx_vori_b:
6505 case Intrinsic::loongarch_lasx_xvori_b:
6508 case Intrinsic::loongarch_lsx_vxori_b:
6509 case Intrinsic::loongarch_lasx_xvxori_b:
6512 case Intrinsic::loongarch_lsx_vsll_b:
6513 case Intrinsic::loongarch_lsx_vsll_h:
6514 case Intrinsic::loongarch_lsx_vsll_w:
6515 case Intrinsic::loongarch_lsx_vsll_d:
6516 case Intrinsic::loongarch_lasx_xvsll_b:
6517 case Intrinsic::loongarch_lasx_xvsll_h:
6518 case Intrinsic::loongarch_lasx_xvsll_w:
6519 case Intrinsic::loongarch_lasx_xvsll_d:
6522 case Intrinsic::loongarch_lsx_vslli_b:
6523 case Intrinsic::loongarch_lasx_xvslli_b:
6526 case Intrinsic::loongarch_lsx_vslli_h:
6527 case Intrinsic::loongarch_lasx_xvslli_h:
6530 case Intrinsic::loongarch_lsx_vslli_w:
6531 case Intrinsic::loongarch_lasx_xvslli_w:
6534 case Intrinsic::loongarch_lsx_vslli_d:
6535 case Intrinsic::loongarch_lasx_xvslli_d:
6538 case Intrinsic::loongarch_lsx_vsrl_b:
6539 case Intrinsic::loongarch_lsx_vsrl_h:
6540 case Intrinsic::loongarch_lsx_vsrl_w:
6541 case Intrinsic::loongarch_lsx_vsrl_d:
6542 case Intrinsic::loongarch_lasx_xvsrl_b:
6543 case Intrinsic::loongarch_lasx_xvsrl_h:
6544 case Intrinsic::loongarch_lasx_xvsrl_w:
6545 case Intrinsic::loongarch_lasx_xvsrl_d:
6548 case Intrinsic::loongarch_lsx_vsrli_b:
6549 case Intrinsic::loongarch_lasx_xvsrli_b:
6552 case Intrinsic::loongarch_lsx_vsrli_h:
6553 case Intrinsic::loongarch_lasx_xvsrli_h:
6556 case Intrinsic::loongarch_lsx_vsrli_w:
6557 case Intrinsic::loongarch_lasx_xvsrli_w:
6560 case Intrinsic::loongarch_lsx_vsrli_d:
6561 case Intrinsic::loongarch_lasx_xvsrli_d:
6564 case Intrinsic::loongarch_lsx_vsra_b:
6565 case Intrinsic::loongarch_lsx_vsra_h:
6566 case Intrinsic::loongarch_lsx_vsra_w:
6567 case Intrinsic::loongarch_lsx_vsra_d:
6568 case Intrinsic::loongarch_lasx_xvsra_b:
6569 case Intrinsic::loongarch_lasx_xvsra_h:
6570 case Intrinsic::loongarch_lasx_xvsra_w:
6571 case Intrinsic::loongarch_lasx_xvsra_d:
6574 case Intrinsic::loongarch_lsx_vsrai_b:
6575 case Intrinsic::loongarch_lasx_xvsrai_b:
6578 case Intrinsic::loongarch_lsx_vsrai_h:
6579 case Intrinsic::loongarch_lasx_xvsrai_h:
6582 case Intrinsic::loongarch_lsx_vsrai_w:
6583 case Intrinsic::loongarch_lasx_xvsrai_w:
6586 case Intrinsic::loongarch_lsx_vsrai_d:
6587 case Intrinsic::loongarch_lasx_xvsrai_d:
6590 case Intrinsic::loongarch_lsx_vclz_b:
6591 case Intrinsic::loongarch_lsx_vclz_h:
6592 case Intrinsic::loongarch_lsx_vclz_w:
6593 case Intrinsic::loongarch_lsx_vclz_d:
6594 case Intrinsic::loongarch_lasx_xvclz_b:
6595 case Intrinsic::loongarch_lasx_xvclz_h:
6596 case Intrinsic::loongarch_lasx_xvclz_w:
6597 case Intrinsic::loongarch_lasx_xvclz_d:
6599 case Intrinsic::loongarch_lsx_vpcnt_b:
6600 case Intrinsic::loongarch_lsx_vpcnt_h:
6601 case Intrinsic::loongarch_lsx_vpcnt_w:
6602 case Intrinsic::loongarch_lsx_vpcnt_d:
6603 case Intrinsic::loongarch_lasx_xvpcnt_b:
6604 case Intrinsic::loongarch_lasx_xvpcnt_h:
6605 case Intrinsic::loongarch_lasx_xvpcnt_w:
6606 case Intrinsic::loongarch_lasx_xvpcnt_d:
6608 case Intrinsic::loongarch_lsx_vbitclr_b:
6609 case Intrinsic::loongarch_lsx_vbitclr_h:
6610 case Intrinsic::loongarch_lsx_vbitclr_w:
6611 case Intrinsic::loongarch_lsx_vbitclr_d:
6612 case Intrinsic::loongarch_lasx_xvbitclr_b:
6613 case Intrinsic::loongarch_lasx_xvbitclr_h:
6614 case Intrinsic::loongarch_lasx_xvbitclr_w:
6615 case Intrinsic::loongarch_lasx_xvbitclr_d:
6617 case Intrinsic::loongarch_lsx_vbitclri_b:
6618 case Intrinsic::loongarch_lasx_xvbitclri_b:
6620 case Intrinsic::loongarch_lsx_vbitclri_h:
6621 case Intrinsic::loongarch_lasx_xvbitclri_h:
6623 case Intrinsic::loongarch_lsx_vbitclri_w:
6624 case Intrinsic::loongarch_lasx_xvbitclri_w:
6626 case Intrinsic::loongarch_lsx_vbitclri_d:
6627 case Intrinsic::loongarch_lasx_xvbitclri_d:
6629 case Intrinsic::loongarch_lsx_vbitset_b:
6630 case Intrinsic::loongarch_lsx_vbitset_h:
6631 case Intrinsic::loongarch_lsx_vbitset_w:
6632 case Intrinsic::loongarch_lsx_vbitset_d:
6633 case Intrinsic::loongarch_lasx_xvbitset_b:
6634 case Intrinsic::loongarch_lasx_xvbitset_h:
6635 case Intrinsic::loongarch_lasx_xvbitset_w:
6636 case Intrinsic::loongarch_lasx_xvbitset_d: {
6637 EVT VecTy =
N->getValueType(0);
6643 case Intrinsic::loongarch_lsx_vbitseti_b:
6644 case Intrinsic::loongarch_lasx_xvbitseti_b:
6646 case Intrinsic::loongarch_lsx_vbitseti_h:
6647 case Intrinsic::loongarch_lasx_xvbitseti_h:
6649 case Intrinsic::loongarch_lsx_vbitseti_w:
6650 case Intrinsic::loongarch_lasx_xvbitseti_w:
6652 case Intrinsic::loongarch_lsx_vbitseti_d:
6653 case Intrinsic::loongarch_lasx_xvbitseti_d:
6655 case Intrinsic::loongarch_lsx_vbitrev_b:
6656 case Intrinsic::loongarch_lsx_vbitrev_h:
6657 case Intrinsic::loongarch_lsx_vbitrev_w:
6658 case Intrinsic::loongarch_lsx_vbitrev_d:
6659 case Intrinsic::loongarch_lasx_xvbitrev_b:
6660 case Intrinsic::loongarch_lasx_xvbitrev_h:
6661 case Intrinsic::loongarch_lasx_xvbitrev_w:
6662 case Intrinsic::loongarch_lasx_xvbitrev_d: {
6663 EVT VecTy =
N->getValueType(0);
6669 case Intrinsic::loongarch_lsx_vbitrevi_b:
6670 case Intrinsic::loongarch_lasx_xvbitrevi_b:
6672 case Intrinsic::loongarch_lsx_vbitrevi_h:
6673 case Intrinsic::loongarch_lasx_xvbitrevi_h:
6675 case Intrinsic::loongarch_lsx_vbitrevi_w:
6676 case Intrinsic::loongarch_lasx_xvbitrevi_w:
6678 case Intrinsic::loongarch_lsx_vbitrevi_d:
6679 case Intrinsic::loongarch_lasx_xvbitrevi_d:
6681 case Intrinsic::loongarch_lsx_vfadd_s:
6682 case Intrinsic::loongarch_lsx_vfadd_d:
6683 case Intrinsic::loongarch_lasx_xvfadd_s:
6684 case Intrinsic::loongarch_lasx_xvfadd_d:
6687 case Intrinsic::loongarch_lsx_vfsub_s:
6688 case Intrinsic::loongarch_lsx_vfsub_d:
6689 case Intrinsic::loongarch_lasx_xvfsub_s:
6690 case Intrinsic::loongarch_lasx_xvfsub_d:
6693 case Intrinsic::loongarch_lsx_vfmul_s:
6694 case Intrinsic::loongarch_lsx_vfmul_d:
6695 case Intrinsic::loongarch_lasx_xvfmul_s:
6696 case Intrinsic::loongarch_lasx_xvfmul_d:
6699 case Intrinsic::loongarch_lsx_vfdiv_s:
6700 case Intrinsic::loongarch_lsx_vfdiv_d:
6701 case Intrinsic::loongarch_lasx_xvfdiv_s:
6702 case Intrinsic::loongarch_lasx_xvfdiv_d:
6705 case Intrinsic::loongarch_lsx_vfmadd_s:
6706 case Intrinsic::loongarch_lsx_vfmadd_d:
6707 case Intrinsic::loongarch_lasx_xvfmadd_s:
6708 case Intrinsic::loongarch_lasx_xvfmadd_d:
6710 N->getOperand(2),
N->getOperand(3));
6711 case Intrinsic::loongarch_lsx_vinsgr2vr_b:
6713 N->getOperand(1),
N->getOperand(2),
6715 case Intrinsic::loongarch_lsx_vinsgr2vr_h:
6716 case Intrinsic::loongarch_lasx_xvinsgr2vr_w:
6718 N->getOperand(1),
N->getOperand(2),
6720 case Intrinsic::loongarch_lsx_vinsgr2vr_w:
6721 case Intrinsic::loongarch_lasx_xvinsgr2vr_d:
6723 N->getOperand(1),
N->getOperand(2),
6725 case Intrinsic::loongarch_lsx_vinsgr2vr_d:
6727 N->getOperand(1),
N->getOperand(2),
6729 case Intrinsic::loongarch_lsx_vreplgr2vr_b:
6730 case Intrinsic::loongarch_lsx_vreplgr2vr_h:
6731 case Intrinsic::loongarch_lsx_vreplgr2vr_w:
6732 case Intrinsic::loongarch_lsx_vreplgr2vr_d:
6733 case Intrinsic::loongarch_lasx_xvreplgr2vr_b:
6734 case Intrinsic::loongarch_lasx_xvreplgr2vr_h:
6735 case Intrinsic::loongarch_lasx_xvreplgr2vr_w:
6736 case Intrinsic::loongarch_lasx_xvreplgr2vr_d:
6737 return DAG.
getNode(LoongArchISD::VREPLGR2VR,
DL,
N->getValueType(0),
6740 case Intrinsic::loongarch_lsx_vreplve_b:
6741 case Intrinsic::loongarch_lsx_vreplve_h:
6742 case Intrinsic::loongarch_lsx_vreplve_w:
6743 case Intrinsic::loongarch_lsx_vreplve_d:
6744 case Intrinsic::loongarch_lasx_xvreplve_b:
6745 case Intrinsic::loongarch_lasx_xvreplve_h:
6746 case Intrinsic::loongarch_lasx_xvreplve_w:
6747 case Intrinsic::loongarch_lasx_xvreplve_d:
6748 return DAG.
getNode(LoongArchISD::VREPLVE,
DL,
N->getValueType(0),
6752 case Intrinsic::loongarch_lsx_vpickve2gr_b:
6756 case Intrinsic::loongarch_lsx_vpickve2gr_h:
6757 case Intrinsic::loongarch_lasx_xvpickve2gr_w:
6761 case Intrinsic::loongarch_lsx_vpickve2gr_w:
6765 case Intrinsic::loongarch_lsx_vpickve2gr_bu:
6769 case Intrinsic::loongarch_lsx_vpickve2gr_hu:
6770 case Intrinsic::loongarch_lasx_xvpickve2gr_wu:
6774 case Intrinsic::loongarch_lsx_vpickve2gr_wu:
6778 case Intrinsic::loongarch_lsx_bz_b:
6779 case Intrinsic::loongarch_lsx_bz_h:
6780 case Intrinsic::loongarch_lsx_bz_w:
6781 case Intrinsic::loongarch_lsx_bz_d:
6782 case Intrinsic::loongarch_lasx_xbz_b:
6783 case Intrinsic::loongarch_lasx_xbz_h:
6784 case Intrinsic::loongarch_lasx_xbz_w:
6785 case Intrinsic::loongarch_lasx_xbz_d:
6787 return DAG.
getNode(LoongArchISD::VALL_ZERO,
DL,
N->getValueType(0),
6790 case Intrinsic::loongarch_lsx_bz_v:
6791 case Intrinsic::loongarch_lasx_xbz_v:
6793 return DAG.
getNode(LoongArchISD::VANY_ZERO,
DL,
N->getValueType(0),
6796 case Intrinsic::loongarch_lsx_bnz_b:
6797 case Intrinsic::loongarch_lsx_bnz_h:
6798 case Intrinsic::loongarch_lsx_bnz_w:
6799 case Intrinsic::loongarch_lsx_bnz_d:
6800 case Intrinsic::loongarch_lasx_xbnz_b:
6801 case Intrinsic::loongarch_lasx_xbnz_h:
6802 case Intrinsic::loongarch_lasx_xbnz_w:
6803 case Intrinsic::loongarch_lasx_xbnz_d:
6805 return DAG.
getNode(LoongArchISD::VALL_NONZERO,
DL,
N->getValueType(0),
6808 case Intrinsic::loongarch_lsx_bnz_v:
6809 case Intrinsic::loongarch_lasx_xbnz_v:
6811 return DAG.
getNode(LoongArchISD::VANY_NONZERO,
DL,
N->getValueType(0),
6814 case Intrinsic::loongarch_lasx_concat_128_s:
6815 case Intrinsic::loongarch_lasx_concat_128_d:
6816 case Intrinsic::loongarch_lasx_concat_128:
6818                        N->getOperand(1), N->getOperand(2));
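// Illustrative note (not part of the original source): most cases in this
// switch simply rewrite an LSX/LASX arithmetic intrinsic into the equivalent
// generic node so later DAG combines can see through it. A sketch, assuming a
// v16i8 input:
//
//   (intrinsic_wo_chain loongarch_lsx_vadd_b, $a, $b)
//     ==> (add v16i8:$a, v16i8:$b)
//
// and similarly vsub_* -> ISD::SUB, vmax_*/vmin_* -> [U]MAX/[U]MIN,
// vand_v/vor_v/vxor_v -> AND/OR/XOR, vsll_*/vsrl_*/vsra_* -> SHL/SRL/SRA.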
6830   if (Op0.getOpcode() == LoongArchISD::MOVFR2GR_S_LA64)
6842   if (Op0->getOpcode() == LoongArchISD::MOVGR2FR_W_LA64) {
6844            "Unexpected value type!");
6853   MVT VT = N->getSimpleValueType(0);
6874   if (Op0->getOpcode() == LoongArchISD::BUILD_PAIR_F64)
6887 APInt V =
C->getValueAPF().bitcastToAPInt();
6902 MVT VT =
N->getSimpleValueType(0);
6963 EVT VT =
N->getValueType(0);
6965 if (VT != MVT::f32 && VT != MVT::f64)
6967 if (VT == MVT::f32 && !Subtarget.hasBasicF())
6969 if (VT == MVT::f64 && !Subtarget.hasBasicD())
6992 return DAG.
getNode(LoongArchISD::SITOF,
SDLoc(
N), VT, Load);
7001 switch (
N->getOpcode()) {
7016 case LoongArchISD::BITREV_W:
7018 case LoongArchISD::BR_CC:
7020 case LoongArchISD::SELECT_CC:
7024 case LoongArchISD::MOVGR2FR_W_LA64:
7026 case LoongArchISD::MOVFR2GR_S_LA64:
7028 case LoongArchISD::VMSKLTZ:
7029 case LoongArchISD::XVMSKLTZ:
7031 case LoongArchISD::SPLIT_PAIR_F64:
7033 case LoongArchISD::VANDN:
7057   MF->insert(It, BreakMBB);
7061   SinkMBB->splice(SinkMBB->end(), MBB, std::next(MI.getIterator()), MBB->end());
7062   SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
7074 MBB->addSuccessor(BreakMBB);
7075 MBB->addSuccessor(SinkMBB);
7081 BreakMBB->addSuccessor(SinkMBB);
7093 switch (
MI.getOpcode()) {
7096 case LoongArch::PseudoVBZ:
7097 CondOpc = LoongArch::VSETEQZ_V;
7099 case LoongArch::PseudoVBZ_B:
7100 CondOpc = LoongArch::VSETANYEQZ_B;
7102 case LoongArch::PseudoVBZ_H:
7103 CondOpc = LoongArch::VSETANYEQZ_H;
7105 case LoongArch::PseudoVBZ_W:
7106 CondOpc = LoongArch::VSETANYEQZ_W;
7108 case LoongArch::PseudoVBZ_D:
7109 CondOpc = LoongArch::VSETANYEQZ_D;
7111 case LoongArch::PseudoVBNZ:
7112 CondOpc = LoongArch::VSETNEZ_V;
7114 case LoongArch::PseudoVBNZ_B:
7115 CondOpc = LoongArch::VSETALLNEZ_B;
7117 case LoongArch::PseudoVBNZ_H:
7118 CondOpc = LoongArch::VSETALLNEZ_H;
7120 case LoongArch::PseudoVBNZ_W:
7121 CondOpc = LoongArch::VSETALLNEZ_W;
7123 case LoongArch::PseudoVBNZ_D:
7124 CondOpc = LoongArch::VSETALLNEZ_D;
7126 case LoongArch::PseudoXVBZ:
7127 CondOpc = LoongArch::XVSETEQZ_V;
7129 case LoongArch::PseudoXVBZ_B:
7130 CondOpc = LoongArch::XVSETANYEQZ_B;
7132 case LoongArch::PseudoXVBZ_H:
7133 CondOpc = LoongArch::XVSETANYEQZ_H;
7135 case LoongArch::PseudoXVBZ_W:
7136 CondOpc = LoongArch::XVSETANYEQZ_W;
7138 case LoongArch::PseudoXVBZ_D:
7139 CondOpc = LoongArch::XVSETANYEQZ_D;
7141 case LoongArch::PseudoXVBNZ:
7142 CondOpc = LoongArch::XVSETNEZ_V;
7144 case LoongArch::PseudoXVBNZ_B:
7145 CondOpc = LoongArch::XVSETALLNEZ_B;
7147 case LoongArch::PseudoXVBNZ_H:
7148 CondOpc = LoongArch::XVSETALLNEZ_H;
7150 case LoongArch::PseudoXVBNZ_W:
7151 CondOpc = LoongArch::XVSETALLNEZ_W;
7153 case LoongArch::PseudoXVBNZ_D:
7154 CondOpc = LoongArch::XVSETALLNEZ_D;
7169 F->insert(It, FalseBB);
7170 F->insert(It, TrueBB);
7171 F->insert(It, SinkBB);
7174   SinkBB->splice(SinkBB->end(), BB, std::next(MI.getIterator()), BB->end());
7178   Register FCC = MRI.createVirtualRegister(&LoongArch::CFRRegClass);
7187   Register RD1 = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
7195   Register RD2 = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
7203 MI.getOperand(0).getReg())
7210 MI.eraseFromParent();
7218 unsigned BroadcastOp;
7220 switch (
MI.getOpcode()) {
7223 case LoongArch::PseudoXVINSGR2VR_B:
7225 BroadcastOp = LoongArch::XVREPLGR2VR_B;
7226 InsOp = LoongArch::XVEXTRINS_B;
7228 case LoongArch::PseudoXVINSGR2VR_H:
7230 BroadcastOp = LoongArch::XVREPLGR2VR_H;
7231 InsOp = LoongArch::XVEXTRINS_H;
7243   unsigned Idx = MI.getOperand(3).getImm();
7245   if (XSrc.isVirtual() && MRI.getVRegDef(XSrc)->isImplicitDef() &&
7247     Register ScratchSubReg1 = MRI.createVirtualRegister(SubRC);
7248     Register ScratchSubReg2 = MRI.createVirtualRegister(SubRC);
7251 .
addReg(XSrc, {}, LoongArch::sub_128);
7253 TII->get(HalfSize == 8 ? LoongArch::VINSGR2VR_H
7254 : LoongArch::VINSGR2VR_B),
7263 .
addImm(LoongArch::sub_128);
7265   Register ScratchReg1 = MRI.createVirtualRegister(RC);
7266   Register ScratchReg2 = MRI.createVirtualRegister(RC);
7270   BuildMI(*BB, MI, DL, TII->get(LoongArch::XVPERMI_Q), ScratchReg2)
7273       .addImm(Idx >= HalfSize ? 48 : 18);
7278       .addImm((Idx >= HalfSize ? Idx - HalfSize : Idx) * 17);
7281 MI.eraseFromParent();
7288 assert(Subtarget.hasExtLSX());
7295   Register ScratchReg1 = MRI.createVirtualRegister(RC);
7296   Register ScratchReg2 = MRI.createVirtualRegister(RC);
7297   Register ScratchReg3 = MRI.createVirtualRegister(RC);
7301           TII->get(Subtarget.is64Bit() ? LoongArch::VINSGR2VR_D
7302                                        : LoongArch::VINSGR2VR_W),
7309           TII->get(Subtarget.is64Bit() ? LoongArch::VPCNT_D : LoongArch::VPCNT_W),
7313           TII->get(Subtarget.is64Bit() ? LoongArch::VPICKVE2GR_D
7314                                        : LoongArch::VPICKVE2GR_W),
7319 MI.eraseFromParent();
7333 unsigned EleBits = 8;
7334 unsigned NotOpc = 0;
7337 switch (
MI.getOpcode()) {
7340 case LoongArch::PseudoVMSKLTZ_B:
7341 MskOpc = LoongArch::VMSKLTZ_B;
7343 case LoongArch::PseudoVMSKLTZ_H:
7344 MskOpc = LoongArch::VMSKLTZ_H;
7347 case LoongArch::PseudoVMSKLTZ_W:
7348 MskOpc = LoongArch::VMSKLTZ_W;
7351 case LoongArch::PseudoVMSKLTZ_D:
7352 MskOpc = LoongArch::VMSKLTZ_D;
7355 case LoongArch::PseudoVMSKGEZ_B:
7356 MskOpc = LoongArch::VMSKGEZ_B;
7358 case LoongArch::PseudoVMSKEQZ_B:
7359 MskOpc = LoongArch::VMSKNZ_B;
7360 NotOpc = LoongArch::VNOR_V;
7362 case LoongArch::PseudoVMSKNEZ_B:
7363 MskOpc = LoongArch::VMSKNZ_B;
7365 case LoongArch::PseudoXVMSKLTZ_B:
7366 MskOpc = LoongArch::XVMSKLTZ_B;
7367 RC = &LoongArch::LASX256RegClass;
7369 case LoongArch::PseudoXVMSKLTZ_H:
7370 MskOpc = LoongArch::XVMSKLTZ_H;
7371 RC = &LoongArch::LASX256RegClass;
7374 case LoongArch::PseudoXVMSKLTZ_W:
7375 MskOpc = LoongArch::XVMSKLTZ_W;
7376 RC = &LoongArch::LASX256RegClass;
7379 case LoongArch::PseudoXVMSKLTZ_D:
7380 MskOpc = LoongArch::XVMSKLTZ_D;
7381 RC = &LoongArch::LASX256RegClass;
7384 case LoongArch::PseudoXVMSKGEZ_B:
7385 MskOpc = LoongArch::XVMSKGEZ_B;
7386 RC = &LoongArch::LASX256RegClass;
7388 case LoongArch::PseudoXVMSKEQZ_B:
7389 MskOpc = LoongArch::XVMSKNZ_B;
7390 NotOpc = LoongArch::XVNOR_V;
7391 RC = &LoongArch::LASX256RegClass;
7393 case LoongArch::PseudoXVMSKNEZ_B:
7394 MskOpc = LoongArch::XVMSKNZ_B;
7395 RC = &LoongArch::LASX256RegClass;
7410   if (TRI->getRegSizeInBits(*RC) > 128) {
7411     Register Lo = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
7412     Register Hi = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
7420             TII->get(Subtarget.is64Bit() ? LoongArch::BSTRINS_D
7421                                          : LoongArch::BSTRINS_W),
7425         .addImm(256 / EleBits - 1)
7433 MI.eraseFromParent();
7440   assert(MI.getOpcode() == LoongArch::SplitPairF64Pseudo &&
7441          "Unexpected instruction");
7453   MI.eraseFromParent();
7460   assert(MI.getOpcode() == LoongArch::BuildPairF64Pseudo &&
7461          "Unexpected instruction");
7467   Register TmpReg = MRI.createVirtualRegister(&LoongArch::FPR64RegClass);
7477 MI.eraseFromParent();
7482   switch (MI.getOpcode()) {
7485   case LoongArch::Select_GPR_Using_CC_GPR:
7521   if (MI.getOperand(2).isReg())
7522     RHS = MI.getOperand(2).getReg();
7523   auto CC = static_cast<unsigned>(MI.getOperand(3).getImm());
7527   SelectDests.insert(MI.getOperand(0).getReg());
7531        SequenceMBBI != E; ++SequenceMBBI) {
7532     if (SequenceMBBI->isDebugInstr())
7535       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
7536           !SequenceMBBI->getOperand(2).isReg() ||
7537           SequenceMBBI->getOperand(2).getReg() != RHS ||
7538           SequenceMBBI->getOperand(3).getImm() != CC ||
7539           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
7540           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
7542       LastSelectPseudo = &*SequenceMBBI;
7544       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
7547 if (SequenceMBBI->hasUnmodeledSideEffects() ||
7548 SequenceMBBI->mayLoadOrStore() ||
7549 SequenceMBBI->usesCustomInsertionHook())
7552 return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
7567   F->insert(I, IfFalseMBB);
7568   F->insert(I, TailMBB);
7571   unsigned CallFrameSize = TII.getCallFrameSizeAt(*LastSelectPseudo);
7577     TailMBB->push_back(DebugInstr->removeFromParent());
7581   TailMBB->splice(TailMBB->end(), HeadMBB,
7591   if (MI.getOperand(2).isImm())
7603   auto SelectMBBI = MI.getIterator();
7604   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
7606 while (SelectMBBI != SelectEnd) {
7607 auto Next = std::next(SelectMBBI);
7611               TII.get(LoongArch::PHI), SelectMBBI->getOperand(0).getReg())
7612           .addReg(SelectMBBI->getOperand(4).getReg())
7614           .addReg(SelectMBBI->getOperand(5).getReg())
7621 F->getProperties().resetNoPHIs();
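// Illustrative note (not part of the original source): the Select_* pseudo is
// expanded into a small diamond of basic blocks. A sketch of the resulting
// CFG for a single select:
//
//   HeadMBB:    conditional branch on the pseudo's (LHS, RHS, CC) to TailMBB
//   IfFalseMBB: fallthrough block, no real code
//   TailMBB:    $dst = PHI [$trueval, HeadMBB], [$falseval, IfFalseMBB]
//
// Consecutive select pseudos with the same condition are folded into the same
// diamond (tracked via LastSelectPseudo), each becoming one PHI in TailMBB,
// which is why the function re-enables PHIs via resetNoPHIs() at the end.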
7627 const TargetInstrInfo *
TII = Subtarget.getInstrInfo();
7630 switch (
MI.getOpcode()) {
7633 case LoongArch::DIV_W:
7634 case LoongArch::DIV_WU:
7635 case LoongArch::MOD_W:
7636 case LoongArch::MOD_WU:
7637 case LoongArch::DIV_D:
7638 case LoongArch::DIV_DU:
7639 case LoongArch::MOD_D:
7640 case LoongArch::MOD_DU:
7643   case LoongArch::WRFCSR: {
7645                       LoongArch::FCSR0 + MI.getOperand(0).getImm())
7646         .addReg(MI.getOperand(1).getReg());
7647     MI.eraseFromParent();
7650   case LoongArch::RDFCSR: {
7651     MachineInstr *ReadFCSR =
7653                 MI.getOperand(0).getReg())
7654             .addReg(LoongArch::FCSR0 + MI.getOperand(1).getImm());
7656     MI.eraseFromParent();
7659 case LoongArch::Select_GPR_Using_CC_GPR:
7661 case LoongArch::BuildPairF64Pseudo:
7663 case LoongArch::SplitPairF64Pseudo:
7665 case LoongArch::PseudoVBZ:
7666 case LoongArch::PseudoVBZ_B:
7667 case LoongArch::PseudoVBZ_H:
7668 case LoongArch::PseudoVBZ_W:
7669 case LoongArch::PseudoVBZ_D:
7670 case LoongArch::PseudoVBNZ:
7671 case LoongArch::PseudoVBNZ_B:
7672 case LoongArch::PseudoVBNZ_H:
7673 case LoongArch::PseudoVBNZ_W:
7674 case LoongArch::PseudoVBNZ_D:
7675 case LoongArch::PseudoXVBZ:
7676 case LoongArch::PseudoXVBZ_B:
7677 case LoongArch::PseudoXVBZ_H:
7678 case LoongArch::PseudoXVBZ_W:
7679 case LoongArch::PseudoXVBZ_D:
7680 case LoongArch::PseudoXVBNZ:
7681 case LoongArch::PseudoXVBNZ_B:
7682 case LoongArch::PseudoXVBNZ_H:
7683 case LoongArch::PseudoXVBNZ_W:
7684 case LoongArch::PseudoXVBNZ_D:
7686 case LoongArch::PseudoXVINSGR2VR_B:
7687 case LoongArch::PseudoXVINSGR2VR_H:
7689 case LoongArch::PseudoCTPOP:
7691 case LoongArch::PseudoVMSKLTZ_B:
7692 case LoongArch::PseudoVMSKLTZ_H:
7693 case LoongArch::PseudoVMSKLTZ_W:
7694 case LoongArch::PseudoVMSKLTZ_D:
7695 case LoongArch::PseudoVMSKGEZ_B:
7696 case LoongArch::PseudoVMSKEQZ_B:
7697 case LoongArch::PseudoVMSKNEZ_B:
7698 case LoongArch::PseudoXVMSKLTZ_B:
7699 case LoongArch::PseudoXVMSKLTZ_H:
7700 case LoongArch::PseudoXVMSKLTZ_W:
7701 case LoongArch::PseudoXVMSKLTZ_D:
7702 case LoongArch::PseudoXVMSKGEZ_B:
7703 case LoongArch::PseudoXVMSKEQZ_B:
7704 case LoongArch::PseudoXVMSKNEZ_B:
7706   case TargetOpcode::STATEPOINT:
7712     MI.addOperand(*MI.getMF(),
7714                   LoongArch::R1, true,
7717   if (!Subtarget.is64Bit())
7725                                                     unsigned *Fast) const {
7726 if (!Subtarget.hasUAL())
7744 LoongArch::R7, LoongArch::R8, LoongArch::R9,
7745 LoongArch::R10, LoongArch::R11};
7760 LoongArch::R23, LoongArch::R24, LoongArch::R25, LoongArch::R26,
7761 LoongArch::R27, LoongArch::R28, LoongArch::R29, LoongArch::R30,
7762 LoongArch::R4, LoongArch::R5, LoongArch::R6, LoongArch::R7,
7763 LoongArch::R8, LoongArch::R9, LoongArch::R10, LoongArch::R11,
7764 LoongArch::R12, LoongArch::R13, LoongArch::R14, LoongArch::R15,
7765 LoongArch::R16, LoongArch::R17, LoongArch::R18, LoongArch::R19,
7771 LoongArch::F3, LoongArch::F4, LoongArch::F5,
7772 LoongArch::F6, LoongArch::F7};
7775 LoongArch::F0_64, LoongArch::F1_64, LoongArch::F2_64, LoongArch::F3_64,
7776 LoongArch::F4_64, LoongArch::F5_64, LoongArch::F6_64, LoongArch::F7_64};
7779 LoongArch::VR3, LoongArch::VR4, LoongArch::VR5,
7780 LoongArch::VR6, LoongArch::VR7};
7783 LoongArch::XR3, LoongArch::XR4, LoongArch::XR5,
7784 LoongArch::XR6, LoongArch::XR7};
7787 switch (State.getCallingConv()) {
7789 if (!State.isVarArg())
7793 return State.AllocateReg(
ArgGPRs);
7801                                  unsigned ValNo2, MVT ValVT2, MVT LocVT2,
7803   unsigned GRLenInBytes = GRLen / 8;
7814         State.AllocateStack(GRLenInBytes, StackAlign),
7817         ValNo2, ValVT2, State.AllocateStack(GRLenInBytes, Align(GRLenInBytes)),
7828         ValNo2, ValVT2, State.AllocateStack(GRLenInBytes, Align(GRLenInBytes)),
7836                   unsigned ValNo, MVT ValVT,
7839   unsigned GRLen = DL.getLargestLegalIntTypeSizeInBits();
7840   assert((GRLen == 32 || GRLen == 64) && "Unsupported GRLen");
7841 MVT GRLenVT = GRLen == 32 ? MVT::i32 : MVT::i64;
7846 if (IsRet && ValNo > 1)
7850 bool UseGPRForFloat =
true;
7860 UseGPRForFloat = ArgFlags.
isVarArg();
7873 unsigned TwoGRLenInBytes = (2 * GRLen) / 8;
7876 DL.getTypeAllocSize(OrigTy) == TwoGRLenInBytes) {
7877 unsigned RegIdx = State.getFirstUnallocated(
ArgGPRs);
7879 if (RegIdx != std::size(
ArgGPRs) && RegIdx % 2 == 1)
7885 State.getPendingArgFlags();
7888 "PendingLocs and PendingArgFlags out of sync");
7892     UseGPRForFloat = true;
7894   if (UseGPRForFloat && ValVT == MVT::f32) {
7897   } else if (UseGPRForFloat && GRLen == 64 && ValVT == MVT::f64) {
7900   } else if (UseGPRForFloat && GRLen == 32 && ValVT == MVT::f64) {
7903     assert(PendingLocs.empty() && "Can't lower f64 if it is split");
7945       PendingLocs.size() <= 2) {
7946     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
7951     PendingLocs.clear();
7952     PendingArgFlags.clear();
7959 unsigned StoreSizeBytes = GRLen / 8;
7962   if (ValVT == MVT::f32 && !UseGPRForFloat) {
7964   } else if (ValVT == MVT::f64 && !UseGPRForFloat) {
7968     UseGPRForFloat = false;
7969     StoreSizeBytes = 16;
7970     StackAlign = Align(16);
7973     UseGPRForFloat = false;
7974     StoreSizeBytes = 32;
7975     StackAlign = Align(32);
7981 Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
7985   if (!PendingLocs.empty()) {
7987     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
7988     for (auto &It : PendingLocs) {
7990       It.convertToReg(Reg);
7995     PendingLocs.clear();
7996     PendingArgFlags.clear();
7999   assert((!UseGPRForFloat || LocVT == GRLenVT) &&
8000          "Expected a GRLenVT at this stage");
8017void LoongArchTargetLowering::analyzeInputArgs(
8020     LoongArchCCAssignFn Fn) const {
8022   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
8023     MVT ArgVT = Ins[i].VT;
8024     Type *ArgTy = nullptr;
8026       ArgTy = FType->getReturnType();
8027     else if (Ins[i].isOrigArg())
8028       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
8032                   CCInfo, IsRet, ArgTy)) {
8033       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " << ArgVT
8040void LoongArchTargetLowering::analyzeOutputArgs(
8043     CallLoweringInfo *CLI, LoongArchCCAssignFn Fn) const {
8044   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
8045     MVT ArgVT = Outs[i].VT;
8046     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
8050                   CCInfo, IsRet, OrigTy)) {
8051       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " << ArgVT
8070     Val = DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64, DL, MVT::f32, Val);
8092   if (In.isOrigArg()) {
8097     if ((BitWidth <= 32 && In.Flags.isSExt()) ||
8098         (BitWidth < 32 && In.Flags.isZExt())) {
8148   Register LoVReg = RegInfo.createVirtualRegister(&LoongArch::GPRRegClass);
8161   Register HiVReg = RegInfo.createVirtualRegister(&LoongArch::GPRRegClass);
8165   return DAG.getNode(LoongArchISD::BUILD_PAIR_F64, DL, MVT::f64, Lo, Hi);
8179     Val = DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Val);
8191 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
8195 LoongArch::R23, LoongArch::R24, LoongArch::R25,
8196 LoongArch::R26, LoongArch::R27, LoongArch::R28,
8197 LoongArch::R29, LoongArch::R30, LoongArch::R31};
8204 if (LocVT == MVT::f32) {
8207 static const MCPhysReg FPR32List[] = {LoongArch::F24, LoongArch::F25,
8208 LoongArch::F26, LoongArch::F27};
8215 if (LocVT == MVT::f64) {
8218 static const MCPhysReg FPR64List[] = {LoongArch::F28_64, LoongArch::F29_64,
8219 LoongArch::F30_64, LoongArch::F31_64};
8251 "GHC calling convention requires the F and D extensions");
8255 MVT GRLenVT = Subtarget.getGRLenVT();
8256 unsigned GRLenInBytes = Subtarget.getGRLen() / 8;
8258 std::vector<SDValue> OutChains;
8267   analyzeInputArgs(MF, CCInfo, Ins, false, CC_LoongArch);
8269   for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
8286 unsigned ArgIndex = Ins[InsIdx].OrigArgIndex;
8287 unsigned ArgPartOffset = Ins[InsIdx].PartOffset;
8288 assert(ArgPartOffset == 0);
8289 while (i + 1 != e && Ins[InsIdx + 1].OrigArgIndex == ArgIndex) {
8291 unsigned PartOffset = Ins[InsIdx + 1].PartOffset - ArgPartOffset;
8302 if (Ins[InsIdx].Flags.isByVal())
8303 LoongArchFI->addIncomingByValArgs(ArgValue);
8316 int VaArgOffset, VarArgsSaveSize;
8320     if (ArgRegs.size() == Idx) {
8322       VarArgsSaveSize = 0;
8324       VarArgsSaveSize = GRLenInBytes * (ArgRegs.size() - Idx);
8325       VaArgOffset = -VarArgsSaveSize;
8331     LoongArchFI->setVarArgsFrameIndex(FI);
8339       VarArgsSaveSize += GRLenInBytes;
8344     for (unsigned I = Idx; I < ArgRegs.size();
8345          ++I, VaArgOffset += GRLenInBytes) {
8346       const Register Reg = RegInfo.createVirtualRegister(RC);
8347       RegInfo.addLiveIn(ArgRegs[I], Reg);
8355           ->setValue((Value *)nullptr);
8356 OutChains.push_back(Store);
8358 LoongArchFI->setVarArgsSaveSize(VarArgsSaveSize);
8361 LoongArchFI->setArgumentStackSize(CCInfo.
getStackSize());
8365 if (!OutChains.empty()) {
8366 OutChains.push_back(Chain);
8381   if (N->getNumValues() != 1)
8383   if (!N->hasNUsesOfValue(1, 0))
8386   SDNode *Copy = *N->user_begin();
8392   if (Copy->getGluedNode())
8396   bool HasRet = false;
8398     if (Node->getOpcode() != LoongArchISD::RET)
8406 Chain = Copy->getOperand(0);
8411bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
8415 auto CalleeCC = CLI.CallConv;
8416 auto &Outs = CLI.Outs;
8418 auto CallerCC = Caller.getCallingConv();
8423   if (CCInfo.getStackSize() > LoongArchFI->getArgumentStackSize())
8427   for (auto &VA : ArgLocs)
8433   auto IsCallerStructRet = Caller.hasStructRetAttr();
8434   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
8435   if (IsCallerStructRet != IsCalleeStructRet)
8439   for (unsigned i = 0, j = 0; i < Outs.size(); i++) {
8440 if (!Outs[i].Flags.isByVal())
8442 if (j++ >= LoongArchFI->getIncomingByValArgsSize())
8444 if (LoongArchFI->getIncomingByValArgs(i).getValueType() != Outs[i].ArgVT)
8450   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
8451   if (CalleeCC != CallerCC) {
8452     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
8453     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
8460 const SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
8487 MVT GRLenVT = Subtarget.getGRLenVT();
8500   analyzeOutputArgs(MF, ArgCCInfo, Outs, false, &CLI, CC_LoongArch);
8504     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
8510          "site marked musttail");
8517   for (unsigned i = 0, j = 0, e = Outs.size(); i != e; ++i) {
8519 if (!Flags.isByVal())
8523 unsigned Size = Flags.getByValSize();
8524 Align Alignment = Flags.getNonZeroByValAlign();
8529 SDValue CallerArg = LoongArchFI->getIncomingByValArgs(j++);
8540         DAG.getMemcpy(Chain, DL, Dst, Arg, SizeNode, Alignment,
8542                       false, nullptr, std::nullopt,
8556   bool AfterFormalArgLoads = false;
8562   for (unsigned i = 0, j = 0, e = ArgLocs.size(), OutIdx = 0; i != e;
8565 SDValue ArgValue = OutVals[OutIdx];
8573           DAG.getNode(LoongArchISD::SPLIT_PAIR_F64, DL,
8574                       DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
8586         if (!StackPtr.getNode())
8598         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
8613 unsigned ArgIndex = Outs[OutIdx].OrigArgIndex;
8614 unsigned ArgPartOffset = Outs[OutIdx].PartOffset;
8615 assert(ArgPartOffset == 0);
8620 while (i + 1 != e && Outs[OutIdx + 1].OrigArgIndex == ArgIndex) {
8621 SDValue PartValue = OutVals[OutIdx + 1];
8622 unsigned PartOffset = Outs[OutIdx + 1].PartOffset - ArgPartOffset;
8637 for (
const auto &Part : Parts) {
8638 SDValue PartValue = Part.first;
8639 SDValue PartOffset = Part.second;
8646 ArgValue = SpillSlot;
8652 if (Flags.isByVal()) {
8656 ArgValue = ByValArgs[j++];
8669 if (!StackPtr.getNode())
8677       if (!AfterFormalArgLoads) {
8679         AfterFormalArgLoads = true;
8689           DAG.getStore(Chain, DL, ArgValue, DstAddr, DstInfo));
8694   if (!MemOpChains.empty())
8700   for (auto &Reg : RegsToPass) {
8701     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
8723 Ops.push_back(Chain);
8724 Ops.push_back(Callee);
8728   for (auto &Reg : RegsToPass)
8729     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
8734     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
8735     assert(Mask && "Missing call preserved mask for calling convention");
8741 Ops.push_back(Glue);
8750 Op = IsTailCall ? LoongArchISD::TAIL : LoongArchISD::CALL;
8753 Op = IsTailCall ? LoongArchISD::TAIL_MEDIUM : LoongArchISD::CALL_MEDIUM;
8756     assert(Subtarget.is64Bit() && "Large code model requires LA64");
8757     Op = IsTailCall ? LoongArchISD::TAIL_LARGE : LoongArchISD::CALL_LARGE;
8779   analyzeInputArgs(MF, RetCCInfo, Ins, true, CC_LoongArch);
8782   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
8783 auto &VA = RVLocs[i];
8791 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
8792 assert(VA.needsCustom());
8797       RetValue = DAG.getNode(LoongArchISD::BUILD_PAIR_F64, DL, MVT::f64,
8798                              RetValue, RetValue2);
8811                             const Type *RetTy) const {
8813   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
8815   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
8819                      Outs[i].Flags, CCInfo, true, nullptr))
8845   for (unsigned i = 0, e = RVLocs.size(), OutIdx = 0; i < e; ++i, ++OutIdx) {
8846 SDValue Val = OutVals[OutIdx];
8855 DAG.
getVTList(MVT::i32, MVT::i32), Val);
8859 Register RegHi = RVLocs[++i].getLocReg();
8884 return DAG.
getNode(LoongArchISD::RET,
DL, MVT::Other, RetOps);
8892                        const APInt &SplatValue, const unsigned SplatBitSize) const {
8895   if (SplatBitSize == 16 && !(V & 0x00FF)) {
8897     RequiredImm = (0b10101 << 8) | (V >> 8);
8898     return {true, RequiredImm};
8899   } else if (SplatBitSize == 32) {
8901 if (!(V & 0xFFFF00FF)) {
8902 RequiredImm = (0b10001 << 8) | (V >> 8);
8903 return {
true, RequiredImm};
8906 if (!(V & 0xFF00FFFF)) {
8907 RequiredImm = (0b10010 << 8) | (V >> 16);
8908 return {
true, RequiredImm};
8911 if (!(V & 0x00FFFFFF)) {
8912 RequiredImm = (0b10011 << 8) | (V >> 24);
8913 return {
true, RequiredImm};
8916 if ((V & 0xFFFF00FF) == 0xFF) {
8917 RequiredImm = (0b10110 << 8) | (V >> 8);
8918 return {
true, RequiredImm};
8921 if ((V & 0xFF00FFFF) == 0xFFFF) {
8922 RequiredImm = (0b10111 << 8) | (V >> 16);
8923 return {
true, RequiredImm};
8926 if ((V & 0x7E07FFFF) == 0x3E000000 || (V & 0x7E07FFFF) == 0x40000000) {
8928 (0b11010 << 8) | (((V >> 24) & 0xC0) ^ 0x40) | ((V >> 19) & 0x3F);
8929       return {true, RequiredImm};
8931   } else if (SplatBitSize == 64) {
8933 if ((V & 0xFFFFFFFF7E07FFFFULL) == 0x3E000000ULL ||
8934 (V & 0xFFFFFFFF7E07FFFFULL) == 0x40000000ULL) {
8936 (0b11011 << 8) | (((V >> 24) & 0xC0) ^ 0x40) | ((V >> 19) & 0x3F);
8937 return {
true, RequiredImm};
8940 if ((V & 0x7FC0FFFFFFFFFFFFULL) == 0x4000000000000000ULL ||
8941 (V & 0x7FC0FFFFFFFFFFFFULL) == 0x3FC0000000000000ULL) {
8943 (0b11100 << 8) | (((V >> 56) & 0xC0) ^ 0x40) | ((V >> 48) & 0x3F);
8944 return {
true, RequiredImm};
8947     auto sameBitsPreByte = [](uint64_t x) -> std::pair<bool, uint8_t> {
8949       for (int i = 0; i < 8; ++i) {
8951         if (byte == 0 || byte == 0xFF)
8952           res |= ((byte & 1) << i);
8959     auto [IsSame, Suffix] = sameBitsPreByte(V);
8961       RequiredImm = (0b11001 << 8) | Suffix;
8962       return {true, RequiredImm};
8965   return {false, RequiredImm};
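// Illustrative note (not part of the original source): the function above
// computes the 13-bit [x]vldi immediate for splat constants the instruction
// can synthesize directly. A worked example for a 32-bit splat where only
// byte 1 is significant, e.g. V = 0x00003A00:
//
//   (V & 0xFFFF00FF) == 0  =>  RequiredImm = (0b10001 << 8) | (V >> 8)
//                                          = 0x1100 | 0x3A = 0x113A
//
// so a vector of i32 0x00003A00 can be materialized with a single
// "vldi $vd, 0x113A" instead of building the constant in a GPR and
// broadcasting it.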
8970 if (!Subtarget.hasExtLSX())
8973 if (VT == MVT::f32) {
8974 uint64_t masked = Imm.bitcastToAPInt().getZExtValue() & 0x7e07ffff;
8975 return (masked == 0x3e000000 || masked == 0x40000000);
8978 if (VT == MVT::f64) {
8979 uint64_t masked = Imm.bitcastToAPInt().getZExtValue() & 0x7fc0ffffffffffff;
8980 return (masked == 0x3fc0000000000000 || masked == 0x4000000000000000);
8986bool LoongArchTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
8987                                            bool ForCodeSize) const {
8989   if (VT == MVT::f32 && !Subtarget.hasBasicF())
8991   if (VT == MVT::f64 && !Subtarget.hasBasicD())
8993   return (Imm.isZero() || Imm.isExactlyValue(1.0) || isFPImmVLDILegal(Imm, VT));
9004bool LoongArchTargetLowering::shouldInsertFencesForAtomic(
9014   Type *Ty = I->getOperand(0)->getType();
9016     unsigned Size = Ty->getIntegerBitWidth();
9032   EVT VT = Y.getValueType();
9035   return Subtarget.hasExtLSX() && VT.isInteger();
9047 case Intrinsic::loongarch_masked_atomicrmw_xchg_i32:
9048 case Intrinsic::loongarch_masked_atomicrmw_add_i32:
9049 case Intrinsic::loongarch_masked_atomicrmw_sub_i32:
9050 case Intrinsic::loongarch_masked_atomicrmw_nand_i32:
9052 Info.memVT = MVT::i32;
9053 Info.ptrVal =
I.getArgOperand(0);
9055 Info.align =
Align(4);
9072 "Unable to expand");
9073 unsigned MinWordSize = 4;
9085   Value *AlignedAddr = Builder.CreateIntrinsic(
9086       Intrinsic::ptrmask, {PtrTy, IntTy},
9087       {Addr, ConstantInt::get(IntTy, ~(uint64_t)(MinWordSize - 1))}, nullptr,
9090   Value *AddrInt = Builder.CreatePtrToInt(Addr, IntTy);
9091   Value *PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
9092   Value *ShiftAmt = Builder.CreateShl(PtrLSB, 3);
9093   ShiftAmt = Builder.CreateTrunc(ShiftAmt, WordType, "ShiftAmt");
9094   Value *Mask = Builder.CreateShl(
9095       ConstantInt::get(WordType,
9098   Value *Inv_Mask = Builder.CreateNot(Mask, "Inv_Mask");
9099   Value *ValOperand_Shifted =
9100       Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), WordType),
9101                         ShiftAmt, "ValOperand_Shifted");
9104     NewOperand = Builder.CreateOr(ValOperand_Shifted, Inv_Mask, "AndOperand");
9106     NewOperand = ValOperand_Shifted;
9109       Builder.CreateAtomicRMW(Op, AlignedAddr, NewOperand, Align(MinWordSize),
9112   Value *Shift = Builder.CreateLShr(NewAI, ShiftAmt, "shifted");
9113   Value *Trunc = Builder.CreateTrunc(Shift, ValueType, "extracted");
9133 if (Subtarget.hasLAM_BH() && Subtarget.is64Bit() &&
9141 if (Subtarget.hasLAMCAS()) {
9163 return Intrinsic::loongarch_masked_atomicrmw_xchg_i64;
9165 return Intrinsic::loongarch_masked_atomicrmw_add_i64;
9167 return Intrinsic::loongarch_masked_atomicrmw_sub_i64;
9169 return Intrinsic::loongarch_masked_atomicrmw_nand_i64;
9171 return Intrinsic::loongarch_masked_atomicrmw_umax_i64;
9173 return Intrinsic::loongarch_masked_atomicrmw_umin_i64;
9175 return Intrinsic::loongarch_masked_atomicrmw_max_i64;
9177 return Intrinsic::loongarch_masked_atomicrmw_min_i64;
9187 return Intrinsic::loongarch_masked_atomicrmw_xchg_i32;
9189 return Intrinsic::loongarch_masked_atomicrmw_add_i32;
9191 return Intrinsic::loongarch_masked_atomicrmw_sub_i32;
9193 return Intrinsic::loongarch_masked_atomicrmw_nand_i32;
9195 return Intrinsic::loongarch_masked_atomicrmw_umax_i32;
9197 return Intrinsic::loongarch_masked_atomicrmw_umin_i32;
9199 return Intrinsic::loongarch_masked_atomicrmw_max_i32;
9201 return Intrinsic::loongarch_masked_atomicrmw_min_i32;
9213 if (Subtarget.hasLAMCAS())
9225 unsigned GRLen = Subtarget.getGRLen();
9227 Value *FailureOrdering =
9228 Builder.getIntN(Subtarget.getGRLen(),
static_cast<uint64_t>(FailOrd));
9229 Intrinsic::ID CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i32;
9231 CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i64;
9232 CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
9233 NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
9234 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
9237 Value *Result = Builder.CreateIntrinsic(
9238 CmpXchgIntrID, Tys, {AlignedAddr, CmpVal, NewVal, Mask, FailureOrdering});
9240 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
9256 Builder.CreateNot(Mask,
"Inv_Mask"),
9263 unsigned GRLen = Subtarget.getGRLen();
9272 Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
9273 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
9274 ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
9290 Builder.CreateSub(Builder.getIntN(GRLen, GRLen - ValWidth), ShiftAmt);
9291 Result = Builder.CreateCall(LlwOpScwLoop,
9292 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
9295 Builder.CreateCall(LlwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
9299 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
9322     const Constant *PersonalityFn) const {
9323   return LoongArch::R4;
9327     const Constant *PersonalityFn) const {
9328   return LoongArch::R5;
9339   int RefinementSteps = VT.getScalarType() == MVT::f64 ? 2 : 1;
9340   return RefinementSteps;
9345 int &RefinementSteps,
9346 bool &UseOneConstNR,
9347 bool Reciprocal)
const {
9348 if (Subtarget.hasFrecipe()) {
9352 if (VT == MVT::f32 || (VT == MVT::f64 && Subtarget.hasBasicD()) ||
9353 (VT == MVT::v4f32 && Subtarget.hasExtLSX()) ||
9354 (VT == MVT::v2f64 && Subtarget.hasExtLSX()) ||
9355 (VT == MVT::v8f32 && Subtarget.hasExtLASX()) ||
9356 (VT == MVT::v4f64 && Subtarget.hasExtLASX())) {
9375 int &RefinementSteps)
const {
9376 if (Subtarget.hasFrecipe()) {
9380 if (VT == MVT::f32 || (VT == MVT::f64 && Subtarget.hasBasicD()) ||
9381 (VT == MVT::v4f32 && Subtarget.hasExtLSX()) ||
9382 (VT == MVT::v2f64 && Subtarget.hasExtLSX()) ||
9383 (VT == MVT::v8f32 && Subtarget.hasExtLASX()) ||
9384 (VT == MVT::v4f64 && Subtarget.hasExtLASX())) {
9389       return DAG.getNode(LoongArchISD::FRECIPE, DL, VT, Operand);
9401LoongArchTargetLowering::getConstraintType(StringRef Constraint) const {
9421   if (Constraint.size() == 1) {
9422     switch (Constraint[0]) {
9438     if (Constraint == "ZC" || Constraint == "ZB")
9447 return StringSwitch<InlineAsm::ConstraintCode>(ConstraintCode)
9454std::pair<unsigned, const TargetRegisterClass *>
9455LoongArchTargetLowering::getRegForInlineAsmConstraint(
9459   if (Constraint.size() == 1) {
9460     switch (Constraint[0]) {
9465 return std::make_pair(0U, &LoongArch::GPRRegClass);
9467 return std::make_pair(0U, &LoongArch::GPRNoR0R1RegClass);
9469 if (Subtarget.hasBasicF() && VT == MVT::f32)
9470 return std::make_pair(0U, &LoongArch::FPR32RegClass);
9471 if (Subtarget.hasBasicD() && VT == MVT::f64)
9472 return std::make_pair(0U, &LoongArch::FPR64RegClass);
9473 if (Subtarget.hasExtLSX() &&
9474 TRI->isTypeLegalForClass(LoongArch::LSX128RegClass, VT))
9475 return std::make_pair(0U, &LoongArch::LSX128RegClass);
9476 if (Subtarget.hasExtLASX() &&
9477 TRI->isTypeLegalForClass(LoongArch::LASX256RegClass, VT))
9478 return std::make_pair(0U, &LoongArch::LASX256RegClass);
9498     bool IsFP = Constraint[2] == 'f';
9499     std::pair<StringRef, StringRef> Temp = Constraint.split('$');
9500     std::pair<unsigned, const TargetRegisterClass *> R;
9505       unsigned RegNo = R.first;
9506       if (LoongArch::F0 <= RegNo && RegNo <= LoongArch::F31) {
9507 if (Subtarget.hasBasicD() && (VT == MVT::f64 || VT == MVT::Other)) {
9508 unsigned DReg = RegNo - LoongArch::F0 + LoongArch::F0_64;
9509 return std::make_pair(DReg, &LoongArch::FPR64RegClass);
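// Illustrative note (not part of the original source): the constraint handling
// above is what makes ordinary GCC-style inline asm pick the expected register
// classes. A minimal usage sketch (not from this file):
//
//   int add(int a, int b) {
//     int r;
//     asm("add.w %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
//     return r;
//   }
//
// "r" operands land in GPRRegClass, "f" in FPR32/FPR64 depending on the value
// type, and with LSX/LASX enabled vector operands go to LSX128/LASX256.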
9519void LoongArchTargetLowering::LowerAsmOperandForConstraint(
9523   if (Constraint.size() == 1) {
9524     switch (Constraint[0]) {
9528         uint64_t CVal = C->getSExtValue();
9531                                              Subtarget.getGRLenVT()));
9537         uint64_t CVal = C->getSExtValue();
9540                                              Subtarget.getGRLenVT()));
9546         if (C->getZExtValue() == 0)
9553         uint64_t CVal = C->getZExtValue();
9566#define GET_REGISTER_MATCHER
9567#include "LoongArchGenAsmMatcher.inc"
9573 std::string NewRegName = Name.second.str();
9579 BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
9580 if (!ReservedRegs.
test(Reg))
9597 const APInt &Imm = ConstNode->getAPIntValue();
9599 if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
9600 (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
9603 if (ConstNode->hasOneUse() &&
9604 ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
9605 (Imm - 8).isPowerOf2() || (Imm - 16).isPowerOf2()))
9611 if (ConstNode->hasOneUse() && !(Imm.sge(-2048) && Imm.sle(4095))) {
9612 unsigned Shifts = Imm.countr_zero();
9618 APInt ImmPop = Imm.ashr(Shifts);
9619 if (ImmPop == 3 || ImmPop == 5 || ImmPop == 9 || ImmPop == 17)
9623     APInt ImmSmall = APInt(Imm.getBitWidth(), 1ULL << Shifts, true);
9624     if ((Imm - ImmSmall).isPowerOf2() || (Imm + ImmSmall).isPowerOf2() ||
9625         (ImmSmall - Imm).isPowerOf2())
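// Illustrative note (not part of the original source): this hook tells the
// generic combiner when a multiply by a constant is better decomposed into
// shifts and adds. As a hedged example, 5 satisfies the (Imm - 1).isPowerOf2()
// check, so the DAG can become
//
//   (mul $x, 5)  ==>  (add (shl $x, 2), $x)
//
// which maps to a single alsl.w/alsl.d on LoongArch and is preferred over
// materializing the constant and issuing a mul.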
9635 Type *Ty,
unsigned AS,
9690 EVT MemVT = LD->getMemoryVT();
9691 if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
9702 return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
9711 if (
Y.getValueType().isVector())
9723 Type *Ty,
bool IsSigned)
const {
9724 if (Subtarget.is64Bit() && Ty->isIntegerTy(32))
9733 if (Subtarget.isSoftFPABI() && (
Type.isFloatingPoint() && !
Type.isVector() &&
9734 Type.getSizeInBits() < Subtarget.getGRLen()))
9744 Align &PrefAlign)
const {
9748 if (Subtarget.is64Bit()) {
9750 PrefAlign =
Align(8);
9753 PrefAlign =
Align(4);
9768bool LoongArchTargetLowering::splitValueIntoRegisterParts(
9770     unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
9771   bool IsABIRegCopy = CC.has_value();
9774   if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
9775       PartVT == MVT::f32) {
9790SDValue LoongArchTargetLowering::joinRegisterPartsIntoValue(
9792     MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
9793   bool IsABIRegCopy = CC.has_value();
9795   if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
9796       PartVT == MVT::f32) {
9813 if (VT == MVT::f16 && Subtarget.hasBasicF())
9819unsigned LoongArchTargetLowering::getNumRegistersForCallingConv(
9822 if (VT == MVT::f16 && Subtarget.hasBasicF())
9831                                                         unsigned Depth) const {
9832   EVT VT = Op.getValueType();
9834   unsigned Opc = Op.getOpcode();
9838   case LoongArchISD::VMSKLTZ:
9839   case LoongArchISD::XVMSKLTZ: {
9841     MVT SrcVT = Src.getSimpleValueType();
9846     if (OriginalDemandedBits.countr_zero() >= NumElts)
9850     APInt KnownUndef, KnownZero;
9866     if (KnownSrc.One[SrcBits - 1])
9868     else if (KnownSrc.Zero[SrcBits - 1])
9873             Src, DemandedSrcBits, DemandedElts, TLO.DAG, Depth + 1))
9880       Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
9903 unsigned Index)
const {
9912 unsigned Index)
const {
9916 return (EltVT == MVT::f32 || EltVT == MVT::f64) && Index == 0;
static SDValue lowerVECTOR_SHUFFLE_VSHUF(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into VSHUF.
static int getEstimateRefinementSteps(EVT VT, const LoongArchSubtarget &Subtarget)
static void emitErrorAndReplaceIntrinsicResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, StringRef ErrorMsg, bool WithChain=true)
static SDValue lowerVECTOR_SHUFFLEAsByteRotate(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE as byte rotate (if possible).
static SDValue checkIntrinsicImmArg(SDValue Op, unsigned ImmOp, SelectionDAG &DAG, bool IsSigned=false)
static SDValue lowerVECTOR_SHUFFLE_XVINSVE0(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into XVINSVE0 (if possible).
static SDValue performMOVFR2GR_SCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_VILVH(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VILVH (if possible).
static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI, unsigned ValNo, MVT ValVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsRet, Type *OrigTy)
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
static SDValue performSPLIT_PAIR_F64Combine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue performBITCASTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static MachineBasicBlock * emitSplitPairF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVectorBitSetImm(SDNode *Node, SelectionDAG &DAG)
static SDValue performSETCC_BITCASTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_XVPACKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPACKOD (if possible).
static std::optional< bool > matchSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue Val)
static SDValue combineAndNotIntoVANDN(SDNode *N, const SDLoc &DL, SelectionDAG &DAG)
Try to fold: (and (xor X, -1), Y) -> (vandn X, Y).
static SDValue lowerBUILD_VECTORAsBroadCastLoad(BuildVectorSDNode *BVOp, const SDLoc &DL, SelectionDAG &DAG)
#define CRC_CASE_EXT_BINARYOP(NAME, NODE)
static SDValue lowerVectorBitRevImm(SDNode *Node, SelectionDAG &DAG)
static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size, unsigned Depth)
static SDValue lowerVECTOR_SHUFFLEAsShift(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, const APInt &Zeroable)
Lower VECTOR_SHUFFLE as shift (if possible).
static SDValue lowerVECTOR_SHUFFLE_VSHUF4I(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into VSHUF4I (if possible).
static SDValue truncateVecElts(SDNode *Node, SelectionDAG &DAG)
static bool CC_LoongArch_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static MachineBasicBlock * insertDivByZeroTrap(MachineInstr &MI, MachineBasicBlock *MBB)
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG)
static SDValue lowerVectorBitClear(SDNode *Node, SelectionDAG &DAG)
static SDValue lowerVECTOR_SHUFFLE_VPACKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPACKEV (if possible).
static MachineBasicBlock * emitPseudoVMSKCOND(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue performSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue performSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue performVANDNCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
Do target-specific dag combines on LoongArchISD::VANDN nodes.
static void replaceVPICKVE2GRResults(SDNode *Node, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp)
static SDValue lowerVECTOR_SHUFFLEAsZeroOrAnyExtend(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const APInt &Zeroable)
Lower VECTOR_SHUFFLE as ZERO_EXTEND Or ANY_EXTEND (if possible).
static SDValue legalizeIntrinsicImmArg(SDNode *Node, unsigned ImmOp, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, bool IsSigned=false)
static cl::opt< MaterializeFPImm > MaterializeFPImmInsNum("loongarch-materialize-float-imm", cl::Hidden, cl::desc("Maximum number of instructions used (including code sequence " "to generate the value and moving the value to FPR) when " "materializing floating-point immediates (default = 3)"), cl::init(MaterializeFPImm3Ins), cl::values(clEnumValN(NoMaterializeFPImm, "0", "Use constant pool"), clEnumValN(MaterializeFPImm2Ins, "2", "Materialize FP immediate within 2 instructions"), clEnumValN(MaterializeFPImm3Ins, "3", "Materialize FP immediate within 3 instructions"), clEnumValN(MaterializeFPImm4Ins, "4", "Materialize FP immediate within 4 instructions"), clEnumValN(MaterializeFPImm5Ins, "5", "Materialize FP immediate within 5 instructions"), clEnumValN(MaterializeFPImm6Ins, "6", "Materialize FP immediate within 6 instructions " "(behaves same as 5 on loongarch64)")))
static SDValue lowerVECTOR_SHUFFLE_XVPERMI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into XVPERMI (if possible).
static SDValue emitIntrinsicWithChainErrorMessage(SDValue Op, StringRef ErrorMsg, SelectionDAG &DAG)
static bool CC_LoongArchAssign2GRLen(unsigned GRLen, CCState &State, CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, MVT ValVT2, MVT LocVT2, ISD::ArgFlagsTy ArgFlags2)
static unsigned getLoongArchWOpcode(unsigned Opcode)
const MCPhysReg ArgFPR64s[]
static MachineBasicBlock * emitPseudoCTPOP(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue performMOVGR2FR_WCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
#define IOCSRWR_CASE(NAME, NODE)
#define CRC_CASE_EXT_UNARYOP(NAME, NODE)
static SDValue lowerVECTOR_SHUFFLE_VPACKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPACKOD (if possible).
static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT, SDValue Src, const SDLoc &DL)
static SDValue isNOT(SDValue V, SelectionDAG &DAG)
static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Dispatching routine to lower various 256-bit LoongArch vector shuffles.
static SDValue lowerVECTOR_SHUFFLE_VREPLVEI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into VREPLVEI (if possible).
static MachineBasicBlock * emitPseudoXVINSGR2VR(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
const MCPhysReg PreserveNoneArgGPRs[]
static void fillVector(ArrayRef< SDValue > Ops, SelectionDAG &DAG, SDLoc DL, const LoongArchSubtarget &Subtarget, SDValue &Vector, EVT ResTy)
static SDValue fillSubVectorFromBuildVector(BuildVectorSDNode *Node, SelectionDAG &DAG, SDLoc DL, const LoongArchSubtarget &Subtarget, EVT ResTy, unsigned first)
static bool isSelectPseudo(MachineInstr &MI)
static SDValue foldBinOpIntoSelectIfProfitable(SDNode *BO, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue lowerVectorSplatImm(SDNode *Node, unsigned ImmOp, SelectionDAG &DAG, bool IsSigned=false)
const MCPhysReg ArgGPRs[]
static SDValue lowerVECTOR_SHUFFLE_XVPERM(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into XVPERM (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVILVL(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVILVL (if possible).
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, int NumOp, unsigned ExtOpc=ISD::ANY_EXTEND)
static void replaceVecCondBranchResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp)
#define ASRT_LE_GT_CASE(NAME)
static SDValue lowerVECTOR_SHUFFLE_XVPACKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPACKEV (if possible).
static SDValue performBR_CCCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static void computeZeroableShuffleElements(ArrayRef< int > Mask, SDValue V1, SDValue V2, APInt &KnownUndef, APInt &KnownZero)
Compute whether each element of a shuffle is zeroable.
static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue widenShuffleMask(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
static MachineBasicBlock * emitVecCondBranchPseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static bool canonicalizeShuffleVectorByLane(const SDLoc &DL, MutableArrayRef< int > Mask, MVT VT, SDValue &V1, SDValue &V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Shuffle vectors by lane to generate more optimized instructions.
static SDValue lowerVECTOR_SHUFFLE_XVILVH(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVILVH (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVSHUF(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVSHUF (if possible).
static void replaceCMP_XCHG_128Results(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static SDValue lowerVectorPickVE2GR(SDNode *N, SelectionDAG &DAG, unsigned ResOp)
static SDValue performBITREV_WCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
#define IOCSRRD_CASE(NAME, NODE)
static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2, ArrayRef< int > Mask)
Attempts to match vector shuffle as byte rotation.
static SDValue lowerVECTOR_SHUFFLE_XVPICKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPICKEV (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVREPLVEI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
Lower VECTOR_SHUFFLE into XVREPLVEI (if possible).
static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode, unsigned ScalarSizeInBits, ArrayRef< int > Mask, int MaskOffset, const APInt &Zeroable)
Attempts to match a shuffle mask against the VBSLL, VBSRL, VSLLI and VSRLI instruction.
static SDValue lowerVECTOR_SHUFFLE_VILVL(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VILVL (if possible).
static SDValue lowerVectorBitClearImm(SDNode *Node, SelectionDAG &DAG)
static MachineBasicBlock * emitBuildPairF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLEAsLanePermuteAndShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE as lane permute and then shuffle (if possible).
static SDValue performVMSKLTZCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static void replaceINTRINSIC_WO_CHAINResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_VPICKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPICKOD (if possible).
static Intrinsic::ID getIntrinsicForMaskedAtomicRMWBinOp(unsigned GRLen, AtomicRMWInst::BinOp BinOp)
static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS, ISD::CondCode &CC, SelectionDAG &DAG)
static Register allocateArgGPR(CCState &State)
static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT, ArrayRef< int > Mask, SmallVectorImpl< int > &RepeatedMask)
Test whether a shuffle mask is equivalent within each sub-lane.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static CodeModel::Model getCodeModel(const PPCSubtarget &S, const TargetMachine &TM, const MachineOperand &MO)
This file defines the SmallSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static bool inRange(const MCExpr *Expr, int64_t MinValue, int64_t MaxValue, bool AllowSymbol=false)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static bool isSequentialOrUndefInRange(ArrayRef< int > Mask, unsigned Pos, unsigned Size, int Low, int Step=1)
Return true if every element in Mask, beginning from position Pos and ending in Pos + Size,...
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
unsigned countr_zero() const
Count the number of trailing zero bits.
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
int64_t getSExtValue() const
Get sign extended value.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
An instruction that atomically checks whether a specified value is in a memory location,...
Value * getCompareOperand()
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
an instruction that atomically reads a memory location, combines it with another value,...
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ USubCond
Subtract only if no unsigned overflow.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
Value * getPointerOperand()
bool isFloatingPointOperation() const
BinOp getOperation() const
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
LLVM Basic Block Representation.
bool test(unsigned Idx) const
size_type count() const
count - Returns the number of bits which are set.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
LLVM_ABI void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
LLVM_ABI void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
CCValAssign - Represent assignment of one arg/retval to a location.
static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT, LocInfo HTP, unsigned ExtraInfo=0)
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP)
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
int64_t getLocMemOffset() const
unsigned getValNo() const
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
This class represents a function call, abstracting a target machine's calling convention.
const APFloat & getValueAPF() const
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
uint64_t getZExtValue() const
int64_t getSExtValue() const
This is an important base class in LLVM.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
The size in bits of the pointer representation in a given address space.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Argument * getArg(unsigned i) const
Common base class shared among various IRBuilders.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Class to represent integer types.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
LoongArchMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private Lo...
void addSExt32Register(Register Reg)
const LoongArchRegisterInfo * getRegisterInfo() const override
const LoongArchInstrInfo * getInstrInfo() const override
unsigned getGRLen() const
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override
Return true if result of the specified node is used by a return node only.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
TargetLowering::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(const AtomicCmpXchgInst *CI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const override
Perform a masked cmpxchg using a target-specific intrinsic.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
std::pair< bool, uint64_t > isImmVLDILegalForMode1(const APInt &SplatValue, const unsigned SplatBitSize) const
Check if a constant splat can be generated using [x]vldi, where imm[12] is 1.
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
bool isExtractVecEltCheap(EVT VT, unsigned Index) const override
Return true if extraction of a scalar element from the given vector type at the given index is cheap.
LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Determine if the target supports unaligned memory accesses.
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, Align &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override
Perform a masked atomicrmw using a target-specific intrinsic.
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool signExtendConstant(const ConstantInt *CI) const override
Return true if this constant should be sign extended when promoting to a larger type.
TargetLowering::AtomicExpansionKind shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const override
Returns true if arguments should be sign-extended in lib calls.
bool shouldScalarizeBinop(SDValue VecOp) const override
Try to convert an extract element of a vector binary operation into an extract element followed by a ...
bool isFPImmVLDILegal(const APFloat &Imm, EVT VT) const
bool shouldExtendTypeInLibCall(EVT Type) const override
Returns true if arguments should be extended in lib calls.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
void emitExpandAtomicRMW(AtomicRMWInst *AI) const override
Perform a atomicrmw expansion using a target-specific way.
ISD::NodeType getExtendForAtomicCmpSwapArg() const override
Returns how the platform's atomic compare and swap expects its comparison value to be extended (ZERO_...
LoongArchTargetLowering(const TargetMachine &TM, const LoongArchSubtarget &STI)
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool hasAndNotCompare(SDValue Y) const override
Return true if the target should transform: (X & Y) == Y ---> (~X & Y) == 0 (X & Y) !...
SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps) const override
Return a reciprocal estimate value for the input operand.
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
Wrapper class representing physical registers. Should be passed by value.
bool hasFeature(unsigned Feature) const
static MVT getFloatingPointVT(unsigned BitWidth)
bool is128BitVector() const
Return true if this is a 128-bit vector type.
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool is256BitVector() const
Return true if this is a 256-bit vector type.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
MVT getHalfNumVectorElementsVT() const
Return a VT for a vector type with the same element type but half the number of elements.
MVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
void push_back(MachineInstr *MI)
void setCallFrameSize(unsigned N)
Set the call frame size on entry to this basic block.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
LLVM_ABI void collectDebugValues(SmallVectorImpl< MachineInstr * > &DbgValues)
Scan instructions immediately following MI and collect any matching DBG_VALUEs.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value,.
MachineOperand class - Representation of each machine instruction operand.
void setIsKill(bool Val=true)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
LLVM_ABI bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
size_t use_size() const
Return the number of uses of this node.
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
LLVM_ABI SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
bool isSafeToSpeculativelyExecute(unsigned Opcode) const
Some opcodes may create immediate undefined behavior when used with some values (integer division-by-...
SDValue getExtractSubvector(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Return the VT typed sub-vector of Vec at Idx.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getInsertSubvector(const SDLoc &DL, SDValue Vec, SDValue SubVec, unsigned Idx)
Insert SubVec at the Idx element of Vec.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
LLVM_ABI SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
LLVM_ABI std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI SDValue WidenVector(const SDValue &N, const SDLoc &DL)
Widen the vector up to the next power of two using INSERT_SUBVECTOR.
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI SDValue getCondCode(ISD::CondCode Cond)
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
ArrayRef< int > getMask() const
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
typename SuperClass::const_iterator const_iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr size_t size() const
size - Get the string size.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a targte-dependent sequence of memory operands that...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
void setMaxBytesForAlignment(unsigned MaxBytes)
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
std::vector< ArgListEntry > ArgListTy
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
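The demanded-bits entry points above are usually reached from a target DAG combine. A sketch using the convenience overload of SimplifyDemandedBits that takes a DAGCombinerInfo; MYISD::EXT_B and combineExtByte are hypothetical names:

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

using namespace llvm;

// Sketch: combine callback for a hypothetical MYISD::EXT_B node that only
// reads the low 8 bits of its operand, so higher source bits may be
// simplified away.
static SDValue combineExtByte(SDNode *N, SelectionDAG &DAG,
                              TargetLowering::DAGCombinerInfo &DCI) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Src = N->getOperand(0);
  APInt Demanded = APInt::getLowBitsSet(Src.getScalarValueSizeInBits(), 8);
  if (TLI.SimplifyDemandedBits(Src, Demanded, DCI))
    return SDValue(N, 0); // the operand was rewritten; report the node changed
  return SDValue();
}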
TargetLowering(const TargetLowering &)=delete
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
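A sketch of makeLibCall used to expand an f128 add into a compiler-rt call. The type of the libcall parameter has varied across LLVM revisions (RTLIB::Libcall vs. RTLIB::LibcallImpl); the RTLIB::Libcall-taking overload is assumed here, and expandF128Add is an illustrative helper, not an existing function:

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

using namespace llvm;

// Sketch: expand an f128 add into a compiler-rt call.
static SDValue expandF128Add(SDValue Op, SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc DL(Op);
  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
  TargetLowering::MakeLibCallOptions CallOptions;
  // makeLibCall returns {result, output chain}; only the result is needed
  // for a chainless arithmetic node.
  return TLI.makeLibCall(DAG, RTLIB::ADD_F128, MVT::f128, Ops, CallOptions, DL)
      .first;
}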
Primary interface to the complete machine description for the target machine.
bool useTLSDESC() const
Returns true if this target uses TLS Descriptors.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
CodeModel::Model getCodeModel() const
Returns the code model.
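These TargetMachine queries typically drive the choice of TLS access sequence during global-TLS-address lowering. The enum and the decision order below are illustrative only, not the policy of any particular backend:

#include "llvm/IR/GlobalValue.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

// Sketch: the enum and the ordering of the checks are made up for the example.
enum class TLSAccess { LocalExec, Descriptor, Emulated, GeneralDynamic };

static TLSAccess pickTLSAccess(const TargetMachine &TM, const GlobalValue *GV) {
  if (TM.useEmulatedTLS())
    return TLSAccess::Emulated;       // __emutls_* runtime helpers
  if (TM.shouldAssumeDSOLocal(GV))
    return TLSAccess::LocalExec;      // symbol resolves within this module/DSO
  if (TM.useTLSDESC())
    return TLSAccess::Descriptor;     // TLS descriptor sequence
  return TLSAccess::GeneralDynamic;   // fall back to __tls_get_addr
}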
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getIntegerBitWidth() const
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
This class is used to represent EVT's, which are used to parameterize some operations.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]

Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ PreserveMost
Used for runtime calls that preserve most registers.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ PreserveNone
Used for runtime calls that preserve no general registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ BR_JT
BR_JT - Jumptable branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values, following IEEE-754 definition...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
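The generic opcodes listed above are combined with SelectionDAG::getNode when lowering or combining. A small sketch that builds an element-wise signed minimum from SETCC and VSELECT; it assumes the target uses integer vectors of the same element width as setcc results, and buildSignedMin is an illustrative name:

#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

// Sketch: build (vselect (setcc a, b, setlt), a, b), i.e. an element-wise
// signed minimum over the vector type VT.
static SDValue buildSignedMin(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                              SDValue A, SDValue B) {
  EVT CCVT = VT.changeVectorElementTypeToInteger();
  SDValue Cond = DAG.getSetCC(DL, CCVT, A, B, ISD::SETLT);
  return DAG.getNode(ISD::VSELECT, DL, VT, Cond, A, B);
}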
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ABI bool isFreezeUndef(const SDNode *N)
Return true if the specified node is FREEZE(UNDEF).
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LLVM_ABI bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
LLVM_ABI NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
bool isIntEqualitySetCC(CondCode Code)
Return true if this is a setcc instruction that performs an equality comparison when used with intege...
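A sketch showing how the condition-code and build-vector helpers above are typically used in a combine that moves an all-zeros vector operand to the right-hand side of a setcc; canonicalizeZeroOnRHS is an illustrative name:

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Sketch: rewrite (setcc all-zeros, x, cc) as (setcc x, all-zeros, cc') so
// later patterns only need to match the constant on the right-hand side.
static SDValue canonicalizeZeroOnRHS(SDValue SetCC, SelectionDAG &DAG) {
  SDValue LHS = SetCC.getOperand(0);
  SDValue RHS = SetCC.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();

  if (ISD::isBuildVectorAllZeros(LHS.getNode()) &&
      !ISD::isBuildVectorAllZeros(RHS.getNode())) {
    ISD::CondCode Swapped = ISD::getSetCCSwappedOperands(CC);
    return DAG.getSetCC(SDLoc(SetCC), SetCC.getValueType(), RHS, LHS, Swapped);
  }
  return SDValue();
}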
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
ABI getTargetABI(StringRef ABIName)
InstSeq generateInstSeq(int64_t Val)
LLVM_ABI Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
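A sketch of querying the runtime-library tables for a soft-float conversion; the header that declares these getters has moved between LLVM revisions, so the include path below is an assumption, and fpToSIntLibcall is an illustrative helper:

#include "llvm/CodeGen/RuntimeLibcalls.h" // location varies across revisions
#include "llvm/CodeGen/ValueTypes.h"
#include <cassert>

using namespace llvm;

// Sketch: look up the soft-float routine for an FP -> signed-int conversion
// and reject type pairs that have no runtime implementation.
static RTLIB::Libcall fpToSIntLibcall(EVT SrcVT, EVT DstVT) {
  RTLIB::Libcall LC = RTLIB::getFPTOSINT(SrcVT, DstVT);
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "unsupported FP_TO_SINT libcall");
  return LC;
}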
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Sequence
A sequence of states that a pointer may go through in which an objc_retain and objc_release are actua...
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
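BuildMI is the usual way to materialize MachineInstrs, for example inside a custom inserter. A sketch with a placeholder opcode (AddOpc stands in for a real target instruction; emitAdd is an illustrative name):

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Sketch: emit "DstReg = <AddOpc> SrcA, SrcB" before MI.
static void emitAdd(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const TargetInstrInfo &TII, const DebugLoc &DL,
                    unsigned AddOpc, Register DstReg, Register SrcA,
                    Register SrcB) {
  BuildMI(MBB, MI, DL, TII.get(AddOpc), DstReg)
      .addReg(SrcA)
      .addReg(SrcB, getKillRegState(true)); // SrcB dies here in this sketch
}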
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
@ Kill
The last use of a register.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
constexpr RegState getKillRegState(bool B)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isIntOrFPConstant(SDValue V)
Return true if V is either an integer or FP constant.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
LLVM_ABI bool widenShuffleMaskElts(int Scale, ArrayRef< int > Mask, SmallVectorImpl< int > &ScaledMask)
Try to transform a shuffle mask by replacing elements with the scaled index for an equivalent mask of...
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
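The integer predicates above (isInt, isUInt, isPowerOf2_64, Log2_64, isMask_64, isShiftedMask_64) commonly back immediate-classification logic in instruction selection. An illustrative classifier with made-up categories:

#include "llvm/Support/MathExtras.h"
#include <cstdint>

// Sketch: classify a 64-bit immediate the way an isel predicate might; the
// categories are invented for the example.
enum class ImmKind { Simm12, Uimm12, Pow2, LowMask, Other };

static ImmKind classifyImm(int64_t Imm) {
  if (llvm::isInt<12>(Imm))
    return ImmKind::Simm12;                 // fits a signed 12-bit field
  if (llvm::isUInt<12>(Imm))
    return ImmKind::Uimm12;                 // fits an unsigned 12-bit field
  if (llvm::isPowerOf2_64(uint64_t(Imm)))
    return ImmKind::Pow2;                   // shift amount is Log2_64(Imm)
  if (llvm::isMask_64(uint64_t(Imm)))
    return ImmKind::LowMask;                // contiguous ones from bit 0
  return ImmKind::Other;
}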
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
AtomicOrdering
Atomic ordering for LLVM's memory model.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
constexpr unsigned BitWidth
std::string join_items(Sep Separator, Args &&... Items)
Joins the strings in the parameter pack Items, adding Separator between the elements....
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
PointerUnion< const Value *, const PseudoSourceValue * > ValueType
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
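A sketch combining the SDValue constant predicates above to recognize a select of all-ones and zero, looking through bitcasts on the arms; the helper name is illustrative:

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Sketch: recognize (select cond, -1, 0), a shape that can often be folded
// into a sign-extended condition.
static bool isSelectOfAllOnesAndZero(SDValue N) {
  if (N.getOpcode() != ISD::SELECT)
    return false;
  SDValue TrueV = peekThroughBitcasts(N.getOperand(1));
  SDValue FalseV = peekThroughBitcasts(N.getOperand(2));
  return isAllOnesConstant(TrueV) && isNullConstant(FalseV);
}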
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool is256BitVector() const
Return true if this is a 256-bit vector type.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
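The EVT queries above are the building blocks of type-legality checks. An illustrative predicate for a 128/256-bit SIMD register file; the rule itself is made up for the example, and fitsSimdRegister is a hypothetical name:

#include "llvm/CodeGen/ValueTypes.h"

using namespace llvm;

// Sketch: a legality-style query over EVTs.  The 128-bit/256-bit buckets are
// stand-ins for LSX/LASX-like units.
static bool fitsSimdRegister(EVT VT) {
  if (!VT.isVector() || !VT.isSimple())
    return false;
  // Require byte-multiple element sizes.
  if (VT.getScalarSizeInBits() % 8 != 0)
    return false;
  return VT.is128BitVector() || VT.is256BitVector();
}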
Align getNonZeroOrigAlign() const
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
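A sketch using MachinePointerInfo::getFixedStack to tag a load from a fixed stack object, as is common when lowering incoming call arguments; loadFromFixedStack is an illustrative helper and uses the entry token as its chain:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

// Sketch: load a value from fixed stack object FI, tagging the memory operand
// with getFixedStack.  PtrVT and VT are assumed to be legal for the target.
static SDValue loadFromFixedStack(SelectionDAG &DAG, const SDLoc &DL, int FI,
                                  EVT PtrVT, EVT VT) {
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  return DAG.getLoad(VT, DL, DAG.getEntryNode(), FIN,
                     MachinePointerInfo::getFixedStack(MF, FI));
}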
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const
LLVM_ABI SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)