#define DEBUG_TYPE "mips-lower"

    cl::desc("MIPS: Don't trap on integer division by zero."),

  Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
  Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64

    unsigned &NumIntermediates, MVT &RegisterVT) const {
  return NumIntermediates;
    unsigned Flag) const {
    unsigned Flag) const {
    unsigned Flag) const {
    unsigned Flag) const {
    unsigned Flag) const {
                         N->getOffset(), Flag);
  if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||

  EVT Ty = N->getValueType(0);
  unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
  unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
                     N->getOperand(0), N->getOperand(1));
  if (N->hasAnyUseOfValue(0)) {
  if (N->hasAnyUseOfValue(1)) {

         "Illegal Condition Code");
  if (!LHS.getValueType().isFloatingPoint())
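  // Rebuild the conditional move with the opposite branch sense, swapping the
  // true/false operands so the overall select semantics are preserved.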
  SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);
  SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
  return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
                     ValueIfFalse, FCC, ValueIfTrue, Glue);
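  // Pattern match EXT:
  //   $dst = and ((sra or srl) $src, pos), (2**size - 1)
  //   => ext $dst, $src, pos, size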
  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);
  unsigned SMPos, SMSize;

  if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
  if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||

  NewOperand = FirstOperand;
  return DAG.getNode(Opc, DL, ValTy, NewOperand,
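  // Pattern match INS:
  //   $dst = or (and $src1, mask0), (and (shl $src2, pos), mask1)
  // where mask1 = ((1 << size) - 1) << pos and mask0 = ~mask1
  //   => ins $dst, $src2, size, pos, $src1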
  SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
  unsigned SMPos0, SMSize0, SMPos1, SMSize1;

  if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
      And1.getOperand(0).getOpcode() == ISD::SHL) {
  if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
  if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
  if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))

  EVT ValTy = N->getValueType(0);
  if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))

  if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
      ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
       (SMSize0 + SMPos0 <= 32))) {

  if (!(CN1 = dyn_cast<ConstantSDNode>(And1->getOperand(1))))
  if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))

  EVT ValTy = N->getOperand(0)->getValueType(0);
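  // Only fold the multiply into a madd/msub when this add/sub is its sole
  // user; otherwise the multiply would have to be emitted anyway.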
  if (!Mult.hasOneUse())

  SDValue MultLHS = Mult->getOperand(0);
  SDValue MultRHS = Mult->getOperand(1);
  if (!IsSigned && !IsUnsigned)

  std::tie(BottomHalf, TopHalf) =
  EVT VTs[2] = {MVT::i32, MVT::i32};

      !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)

  EVT ValTy = N->getValueType(0);

  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue SecondOperand = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  unsigned SMPos, SMSize;

  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
  if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())

  unsigned Opc = N->getOpcode();

  if (auto *C = dyn_cast<ConstantSDNode>(Y))
    return C->getAPIntValue().ule(15);

          N->getOperand(0).getOpcode() == ISD::SRL) ||
          N->getOperand(0).getOpcode() == ISD::SHL)) &&
         "Expected shift-shift mask");
  if (N->getOperand(0).getValueType().isVector())

  switch (Op.getOpcode())
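// Emit a TEQ (trap-if-equal) comparing the divisor against $zero, so that
// integer division by zero traps at run time unless the check is disabled
// by the option declared above.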
                                bool Is64Bit, bool IsMicroMips) {
          TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
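  // Dispatch each pseudo-instruction to its expansion helper: 8- and 16-bit
  // atomics take the partword path (with the operand width in bytes), while
  // 32- and 64-bit atomics use the full-width path.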
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_SWAP_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_SWAP_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwapPartword(MI, BB, 1);
  case Mips::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwapPartword(MI, BB, 2);
  case Mips::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, BB);

  case Mips::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::PseudoSDIV:
  case Mips::PseudoUDIV:

  case Mips::SDIV_MM_Pseudo:
  case Mips::UDIV_MM_Pseudo:

  case Mips::DIV_MMR6:
  case Mips::DIVU_MMR6:
  case Mips::MOD_MMR6:
  case Mips::MODU_MMR6:

  case Mips::PseudoDSDIV:
  case Mips::PseudoDUDIV:

  case Mips::PseudoSELECT_I:
  case Mips::PseudoSELECT_I64:
  case Mips::PseudoSELECT_S:
  case Mips::PseudoSELECT_D32:
  case Mips::PseudoSELECT_D64:
    return emitPseudoSELECT(MI, BB, false, Mips::BNE);
  case Mips::PseudoSELECTFP_F_I:
  case Mips::PseudoSELECTFP_F_I64:
  case Mips::PseudoSELECTFP_F_S:
  case Mips::PseudoSELECTFP_F_D32:
  case Mips::PseudoSELECTFP_F_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
  case Mips::PseudoSELECTFP_T_I:
  case Mips::PseudoSELECTFP_T_I64:
  case Mips::PseudoSELECTFP_T_S:
  case Mips::PseudoSELECTFP_T_D32:
  case Mips::PseudoSELECTFP_T_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
  case Mips::PseudoD_SELECT_I:
  case Mips::PseudoD_SELECT_I64:
    return emitPseudoD_SELECT(MI, BB);

    return emitLDR_W(MI, BB);

    return emitLDR_D(MI, BB);

    return emitSTR_W(MI, BB);

    return emitSTR_D(MI, BB);
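  // emitAtomicBinary: map the pseudo onto the post-RA pseudo that the
  // expansion pass lowers to an ll/sc loop; the min/max variants need one
  // extra scratch register.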
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I32:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I32:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I32:
    AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I64:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I64:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I64:
    AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_MIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  if (NeedsAdditionalReg) {
  MI.eraseFromParent();

                                     unsigned SrcReg) const {
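  // Shifting left by (32 - 8 * Size) bits and arithmetic-shifting back
  // sign-extends the Size-byte value into the full 32-bit register.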
  int64_t ShiftImm = 32 - (Size * 8);

         "Unsupported size for EmitAtomicBinaryPartial.");
  unsigned AtomicOp = 0;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_NAND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I8:
    AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I16:
    AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I8:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I16:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I8:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I16:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_MIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
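  // Mask for the byte (0xff) or halfword (0xffff) lane within the aligned
  // word.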
  int64_t MaskImm = (Size == 1) ? 255 : 65535;

  if (NeedsAdditionalReg) {
  MI.eraseFromParent();
  assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
          MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
         "Unsupported atomic pseudo for EmitAtomicCmpSwap.");

  const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;

  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
                          ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
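  // Copy the incoming values into fresh virtual registers so the post-RA
  // expansion owns the last use of each operand.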
  Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
  Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));

  MI.eraseFromParent();
1925 "Unsupported size for EmitAtomicCmpSwapPartial.");
1952 unsigned AtomicOp =
MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
1953 ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
1954 : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;
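  // Compute the word-aligned address plus the mask and shift that select the
  // byte or halfword lane being compared and swapped.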
  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
  BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)

  MI.eraseFromParent();

                     FCC0, Dest, CondRes);

         "Floating point operand expected.");
  EVT Ty = Op.getValueType();

  EVT Ty = Op.getValueType();

  Args.push_back(Entry);

      .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  EVT Ty = Op.getValueType();

  EVT Ty = Op.getValueType();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);

  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
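  // A vararg slot is ArgSlotSizeInBytes wide; on big-endian targets an
  // argument smaller than its slot lives in the slot's high-order bytes,
  // hence the adjustment below.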
  unsigned ArgSizeInBytes =
  unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
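// FCOPYSIGN is lowered by moving the sign bit of the second operand into the
// first, using the EXT/INS bit-field instructions when the subtarget provides
// them (HasExtractInsert) and mask/shift arithmetic otherwise.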
                                bool HasExtractInsert) {
  EVT TyX = Op.getOperand(0).getValueType();
  EVT TyY = Op.getOperand(1).getValueType();

  if (HasExtractInsert) {
  if (TyX == MVT::f32)

                                bool HasExtractInsert) {
  unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
  unsigned WidthY = Op.getOperand(1).getValueSizeInBits();

  if (HasExtractInsert) {
  if (WidthX > WidthY)
  else if (WidthY > WidthX)
  if (WidthX > WidthY)
  else if (WidthY > WidthX)

                           bool HasExtractInsert) const {
                     Op.getOperand(0), Const1);
  if (HasExtractInsert)
  if (Op.getValueType() == MVT::f32)

                           bool HasExtractInsert) const {
  if (HasExtractInsert)
  if (Op.getConstantOperandVal(0) != 0) {
        "return address can be determined only for current frame");

  EVT VT = Op.getValueType();

  if (Op.getConstantOperandVal(0) != 0) {
        "return address can be determined only for current frame");

  MVT VT = Op.getSimpleValueType();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
  unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
  unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;

                           DL, VTList, Cond, ShiftRightHi,

  EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
  EVT BasePtrVT = Ptr.getValueType();
                     LD->getMemOperand());

  EVT MemVT = LD->getMemoryVT();
  if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
      ((MemVT != MVT::i32) && (MemVT != MVT::i64)))

  EVT VT = Op.getValueType();
  assert((VT == MVT::i32) || (VT == MVT::i64));

  SDValue Ops[] = { SRL, LWR.getValue(1) };

      ((MemVT == MVT::i32) || (MemVT == MVT::i64)))

  EVT ValTy = Op->getValueType(0);
static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };

static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };

  if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
    else if (ArgFlags.isZExt())
  if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    else if (ArgFlags.isZExt())

  bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
  bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
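  // O32 allocation: i32 and (when floats must go in integer registers) f32
  // arguments take A0-A3; an f64 in integer registers and the halves of an
  // i64 must start at an even register (A0 or A2).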
  if (ValVT == MVT::i32 && isVectorFloat) {
    if (Reg == Mips::A2)
  } else if (ValVT == MVT::i32 ||
             (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
    if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
  } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
    if (Reg == Mips::A1 || Reg == Mips::A3)
  if (ValVT == MVT::f32) {
    if (Reg2 == Mips::A1 || Reg2 == Mips::A3)

  static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };

  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);

  static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };

  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
#include "MipsGenCallingConv.inc"

  return CC_Mips_FixedArg;

                                           const SDLoc &DL, bool IsTailCall,

    std::deque<std::pair<unsigned, SDValue>> &RegsToPass, bool IsPICCall,
    bool GlobalOrExternal, bool InternalLinkage,

  if (IsPICCall && !InternalLinkage && IsCallReloc) {
    unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
    RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
  for (auto &R : RegsToPass) {

  for (auto &R : RegsToPass)

  assert(Mask && "Missing call preserved mask for calling convention");

    Function *F = G->getGlobal()->getParent()->getFunction(Sym);
    if (F && F->hasFnAttribute("__Mips16RetHelper")) {
  switch (MI.getOpcode()) {
  case Mips::JALRPseudo:
  case Mips::JALR64Pseudo:
  case Mips::JALR16_MM:
  case Mips::JALRC16_MMR6:
  case Mips::TAILCALLREG:
  case Mips::TAILCALLREG64:
  case Mips::TAILCALLR6REG:
  case Mips::TAILCALL64R6REG:
  case Mips::TAILCALLREG_MM:
  case Mips::TAILCALLREG_MMR6: {
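    // Dig the call target out of the node so the instruction can be annotated
    // with a R_MIPS_JALR relocation against the symbol, letting the linker
    // relax the indirect call into a direct one.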
        Node->getNumOperands() < 1 ||
        Node->getOperand(0).getNumOperands() < 2) {
    const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
            dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
      if (!isa<Function>(G->getGlobal())) {
        LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
                          << G->getGlobal()->getName() << "\n");
      Sym = G->getGlobal()->getName();
            dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
      Sym = ES->getSymbol();

      dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());
  unsigned ReservedArgArea =
  CCInfo.AllocateStack(ReservedArgArea, Align(1));

  unsigned StackSize = CCInfo.getStackSize();

  bool InternalLinkage = false;
    IsTailCall = isEligibleForTailCallOptimization(
      InternalLinkage = G->getGlobal()->hasInternalLinkage();
      IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
                     G->getGlobal()->hasPrivateLinkage() ||
                     G->getGlobal()->hasHiddenVisibility() ||
                     G->getGlobal()->hasProtectedVisibility());

           "site marked musttail");

  StackSize = alignTo(StackSize, StackAlignment);

  if (!(IsTailCall || MemcpyInByVal))

  std::deque<std::pair<unsigned, SDValue>> RegsToPass;

  CCInfo.rewindByValRegsInfo();

  for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
    SDValue Arg = OutVals[OutIdx];
    bool UseUpperBits = false;

    if (Flags.isByVal()) {
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
             "Do not tail-call optimize if there is a byval argument.");
      passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
      CCInfo.nextInRegsParam();

      if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
          (ValVT == MVT::f64 && LocVT == MVT::i64) ||
          (ValVT == MVT::i64 && LocVT == MVT::f64))
      else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
        Register LocRegHigh = ArgLocs[++i].getLocReg();
        RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
        RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));

      UseUpperBits = true;
      UseUpperBits = true;
      UseUpperBits = true;

      unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();

      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));

        Chain, Arg, DL, IsTailCall, DAG));
  if (!MemOpChains.empty())

  bool GlobalOrExternal = false, IsCallReloc = false;

  if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {
    if (auto *F = dyn_cast<Function>(N->getGlobal())) {
      if (F->hasFnAttribute("long-call"))
        UseLongCalls = true;
      else if (F->hasFnAttribute("short-call"))
        UseLongCalls = false;
    if (InternalLinkage)
    GlobalOrExternal = true;

    const char *Sym = S->getSymbol();
    GlobalOrExternal = true;

  getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
              IsCallReloc, CLI, Callee, Chain);

  if (!(MemcpyInByVal)) {

  return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
SDValue MipsTargetLowering::LowerCallResult(

      dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
  CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,

  for (unsigned i = 0; i != RVLocs.size(); ++i) {
                                     RVLocs[i].getLocVT(), InGlue);

    unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
SDValue MipsTargetLowering::LowerFormalArguments(

  std::vector<SDValue> OutChains;

  if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
        "Functions with the interrupt attribute cannot have arguments!");

  CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
                           CCInfo.getInRegsParamsCount() > 0);

  unsigned CurArgIdx = 0;
  CCInfo.rewindByValRegsInfo();

  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    if (Ins[InsIdx].isOrigArg()) {
      std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[InsIdx].getOrigArgIndex();
    if (Flags.isByVal()) {
      assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
      copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
                    FirstByValReg, LastByValReg, VA, CCInfo);
      CCInfo.nextInRegsParam();

      if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
          (RegVT == MVT::i64 && ValVT == MVT::f64) ||
          (RegVT == MVT::f64 && ValVT == MVT::i64))
      else if (ABI.IsO32() && RegVT == MVT::i32 && ValVT == MVT::f64) {
                               ArgValue, ArgValue2);

          LocVT, DL, Chain, FIN,
      OutChains.push_back(ArgValue.getValue(1));
  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    if (ArgLocs[i].needsCustom()) {

    if (Ins[InsIdx].Flags.isSRet()) {

  writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);

  if (!OutChains.empty()) {
    OutChains.push_back(Chain);

  MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_Mips);
bool MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                       bool IsSigned) const {

  CCInfo.AnalyzeReturn(Outs, RetCC_Mips);

  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    bool UseUpperBits = false;
      UseUpperBits = true;
      UseUpperBits = true;
      UseUpperBits = true;

    unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();

    unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;

    return LowerInterruptReturn(RetOps, DL, DAG);
MipsTargetLowering::getConstraintType(StringRef Constraint) const {

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

  if (Constraint == "ZC")