#define DEBUG_TYPE "mips-lower"
               cl::desc("MIPS: Don't trap on integer division by zero."),
  Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
  Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  IntermediateVT = RegisterVT;
  return NumIntermediates;
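// Tails of the getTargetNode overloads; one exists for each of the
// GlobalAddress, ExternalSymbol, BlockAddress, JumpTable and ConstantPool
// node kinds, each wrapping the target variant of the node with a
// relocation flag.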
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                                          unsigned Flag) const {
                                   N->getOffset(), Flag);
  if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
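  // MIPS div/divu leave the quotient in LO and the remainder in HI; the
  // 64-bit accumulator halves are used when the result type is i64.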
  EVT Ty = N->getValueType(0);
  unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
  unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
                  N->getOperand(0), N->getOperand(1));
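  // Copy out only the results that are actually used: value 0 (the
  // quotient) lives in LO, value 1 (the remainder) in HI.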
  if (N->hasAnyUseOfValue(0)) {
  if (N->hasAnyUseOfValue(1)) {
617 "Illegal Condition Code");
  if (!LHS.getValueType().isFloatingPoint())
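  // Fold an inverted FP condition into the select: swap the true/false
  // inputs and flip between CMovFP_T and CMovFP_F instead of negating FCC.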
  SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);
  SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
  return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
                     ValueIfFalse, FCC, ValueIfTrue, Glue);
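  // AND combine: (and (srl x, pos), mask), with mask a run of SMSize ones,
  // can be matched to a single MipsISD::Ext(x, pos, size) bit-field
  // extract on MIPS32r2 and later.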
  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);
  unsigned SMPos, SMSize;
  if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
  if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
  NewOperand = FirstOperand;
  return DAG.getNode(Opc, DL, ValTy, NewOperand,
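  // OR combine: (or (and x, mask0), (and (shl y, pos), mask1)) becomes a
  // MipsISD::Ins bit-field insert when the two masks select complementary
  // fields of the same position and size.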
  SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
  unsigned SMPos0, SMSize0, SMPos1, SMSize1;
  if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
      And1.getOperand(0).getOpcode() == ISD::SHL) {
    if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
    if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
    if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
    EVT ValTy = N->getValueType(0);
    if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
  if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
      ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
       (SMSize0 + SMPos0 <= 32))) {
    if (!(CN1 = dyn_cast<ConstantSDNode>(And1->getOperand(1))))
    if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))
    EVT ValTy = N->getOperand(0)->getValueType(0);
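  // MADD/MSUB combine: fold (add/sub (mul a, b), c) on a 64-bit value into
  // a multiply-accumulate over the {LO, HI} 32-bit halves of the product.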
  if (!Mult.hasOneUse())
  SDValue MultLHS = Mult->getOperand(0);
  SDValue MultRHS = Mult->getOperand(1);
  if (!IsSigned && !IsUnsigned)
  std::tie(BottomHalf, TopHalf) =
  EVT VTs[2] = {MVT::i32, MVT::i32};
      !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
  EVT ValTy = N->getValueType(0);
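  // SHL combine: a shift of a low-positioned AND mask can be matched to a
  // cnMIPS CINS (clear-and-insert) bit-field instruction.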
  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue SecondOperand = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  unsigned SMPos, SMSize;
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
  if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
  unsigned Opc = N->getOpcode();
  if (auto *C = dyn_cast<ConstantSDNode>(Y))
    return C->getAPIntValue().ule(15);
          N->getOperand(0).getOpcode() == ISD::SRL) ||
          N->getOperand(0).getOpcode() == ISD::SHL)) &&
         "Expected shift-shift mask");
  if (N->getOperand(0).getValueType().isVector())
  switch (Op.getOpcode())
  case ISD::FABS:
    return lowerFABS(Op, DAG);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, false);
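// insertDivByZeroTrap appends a TEQ against $zero after a div/divu so that
// a zero divisor raises a trap; -mno-check-zero-division disables this.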
                                bool Is64Bit, bool IsMicroMips) {
          TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
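  // Dispatch for custom-inserted pseudos: i8/i16 atomics need the masked
  // partword expansion, while word/doubleword atomics expand directly.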
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_SWAP_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_SWAP_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwapPartword(MI, BB, 1);
  case Mips::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwapPartword(MI, BB, 2);
  case Mips::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, BB);

  case Mips::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::PseudoSDIV:
  case Mips::PseudoUDIV:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false,
                               false);
  case Mips::SDIV_MM_Pseudo:
  case Mips::UDIV_MM_Pseudo:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false,
                               true);
  case Mips::DIV_MMR6:
  case Mips::DIVU_MMR6:
  case Mips::MOD_MMR6:
  case Mips::MODU_MMR6:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false,
                               true);
  case Mips::PseudoDSDIV:
  case Mips::PseudoDUDIV:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true,
                               false);

  case Mips::PseudoSELECT_I:
  case Mips::PseudoSELECT_I64:
  case Mips::PseudoSELECT_S:
  case Mips::PseudoSELECT_D32:
  case Mips::PseudoSELECT_D64:
    return emitPseudoSELECT(MI, BB, false, Mips::BNE);
  case Mips::PseudoSELECTFP_F_I:
  case Mips::PseudoSELECTFP_F_I64:
  case Mips::PseudoSELECTFP_F_S:
  case Mips::PseudoSELECTFP_F_D32:
  case Mips::PseudoSELECTFP_F_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
  case Mips::PseudoSELECTFP_T_I:
  case Mips::PseudoSELECTFP_T_I64:
  case Mips::PseudoSELECTFP_T_S:
  case Mips::PseudoSELECTFP_T_D32:
  case Mips::PseudoSELECTFP_T_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
  case Mips::PseudoD_SELECT_I:
  case Mips::PseudoD_SELECT_I64:
    return emitPseudoD_SELECT(MI, BB);
  case Mips::LDR_W:
    return emitLDR_W(MI, BB);
  case Mips::LDR_D:
    return emitLDR_D(MI, BB);
  case Mips::STR_W:
    return emitSTR_W(MI, BB);
  case Mips::STR_D:
    return emitSTR_D(MI, BB);
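  // emitAtomicBinary only rewrites the pseudo into its *_POSTRA twin; the
  // actual LL/SC loop is emitted after register allocation so that spill
  // code cannot be inserted between the LL and the SC.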
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I32:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I32:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I32:
    AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I64:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I64:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I64:
    AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_MIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
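  // The min/max flavors need one extra scratch register in the expanded
  // loop to hold the intermediate comparison result.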
  if (NeedsAdditionalReg) {
  MI.eraseFromParent();
                                                    unsigned SrcReg) const {
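  // Sign-extend a subword value with an SLL/SRA pair when SEB/SEH are not
  // available: the shift amount is 24 for bytes and 16 for halfwords.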
  int64_t ShiftImm = 32 - (Size * 8);
         "Unsupported size for EmitAtomicBinaryPartial.");
  unsigned AtomicOp = 0;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_NAND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I8:
    AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I16:
    AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I8:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I16:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I8:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I16:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_MIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
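  // MaskImm selects the byte or halfword lane inside the aligned word:
  // 0xff when Size == 1, 0xffff when Size == 2.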
  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  if (NeedsAdditionalReg) {
  MI.eraseFromParent();
  assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
          MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
         "Unsupported atomic pseudo for EmitAtomicCmpSwap.");
  const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;
  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
                          ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
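  // Copy the expected and new values into fresh virtual registers so the
  // post-RA expansion may clobber its operands without corrupting the
  // originals' live ranges.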
  Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
  Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
  MI.eraseFromParent();
1896 "Unsupported size for EmitAtomicCmpSwapPartial.");
1923 unsigned AtomicOp =
MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
1924 ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
1925 : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;
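  // Compute the word-aligned address for the LL/SC loop: build a mask that
  // clears the low two pointer bits, then AND it with the original address.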
  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
  BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
  MI.eraseFromParent();
                     FCC0, Dest, CondRes);
         "Floating point operand expected.");
  EVT Ty = Op.getValueType();
  EVT Ty = Op.getValueType();
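  // General-dynamic TLS: the address is obtained by calling __tls_get_addr
  // with the GOT-relative TLSGD descriptor as its single argument.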
  Args.push_back(Entry);
      .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  EVT Ty = Op.getValueType();
  EVT Ty = Op.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
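  // On big-endian targets an argument smaller than its slot is stored in
  // the slot's high-order bytes, so the va_arg pointer must be advanced by
  // the size difference before the load.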
  unsigned ArgSizeInBytes =
  unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
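// FCOPYSIGN is lowered in integer registers: extract the sign bit of Y and
// insert it into X, a two-instruction ext/ins sequence on MIPS32r2 and up.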
                                 bool HasExtractInsert) {
  EVT TyX = Op.getOperand(0).getValueType();
  EVT TyY = Op.getOperand(1).getValueType();
  if (HasExtractInsert) {
  if (TyX == MVT::f32)
                                 bool HasExtractInsert) {
  unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
  unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
  if (HasExtractInsert) {
  if (WidthX > WidthY)
  else if (WidthY > WidthX)
  if (WidthX > WidthY)
  else if (WidthY > WidthX)
                                 bool HasExtractInsert) const {
                     Op.getOperand(0), Const1);
  if (HasExtractInsert)
  if (Op.getValueType() == MVT::f32)
                                 bool HasExtractInsert) const {
  if (HasExtractInsert)
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
        "return address can be determined only for current frame");
  EVT VT = Op.getValueType();
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
        "return address can be determined only for current frame");
  MVT VT = Op.getSimpleValueType();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
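  // lowerEH_RETURN passes the stack-adjustment offset in $v1 and the
  // exception-handler address in $v0.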
  unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
  unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
                            : Mips::PseudoD_SELECT_I,
                   DL, VTList, Cond, ShiftRightHi,
  EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
  EVT BasePtrVT = Ptr.getValueType();
                     LD->getMemOperand());
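  // Unaligned i32/i64 loads on pre-R6 cores are expanded into LWL/LWR
  // (LDL/LDR for 64-bit) pairs; naturally aligned loads pass through.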
  EVT MemVT = LD->getMemoryVT();
  if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
      ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
  EVT VT = Op.getValueType();
  assert((VT == MVT::i32) || (VT == MVT::i64));
  SDValue Ops[] = { SRL, LWR.getValue(1) };
      ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
  EVT ValTy = Op->getValueType(0);
  static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
  static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
  if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
    else if (ArgFlags.isZExt())
  if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    else if (ArgFlags.isZExt())
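  // O32 assigns f64 values passed in GPRs to the even/odd pairs (A0, A1)
  // or (A2, A3); if allocation landed on an odd register, it is skipped to
  // restore the pairing.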
  bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
  bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
  if (ValVT == MVT::i32 && isVectorFloat) {
    if (Reg == Mips::A2)
  } else if (ValVT == MVT::i32 ||
             (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
    if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
  } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
    if (Reg == Mips::A1 || Reg == Mips::A3)
  if (ValVT == MVT::f32) {
    if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
  static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
  static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
#include "MipsGenCallingConv.inc"
  return CC_Mips_FixedArg;
                      const SDLoc &DL, bool IsTailCall,
    std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
    bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
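  // For PIC calls that go through a call relocation, pass $gp so the GOT
  // pointer is live at the call site (the lazy-binding stub relies on it).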
  if (IsPICCall && !InternalLinkage && IsCallReloc) {
    unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
    RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
  for (auto &R : RegsToPass) {
  for (auto &R : RegsToPass)
  assert(Mask && "Missing call preserved mask for calling convention");
    Function *F = G->getGlobal()->getParent()->getFunction(Sym);
    if (F && F->hasFnAttribute("__Mips16RetHelper")) {
  switch (MI.getOpcode()) {
  case Mips::JALRPseudo:
  case Mips::JALR64Pseudo:
  case Mips::JALR16_MM:
  case Mips::JALRC16_MMR6:
  case Mips::TAILCALLREG:
  case Mips::TAILCALLREG64:
  case Mips::TAILCALLR6REG:
  case Mips::TAILCALL64R6REG:
  case Mips::TAILCALLREG_MM:
  case Mips::TAILCALLREG_MMR6: {
        Node->getNumOperands() < 1 ||
        Node->getOperand(0).getNumOperands() < 2) {
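    // Annotate the JALR with an R_MIPS_JALR relocation when the target is
    // a known function symbol, letting the linker relax the indirect call
    // into a direct one; data symbols are deliberately skipped.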
    const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
            dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
      if (!isa<Function>(G->getGlobal())) {
        LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
                          << G->getGlobal()->getName() << "\n");
      Sym = G->getGlobal()->getName();
            dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
      Sym = ES->getSymbol();
      dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());
  bool MemcpyInByVal = ES &&
  unsigned ReservedArgArea =
  CCInfo.AllocateStack(ReservedArgArea, Align(1));
  unsigned StackSize = CCInfo.getStackSize();
  bool InternalLinkage = false;
    IsTailCall = isEligibleForTailCallOptimization(
      InternalLinkage = G->getGlobal()->hasInternalLinkage();
      IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
                     G->getGlobal()->hasPrivateLinkage() ||
                     G->getGlobal()->hasHiddenVisibility() ||
                     G->getGlobal()->hasProtectedVisibility());
         "site marked musttail");
  StackSize = alignTo(StackSize, StackAlignment);
  if (!(IsTailCall || MemcpyInByVal))
  std::deque<std::pair<unsigned, SDValue>> RegsToPass;
  CCInfo.rewindByValRegsInfo();
  for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
    bool UseUpperBits = false;
    if (Flags.isByVal()) {
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
             "Do not tail-call optimize if there is a byval argument.");
      passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
      CCInfo.nextInRegsParam();
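    // Under O32 an f64 argument assigned to GPRs is split into two i32
    // halves that go into consecutive registers of the pair.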
    if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
        (ValVT == MVT::f64 && LocVT == MVT::i64) ||
        (ValVT == MVT::i64 && LocVT == MVT::f64))
    else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
      Register LocRegHigh = ArgLocs[++i].getLocReg();
      RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
      RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
      UseUpperBits = true;
      UseUpperBits = true;
      UseUpperBits = true;
      unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      if (Options.SupportsDebugEntryValues)
                                         Chain, Arg, DL, IsTailCall, DAG));
  if (!MemOpChains.empty())
  bool GlobalOrExternal = false, IsCallReloc = false;
  if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {
    if (auto *F = dyn_cast<Function>(N->getGlobal())) {
      if (F->hasFnAttribute("long-call"))
        UseLongCalls = true;
      else if (F->hasFnAttribute("short-call"))
        UseLongCalls = false;
    if (InternalLinkage)
    GlobalOrExternal = true;
    const char *Sym = S->getSymbol();
    GlobalOrExternal = true;
  getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
              IsCallReloc, CLI, Callee, Chain);
  if (!(MemcpyInByVal)) {
  return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
SDValue MipsTargetLowering::LowerCallResult(
      dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
  CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
                               RVLocs[i].getLocVT(), InGlue);
    unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
SDValue MipsTargetLowering::LowerFormalArguments(
  std::vector<SDValue> OutChains;
  if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
        "Functions with the interrupt attribute cannot have arguments!");
  CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
                   CCInfo.getInRegsParamsCount() > 0);
  unsigned CurArgIdx = 0;
  CCInfo.rewindByValRegsInfo();
  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    if (Ins[InsIdx].isOrigArg()) {
      std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[InsIdx].getOrigArgIndex();
    if (Flags.isByVal()) {
      assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
      unsigned FirstByValReg, LastByValReg;
      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValIdx < CCInfo.getInRegsParamsCount());
      copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
                    FirstByValReg, LastByValReg, VA, CCInfo);
      CCInfo.nextInRegsParam();
      if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
          (RegVT == MVT::i64 && ValVT == MVT::f64) ||
          (RegVT == MVT::f64 && ValVT == MVT::i64))
      else if (ABI.IsO32() && RegVT == MVT::i32 &&
               ValVT == MVT::f64) {
                               ArgValue, ArgValue2);
                    LocVT, DL, Chain, FIN,
      OutChains.push_back(ArgValue.getValue(1));
  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    if (ArgLocs[i].needsCustom()) {
    if (Ins[InsIdx].Flags.isSRet()) {
    writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
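  // Chain all stores of incoming arguments into one TokenFactor so later
  // loads in the function body cannot be scheduled above them.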
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
  MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_Mips);
bool MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                       bool IsSigned) const {
  CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    bool UseUpperBits = false;
      UseUpperBits = true;
      UseUpperBits = true;
      UseUpperBits = true;
      unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
    unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
    return LowerInterruptReturn(RetOps, DL, DAG);
MipsTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
  if (Constraint == "ZC")
MipsTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  if (!CallOperandVal)
  switch (*constraint) {
    if (isa<ConstantInt>(CallOperandVal))
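// parsePhysicalReg accepts inline-asm constraints of the form "{$reg}";
// anything not wrapped in braces is reported as not a physical register.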
                                 unsigned long long &Reg) {
  if (C.front() != '{' || C.back() != '}')
    return std::make_pair(false, false);