#include "llvm/IR/IntrinsicsPowerPC.h"

#define DEBUG_TYPE "ppc-lowering"

    cl::desc("disable vector permute decomposition"),

    "disable-auto-paired-vec-st",
    cl::desc("disable automatically generated 32-byte paired vector stores"),

    cl::desc("Set minimum number of entries to use a jump table on PPC"));

          "Number of shuffles lowered to a VPERM or XXPERM");
STATISTIC(NumDynamicAllocaProbed,
          "Number of dynamic stack allocations probed");
  initializeAddrModeMap();

  bool isPPC64 = Subtarget.isPPC64();
  if (!Subtarget.hasEFPU2())
  if (Subtarget.isISA3_0()) {
  if (!Subtarget.hasSPE()) {
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {

  if (Subtarget.useCRBits()) {
    if (isPPC64 || Subtarget.hasFPCVT()) {
                         isPPC64 ? MVT::i64 : MVT::i32);
                         isPPC64 ? MVT::i64 : MVT::i32);
                         isPPC64 ? MVT::i64 : MVT::i32);
                         isPPC64 ? MVT::i64 : MVT::i32);
                         isPPC64 ? MVT::i64 : MVT::i32);
                         isPPC64 ? MVT::i64 : MVT::i32);
                         isPPC64 ? MVT::i64 : MVT::i32);
                         isPPC64 ? MVT::i64 : MVT::i32);
  if (Subtarget.isISA3_0()) {
  if (!Subtarget.hasSPE()) {
  if (Subtarget.hasVSX()) {
  if (Subtarget.hasFSQRT()) {
  if (Subtarget.hasFPRND()) {
  if (Subtarget.hasSPE()) {
  if (Subtarget.hasSPE())
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
  if (Subtarget.hasFCPSGN()) {
  if (Subtarget.hasFPRND()) {
  if (Subtarget.isISA3_1()) {
  if (Subtarget.isISA3_0()) {
  if (!Subtarget.useCRBits()) {
  if (!Subtarget.useCRBits())
  if (Subtarget.hasFPU()) {
  if (!Subtarget.useCRBits())
  if (Subtarget.hasSPE()) {
  if (Subtarget.hasDirectMove() && isPPC64) {
  if (TM.Options.UnsafeFPMath) {
  if (Subtarget.hasSPE()) {
  if (Subtarget.has64BitSupport()) {
  if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
  if (Subtarget.hasSPE()) {
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
  if (Subtarget.use64BitRegs()) {
  if (Subtarget.has64BitSupport()) {
  if (Subtarget.hasVSX()) {
  if (Subtarget.hasAltivec()) {
    for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
      if (Subtarget.hasVSX()) {
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
    if (!Subtarget.hasP8Vector()) {
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
    if (Subtarget.hasP8Altivec())
    if (Subtarget.hasVSX()) {
    if (Subtarget.hasP8Altivec())
    if (Subtarget.isISA3_1()) {
  if (Subtarget.hasVSX()) {
    if (Subtarget.hasP8Vector()) {
    if (Subtarget.hasDirectMove() && isPPC64) {
    if (TM.Options.UnsafeFPMath) {
    if (Subtarget.hasP8Vector())
    if (Subtarget.hasP8Altivec()) {
    if (Subtarget.isISA3_1())
    if (Subtarget.hasP8Altivec()) {
    if (Subtarget.hasP9Vector()) {
    if (Subtarget.useCRBits()) {
  } else if (Subtarget.hasVSX()) {
    for (MVT VT : {MVT::f32, MVT::f64}) {
  if (Subtarget.hasP9Altivec()) {
    if (Subtarget.isISA3_1()) {
  if (Subtarget.hasP10Vector()) {
  if (Subtarget.pairedVectorMemops()) {
  if (Subtarget.hasMMA()) {
    if (Subtarget.isISAFuture())
  if (Subtarget.has64BitSupport())
  if (Subtarget.isISA3_1())
  if (Subtarget.hasAltivec()) {
  if (Subtarget.hasFPCVT())
  if (Subtarget.useCRBits())
  if (Subtarget.useCRBits()) {
    setLibcallName(RTLIB::MEMCPY, isPPC64 ? "___memmove64" : "___memmove");
    setLibcallName(RTLIB::MEMMOVE, isPPC64 ? "___memmove64" : "___memmove");
    setLibcallName(RTLIB::MEMSET, isPPC64 ? "___memset64" : "___memset");
    setLibcallName(RTLIB::BZERO, isPPC64 ? "___bzero64" : "___bzero");
  if (Subtarget.useCRBits()) {

void PPCTargetLowering::initializeAddrModeMap() {
  if (MaxAlign == MaxMaxAlign)
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 &&
        VTy->getPrimitiveSizeInBits().getFixedValue() >= 256)
      MaxAlign = Align(32);
    else if (VTy->getPrimitiveSizeInBits().getFixedValue() >= 128 &&
             MaxAlign < 16) // second operand restored; assumed from context
      MaxAlign = Align(16);
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
  if (Subtarget.hasAltivec())
  return Alignment.value();

  return Subtarget.hasSPE();
    Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const {
  if (!Subtarget.isPPC64() || !Subtarget.hasVSX())
  if (auto *VTy = dyn_cast<VectorType>(VectorTy)) {
    if (VTy->getScalarType()->isIntegerTy()) {
      if (ElemSizeInBits == 32) {
      if (ElemSizeInBits == 64) {
    return "PPCISD::FTSQRT";
    return "PPCISD::FSQRT";
    return "PPCISD::XXSPLTI_SP_TO_DP";
    return "PPCISD::XXSPLTI32DX";
    return "PPCISD::XXPERM";
    return "PPCISD::CALL_RM";
    return "PPCISD::CALL_NOP_RM";
    return "PPCISD::CALL_NOTOC_RM";
    return "PPCISD::BCTRL_RM";
    return "PPCISD::BCTRL_LOAD_TOC_RM";
    return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
    return "PPCISD::ANDI_rec_1_EQ_BIT";
    return "PPCISD::ANDI_rec_1_GT_BIT";
    return "PPCISD::ST_VSR_SCAL_INT";
    return "PPCISD::PADDI_DTPREL";
    return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
    return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
    return "PPCISD::STRICT_FADDRTZ";
    return "PPCISD::STRICT_FCTIDZ";
    return "PPCISD::STRICT_FCTIWZ";
    return "PPCISD::STRICT_FCTIDUZ";
    return "PPCISD::STRICT_FCTIWUZ";
    return "PPCISD::STRICT_FCFID";
    return "PPCISD::STRICT_FCFIDU";
    return "PPCISD::STRICT_FCFIDS";
    return "PPCISD::STRICT_FCFIDUS";
    return "PPCISD::STORE_COND";
  return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

    return CFP->getValueAPF().isZero();
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
    return CFP->getValueAPF().isZero();

  return Op < 0 || Op == Val;
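// Shuffle masks use -1 for undef entries, so any negative Op matches.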
  if (ShuffleKind == 0) {
    for (unsigned i = 0; i != 16; ++i)
  } else if (ShuffleKind == 2) {
    for (unsigned i = 0; i != 16; ++i)
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
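  // VPKUHUM keeps the low-order byte of each halfword: in LE byte numbering
  // that is byte 2*i of element i, in BE it is byte 2*i+1, hence the
  // "j = IsLE ? 0 : 1" starting offset in the unary (ShuffleKind == 1) case.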
  if (ShuffleKind == 0) {
    for (unsigned i = 0; i != 16; i += 2)
  } else if (ShuffleKind == 2) {
    for (unsigned i = 0; i != 16; i += 2)
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
  if (!Subtarget.hasP8Vector())

  if (ShuffleKind == 0) {
    for (unsigned i = 0; i != 16; i += 4)
  } else if (ShuffleKind == 2) {
    for (unsigned i = 0; i != 16; i += 4)
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)
    for (unsigned j = 0; j != UnitSize; ++j) {
                             LHSStart+j+i*UnitSize) ||
                             RHSStart+j+i*UnitSize))
    if (ShuffleKind == 1) // unary
    else if (ShuffleKind == 2) // swapped
    if (ShuffleKind == 1) // unary
    else if (ShuffleKind == 0) // normal
    if (ShuffleKind == 1) // unary
    else if (ShuffleKind == 2) // swapped
    if (ShuffleKind == 1) // unary
    else if (ShuffleKind == 0) // normal
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
                             i*RHSStartValue+j+IndexOffset) ||
                             i*RHSStartValue+j+IndexOffset+8))
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1)
    else if (ShuffleKind == 2)

    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1)
    else if (ShuffleKind == 0)
  if (N->getValueType(0) != MVT::v16i8)

  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
  if (i == 16) return -1; // all undef
  if (ShiftAmt < i) return -1;

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    for (++i; i != 16; ++i)
  } else if (ShuffleKind == 1) {
    for (++i; i != 16; ++i)
    ShiftAmt = 16 - ShiftAmt;
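  // On little-endian targets the two vsldoi operands appear in swapped order,
  // so the equivalent shift amount is the complement: 16 - ShiftAmt.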
  EVT VT = N->getValueType(0);
  if (VT == MVT::v2i64 || VT == MVT::v2f64)
    return EltSize == 8 && N->getMaskElt(0) == N->getMaskElt(1);

         EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");

  if (N->getMaskElt(0) % EltSize != 0)

  unsigned ElementBase = N->getMaskElt(0);
  if (ElementBase >= 16)

  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 ||
        N->getMaskElt(i) != (int)(i+ElementBase))

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
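  // Example: with EltSize == 4, a splat of element 1 is the byte mask
  // <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7>; the first group fixes the element
  // and every subsequent group must repeat it byte for byte.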
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16];
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
                               unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;

  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;

  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;

  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;
  if (N->getOperand(1).isUndef()) {
    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;
    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;
    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;
    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
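  // M0..M3 are the mask's four word indices (byte index / 4). The pattern
  // matched is "three words in place, one word taken from the other operand";
  // that odd word is the XXINSERTW source, and ShiftElts is the rotation
  // needed to bring it into position.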
                          bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  if (N->getOperand(1).isUndef()) {
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;

  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
    if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
      ShiftElts = (8 - M0) % 8;
    } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
      ShiftElts = (4 - M0) % 4;

    if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
    } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
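  // Byte-reverse masks place the last byte of each element first: for
  // Width == 4 (word reverse, i.e. xxbrw) the mask is
  // <3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12>, so the leading byte of each
  // group must equal i + Width - 1.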
                          bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  unsigned M0 = N->getMaskElt(0) / 8;
  unsigned M1 = N->getMaskElt(8) / 8;
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");

  if (N->getOperand(1).isUndef()) {
    if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
    if (M0 > 1 && M1 < 2) {
    } else if (M0 < 2 && M1 > 1) {

      DM = (((~M1) & 1) << 1) + ((~M0) & 1);

    if (M0 < 2 && M1 > 1) {
    } else if (M0 > 1 && M1 < 2) {

      DM = (M0 << 1) + (M1 & 1);
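  // DM is xxpermdi's two-bit selector, one bit per output doubleword. On
  // little-endian both the operands and the doubleword numbering are swapped,
  // which is why the LE encoding complements the mask-derived bits.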
  if (VT == MVT::v2i64 || VT == MVT::v2f64)

  return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
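  // PPC splat mnemonics (vspltb/vsplth/vspltw) number elements from the big
  // end, so on LE the index is mirrored: (16 / EltSize) - 1 - index.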
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;

      if (!UniquedVals[Multiple-1].getNode())
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (!UniquedVals[Multiple-1].getNode())
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))

  unsigned ValSizeInBytes = EltSize;
    Value = CN->getZExtValue();
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = llvm::bit_cast<uint32_t>(CN->getValueAPF().convertToFloat());

  if (ValSizeInBytes < ByteSize) return SDValue();
  if (MaskVal == 0) return SDValue();

  if (SignExtend32<5>(MaskVal) == MaskVal)
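  // vsplti[bhw] take a 5-bit signed immediate (the SIM field), so the value
  // is only usable if sign-extending its low 5 bits reproduces it exactly.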
  if (!isa<ConstantSDNode>(N))

  Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
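// The truncate-and-compare idiom: the node is a signed 16-bit immediate
// exactly when casting its value to int16_t round-trips without loss.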
  return (~(LHSKnown.Zero | RHSKnown.Zero) == 0);
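// If every bit is known zero in at least one operand, no bit position can be
// set in both, so the OR behaves exactly like an ADD and the "offset" can be
// folded as if it had been added to the base.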
    if (MemSDNode *Memop = dyn_cast<MemSDNode>(U)) {
      if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);
  if (!isa<ConstantSDNode>(N))

  Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
  return isInt<34>(Imm);
      (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
    Base = N.getOperand(0);
  } else if (N.getOpcode() == ISD::OR) {
        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
    if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
      Base = N.getOperand(0);
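      // EncodingAlignment captures the instruction-format restriction on the
      // displacement: DS-form accesses (ld/std/lwa) need a multiple of 4 and
      // DQ-form vector accesses a multiple of 16, so unaligned offsets cannot
      // be folded into the immediate field.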
        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
        Base = N.getOperand(0);
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() &&
             "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);
      Base = N.getOperand(0);
  } else if (N.getOpcode() == ISD::OR) {
        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
            dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = N.getOperand(0);
        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
                                CN->getValueType(0));

    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!EncodingAlignment ||
         isAligned(*EncodingAlignment, CN->getZExtValue()))) {
      int Addr = (int)CN->getZExtValue();
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
  if (N.getValueType() != MVT::i64)

  Base = N.getOperand(0);
  Base = N.getOperand(0);
        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Base = N.getOperand(0);
  Ty *PCRelCand = dyn_cast<Ty>(N);

  if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
      isValidPCRelNode<GlobalAddressSDNode>(N) ||
      isValidPCRelNode<JumpTableSDNode>(N) ||
      isValidPCRelNode<BlockAddressSDNode>(N))
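  // Constant pools, globals, jump tables, and block addresses are the node
  // kinds that can be materialized PC-relative with the prefixed ISA 3.1
  // (Power10) instructions, so only those are accepted here.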
  EVT MemVT = LD->getMemoryVT();
  if (!ST.hasP8Vector())
  if (!ST.hasP9Vector())
  if (UI.getUse().get().getResNo() == 0 &&

    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlign();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlign();

  if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
  SDValue Val = cast<StoreSDNode>(N)->getValue();
  if (VT != MVT::i64) {
  if (Alignment < Align(4))

  if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
      isa<ConstantSDNode>(Offset))
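  // Update-form (pre-indexed) selection has two PPC-specific restrictions:
  // the DS-form update instructions (ldu/stdu) need a 4-byte-aligned
  // displacement, and a sign-extending i32->i64 load must remain lwa, since
  // no "lwau" exists; that is why the extending-load case with a constant
  // offset is rejected.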
                               unsigned &HiOpFlags, unsigned &LoOpFlags,

  const bool Is64Bit = Subtarget.isPPC64();
  EVT VT = Is64Bit ? MVT::i64 : MVT::i32;

  EVT PtrVT = Op.getValueType();
    return getTOCEntry(DAG, SDLoc(CP), GA);

  unsigned MOHiFlag, MOLoFlag;
    return getTOCEntry(DAG, SDLoc(CP), GA);
  EVT PtrVT = Op.getValueType();
    return getTOCEntry(DAG, SDLoc(JT), GA);
  unsigned MOHiFlag, MOLoFlag;
    return getTOCEntry(DAG, SDLoc(GA), GA);

  EVT PtrVT = Op.getValueType();
    return getTOCEntry(DAG, SDLoc(BASDN), GA);
  unsigned MOHiFlag, MOLoFlag;

    return LowerGlobalTLSAddressAIX(Op, DAG);
  return LowerGlobalTLSAddressLinux(Op, DAG);
  bool Is64Bit = Subtarget.isPPC64();
  bool HasAIXSmallLocalExecTLS = Subtarget.hasAIXSmallLocalExecTLS();

    SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
    if (HasAIXSmallLocalExecTLS && IsTLSLocalExecModel) {
    if (HasAIXSmallLocalExecTLS)
                         "currently only supported on AIX (64-bit mode).");

  SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
  SDValue RegionHandle = getTOCEntry(DAG, dl, RegionHandleTGA);
  bool is64bit = Subtarget.isPPC64();

  if (!TM.isPositionIndependent())
                     PtrVT, GOTPtr, TGA, TGA);
                     PtrVT, TLSAddr, TGA);
  EVT PtrVT = Op.getValueType();
    return getTOCEntry(DAG, DL, GA);
  unsigned MOHiFlag, MOLoFlag;
    return getTOCEntry(DAG, DL, GA);
  bool IsStrict = Op->isStrictFPOpcode();
      cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();

  EVT LHSVT = LHS.getValueType();

  if (LHSVT == MVT::f128) {
    assert(!Subtarget.hasP9Vector() &&
           "SETCC for f128 is already legal under Power9!");
    assert(!IsStrict && "Don't know how to handle STRICT_FSETCC!");

  if (Op.getValueType() == MVT::v2i64) {
    if (LHS.getValueType() == MVT::v2i64) {
      int ShuffV[] = {1, 0, 3, 2};
                                          dl, MVT::v4i32, Shuff, SetCC32));
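      // v2i64 compares are synthesized from v4i32 ones: compare word-wise,
      // swap the words inside each doubleword (mask {1, 0, 3, 2}), then
      // combine the two results so a doubleword lane reads true only if both
      // of its word halves did (a bitwise AND in the equality case).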
  if (C->isAllOnes() || C->isZero())

  EVT VT = Op.getValueType();