#include "llvm/IR/IntrinsicsPowerPC.h"

#define DEBUG_TYPE "ppc-lowering"

    "ppc-quadword-atomics",

    cl::desc("disable vector permute decomposition"),

    "disable-auto-paired-vec-st",
    cl::desc("disable automatically generated 32-byte paired vector stores"),

          "Number of shuffles lowered to a VPERM or XXPERM");
STATISTIC(NumDynamicAllocaProbed,
          "Number of dynamic stack allocations probed");
  initializeAddrModeMap();

  bool isPPC64 = Subtarget.isPPC64();

  if (!Subtarget.hasEFPU2())

  if (Subtarget.isISA3_0()) {

  if (!Subtarget.hasSPE()) {

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {

  if (Subtarget.useCRBits()) {

    if (isPPC64 || Subtarget.hasFPCVT()) {
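      // The i1 forms of the operations in this block are promoted to the
      // native GPR width: i64 on 64-bit subtargets, i32 otherwise.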
                        isPPC64 ? MVT::i64 : MVT::i32);
  if (Subtarget.isISA3_0()) {

  if (!Subtarget.hasSPE()) {

  if (Subtarget.hasVSX()) {

  if (Subtarget.hasFSQRT()) {

  if (Subtarget.hasFPRND()) {

  if (Subtarget.hasSPE()) {

  if (Subtarget.hasSPE())

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))

  if (Subtarget.hasFCPSGN()) {

  if (Subtarget.hasFPRND()) {

  if (Subtarget.isISA3_1()) {

  if (Subtarget.isISA3_0()) {

  if (!Subtarget.useCRBits()) {

  if (!Subtarget.useCRBits())

  if (Subtarget.hasFPU()) {

  if (!Subtarget.useCRBits())

  if (Subtarget.hasSPE()) {

  if (Subtarget.hasDirectMove() && isPPC64) {

  if (TM.Options.UnsafeFPMath) {
  if (Subtarget.hasSPE()) {

  if (Subtarget.has64BitSupport()) {

  if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {

  if (Subtarget.hasSPE()) {

  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {

  if (Subtarget.use64BitRegs()) {

  if (Subtarget.has64BitSupport()) {

  if (Subtarget.hasVSX()) {

  if (Subtarget.hasAltivec()) {
    for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {

      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {

      if (Subtarget.hasVSX()) {

      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {

      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))

  if (!Subtarget.hasP8Vector()) {

  if (Subtarget.hasAltivec())
    for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})

  if (Subtarget.hasP8Altivec())

  if (Subtarget.hasVSX()) {

  if (Subtarget.hasP8Altivec())

  if (Subtarget.isISA3_1()) {

  if (Subtarget.hasVSX()) {

  if (Subtarget.hasP8Vector()) {

  if (Subtarget.hasDirectMove() && isPPC64) {

  if (TM.Options.UnsafeFPMath) {

  if (Subtarget.hasP8Vector())

  if (Subtarget.hasP8Altivec()) {

  if (Subtarget.isISA3_1())

  if (Subtarget.hasP8Altivec()) {

  if (Subtarget.hasP9Vector()) {

  if (Subtarget.useCRBits()) {
  } else if (Subtarget.hasVSX()) {

  for (MVT VT : {MVT::f32, MVT::f64}) {

  if (Subtarget.hasP9Altivec()) {
    if (Subtarget.isISA3_1()) {

  if (Subtarget.hasP10Vector()) {

  if (Subtarget.pairedVectorMemops()) {

  if (Subtarget.hasMMA()) {
    if (Subtarget.isISAFuture())

  if (Subtarget.has64BitSupport())

  if (Subtarget.isISA3_1())

  if (Subtarget.hasAltivec()) {

  if (Subtarget.hasFPCVT())

  if (Subtarget.useCRBits())

  if (Subtarget.useCRBits()) {
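  // The memory runtime routines use the ___-prefixed AIX entry points; note
  // that MEMCPY is deliberately bound to the memmove routine, which is safe
  // because memmove also handles non-overlapping copies.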
  setLibcallName(RTLIB::MEMCPY, isPPC64 ? "___memmove64" : "___memmove");
  setLibcallName(RTLIB::MEMMOVE, isPPC64 ? "___memmove64" : "___memmove");
  setLibcallName(RTLIB::MEMSET, isPPC64 ? "___memset64" : "___memset");
  setLibcallName(RTLIB::BZERO, isPPC64 ? "___bzero64" : "___bzero");

  if (Subtarget.useCRBits()) {
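// Populate the table that maps each PowerPC addressing mode to the set of
// memory-operand flag combinations it can serve; the instruction selector
// consults it when classifying loads and stores.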
void PPCTargetLowering::initializeAddrModeMap() {
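/// getMaxByValAlign - Recursively walk an aggregate passed byval and raise
/// MaxAlign toward MaxMaxAlign based on the vector types it contains.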
  if (MaxAlign == MaxMaxAlign)

  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 &&
        VTy->getPrimitiveSizeInBits().getFixedValue() >= 256)
      MaxAlign = Align(32);
    else if (VTy->getPrimitiveSizeInBits().getFixedValue() >= 128 &&
             MaxAlign < 16)
      MaxAlign = Align(16);
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)

  if (Subtarget.hasAltivec())
  return Alignment.value();

  return Subtarget.hasSPE();
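// PPCTargetLowering::getTargetNodeName - Map each PPCISD opcode to its
// printable name for SelectionDAG debug output.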
    return "PPCISD::FTSQRT";
    return "PPCISD::FSQRT";
    return "PPCISD::XXSPLTI_SP_TO_DP";
    return "PPCISD::XXSPLTI32DX";
    return "PPCISD::XXPERM";
    return "PPCISD::CALL_RM";
    return "PPCISD::CALL_NOP_RM";
    return "PPCISD::CALL_NOTOC_RM";
    return "PPCISD::BCTRL_RM";
    return "PPCISD::BCTRL_LOAD_TOC_RM";
    return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
    return "PPCISD::ANDI_rec_1_EQ_BIT";
    return "PPCISD::ANDI_rec_1_GT_BIT";
    return "PPCISD::ST_VSR_SCAL_INT";
    return "PPCISD::PADDI_DTPREL";
    return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
    return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
    return "PPCISD::STRICT_FADDRTZ";
    return "PPCISD::STRICT_FCTIDZ";
    return "PPCISD::STRICT_FCTIWZ";
    return "PPCISD::STRICT_FCTIDUZ";
    return "PPCISD::STRICT_FCTIWUZ";
    return "PPCISD::STRICT_FCFID";
    return "PPCISD::STRICT_FCFIDU";
    return "PPCISD::STRICT_FCFIDS";
    return "PPCISD::STRICT_FCFIDUS";
    return "PPCISD::STORE_COND";

  return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

    return CFP->getValueAPF().isZero();
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
    return CFP->getValueAPF().isZero();

  return Op < 0 || Op == Val;
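/// For the shuffle-mask predicates below, ShuffleKind distinguishes
/// big-endian operations with two different inputs (0), either-endian
/// operations with two identical inputs (1), and little-endian operations
/// with two different inputs (2). The three pack checks that follow match
/// VPKUHUM (halfword to byte), VPKUWUM (word to halfword), and VPKUDUM
/// (doubleword to word, Power8 and later).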
  if (ShuffleKind == 0) {
    for (unsigned i = 0; i != 16; ++i)
  } else if (ShuffleKind == 2) {
    for (unsigned i = 0; i != 16; ++i)
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)

  if (ShuffleKind == 0) {
    for (unsigned i = 0; i != 16; i += 2)
  } else if (ShuffleKind == 2) {
    for (unsigned i = 0; i != 16; i += 2)
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)

  if (!Subtarget.hasP8Vector())

  if (ShuffleKind == 0) {
    for (unsigned i = 0; i != 16; i += 4)
  } else if (ShuffleKind == 2) {
    for (unsigned i = 0; i != 16; i += 4)
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
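/// isVMerge - Shared check for the VMRGL*/VMRGH* masks: UnitSize-byte
/// elements must alternate between the vector starting at LHSStart and the
/// one starting at RHSStart.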
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)
    for (unsigned j = 0; j != UnitSize; ++j) {
                             LHSStart+j+i*UnitSize) ||
                             RHSStart+j+i*UnitSize))

  if (ShuffleKind == 1)
  else if (ShuffleKind == 2)

  if (ShuffleKind == 1)
  else if (ShuffleKind == 0)

                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
                             i*RHSStartValue+j+IndexOffset) ||
                             i*RHSStartValue+j+IndexOffset+8))

  unsigned indexOffset = CheckEven ? 4 : 0;
  if (ShuffleKind == 1)
  else if (ShuffleKind == 2)

  unsigned indexOffset = CheckEven ? 0 : 4;
  if (ShuffleKind == 1)
  else if (ShuffleKind == 0)
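/// isVSLDOIShuffleMask - If this is a shift-by-bytes mask suitable for
/// VSLDOI, return the byte shift amount; otherwise return -1.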
  if (N->getValueType(0) != MVT::v16i8)

  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)

  if (i == 16) return -1;

  if (ShiftAmt < i) return -1;

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    for (++i; i != 16; ++i)
  } else if (ShuffleKind == 1) {
    for (++i; i != 16; ++i)

  ShiftAmt = 16 - ShiftAmt;
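/// isSplatShuffleMask - Return true if the shuffle replicates a single
/// EltSize-byte element across the vector, as required by VSPLTB, VSPLTH,
/// and VSPLTW.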
  EVT VT = N->getValueType(0);
  if (VT == MVT::v2i64 || VT == MVT::v2f64)
    return EltSize == 8 && N->getMaskElt(0) == N->getMaskElt(1);

         EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");

  if (N->getMaskElt(0) % EltSize != 0)

  unsigned ElementBase = N->getMaskElt(0);

  if (ElementBase >= 16)

  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
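/// Check that the mask shuffles whole N-byte elements: each Width-byte group
/// must be a contiguous, element-aligned run of bytes, forward for
/// StepLen == 1 or reversed for StepLen == -1.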
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16];
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
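/// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be lowered to
/// the ISA 3.0 XXINSERTW word insertion; on success ShiftElts, InsertAtByte,
/// and Swap describe the required rotate, target byte, and operand order.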
                     unsigned &InsertAtByte, bool &Swap, bool IsLE) {

  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;

  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;

  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;

  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;

  if (N->getOperand(1).isUndef()) {
    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;

    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;

    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;

    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
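/// isXXSLDWIShuffleMask - Return true if this VECTOR_SHUFFLE can be handled
/// by the XXSLDWI double-vector word shift; ShiftElts receives the shift
/// count and Swap tells whether the two inputs must be exchanged.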
                            bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;

  if (N->getOperand(1).isUndef()) {
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;

  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)

  if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
    ShiftElts = (8 - M0) % 8;
  } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
    ShiftElts = (4 - M0) % 4;

  if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
  } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
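/// Check whether the mask byte-reverses each Width-byte element in place,
/// the pattern matched by the XXBRH/XXBRW/XXBRD byte-reverse instructions.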
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
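/// isXXPERMDIShuffleMask - Return true if this VECTOR_SHUFFLE can be handled
/// by the XXPERMDI instruction; DM receives the two-bit doubleword-select
/// immediate and Swap tells whether the inputs must be exchanged.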
                             bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  unsigned M0 = N->getMaskElt(0) / 8;
  unsigned M1 = N->getMaskElt(8) / 8;
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");

  if (N->getOperand(1).isUndef()) {
    if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);

  if (M0 > 1 && M1 < 2) {
  } else if (M0 < 2 && M1 > 1) {

    DM = (((~M1) & 1) << 1) + ((~M0) & 1);

  if (M0 < 2 && M1 > 1) {
  } else if (M0 > 1 && M1 < 2) {

    DM = (M0 << 1) + (M1 & 1);

  if (VT == MVT::v2i64 || VT == MVT::v2f64)

  return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
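/// get_VSPLTI_elt - If this is a build_vector of constants that can be
/// materialized with a splat-immediate instruction of the given element size
/// (vspltisb = 1, vspltish = 2, vspltisw = 4 bytes), return the splatted
/// value; otherwise return an empty SDValue.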
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;

    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))

    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;

      if (!UniquedVals[Multiple-1].getNode())
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();

      if (!UniquedVals[Multiple-1].getNode())
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))

  unsigned ValSizeInBytes = EltSize;
    Value = CN->getZExtValue();
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = llvm::bit_cast<uint32_t>(CN->getValueAPF().convertToFloat());

  if (ValSizeInBytes < ByteSize)
    return SDValue();

  if (MaskVal == 0)
    return SDValue();

  if (SignExtend32<5>(MaskVal) == MaskVal)
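/// isIntS16Immediate - Return true if the node is a 32- or 64-bit constant
/// that is representable as a signed 16-bit immediate, and set Imm to it.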
  if (!isa<ConstantSDNode>(N))

  Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
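// An OR can be selected as if it were an ADD when forming an address, as
// long as the operands provably share no set bits: the check below requires
// the combined known-zero masks to cover every bit.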
  return (~(LHSKnown.Zero | RHSKnown.Zero) == 0);

    if (MemSDNode *Memop = dyn_cast<MemSDNode>(U)) {
      if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);
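/// isIntS34Immediate - Like the S16 variant, but for the 34-bit displacement
/// field of ISA 3.1 prefixed instructions.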
  if (!isa<ConstantSDNode>(N))

  Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
  return isInt<34>(Imm);
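// The SelectAddress* routines split an address into base and displacement
// for D-form selection; EncodingAlignment guards the DS/DQ forms, whose
// displacement fields must be 4- or 16-byte aligned respectively.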
      (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))

    Base = N.getOperand(0);

  } else if (N.getOpcode() == ISD::OR) {
      (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))

    if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
      Base = N.getOperand(0);

        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
      Base = N.getOperand(0);
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);
      Base = N.getOperand(0);
  } else if (N.getOpcode() == ISD::OR) {
        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {

      dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
      Base = N.getOperand(0);

      (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
                          CN->getValueType(0));

    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!EncodingAlignment ||
         isAligned(*EncodingAlignment, CN->getZExtValue()))) {
      int Addr = (int)CN->getZExtValue();
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;

  if (N.getValueType() != MVT::i64)

  Base = N.getOperand(0);

  Base = N.getOperand(0);

       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Base = N.getOperand(0);
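// isValidPCRelNode<Ty> - True if N is a node of type Ty whose target flags
// mark it as a PC-relative reference.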
  Ty *PCRelCand = dyn_cast<Ty>(N);

  if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
      isValidPCRelNode<GlobalAddressSDNode>(N) ||
      isValidPCRelNode<JumpTableSDNode>(N) ||
      isValidPCRelNode<BlockAddressSDNode>(N))

  EVT MemVT = LD->getMemoryVT();

    if (!ST.hasP8Vector())

    if (!ST.hasP9Vector())

  if (UI.getUse().get().getResNo() == 0 &&
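// getPreIndexedAddressParts: decide whether this load or store can
// profitably use an update-form (pre-increment) instruction, and if so
// split its address into Base and Offset.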
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlign();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlign();

  if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))

  SDValue Val = cast<StoreSDNode>(N)->getValue();

  if (VT != MVT::i64) {
    if (Alignment < Align(4))

  if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
      isa<ConstantSDNode>(Offset))
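// The Lower* routines below custom-lower address-forming nodes (constant
// pool, jump table, block address, global and TLS addresses), producing TOC
// entries or Hi/Lo pairs depending on ABI, code model, and PIC level.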
                                unsigned &HiOpFlags, unsigned &LoOpFlags,

  const bool Is64Bit = Subtarget.isPPC64();
  EVT VT = Is64Bit ? MVT::i64 : MVT::i32;

  EVT PtrVT = Op.getValueType();
    return getTOCEntry(DAG, SDLoc(CP), GA);

  unsigned MOHiFlag, MOLoFlag;
    return getTOCEntry(DAG, SDLoc(CP), GA);

  EVT PtrVT = Op.getValueType();
    return getTOCEntry(DAG, SDLoc(JT), GA);

  unsigned MOHiFlag, MOLoFlag;
    return getTOCEntry(DAG, SDLoc(GA), GA);

  EVT PtrVT = Op.getValueType();
    return getTOCEntry(DAG, SDLoc(BASDN), GA);

  unsigned MOHiFlag, MOLoFlag;

    return LowerGlobalTLSAddressAIX(Op, DAG);

  return LowerGlobalTLSAddressLinux(Op, DAG);

  SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
  SDValue RegionHandle = getTOCEntry(DAG, dl, RegionHandleTGA);

  bool is64bit = Subtarget.isPPC64();

  if (!TM.isPositionIndependent())

                     PtrVT, GOTPtr, TGA, TGA);
                     PtrVT, TLSAddr, TGA);

  EVT PtrVT = Op.getValueType();
    return getTOCEntry(DAG, DL, GA);

  unsigned MOHiFlag, MOLoFlag;
    return getTOCEntry(DAG, DL, GA);
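// LowerSETCC: without Power9, f128 compares must be expanded here; v2i64
// compares are implemented as v4i32 compares whose result is combined with
// a {1, 0, 3, 2} word-swapped copy of itself.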
  bool IsStrict = Op->isStrictFPOpcode();
      cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();

  EVT LHSVT = LHS.getValueType();

  if (LHSVT == MVT::f128) {
    assert(!Subtarget.hasP9Vector() &&
           "SETCC for f128 is already legal under Power9!");

  assert(!IsStrict && "Don't know how to handle STRICT_FSETCC!");

  if (Op.getValueType() == MVT::v2i64) {
    if (LHS.getValueType() == MVT::v2i64) {

      int ShuffV[] = {1, 0, 3, 2};
                         dl, MVT::v4i32, Shuff, SetCC32));

  if (C->isAllOnes() || C->isZero())

  EVT VT = Op.getValueType();
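// LowerVAARG (32-bit SVR4 ABI): va_arg is lowered by hand, reading the
// gpr/fpr counters and the overflow-area pointer out of the va_list
// structure.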
  EVT VT = Node->getValueType(0);

  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();

  if (VT == MVT::i64) {

  InChain = OverflowArea.getValue(1);