#include "llvm/IR/IntrinsicsPowerPC.h"
#define DEBUG_TYPE "ppc-lowering"
    "disable-p10-store-forward",

                          cl::desc("disable vector permute decomposition"),

    "disable-auto-paired-vec-st",
 
    cl::desc("disable automatically generated 32-byte paired vector stores"),
 
    cl::desc("Set minimum number of entries to use a jump table on PPC"));

    cl::desc("Set minimum of largest number of comparisons to use bit test for "
    cl::desc("max depth when checking alias info in GatherAllAliases()"));

    cl::desc("Set inclusive limit count of TLS local-dynamic access(es) in a "
             "function to use initial-exec"));
 
          "Number of shuffles lowered to a VPERM or XXPERM");
 
STATISTIC(NumDynamicAllocaProbed,
          "Number of dynamic stack allocations probed");
 
  initializeAddrModeMap();

  bool isPPC64 = Subtarget.isPPC64();

  const MVT RegVT = Subtarget.getScalarIntVT();

      if (!Subtarget.hasEFPU2())

  if (!Subtarget.hasP10Vector()) {

  if (Subtarget.isISA3_0()) {

  if (!Subtarget.hasSPE()) {

  if (Subtarget.useCRBits()) {

    if (isPPC64 || Subtarget.hasFPCVT()) {

  if (Subtarget.isISA3_0()) {

  if (!Subtarget.hasSPE()) {

  if (Subtarget.hasVSX()) {

  if (Subtarget.hasFSQRT()) {

  if (Subtarget.hasFPRND()) {

  if (Subtarget.hasSPE()) {

  if (Subtarget.hasSPE())

  if (!Subtarget.hasFSQRT() && !(Subtarget.hasFRSQRTE() && Subtarget.hasFRE()))

  if (!Subtarget.hasFSQRT() &&
      !(Subtarget.hasFRSQRTES() && Subtarget.hasFRES()))

  if (Subtarget.hasFCPSGN()) {

  if (Subtarget.hasFPRND()) {

  if (Subtarget.isISA3_1()) {

                       (Subtarget.hasP9Vector() && isPPC64) ? Custom : Expand);

  if (Subtarget.isISA3_0()) {

  if (!Subtarget.useCRBits()) {

  if (!Subtarget.useCRBits())

  if (Subtarget.hasFPU()) {

  if (!Subtarget.useCRBits())

  if (Subtarget.hasSPE()) {

  if (Subtarget.hasDirectMove() && isPPC64) {

  if (Subtarget.is64BitELFABI()) {

  } else if (Subtarget.is32BitELFABI()) {

  if (Subtarget.is32BitELFABI())

  if (Subtarget.hasSPE()) {

  if (Subtarget.has64BitSupport()) {

    if (Subtarget.hasLFIWAX() || isPPC64) {

    if (Subtarget.hasSPE()) {

  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {

  if (Subtarget.use64BitRegs()) {

  if (Subtarget.has64BitSupport()) {

  if (Subtarget.hasVSX()) {

  if (Subtarget.hasAltivec()) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) {

      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {

      if (Subtarget.hasVSX()) {

      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {

      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))

    if (!Subtarget.hasP8Vector()) {

    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})

    if (Subtarget.hasP8Altivec())

    if (Subtarget.hasVSX()) {

    if (Subtarget.hasP8Altivec())

    if (Subtarget.isISA3_1()) {

    if (Subtarget.hasVSX()) {

      if (Subtarget.hasP8Vector()) {

      if (Subtarget.hasDirectMove() && isPPC64) {

      if (Subtarget.hasP8Vector())

      if (Subtarget.hasP8Altivec()) {

      if (Subtarget.isISA3_1())

    if (Subtarget.hasP8Altivec()) {

    if (Subtarget.hasP9Vector()) {

      if (Subtarget.useCRBits()) {

    } else if (Subtarget.hasVSX()) {

      for (MVT VT : {MVT::f32, MVT::f64}) {

    if (Subtarget.hasP9Altivec()) {
      if (Subtarget.isISA3_1()) {

    if (Subtarget.hasP10Vector()) {

  if (Subtarget.pairedVectorMemops()) {

  if (Subtarget.hasMMA()) {
    if (Subtarget.isISAFuture()) {

  if (Subtarget.has64BitSupport())

  if (Subtarget.isISA3_1())

  if (Subtarget.hasAltivec()) {

  if (Subtarget.hasFPCVT())

  if (Subtarget.useCRBits())

  if (Subtarget.useCRBits()) {

  if (Subtarget.useCRBits()) {

  auto CPUDirective = Subtarget.getCPUDirective();
  switch (CPUDirective) {

  if (Subtarget.enableMachineScheduler())
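// The constructor excerpts above all follow one pattern: query a subtarget
// feature, then mark SelectionDAG operations as Legal/Custom/Expand for the
// affected types. A minimal illustrative sketch (the opcode, type, and
// actions here are assumed for illustration):
//
//   if (Subtarget.hasAltivec())
//     setOperationAction(ISD::MUL, MVT::v4i32, Legal);
//   else
//     setOperationAction(ISD::MUL, MVT::v4i32, Expand);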
 
 
void PPCTargetLowering::initializeAddrModeMap() {
 
  if (MaxAlign == MaxMaxAlign)

    if (MaxMaxAlign >= 32 &&
        VTy->getPrimitiveSizeInBits().getFixedValue() >= 256)
      MaxAlign = Align(32);
    else if (VTy->getPrimitiveSizeInBits().getFixedValue() >= 128 &&
      MaxAlign = Align(16);

    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;

    for (auto *EltTy : STy->elements()) {

      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
 
 
  if (Subtarget.hasAltivec())
 
 
  return Subtarget.useSoftFloat();
 
 
  return Subtarget.hasSPE();
 
 
    Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const {
  if (!Subtarget.isPPC64() || !Subtarget.hasVSX())

    if (VTy->getScalarType()->isIntegerTy()) {

      if (ElemSizeInBits == 32) {
        Index = Subtarget.isLittleEndian() ? 2 : 1;

      if (ElemSizeInBits == 64) {
        Index = Subtarget.isLittleEndian() ? 1 : 0;
 
 
    return "PPCISD::FTSQRT";

    return "PPCISD::FSQRT";

    return "PPCISD::XXSPLTI_SP_TO_DP";

    return "PPCISD::XXSPLTI32DX";

    return "PPCISD::XXPERM";

    return "PPCISD::VSRQ";

    return "PPCISD::CALL_RM";

    return "PPCISD::CALL_NOP_RM";

    return "PPCISD::CALL_NOTOC_RM";

    return "PPCISD::BCTRL_RM";

    return "PPCISD::BCTRL_LOAD_TOC_RM";

    return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";

    return "PPCISD::ANDI_rec_1_EQ_BIT";

    return "PPCISD::ANDI_rec_1_GT_BIT";

    return "PPCISD::ST_VSR_SCAL_INT";

    return "PPCISD::PADDI_DTPREL";

    return "PPCISD::VADD_SPLAT";

    return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";

    return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";

    return "PPCISD::STRICT_FADDRTZ";

    return "PPCISD::STRICT_FCTIDZ";

    return "PPCISD::STRICT_FCTIWZ";

    return "PPCISD::STRICT_FCTIDUZ";

    return "PPCISD::STRICT_FCTIWUZ";

    return "PPCISD::STRICT_FCFID";

    return "PPCISD::STRICT_FCFIDU";

    return "PPCISD::STRICT_FCFIDS";

    return "PPCISD::STRICT_FCFIDUS";

    return "PPCISD::STORE_COND";

    return "PPCISD::SETBC";

    return "PPCISD::SETBCR";

    return "PPCISD::ADDC";

    return "PPCISD::ADDE";

    return "PPCISD::SUBC";

    return "PPCISD::SUBE";
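// The returns above are case bodies from PPCTargetLowering::getTargetNodeName,
// which maps PPCISD opcodes to the strings shown in SelectionDAG debug dumps.
// A sketch of the surrounding structure (assumed from common LLVM practice):
//
//   switch ((PPCISD::NodeType)Opcode) {
//   case PPCISD::SETBC: return "PPCISD::SETBC";
//   ...
//   }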
 
 
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
 
 
    return CFP->getValueAPF().isZero();

        return CFP->getValueAPF().isZero();
 
 
  return Op < 0 || Op == Val;
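// A shuffle-mask element Op is "constant or undef" when it is either undef
// (encoded as a negative value) or exactly Val.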
 
 
  if (ShuffleKind == 0) {

    for (unsigned i = 0; i != 16; ++i)

  } else if (ShuffleKind == 2) {

    for (unsigned i = 0; i != 16; ++i)

  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
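// Note on the ShuffleKind parameter used by this and the following matchers:
// 0 = big-endian shuffle with two distinct inputs, 1 = either-endian shuffle
// with two identical inputs (unary), 2 = little-endian shuffle with two
// distinct inputs.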
 
 
  if (ShuffleKind == 0) {

    for (unsigned i = 0; i != 16; i += 2)

  } else if (ShuffleKind == 2) {

    for (unsigned i = 0; i != 16; i += 2)

  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
 
 
  if (!Subtarget.hasP8Vector())

  if (ShuffleKind == 0) {

    for (unsigned i = 0; i != 16; i += 4)

  } else if (ShuffleKind == 2) {

    for (unsigned i = 0; i != 16; i += 4)

  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
 
 
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)

  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)
    for (unsigned j = 0; j != UnitSize; ++j) {

                             LHSStart+j+i*UnitSize) ||

                             RHSStart+j+i*UnitSize))
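// isVMerge checks a vmrgl/vmrgh-style mask: groups of UnitSize bytes are
// drawn alternately from the LHS range (starting at LHSStart) and the RHS
// range (starting at RHSStart), with undef mask elements matching anything.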
 
 
    if (ShuffleKind == 1)

    else if (ShuffleKind == 2)

    if (ShuffleKind == 1)

    else if (ShuffleKind == 0)


    if (ShuffleKind == 1)

    else if (ShuffleKind == 2)

    if (ShuffleKind == 1)

    else if (ShuffleKind == 0)
 
 
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)

                             i*RHSStartValue+j+IndexOffset) ||

                             i*RHSStartValue+j+IndexOffset+8))


    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1)

    else if (ShuffleKind == 2)

    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1)

    else if (ShuffleKind == 0)
 
 
  if (N->getValueType(0) != MVT::v16i8)

  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)

  if (i == 16) return -1;

  if (ShiftAmt < i) return -1;

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {

    for (++i; i != 16; ++i)

  } else if (ShuffleKind == 1) {

    for (++i; i != 16; ++i)

    ShiftAmt = 16 - ShiftAmt;
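// vsldoi selects 16 consecutive bytes from the concatenation of the two
// inputs starting at ShiftAmt; on little-endian targets the operands end up
// swapped, so the shift is mirrored (ShiftAmt = 16 - ShiftAmt) before being
// returned.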
 
 
  EVT VT = N->getValueType(0);
  if (VT == MVT::v2i64 || VT == MVT::v2f64)
    return EltSize == 8 && N->getMaskElt(0) == N->getMaskElt(1);

         EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");

  if (N->getMaskElt(0) % EltSize != 0)

  unsigned ElementBase = N->getMaskElt(0);

  if (ElementBase >= 16)

  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {

    if (N->getMaskElt(i) < 0) {
      for (unsigned j = 1; j != EltSize; ++j)
        if (N->getMaskElt(i + j) >= 0)

      for (unsigned j = 0; j != EltSize; ++j)
        if (N->getMaskElt(i + j) != N->getMaskElt(j))
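// A splat mask repeats one EltSize-byte group across all 16 bytes: the first
// group must start on an element boundary in the first input, and every later
// group must be either all-undef or byte-for-byte identical to the first.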
 
 
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16];
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
 
 
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {

  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;

  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;

  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;

  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;

  if (N->getOperand(1).isUndef()) {

    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;

    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;

    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;

    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
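// Summary of the matcher above: xxinsertw inserts one 32-bit word into a
// vector, so the mask must keep three words from one input and take the
// fourth from the other (or, in the unary case, from another lane of the same
// input); ShiftElts rotates the source word into position and InsertAtByte is
// the byte offset of the insertion, with separate LE and BE tables.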
 
 
                               bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;

  if (N->getOperand(1).isUndef()) {
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;

  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)

    if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {

      ShiftElts = (8 - M0) % 8;
    } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {

      ShiftElts = (4 - M0) % 4;

    if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {

    } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
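// Summary of the matcher above: xxsldwi shifts the 16-byte concatenation of
// the two inputs left by ShiftElts words. The unary case only requires the
// four mask words to be consecutive modulo 4; the binary case works modulo 8
// and sets Swap when the shuffle starts in the second input.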
 
 
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
 
 
                               bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  unsigned M0 = N->getMaskElt(0) / 8;
  unsigned M1 = N->getMaskElt(8) / 8;
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");

  if (N->getOperand(1).isUndef()) {
    if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);

    if (M0 > 1 && M1 < 2) {

    DM = (((~M1) & 1) << 1) + ((~M0) & 1);

    } else if (M0 > 1 && M1 < 2) {

    DM = (M0 << 1) + (M1 & 1);
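// DM is the 2-bit doubleword-select immediate of xxpermdi: bit 1 picks the
// high doubleword of the result, bit 0 the low one. The bitwise complements
// in the little-endian paths above account for the reversed doubleword
// numbering between LE element order and the instruction's BE definition.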
 
 
  if (VT == MVT::v2i64 || VT == MVT::v2f64)

    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
 
 
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;

    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))

    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;

      if (!UniquedVals[Multiple-1].getNode())

      if (!UniquedVals[Multiple-1].getNode())

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;

      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))

  unsigned ValSizeInBytes = EltSize;

    Value = CN->getZExtValue();

    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");

  if (ValSizeInBytes < ByteSize) return SDValue();

  if (MaskVal == 0) return SDValue();
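// This helper decides whether the BUILD_VECTOR can be materialized by a
// single vspltis[bhw] of a sign-extended 5-bit immediate, possibly at a wider
// element size than requested; it returns the splat value on success and an
// empty SDValue otherwise.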
 
 
  Imm = (int16_t)N->getAsZExtVal();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)N->getAsZExtVal();

    return Imm == (int64_t)N->getAsZExtVal();


  return (~(LHSKnown.Zero | RHSKnown.Zero) == 0);
 
 
  for (SDNode *U : N->users()) {

      if (Memop->getMemoryVT() == MVT::f64) {
          Base = N.getOperand(0);
          Index = N.getOperand(1);


        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))

    Base = N.getOperand(0);
    Index = N.getOperand(1);

  } else if (N.getOpcode() == ISD::OR) {

        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))

      if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
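// Register+register address selection: an ADD node is split directly into
// Base/Index, and an OR may be treated the same way when the KnownBits query
// above proves the operands share no set bits (the OR then behaves like an
// ADD).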
 
 
        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {

        Base = N.getOperand(0);

    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {

      assert(!N.getOperand(1).getConstantOperandVal(1) &&
             "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);

      Base = N.getOperand(0);

  } else if (N.getOpcode() == ISD::OR) {

        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {

          Base = N.getOperand(0);

        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {

                             CN->getValueType(0));

    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!EncodingAlignment ||
         isAligned(*EncodingAlignment, CN->getZExtValue()))) {
      int Addr = (int)CN->getZExtValue();

      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
 
 
  if (N.getValueType() != MVT::i64)

      Base = N.getOperand(0);

      Base = N.getOperand(0);


       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
 
 
  EVT MemVT = LD->getMemoryVT();

    if (!ST.hasP8Vector())

    if (!ST.hasP9Vector())

    if (Use.getResNo() == 0 &&


    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlign();

    Ptr = ST->getBasePtr();
    VT  = ST->getMemoryVT();
    Alignment = ST->getAlign();

  if (VT != MVT::i64) {

    if (Alignment < Align(4))

    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
 
 
                               unsigned &HiOpFlags, unsigned &LoOpFlags,


  EVT VT = Subtarget.getScalarIntVT();

                : Subtarget.isAIXABI()

  EVT PtrVT = Op.getValueType();

  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    if (Subtarget.isUsingPCRelativeCalls()) {

    return getTOCEntry(DAG, SDLoc(CP), GA);

  unsigned MOHiFlag, MOLoFlag;

  if (IsPIC && Subtarget.isSVR4ABI()) {

    return getTOCEntry(DAG, SDLoc(CP), GA);

  if (Subtarget.isPPC64() || Subtarget.isAIXABI())


  if (!Subtarget.isPPC64() || Subtarget.isAIXABI())


  if (!Subtarget.isPPC64() || Subtarget.isAIXABI())


  EVT PtrVT = Op.getValueType();

    return getTOCEntry(DAG, SDLoc(JT), GA);

  unsigned MOHiFlag, MOLoFlag;

  if (IsPIC && Subtarget.isSVR4ABI()) {

    return getTOCEntry(DAG, SDLoc(GA), GA);
 
  EVT PtrVT = Op.getValueType();

  if (Subtarget.isUsingPCRelativeCalls()) {

  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {

    return getTOCEntry(DAG, SDLoc(BASDN), GA);

  unsigned MOHiFlag, MOLoFlag;

  if (Subtarget.isAIXABI())
    return LowerGlobalTLSAddressAIX(Op, DAG);

  return LowerGlobalTLSAddressLinux(Op, DAG);

        if (I.getOpcode() == Instruction::Call)
          if (Function *CF = CI->getCalledFunction())
            if (CF->isDeclaration() &&
                CF->getIntrinsicID() == Intrinsic::threadlocal_address)

    unsigned TLSGVCnt = TLSGV.size();

               << " function is using the TLS-IE model for TLS-LD access.\n");
 
 
  const GlobalValue *GV = GA->getGlobal();

  bool Is64Bit = Subtarget.isPPC64();

  if (Subtarget.hasAIXShLibTLSModelOpt())

    bool HasAIXSmallLocalExecTLS = Subtarget.hasAIXSmallLocalExecTLS();
    bool HasAIXSmallTLSGlobalAttr = false;

    SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);

      if (GVar->hasAttribute("aix-small-tls"))
        HasAIXSmallTLSGlobalAttr = true;

      if ((HasAIXSmallLocalExecTLS || HasAIXSmallTLSGlobalAttr) &&
          IsTLSLocalExecModel) {

      if (HasAIXSmallLocalExecTLS || HasAIXSmallTLSGlobalAttr)

                           "currently only supported on AIX (64-bit mode).");

    bool HasAIXSmallLocalDynamicTLS = Subtarget.hasAIXSmallLocalDynamicTLS();

    if (!Is64Bit && HasAIXSmallLocalDynamicTLS)

                         "currently only supported on AIX (64-bit mode).");

    SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);

    GlobalVariable *TLSGV =

    assert(TLSGV && "Not able to create GV for _$TLSML.");

    SDValue ModuleHandleTOC = getTOCEntry(DAG, dl, ModuleHandleTGA);

    if (HasAIXSmallLocalDynamicTLS) {

    return DAG.getNode(ISD::ADD, dl, PtrVT, ModuleHandle, VariableOffset);

  SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
  SDValue RegionHandle = getTOCEntry(DAG, dl, RegionHandleTGA);
 
  const GlobalValue *GV = GA->getGlobal();

  bool is64bit = Subtarget.isPPC64();

    if (Subtarget.isUsingPCRelativeCalls()) {

    bool IsPCRel = Subtarget.isUsingPCRelativeCalls();

                             MachinePointerInfo());

        if (!TM.isPositionIndependent())

    if (Subtarget.isUsingPCRelativeCalls()) {

    if (Subtarget.isUsingPCRelativeCalls()) {

                                  PtrVT, GOTPtr, TGA, TGA);

                                      PtrVT, TLSAddr, TGA);

  EVT PtrVT = Op.getValueType();

  const GlobalValue *GV = GSDN->getGlobal();

  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    if (Subtarget.isUsingPCRelativeCalls()) {

                                   MachinePointerInfo());

    return getTOCEntry(DAG, DL, GA);

  unsigned MOHiFlag, MOLoFlag;

  if (IsPIC && Subtarget.isSVR4ABI()) {

    return getTOCEntry(DAG, DL, GA);
 
  bool IsStrict = Op->isStrictFPOpcode();

  EVT LHSVT = LHS.getValueType();

  if (LHSVT == MVT::f128) {
    assert(!Subtarget.hasP9Vector() &&
           "SETCC for f128 is already legal under Power9!");

  assert(!IsStrict && "Don't know how to handle STRICT_FSETCC!");

  if (Op.getValueType() == MVT::v2i64) {

    if (LHS.getValueType() == MVT::v2i64) {

          dl, MVT::v4i32, DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, LHS),
          DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, RHS), CC);
      int ShuffV[] = {1, 0, 3, 2};

                                        dl, MVT::v4i32, Shuff, SetCC32));
 
    if (C->isAllOnes() || C->isZero())

    EVT VT = Op.getValueType();

  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);

  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");

                                    VAListPtr, MachinePointerInfo(SV), MVT::i8);

  if (VT == MVT::i64) {

                                    FprPtr, MachinePointerInfo(SV), MVT::i8);

      DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
  InChain = OverflowArea.getValue(1);

      DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());

                              MachinePointerInfo(SV), MVT::i8);

  InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
                              MachinePointerInfo(), MVT::i32);

  return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());

  assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");

                       false, true, nullptr, std::nullopt,
                       MachinePointerInfo(), MachinePointerInfo());

  return Op.getOperand(0);
 
  PPCFunctionInfo &MFI = *MF.getInfo<PPCFunctionInfo>();

  assert((Op.getOpcode() == ISD::INLINEASM ||
          Op.getOpcode() == ISD::INLINEASM_BR) &&
         "Expecting Inline ASM node.");

  if (Op.getOperand(NumOps - 1).getValueType() == MVT::Glue)

    const InlineAsm::Flag Flags(Op.getConstantOperandVal(i));
    unsigned NumVals = Flags.getNumOperandRegisters();

    switch (Flags.getKind()) {

      for (; NumVals; --NumVals, ++i) {

        if (Reg != PPC::LR && Reg != PPC::LR8)
 
  if (Subtarget.isAIXABI()) {

    uint64_t PointerSize = Subtarget.isPPC64() ? 8 : 4;
    MaybeAlign PointerAlign(PointerSize);
    auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()

                        : MachineMemOperand::MONone;

    const Value *TrampolineAddr =

        DAG.getLoad(PtrVT, dl, Chain, FPtr, MachinePointerInfo(Func, 0),
                    PointerAlign, MMOFlags);

    OutChains[0] = DAG.getStore(EPLoadChain, dl, LoadEntryPoint, Trmp,
                                MachinePointerInfo(TrampolineAddr, 0));

    SDValue TOCFromDescriptorPtr =

    SDValue TOCReg = DAG.getLoad(PtrVT, dl, Chain, TOCFromDescriptorPtr,
                                 MachinePointerInfo(Func, TOCPointerOffset),
                                 PointerAlign, MMOFlags);
    SDValue TrampolineTOCPointer =

        DAG.getStore(TOCLoadChain, dl, TOCReg, TrampolineTOCPointer,
                     MachinePointerInfo(TrampolineAddr, TOCPointerOffset));

        DAG.getStore(Chain, dl, Nest, EnvPointer,
                     MachinePointerInfo(TrampolineAddr, EnvPointerOffset));

  bool isPPC64 = (PtrVT == MVT::i64);

  Args.emplace_back(Trmp, IntPtrTy);

      DAG.getConstant(isPPC64 ? 48 : 40, dl, Subtarget.getScalarIntVT()),

  Args.emplace_back(FPtr, IntPtrTy);
  Args.emplace_back(Nest, IntPtrTy);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
 
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {

    return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                        MachinePointerInfo(SV));

  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;

  uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;

  uint64_t FPROffset = 1;

                        MachinePointerInfo(SV), MVT::i8);
  uint64_t nextOffset = FPROffset;

                        MachinePointerInfo(SV, nextOffset), MVT::i8);
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);

  SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
                                    MachinePointerInfo(SV, nextOffset));
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);

  return DAG.getStore(thirdStore, dl, FR, nextPtr,
                      MachinePointerInfo(SV, nextOffset));
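// On 32-bit SVR4, va_list is a structure rather than a bare pointer; the
// stores above fill in its fields. Layout per the SVR4 PowerPC ABI:
//
//   typedef struct {
//     char gpr;                // index of next general register (0..8)
//     char fpr;                // index of next floating-point register (0..8)
//     char *overflow_arg_area; // arguments passed in memory
//     char *reg_save_area;     // GPRs/FPRs spilled by the prologue
//   } va_list[1];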
 
static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
                                PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
                                PPC::F11, PPC::F12, PPC::F13};
 
 
                                       unsigned PtrByteSize) {

  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();

  if (!Flags.isInConsecutiveRegs())
    ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;


                                         unsigned PtrByteSize) {
  Align Alignment(PtrByteSize);

  if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
      ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
      ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
      ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
    Alignment = Align(16);

  if (Flags.isByVal()) {
    auto BVAlign = Flags.getNonZeroByValAlign();
    if (BVAlign > PtrByteSize) {
      if (BVAlign.value() % PtrByteSize != 0)

            "ByVal alignment is not a multiple of the pointer size");

      Alignment = BVAlign;

  if (Flags.isInConsecutiveRegs()) {

    if (Flags.isSplit() && OrigVT != MVT::ppcf128)
 
 
                                   unsigned PtrByteSize, unsigned LinkageSize,
                                   unsigned ParamAreaSize, unsigned &ArgOffset,
                                   unsigned &AvailableFPRs,
                                   unsigned &AvailableVRs) {
  bool UseMemory = false;

  ArgOffset = alignTo(ArgOffset, Alignment);

  if (ArgOffset >= LinkageSize + ParamAreaSize)

  if (Flags.isInConsecutiveRegsLast())
    ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  if (ArgOffset > LinkageSize + ParamAreaSize)

  if (!Flags.isByVal()) {
    if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
      if (AvailableFPRs > 0) {

    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
        ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
      if (AvailableVRs > 0) {
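// CalculateStackSlotUsed models one parameter's effect on the 64-bit ELF
// argument area: it aligns ArgOffset for the argument, reports whether the
// value must live in memory (rather than in a remaining FPR/VR), and keeps
// the running offset so callers can size the parameter save area.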
 
 
                                     unsigned NumBytes) {
 
 
SDValue PPCTargetLowering::LowerFormalArguments(

  if (Subtarget.isAIXABI())
    return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,

  if (Subtarget.is64BitELFABI())
    return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,

  assert(Subtarget.is32BitELFABI());
  return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
 
SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  const Align PtrAlign(4);

  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  CCInfo.AllocateStack(LinkageSize, PtrAlign);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

      const TargetRegisterClass *RC;

          RC = &PPC::GPRCRegClass;

          if (Subtarget.hasP8Vector())
            RC = &PPC::VSSRCRegClass;
          else if (Subtarget.hasSPE())
            RC = &PPC::GPRCRegClass;

            RC = &PPC::F4RCRegClass;

          if (Subtarget.hasVSX())
            RC = &PPC::VSFRCRegClass;
          else if (Subtarget.hasSPE())

            RC = &PPC::GPRCRegClass;

            RC = &PPC::F8RCRegClass;

          RC = &PPC::VRRCRegClass;

          RC = &PPC::VRRCRegClass;

          RC = &PPC::VRRCRegClass;

      if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
        assert(i + 1 < e && "No second half of double precision argument");

        if (!Subtarget.isLittleEndian())

                                      ValVT == MVT::i1 ? MVT::i32 : ValVT);
        if (ValVT == MVT::i1)

      ArgOffset += ArgSize - ObjSize;

  CCByValInfo.AllocateStack(CCInfo.getStackSize(), PtrAlign);

  unsigned MinReservedArea = CCByValInfo.getStackSize();
  MinReservedArea = std::max(MinReservedArea, LinkageSize);

      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,

    const unsigned NumGPArgRegs = std::size(GPArgRegs);

      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,

    unsigned NumFPArgRegs = std::size(FPArgRegs);

    int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
                NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;

        PtrVT.getSizeInBits() / 8, CCInfo.getStackSize(), true));

        VReg = MF.addLiveIn(GPArgReg, &PPC::GPRCRegClass);

    for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {

        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);

  if (!MemOps.empty())
 
                                             const SDLoc &dl) const {

  else if (Flags.isZExt())
 
SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(

  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

         "fastcc not supported on varargs functions");

  unsigned PtrByteSize = 8;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,

    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13

  const unsigned Num_GPR_Regs = std::size(GPR);

  const unsigned Num_VR_Regs = std::size(VR);

  bool HasParameterArea = !isELFv2ABI || isVarArg;
  unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = Num_FPR_Regs;
  unsigned AvailableVRs = Num_VR_Regs;
  for (const ISD::InputArg &In : Ins) {
    if (In.Flags.isNest())

                               LinkageSize, ParamAreaSize, NumBytes,
                               AvailableFPRs, AvailableVRs))
      HasParameterArea = true;

  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {

    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    EVT OrigVT = Ins[ArgNo].ArgVT;

    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();

    unsigned CurArgOffset;

    auto ComputeArgOffset = [&]() {

      ArgOffset = alignTo(ArgOffset, Alignment);
      CurArgOffset = ArgOffset;

      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, Num_GPR_Regs);

    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

      if (HasParameterArea ||
          ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)

      if (ObjSize < PtrByteSize) {

        if (!isLittleEndian) {

        if (GPR_idx != Num_GPR_Regs) {

                                MachinePointerInfo(&*FuncArg), ObjType);

        ArgOffset += PtrByteSize;

      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        if (GPR_idx == Num_GPR_Regs)

        unsigned StoreSizeInBits = std::min(PtrByteSize, (ObjSize - j)) * 8;

                              MachinePointerInfo(&*FuncArg, j), ObjType);

      ArgOffset += ArgSize;

      if (Flags.isNest()) {

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

      if (GPR_idx != Num_GPR_Regs) {

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)

          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        ArgSize = PtrByteSize;

      if (FPR_idx != Num_FPR_Regs) {

        if (ObjectVT == MVT::f32)

                              Subtarget.hasP8Vector()
                                  ? &PPC::VSSRCRegClass
                                  : &PPC::F4RCRegClass);

                                                ? &PPC::VSFRCRegClass
                                                : &PPC::F8RCRegClass);

        if (ObjectVT == MVT::f32) {
          if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))

        ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);

        ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
        ArgOffset += ArgSize;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

      if (VR_idx != Num_VR_Regs) {

      if (ObjSize < ArgSize && !isLittleEndian)
        CurArgOffset += ArgSize - ObjSize;

      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());

  unsigned MinReservedArea;
  if (HasParameterArea)
    MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);

    MinReservedArea = LinkageSize;

    int Depth = ArgOffset;

    for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
         GPR_idx < Num_GPR_Regs; ++GPR_idx) {

  if (!MemOps.empty())
 
                                   unsigned ParamSize) {

  if (!isTailCall) return 0;

  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;

  if (SPDiff < FI->getTailCallSPDelta())


         "PC Relative callers do not have a TOC and cannot share a TOC Base");

  if (!TM.shouldAssumeDSOLocal(CalleeGV))

  if (TM.getFunctionSections() || CalleeGV->hasComdat() ||
      Caller->hasComdat() || CalleeGV->getSection() != Caller->getSection())

    if (F->getSectionPrefix() != Caller->getSectionPrefix())
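// callsShareTOCBase is deliberately conservative: unless the callee is known
// DSO-local and placed in the same section as the caller, it assumes the TOC
// pointer may differ, forcing a TOC save/restore around the call.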
 
 
  const unsigned PtrByteSize = 8;

    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,

    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13

  const unsigned NumGPRs = std::size(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = std::size(VR);
  const unsigned ParamAreaSize = NumGPRs * PtrByteSize;

  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = NumFPRs;
  unsigned AvailableVRs = NumVRs;

    if (Param.Flags.isNest()) continue;

                               LinkageSize, ParamAreaSize, NumBytes,
                               AvailableFPRs, AvailableVRs))


  auto CalleeArgEnd = CB.arg_end();

  for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
    const Value *CalleeArg = *CalleeArgIter;
    const Value *CallerArg = &(*CallerArgIter);
    if (CalleeArg == CallerArg)
 
 
  if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))


bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(

    bool isCalleeExternalSymbol) const {

  if (DisableSCO && !TailCallOpt) return false;

  if (isVarArg) return false;

  if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))

  if (!Subtarget.isUsingPCRelativeCalls() &&

  if (!Subtarget.isUsingPCRelativeCalls() &&

bool PPCTargetLowering::IsEligibleForTailCallOptimization(

    if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))

  if (!C) return nullptr;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||

          (int)C->getZExtValue() >> 2, SDLoc(Op),
 
 
struct TailCallArgumentInfo {

  TailCallArgumentInfo() = default;

  for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
    SDValue Arg = TailCallArgs[i].Arg;
    SDValue FIN = TailCallArgs[i].FrameIdxOp;
    int FI = TailCallArgs[i].FrameIdx;

        Chain, dl, Arg, FIN,


                                             int SPDiff, const SDLoc &dl) {

    int SlotSize = Subtarget.isPPC64() ? 8 : 4;
    int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();

                                                         NewRetAddrLoc, true);

    Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,


    int SPDiff, unsigned ArgOffset,

  int Offset = ArgOffset + SPDiff;

  EVT VT = IsPPC64 ? MVT::i64 : MVT::i32;

  TailCallArgumentInfo Info;

  Info.FrameIdxOp = FIN;


SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(

    LROpOut = getReturnAddrFrameIndex(DAG);
    LROpOut = DAG.getLoad(Subtarget.getScalarIntVT(), dl, Chain, LROpOut,
                          MachinePointerInfo());

      Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(), false, false,


    SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,


                const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,

  if (!MemOpChains2.empty())
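// For tail calls the callee reuses the caller's frame: SPDiff is the
// difference between the caller's reserved parameter area and what the
// callee needs, the return address is re-stored at its new offset, and
// arguments that must live on the stack are queued in TailCallArguments so
// their stores happen only after the old frame contents are dead.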
 
 
SDValue PPCTargetLowering::LowerCallResult(

  CCRetInfo.AnalyzeCallResult(

  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];

    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {

      if (!Subtarget.isLittleEndian())

                              bool IsStrictFPCall = false) {
 
  unsigned RetOpc = 0;

  if (IsStrictFPCall) {


  auto isLocalCallee = [&]() {

  const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {

      return getAIXFuncEntryPointSymbolSDNode(GV);

    const char *SymName = S->getSymbol();

        return getAIXFuncEntryPointSymbolSDNode(F);

      const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {

      SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();

  assert(Callee.getNode() && "What no callee?");


         "Expected a CALLSEQ_STARTSDNode.");


  SDValue MTCTROps[] = {Chain, Callee, Glue};
  EVT ReturnTypes[] = {MVT::Other, MVT::Glue};


  auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()

  SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
                                    Alignment, MMOFlags);

      DAG.getLoad(RegVT, dl, LDChain, AddTOC,

      DAG.getLoad(RegVT, dl, LDChain, AddPtr,

         "Nest parameter is not supported on AIX.");
 
 
                  SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,

  const bool IsPPC64 = Subtarget.isPPC64();

  Ops.push_back(Chain);

    Ops.push_back(Callee);

      Ops.push_back(AddTOC);

      Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));

  for (const auto &[Reg, N] : RegsToPass)

  assert(Mask && "Missing call preserved mask for calling convention");

    Ops.push_back(Glue);
 
 
SDValue PPCTargetLowering::FinishCall(

  if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
      Subtarget.isAIXABI())

  if (!CFlags.IsIndirect)

  else if (Subtarget.usesFunctionDescriptors())

                                  dl, CFlags.HasNest, Subtarget);

  if (CFlags.IsTailCall) {

            (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
           "Expecting a global address, external symbol, absolute value, "
           "register or an indirect tail call when PC Relative calls are "
           "Unexpected call opcode for a tail call.");

  std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
  Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);

  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, BytesCalleePops, Glue, dl);

  return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,

  return isEligibleForTCO(CalleeGV, CalleeCC, CallerCC, CB,
                          CalleeFunc->isVarArg(), Outs, Ins, CallerFunc,
 
 
bool PPCTargetLowering::isEligibleForTCO(

    bool isCalleeExternalSymbol) const {

  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
    return IsEligibleForTailCallOptimization_64SVR4(
        CalleeGV, CalleeCC, CallerCC, CB, isVarArg, Outs, Ins, CallerFunc,
        isCalleeExternalSymbol);

    return IsEligibleForTailCallOptimization(CalleeGV, CalleeCC, CallerCC,

        isEligibleForTCO(GV, CallConv, CallerCC, CB, isVarArg, Outs, Ins,

             "Callee should be an llvm::Function object.");

                        << "\nTCO callee: ");

                       "site marked musttail");

    Callee = LowerGlobalAddress(Callee, DAG);

      CallConv, isTailCall, isVarArg, isPatchPoint,

      Subtarget.is64BitELFABI() &&

  if (Subtarget.isAIXABI())
    return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,

  assert(Subtarget.isSVR4ABI());
  if (Subtarget.isPPC64())
    return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,

  return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
 
SDValue PPCTargetLowering::LowerCall_32SVR4(

  const bool IsVarArg = CFlags.IsVarArg;
  const bool IsTailCall = CFlags.IsTailCall;

  const Align PtrAlign(4);

    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),

    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;

                               Outs[i].OrigTy, CCInfo);

                                      ArgFlags, Outs[i].OrigTy, CCInfo);

        errs() << "Call operand #" << i << " has unhandled type "

  CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());

  CCByValInfo.AllocateStack(CCInfo.getStackSize(), PtrAlign);

  unsigned NumBytes = CCByValInfo.getStackSize();

  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  bool seenFloatArg = false;

  for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size(); i != e;
       ++i, ++RealArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[RealArgIdx];
    ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;

    if (Flags.isByVal()) {

      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
      CCValAssign &ByValVA = ByValArgLocs[j++];

      Chain = CallSeqStart = NewCallSeqStart;

      if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
        bool IsLE = Subtarget.isLittleEndian();

        RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),

            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));

  if (!MemOpChains.empty())

  for (const auto &[Reg, N] : RegsToPass) {

    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);

  return FinishCall(CFlags, dl, DAG, RegsToPass, InGlue, Chain, CallSeqStart,
                    Callee, SPDiff, NumBytes, Ins, InVals, CB);
 
SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(

  return NewCallSeqStart;
 
 6278SDValue PPCTargetLowering::LowerCall_64SVR4(
 
 6285  bool isELFv2ABI = Subtarget.isELFv2ABI();
 
 6286  bool isLittleEndian = Subtarget.isLittleEndian();
 
 6288  bool IsSibCall = 
false;
 
 6292  unsigned PtrByteSize = 8;
 
 6305    MF.
getInfo<PPCFunctionInfo>()->setHasFastCall();
 
 6307  assert(!(IsFastCall && CFlags.IsVarArg) &&
 
 6308         "fastcc not supported on varargs functions");
 
 6314  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
 
 6315  unsigned NumBytes = LinkageSize;
 
 6316  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
 
 6319    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
 
 6320    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
 
 6323    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
 
 6324    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
 
 6327  const unsigned NumGPRs = std::size(GPR);
 
 6329  const unsigned NumVRs = std::size(VR);
 
 6335  bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
 
 6336  if (!HasParameterArea) {
 
 6337    unsigned ParamAreaSize = NumGPRs * PtrByteSize;
 
 6338    unsigned AvailableFPRs = NumFPRs;
 
 6339    unsigned AvailableVRs = NumVRs;
 
 6340    unsigned NumBytesTmp = NumBytes;
 
 6341    for (
unsigned i = 0; i != 
NumOps; ++i) {
 
 6342      if (Outs[i].
Flags.isNest()) 
continue;
 
 6344                                 PtrByteSize, LinkageSize, ParamAreaSize,
 
 6345                                 NumBytesTmp, AvailableFPRs, AvailableVRs))
 
 6346        HasParameterArea = 
true;
 
 6352  unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
 
 6357    HasParameterArea = 
false;
 
 6360  for (
unsigned i = 0; i != 
NumOps; ++i) {
 
 6361    ISD::ArgFlagsTy 
Flags = Outs[i].Flags;
 
 6362    EVT ArgVT = Outs[i].VT;
 
 6363    EVT OrigVT = Outs[i].ArgVT;
 
 6369      if (
Flags.isByVal()) {
 
 6370        NumGPRsUsed += (
Flags.getByValSize()+7)/8;
 
 6371        if (NumGPRsUsed > NumGPRs)
 
 6372          HasParameterArea = 
true;
 
 6379          if (++NumGPRsUsed <= NumGPRs)
 
 6389          if (++NumVRsUsed <= NumVRs)
 
 6393          if (++NumVRsUsed <= NumVRs)
 
 6398          if (++NumFPRsUsed <= NumFPRs)
 
 6402        HasParameterArea = 
true;
 
 6409    NumBytes = 
alignTo(NumBytes, Alignement);
 
 6412    if (
Flags.isInConsecutiveRegsLast())
 
 6413      NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
 
 6416  unsigned NumBytesActuallyUsed = NumBytes;
 
 6426  if (HasParameterArea)
 
 6427    NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
 
 6429    NumBytes = LinkageSize;
 
 6444  if (CFlags.IsTailCall)
 
 6456  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
 
 6467  unsigned ArgOffset = LinkageSize;
 
 6473  for (
unsigned i = 0; i != 
NumOps; ++i) {
 
 6475    ISD::ArgFlagsTy 
Flags = Outs[i].Flags;
 
 6476    EVT ArgVT = Outs[i].VT;
 
 6477    EVT OrigVT = Outs[i].ArgVT;
 
 6486    auto ComputePtrOff = [&]() {
 
 6490      ArgOffset = 
alignTo(ArgOffset, Alignment);
 
 6501      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
 
 6502      GPR_idx = std::min(GPR_idx, NumGPRs);
 
 6509      Arg = DAG.
getNode(ExtOp, dl, MVT::i64, Arg);
 
 6515    if (
Flags.isByVal()) {
 
 6533        EVT VT = (
Size==1) ? MVT::i8 : ((
Size==2) ? MVT::i16 : MVT::i32);
 
 6534        if (GPR_idx != NumGPRs) {
 
 6536                                        MachinePointerInfo(), VT);
 
 6538          RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
 
 6540          ArgOffset += PtrByteSize;
 
 6545      if (GPR_idx == NumGPRs && 
Size < 8) {
 
 6547        if (!isLittleEndian) {
 
 6552        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
 
 6555        ArgOffset += PtrByteSize;
 
 6564      if ((NumGPRs - GPR_idx) * PtrByteSize < 
Size)
 
 6565        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
 
 6570      if (
Size < 8 && GPR_idx != NumGPRs) {
 
 6580        if (!isLittleEndian) {
 
 6584        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
 
 6590            DAG.
getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
 
 6592        RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
 
 6595        ArgOffset += PtrByteSize;
 
 6601      for (
unsigned j=0; 
j<
Size; 
j+=PtrByteSize) {
 
 6604        if (GPR_idx != NumGPRs) {
 
 6605          unsigned LoadSizeInBits = std::min(PtrByteSize, (
Size - j)) * 8;
 
 6608                                        MachinePointerInfo(), ObjType);
 
 6611          RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
 
 6612          ArgOffset += PtrByteSize;
 
 6614          ArgOffset += ((
Size - 
j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
 
 6626      if (
Flags.isNest()) {
 
 6628        RegsToPass.
push_back(std::make_pair(PPC::X11, Arg));
 
 6635      if (GPR_idx != NumGPRs) {
 
 6636        RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Arg));
 
 6641        assert(HasParameterArea &&
 
 6642               "Parameter area must exist to pass an argument in memory.");
 
 6644                         true, CFlags.IsTailCall, 
false, MemOpChains,
 
 6645                         TailCallArguments, dl);
 
 6647          ArgOffset += PtrByteSize;
 
 6650        ArgOffset += PtrByteSize;
 
 6663      bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
 
 6664      bool NeededLoad = 
false;
 
 6667      if (FPR_idx != NumFPRs)
 
 6668        RegsToPass.
push_back(std::make_pair(
FPR[FPR_idx++], Arg));
 
 6671      if (!NeedGPROrStack)
 
 6673      else if (GPR_idx != NumGPRs && !IsFastCall) {
 
 6684          ArgVal = DAG.
getNode(ISD::BITCAST, dl, MVT::i64, Arg);
 
 6687        } 
else if (!
Flags.isInConsecutiveRegs()) {
 
 6688          ArgVal = DAG.
getNode(ISD::BITCAST, dl, MVT::i32, Arg);
 
 6693        } 
else if (ArgOffset % PtrByteSize != 0) {
 
 6695          Lo = DAG.
getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
 
 6696          Hi = DAG.
getNode(ISD::BITCAST, dl, MVT::i32, Arg);
 
 6697          if (!isLittleEndian)
 
 6702        } 
else if (
Flags.isInConsecutiveRegsLast()) {
 
 6703          ArgVal = DAG.
getNode(ISD::BITCAST, dl, MVT::i32, Arg);
 
 6705          if (!isLittleEndian)
 
 6715          RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
 
 6723            !isLittleEndian && !
Flags.isInConsecutiveRegs()) {
 
 6728        assert(HasParameterArea &&
 
 6729               "Parameter area must exist to pass an argument in memory.");
 
 6731                         true, CFlags.IsTailCall, 
false, MemOpChains,
 
 6732                         TailCallArguments, dl);
 
 6739      if (!IsFastCall || NeededLoad) {
 
 6741                      Flags.isInConsecutiveRegs()) ? 4 : 8;
 
 6742        if (
Flags.isInConsecutiveRegsLast())
 
 6743          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
 
 6763      if (CFlags.IsVarArg) {
 6764        assert(HasParameterArea &&
 6765               "Parameter area must exist if we have a varargs call.");
 6769            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
 6771        if (VR_idx != NumVRs) {
 6773              DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
 6775          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
 6778        for (unsigned i=0; i<16; i+=PtrByteSize) {
 6779          if (GPR_idx == NumGPRs)
 6784              DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
 6786          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
 6792      if (VR_idx != NumVRs) {
 6793        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
 6798        assert(HasParameterArea &&
 6799               "Parameter area must exist to pass an argument in memory.");
 6801                         true, CFlags.IsTailCall, true, MemOpChains,
 6802                         TailCallArguments, dl);
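      // Vararg vectors are stored to the parameter area first, then reloaded
      // into a VR (if one is free) and into GPRs 16 bytes at a time, so the
      // value is available wherever the callee's va_arg decides to look.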
 
 6813  assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
 6814         "mismatch in size of parameter area");
 6815  (void)NumBytesActuallyUsed;
 6817  if (!MemOpChains.empty())
 6823  if (CFlags.IsIndirect) {
 
 6827      assert(!CFlags.IsTailCall && "Indirect tail calls not supported");
 
 6832      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
 6842    if (isELFv2ABI && !CFlags.IsPatchPoint)
 6843      RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
 6849  for (const auto &[Reg, N] : RegsToPass) {
 6854  if (CFlags.IsTailCall && !IsSibCall)
 6858  return FinishCall(CFlags, dl, DAG, RegsToPass, InGlue, Chain, CallSeqStart,
 6859                    Callee, SPDiff, NumBytes, Ins, InVals, CB);
 6866         "Required alignment greater than stack alignment.");
 6886    return RequiredAlign <= 8;
 6891    return RequiredAlign <= 4;
 
 
 6899      State.getMachineFunction().getSubtarget());
 6900  const bool IsPPC64 = Subtarget.isPPC64();
 6901  const unsigned PtrSize = IsPPC64 ? 8 : 4;
 6902  const Align PtrAlign(PtrSize);
 6903  const Align StackAlign(16);
 6906  if (ValVT == MVT::f128)
 6910                                     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
 6911                                     PPC::R7, PPC::R8, PPC::R9, PPC::R10};
 6913                                     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
 6914                                     PPC::X7, PPC::X8, PPC::X9, PPC::X10};
 6917                                 PPC::V2,  PPC::V3,  PPC::V4,  PPC::V5,
 6918                                 PPC::V6,  PPC::V7,  PPC::V8,  PPC::V9,
 6919                                 PPC::V10, PPC::V11, PPC::V12, PPC::V13};
 6924    MCRegister EnvReg = State.AllocateReg(IsPPC64 ? PPC::X11 : PPC::R11);
 6933    if (ByValAlign > StackAlign)
 6935                         "16 are not supported.");
 6938    const Align ObjAlign = ByValAlign > PtrAlign ? ByValAlign : PtrAlign;
 6942    if (ByValSize == 0) {
 6944                                       State.getStackSize(), RegVT, LocInfo));
 
 6949    unsigned NextReg = State.getFirstUnallocated(GPRs);
 6950    while (NextReg != GPRs.size() &&
 6955      State.AllocateStack(PtrSize, PtrAlign);
 
 6956      assert(Reg && "Allocating register unexpectedly failed.");
 
 6958      NextReg = State.getFirstUnallocated(GPRs);
 6961    const unsigned StackSize = alignTo(ByValSize, ObjAlign);
 6962    unsigned Offset = State.AllocateStack(StackSize, ObjAlign);
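    // For AIX byval arguments, each pointer-sized piece that still fits in
    // the GPR file gets a register plus a matching shadow stack slot; the
    // loop exits when registers or bytes run out, and the remaining bytes
    // get a stack allocation aligned to the aggregate's own alignment.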
 
 6982    assert(IsPPC64 && "PPC32 should have split i64 values.");
 6986    const unsigned Offset = State.AllocateStack(PtrSize, PtrAlign);
 7005        State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
 7011    for (unsigned I = 0; I < StoreSize; I += PtrSize) {
 7013        assert(FReg && "An FPR should be available when a GPR is reserved.");
 7014        if (State.isVarArg()) {
 
 7046    const unsigned VecSize = 16;
 7047    const Align VecAlign(VecSize);
 7049    if (!State.isVarArg()) {
 7052      if (MCRegister VReg = State.AllocateReg(VR)) {
 7059      const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
 7064    unsigned NextRegIndex = State.getFirstUnallocated(GPRs);
 7067    while (NextRegIndex != GPRs.size() &&
 7071      State.AllocateStack(PtrSize, PtrAlign);
 7072      assert(Reg && "Allocating register unexpectedly failed.");
 7074      NextRegIndex = State.getFirstUnallocated(GPRs);
 7082      if (MCRegister VReg = State.AllocateReg(VR)) {
 7085        for (unsigned I = 0; I != VecSize; I += PtrSize)
 7086          State.AllocateReg(GPRs);
 7087        State.AllocateStack(VecSize, VecAlign);
 7091      const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
 7097    if (NextRegIndex == GPRs.size()) {
 7098      const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
 7106    if (GPRs[NextRegIndex] == PPC::R9) {
 7107      const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
 7111      const MCRegister FirstReg = State.AllocateReg(PPC::R9);
 7112      const MCRegister SecondReg = State.AllocateReg(PPC::R10);
 7113      assert(FirstReg && SecondReg &&
 7114             "Allocating R9 or R10 unexpectedly failed.");
 7125    const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
 7128    for (unsigned I = 0; I != VecSize; I += PtrSize) {
 
 7130      assert(Reg && "Failed to allocate register for vararg vector argument");
 
 
 7145  assert((IsPPC64 || SVT != MVT::i64) &&
 7146         "i64 should have been split for 32-bit codegen.");
 7154    return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
 7156    return HasP8Vector ? &PPC::VSSRCRegClass : &PPC::F4RCRegClass;
 7158    return HasVSX ? &PPC::VSFRCRegClass : &PPC::F8RCRegClass;
 7166    return &PPC::VRRCRegClass;
 7179  else if (Flags.isZExt())
 7191           "Reg must be a valid argument register!");
 7192    return LASize + 4 * (Reg - PPC::R3);
 7197           "Reg must be a valid argument register!");
 7198    return LASize + 8 * (Reg - PPC::X3);
 
 
 7244SDValue PPCTargetLowering::LowerFormalArguments_AIX(
 7251         "Unexpected calling convention!");
 7259  const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>();
 7261  const bool IsPPC64 = Subtarget.isPPC64();
 7262  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
 7268  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
 7269  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
 7273  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
 7274  CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
 7275  uint64_t SaveStackPos = CCInfo.getStackSize();
 7277  CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
 7281  for (size_t I = 0, End = ArgLocs.size(); I != End;) {
 7282    CCValAssign &VA = ArgLocs[I++];
 7288    bool ArgSignExt = Ins[VA.getValNo()].Flags.isSExt();
 7300          LocVT.SimpleTy, IsPPC64, Subtarget.hasP8Vector(), Subtarget.hasVSX());
 7302      MVT SaveVT = RegClass == &PPC::G8RCRegClass ? MVT::i64 : LocVT;
 7308                                      MachinePointerInfo(), Align(PtrByteSize));
 7314      unsigned StoreSize =
 7316      SaveStackPos = alignTo(SaveStackPos + StoreSize, PtrByteSize);
 
 7319    auto HandleMemLoc = [&]() {
 7322      assert((ValSize <= LocSize) &&
 7323             "Object size is larger than size of MemLoc");
 7326      if (LocSize > ValSize)
 7327        CurArgOffset += LocSize - ValSize;
 7329      const bool IsImmutable =
 7335          DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
 7369      assert(isVarArg && "Only use custom memloc for vararg.");
 7372      const unsigned OriginalValNo = VA.getValNo();
 7373      (void)OriginalValNo;
 7375      auto HandleCustomVecRegLoc = [&]() {
 7376        assert(I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
 7377               "Missing custom RegLoc.");
 7380               "Unexpected Val type for custom RegLoc.");
 7382               "ValNo mismatch between custom MemLoc and RegLoc.");
 7386                                       Subtarget.hasVSX()));
 7393      HandleCustomVecRegLoc();
 7394      HandleCustomVecRegLoc();
 7398      if (I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom()) {
 7400               "Only 2 custom RegLocs expected for 64-bit codegen.");
 7401        HandleCustomVecRegLoc();
 7402        HandleCustomVecRegLoc();
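      // A vararg vector formal argument is described by one custom MemLoc
      // followed by custom RegLocs for its GPR shadows: two on 64-bit
      // targets, and two more (four total) on 32-bit targets, consumed here
      // by the repeated HandleCustomVecRegLoc() calls.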
 
 7446      const unsigned Size =
 7458    if (Flags.isByVal()) {
 7462      const PPCFrameLowering *FL = Subtarget.getFrameLowering();
 7464      const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
 7472      const TargetRegisterClass *RegClass =
 7473          IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
 7475      auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
 7488            CopyFrom.getValue(1), dl, CopyFrom,
 7498      for (; Offset != StackSize && ArgLocs[I].isRegLoc();
 7501               "RegLocs should be for ByVal argument.");
 7503        const CCValAssign RL = ArgLocs[I++];
 7508      if (Offset != StackSize) {
 7510               "Expected MemLoc for remaining bytes.");
 7511        assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
 7525                                         Subtarget.hasVSX()));
 
 7542  const unsigned MinParameterSaveArea = 8 * PtrByteSize;
 7544  unsigned CallerReservedArea = std::max<unsigned>(
 7545      CCInfo.getStackSize(), LinkageSize + MinParameterSaveArea);
 7551  CallerReservedArea =
 7560    static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
 7561                                       PPC::R7, PPC::R8, PPC::R9, PPC::R10};
 7563    static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
 7564                                       PPC::X7, PPC::X8, PPC::X9, PPC::X10};
 7565    const unsigned NumGPArgRegs = std::size(IsPPC64 ? GPR_64 : GPR_32);
 7570    for (unsigned GPRIndex =
 7571             (CCInfo.getStackSize() - LinkageSize) / PtrByteSize;
 7572         GPRIndex < NumGPArgRegs; ++GPRIndex) {
 7575          IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
 7576                  : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
 7588  if (!MemOps.empty())
 
 7594SDValue PPCTargetLowering::LowerCall_AIX(
 7607         "Unexpected calling convention!");
 7609  if (CFlags.IsPatchPoint)
 7612  const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>();
 7616  CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
 7623  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
 7624  const bool IsPPC64 = Subtarget.isPPC64();
 7626  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
 7627  CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
 7628  CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
 7636  const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
 7637  const unsigned NumBytes = std::max<unsigned>(
 7638      LinkageSize + MinParameterSaveAreaSize, CCInfo.getStackSize());
 
 7654  for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
 7655    const unsigned ValNo = ArgLocs[I].getValNo();
 7657    ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
 7659    if (Flags.isByVal()) {
 7660      const unsigned ByValSize = Flags.getByValSize();
 7668      auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
 7674                              MachinePointerInfo(), VT);
 7677      unsigned LoadOffset = 0;
 7680      while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
 7683        LoadOffset += PtrByteSize;
 7684        const CCValAssign &ByValVA = ArgLocs[I++];
 7686               "Unexpected location for pass-by-value argument.");
 7690      if (LoadOffset == ByValSize)
 7694      assert(ArgLocs[I].getValNo() == ValNo &&
 7695             "Expected additional location for by-value argument.");
 7697      if (ArgLocs[I].isMemLoc()) {
 7698        assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
 7699        const CCValAssign &ByValVA = ArgLocs[I++];
 7700        ISD::ArgFlagsTy MemcpyFlags = Flags;
 7703        Chain = CallSeqStart = createMemcpyOutsideCallSeq(
 7709            CallSeqStart, MemcpyFlags, DAG, dl);
 7718      const unsigned ResidueBytes = ByValSize % PtrByteSize;
 7719      assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
 7720             "Unexpected register residue for by-value argument.");
 7722      for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
 7726                   : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
 7736               "Unexpected load emitted during handling of pass-by-value "
 7744        ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
 7749      const CCValAssign &ByValVA = ArgLocs[I++];
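      // The residue (ByValSize % PtrByteSize trailing bytes) is assembled in
      // a register by loading the largest power-of-two pieces that remain
      // and OR-ing the shifted pieces together before passing the result.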
 
 7754    CCValAssign &VA = ArgLocs[I++];
 7779      assert(CFlags.IsVarArg && "Custom MemLocs only used for Vector args.");
 7785          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
 7787      const unsigned OriginalValNo = VA.getValNo();
 7789      unsigned LoadOffset = 0;
 7790      auto HandleCustomVecRegLoc = [&]() {
 
 7791        assert(I != E && "Unexpected end of CCValAssigns.");
 
 7792        assert(ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
 7793               "Expected custom RegLoc.");
 7794        CCValAssign RegVA = ArgLocs[I++];
 7796               "Custom MemLoc ValNo and custom RegLoc ValNo must match.");
 7802        LoadOffset += PtrByteSize;
 7808      HandleCustomVecRegLoc();
 7809      HandleCustomVecRegLoc();
 7811      if (I != E && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
 7812          ArgLocs[I].getValNo() == OriginalValNo) {
 7814               "Only 2 custom RegLocs expected for 64-bit codegen.");
 7815        HandleCustomVecRegLoc();
 7816        HandleCustomVecRegLoc();
 7827          DAG.getStore(Chain, dl, Arg, PtrOff,
 7829                       Subtarget.getFrameLowering()->getStackAlign()));
 
 7836          "Unexpected register handling for calling convention.");
 7842           "Custom register handling only expected for VarArg.");
 7847    if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
 7851    else if (Arg.getValueType().getFixedSizeInBits() <
 7859      assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
 7860             "Unexpected custom register for argument!");
 7861      CCValAssign &GPR1 = VA;
 7870        CCValAssign &PeekArg = ArgLocs[I];
 7873          CCValAssign &GPR2 = ArgLocs[I++];
 7881  if (!MemOpChains.empty())
 7886  if (CFlags.IsIndirect) {
 7887    assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
 7888    const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
 7889    const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
 7890    const MVT PtrVT = Subtarget.getScalarIntVT();
 7891    const unsigned TOCSaveOffset =
 7892        Subtarget.getFrameLowering()->getTOCSaveOffset();
 7907  for (auto Reg : RegsToPass) {
 7912  const int SPDiff = 0;
 7913  return FinishCall(CFlags, dl, DAG, RegsToPass, InGlue, Chain, CallSeqStart,
 7914                    Callee, SPDiff, NumBytes, Ins, InVals, CB);
 
 7922                                  const Type *RetTy) const {
 7924  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
 7925  return CCInfo.CheckReturn(
 7940  CCInfo.AnalyzeReturn(Outs,
 7949  for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
 7950    CCValAssign &VA = RVLocs[i];
 7953    SDValue Arg = OutVals[RealResIdx];
 7968    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
 7969      bool isLittleEndian = Subtarget.isLittleEndian();
 7991    RetOps.push_back(Glue);
 
 7997PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
 8002  EVT IntVT = Op.getValueType();
 8006  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
 8022  bool isPPC64 = Subtarget.isPPC64();
 8023  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
 8032      DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
 8038  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
 8043  bool isPPC64 = Subtarget.isPPC64();
 8048  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
 8054    int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
 8064PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
 8066  bool isPPC64 = Subtarget.isPPC64();
 8071  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
 8077    int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
 8100  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
 8102  SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
 8112  bool isPPC64 = Subtarget.isPPC64();
 8124                     Op.getOperand(0), Op.getOperand(1));
 8131                     Op.getOperand(0), Op.getOperand(1));
 
 8135  if (Op.getValueType().isVector())
 8136    return LowerVectorLoad(Op, DAG);
 8138  assert(Op.getValueType() == MVT::i1 &&
 8139         "Custom lowering only for i1 loads");
 8148  MachineMemOperand *MMO = LD->getMemOperand();
 8152                     BasePtr, MVT::i8, MMO);
 8160  if (Op.getOperand(1).getValueType().isVector())
 8161    return LowerVectorStore(Op, DAG);
 8163  assert(Op.getOperand(1).getValueType() == MVT::i1 &&
 8164         "Custom lowering only for i1 stores");
 8174  MachineMemOperand *MMO = ST->getMemOperand();
 8183  assert(Op.getValueType() == MVT::i1 &&
 8184         "Custom lowering only for i1 results");
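  // i1 values have no byte-sized memory representation, so i1 loads are
  // lowered to extending i8 loads (then truncated back to i1) and i1
  // stores truncate to i8 first; the original MachineMemOperand is reused.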
 
 8212  EVT TrgVT = Op.getValueType();
 8236  if (SrcSize == 256) {
 8247    Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
 8253  SmallVector<int, 16> ShuffV;
 8254  if (Subtarget.isLittleEndian())
 8255    for (unsigned i = 0; i < TrgNumElts; ++i)
 8258    for (unsigned i = 1; i <= TrgNumElts; ++i)
 8262  for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
 8266  Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
 8267  Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
 
 8275  EVT ResVT = Op.getValueType();
 8276  EVT CmpVT = Op.getOperand(0).getValueType();
 8278  SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
 8284  if (!Subtarget.hasP9Vector() && CmpVT == MVT::f128) {
 8297  SDNodeFlags Flags = Op.getNode()->getFlags();
 8301  if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
 8333      if (LHS.getValueType() == MVT::f32)
 8337        Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
 8339                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
 8346      if (LHS.getValueType() == MVT::f32)
 8355      if (LHS.getValueType() == MVT::f32)
 8358                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
 8369    if (Cmp.getValueType() == MVT::f32)
 8370      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
 8373      Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
 8375                       DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
 8379    if (Cmp.getValueType() == MVT::f32)
 8380      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
 8385    if (Cmp.getValueType() == MVT::f32)
 8386      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
 8391    if (Cmp.getValueType() == MVT::f32)
 8392      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
 8397    if (Cmp.getValueType() == MVT::f32)
 8398      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
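    // The repeated FP_EXTENDs exist because the fsel-style select that this
    // SELECT_CC lowering builds compares in f64; f32 comparands are widened
    // before the comparison value is fed to the select.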
 
 8430  bool IsStrict = Op->isStrictFPOpcode();
 8439  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
 8441  MVT DestTy = Op.getSimpleValueType();
 8442  assert(Src.getValueType().isFloatingPoint() &&
 8443         (DestTy == MVT::i8 || DestTy == MVT::i16 || DestTy == MVT::i32 ||
 8444          DestTy == MVT::i64) &&
 8445         "Invalid FP_TO_INT types");
 8446  if (Src.getValueType() == MVT::f32) {
 8450                      DAG.getVTList(MVT::f64, MVT::Other), {Chain, Src}, Flags);
 8453      Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
 8455  if ((DestTy == MVT::i8 || DestTy == MVT::i16) && Subtarget.hasP9Vector())
 8465    assert((IsSigned || Subtarget.hasFPCVT()) &&
 8466           "i64 FP_TO_UINT is supported only with FPCVT");
 8469  EVT ConvTy = Src.getValueType() == MVT::f128 ? MVT::f128 : MVT::f64;
 
 
 8481void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
 8483                                               const SDLoc &dl) const {
 8487  bool IsStrict = Op->isStrictFPOpcode();
 8490  bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
 8491                  (IsSigned || Subtarget.hasFPCVT());
 8494  MachinePointerInfo MPI =
 8502    Alignment = Align(4);
 8503    MachineMemOperand *MMO =
 8509    Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment);
 8513  if (Op.getValueType() == MVT::i32 && !i32Stack &&
 8514      !Subtarget.isLittleEndian()) {
 8523  RLI.Alignment = Alignment;
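  // The conversion result is produced in an FPR and spilled to a stack
  // slot (an STFIWX-backed i32 slot when i32Stack is set); ReuseLoadInfo
  // records the slot so a later integer load can pick the value up without
  // a GPR<->FPR round trip. On big-endian targets an i32 result sits in
  // the high word of an f64-sized slot, hence the offset adjustment above.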
 
 8531                                                    const SDLoc &dl) const {
 8534  if (Op->isStrictFPOpcode())
 8541                                          const SDLoc &dl) const {
 8542  bool IsStrict = Op->isStrictFPOpcode();
 8545  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
 8546  EVT SrcVT = Src.getValueType();
 8547  EVT DstVT = Op.getValueType();
 8550  if (SrcVT == MVT::f128)
 8551    return Subtarget.hasP9Vector() ? Op : SDValue();
 8555  if (SrcVT == MVT::ppcf128) {
 8556    if (DstVT == MVT::i32) {
 8561      Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
 8572                                    {Op.getOperand(0), Lo, Hi}, Flags);
 8575                             {Res.getValue(1), Res}, Flags);
 8581        const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
 8605                                    {Chain, Src, FltOfs}, Flags);
 8609                                     {Chain, Val}, Flags);
 8612              dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), SignMask);
 8630  if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
 8631    return LowerFP_TO_INTDirectMove(Op, DAG, dl);
 8634  LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
 8636  return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
 8637                     RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
 
 8648bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
 8653  if (Op->isStrictFPOpcode())
 8658                       (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
 8662                               Op.getOperand(0).getValueType())) {
 8664    LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
 8669  if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
 8670      LD->isNonTemporal())
 8672  if (LD->getMemoryVT() != MemVT)
 8682  RLI.Ptr = LD->getBasePtr();
 8683  if (LD->isIndexed() && !LD->getOffset().isUndef()) {
 8685           "Non-pre-inc AM on PPC?");
 8690  RLI.Chain = LD->getChain();
 8691  RLI.MPI = LD->getPointerInfo();
 8692  RLI.IsDereferenceable = LD->isDereferenceable();
 8693  RLI.IsInvariant = LD->isInvariant();
 8694  RLI.Alignment = LD->getAlign();
 8695  RLI.AAInfo = LD->getAAInfo();
 8696  RLI.Ranges = LD->getRanges();
 8698  RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
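  // canReuseLoadAddress copies everything a later load needs (pointer,
  // chain, alias info, alignment, ranges) out of an existing non-volatile
  // load of the right width, so the converted value can be re-loaded from
  // the same address instead of being moved between register files.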
 
 8705bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
 8706  SDNode *Origin = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0).getNode();
 8713  if (!Subtarget.hasP9Vector() &&
 8717  for (SDUse &Use : Origin->uses()) {
 8720    if (Use.getResNo() != 0)
 8747  bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
 8750  EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
 8751  if (Op->isStrictFPOpcode()) {
 8753      Chain = Op.getOperand(0);
 8755                       DAG.getVTList(ConvTy, MVT::Other), {Chain, Src}, Flags);
 8757    return DAG.getNode(ConvOpc, dl, ConvTy, Src);
 
 
 8765                                                    const SDLoc &dl) const {
 8766  assert((Op.getValueType() == MVT::f32 ||
 8767          Op.getValueType() == MVT::f64) &&
 8768         "Invalid floating point type as target of conversion");
 8769  assert(Subtarget.hasFPCVT() &&
 8770         "Int to FP conversions with direct moves require FPCVT");
 8771  SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0);
 8772  bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
 8794  for (unsigned i = 1; i < NumConcat; ++i)
 
 
 8801                                                const SDLoc &dl) const {
 8802  bool IsStrict = Op->isStrictFPOpcode();
 8803  unsigned Opc = Op.getOpcode();
 8804  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
 8807         "Unexpected conversion type");
 8808  assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
 8809         "Supports conversions to v2f64/v4f32 only.");
 8813  Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
 8816  bool FourEltRes = Op.getValueType() == MVT::v4f32;
 8821  MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
 8823  SmallVector<int, 16> ShuffV;
 8824  for (unsigned i = 0; i < WideNumElts; ++i)
 8827  int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
 8828  int SaveElts = FourEltRes ? 4 : 2;
 8829  if (Subtarget.isLittleEndian())
 8830    for (int i = 0; i < SaveElts; i++)
 8831      ShuffV[i * Stride] = i;
 8833    for (int i = 1; i <= SaveElts; i++)
 8834      ShuffV[i * Stride - 1] = i - 1;
 8842    Arrange = DAG.getBitcast(IntermediateVT, Arrange);
 8843    EVT ExtVT = Src.getValueType();
 8844    if (Subtarget.hasP9Altivec())
 8851    Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
 8855                       {Op.getOperand(0), Extend}, Flags);
 8857  return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
 
 8865  bool IsStrict = Op->isStrictFPOpcode();
 8866  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
 8871  Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
 8873  EVT InVT = Src.getValueType();
 8874  EVT OutVT = Op.getValueType();
 8877    return LowerINT_TO_FPVector(Op, DAG, dl);
 8880  if (Op.getValueType() == MVT::f128)
 8881    return Subtarget.hasP9Vector() ? Op : SDValue();
 8884  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
 8887  if (Src.getValueType() == MVT::i1) {
 8899  if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
 8900      Subtarget.isPPC64() && Subtarget.hasFPCVT())
 8901    return LowerINT_TO_FPDirectMove(Op, DAG, dl);
 8903  assert((IsSigned || Subtarget.hasFPCVT()) &&
 8904         "UINT_TO_FP is supported only with FPCVT");
 8906  if (Src.getValueType() == MVT::i64) {
 8921    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT() &&
 8922        !Op->getFlags().hasApproximateFuncs()) {
 
 8962    if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
 8963      Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
 8964                         RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
 8967    } else if (Subtarget.hasLFIWAX() &&
 8968               canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
 8969      MachineMemOperand *MMO =
 8971                                RLI.Alignment, RLI.AAInfo, RLI.Ranges);
 8975                                     Ops, MVT::i32, MMO);
 8978    } else if (Subtarget.hasFPCVT() &&
 8979               canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
 8980      MachineMemOperand *MMO =
 8982                                RLI.Alignment, RLI.AAInfo, RLI.Ranges);
 8986                                     Ops, MVT::i32, MMO);
 8989    } else if (((Subtarget.hasLFIWAX() &&
 8991                (Subtarget.hasFPCVT() &&
 9006             "Expected an i32 store");
 9012      RLI.Alignment = Align(4);
 9014      MachineMemOperand *MMO =
 9016                                RLI.Alignment, RLI.AAInfo, RLI.Ranges);
 9020                                     dl, DAG.getVTList(MVT::f64, MVT::Other),
 9021                                     Ops, MVT::i32, MMO);
 9022      Chain = Bits.getValue(1);
 9024      Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
 9030    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
 9034            {Chain, FP, DAG.getIntPtrConstant(0, dl, true)},
 9043  assert(Src.getValueType() == MVT::i32 &&
 9044         "Unhandled INT_TO_FP type in custom expander!");
 9054  if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
 9057    if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) {
 9067             "Expected an i32 store");
 9073      RLI.Alignment = Align(4);
 9076    MachineMemOperand *MMO =
 9078                              RLI.Alignment, RLI.AAInfo, RLI.Ranges);
 9084    if (ReusingLoad && RLI.ResChain) {
 9088    assert(Subtarget.isPPC64() &&
 9089           "i32->FP without LFIWAX supported only on PPC64");
 9098        Chain, dl, Ext64, FIdx,
 9104        MVT::f64, dl, Chain, FIdx,
 9113  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
 9117          {Chain, FP, DAG.getIntPtrConstant(0, dl, true)}, Flags);
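  // i32 sources prefer the LFIWAX (sign-extending) / LFIWZX (zero-extending)
  // load-to-FPR forms when an in-memory copy of the value already exists;
  // otherwise the value is spilled to a 4-byte slot first. Without FPCVT,
  // the final f64->f32 step is done with the FP_ROUND seen above.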
 
 9134    uint64_t Mode = CVal->getZExtValue();
 9135    assert(Mode < 4 && "Unsupported rounding mode!");
 9136    unsigned InternalRnd = Mode ^ (~(Mode >> 1) & 1);
 9137    if (Subtarget.isISA3_0())
 9140              PPC::MFFSCRNI, Dl, {MVT::f64, MVT::Other},
 9141              {DAG.getConstant(InternalRnd, Dl, MVT::i32, true), Chain}),
 9144        (InternalRnd & 2) ? PPC::MTFSB1 : PPC::MTFSB0, Dl, MVT::Other,
 9145        {DAG.getConstant(30, Dl, MVT::i32, true), Chain});
 9147        (InternalRnd & 1) ? PPC::MTFSB1 : PPC::MTFSB0, Dl, MVT::Other,
 9165  if (!Subtarget.isISA3_0()) {
 9167    Chain = MFFS.getValue(1);
 9170  if (Subtarget.isPPC64()) {
 9171    if (Subtarget.isISA3_0()) {
 9176          PPC::RLDIMI, Dl, MVT::i64,
 9177          {DAG.getNode(ISD::BITCAST, Dl, MVT::i64, MFFS),
 9181      NewFPSCR = SDValue(InsertRN, 0);
 9183    NewFPSCR = DAG.getNode(ISD::BITCAST, Dl, MVT::f64, NewFPSCR);
 9188    SDValue Addr = Subtarget.isLittleEndian()
 9192    if (Subtarget.isISA3_0()) {
 9193      Chain = DAG.getStore(Chain, Dl, DstFlag, Addr, MachinePointerInfo());
 9195      Chain = DAG.getStore(Chain, Dl, MFFS, StackSlot, MachinePointerInfo());
 9197          DAG.getLoad(MVT::i32, Dl, Chain, Addr, MachinePointerInfo());
 9200                        PPC::RLWIMI, Dl, MVT::i32,
 9201                        {Tmp, DstFlag, DAG.getTargetConstant(0, Dl, MVT::i32),
 9202                         DAG.getTargetConstant(30, Dl, MVT::i32),
 9203                         DAG.getTargetConstant(31, Dl, MVT::i32)}),
 9205      Chain = DAG.getStore(Chain, Dl, Tmp, Addr, MachinePointerInfo());
 9208        DAG.getLoad(MVT::f64, Dl, Chain, StackSlot, MachinePointerInfo());
 9211  if (Subtarget.isISA3_0())
 9217      PPC::MTFSF, Dl, MVT::Other,
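  // llvm.set.rounding takes the FLT_ROUNDS numbering (0=toward zero,
  // 1=nearest, 2=+inf, 3=-inf) while the FPSCR RN field uses 0=nearest,
  // 1=toward zero, 2=+inf, 3=-inf; `Mode ^ (~(Mode >> 1) & 1)` flips the
  // low bit exactly when the high bit is clear, swapping 0 and 1. On
  // ISA 3.0 the field is written directly with MFFSCRNI; older cores set
  // the two RN bits individually with MTFSB0/MTFSB1 as shown above.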
 
 9245  EVT VT = Op.getValueType();
 9251  Chain = MFFS.getValue(1);
 9256                      DAG.getNode(ISD::BITCAST, dl, MVT::i64, MFFS));
 9261    Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
 9265           "Stack slot adjustment is valid only on big endian subtargets!");
 9268    CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
 9295  EVT VT = Op.getValueType();
 9299         VT == Op.getOperand(1).getValueType() &&
 
 9319  SDValue OutOps[] = { OutLo, OutHi };
 9324  EVT VT = Op.getValueType();
 9328         VT == Op.getOperand(1).getValueType() &&
 9348  SDValue OutOps[] = { OutLo, OutHi };
 9354  EVT VT = Op.getValueType();
 9357         VT == Op.getOperand(1).getValueType() &&
 9377  SDValue OutOps[] = { OutLo, OutHi };
 9384  EVT VT = Op.getValueType();
 9391  EVT AmtVT = Z.getValueType();
 9414  static const MVT VTys[] = {
 9415    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
 9418  EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
 9421  if (Val == ((1LLU << (SplatSize * 8)) - 1)) {
 9426  EVT CanonicalVT = VTys[SplatSize-1];
 
 
 9439                                const SDLoc &dl, EVT DestVT = MVT::Other) {
 9440  if (DestVT == MVT::Other) DestVT = Op.getValueType();
 9449                                EVT DestVT = MVT::Other) {
 9450  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
 9459                                EVT DestVT = MVT::Other) {
 9462                     DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
 9474  for (unsigned i = 0; i != 16; ++i)
 9477  return DAG.getNode(ISD::BITCAST, dl, VT, T);
 
 
 9495  EVT VecVT = V->getValueType(0);
 9496  bool RightType = VecVT == MVT::v2f64 ||
 9497    (HasP8Vector && VecVT == MVT::v4f32) ||
 9498    (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
 9502  bool IsSplat = true;
 9503  bool IsLoad = false;
 9509  if (V->isConstant())
 9511  for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
 9512    if (V->getOperand(i).isUndef())
 9516    if (V->getOperand(i).getOpcode() == ISD::LOAD ||
 9518         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
 9520         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
 9522         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
 9526    if (V->getOperand(i) != Op0 ||
 9527        (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
 9530  return !(IsSplat && IsLoad);
 
 
 9540      (Op.getValueType() != MVT::f128))
 9545  if ((Lo.getValueType() != MVT::i64) || (Hi.getValueType() != MVT::i64))
 9548  if (!Subtarget.isLittleEndian())
 9556  while (InputLoad->getOpcode() == ISD::BITCAST)
 9563  if (InputLoad->getOpcode() != ISD::LOAD)
 9573  APFloat APFloatToConvert = ArgAPFloat;
 9574  bool LosesInfo = true;
 9579    ArgAPFloat = APFloatToConvert;
 9601  APFloat APFloatToConvert = ArgAPFloat;
 9602  bool LosesInfo = true;
 9606  return (!LosesInfo && !APFloatToConvert.isDenormal());
 
 
 9615  EVT Ty = Op->getValueType(0);
 9618  if ((Ty == MVT::v2f64 || Ty == MVT::v4f32 || Ty == MVT::v4i32) &&
 9627  if ((Ty == MVT::v8i16 || Ty == MVT::v16i8) && ISD::isEXTLoad(InputNode) &&
 9631  if (Ty == MVT::v2i64) {
 9634    if (MemVT == MVT::i32) {
 9646                     bool IsLittleEndian) {
 9652  APInt ConstValue(VTSize, 0);
 9656  unsigned BitPos = 0;
 9664    ConstValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth),
 9665                          IsLittleEndian ? BitPos : VTSize - EltWidth - BitPos);
 9669  for (unsigned J = 0; J < 16; ++J) {
 9671    if (ExtractValue != 0x00 && ExtractValue != 0xFF)
 9673    if (ExtractValue == 0xFF)
 
 
 9688  assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
 9690  if (Subtarget.hasP10Vector()) {
 9691    APInt BitMask(32, 0);
 9697        BitMask != 0 && BitMask != 0xffff) {
 9699      MachineSDNode *MSDNode =
 9705        SDV = DAG.getNode(ISD::BITCAST, dl, DVT, SDV);
 9711    if (SDValue VecPat = combineBVLoadsSpecialValue(Op, DAG))
 9715  APInt APSplatBits, APSplatUndef;
 9716  unsigned SplatBitSize;
 9718  bool BVNIsConstantSplat =
 9720                           HasAnyUndefs, 0, !Subtarget.isLittleEndian());
 9726  if (BVNIsConstantSplat && (SplatBitSize == 64) &&
 9727      Subtarget.hasPrefixInstrs() && Subtarget.hasP10Vector()) {
 9730    if ((Op->getValueType(0) == MVT::v2f64) &&
 9763  bool IsSplat64 = false;
 9764  uint64_t SplatBits = 0;
 9765  int32_t SextVal = 0;
 9766  if (BVNIsConstantSplat && SplatBitSize <= 64) {
 9768    if (SplatBitSize <= 32) {
 9770    } else if (SplatBitSize == 64 && Subtarget.hasP8Altivec()) {
 9771      int64_t Splat64Val = static_cast<int64_t>(SplatBits);
 9772      bool P9Vector = Subtarget.hasP9Vector();
 9773      int32_t Hi = P9Vector ? 127 : 15;
 9774      int32_t Lo = P9Vector ? -128 : -16;
 9775      IsSplat64 = Splat64Val >= Lo && Splat64Val <= Hi;
 9776      SextVal = static_cast<int32_t>(SplatBits);
 
 9780  if (!BVNIsConstantSplat || (SplatBitSize > 32 && !IsSplat64)) {
 9787      const SDValue *InputLoad = &Op.getOperand(0);
 9792      unsigned MemorySize = LD->getMemoryVT().getScalarSizeInBits();
 9793      unsigned ElementSize =
 9796      assert(((ElementSize == 2 * MemorySize)
 9800             "Unmatched element size and opcode!\n");
 9805      unsigned NumUsesOfInputLD = 128 / ElementSize;
 9807        if (BVInOp.isUndef())
 9822      if (NumUsesOfInputLD == 1 &&
 9824           !Subtarget.isLittleEndian() && Subtarget.hasVSX() &&
 9825           Subtarget.hasLFIWAX()))
 9833      if (NumUsesOfInputLD == 1 && Subtarget.isLittleEndian() &&
 9834          Subtarget.isISA3_1() && ElementSize <= 16)
 9837      assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?");
 9839          Subtarget.hasVSX()) {
 9846            NewOpcode, dl, DAG.getVTList(Op.getValueType(), MVT::Other), Ops,
 9847            LD->getMemoryVT(), LD->getMemOperand());
 9859    if (Subtarget.hasVSX() && Subtarget.isPPC64() &&
 9861                                        Subtarget.hasP8Vector()))
 
 9867  unsigned SplatSize = SplatBitSize / 8;
 9872  if (SplatBits == 0) {
 9874    if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
 9876      Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
 9886  if (Subtarget.hasPrefixInstrs() && Subtarget.hasP10Vector() && SplatSize == 2)
 9888                                  Op.getValueType(), DAG, dl);
 9890  if (Subtarget.hasPrefixInstrs() && Subtarget.hasP10Vector() && SplatSize == 4)
 9895  if (Subtarget.hasP9Vector() && SplatSize == 1)
 9901  if (SextVal >= -16 && SextVal <= 15) {
 9904    unsigned UseSize = SplatSize == 8 ? 4 : SplatSize;
 9914  if (Subtarget.hasP9Vector() && SextVal >= -128 && SextVal <= 127) {
 9920    switch (SplatSize) {
 9924      IID = Intrinsic::ppc_altivec_vupklsb;
 9928      IID = Intrinsic::ppc_altivec_vextsb2w;
 9932      IID = Intrinsic::ppc_altivec_vextsb2d;
 9939  assert(!IsSplat64 && "Unhandled 64-bit splat pattern");
 
 9948  if (SextVal >= -32 && SextVal <= 31) {
 9953    EVT VT = (SplatSize == 1 ? MVT::v16i8 :
 9954              (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
 9957    if (VT == Op.getValueType())
 9960      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
 9966  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
 9976    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
 9980  static const signed char SplatCsts[] = {
 9981    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
 9982    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
 9985  for (unsigned idx = 0; idx < std::size(SplatCsts); ++idx) {
 9988    int i = SplatCsts[idx];
 9992    unsigned TypeShiftAmt = i & (SplatBitSize-1);
 9995    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
 9997      static const unsigned IIDs[] = {
 9998        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
 9999        Intrinsic::ppc_altivec_vslw
10002      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
10006    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
10008      static const unsigned IIDs[] = {
10009        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
10010        Intrinsic::ppc_altivec_vsrw
10013      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
10017    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
10018                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
10020      static const unsigned IIDs[] = {
10021        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
10022        Intrinsic::ppc_altivec_vrlw
10025      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
10029    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
10031      unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
10035    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
10037      unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
10041    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
10043      unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
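  // Each SplatCsts candidate i fits in a vspltis[bhw] immediate; if the
  // requested constant equals i shifted, rotated, or byte-shifted (vsldoi)
  // by a type-dependent amount, the splat is built from the small immediate
  // plus one vector shift/rotate instead of a load from the constant pool.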
 
10056  unsigned OpNum = (PFEntry >> 26) & 0x0F;
10057  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
10058  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
10074    if (LHSID == (1*9+2)*9+3) return LHS;
10075    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
10087    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
10088    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
10089    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
10090    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
10093    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
10094    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
10095    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
10096    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
10099    for (unsigned i = 0; i != 16; ++i)
10100      ShufIdxs[i] = (i&3)+0;
10103    for (unsigned i = 0; i != 16; ++i)
10104      ShufIdxs[i] = (i&3)+4;
10107    for (unsigned i = 0; i != 16; ++i)
10108      ShufIdxs[i] = (i&3)+8;
10111    for (unsigned i = 0; i != 16; ++i)
10112      ShufIdxs[i] = (i&3)+12;
10122  OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
10123  OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
10125  return DAG.getNode(ISD::BITCAST, dl, VT, T);
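// A perfect-shuffle table entry packs the whole recipe into one word:
// bits 30-31 hold the cost, bits 26-29 the operation (vmrg*/vsplt*/etc.),
// and two 13-bit fields encode the left and right operands, each a base-9
// number whose digits are the four 32-bit element selectors (8 = undef).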
 
 
10133  const unsigned BytesInVector = 16;
10134  bool IsLE = Subtarget.isLittleEndian();
10138  unsigned ShiftElts = 0, InsertAtByte = 0;
10142  unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
10143                                   0, 15, 14, 13, 12, 11, 10, 9};
10144  unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
10145                                1, 2,  3,  4,  5,  6,  7,  8};
10147  ArrayRef<int> Mask = N->getMask();
10148  int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
10160  bool FoundCandidate = false;
10164  unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
10167  for (unsigned i = 0; i < BytesInVector; ++i) {
10168    unsigned CurrentElement = Mask[i];
10171    if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
10174    bool OtherElementsInOrder = true;
10177    for (unsigned j = 0; j < BytesInVector; ++j) {
10184          (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
10185      if (Mask[j] != OriginalOrder[j] + MaskOffset) {
10186        OtherElementsInOrder = false;
10193    if (OtherElementsInOrder) {
10200        ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
10201                         : BigEndianShifts[CurrentElement & 0xF];
10202        Swap = CurrentElement < BytesInVector;
10204      InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
10205      FoundCandidate = true;
10210  if (!FoundCandidate)
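  // The search above looks for a mask that is the identity everywhere except
  // one byte; that byte can then be produced with a single VINSERTB, possibly
  // after rotating the source (ShiftElts) and/or swapping V1 and V2.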
 
10234  const unsigned NumHalfWords = 8;
10235  const unsigned BytesInVector = NumHalfWords * 2;
10240  bool IsLE = Subtarget.isLittleEndian();
10244  unsigned ShiftElts = 0, InsertAtByte = 0;
10248  unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
10249  unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
10252  uint32_t OriginalOrderLow = 0x1234567;
10253  uint32_t OriginalOrderHigh = 0x89ABCDEF;
10256  for (unsigned i = 0; i < NumHalfWords; ++i) {
10257    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
10274  bool FoundCandidate = false;
10277  for (unsigned i = 0; i < NumHalfWords; ++i) {
10278    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
10280    uint32_t MaskOtherElts = ~(0xF << MaskShift);
10281    uint32_t TargetOrder = 0x0;
10288      unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
10289      TargetOrder = OriginalOrderLow;
10293      if (MaskOneElt == VINSERTHSrcElem &&
10294          (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
10295        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
10296        FoundCandidate = true;
10302          (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
10304      if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
10306        ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
10307                         : BigEndianShifts[MaskOneElt & 0x7];
10308        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
10309        Swap = MaskOneElt < NumHalfWords;
10310        FoundCandidate = true;
10316  if (!FoundCandidate)
10333    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
10338  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
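  // The half-word shuffle mask is packed into a single uint32_t, one nibble
  // per element (0x1234567 and 0x89ABCDEF are the two identity orders), so
  // each candidate check is a single mask-and-compare per position.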
 
10351  auto ShuffleMask = SVN->getMask();
10366    ShuffleMask = CommutedSV->getMask();
10375  APInt APSplatValue, APSplatUndef;
10376  unsigned SplatBitSize;
10379                            HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
10391  bool IsLE = Subtarget.isLittleEndian();
10392  if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
10393      (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
10394       ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
10396  else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
10397           (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
10398            ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
10406  for (; SplatBitSize < 32; SplatBitSize <<= 1)
10407    SplatVal |= (SplatVal << SplatBitSize);
10412  return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
 
10421  assert(Op.getValueType() == MVT::v1i128 &&
10422         "Only set v1i128 as custom, other type shouldn't reach here!");
10427  if (SHLAmt % 8 == 0) {
10428    std::array<int, 16> Mask;
10429    std::iota(Mask.begin(), Mask.end(), 0);
10430    std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
10434      return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
10442  return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
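  // For byte-multiple shift amounts the lowering builds a rotated byte-index
  // mask (std::iota + std::rotate) and emits a single 16-byte shuffle; other
  // amounts fall through to the shift/OR expansion that produces OROp.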
 
10459  if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
10464    V1 = Op.getOperand(0);
10465    V2 = Op.getOperand(1);
10467  EVT VT = Op.getValueType();
10468  bool isLittleEndian = Subtarget.isLittleEndian();
10470  unsigned ShiftElts, InsertAtByte;
10476  bool IsPermutedLoad = false;
10478  if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
10488    if (IsPermutedLoad) {
10489      assert((isLittleEndian || IsFourByte) &&
10490             "Unexpected size for permuted load on big endian target");
10491      SplatIdx += IsFourByte ? 2 : 1;
10492      assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
10493             "Splat of a value outside of the loaded memory");
10498    if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
10501        Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
10503        Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
10507      if (LD->getValueType(0).getSizeInBits() == (IsFourByte ? 32 : 64))
10520        DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
10523                                Ops, LD->getMemoryVT(), LD->getMemOperand());
10532  if (VT == MVT::v2i64 || VT == MVT::v2f64)
10535  if (Subtarget.hasP9Vector() &&
10549      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
10553    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
10556  if (Subtarget.hasPrefixInstrs() && Subtarget.hasP10Vector()) {
10558    if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
10559      return SplatInsertNode;
10562  if (Subtarget.hasP9Altivec()) {
10564    if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
10567    if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
10571  if (Subtarget.hasVSX() &&
10577        DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
10581    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
10584  if (Subtarget.hasVSX() &&
10590        DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
10594    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
 
10597  if (Subtarget.hasP9Vector()) {
10601      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
10605      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
10609      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
10613      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
10617  if (Subtarget.hasVSX()) {
10624      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
10631      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
10638  if (V2.isUndef()) {
10651        (Subtarget.hasP8Altivec() && (
10662  unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
10672      (Subtarget.hasP8Altivec() && (
10680  ArrayRef<int> PermMask = SVOp->getMask();
10683    unsigned PFIndexes[4];
10684    bool isFourElementShuffle = true;
10685    for (unsigned i = 0; i != 4 && isFourElementShuffle;
10687      unsigned EltNo = 8;
10688      for (unsigned j = 0; j != 4; ++j) {
10689        if (PermMask[i * 4 + j] < 0)
10692        unsigned ByteSource = PermMask[i * 4 + j];
10693        if ((ByteSource & 3) != j) {
10694          isFourElementShuffle = false;
10699          EltNo = ByteSource / 4;
10700        } else if (EltNo != ByteSource / 4) {
10701          isFourElementShuffle = false;
10705      PFIndexes[i] = EltNo;
10713    if (isFourElementShuffle) {
10715      unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
10716                              PFIndexes[2] * 9 + PFIndexes[3];
10719      unsigned Cost = (PFEntry >> 30);
10739  if (V2.isUndef()) V2 = V1;
10741  return LowerVPERM(Op, DAG, PermMask, VT, V1, V2);
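  // Shuffle lowering tries progressively cheaper forms first: splatted
  // loads, single-element inserts (XXINSERTW/VINSERTH/VINSERTB),
  // XXSPLTI32DX, word/doubleword splats and swaps, reverse instructions,
  // the perfect-shuffle table for 4x32-bit masks, and only then a generic
  // VPERM/XXPERM.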
 
10750  bool NeedSwap = false;
10751  bool isLittleEndian = Subtarget.isLittleEndian();
10752  bool isPPC64 = Subtarget.isPPC64();
10754  if (Subtarget.hasVSX() && Subtarget.hasP9Vector() &&
10756    LLVM_DEBUG(dbgs() << "At least one of the two input vectors is dead - "
10757                         "using XXPERM instead\n");
10766      NeedSwap = !NeedSwap;
10801    unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
10803    if (V1HasXXSWAPD) {
10806      else if (SrcElt < 16)
10809    if (V2HasXXSWAPD) {
10812      else if (SrcElt > 15)
10821    for (unsigned j = 0; j != BytesPerElement; ++j)
10822      if (isLittleEndian)
10824            DAG.getConstant(31 - (SrcElt * BytesPerElement + j), dl, MVT::i32));
10827            DAG.getConstant(SrcElt * BytesPerElement + j, dl, MVT::i32));
10830  if (V1HasXXSWAPD) {
10834  if (V2HasXXSWAPD) {
10839  if (isPPC64 && (V1HasXXSWAPD || V2HasXXSWAPD)) {
10840    if (ValType != MVT::v2f64)
10846  ShufflesHandledWithVPERM++;
10851      dbgs() << "Emitting an XXPERM for the following shuffle:\n";
10853      dbgs() << "Emitting a VPERM for the following shuffle:\n";
10856    dbgs() << "With the following permute control vector:\n";
10861    VPermMask = DAG.getBitcast(MVT::v4i32, VPermMask);
10865  if (isLittleEndian)
10871  VPERMNode = DAG.getBitcast(ValType, VPERMNode);
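  // VPERM's permute control vector is defined in big-endian byte order, so
  // on little-endian targets each selector byte is complemented
  // (31 - index) and the inputs are swapped before the node is emitted.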
 
10883  switch (IntrinsicID) {
 
10887  case Intrinsic::ppc_altivec_vcmpbfp_p:
 
10891  case Intrinsic::ppc_altivec_vcmpeqfp_p:
 
10895  case Intrinsic::ppc_altivec_vcmpequb_p:
 
10899  case Intrinsic::ppc_altivec_vcmpequh_p:
 
10903  case Intrinsic::ppc_altivec_vcmpequw_p:
 
10907  case Intrinsic::ppc_altivec_vcmpequd_p:
 
10908    if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
 
10914  case Intrinsic::ppc_altivec_vcmpneb_p:
 
10915  case Intrinsic::ppc_altivec_vcmpneh_p:
 
10916  case Intrinsic::ppc_altivec_vcmpnew_p:
 
10917  case Intrinsic::ppc_altivec_vcmpnezb_p:
 
10918  case Intrinsic::ppc_altivec_vcmpnezh_p:
 
10919  case Intrinsic::ppc_altivec_vcmpnezw_p:
 
10920    if (Subtarget.hasP9Altivec()) {
 
10921      switch (IntrinsicID) {
 
10924      case Intrinsic::ppc_altivec_vcmpneb_p:
 
10927      case Intrinsic::ppc_altivec_vcmpneh_p:
 
10930      case Intrinsic::ppc_altivec_vcmpnew_p:
 
10933      case Intrinsic::ppc_altivec_vcmpnezb_p:
 
10936      case Intrinsic::ppc_altivec_vcmpnezh_p:
 
10939      case Intrinsic::ppc_altivec_vcmpnezw_p:
 
10947  case Intrinsic::ppc_altivec_vcmpgefp_p:
 
10951  case Intrinsic::ppc_altivec_vcmpgtfp_p:
 
10955  case Intrinsic::ppc_altivec_vcmpgtsb_p:
 
10959  case Intrinsic::ppc_altivec_vcmpgtsh_p:
 
10963  case Intrinsic::ppc_altivec_vcmpgtsw_p:
 
10967  case Intrinsic::ppc_altivec_vcmpgtsd_p:
 
10968    if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
 
10974  case Intrinsic::ppc_altivec_vcmpgtub_p:
 
10978  case Intrinsic::ppc_altivec_vcmpgtuh_p:
 
10982  case Intrinsic::ppc_altivec_vcmpgtuw_p:
 
10986  case Intrinsic::ppc_altivec_vcmpgtud_p:
 
10987    if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
 
10994  case Intrinsic::ppc_altivec_vcmpequq:
 
10995  case Intrinsic::ppc_altivec_vcmpgtsq:
 
10996  case Intrinsic::ppc_altivec_vcmpgtuq:
 
10997    if (!Subtarget.isISA3_1())
 
10999    switch (IntrinsicID) {
 
11002    case Intrinsic::ppc_altivec_vcmpequq:
 
11005    case Intrinsic::ppc_altivec_vcmpgtsq:
 
11008    case Intrinsic::ppc_altivec_vcmpgtuq:
 
11015  case Intrinsic::ppc_vsx_xvcmpeqdp_p:
 
11016  case Intrinsic::ppc_vsx_xvcmpgedp_p:
 
11017  case Intrinsic::ppc_vsx_xvcmpgtdp_p:
 
11018  case Intrinsic::ppc_vsx_xvcmpeqsp_p:
 
11019  case Intrinsic::ppc_vsx_xvcmpgesp_p:
 
11020  case Intrinsic::ppc_vsx_xvcmpgtsp_p:
 
11021    if (Subtarget.hasVSX()) {
 
11022      switch (IntrinsicID) {
 
11023      case Intrinsic::ppc_vsx_xvcmpeqdp_p:
 
11026      case Intrinsic::ppc_vsx_xvcmpgedp_p:
 
11029      case Intrinsic::ppc_vsx_xvcmpgtdp_p:
 
11032      case Intrinsic::ppc_vsx_xvcmpeqsp_p:
 
11035      case Intrinsic::ppc_vsx_xvcmpgesp_p:
 
11038      case Intrinsic::ppc_vsx_xvcmpgtsp_p:
 
11048  case Intrinsic::ppc_altivec_vcmpbfp:
 
11051  case Intrinsic::ppc_altivec_vcmpeqfp:
 
11054  case Intrinsic::ppc_altivec_vcmpequb:
 
11057  case Intrinsic::ppc_altivec_vcmpequh:
 
11060  case Intrinsic::ppc_altivec_vcmpequw:
 
11063  case Intrinsic::ppc_altivec_vcmpequd:
 
11064    if (Subtarget.hasP8Altivec())
 
11069  case Intrinsic::ppc_altivec_vcmpneb:
 
11070  case Intrinsic::ppc_altivec_vcmpneh:
 
11071  case Intrinsic::ppc_altivec_vcmpnew:
 
11072  case Intrinsic::ppc_altivec_vcmpnezb:
 
11073  case Intrinsic::ppc_altivec_vcmpnezh:
 
11074  case Intrinsic::ppc_altivec_vcmpnezw:
 
11075    if (Subtarget.hasP9Altivec())
 
11076      switch (IntrinsicID) {
 
11079      case Intrinsic::ppc_altivec_vcmpneb:
 
11082      case Intrinsic::ppc_altivec_vcmpneh:
 
11085      case Intrinsic::ppc_altivec_vcmpnew:
 
11088      case Intrinsic::ppc_altivec_vcmpnezb:
 
11091      case Intrinsic::ppc_altivec_vcmpnezh:
 
11094      case Intrinsic::ppc_altivec_vcmpnezw:
 
11101  case Intrinsic::ppc_altivec_vcmpgefp:
 
11104  case Intrinsic::ppc_altivec_vcmpgtfp:
 
11107  case Intrinsic::ppc_altivec_vcmpgtsb:
 
11110  case Intrinsic::ppc_altivec_vcmpgtsh:
 
11113  case Intrinsic::ppc_altivec_vcmpgtsw:
 
11116  case Intrinsic::ppc_altivec_vcmpgtsd:
 
11117    if (Subtarget.hasP8Altivec())
 
11122  case Intrinsic::ppc_altivec_vcmpgtub:
 
11125  case Intrinsic::ppc_altivec_vcmpgtuh:
 
11128  case Intrinsic::ppc_altivec_vcmpgtuw:
 
11131  case Intrinsic::ppc_altivec_vcmpgtud:
 
11132    if (Subtarget.hasP8Altivec())
 
11137  case Intrinsic::ppc_altivec_vcmpequq_p:
 
11138  case Intrinsic::ppc_altivec_vcmpgtsq_p:
 
11139  case Intrinsic::ppc_altivec_vcmpgtuq_p:
 
11140    if (!Subtarget.isISA3_1())
 
11142    switch (IntrinsicID) {
 
11145    case Intrinsic::ppc_altivec_vcmpequq_p:
 
11148    case Intrinsic::ppc_altivec_vcmpgtsq_p:
 
11151    case Intrinsic::ppc_altivec_vcmpgtuq_p:
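The dot-form ("_p") comparison intrinsics handled in this switch are usually reached from the altivec.h predicate helpers rather than written by hand. A minimal sketch using the real header and builtin (codegen details vary by CPU, so treat it as illustrative):

#include <altivec.h>

// vec_all_eq expands to __builtin_altivec_vcmpequb_p, which arrives here as
// llvm.ppc.altivec.vcmpequb.p; the lowering selects a vcmpequb. whose CR6
// field encodes "all lanes equal".
bool all_bytes_equal(vector unsigned char a, vector unsigned char b) {
  return vec_all_eq(a, b);
}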
 
 
11165  unsigned IntrinsicID = Op.getConstantOperandVal(0);
11169  switch (IntrinsicID) {
11170  case Intrinsic::thread_pointer:
11172    if (Subtarget.isPPC64())
11176  case Intrinsic::ppc_rldimi: {
11177    assert(Subtarget.isPPC64() && "rldimi is only available in 64-bit!");
11179    APInt Mask = Op.getConstantOperandAPInt(4);
11181      return Op.getOperand(2);
11182    if (Mask.isAllOnes())
11184    uint64_t SH = Op.getConstantOperandVal(3);
11185    unsigned MB = 0, ME = 0;
11189    if (ME < 63 - SH) {
11192    } else if (ME > 63 - SH) {
11198                           {Op.getOperand(2), Src,
11199                            DAG.getTargetConstant(63 - ME, dl, MVT::i32),
11200                            DAG.getTargetConstant(MB, dl, MVT::i32)}),
11204  case Intrinsic::ppc_rlwimi: {
11205    APInt Mask = Op.getConstantOperandAPInt(4);
11207      return Op.getOperand(2);
11208    if (Mask.isAllOnes())
11211    unsigned MB = 0, ME = 0;
11215                       PPC::RLWIMI, dl, MVT::i32,
11216                       {Op.getOperand(2), Op.getOperand(1), Op.getOperand(3),
11217                        DAG.getTargetConstant(MB, dl, MVT::i32),
11218                        DAG.getTargetConstant(ME, dl, MVT::i32)}),
11222  case Intrinsic::ppc_rlwnm: {
11223    if (Op.getConstantOperandVal(3) == 0)
11225    unsigned MB = 0, ME = 0;
11230                           {Op.getOperand(1), Op.getOperand(2),
11231                            DAG.getTargetConstant(MB, dl, MVT::i32),
11232                            DAG.getTargetConstant(ME, dl, MVT::i32)}),
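The rlwimi/rlwnm cases above fire only when the constant mask is a run of ones, from which the MB/ME instruction fields are derived. A self-contained sketch of that check (the in-tree helper is isRunOfOnes from llvm/Support/MathExtras.h; this standalone version is for illustration only):

#include <cstdint>

static bool isMask(uint32_t V) { return V && ((V + 1) & V) == 0; }
static bool isShiftedMask(uint32_t V) { return V && isMask((V - 1) | V); }

// Reports a contiguous (possibly wrapping) run of ones as PowerPC
// mask-begin/mask-end bit numbers, where bit 0 is the most significant bit.
static bool runOfOnes(uint32_t Val, unsigned &MB, unsigned &ME) {
  if (isShiftedMask(Val)) {              // plain run, e.g. 0x00FF0000
    MB = __builtin_clz(Val);             // first one bit from the top
    ME = 31 - __builtin_ctz(Val);        // last one bit of the run
    return true;
  }
  if (Val != 0 && isShiftedMask(~Val)) { // wrapped run, e.g. 0xF000000F
    ME = __builtin_clz(~Val) - 1;        // run ends just above the zero block
    MB = 32 - __builtin_ctz(~Val);       // and resumes just below it
    return true;
  }
  return false;
}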
 
11236  case Intrinsic::ppc_mma_disassemble_acc: {
11237    if (Subtarget.isISAFuture()) {
11238      EVT ReturnTypes[] = {MVT::v256i1, MVT::v256i1};
11250          Subtarget.isLittleEndian() ? Value2 : Value,
11251          DAG.getConstant(Subtarget.isLittleEndian() ? 1 : 0,
11256          Subtarget.isLittleEndian() ? Value2 : Value,
11257          DAG.getConstant(Subtarget.isLittleEndian() ? 0 : 1,
11262          Subtarget.isLittleEndian() ? Value : Value2,
11263          DAG.getConstant(Subtarget.isLittleEndian() ? 1 : 0,
11268          Subtarget.isLittleEndian() ? Value : Value2,
11269          DAG.getConstant(Subtarget.isLittleEndian() ? 0 : 1,
11276  case Intrinsic::ppc_vsx_disassemble_pair: {
11279    if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) {
11284    for (int VecNo = 0; VecNo < NumVecs; VecNo++) {
11287          DAG.getConstant(Subtarget.isLittleEndian() ? NumVecs - 1 - VecNo
 
11295  case Intrinsic::ppc_mma_build_dmr: {
11298    for (int i = 1; i < 9; i += 2) {
11301      if (Hi->getOpcode() == ISD::LOAD)
11303      if (Lo->getOpcode() == ISD::LOAD)
 
11313  case Intrinsic::ppc_mma_dmxxextfdmr512: {
11314    assert(Subtarget.isISAFuture() && "dmxxextfdmr512 requires ISA Future");
11316    assert(Idx && (Idx->getSExtValue() == 0 || Idx->getSExtValue() == 1) &&
11317           "Specify P of 0 or 1 for lower or upper 512 bytes");
11318    unsigned HiLo = Idx->getSExtValue();
11322      Opcode = PPC::DMXXEXTFDMR512;
11323      Subx = PPC::sub_wacc_lo;
11325      Opcode = PPC::DMXXEXTFDMR512_HI;
11326      Subx = PPC::sub_wacc_hi;
11329        DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::v512i1,
11333    EVT ReturnTypes[] = {MVT::v256i1, MVT::v256i1};
11337  case Intrinsic::ppc_mma_dmxxextfdmr256: {
11338    assert(Subtarget.isISAFuture() && "dmxxextfdmr256 requires ISA Future");
11340    assert(Idx && (Idx->getSExtValue() >= 0 && Idx->getSExtValue() <= 3) &&
11341           "Specify a dmr row pair 0-3");
11342    unsigned IdxVal = Idx->getSExtValue();
11346      Subx = PPC::sub_dmrrowp0;
11349      Subx = PPC::sub_dmrrowp1;
11352      Subx = PPC::sub_wacc_hi_then_sub_dmrrowp0;
11355      Subx = PPC::sub_wacc_hi_then_sub_dmrrowp1;
11359        DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::v256i1,
11365        DAG.getMachineNode(PPC::DMXXEXTFDMR256, dl, MVT::v256i1, {Subreg, P}),
11369  case Intrinsic::ppc_mma_dmxxinstdmr512: {
11370    assert(Subtarget.isISAFuture() && "dmxxinstdmr512 requires ISA Future");
11372    assert(Idx && (Idx->getSExtValue() == 0 || Idx->getSExtValue() == 1) &&
11373           "Specify P of 0 or 1 for lower or upper 512 bytes");
11374    unsigned HiLo = Idx->getSExtValue();
11378      Opcode = PPC::DMXXINSTDMR512;
11379      Subx = PPC::sub_wacc_lo;
11381      Opcode = PPC::DMXXINSTDMR512_HI;
11382      Subx = PPC::sub_wacc_hi;
11392  case Intrinsic::ppc_mma_dmxxinstdmr256: {
11393    assert(Subtarget.isISAFuture() && "dmxxinstdmr256 requires ISA Future");
11395    assert(Idx && (Idx->getSExtValue() >= 0 && Idx->getSExtValue() <= 3) &&
11396           "Specify a dmr row pair 0-3");
11397    unsigned IdxVal = Idx->getSExtValue();
11401      Subx = PPC::sub_dmrrowp0;
11404      Subx = PPC::sub_dmrrowp1;
11407      Subx = PPC::sub_wacc_hi_then_sub_dmrrowp0;
11410      Subx = PPC::sub_wacc_hi_then_sub_dmrrowp1;
11419                                      Op.getOperand(1), DMRRowp, SubReg),
 
11423  case Intrinsic::ppc_mma_xxmfacc:
 
11424  case Intrinsic::ppc_mma_xxmtacc: {
 
11426    if (!Subtarget.isISAFuture())
 
11437  case Intrinsic::ppc_unpack_longdouble: {
 
11439    assert(Idx && (Idx->getSExtValue() == 0 || Idx->getSExtValue() == 1) &&
 
11440           "Argument of long double unpack must be 0 or 1!");
 
11443                                       Idx->getValueType(0)));
 
11446  case Intrinsic::ppc_compare_exp_lt:
 
11447  case Intrinsic::ppc_compare_exp_gt:
 
11448  case Intrinsic::ppc_compare_exp_eq:
 
11449  case Intrinsic::ppc_compare_exp_uo: {
 
11451    switch (IntrinsicID) {
 
11452    case Intrinsic::ppc_compare_exp_lt:
 
11455    case Intrinsic::ppc_compare_exp_gt:
 
11458    case Intrinsic::ppc_compare_exp_eq:
 
11461    case Intrinsic::ppc_compare_exp_uo:
 
11467            PPC::SELECT_CC_I4, dl, MVT::i32,
 
11468            {SDValue(DAG.getMachineNode(PPC::XSCMPEXPDP, dl, MVT::i32,
 
11469                                        Op.getOperand(1), Op.getOperand(2)),
 
11471             DAG.getConstant(1, dl, MVT::i32), DAG.getConstant(0, dl, MVT::i32),
 
11472             DAG.getTargetConstant(Pred, dl, MVT::i32)}),
 
11475  case Intrinsic::ppc_test_data_class: {
 
11476    EVT OpVT = Op.getOperand(1).getValueType();
 
11477    unsigned CmprOpc = OpVT == MVT::f128 ? PPC::XSTSTDCQP
 
11478                                         : (OpVT == MVT::f64 ? PPC::XSTSTDCDP
 
11482            PPC::SELECT_CC_I4, dl, MVT::i32,
 
11483            {SDValue(DAG.getMachineNode(CmprOpc, dl, MVT::i32, Op.getOperand(2),
 
11486             DAG.getConstant(1, dl, MVT::i32), DAG.getConstant(0, dl, MVT::i32),
 
11487             DAG.getTargetConstant(PPC::PRED_EQ, dl, MVT::i32)}),
 
11490  case Intrinsic::ppc_fnmsub: {
11491    EVT VT = Op.getOperand(1).getValueType();
11492    if (!Subtarget.hasVSX() || (!Subtarget.hasFloat128() && VT == MVT::f128))
11496                      DAG.getNode(ISD::FNEG, dl, VT, Op.getOperand(3))));
11498                       Op.getOperand(2), Op.getOperand(3));
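When the target cannot use a native fnmsub, the fallback above rebuilds it from FMA and FNEG. The identity it relies on, as a one-line reference (std::fma preserves the single rounding):

#include <cmath>

// PowerPC fnmsub(a, b, c) computes -(a*b - c), i.e. the negated fused
// multiply-subtract, which equals -fma(a, b, -c) with identical rounding.
double fnmsub_ref(double a, double b, double c) {
  return -std::fma(a, b, -c);
}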
 
11500  case Intrinsic::ppc_convert_f128_to_ppcf128:
 
11501  case Intrinsic::ppc_convert_ppcf128_to_f128: {
 
11502    RTLIB::Libcall LC = IntrinsicID == Intrinsic::ppc_convert_ppcf128_to_f128
 
11503                            ? RTLIB::CONVERT_PPCF128_F128
 
11504                            : RTLIB::CONVERT_F128_PPCF128;
 
11506    std::pair<SDValue, SDValue> Result =
11507        makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(1), CallOptions,
 
11511  case Intrinsic::ppc_maxfe:
 
11512  case Intrinsic::ppc_maxfl:
 
11513  case Intrinsic::ppc_maxfs:
 
11514  case Intrinsic::ppc_minfe:
 
11515  case Intrinsic::ppc_minfl:
 
11516  case Intrinsic::ppc_minfs: {
 
11517    EVT VT = Op.getValueType();
11520               [VT](const SDUse &Use) { return Use.getValueType() == VT; }) &&
11521        "ppc_[max|min]f[e|l|s] must have uniform type arguments");
 
11524    if (IntrinsicID == Intrinsic::ppc_minfe ||
 
11525        IntrinsicID == Intrinsic::ppc_minfl ||
 
11526        IntrinsicID == Intrinsic::ppc_minfs)
 
11548                              Op.getOperand(1), Op.getOperand(2),
11550    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
 
11559  EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
11567  switch (Op.getConstantOperandVal(1)) {
11572    Bitx = PPC::sub_eq;
11578    Bitx = PPC::sub_eq;
11584    Bitx = PPC::sub_lt;
11590    Bitx = PPC::sub_lt;
11596  if (Subtarget.isISA3_1()) {
11601                                   CR6Reg, SubRegIdx, GlueOp),
11603    return DAG.getNode(SetOp, dl, MVT::i32, CRBit);
 
11631  switch (Op.getConstantOperandVal(ArgStart)) {
11632  case Intrinsic::ppc_cfence: {
11633    assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
11634    SDValue Val = Op.getOperand(ArgStart + 1);
11636    if (Ty == MVT::i128) {
11641    unsigned Opcode = Subtarget.isPPC64() ? PPC::CFENCE8 : PPC::CFENCE;
11644            Opcode, DL, MVT::Other,
11649  case Intrinsic::ppc_mma_disassemble_dmr: {
11651                        Op.getOperand(ArgStart + 1), MachinePointerInfo());
 
11662  if (!Subtarget.isPPC64())
 
11670  int VectorIndex = 0;
 
11671  if (Subtarget.isLittleEndian())
 
11682  assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
11683         "Expecting an atomic compare-and-swap here.");
11686  EVT MemVT = AtomicNode->getMemoryVT();
11704  for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
11705    Ops.push_back(AtomicNode->getOperand(i));
 
11707  MachineMemOperand *MMO = AtomicNode->getMemOperand();
 
11708  SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
11717  EVT MemVT = N->getMemoryVT();
11719         "Expect quadword atomic operations");
11721  unsigned Opc = N->getOpcode();
11723  case ISD::ATOMIC_LOAD: {
11726    SDVTList Tys = DAG.getVTList(MVT::i64, MVT::i64, MVT::Other);
11729        DAG.getConstant(Intrinsic::ppc_atomic_load_i128, dl, MVT::i32)};
11730    for (int I = 1, E = N->getNumOperands(); I < E; ++I)
11731      Ops.push_back(N->getOperand(I));
11733                                                Ops, MemVT, N->getMemOperand());
11740        DAG.getNode(ISD::OR, dl, {MVT::i128, MVT::Other}, {ValLo, ValHi});
11744  case ISD::ATOMIC_STORE: {
11747    SDVTList Tys = DAG.getVTList(MVT::Other);
11750        DAG.getConstant(Intrinsic::ppc_atomic_store_i128, dl, MVT::i32)};
11756    Ops.push_back(ValLo);
11757    Ops.push_back(ValHi);
11758    Ops.push_back(N->getOperand(2));
11760                                   N->getMemOperand());
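What typically reaches this quadword-atomic path, sketched at the user level (assuming a target where 16-byte atomics are inline, e.g. -mcpu=power8; otherwise the operation is a libatomic call instead):

#include <atomic>

// A lock-free 16-byte load is legalized through ppc_atomic_load_i128,
// which yields the value as two i64 halves that are recombined above.
std::atomic<__int128> Counter;

__int128 snapshot() { return Counter.load(std::memory_order_relaxed); }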
 
11772  enum DataClassMask {
 
11774    DC_NEG_INF = 1 << 4,
 
11775    DC_POS_INF = 1 << 5,
 
11776    DC_NEG_ZERO = 1 << 2,
 
11777    DC_POS_ZERO = 1 << 3,
 
11778    DC_NEG_SUBNORM = 1,
 
11779    DC_POS_SUBNORM = 1 << 1,
 
11782  EVT VT = Op.getValueType();
 
11784  unsigned TestOp = VT == MVT::f128  ? PPC::XSTSTDCQP
 
11785                    : VT == MVT::f64 ? PPC::XSTSTDCDP
 
11796    return DAG.getNOT(Dl, Rev, MVT::i1);
11803                    TestOp, Dl, MVT::i32,
11805                                              DC_NEG_ZERO | DC_POS_ZERO |
11806                                              DC_NEG_SUBNORM | DC_POS_SUBNORM,
11812        DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, Dl, MVT::i1, Rev,
11818                    TargetOpcode::EXTRACT_SUBREG, Dl, MVT::i1, Rev,
11823      Sign = DAG.getNOT(Dl, Sign, MVT::i1);
11836    bool IsQuiet = Mask & fcQNan;
11842    if (VT == MVT::f128) {
11846      QuietMask = 0x8000;
11847    } else if (VT == MVT::f64) {
11848      if (Subtarget.isPPC64()) {
11859      QuietMask = 0x80000;
11860    } else if (VT == MVT::f32) {
11862      QuietMask = 0x400000;
 
11878  unsigned NativeMask = 0;
 
11880    NativeMask |= DC_NAN;
 
11882    NativeMask |= DC_NEG_INF;
 
11884    NativeMask |= DC_POS_INF;
 
11886    NativeMask |= DC_NEG_ZERO;
 
11888    NativeMask |= DC_POS_ZERO;
 
11890    NativeMask |= DC_NEG_SUBNORM;
 
11892    NativeMask |= DC_POS_SUBNORM;
 
11895          TargetOpcode::EXTRACT_SUBREG, Dl, MVT::i1,
11897                      TestOp, Dl, MVT::i32,
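A hedged note on what feeds this lowering: Clang folds ordinary classification checks into llvm.is.fpclass with an fc* mask, so on a Power9 target a test like the one below collapses into a single XSTSTDCDP carrying the corresponding data-class immediate:

#include <cmath>

// isnan-or-isinf becomes one is.fpclass call with (fcNan | fcInf), which
// the accumulation above maps onto DC_NAN | DC_NEG_INF | DC_POS_INF.
bool special(double X) { return std::isnan(X) || std::isinf(X); }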
 
 
11906  assert(Subtarget.hasP9Vector() && "Test data class requires Power9");
11908  uint64_t RHSC = Op.getConstantOperandVal(1);
11911  if (LHS.getValueType() == MVT::ppcf128) {
11927  unsigned EltSize = Op.getValueType().getScalarSizeInBits();
11929    int64_t IntVal = Op.getConstantOperandVal(0);
11930    if (IntVal >= -16 && IntVal <= 15)
11936  if (Subtarget.hasLFIWAX() && Subtarget.hasVSX() &&
11937      Op.getValueType() == MVT::v4i32 && Op0.getOpcode() == ISD::LOAD &&
11941    MachineMemOperand *MMO =
11943                                RLI.Alignment, RLI.AAInfo, RLI.Ranges);
11950    return Bits.getValue(0);
 
11966      !Subtarget.isLittleEndian() && ValVT.isInteger() &&
11971        64 - Op.getValueType().getScalarSizeInBits(), dl, ShiftAmountTy);
11978    return DAG.getLoad(Op.getValueType(), dl, Store, FIdx,
11979                       MachinePointerInfo());
11986  return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
 
11992         "Should only be called for ISD::INSERT_VECTOR_ELT");
11996  EVT VT = Op.getValueType();
12001  if (VT == MVT::v2f64 && C)
12004  if (Subtarget.hasP9Vector()) {
12013    if ((VT == MVT::v4f32) && (V2.getValueType() == MVT::f32) &&
12019                      BitcastLoad, Op.getOperand(2));
12020      return DAG.getBitcast(MVT::v4f32, InsVecElt);
12024  if (Subtarget.isISA3_1()) {
12025    if ((VT == MVT::v2i64 || VT == MVT::v2f64) && !Subtarget.isPPC64())
12029    if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
12030        VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64)
12040  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
12043    unsigned InsertAtElement = C->getZExtValue();
12044    unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
12045    if (Subtarget.isLittleEndian()) {
12046      InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
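The little-endian adjustment above in isolation: the vector-insert instructions count bytes from the big-endian end of the register, so an LE element index must be mirrored across the 16-byte vector. A minimal restatement:

// Byte position handed to the insert instruction for element Elt of a
// vector whose elements are EltBytes wide (little-endian case).
unsigned leInsertByte(unsigned Elt, unsigned EltBytes) {
  return (16 - EltBytes) - Elt * EltBytes;
}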
 
12060  EVT VT = Op.getValueType();
12061  bool IsV1024i1 = VT == MVT::v1024i1;
12062  bool IsV2048i1 = VT == MVT::v2048i1;
12066  assert((IsV1024i1 || IsV2048i1) && "Unsupported type.");
12068  assert((Subtarget.hasMMA() && Subtarget.isISAFuture()) &&
12069         "Dense Math support required.");
12070  assert(Subtarget.pairedVectorMemops() && "Vector pair support required.");
12079  for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
12080    MachineMemOperand *NewMMO =
12088                                         DAG.getVTList(MVT::v256i1, MVT::Other),
12089                                         LoadOps, MVT::v256i1, NewMMO);
12094  if (Subtarget.isLittleEndian()) {
12095    std::reverse(Loads.begin(), Loads.end());
12096    std::reverse(LoadChains.begin(), LoadChains.end());
12105                                Loads[2], Loads[3]),
12121                                    Loads[4], Loads[5]),
12124                                    Loads[6], Loads[7]),
12126  const SDValue Dmr1Ops[] = {RC, Dmr1Lo, LoSub, Dmr1Hi, HiSub};
12128      DAG.getMachineNode(PPC::REG_SEQUENCE, dl, MVT::v1024i1, Dmr1Ops), 0);
12134  const SDValue DmrPOps[] = {DmrPRC, Value, Dmr0Sub, Dmr1Value, Dmr1Sub};
12137      DAG.getMachineNode(PPC::REG_SEQUENCE, dl, MVT::v2048i1, DmrPOps), 0);
12150                                Pairs[2], Pairs[3]),
12156                                    {RC, Lo, LoSub, Hi, HiSub}),
 
12166  EVT VT = Op.getValueType();
12168  if (VT == MVT::v1024i1 || VT == MVT::v2048i1)
12169    return LowerDMFVectorLoad(Op, DAG);
12171  if (VT != MVT::v256i1 && VT != MVT::v512i1)
12177  assert((VT != MVT::v512i1 || Subtarget.hasMMA()) &&
12178         "Type unsupported without MMA");
12179  assert((VT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
12180         "Type unsupported without paired vector support");
12185  for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
12187        DAG.getLoad(MVT::v16i8, dl, LoadChain, BasePtr,
12196  if (Subtarget.isLittleEndian()) {
12197    std::reverse(Loads.begin(), Loads.end());
12198    std::reverse(LoadChains.begin(), LoadChains.end());
 
12218  bool IsV1024i1 = VT == MVT::v1024i1;
12219  bool IsV2048i1 = VT == MVT::v2048i1;
12223  assert((IsV1024i1 || IsV2048i1) && "Unsupported type.");
12225  assert((Subtarget.hasMMA() && Subtarget.isISAFuture()) &&
12226         "Dense Math support required.");
12227  assert(Subtarget.pairedVectorMemops() && "Vector pair support required.");
12229  EVT ReturnTypes[] = {MVT::v256i1, MVT::v256i1};
12232                   TargetOpcode::EXTRACT_SUBREG, dl, MVT::v512i1,
12237                   TargetOpcode::EXTRACT_SUBREG, dl, MVT::v512i1,
12241    MachineSDNode *ExtNode =
12245    ExtNode = DAG.getMachineNode(PPC::DMXXEXTFDMR512_HI, dl, ReturnTypes, Hi);
12251        DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::v1024i1,
12257        DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::v1024i1,
12263                       TargetOpcode::EXTRACT_SUBREG, dl, MVT::v512i1, Dmr0,
12268                       TargetOpcode::EXTRACT_SUBREG, dl, MVT::v512i1, Dmr0,
12273                       TargetOpcode::EXTRACT_SUBREG, dl, MVT::v512i1, Dmr1,
12278                       TargetOpcode::EXTRACT_SUBREG, dl, MVT::v512i1, Dmr1,
12282    MachineSDNode *ExtNode =
12283        DAG.getMachineNode(PPC::DMXXEXTFDMR512, dl, ReturnTypes, Dmr0Lo);
12287        DAG.getMachineNode(PPC::DMXXEXTFDMR512_HI, dl, ReturnTypes, Dmr0Hi);
12290    ExtNode = DAG.getMachineNode(PPC::DMXXEXTFDMR512, dl, ReturnTypes, Dmr1Lo);
12294        DAG.getMachineNode(PPC::DMXXEXTFDMR512_HI, dl, ReturnTypes, Dmr1Hi);
12299  if (Subtarget.isLittleEndian())
12300    std::reverse(Values.begin(), Values.end());
12302  SDVTList Tys = DAG.getVTList(MVT::Other);
12304      StoreChain, DAG.getConstant(Intrinsic::ppc_vsx_stxvp, dl, MVT::i32),
12308  for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
12309    MachineMemOperand *NewMMO =
12316    Ops[2] = Values[Idx];
12318                                         MVT::v256i1, NewMMO);
 
12334  EVT StoreVT = Value.getValueType();
12336  if (StoreVT == MVT::v1024i1 || StoreVT == MVT::v2048i1)
12337    return LowerDMFVectorStore(Op, DAG);
12339  if (StoreVT != MVT::v256i1 && StoreVT != MVT::v512i1)
12345  assert((StoreVT != MVT::v512i1 || Subtarget.hasMMA()) &&
12346         "Type unsupported without MMA");
12347  assert((StoreVT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
12348         "Type unsupported without paired vector support");
12351  unsigned NumVecs = 2;
12352  if (StoreVT == MVT::v512i1) {
12353    if (Subtarget.isISAFuture()) {
12354      EVT ReturnTypes[] = {MVT::v256i1, MVT::v256i1};
12356          PPC::DMXXEXTFDMR512, dl, ReturnTypes, Op.getOperand(1));
12359      Value2 = SDValue(ExtNode, 1);
12364  for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
12365    unsigned VecNum = Subtarget.isLittleEndian() ? NumVecs - 1 - Idx : Idx;
12367    if (Subtarget.isISAFuture()) {
12368      VecNum = Subtarget.isLittleEndian() ? 1 - (Idx % 2) : (Idx % 2);
12370                        Idx > 1 ? Value2 : Value,
12377        DAG.getStore(StoreChain, dl, Elt, BasePtr,
 
12391  if (Op.getValueType() == MVT::v4i32) {
12403    RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
12408                                        LHS, RHS, DAG, dl, MVT::v4i32);
12411                                      LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
12416  } else if (Op.getValueType() == MVT::v16i8) {
12418    bool isLittleEndian = Subtarget.isLittleEndian();
12422                                           LHS, RHS, DAG, dl, MVT::v8i16);
12423    EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
12427                                          LHS, RHS, DAG, dl, MVT::v8i16);
12428    OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
12435    for (unsigned i = 0; i != 8; ++i) {
12436      if (isLittleEndian) {
12438        Ops[i*2+1] = 2*i+16;
12441        Ops[i*2+1] = 2*i+1+16;
12444    if (isLittleEndian)
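The loop above builds a vperm selection mask that interleaves bytes from the two vectors of 16-bit half-products. A sketch of the big-endian variant (the even-index entries of the elided lines are inferred, so treat the exact indices as illustrative):

int Mask[16];
for (unsigned i = 0; i != 8; ++i) {
  Mask[i * 2]     = 2 * i + 1;       // low byte of the even-lane product
  Mask[i * 2 + 1] = 2 * i + 1 + 16;  // low byte of the odd-lane product
}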
 
12454  bool IsStrict = Op->isStrictFPOpcode();
12455  if (Op.getOperand(IsStrict ? 1 : 0).getValueType() == MVT::f128 &&
12456      !Subtarget.hasP9Vector())
12465  assert(Op.getOpcode() == ISD::FP_EXTEND &&
12466         "Should only be called for ISD::FP_EXTEND");
12470  if (Op.getValueType() != MVT::v2f64 ||
12471      Op.getOperand(0).getValueType() != MVT::v2f32)
12483           "Node should have 2 operands with second one being a constant!");
12495    int DWord = Idx >> 1;
12498    if (Subtarget.isLittleEndian())
12515      SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
12518          LD->getMemoryVT(), LD->getMemOperand());
12528    SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
12531        LD->getMemoryVT(), LD->getMemOperand());
 
12543  if (STI.useCRBits())
 
 
12561  if (STI.useCRBits())
 
 
12569  SDNode *N = Op.getNode();
12570  EVT VT = N->getValueType(0);
12571  EVT CarryType = N->getValueType(1);
12572  unsigned Opc = N->getOpcode();
12576                            N->getOperand(0), N->getOperand(1));
12588  SDNode *N = Op.getNode();
12589  unsigned Opc = N->getOpcode();
12590  EVT VT = N->getValueType(0);
12591  EVT CarryType = N->getValueType(1);
12592  SDValue CarryOp = N->getOperand(2);
12600                            Op.getOperand(0), Op.getOperand(1), CarryOp);
12614  EVT VT = Op.getNode()->getValueType(0);
12638  EVT OpVT = A.getValueType();
12639  EVT ResVT = Op.getValueType();
12645  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
 
12663  switch (Op.getOpcode()) {
12666  case ISD::FPOW:               return lowerPow(Op, DAG);
12667  case ISD::FSIN:               return lowerSin(Op, DAG);
12668  case ISD::FCOS:               return lowerCos(Op, DAG);
12669  case ISD::FLOG:               return lowerLog(Op, DAG);
12670  case ISD::FLOG10:             return lowerLog10(Op, DAG);
12671  case ISD::FEXP:               return lowerExp(Op, DAG);
12680  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
12681  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
12683    return LowerSSUBO(Op, DAG);
12685  case ISD::INLINEASM:
12686  case ISD::INLINEASM_BR:       return LowerINLINEASM(Op, DAG);
12688  case ISD::VASTART:            return LowerVASTART(Op, DAG);
12689  case ISD::VAARG:              return LowerVAARG(Op, DAG);
12690  case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
12692  case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
12693  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
12694  case ISD::GET_DYNAMIC_AREA_OFFSET:
12695    return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
12702  case ISD::LOAD:               return LowerLOAD(Op, DAG);
12703  case ISD::STORE:              return LowerSTORE(Op, DAG);
12715  case ISD::SET_ROUNDING:
12716    return LowerSET_ROUNDING(Op, DAG);
12723  case ISD::FSHL:               return LowerFunnelShift(Op, DAG);
12724  case ISD::FSHR:               return LowerFunnelShift(Op, DAG);
12733  case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
12736    return LowerFP_ROUND(Op, DAG);
12742  case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
12749    return LowerINTRINSIC_VOID(Op, DAG);
12751    return LowerBSWAP(Op, DAG);
12752  case ISD::ATOMIC_CMP_SWAP:
12753    return LowerATOMIC_CMP_SWAP(Op, DAG);
12754  case ISD::ATOMIC_STORE:
12755    return LowerATOMIC_LOAD_STORE(Op, DAG);
12757    return LowerIS_FPCLASS(Op, DAG);
12760    return LowerADDSUBO(Op, DAG);
12763    return LowerADDSUBO_CARRY(Op, DAG);
12765    return LowerUCMP(Op, DAG);
12771    if (Op->getFlags().hasNoFPExcept())
 
 
12781  switch (N->getOpcode()) {
12783    llvm_unreachable("Do not know how to custom type legalize this operation!");
12784  case ISD::ATOMIC_LOAD: {
12790  case ISD::READCYCLECOUNTER: {
12800    if (N->getConstantOperandVal(1) != Intrinsic::loop_decrement)
12803    assert(N->getValueType(0) == MVT::i1 &&
12804           "Unexpected result type for CTR decrement intrinsic");
12806                                 N->getValueType(0));
12816    switch (N->getConstantOperandVal(0)) {
12817    case Intrinsic::ppc_pack_longdouble:
12819                                    N->getOperand(2), N->getOperand(1)));
12821    case Intrinsic::ppc_maxfe:
12822    case Intrinsic::ppc_minfe:
12823    case Intrinsic::ppc_fnmsub:
12824    case Intrinsic::ppc_convert_f128_to_ppcf128:
12831    if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
12834    EVT VT = N->getValueType(0);
12836    if (VT == MVT::i64) {
12849    if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
12853    Results.push_back(LoweredValue);
12854    if (N->isStrictFPOpcode())
12859    if (!N->getValueType(0).isVector())
12879  case ISD::FP_EXTEND:
 
 
12892  return Builder.CreateIntrinsic(Id, {});
 
 
12898  unsigned SZ = ValueTy->getPrimitiveSizeInBits();
12900  assert((SZ == 8 || SZ == 16 || SZ == 32 || SZ == 64) &&
12901         "Only 8/16/32/64-bit atomic loads supported");
12907    IntID = Intrinsic::ppc_lbarx;
12908    assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
12911    IntID = Intrinsic::ppc_lharx;
12912    assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
12915    IntID = Intrinsic::ppc_lwarx;
12918    IntID = Intrinsic::ppc_ldarx;
12922      Builder.CreateIntrinsic(IntID, Addr, nullptr, "larx");
12924  return Builder.CreateTruncOrBitCast(Call, ValueTy);
12935  assert((SZ == 8 || SZ == 16 || SZ == 32 || SZ == 64) &&
12936         "Only 8/16/32/64-bit atomic loads supported");
12942    IntID = Intrinsic::ppc_stbcx;
12943    assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
12946    IntID = Intrinsic::ppc_sthcx;
12947    assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
12950    IntID = Intrinsic::ppc_stwcx;
12953    IntID = Intrinsic::ppc_stdcx;
12957  if (SZ == 8 || SZ == 16)
12958    Val = Builder.CreateZExt(Val, Builder.getInt32Ty());
12960  Value *Call = Builder.CreateIntrinsic(IntID, {Addr, Val},
12962  return Builder.CreateXor(Call, Builder.getInt32(1));
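These two hooks hand AtomicExpand the larx/stcx. pair; the retry loop and the success flag (the XOR with 1 above, since stcx. reports failure as 1 in the intrinsic's convention here) come from the generic pass. The end-to-end shape, from the user's side:

// A strong 32-bit CAS is lowered through exactly this lwarx/stwcx. loop on
// PowerPC (builtin available in both GCC and Clang).
bool cas32(volatile int *P, int Expected, int Desired) {
  return __atomic_compare_exchange_n(P, &Expected, Desired, /*weak=*/false,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}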
 
 
12985      return Builder.CreateIntrinsic(Intrinsic::ppc_cfence, {Inst->getType()},
12995                                    unsigned AtomicSize,
12996                                    unsigned BinOpcode,
12997                                    unsigned CmpOpcode,
12998                                    unsigned CmpPred) const {
 
13002  auto LoadMnemonic = PPC::LDARX;
13003  auto StoreMnemonic = PPC::STDCX;
13004  switch (AtomicSize) {
13008    LoadMnemonic = PPC::LBARX;
13009    StoreMnemonic = PPC::STBCX;
13010    assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
13013    LoadMnemonic = PPC::LHARX;
13014    StoreMnemonic = PPC::STHCX;
13015    assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
13018    LoadMnemonic = PPC::LWARX;
13019    StoreMnemonic = PPC::STWCX;
13022    LoadMnemonic = PPC::LDARX;
13023    StoreMnemonic = PPC::STDCX;
13039    CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
13041  F->insert(It, loopMBB);
13043    F->insert(It, loop2MBB);
13044  F->insert(It, exitMBB);
13050  Register TmpReg = (!BinOpcode) ? incr :
13051    RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
13052                                           : &PPC::GPRCRegClass);
13077  BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
13082    Register CrReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
13084    if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
13085      Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
13086      BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
 
 
13116  switch(MI.getOpcode()) {
13120    return TII->isSignExtended(MI.getOperand(1).getReg(),
13121                               &MI.getMF()->getRegInfo());
 
13145  case PPC::EXTSB8_32_64:
 
13146  case PPC::EXTSB8_rec:
 
13147  case PPC::EXTSB_rec:
 
13150  case PPC::EXTSH8_32_64:
 
13151  case PPC::EXTSH8_rec:
 
13152  case PPC::EXTSH_rec:
 
13154  case PPC::EXTSWSLI:
 
13155  case PPC::EXTSWSLI_32_64:
 
13156  case PPC::EXTSWSLI_32_64_rec:
 
13157  case PPC::EXTSWSLI_rec:
 
13158  case PPC::EXTSW_32:
 
13159  case PPC::EXTSW_32_64:
 
13160  case PPC::EXTSW_32_64_rec:
 
13161  case PPC::EXTSW_rec:
 
13164  case PPC::SRAWI_rec:
 
13165  case PPC::SRAW_rec:
 
 
13174    unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
13184  bool IsSignExtended =
13187  if (CmpOpcode == PPC::CMPW && !IsSignExtended) {
13188    Register ValueReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
13189    BuildMI(*BB, MI, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueReg)
13190        .addReg(MI.getOperand(3).getReg());
13191    MI.getOperand(3).setReg(ValueReg);
13195  if (Subtarget.hasPartwordAtomics())
13203  bool is64bit = Subtarget.isPPC64();
13204  bool isLittleEndian = Subtarget.isLittleEndian();
13205  unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
13216      CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
13218  F->insert(It, loopMBB);
13220    F->insert(It, loop2MBB);
13221  F->insert(It, exitMBB);
 
13227      is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
 
13230  Register PtrReg = RegInfo.createVirtualRegister(RC);
 
13231  Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
 
13233      isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
 
13234  Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
 
13235  Register MaskReg = RegInfo.createVirtualRegister(GPRC);
 
13236  Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
 
13237  Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
 
13238  Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
 
13239  Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
 
13240  Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
 
13241  Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
 
13242  Register SrwDestReg = RegInfo.createVirtualRegister(GPRC);
 
13245      (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
 
13272  if (ptrA != ZeroReg) {
13273    Ptr1Reg = RegInfo.createVirtualRegister(RC);
13274    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
13282  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
13283      .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
13286      .addImm(is8bit ? 28 : 27);
13287  if (!isLittleEndian)
13288    BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
13290        .addImm(is8bit ? 24 : 16);
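The RLWINM/XORI pair above computes where the byte or halfword lives inside its aligned word. The same arithmetic in plain C++ (a sketch of the shift only, not the mask setup that follows):

#include <cstdint>

unsigned partwordShift(uintptr_t Addr, bool Is8bit, bool LittleEndian) {
  unsigned Shift = (Addr & (Is8bit ? 3u : 2u)) * 8;           // lane -> bit offset
  return LittleEndian ? Shift : Shift ^ (Is8bit ? 24u : 16u); // BE mirrors lanes
}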
 
13292    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
13297    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
13307    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
13311  BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
13316  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
13320    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
13323  BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
13330    Register SReg = RegInfo.createVirtualRegister(GPRC);
13331    Register CrReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
13335    unsigned ValueReg = SReg;
13336    unsigned CmpReg = Incr2Reg;
13337    if (CmpOpcode == PPC::CMPW) {
13338      ValueReg = RegInfo.createVirtualRegister(GPRC);
13339      BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
13342      Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
13343      BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
13345      ValueReg = ValueSReg;
13377      .addImm(is8bit ? 24 : 16)
 
 
13398  Register DstReg = MI.getOperand(0).getReg();
13400  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
13401  Register mainDstReg = MRI.createVirtualRegister(RC);
13402  Register restoreDstReg = MRI.createVirtualRegister(RC);
13405  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
13406         "Invalid Pointer Size!");
13454  Register LabelReg = MRI.createVirtualRegister(PtrRC);
13455  Register BufReg = MI.getOperand(1).getReg();
 
13457  if (Subtarget.is64BitELFABI()) {
 
13470    BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
 
13472    BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
 
13475                TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
 
13498              TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
 
13501  if (Subtarget.isPPC64()) {
 
13519          TII->get(PPC::PHI), DstReg)
 
13523  MI.eraseFromParent();
 
 
13537  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
 
13538         "Invalid Pointer Size!");
 
13541    (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
 
13544  unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
 
13545  unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
 
13559  Register BufReg = MI.getOperand(0).getReg();
 
13564  if (PVT == MVT::i64) {
 
13576  if (PVT == MVT::i64) {
 
13588  if (PVT == MVT::i64) {
 
13600  if (PVT == MVT::i64) {
 
13612  if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
 
13622          TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
 
13625  MI.eraseFromParent();
 
 
13641         "Unexpected stack alignment");
 
13645  unsigned StackProbeSize =
 
13648  StackProbeSize &= ~(StackAlign - 1);
 
13649  return StackProbeSize ? StackProbeSize : StackAlign;
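The clamp above, restated: the probe interval is rounded down to a multiple of the stack alignment, and a degenerate result falls back to probing every StackAlign bytes.

// StackAlign is a power of two, so the AND clears the low bits.
unsigned probeSize(unsigned Requested, unsigned StackAlign) {
  unsigned Size = Requested & ~(StackAlign - 1);
  return Size ? Size : StackAlign;
}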
 
 
13661  const bool isPPC64 = Subtarget.isPPC64();
 
13693  MF->insert(MBBIter, TestMBB);
13694  MF->insert(MBBIter, BlockMBB);
13695  MF->insert(MBBIter, TailMBB);
13700  Register DstReg = MI.getOperand(0).getReg();
13701  Register NegSizeReg = MI.getOperand(1).getReg();
13703  Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
13704  Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
13705  Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
13711  if (!MRI.hasOneNonDBGUse(NegSizeReg))
 
13713        isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
 
13719    ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
13720                       : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
13722      .addDef(ActualNegSizeReg)
13724      .add(MI.getOperand(2))
13725      .add(MI.getOperand(3));
13731      .addReg(ActualNegSizeReg);
13734  int64_t NegProbeSize = -(int64_t)ProbeSize;
13736  Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
13738    Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
13740        .addImm(NegProbeSize >> 16);
13744        .addImm(NegProbeSize & 0xFFFF);
13751    Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
13753        .addReg(ActualNegSizeReg)
13755    Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
13759    Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
13762        .addReg(ActualNegSizeReg);
13771    Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
13772    BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
13797      MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
13799          TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
13800          MaxCallFrameSizeReg)
13801      .add(MI.getOperand(2))
13802      .add(MI.getOperand(3));
13803  BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
13805      .addReg(MaxCallFrameSizeReg);
 
13811  MBB->addSuccessor(TestMBB);
 
13814  MI.eraseFromParent();
 
13816  ++NumDynamicAllocaProbed;
 
 
13821  switch (MI.getOpcode()) {
 
13822  case PPC::SELECT_CC_I4:
 
13823  case PPC::SELECT_CC_I8:
 
13824  case PPC::SELECT_CC_F4:
 
13825  case PPC::SELECT_CC_F8:
 
13826  case PPC::SELECT_CC_F16:
 
13827  case PPC::SELECT_CC_VRRC:
 
13828  case PPC::SELECT_CC_VSFRC:
 
13829  case PPC::SELECT_CC_VSSRC:
 
13830  case PPC::SELECT_CC_VSRC:
 
13831  case PPC::SELECT_CC_SPE4:
 
13832  case PPC::SELECT_CC_SPE:
 
 
13840  switch (MI.getOpcode()) {
 
13841  case PPC::SELECT_I4:
 
13842  case PPC::SELECT_I8:
 
13843  case PPC::SELECT_F4:
 
13844  case PPC::SELECT_F8:
 
13845  case PPC::SELECT_F16:
 
13846  case PPC::SELECT_SPE:
 
13847  case PPC::SELECT_SPE4:
 
13848  case PPC::SELECT_VRRC:
 
13849  case PPC::SELECT_VSFRC:
 
13850  case PPC::SELECT_VSSRC:
 
13851  case PPC::SELECT_VSRC:
 
 
13861  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
13862      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
13863    if (Subtarget.is64BitELFABI() &&
13864        MI.getOpcode() == TargetOpcode::PATCHPOINT &&
13865        !Subtarget.isUsingPCRelativeCalls()) {
13877  if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
13878      MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
13880  } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
13881             MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
13895  if (Subtarget.hasISEL() &&
13896      (MI.getOpcode() == PPC::SELECT_CC_I4 ||
13897       MI.getOpcode() == PPC::SELECT_CC_I8 ||
13898       MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8)) {
13900    if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
13901        MI.getOpcode() == PPC::SELECT_CC_I8)
13902      Cond.push_back(MI.getOperand(4));
13905    Cond.push_back(MI.getOperand(1));
13908    TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
13909                      MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
13925    F->insert(It, copy0MBB);
13926    F->insert(It, sinkMBB);
13935    unsigned CallFrameSize = TII->getCallFrameSizeAt(MI);
13950          .addReg(MI.getOperand(1).getReg())
13953      unsigned SelectPred = MI.getOperand(4).getImm();
13956          .addReg(MI.getOperand(1).getReg())
13973        .addReg(MI.getOperand(3).getReg())
13975        .addReg(MI.getOperand(2).getReg())
 
13977  } else if (MI.getOpcode() == PPC::ReadTB) {
13993    F->insert(It, readMBB);
13994    F->insert(It, sinkMBB);
14005    Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
14013    Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
14015    BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
 
14025  } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
14027  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
14029  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
14031  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
14034  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
14036  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
14038  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
14040  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
14043  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
14045  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
14047  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
14049  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
14052  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
14054  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
14056  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
14058  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
14061  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
14063  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
14065  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
14067  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
14070  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
14072  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
14074  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
14076  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
14079  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
14081  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
14083  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
14085  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
14088  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
14090  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
14092  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
14094  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
14097  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
14099  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
14101  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
14103  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
14106  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
14108  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
14110  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
14112  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
14115  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
14117  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
14119  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
14121  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
14123  else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
14124           MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
14125           (Subtarget.hasPartwordAtomics() &&
14126            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
14127           (Subtarget.hasPartwordAtomics() &&
14128            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
14129    bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
 
14131    auto LoadMnemonic = PPC::LDARX;
14132    auto StoreMnemonic = PPC::STDCX;
14133    switch (MI.getOpcode()) {
14136    case PPC::ATOMIC_CMP_SWAP_I8:
14137      LoadMnemonic = PPC::LBARX;
14138      StoreMnemonic = PPC::STBCX;
14139      assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
14141    case PPC::ATOMIC_CMP_SWAP_I16:
14142      LoadMnemonic = PPC::LHARX;
14143      StoreMnemonic = PPC::STHCX;
14144      assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
14146    case PPC::ATOMIC_CMP_SWAP_I32:
14147      LoadMnemonic = PPC::LWARX;
14148      StoreMnemonic = PPC::STWCX;
14150    case PPC::ATOMIC_CMP_SWAP_I64:
14151      LoadMnemonic = PPC::LDARX;
14152      StoreMnemonic = PPC::STDCX;
14159    Register CrReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
14160    Register oldval = MI.getOperand(3).getReg();
14161    Register newval = MI.getOperand(4).getReg();
14167    F->insert(It, loop1MBB);
14168    F->insert(It, loop2MBB);
14169    F->insert(It, exitMBB);
14190    BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), CrReg)
 
14216  } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
14217             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
14221    bool is64bit = Subtarget.isPPC64();
14222    bool isLittleEndian = Subtarget.isLittleEndian();
14223    bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
14228    Register oldval = MI.getOperand(3).getReg();
14229    Register newval = MI.getOperand(4).getReg();
14235    F->insert(It, loop1MBB);
14236    F->insert(It, loop2MBB);
14237    F->insert(It, exitMBB);
14244        is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
14247    Register PtrReg = RegInfo.createVirtualRegister(RC);
14248    Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
14250        isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
14251    Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
14252    Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
14253    Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
14254    Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
14255    Register MaskReg = RegInfo.createVirtualRegister(GPRC);
14256    Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
14257    Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
14258    Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
14259    Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
14260    Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
14262    Register TmpReg = RegInfo.createVirtualRegister(GPRC);
14263    Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
14264    Register CrReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
14295    if (ptrA != ZeroReg) {
14296      Ptr1Reg = RegInfo.createVirtualRegister(RC);
14297      BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
14306    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
14307        .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
14310        .addImm(is8bit ? 28 : 27);
14311    if (!isLittleEndian)
14312      BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
14314          .addImm(is8bit ? 24 : 16);
14316      BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
14321      BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
14326    BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
14329    BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
14336      BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
14340    BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
14343    BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
14346    BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
14351    BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
14368    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
14392  } else if (MI.getOpcode() == PPC::FADDrtz) {
14402    Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
14417    auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
14425  } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
14426             MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
14427             MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
14428             MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
14429    unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
14430                       MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
14433    bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
14434                 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
14437    Register Dest = RegInfo.createVirtualRegister(
14438        Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
14442        .addReg(MI.getOperand(1).getReg())
14445            MI.getOperand(0).getReg())
14446        .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
14447  } else if (MI.getOpcode() == PPC::TCHECK_RET) {
14450    Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
14453            MI.getOperand(0).getReg())
 
14455  } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
14457    unsigned Imm = MI.getOperand(1).getImm();
14460            MI.getOperand(0).getReg())
14462  } else if (MI.getOpcode() == PPC::SETRNDi) {
14464    Register OldFPSCRReg = MI.getOperand(0).getReg();
14467    if (MRI.use_empty(OldFPSCRReg))
14468      BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), OldFPSCRReg);
14470      BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
14481    unsigned Mode = MI.getOperand(1).getImm();
14482    BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
14486    BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
 
14489  } else if (MI.getOpcode() == PPC::SETRND) {
 
14497    auto copyRegFromG8RCOrF8RC = [&](unsigned DestReg, unsigned SrcReg) {
14498      if (Subtarget.hasDirectMove()) {
14499        BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
14503        unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
14506        if (RC == &PPC::F8RCRegClass) {
14508          assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
14509                 "Unsupported RegClass.");
14511          StoreOp = PPC::STFD;
14515          assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
14516                 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
14517                 "Unsupported RegClass.");
14550    Register OldFPSCRReg = MI.getOperand(0).getReg();
14553    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
14565    Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
14567    copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
14569    Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
14570    Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
14575    BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
14576    BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
14581    Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
14582    BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
14588    Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
14589    copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
 
14598  } else if (MI.getOpcode() == PPC::SETFLM) {
14602    Register OldFPSCRReg = MI.getOperand(0).getReg();
14603    if (MRI.use_empty(OldFPSCRReg))
14604      BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::IMPLICIT_DEF), OldFPSCRReg);
14606      BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);
14609    Register NewFPSCRReg = MI.getOperand(1).getReg();
14615  } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
14616             MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
14618  } else if (MI.getOpcode() == PPC::SPLIT_QUADWORD) {
14625        .addUse(Src, 0, PPC::sub_gp8_x1);
14628        .addUse(Src, 0, PPC::sub_gp8_x0);
14629  } else if (MI.getOpcode() == PPC::LQX_PSEUDO ||
14630             MI.getOpcode() == PPC::STQX_PSEUDO) {
14636        F->getRegInfo().createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);
14642            MI.getOpcode() == PPC::LQX_PSEUDO ? TII->get(PPC::LQ)
14643                                              : TII->get(PPC::STQ))
14651  MI.eraseFromParent();
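Among the pseudos expanded above, SETRNDi has a direct user-level spelling: Clang's PowerPC __builtin_setrnd with a constant mode (0 = nearest, 1 = toward zero, 2 = +infinity, 3 = -infinity) selects it, and the expansion is an optional MFFS for the old value plus two MTFSB0/MTFSB1 writes to the FPSCR RN bits. A hedged usage sketch:

// Returns the previous FPSCR contents as a double bit pattern.
double setRoundTowardZero() { return __builtin_setrnd(1); }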
 
 
14664  int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
 
14667  return RefinementSteps;
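Why one step suffices with hasRecipPrec(): each Newton-Raphson refinement roughly doubles the number of correct bits, so a more accurate hardware fre/fres estimate needs a single step where the older low-precision estimate needs three. One reciprocal step, as the generic combiner materializes it:

// Refine an estimate X0 of 1/B: X1 = X0 * (2 - B * X0).
double refineRecip(double B, double X0) {
  return X0 * (2.0 - B * X0);
}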
 
 
14673  EVT VT = Op.getValueType();
14676       ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())))
14700PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op,
14703  EVT VT = Op.getValueType();
14704  if (VT != MVT::f64 &&
14705      ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX()))
14712                                           int Enabled, int &RefinementSteps,
14713                                           bool &UseOneConstNR,
14714                                           bool Reciprocal) const {
14716  if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
14717      (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
14718      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
14719      (VT == MVT::v2f64 && Subtarget.hasVSX())) {
14725    UseOneConstNR = !Subtarget.needsTwoConstNR();
14733                                            int &RefinementSteps) const {
 
14735  if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
 
14736      (VT == MVT::f64 && Subtarget.hasFRE()) ||
 
14737      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
 
14738      (VT == MVT::v2f64 && Subtarget.hasVSX())) {
 
14757  switch (Subtarget.getCPUDirective()) {
 
14784                            unsigned Bytes, int Dist,
14798    if (FS != BFS || FS != (int)Bytes) return false;
14803  int64_t Offset1 = 0, Offset2 = 0;
14806  if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
14816  if (isGA1 && isGA2 && GV1 == GV2)
14817    return Offset1 == (Offset2 + Dist*Bytes);
 
 
14824                            unsigned Bytes, int Dist,
14827    EVT VT = LS->getMemoryVT();
14834    switch (N->getConstantOperandVal(1)) {
14835    default: return false;
 
14836    case Intrinsic::ppc_altivec_lvx:
 
14837    case Intrinsic::ppc_altivec_lvxl:
 
14838    case Intrinsic::ppc_vsx_lxvw4x:
 
14839    case Intrinsic::ppc_vsx_lxvw4x_be:
 
14842    case Intrinsic::ppc_vsx_lxvd2x:
 
14843    case Intrinsic::ppc_vsx_lxvd2x_be:
 
14846    case Intrinsic::ppc_altivec_lvebx:
 
14849    case Intrinsic::ppc_altivec_lvehx:
 
14852    case Intrinsic::ppc_altivec_lvewx:
 
14862    switch (N->getConstantOperandVal(1)) {
14863    default: return false;
 
14864    case Intrinsic::ppc_altivec_stvx:
 
14865    case Intrinsic::ppc_altivec_stvxl:
 
14866    case Intrinsic::ppc_vsx_stxvw4x:
 
14869    case Intrinsic::ppc_vsx_stxvd2x:
 
14872    case Intrinsic::ppc_vsx_stxvw4x_be:
 
14875    case Intrinsic::ppc_vsx_stxvd2x_be:
 
14878    case Intrinsic::ppc_altivec_stvebx:
 
14881    case Intrinsic::ppc_altivec_stvehx:
 
14884    case Intrinsic::ppc_altivec_stvewx:
 
 
14901  SDValue Chain = LD->getChain();
14902  EVT VT = LD->getMemoryVT();
14911  while (!Queue.empty()) {
14912    SDNode *ChainNext = Queue.pop_back_val();
14913    if (!Visited.insert(ChainNext).second)
14920      if (!Visited.count(ChainLD->getChain().getNode()))
14921        Queue.push_back(ChainLD->getChain().getNode());
14923      for (const SDUse &O : ChainNext->ops())
14924        if (!Visited.count(O.getNode()))
14925          Queue.push_back(O.getNode());
14927      LoadRoots.insert(ChainNext);
14938  for (SDNode *I : LoadRoots) {
14939    Queue.push_back(I);
14941    while (!Queue.empty()) {
14942      SDNode *LoadRoot = Queue.pop_back_val();
14943      if (!Visited.insert(LoadRoot).second)
14955          Queue.push_back(U);
 
 
14988  auto Final = Shifted;
 
 
14999                                                  DAGCombinerInfo &DCI) const {
15002  SelectionDAG &DAG = DCI.DAG;
15007  if (!DCI.isAfterLegalizeDAG())
15012  for (const SDNode *U : N->users())
15017  auto OpSize = N->getOperand(0).getValueSizeInBits();
15021  if (OpSize < Size) {
 
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");

      N->getValueType(0) != MVT::i1)

  if (N->getOperand(0).getValueType() != MVT::i32 &&
      N->getOperand(0).getValueType() != MVT::i64)

    unsigned OpBits = N->getOperand(0).getValueSizeInBits();

        return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)

  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR  &&
      N->getOperand(0).getOpcode() != ISD::XOR &&

      N->getOperand(1).getOpcode() != ISD::AND &&
      N->getOperand(1).getOpcode() != ISD::OR  &&
      N->getOperand(1).getOpcode() != ISD::XOR &&

  SmallPtrSet<SDNode *, 16> Visited;

  for (unsigned i = 0; i < 2; ++i) {

          N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||

  while (!BinOps.empty()) {

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {

  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {

    for (const SDNode *User : Inputs[i].getNode()->users()) {
      if (User != N && !Visited.count(User))

        if (User->getOperand(0) == Inputs[i])

        if (User->getOperand(0) == Inputs[i] ||
            User->getOperand(1) == Inputs[i])

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (const SDNode *User : PromOps[i].getNode()->users()) {
      if (User != N && !Visited.count(User))

        if (User->getOperand(0) == PromOps[i])

        if (User->getOperand(0) == PromOps[i] ||
            User->getOperand(1) == PromOps[i])

  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

        PromOpHandles.emplace_front(PromOp);

    default:             C = 0; break;

      PromOpHandles.emplace_front(PromOp);

    for (unsigned i = 0; i < 2; ++i)

    return N->getOperand(0);
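
// DAGCombineTruncBoolExt and DAGCombineExtBoolTrunc (below) are mirror
// combines: the former re-promotes logical operations on i1 values that were
// computed in GPRs back into CR-bit form; the latter removes zext/sext of i1
// (and, on PPC64, i32) results by promoting the feeding and/or/xor network
// instead. The std::list of HandleSDNode keeps pending PromOps alive while
// nodes are mutated underneath them.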
 
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  if (N->getValueType(0) != MVT::i32 &&
      N->getValueType(0) != MVT::i64)

  if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
        (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))

  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR  &&
      N->getOperand(0).getOpcode() != ISD::XOR &&

  SmallPtrSet<SDNode *, 16> Visited;

  while (!BinOps.empty()) {

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {

  DenseMap<SDNode *, EVT> SelectTruncOp[2];

  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {

      if (User != N && !Visited.count(User))

        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));

        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));
        if (User->getOperand(1) == Inputs[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                    User->getOperand(1).getValueType()));

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {

      if (User != N && !Visited.count(User))

        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));

        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));
        if (User->getOperand(1) == PromOps[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                    User->getOperand(1).getValueType()));

  unsigned PromBits = N->getOperand(0).getValueSizeInBits();
  bool ReallyNeedsExt = false;

    for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {

        Inputs[i].getOperand(0).getValueSizeInBits();
      assert(PromBits < OpBits && "Truncation not to a smaller bit count?");

                                                        OpBits-PromBits))) ||

             (OpBits-(PromBits-1)))) {
        ReallyNeedsExt = true;

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {

    SDValue InSrc = Inputs[i].getOperand(0);

  while (!PromOpHandles.empty()) {

    PromOpHandles.pop_back();

    default:             C = 0; break;

      PromOpHandles.emplace_front(PromOp);

          (SelectTruncOp[1].count(PromOp.getNode()) &&

        PromOpHandles.emplace_front(PromOp);

    for (unsigned i = 0; i < 2; ++i) {

      auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
      if (SI0 != SelectTruncOp[0].end())

      auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
      if (SI1 != SelectTruncOp[1].end())

  if (!ReallyNeedsExt)
    return N->getOperand(0);

                                         N->getValueSizeInBits(0), PromBits),
                                       dl, N->getValueType(0)));

         "Invalid extension type");

      DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
 
                                        DAGCombinerInfo &DCI) const {

         "Should be called with a SETCC node");

      SelectionDAG &DAG = DCI.DAG;
      EVT VT = N->getValueType(0);
      EVT OpVT = LHS.getValueType();

  return DAGCombineTruncBoolExt(N, DCI);

      Op.getValueType() == MVT::f64;
 
 
combineElementTruncationToVectorTruncation(SDNode *N,
                                           DAGCombinerInfo &DCI) const {

         "Should be called with a BUILD_VECTOR node");

  SelectionDAG &DAG = DCI.DAG;

  SDValue FirstInput = N->getOperand(0);

         "The input operand must be an fp-to-int conversion.");

    bool IsSplat = true;

    EVT TargetVT = N->getValueType(0);
    for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
      SDValue NextOp = N->getOperand(i);

      if (NextConversion != FirstConversion)

      if (N->getOperand(i) != FirstInput)

    for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
      SDValue In = N->getOperand(i).getOperand(0);

          Ops.push_back(Trunc);

        Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));

    EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;

    return DAG.getNode(Opcode, dl, TargetVT, BV);
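
// This combine rewrites a BUILD_VECTOR whose elements are all the same kind
// of fp-to-int conversion into a single vector conversion of a BUILD_VECTOR
// of the fp sources, trading per-element scalar converts plus GPR-to-vector
// moves for one vector convert.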
 
  static const APInt BasePattern = APInt(128, 0x8000000000000000ULL) << 64;

  if (FullVal == BasePattern)
    return std::make_tuple(Uim, uint8_t{0});

  if (FullVal == APInt(128, 1))
    return std::make_tuple(Uim, uint8_t{127});

  return std::nullopt;
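
// Only two 128-bit constants are recognized here: the pattern with just bit
// 127 set is produced directly by lxvkq (shift amount 0), and the value 1 is
// that same pattern logically shifted right by 127, which the caller below
// emits as lxvkq followed by a 127-bit vsrq.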
 
 
         "Expected a BuildVectorSDNode in combineBVLoadsSpecialValue");

  EVT VT = Op.getValueType();
  if (!(VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v4i32 ||

  for (const SDValue &Operand : Op.getNode()->op_values()) {

  for (unsigned Index = 0; Index < NumElems; ++Index) {

    uint64_t ElemValue = C->getZExtValue();

      ElemValue &= ((1ULL << ElemBits) - 1);

        (IsLittleEndian) ? (Index * ElemBits) : (128 - (Index + 1) * ElemBits);

    APInt ElemAPInt(128, ElemValue);
    ElemAPInt <<= BitPos;

    FullVal |= ElemAPInt;

    const auto &[Uim, ShiftAmount] = *UIMOpt;

    if (ShiftAmount == 0) {

                     << "combineBVLoadsSpecialValue: Instruction Emitted ";
                 LxvkqInstr.dump());

    assert(ShiftAmount == 127 && "Unexpected lxvkq shift amount value");

        DAG.getMachineNode(PPC::VSRQ, Dl, VT, ShiftAmountVec, ShiftAmountVec),

                   << "\n combineBVLoadsSpecialValue: Instruction Emitted ";
 
         "Should be called with a BUILD_VECTOR node");

  if (!N->getValueType(0).getVectorElementType().isByteSized())

  bool InputsAreConsecutiveLoads = true;
  bool InputsAreReverseConsecutive = true;
  unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
  SDValue FirstInput = N->getOperand(0);
  bool IsRoundOfExtLoad = false;

  if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
      N->getNumOperands() == 1)

  if (!IsRoundOfExtLoad)

  for (int i = 1, e = N->getNumOperands(); i < e; ++i) {

    if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)

    SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :

    if (NextInput.getOpcode() != ISD::LOAD)

      IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);

      InputsAreConsecutiveLoads = false;

      InputsAreReverseConsecutive = false;

    if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)

  assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
         "The loads cannot be both consecutive and reverse consecutive.");

  if (InputsAreConsecutiveLoads) {
    assert(FirstLoad && "Input needs to be a LoadSDNode.");

    ReturnSDVal = WideLoad;
  } else if (InputsAreReverseConsecutive) {

    assert(LastLoad && "Input needs to be a LoadSDNode.");

    for (int i = N->getNumOperands() - 1; i >= 0; i--)

  for (auto *LD : InputLoads)

  return ReturnSDVal;
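
// A BUILD_VECTOR whose elements are loads from consecutive addresses becomes
// one wide vector load; if the addresses run backwards, the wide load is
// anchored at the last (lowest-addressed) element and the elements are
// reversed afterwards. FP_ROUND-of-extending-load inputs are handled by
// looking through the round.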
 
 
  unsigned NumElems = Input.getValueType().getVectorNumElements();

  for (unsigned i = 0; i < N->getNumOperands(); i++) {

      ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;

      ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;

    CorrectElems = CorrectElems >> 8;
    Elems = Elems >> 8;

  EVT VT = N->getValueType(0);

                               Input.getValueType().getVectorElementType(),

  auto isSExtOfVecExtract = [&](SDValue Op) -> bool {

    Elems = Elems << 8;

  for (unsigned i = 0; i < N->getNumOperands(); i++) {
    if (!isSExtOfVecExtract(N->getOperand(i))) {

  int TgtElemArrayIdx;
  int InputSize = Input.getValueType().getScalarSizeInBits();
  int OutputSize = N->getValueType(0).getScalarSizeInBits();
  if (InputSize + OutputSize == 40)
    TgtElemArrayIdx = 0;
  else if (InputSize + OutputSize == 72)
    TgtElemArrayIdx = 1;
  else if (InputSize + OutputSize == 48)
    TgtElemArrayIdx = 2;
  else if (InputSize + OutputSize == 80)
    TgtElemArrayIdx = 3;
  else if (InputSize + OutputSize == 96)
    TgtElemArrayIdx = 4;

  uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];

                     ? CorrectElems & 0x0F0F0F0F0F0F0F0F
                     : CorrectElems & 0xF0F0F0F0F0F0F0F0;
  if (Elems != CorrectElems) {
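
// The InputSize + OutputSize sums uniquely identify the extension shape being
// matched: 8+32=40 (i8 to i32), 8+64=72 (i8 to i64), 16+32=48 (i16 to i32),
// 16+64=80 (i16 to i64) and 32+64=96 (i32 to i64). Each selects a packed
// per-nibble element pattern in TargetElems that the collected extract
// indices must match, with low or high nibbles chosen by endianness.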
 
 
  if (N->getValueType(0) != MVT::v1i128)

  SDValue Operand = N->getOperand(0);

  EVT MemoryType = LD->getMemoryVT();

  bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
                     MemoryType == MVT::i32 || MemoryType == MVT::i64;

  if (!ValidLDType ||

      LD->getChain(), LD->getBasePtr(),

                                 DAG.getVTList(MVT::v1i128, MVT::Other),
                                 LoadOps, MemoryType, LD->getMemOperand());

                                                 DAGCombinerInfo &DCI) const {

         "Should be called with a BUILD_VECTOR node");

  SelectionDAG &DAG = DCI.DAG;

  if (!Subtarget.hasVSX())

    SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);

  if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {

  if (Subtarget.isISA3_1()) {

  if (N->getValueType(0) != MVT::v2f64)

  if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())

  if (!Ext1Op || !Ext2Op)

  if (FirstElem == 0 && SecondElem == 1)
    SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
  else if (FirstElem == 2 && SecondElem == 3)
    SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;

  return DAG.getNode(NodeType, dl, MVT::v2f64,
 
                                              DAGCombinerInfo &DCI) const {

         "Need an int -> FP conversion node here");

  SelectionDAG &DAG = DCI.DAG;

  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)

  if (!Op.getOperand(0).getValueType().isSimple())

  if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
      Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))

  SDValue FirstOperand(Op.getOperand(0));
  bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
    (FirstOperand.getValueType() == MVT::i8 ||
     FirstOperand.getValueType() == MVT::i16);
  if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {

    bool DstDouble = Op.getValueType() == MVT::f64;
    unsigned ConvOp = Signed ?

    SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };

                                         Ops, MVT::i8, LDN->getMemOperand());

      SDValue ExtOps[] = { Ld, WidthConst };

      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);

      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);

  if (Op.getOperand(0).getValueType() == MVT::i32)

         "UINT_TO_FP is supported only with FPCVT");

  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)

  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)

       Subtarget.hasFPCVT()) ||

    SDValue Src = Op.getOperand(0).getOperand(0);
    if (Src.getValueType() == MVT::f32) {
      Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
      DCI.AddToWorklist(Src.getNode());
    } else if (Src.getValueType() != MVT::f64) {

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {

      DCI.AddToWorklist(FP.getNode());
 
  switch (N->getOpcode()) {

    Chain = LD->getChain();
    Base = LD->getBasePtr();
    MMO = LD->getMemOperand();

  MVT VecTy = N->getValueType(0).getSimpleVT();

  Chain = Load.getValue(1);

  if (VecTy != MVT::v2f64) {

  switch (N->getOpcode()) {

    Chain = ST->getChain();
    Base = ST->getBasePtr();
    MMO = ST->getMemOperand();

  SDValue Src = N->getOperand(SrcOpnd);
  MVT VecTy = Src.getValueType().getSimpleVT();

  if (VecTy != MVT::v2f64) {
    Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);

                             DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);

                                          StoreOps, VecTy, MMO);
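
// On little-endian VSX subtargets these expansions turn vector loads and
// stores into lxvd2x/stxvd2x plus an xxswapd-style doubleword swap; the swap
// is always performed in v2f64, which is why other vector types are bitcast
// around it.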
 
 
                                               DAGCombinerInfo &DCI) const {

  unsigned Opcode = N->getOperand(1).getOpcode();

  bool Strict = N->getOperand(1)->isStrictFPOpcode();

         && "Not a FP_TO_INT Instruction!");

  SDValue Val = N->getOperand(1).getOperand(Strict ? 1 : 0);
  EVT Op1VT = N->getOperand(1).getValueType();

  if (!Subtarget.hasVSX() || !Subtarget.hasFPCVT() || !isTypeLegal(ResVT))

  bool ValidTypeForStoreFltAsInt =
        (Op1VT == MVT::i32 || (Op1VT == MVT::i64 && Subtarget.isPPC64()) ||
         (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));

  if (ResVT == MVT::ppcf128 || (ResVT == MVT::f128 && !Subtarget.hasP9Vector()))

  if ((Op1VT != MVT::i64 && !Subtarget.hasP8Vector()) ||

  SDValue Ops[] = {N->getOperand(0), Val, N->getOperand(2),

  bool PrevElemFromFirstVec = Mask[0] < NumElts;
  for (int i = 1, e = Mask.size(); i < e; i++) {
    if (PrevElemFromFirstVec && Mask[i] < NumElts)

    if (!PrevElemFromFirstVec && Mask[i] >= NumElts)

    PrevElemFromFirstVec = !PrevElemFromFirstVec;

  for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
    FirstOp = Op.getOperand(i);

  for (int i = 1, e = Op.getNumOperands(); i < e; i++)
    if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())

  if (Op.getOpcode() != ISD::BITCAST)

  Op = Op.getOperand(0);
 
 
    int RHSFirstElt, int RHSLastElt, int HalfVec, unsigned LHSNumValidElts,
    unsigned RHSNumValidElts, const PPCSubtarget &Subtarget) {

      Subtarget.isLittleEndian() ? HalfVec : HalfVec - LHSNumValidElts;

      Subtarget.isLittleEndian() ? HalfVec : HalfVec - RHSNumValidElts;

  for (int I = 0, E = ShuffV.size(); I < E; ++I) {
    int Idx = ShuffV[I];
    if (Idx >= LHSFirstElt && Idx <= LHSLastElt)
      ShuffV[I] += LHSEltFixup;
    else if (Idx >= RHSFirstElt && Idx <= RHSLastElt)
      ShuffV[I] += RHSEltFixup;

  SDLoc dl(OrigSToV);

         "Expecting a SCALAR_TO_VECTOR here");

          "Cannot produce a permuted scalar_to_vector for one element vector");

      unsigned ResultInElt = NumElts / 2;

                                 int HalfVec, int LHSLastElementDefined,
                                 int RHSLastElementDefined) {
  for (int Index : ShuffV) {

    if ((LHSLastElementDefined >= 0) && (Index < HalfVec) &&
        (Index > LHSLastElementDefined))

    if ((RHSLastElementDefined >= 0) &&
        (Index > HalfVec + RHSLastElementDefined))

    int ScalarSize, uint64_t ShuffleEltWidth, unsigned &NumValidElts,
    int FirstElt, int &LastElt, SDValue VecShuffOperand, SDValue SToVNode,

  LastElt = (uint64_t)ScalarSize > ShuffleEltWidth
                ? ScalarSize / ShuffleEltWidth - 1 + FirstElt

  if (SToVPermuted.getValueType() != VecShuffOperandType)
    SToVPermuted = DAG.getBitcast(VecShuffOperandType, SToVPermuted);
  return SToVPermuted;
 
 
  int NumElts = LHS.getValueType().getVectorNumElements();

  bool IsLittleEndian = Subtarget.isLittleEndian();

  if (!Subtarget.hasDirectMove())

  SmallVector<int, 16> ShuffV(Mask);

  if (SToVLHS || SToVRHS) {

    int ShuffleNumElts = ShuffV.size();
    int HalfVec = ShuffleNumElts / 2;

    unsigned LHSNumValidElts = HalfVec;
    unsigned RHSNumValidElts = HalfVec;

    int LHSFirstElt = 0;
    int RHSFirstElt = ShuffleNumElts;
    int LHSLastElt = -1;
    int RHSLastElt = -1;

    int LHSScalarSize = 0;
    int RHSScalarSize = 0;

      if (!IsLittleEndian && LHSScalarSize >= 64)

      if (!IsLittleEndian && RHSScalarSize >= 64)

    if (LHSScalarSize != 0)

          LHSScalarSize, ShuffleEltWidth, LHSNumValidElts, LHSFirstElt,
          LHSLastElt, LHS, SToVLHS, DAG, Subtarget);
    if (RHSScalarSize != 0)

          RHSScalarSize, ShuffleEltWidth, RHSNumValidElts, RHSFirstElt,
          RHSLastElt, RHS, SToVRHS, DAG, Subtarget);

        ShuffV, LHSFirstElt, LHSLastElt, RHSFirstElt, RHSLastElt, HalfVec,
        LHSNumValidElts, RHSNumValidElts, Subtarget);

  if (IsLittleEndian) {

    if (Mask[0] < NumElts)
      for (int i = 1, e = Mask.size(); i < e; i += 2) {

        ShuffV[i] = (ShuffV[i - 1] >= 0 ? ShuffV[i - 1] : 0) + NumElts;

      for (int i = 0, e = Mask.size(); i < e; i += 2) {

        ShuffV[i] = (ShuffV[i + 1] >= 0 ? ShuffV[i + 1] : 0) + NumElts;

    if (Mask[0] < NumElts)
      for (int i = 0, e = Mask.size(); i < e; i += 2) {

        ShuffV[i] = ShuffV[i + 1] >= 0 ? ShuffV[i + 1] - NumElts : 0;

      for (int i = 1, e = Mask.size(); i < e; i += 2) {

        ShuffV[i] = ShuffV[i - 1] >= 0 ? ShuffV[i - 1] - NumElts : 0;

  if (IsLittleEndian)
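
// When a shuffle operand is a SCALAR_TO_VECTOR, the scalar lands in a
// different lane than the mask assumes, so the node is replaced by a
// permuted direct-move form and mask entries referring to it are fixed up.
// The even/odd rewrites afterwards pair adjacent mask entries so each pair
// pulls from opposite source vectors, putting alternating masks into a form
// the vector merge instructions can implement on either endianness.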
 
                                                DAGCombinerInfo &DCI) const {

        "Not a reverse memop pattern!");

  auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {

    auto I = Mask.rbegin();
    auto E = Mask.rend();

    for (; I != E; ++I) {

  SelectionDAG &DAG = DCI.DAG;

  if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())

  if (!Subtarget.hasP9Vector())

  if (!IsElementReverse(SVN))

  if (LSBase->getOpcode() == ISD::LOAD) {

    for (SDUse &Use : LSBase->uses())
      if (Use.getResNo() == 0 &&

  if (LSBase->getOpcode() == ISD::STORE) {

  if (IntrinsicID == Intrinsic::ppc_stdcx)

  else if (IntrinsicID == Intrinsic::ppc_stwcx)

  else if (IntrinsicID == Intrinsic::ppc_sthcx)

  else if (IntrinsicID == Intrinsic::ppc_stbcx)
 
 
  switch (N->getOpcode()) {

    return combineADD(N, DCI);

    return combineSHL(N, DCI);

    return combineSRA(N, DCI);

    return combineSRL(N, DCI);

    return combineMUL(N, DCI);

    return combineFMALike(N, DCI);

        return N->getOperand(0);

        return N->getOperand(0);

        return N->getOperand(0);

    return DAGCombineExtBoolTrunc(N, DCI);

    return combineTRUNCATE(N, DCI);

    if (SDValue CSCC = combineSetCC(N, DCI))

    return DAGCombineTruncBoolExt(N, DCI);

    return combineFPToIntToFP(N, DCI);
 
    EVT Op1VT = N->getOperand(1).getValueType();
    unsigned Opcode = N->getOperand(1).getOpcode();

      SDValue Val = combineStoreFPToInt(N, DCI);

        N->getOperand(1).getNode()->hasOneUse() &&
        (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {

      SDValue BSwapOp = N->getOperand(1).getOperand(0);

      if (Op1VT.bitsGT(mVT)) {

        if (Op1VT == MVT::i64)

        N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)

                                   ST->getBasePtr(), ST->getOffset(), MemVT,
                                   ST->getMemOperand(), ST->getAddressingMode(),

      return ST->isUnindexed()

      if (Subtarget.needsSwapsForVSXMemOps() &&
          (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
           StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
 
    auto ReplaceTwoFloatLoad = [&]() {
      if (VT != MVT::i64)

      if (!LD->hasNUsesOfValue(2, 0))

      auto UI = LD->user_begin();
      while (UI.getUse().getResNo() != 0) ++UI;

      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *RightShift = *UI;

      if (RightShift->getOpcode() != ISD::SRL ||

          RightShift->getConstantOperandVal(1) != 32 ||
          !RightShift->hasOneUse())

      SDNode *Trunc2 = *RightShift->user_begin();

      if (Bitcast->getOpcode() != ISD::BITCAST ||
          Bitcast->getValueType(0) != MVT::f32)

      if (Bitcast2->getOpcode() != ISD::BITCAST ||

      if (Subtarget.isLittleEndian())

      SDValue BasePtr = LD->getBasePtr();
      if (LD->isIndexed()) {

               "Non-pre-inc AM on PPC?");

      SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
                                      LD->getPointerInfo(), LD->getAlign(),
                                      MMOFlags, LD->getAAInfo());

          LD->getPointerInfo().getWithOffset(4),

      if (LD->isIndexed()) {
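
// ReplaceTwoFloatLoad matches an i64 load whose two halves are extracted
// (a trunc plus an srl-by-32-then-trunc) and bitcast to f32, and replaces it
// with two f32 loads at offsets 0 and 4, avoiding a GPR round trip for what
// is really a pair of float loads.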
 
    EVT MemVT = LD->getMemoryVT();

    if (LD->isUnindexed() && VT.isVector() &&

          !Subtarget.hasP8Vector() &&
          (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
           VT == MVT::v4f32))) &&
        LD->getAlign() < ABIAlignment) {

      SDValue Chain = LD->getChain();

      bool isLittleEndian = Subtarget.isLittleEndian();

      MVT PermCntlTy, PermTy, LDTy;
      Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                            : Intrinsic::ppc_altivec_lvsl;
      IntrLD = Intrinsic::ppc_altivec_lvx;
      IntrPerm = Intrinsic::ppc_altivec_vperm;
      PermCntlTy = MVT::v16i8;
      PermTy = MVT::v4i32;

      SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };

                                BaseLoadOps, LDTy, BaseMMO);

      int IncValue = IncOffset;

      SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };

                                ExtraLoadOps, LDTy, ExtraMMO);

      if (isLittleEndian)

                                ExtraLoad, BaseLoad, PermCntl, DAG, dl);

                                BaseLoad, ExtraLoad, PermCntl, DAG, dl);

        Perm = Subtarget.hasAltivec()
                   ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
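
// Classic Altivec unaligned-load expansion: lvx ignores the low address bits,
// so two aligned loads bracketing the unaligned region are combined with
// vperm, using a permute control vector from lvsl (or lvsr with the load
// operands swapped on little-endian).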
 
      bool isLittleEndian = Subtarget.isLittleEndian();
      unsigned IID = N->getConstantOperandVal(0);
      Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                                           : Intrinsic::ppc_altivec_lvsl);
      if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {

                                      .zext(Add.getScalarValueSizeInBits()))) {
          SDNode *BasePtr = Add->getOperand(0).getNode();
          for (SDNode *U : BasePtr->users()) {

                U->getConstantOperandVal(0) == IID) {

          SDNode *BasePtr = Add->getOperand(0).getNode();
          for (SDNode *U : BasePtr->users()) {

                (Add->getConstantOperandVal(1) - U->getConstantOperandVal(1)) %

                    V->getConstantOperandVal(0) == IID) {

          (IID == Intrinsic::ppc_altivec_vmaxsw ||
           IID == Intrinsic::ppc_altivec_vmaxsh ||
           IID == Intrinsic::ppc_altivec_vmaxsb)) {
 
      switch (N->getConstantOperandVal(1)) {

      case Intrinsic::ppc_altivec_vsum4sbs:
      case Intrinsic::ppc_altivec_vsum4shs:
      case Intrinsic::ppc_altivec_vsum4ubs: {

          APInt APSplatBits, APSplatUndef;
          unsigned SplatBitSize;

              APSplatBits, APSplatUndef, SplatBitSize, HasAnyUndefs, 0,
              !Subtarget.isLittleEndian());

          if (BVNIsConstantSplat && APSplatBits == 0)

    case Intrinsic::ppc_vsx_lxvw4x:
    case Intrinsic::ppc_vsx_lxvd2x:

      if (Subtarget.needsSwapsForVSXMemOps())

    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (N->getConstantOperandVal(1)) {

      case Intrinsic::ppc_vsx_stxvw4x:
      case Intrinsic::ppc_vsx_stxvd2x:
 
    bool Is64BitBswapOn64BitTgt =
        Subtarget.isPPC64() && N->getValueType(0) == MVT::i64;

                               N->getOperand(0).hasOneUse();
    if (IsSingleUseNormalLd &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
         (Subtarget.hasLDBRX() && Is64BitBswapOn64BitTgt))) {

                                DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                                              MVT::i64 : MVT::i32, MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      if (N->getValueType(0) == MVT::i16)

        !IsSingleUseNormalLd)

    if (!LD->isSimple())

    SDValue BasePtr = LD->getBasePtr();

                             LD->getPointerInfo(), LD->getAlign());

        LD->getMemOperand(), 4, 4);

    if (Subtarget.isLittleEndian())

                    Hi.getOperand(0).getValue(1), Lo.getOperand(0).getValue(1));
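
// bswap(load) becomes a byte-reversed load (lhbrx/lwbrx, or ldbrx when
// available). For a 64-bit bswap-of-load without ldbrx, the value is instead
// loaded as two 32-bit byte-reversed halves at offsets 0 and 4 and rebuilt
// with a pair, with the half order chosen by endianness.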
 
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      SDNode *VCMPrecNode = nullptr;

      SDNode *LHSN = N->getOperand(0).getNode();

          VCMPrecNode = User;

      SDNode *FlagUser = nullptr;

           FlagUser == nullptr; ++UI) {
        assert(UI != VCMPrecNode->use_end() && "Didn't find user!");

        return SDValue(VCMPrecNode, 0);
 
    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);

    auto RHSAPInt = RHS->getAsAPIntVal();
    if (!RHSAPInt.isIntN(64))

    unsigned Val = RHSAPInt.getZExtValue();
    auto isImpossibleCompare = [&]() {

      if (Val != 0 && Val != 1) {

          return N->getOperand(0);

        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));

    unsigned StoreWidth = 0;

      if (SDValue Impossible = isImpossibleCompare())

      SDValue Ops[] = {LHS.getOperand(0), LHS.getOperand(2), LHS.getOperand(3),

          MemNode->getMemoryVT(), MemNode->getMemOperand());

      if (N->getOperand(0) == LHS.getValue(1))

                         DAG.getRegister(PPC::CR0, MVT::i32), N->getOperand(4),

      assert(isDot && "Can't compare against a vector result!");

      if (SDValue Impossible = isImpossibleCompare())

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };

      switch (LHS.getConstantOperandVal(1)) {

                         N->getOperand(4), CompNode.getValue(1));

    return DAGCombineBuildVector(N, DCI);
 
 
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())

  if ((VT != MVT::i32 && VT != MVT::i64) ||

  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countr_zero();
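
// Signed division by a power of two is emitted as an arithmetic right shift
// (srawi/sradi) whose carry-out feeds an addze to round toward zero, with a
// final negate for negative divisors; Lg2 is the shift amount.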
 
 
                                                      const APInt &DemandedElts,
                                                      unsigned Depth) const {

  switch (Op.getOpcode()) {

      Known.Zero = 0xFFFF0000;

    if (Op.getResNo() == 0) {

        Known.Zero = ~1ULL;

    switch (Op.getConstantOperandVal(0)) {

    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpequq_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtsq_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
    case Intrinsic::ppc_altivec_vcmpgtuq_p:

    switch (Op.getConstantOperandVal(1)) {

    case Intrinsic::ppc_load2r:

      Known.Zero = 0xFFFF0000;
 
 
  switch (Subtarget.getCPUDirective()) {

      if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())

    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)

        LoopSize += TII->getInstSizeInBytes(J);

    if (LoopSize > 16 && LoopSize <= 32)
 
 
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

  } else if (Constraint == "wc") {

  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws" ||
             Constraint == "wi" || Constraint == "ww") {

  Value *CallOperandVal = info.CallOperandVal;

  if (!CallOperandVal)

  else if ((StringRef(constraint) == "wa" ||

  switch (*constraint) {
 
 
std::pair<unsigned, const TargetRegisterClass *>

  if (Constraint.size() == 1) {

    switch (Constraint[0]) {

      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);

      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);

      if (Subtarget.hasSPE()) {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::GPRCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::SPERCRegClass);

        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::F4RCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::F8RCRegClass);

      if (Subtarget.hasAltivec() && VT.isVector())
        return std::make_pair(0U, &PPC::VRRCRegClass);
      else if (Subtarget.hasVSX())

        return std::make_pair(0U, &PPC::VFRCRegClass);

      return std::make_pair(0U, &PPC::CRRCRegClass);

  } else if (Constraint == "wc" && Subtarget.useCRBits()) {

    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "wi") &&
             Subtarget.hasVSX()) {

      return std::make_pair(0U, &PPC::VSRCRegClass);
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    return std::make_pair(0U, &PPC::VSFRCRegClass);
  } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);

      return std::make_pair(0U, &PPC::VSFRCRegClass);
  } else if (Constraint == "lr") {
    if (VT == MVT::i64)
      return std::make_pair(0U, &PPC::LR8RCRegClass);

      return std::make_pair(0U, &PPC::LRRCRegClass);

  if (Constraint[0] == '{' && Constraint[Constraint.size() - 1] == '}') {

    if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
      int VSNum = atoi(Constraint.data() + 3);
      assert(VSNum >= 0 && VSNum <= 63 &&
             "Attempted to access a vsr out of range");

        return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
      return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);

    if (Constraint.size() > 3 && Constraint[1] == 'f') {
      int RegNum = atoi(Constraint.data() + 2);
      if (RegNum > 31 || RegNum < 0)

      if (VT == MVT::f32 || VT == MVT::i32)
        return Subtarget.hasSPE()
                   ? std::make_pair(PPC::R0 + RegNum, &PPC::GPRCRegClass)
                   : std::make_pair(PPC::F0 + RegNum, &PPC::F4RCRegClass);
      if (VT == MVT::f64 || VT == MVT::i64)
        return Subtarget.hasSPE()
                   ? std::make_pair(PPC::S0 + RegNum, &PPC::SPERCRegClass)
                   : std::make_pair(PPC::F0 + RegNum, &PPC::F8RCRegClass);

  std::pair<unsigned, const TargetRegisterClass *> R =

  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))

    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                            PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

  if (!R.second && StringRef("{cc}").equals_insensitive(Constraint)) {
    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;

  if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI()) {
    if (((R.first >= PPC::V20 && R.first <= PPC::V31) ||
         (R.first >= PPC::VF20 && R.first <= PPC::VF31)) &&
        (R.second == &PPC::VSRCRegClass || R.second == &PPC::VSFRCRegClass))
      errs() << "warning: vector registers 20 to 32 are reserved in the "
                "default AIX AltiVec ABI and cannot be used\n";
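
// Explicit "{vsN}" constraints map onto the unified VSX file: vs0-vs31 alias
// the FPRs (hence PPC::VSL0 + VSNum) while vs32-vs63 alias the Altivec VRs
// (hence PPC::V0 + VSNum - 32). "{fN}" resolves to SPE register classes
// instead of FPRs when the subtarget uses SPE.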
 
 
                                                     std::vector<SDValue> &Ops,

  if (Constraint.size() > 1)

  char Letter = Constraint[0];

  EVT TCVT = MVT::i64;

  if (Result.getNode()) {
    Ops.push_back(Result);

  if (I.getNumOperands() <= 1)

  auto IntrinsicID = Ops[1].getNode()->getAsZExtVal();
  if (IntrinsicID != Intrinsic::ppc_tdw && IntrinsicID != Intrinsic::ppc_tw &&
      IntrinsicID != Intrinsic::ppc_trapd && IntrinsicID != Intrinsic::ppc_trap)

  if (MDNode *MDN = I.getMetadata(LLVMContext::MD_annotation))
 
 
  if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())

  switch (AM.Scale) {

  unsigned Depth = Op.getConstantOperandVal(0);

  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);

  unsigned Depth = Op.getConstantOperandVal(0);

  bool isPPC64 = PtrVT == MVT::i64;

    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;

    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

                            FrameAddr, MachinePointerInfo());

#define GET_REGISTER_MATCHER
#include "PPCGenAsmMatcher.inc"

  bool IsPPC64 = Subtarget.isPPC64();

  if ((IsPPC64 && Reg == PPC::R2) || Reg == PPC::R0)

    Reg = Reg.id() - PPC::R0 + PPC::X0;

  if (Subtarget.is32BitELFABI())

  if (Subtarget.isAIXABI())

    return Subtarget.isGVIndirectSymbol(G->getGlobal());
 
 
  case Intrinsic::ppc_atomicrmw_xchg_i128:
  case Intrinsic::ppc_atomicrmw_add_i128:
  case Intrinsic::ppc_atomicrmw_sub_i128:
  case Intrinsic::ppc_atomicrmw_nand_i128:
  case Intrinsic::ppc_atomicrmw_and_i128:
  case Intrinsic::ppc_atomicrmw_or_i128:
  case Intrinsic::ppc_atomicrmw_xor_i128:
  case Intrinsic::ppc_cmpxchg_i128:

    Info.memVT = MVT::i128;
    Info.ptrVal = I.getArgOperand(0);

    Info.align = Align(16);

  case Intrinsic::ppc_atomic_load_i128:

    Info.memVT = MVT::i128;
    Info.ptrVal = I.getArgOperand(0);

    Info.align = Align(16);

  case Intrinsic::ppc_atomic_store_i128:

    Info.memVT = MVT::i128;
    Info.ptrVal = I.getArgOperand(2);

    Info.align = Align(16);

  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x_be:
  case Intrinsic::ppc_vsx_lxvw4x_be:
  case Intrinsic::ppc_vsx_lxvl:
  case Intrinsic::ppc_vsx_lxvll: {

    case Intrinsic::ppc_altivec_lvebx:

    case Intrinsic::ppc_altivec_lvehx:

    case Intrinsic::ppc_altivec_lvewx:

    case Intrinsic::ppc_vsx_lxvd2x:
    case Intrinsic::ppc_vsx_lxvd2x_be:

    Info.ptrVal = I.getArgOperand(0);

    Info.align = Align(1);

  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x_be:
  case Intrinsic::ppc_vsx_stxvw4x_be:
  case Intrinsic::ppc_vsx_stxvl:
  case Intrinsic::ppc_vsx_stxvll: {

    case Intrinsic::ppc_altivec_stvebx:

    case Intrinsic::ppc_altivec_stvehx:

    case Intrinsic::ppc_altivec_stvewx:

    case Intrinsic::ppc_vsx_stxvd2x:
    case Intrinsic::ppc_vsx_stxvd2x_be:

    Info.ptrVal = I.getArgOperand(1);

    Info.align = Align(1);

  case Intrinsic::ppc_stdcx:
  case Intrinsic::ppc_stwcx:
  case Intrinsic::ppc_sthcx:
  case Intrinsic::ppc_stbcx: {

    auto Alignment = Align(8);

    case Intrinsic::ppc_stdcx:

    case Intrinsic::ppc_stwcx:

      Alignment = Align(4);

    case Intrinsic::ppc_sthcx:

      Alignment = Align(2);

    case Intrinsic::ppc_stbcx:

      Alignment = Align(1);

    Info.ptrVal = I.getArgOperand(0);

    Info.align = Alignment;
 
 
    const AttributeList &FuncAttributes) const {

    if (Subtarget.hasAltivec() && Op.size() >= 16) {
      if (Op.isMemset() && Subtarget.hasVSX()) {

        if (TailSize > 2 && TailSize <= 4) {

      if (Op.isAligned(Align(16)) || Subtarget.hasP8Vector())

  if (Subtarget.isPPC64()) {

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);

  return NumBits1 == 64 && NumBits2 == 32;

  return NumBits1 == 64 && NumBits2 == 32;

    EVT MemVT = LD->getMemoryVT();

    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&

         "invalid fpext types");

  if (DestVT == MVT::f128)

                                                       unsigned *Fast) const {

      !Subtarget.allowsUnalignedFPAccess())

    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)

  if (VT == MVT::ppcf128)

    if (!ConstNode->getAPIntValue().isSignedIntN(64))

    int64_t Imm = ConstNode->getSExtValue();

  if (Subtarget.hasSPE() || Subtarget.useSoftFloat())

  switch (Ty->getScalarType()->getTypeID()) {

    return Subtarget.hasP9Vector();
 
 
  if (!I->hasOneUse())

  assert(User && "A single use instruction with no uses.");

  switch (I->getOpcode()) {
  case Instruction::FMul: {

    if (User->getOpcode() != Instruction::FSub &&
        User->getOpcode() != Instruction::FAdd)

    bool AllowContract = I->getFastMathFlags().allowContract() &&
                         User->getFastMathFlags().allowContract();

  case Instruction::Load: {

    if (User->getOpcode() != Instruction::Store)

  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0

  return ScratchRegs;

    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;

    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;

                     EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove();

  if (Subtarget.hasVSX())
 
 
                                                bool LegalOps, bool OptForSize,
                                                unsigned Depth) const {

  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();

    if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {

                                           N0Cost, Depth + 1);

                                           N1Cost, Depth + 1);

      if (NegN0 && N0Cost <= N1Cost) {
        Cost = std::min(N0Cost, N2Cost);

      } else if (NegN1) {
        Cost = std::min(N1Cost, N2Cost);

  if (M.getStackProtectorGuard() == "tls" || Subtarget.isTargetLinux())

                                     bool ForCodeSize) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())

    if (Subtarget.hasPrefixInstrs() && Subtarget.hasP10Vector()) {

    APSInt IntResult(16, false);

    if (IsExact && IntResult <= 15 && IntResult >= -16)

    return Imm.isZero();

    return Imm.isPosZero();
 
 
  unsigned Opcode = N->getOpcode();

      if (Mask->getZExtValue() == OpSizeInBits - 1)

                                              DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  unsigned Opc = N->getOpcode();

         "Unexpected opcode.");

  if (EltTy != MVT::i64 && EltTy != MVT::i32)

  uint64_t SplatBits = 0;
  bool AddSplatCase = false;

    AddSplatCase = true;

  if (!AddSplatCase) {

    unsigned SplatBitSize;

    APInt APSplatBits, APSplatUndef;

    bool BVNIsConstantSplat =

                             HasAnyUndefs, 0, !Subtarget.isLittleEndian());
    if (!BVNIsConstantSplat || SplatBitSize != EltBits)

  if (SplatBits == (EltBits - 1)) {

    return DCI.DAG.getNode(NewOpc, DL, VT, N0, SplatOnes);

  if (EltTy != MVT::i64 || SplatBits != 1)

  return DCI.DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
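
// Two splat-shift-amount special cases: a shift by EltBits - 1 maps onto the
// dedicated sign-bit-shift opcode selected above, and a shift left by 1 of
// i64 elements is cheaper as ADD N0, N0 (adding the vector to itself) than
// as a shift by a materialized splat constant.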
 
SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {

  if (N->getValueType(0).isVector())
    return combineVectorShift(N, DCI);

  if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() ||

      N->getValueType(0) != MVT::i64)

    ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {

  if (N->getValueType(0).isVector())
    return combineVectorShift(N, DCI);

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {

  if (N->getValueType(0).isVector())
    return combineVectorShift(N, DCI);
 
  if (!Subtarget.isPPC64())

  auto isZextOfCompareWithConstant = [](SDValue Op) {

        Op.getValueType() != MVT::i64)

    if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
        Cmp.getOperand(0).getValueType() != MVT::i64)

      int64_t NegConstant = 0 - Constant->getSExtValue();

  bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
  bool RHSHasPattern = isZextOfCompareWithConstant(RHS);

  if (LHSHasPattern && !RHSHasPattern)

  else if (!LHSHasPattern && !RHSHasPattern)

  EVT CarryType = Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  SDValue Z = Cmp.getOperand(0);

  int64_t NegConstant = 0 - Constant->getSExtValue();

    SDValue AddOrZ = NegConstant != 0 ? Add : Z;

    SDValue AddOrZ = NegConstant != 0 ? Add : Z;

  if (!GSDN || !ConstNode)
 
 
SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {

                                           DAGCombinerInfo &DCI) const {

  if (Subtarget.useCRBits()) {

    if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
      return CRTruncValue;

  if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)

  int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;

    EltToExtract = EltToExtract ? 0 : 1;

    return DCI.DAG.getNode(

        DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
 
SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  if (!ConstOpOrElement)

  auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
    switch (this->Subtarget.getCPUDirective()) {

      return IsAddOne && IsNeg ? VT.isVector() : true;

  EVT VT = N->getValueType(0);

  APInt MulAmtAbs = MulAmt.abs();

  if ((MulAmtAbs - 1).isPowerOf2()) {

    if (!IsProfitable(IsNeg, true, VT))

  } else if ((MulAmtAbs + 1).isPowerOf2()) {

    if (!IsProfitable(IsNeg, false, VT))
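
// Multiplies by constants of the form 2^k + 1 or 2^k - 1 are decomposed into
// a shift plus an add or subtract: x * (2^k + 1) => (x << k) + x and
// x * (2^k - 1) => (x << k) - x, with the operands adjusted when the original
// constant was negative; IsProfitable gates the transform on the CPU's
// relative multiply versus shift-add costs.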
 
                                          DAGCombinerInfo &DCI) const {

  SDNodeFlags Flags = N->getFlags();
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  unsigned Opc = N->getOpcode();

  bool LegalOps = !DCI.isBeforeLegalizeOps();

  if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
 
bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {

  if (!Subtarget.is64BitELFABI())

  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)

  if (!Callee || Callee->isVarArg())

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {

    if (CI->getBitWidth() > 64)

    int64_t ConstVal = CI->getZExtValue();

      (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
 
PPC::AddrMode PPCTargetLowering::getAddrModeForFlags(unsigned Flags) const {

    if ((Flags & FlagSet) == FlagSet)

    if ((Flags & FlagSet) == FlagSet)

    if ((Flags & FlagSet) == FlagSet)

    if ((Flags & FlagSet) == FlagSet)

  if ((FrameIndexAlign % 4) != 0)
    FlagSet &= ~PPC::MOF_RPlusSImm16Mult4;
  if ((FrameIndexAlign % 16) != 0)
    FlagSet &= ~PPC::MOF_RPlusSImm16Mult16;

    if ((FrameIndexAlign % 4) == 0)

    if ((FrameIndexAlign % 16) == 0)

  auto SetAlignFlagsForImm = [&](uint64_t Imm) {
    if ((Imm & 0x3) == 0)

    if ((Imm & 0xf) == 0)

    const APInt &ConstImm = CN->getAPIntValue();

      const APInt &ConstImm = CN->getAPIntValue();

    } else if (RHS.getOpcode() == PPCISD::Lo && !RHS.getConstantOperandVal(1))
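
// The Mult4/Mult16 flags exist because DS-form instructions (ld/std/lwa)
// require a displacement that is a multiple of 4 and DQ-form instructions
// (lxv/stxv) a multiple of 16; a frame index or immediate whose alignment
// cannot guarantee that loses the corresponding flag and falls back to
// another addressing mode.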
 
 
unsigned PPCTargetLowering::computeMOFlags(const SDNode *Parent, SDValue N,

  if (!Subtarget.hasP9Vector())

  if (Subtarget.hasPrefixInstrs())

  if (Subtarget.hasSPE())

  unsigned ParentOp = Parent->getOpcode();

    if ((ID == Intrinsic::ppc_vsx_lxvp) || (ID == Intrinsic::ppc_vsx_stxvp)) {
      SDValue IntrinOp = (ID == Intrinsic::ppc_vsx_lxvp)

    if (LSB->isIndexed())

  assert(MN && "Parent should be a MemSDNode!");

           "Not expecting scalar integers larger than 16 bytes!");

    else if (Size == 32)

    else if (Size == 256) {
      assert(Subtarget.pairedVectorMemops() &&
             "256-bit vectors are only available when paired vector memops is "

    else if (MemVT == MVT::f128 || MemVT.isVector())

    FlagSet &= ~PPC::MOF_NoExt;

  bool IsNonP1034BitConst =

      IsNonP1034BitConst)
 
  int16_t ForceXFormImm = 0;

    Disp = N.getOperand(0);
    Base = N.getOperand(1);

       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Disp = N.getOperand(0);
    Base = N.getOperand(1);

  Disp = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,

    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {

  if (PartVT == MVT::f64 &&
      (ValVT == MVT::i32 || ValVT == MVT::i16 || ValVT == MVT::i8)) {

    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f64, Val);
 
 
SDValue PPCTargetLowering::lowerToLibCall(const char *LibCallName, SDValue Op,

  EVT RetVT = Op.getValueType();

    EVT ArgVT = N.getValueType();

    Entry.IsZExt = !Entry.IsSExt;
    Args.push_back(Entry);

      (RetTy == F.getReturnType() || F.getReturnType()->isVoidTy());

SDValue PPCTargetLowering::lowerLibCallBasedOnType(
    const char *LibCallFloatName, const char *LibCallDoubleName, SDValue Op,

  if (Op.getValueType() == MVT::f32)
    return lowerToLibCall(LibCallFloatName, Op, DAG);

  if (Op.getValueType() == MVT::f64)
    return lowerToLibCall(LibCallDoubleName, Op, DAG);

bool PPCTargetLowering::isLowringToMASSFiniteSafe(SDValue Op) const {
  SDNodeFlags Flags = Op.getNode()->getFlags();
  return isLowringToMASSSafe(Op) && Flags.hasNoSignedZeros() &&

bool PPCTargetLowering::isLowringToMASSSafe(SDValue Op) const {
  return Op.getNode()->getFlags().hasApproximateFuncs();

bool PPCTargetLowering::isScalarMASSConversionEnabled() const {
 
SDValue PPCTargetLowering::lowerLibCallBase(const char *LibCallDoubleName,
                                            const char *LibCallFloatName,
                                            const char *LibCallDoubleNameFinite,
                                            const char *LibCallFloatNameFinite,

  if (!isScalarMASSConversionEnabled() || !isLowringToMASSSafe(Op))

  if (!isLowringToMASSFiniteSafe(Op))
    return lowerLibCallBasedOnType(LibCallFloatName, LibCallDoubleName, Op,

  return lowerLibCallBasedOnType(LibCallFloatNameFinite,
                                 LibCallDoubleNameFinite, Op, DAG);

  return lowerLibCallBase("__xl_pow", "__xl_powf", "__xl_pow_finite",
                          "__xl_powf_finite", Op, DAG);

  return lowerLibCallBase("__xl_sin", "__xl_sinf", "__xl_sin_finite",
                          "__xl_sinf_finite", Op, DAG);

  return lowerLibCallBase("__xl_cos", "__xl_cosf", "__xl_cos_finite",
                          "__xl_cosf_finite", Op, DAG);

  return lowerLibCallBase("__xl_log", "__xl_logf", "__xl_log_finite",
                          "__xl_logf_finite", Op, DAG);

  return lowerLibCallBase("__xl_log10", "__xl_log10f", "__xl_log10_finite",
                          "__xl_log10f_finite", Op, DAG);

  return lowerLibCallBase("__xl_exp", "__xl_expf", "__xl_exp_finite",
                          "__xl_expf_finite", Op, DAG);
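
// Each wrapper forwards a scalar math call to IBM's MASS runtime: the f32 or
// f64 __xl_* entry point is chosen from the value type, and a *_finite
// variant is used only when the node's fast-math flags make it safe
// (isLowringToMASSFiniteSafe); otherwise the regular entry point is kept.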
 
  unsigned Flags = computeMOFlags(Parent, N, DAG);

    assert(Subtarget.isUsingPCRelativeCalls() &&
           "Must be using PC-Relative calls when a valid PC-Relative node is "

      Disp = N.getOperand(1).getOperand(0);

      Base = N.getOperand(0);

      EVT CNType = CN->getValueType(0);
      uint64_t CNImm = CN->getZExtValue();

      if ((CNType == MVT::i32 || isInt<32>(CNImm)) &&

        int32_t Addr = (int32_t)CNImm;

        uint32_t LIS = CNType == MVT::i32 ? PPC::LIS : PPC::LIS8;

    unsigned Opcode = N.getOpcode();

        Base = N.getOperand(0);

    Base = FI ? N : N.getOperand(1);
    Disp = FI ? DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
 
 
                                                 bool IsVarArg) const {

  return Subtarget.isPPC64() && Subtarget.hasQuadwordAtomics();

    return Intrinsic::ppc_atomicrmw_xchg_i128;

    return Intrinsic::ppc_atomicrmw_add_i128;

    return Intrinsic::ppc_atomicrmw_sub_i128;

    return Intrinsic::ppc_atomicrmw_and_i128;

    return Intrinsic::ppc_atomicrmw_or_i128;

    return Intrinsic::ppc_atomicrmw_xor_i128;

    return Intrinsic::ppc_atomicrmw_nand_i128;
 
 
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();

  assert(ValTy->getPrimitiveSizeInBits() == 128);

  Value *IncrLo = Builder.CreateTrunc(Incr, Int64Ty, "incr_lo");

      Builder.CreateTrunc(Builder.CreateLShr(Incr, 64), Int64Ty, "incr_hi");

  Value *LoHi = Builder.CreateIntrinsic(

      {AlignedAddr, IncrLo, IncrHi});

  Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
  Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
  Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
  Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
  return Builder.CreateOr(
      Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
 
 
// (Signature taken from this class's declaration; elided lines are
// reconstructed or marked below.)
Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Type *ValTy = CmpVal->getType();
  assert(ValTy->getPrimitiveSizeInBits() == 128);
  Type *Int64Ty = Type::getInt64Ty(M->getContext());
  // (Elided in the listing: IntCmpXchg is bound to the declaration of the
  // Intrinsic::ppc_cmpxchg_i128 intrinsic in module M.)
  // Split both the expected and replacement values into 64-bit halves.
  Value *CmpLo = Builder.CreateTrunc(CmpVal, Int64Ty, "cmp_lo");
  Value *CmpHi =
      Builder.CreateTrunc(Builder.CreateLShr(CmpVal, 64), Int64Ty, "cmp_hi");
  Value *NewLo = Builder.CreateTrunc(NewVal, Int64Ty, "new_lo");
  Value *NewHi =
      Builder.CreateTrunc(Builder.CreateLShr(NewVal, 64), Int64Ty, "new_hi");
  Value *LoHi =
      Builder.CreateCall(IntCmpXchg, {AlignedAddr, CmpLo, CmpHi, NewLo, NewHi});
  // Recombine the returned {lo, hi} pair (the old memory value) into i128.
  Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
  Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
  Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
  Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
  return Builder.CreateOr(
      Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
}
 
 
// Reconstructed enclosing function (the listing keeps only the body); the
// attribution to hasMultipleConditionRegisters is inferred from this file's
// overrides: with CR bits enabled, each condition-register bit is separately
// allocatable.
bool PPCTargetLowering::hasMultipleConditionRegisters(EVT VT) const {
  return Subtarget.useCRBits();
}
 
 
unsigned const MachineRegisterInfo * MRI
 
static MCRegister MatchRegisterName(StringRef Name)
 
static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect, bool IsTailCall, std::optional< CallLowering::PtrAuthInfo > &PAI, MachineRegisterInfo &MRI)
 
static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1, SDValue V2, unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &DL)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
 
static bool isSignExtended(SDValue N, SelectionDAG &DAG)
 
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
 
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
 
static std::pair< Register, unsigned > getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg)
 
This file declares a class to represent arbitrary precision floating point values and provide a varie...
 
This file implements a class to represent arbitrary precision integral constant values and operations...
 
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
 
static bool isLoad(int Opcode)
 
static bool isFloatingPointZero(SDValue Op)
isFloatingPointZero - Return true if this is +0.0.
 
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
 
Function Alias Analysis Results
 
Atomic ordering constants.
 
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
 
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
 
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
 
Analysis containing CSE Info
 
This file contains the declarations for the subclasses of Constant, which represent the different fla...
 
static RegisterPass< DebugifyModulePass > DM("debugify", "Attach debug info to everything")
 
This file defines the DenseMap class.
 
const HexagonInstrInfo * TII
 
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
 
Module.h This file contains the declarations for the Module class.
 
This defines the Use class.
 
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
 
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
 
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
 
static int getEstimateRefinementSteps(EVT VT, const LoongArchSubtarget &Subtarget)
 
static bool isSplat(Value *V)
Return true if V is a splat of a value (which is used when multiplying a matrix with a scalar).
 
Machine Check Debug Module
 
Register const TargetRegisterInfo * TRI
 
Promote Memory to Register
 
static bool isConstantOrUndef(const SDValue Op)
 
static CodeModel::Model getCodeModel(const PPCSubtarget &S, const TargetMachine &TM, const MachineOperand &MO)
 
cl::opt< bool > ANDIGlueBug("expose-ppc-andi-glue-bug", cl::desc("expose the ANDI glue bug on PPC"), cl::Hidden)
 
static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT, SelectionDAG &DAG, const SDLoc &dl)
getCanonicalConstSplat - Build a canonical splat immediate of Val with an element size of SplatSize.
 
static bool IsSelectCC(MachineInstr &MI)
 
static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
 
static const TargetRegisterClass * getRegClassForSVT(MVT::SimpleValueType SVT, bool IsPPC64, bool HasP8Vector, bool HasVSX)
 
static bool isGPRShadowAligned(MCPhysReg Reg, Align RequiredAlign)
 
static SDValue DAGCombineAddc(SDNode *N, llvm::PPCTargetLowering::DAGCombinerInfo &DCI)
 
static bool needStackSlotPassParameters(const PPCSubtarget &Subtarget, const SmallVectorImpl< ISD::OutputArg > &Outs)
 
std::tuple< uint32_t, uint8_t > LXVKQPattern
 
static bool isAlternatingShuffMask(const ArrayRef< int > &Mask, int NumElts)
 
static bool isShuffleMaskInRange(const SmallVectorImpl< int > &ShuffV, int HalfVec, int LHSLastElementDefined, int RHSLastElementDefined)
 
static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG, SDValue Input, uint64_t Elems, uint64_t CorrectElems)
 
static cl::opt< bool > DisablePPCUnaligned("disable-ppc-unaligned", cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden)
 
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
 
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG)
 
static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement, bool Swap, SDLoc &DL, SelectionDAG &DAG)
This function is called when we have proved that a SETCC node can be replaced by subtraction (and oth...
 
static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL)
 
static void CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool IsPPC64, SDValue Arg, int SPDiff, unsigned ArgOffset, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments)
CalculateTailCallArgDest - Remember Argument for later processing.
 
static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
 
static void setAlignFlagsForFI(SDValue N, unsigned &FlagSet, SelectionDAG &DAG)
Set alignment flags based on whether or not the Frame Index is aligned.
 
static bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget)
 
static void updateForAIXShLibTLSModelOpt(TLSModel::Model &Model, SelectionDAG &DAG, const TargetMachine &TM)
updateForAIXShLibTLSModelOpt - Helper to initialize TLS model opt settings, and then apply the update...
 
static bool provablyDisjointOr(SelectionDAG &DAG, const SDValue &N)
Used when computing address flags for selecting loads and stores.
 
static bool callsShareTOCBase(const Function *Caller, const GlobalValue *CalleeGV, const TargetMachine &TM)
 
static SDValue generateSToVPermutedForVecShuffle(int ScalarSize, uint64_t ShuffleEltWidth, unsigned &NumValidElts, int FirstElt, int &LastElt, SDValue VecShuffOperand, SDValue SToVNode, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
 
constexpr uint64_t AIXSmallTlsPolicySizeLimit
 
static bool isPCRelNode(SDValue N)
 
static void LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, bool isTailCall, bool isVector, SmallVectorImpl< SDValue > &MemOpChains, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments, const SDLoc &dl)
LowerMemOpCallTo - Store the argument to the stack or remember it in case of tail calls.
 
static cl::opt< unsigned > PPCGatherAllAliasesMaxDepth("ppc-gather-alias-max-depth", cl::init(18), cl::Hidden, cl::desc("max depth when checking alias info in GatherAllAliases()"))
 
static bool areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC, CallingConv::ID CalleeCC)
 
static const MCPhysReg FPR[]
FPR - The set of FP registers that should be allocated for arguments on Darwin and AIX.
 
static SDNode * isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG)
isCallCompatibleAddress - Return the immediate to use if the specified 32-bit value is representable ...
 
static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize)
CalculateStackSlotAlignment - Calculates the alignment of this argument on the stack.
 
static bool IsSelect(MachineInstr &MI)
 
static SDValue ConvertCarryFlagToCarryValue(EVT SumType, SDValue Flag, EVT CarryType, SelectionDAG &DAG, const PPCSubtarget &STI)
 
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, bool HasDirectMove, bool HasP8Vector)
Do we have an efficient pattern in a .td file for this node?
 
static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
 
static void setUsesTOCBasePtr(MachineFunction &MF)
 
static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG, const SDLoc &dl, const PPCSubtarget &Subtarget)
 
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, unsigned NumBytes)
EnsureStackAlignment - Round stack frame size up from NumBytes to ensure minimum alignment required f...
 
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N, SelectionDAG &DAG)
 
static bool isStoreConditional(SDValue Intrin, unsigned &StoreWidth)
 
static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB)
 
static bool isFPExtLoad(SDValue Op)
 
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, const SDLoc &dl, EVT DestVT=MVT::Other)
BuildIntrinsicOp - Return a unary operator intrinsic node with the specified intrinsic ID.
 
static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, unsigned Bytes, int Dist, SelectionDAG &DAG)
 
static void StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, SDValue Chain, const SmallVectorImpl< TailCallArgumentInfo > &TailCallArgs, SmallVectorImpl< SDValue > &MemOpChains, const SDLoc &dl)
StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
 
static cl::opt< bool > UseAbsoluteJumpTables("ppc-use-absolute-jumptables", cl::desc("use absolute jump tables on ppc"), cl::Hidden)
 
static void setXFormForUnalignedFI(SDValue N, unsigned Flags, PPC::AddrMode &Mode)
 
static cl::opt< unsigned > PPCMinimumBitTestCmps("ppc-min-bit-test-cmps", cl::init(3), cl::Hidden, cl::desc("Set minimum of largest number of comparisons to use bit test for " "switch on PPC."))
 
static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign)
getMaxByValAlign - Helper for getByValTypeAlignment to determine the desired ByVal argument alignment...
 
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, unsigned Bytes, int Dist, SelectionDAG &DAG)
 
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned LHSStart, unsigned RHSStart)
isVMerge - Common function, used to match vmrg* shuffles.
 
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, unsigned &HiOpFlags, unsigned &LoOpFlags, const GlobalValue *GV=nullptr)
Return true if we should reference labels using a PICBase, set the HiOpFlags and LoOpFlags to the tar...
 
cl::opt< bool > DisableAutoPairedVecSt("disable-auto-paired-vec-st", cl::desc("disable automatically generated 32byte paired vector stores"), cl::init(true), cl::Hidden)
 
static void buildCallOperands(SmallVectorImpl< SDValue > &Ops, PPCTargetLowering::CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG, SmallVector< std::pair< unsigned, SDValue >, 8 > &RegsToPass, SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff, const PPCSubtarget &Subtarget)
 
static cl::opt< bool > DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32", cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden)
 
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget &ST)
Returns true if we should use a direct load into vector instruction (such as lxsd or lfd),...
 
static SDValue getDataClassTest(SDValue Op, FPClassTest Mask, const SDLoc &Dl, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
 
static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl< int > &ShuffV, int LHSFirstElt, int LHSLastElt, int RHSFirstElt, int RHSLastElt, int HalfVec, unsigned LHSNumValidElts, unsigned RHSNumValidElts, const PPCSubtarget &Subtarget)
 
static cl::opt< bool > DisableSCO("disable-ppc-sco", cl::desc("disable sibling call optimization on ppc"), cl::Hidden)
 
static std::optional< LXVKQPattern > getPatternInfo(const APInt &FullVal)
 
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT)
 
static cl::opt< bool > DisablePPCPreinc("disable-ppc-preinc", cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden)
 
static Intrinsic::ID getIntrinsicForAtomicRMWBinOp128(AtomicRMWInst::BinOp BinOp)
 
static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
 
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize)
CalculateStackSlotSize - Calculates the size reserved for this argument on the stack.
 
static int CalculateTailCallSPDiff(SelectionDAG &DAG, bool isTailCall, unsigned ParamSize)
CalculateTailCallSPDiff - Get the amount the stack pointer has to be adjusted to accommodate the argu...
 
static Instruction * callIntrinsic(IRBuilderBase &Builder, Intrinsic::ID Id)
 
static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee, SDValue &Glue, SDValue &Chain, const SDLoc &dl)
 
static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, SelectionDAG &DAG)
 
static SDValue isScalarToVec(SDValue Op)
 
static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl)
 
static cl::opt< bool > DisablePerfectShuffle("ppc-disable-perfect-shuffle", cl::desc("disable vector permute decomposition"), cl::init(true), cl::Hidden)
 
bool isValidMtVsrBmi(APInt &BitMask, BuildVectorSDNode &BVN, bool IsLittleEndian)
 
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, bool &isDot, const PPCSubtarget &Subtarget)
getVectorCompareInfo - Given an intrinsic, return false if it is not a vector comparison.
 
static unsigned invertFMAOpcode(unsigned Opc)
 
static const SDValue * getNormalLoadInput(const SDValue &Op, bool &IsPermuted)
 
static cl::opt< unsigned > PPCMinimumJumpTableEntries("ppc-min-jump-table-entries", cl::init(64), cl::Hidden, cl::desc("Set minimum number of entries to use a jump table on PPC"))
 
static bool isValidSplatLoad(const PPCSubtarget &Subtarget, const SDValue &Op, unsigned &Opcode)
 
static SDValue ConvertCarryValueToCarryFlag(EVT SumType, SDValue Value, SelectionDAG &DAG, const PPCSubtarget &STI)
 
static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG, const PPCSubtarget &Subtarget, SDValue Chain=SDValue())
 
static void PrepareTailCall(SelectionDAG &DAG, SDValue &InGlue, SDValue &Chain, const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, SDValue FPOp, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments)
 
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, SDValue OldRetAddr, SDValue OldFP, int SPDiff, const SDLoc &dl)
EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to the appropriate stack sl...
 
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, SelectionDAG &DAG, const SDLoc &dl)
BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified amount.
 
static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG)
 
static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT, SelectionDAG &DAG, SDValue ArgValue, MVT LocVT, const SDLoc &dl)
 
static void computeFlagsForAddressComputation(SDValue N, unsigned &FlagSet, SelectionDAG &DAG)
Given a node, compute flags that are used for address computation when selecting load and store instr...
 
static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart)
 
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize, unsigned LinkageSize, unsigned ParamAreaSize, unsigned &ArgOffset, unsigned &AvailableFPRs, unsigned &AvailableVRs)
CalculateStackSlotUsed - Return whether this argument will use its stack slot (instead of being passe...
 
static cl::opt< unsigned > PPCAIXTLSModelOptUseIEForLDLimit("ppc-aix-shared-lib-tls-model-opt-limit", cl::init(1), cl::Hidden, cl::desc("Set inclusive limit count of TLS local-dynamic access(es) in a " "function to use initial-exec"))
 
static unsigned getPPCStrictOpcode(unsigned Opc)
 
static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee, SDValue &Glue, SDValue &Chain, SDValue CallSeqStart, const CallBase *CB, const SDLoc &dl, bool hasNest, const PPCSubtarget &Subtarget)
 
static cl::opt< bool > DisableP10StoreForward("disable-p10-store-forward", cl::desc("disable P10 store forward-friendly conversion"), cl::Hidden, cl::init(false))
 
static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width)
 
static bool isFunctionGlobalAddress(const GlobalValue *CalleeGV)
 
static bool isSplatBV(SDValue Op)
 
static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG)
 
static cl::opt< bool > DisableILPPref("disable-ppc-ilp-pref", cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden)
 
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int)
Check that the mask is shuffling N byte elements.
 
static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG)
Reduce the number of loads when building a vector.
 
static bool isValidPCRelNode(SDValue N)
 
if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod
 
pre isel intrinsic Pre ISel Intrinsic Lowering
 
static constexpr MCPhysReg SPReg
 
const SmallVectorImpl< MachineOperand > & Cond
 
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
 
SI optimize exec mask operations pre RA
 
static const MCExpr * MaskShift(const MCExpr *Val, uint32_t Mask, uint32_t Shift, MCContext &Ctx)
 
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
 
This file defines the SmallPtrSet class.
 
This file defines the SmallVector class.
 
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
 
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
 
#define STATISTIC(VARNAME, DESC)
 
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
 
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
 
This file describes how to lower LLVM code to machine code.
 
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
 
static const fltSemantics & IEEEsingle()
 
static constexpr roundingMode rmTowardZero
 
static constexpr roundingMode rmNearestTiesToEven
 
static const fltSemantics & PPCDoubleDouble()
 
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
 
APInt bitcastToAPInt() const
 
Class for arbitrary precision integers.
 
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
 
void clearBit(unsigned BitPosition)
Set a given bit to 0.
 
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
 
uint64_t getZExtValue() const
Get zero extended value.
 
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
 
APInt abs() const
Get the absolute value.
 
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
 
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
 
bool isNegative() const
Determine sign of this APInt.
 
void clearAllBits()
Set every bit to 0.
 
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
 
LLVM_ABI void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
 
bool getBoolValue() const
Convert APInt to a boolean value.
 
double bitsToDouble() const
Converts APInt bits to a double.
 
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
 
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
 
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
 
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
 
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
 
An arbitrary precision integer that knows its signedness.
 
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
 
size_t size() const
size - Get the array size.
 
An instruction that atomically checks whether a specified value is in a memory location,...
 
Value * getNewValOperand()
 
an instruction that atomically reads a memory location, combines it with another value,...
 
BinOp
This enumeration lists the possible modifications atomicrmw can make.
 
@ USubCond
Subtract only if no unsigned overflow.
 
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
 
@ UIncWrap
Increment one up to a maximum value.
 
@ UDecWrap
Decrement one until a minimum value or zero.
 
BinOp getOperation() const
 
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
 
LLVM Basic Block Representation.
 
int64_t getOffset() const
 
const BlockAddress * getBlockAddress() const
 
static BranchProbability getOne()
 
static BranchProbability getZero()
 
A "pseudo-class" with methods for operating on BUILD_VECTORs.
 
LLVM_ABI bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
 
CCState - This class holds information needed while lowering arguments and return values.
 
Register getLocReg() const
 
LocInfo getLocInfo() const
 
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
 
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP)
 
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
 
int64_t getLocMemOffset() const
 
unsigned getValNo() const
 
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
 
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
 
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
 
bool isStrictFP() const
Determine if the call requires strict floating point semantics.
 
CallingConv::ID getCallingConv() const
 
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
 
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
 
Value * getCalledOperand() const
 
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
 
unsigned arg_size() const
 
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
 
This class represents a function call, abstracting a target machine's calling convention.
 
ConstantFP - Floating Point Values [float, double].
 
uint64_t getZExtValue() const
 
const APInt & getAPIntValue() const
 
int64_t getSExtValue() const
 
This is an important base class in LLVM.
 
uint64_t getNumOperands() const
 
A parsed version of the target data layout string in and methods for querying it.
 
bool isLittleEndian() const
Layout endianness...
 
LLVM_ABI unsigned getLargestLegalIntTypeSizeInBits() const
Returns the size of largest legal integer type size, or 0 if none are set.
 
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
 
LLVM_ABI Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
 
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
 
iterator find(const_arg_type_t< KeyT > Val)
 
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
 
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
 
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
 
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
 
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
 
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
 
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
 
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
 
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
 
AttributeList getAttributes() const
Return the attribute list for this Function.
 
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
 
Type * getReturnType() const
Returns the type of the ret val.
 
const Argument * const_arg_iterator
 
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
 
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
 
int64_t getOffset() const
 
unsigned getTargetFlags() const
 
const GlobalValue * getGlobal() const
 
LLVM_ABI const GlobalObject * getAliaseeObject() const
 
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
 
void setThreadLocalMode(ThreadLocalMode Val)
 
bool hasHiddenVisibility() const
 
LLVM_ABI StringRef getSection() const
 
Module * getParent()
Get the module that this global value is contained inside of...
 
bool isStrongDefinitionForLinker() const
Returns true if this global's definition will be the one chosen by the linker.
 
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this global belongs to.
 
Type * getValueType() const
 
bool hasProtectedVisibility() const
 
Common base class shared among various IRBuilders.
 
LLVM_ABI bool hasAtomicLoad() const LLVM_READONLY
Return true if this atomic instruction loads from memory.
 
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
 
This is an important class for using LLVM in a threaded context.
 
Base class for LoadSDNode and StoreSDNode.
 
An instruction for reading from memory.
 
This class is used to represent ISD::LOAD nodes.
 
const SDValue & getBasePtr() const
 
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
 
TypeSize getValue() const
 
Context object for machine code objects.
 
Base class for the full range of assembler expressions which are needed for parsing.
 
Wrapper class representing physical registers. Should be passed by value.
 
MCSymbolXCOFF * getQualNameSymbol() const
 
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
 
@ INVALID_SIMPLE_VALUE_TYPE
 
uint64_t getScalarSizeInBits() const
 
unsigned getVectorNumElements() const
 
bool isVector() const
Return true if this is a vector value type.
 
bool isInteger() const
Return true if this is an integer or a vector integer type.
 
static auto integer_valuetypes()
 
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
 
static auto fixedlen_vector_valuetypes()
 
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
 
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
 
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
 
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
 
static MVT getIntegerVT(unsigned BitWidth)
 
static auto fp_valuetypes()
 
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
 
void setCallFrameSize(unsigned N)
Set the call frame size on entry to this basic block.
 
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
 
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
 
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
 
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
 
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
 
MachineInstrBundleIterator< MachineInstr > iterator
 
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
 
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
 
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
 
void setFrameAddressIsTaken(bool T)
 
void setHasTailCall(bool V=true)
 
void setReturnAddressIsTaken(bool s)
 
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
 
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
 
bool hasVAStart() const
Returns true if the function calls the llvm.va_start intrinsic.
 
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
 
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
 
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
 
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
 
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
 
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
 
MCContext & getContext() const
 
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
 
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
 
Function & getFunction()
Return the LLVM function that this machine code represents.
 
BasicBlockListType::iterator iterator
 
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
 
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
 
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
 
void insert(iterator MBBI, MachineBasicBlock *MBB)
 
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
 
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
 
const MachineInstrBuilder & add(const MachineOperand &MO) const
 
const MachineInstrBuilder & addFrameIndex(int Idx) const
 
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
 
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
 
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
 
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
 
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
 
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
 
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
 
Representation of each machine instruction.
 
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
 
A description of a memory reference used in the backend.
 
LocationSize getSize() const
Return the size in bytes of the memory reference.
 
Flags
Flags values. These may be or'd together.
 
@ MOVolatile
The memory access is volatile.
 
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
 
@ MOLoad
The memory access reads data.
 
@ MOInvariant
The memory access always returns the same value (or traps).
 
@ MOStore
The memory access writes data.
 
Flags getFlags() const
Return the raw flags of the source value,.
 
MachineOperand class - Representation of each machine instruction operand.
 
static MachineOperand CreateImm(int64_t Val)
 
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
 
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
 
LLVM_ABI Register getLiveInVirtReg(MCRegister PReg) const
getLiveInVirtReg - If PReg is a live-in physical register, return the corresponding live-in virtual r...
 
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
 
This is an abstract virtual class for memory operations.
 
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
 
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
 
const SDValue & getBasePtr() const
 
const MachinePointerInfo & getPointerInfo() const
 
const SDValue & getChain() const
 
EVT getMemoryVT() const
Return the type of the in-memory value.
 
A Module instance is used to store all the information related to an LLVM module.
 
uint64_t getReturnSaveOffset() const
getReturnSaveOffset - Return the previous frame offset to save the return address.
 
unsigned getLinkageSize() const
getLinkageSize - Return the size of the PowerPC ABI linkage area.
 
uint64_t getTOCSaveOffset() const
getTOCSaveOffset - Return the previous frame offset to save the TOC register – 64-bit SVR4 ABI only.
 
PPCFunctionInfo - This class is derived from MachineFunction private PowerPC target-specific informat...
 
void setVarArgsNumFPR(unsigned Num)
 
void setReturnAddrSaveIndex(int idx)
 
bool isAIXFuncUseTLSIEForLD() const
 
int getReturnAddrSaveIndex() const
 
unsigned getVarArgsNumFPR() const
 
void setAIXFuncUseTLSIEForLD()
 
int getFramePointerSaveIndex() const
 
void setVarArgsNumGPR(unsigned Num)
 
void appendParameterType(ParamType Type)
 
int getVarArgsFrameIndex() const
 
void setLRStoreRequired()
 
bool isAIXFuncTLSModelOptInitDone() const
 
void setTailCallSPDelta(int size)
 
void setAIXFuncTLSModelOptInitDone()
 
bool isLRStoreRequired() const
 
void setMinReservedArea(unsigned size)
 
unsigned getVarArgsNumGPR() const
 
unsigned getMinReservedArea() const
 
void setVarArgsStackOffset(int Offset)
 
void setVarArgsFrameIndex(int Index)
 
void addLiveInAttr(Register VReg, ISD::ArgFlagsTy Flags)
This function associates attributes for each live-in virtual register.
 
int getVarArgsStackOffset() const
 
void setFramePointerSaveIndex(int Idx)
 
static bool hasPCRelFlag(unsigned TF)
 
bool is32BitELFABI() const
 
unsigned descriptorTOCAnchorOffset() const
 
MVT getScalarIntVT() const
 
const PPCFrameLowering * getFrameLowering() const override
 
bool isUsingPCRelativeCalls() const
 
bool usesFunctionDescriptors() const
True if the ABI is descriptor based.
 
MCRegister getEnvironmentPointerRegister() const
 
bool isLittleEndian() const
 
MCRegister getTOCPointerRegister() const
 
MCRegister getStackPointerRegister() const
 
bool is64BitELFABI() const
 
const PPCTargetMachine & getTargetMachine() const
 
const PPCRegisterInfo * getRegisterInfo() const override
 
unsigned descriptorEnvironmentPointerOffset() const
 
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
 
CCAssignFn * ccAssignFnForCall(CallingConv::ID CC, bool Return, bool IsVarArg) const
 
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
isTruncateFree - Return true if it's free to truncate a value of type Ty1 to type Ty2.
 
Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override
Perform a masked atomicrmw using a target-specific intrinsic.
 
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
 
bool isFPExtFree(EVT DestVT, EVT SrcVT) const override
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
 
PPC::AddrMode SelectForceXFormMode(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG) const
SelectForceXFormMode - Given the specified address, force it to be represented as an indexed [r+r] op...
 
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
 
bool hasInlineStackProbe(const MachineFunction &MF) const override
 
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
 
const char * getTargetNodeName(unsigned Opcode) const override
getTargetNodeName() - This method returns the name of a target specific DAG node.
 
bool supportsTailCallFor(const CallBase *CB) const
 
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
 
MachineBasicBlock * emitProbedAlloca(MachineInstr &MI, MachineBasicBlock *MBB) const
 
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
 
MachineBasicBlock * EmitPartwordAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB, bool is8bit, unsigned Opcode, unsigned CmpOpcode=0, unsigned CmpPred=0) const
 
SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const override
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
 
bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, MaybeAlign EncodingAlignment) const
SelectAddressRegImm - Returns true if the address N can be represented by a base register plus a sign...
 
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
 
SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const
 
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
 
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
 
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
 
bool hasMultipleConditionRegisters(EVT VT) const override
Does the target have multiple (allocatable) condition registers that can be used to store the results...
 
TargetLowering::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
 
Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override
getByValTypeAlignment - Return the desired alignment for ByVal aggregate function arguments in the ca...
 
bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG, MaybeAlign EncodingAlignment=std::nullopt) const
SelectAddressRegReg - Given the specified addressed, check to see if it can be more efficiently repre...
 
MachineBasicBlock * EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB, unsigned AtomicSize, unsigned BinOpcode, unsigned CmpOpcode=0, unsigned CmpPred=0) const
 
SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const override
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
 
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
 
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
 
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressRegRegOnly - Given the specified addressed, force it to be represented as an indexed [r+...
 
bool useSoftFloat() const override
 
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
 
Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const override
Perform a masked cmpxchg using a target-specific intrinsic.
 
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
 
bool enableAggressiveFMAFusion(EVT VT) const override
Return true if target always benefits from combining into FMA for a given value type.
 
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
 
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
 
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
 
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
 
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
 
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
 
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
 
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
 
bool isProfitableToHoist(Instruction *I) const override
isProfitableToHoist - Check if it is profitable to hoist instruction I to its dominator block.
 
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
 
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
 
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint, return the type of constraint it is for this target.
 
const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
 
bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const override
Return true if the target shall perform extract vector element and store given that the vector is kno...
 
EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override
It returns EVT::Other if the type should be determined using generic target-independent logic.
 
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
 
SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const
 
void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const override
 
unsigned getStackProbeSize(const MachineFunction &MF) const
 
PPCTargetLowering(const PPCTargetMachine &TM, const PPCSubtarget &STI)
 
TargetLowering::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
 
bool useLoadStackGuardNode(const Module &M) const override
Override to support customized stack guard loading.
 
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster than a pair of fmul and fadd i...
 
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Is unaligned memory access allowed for the given type, and is it fast relative to software emulation.
 
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
 
bool SelectAddressRegImm34(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG) const
Similar to the 16-bit case but for instructions that take a 34-bit displacement field (prefixed loads...
 
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
 
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
 
bool isJumpTableRelative() const override
 
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
 
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
 
PPC::AddrMode SelectOptimalAddrMode(const SDNode *Parent, SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, MaybeAlign Align) const
SelectOptimalAddrMode - Based on a node N and it's Parent (a MemSDNode), compute the address flags of...
 
bool SelectAddressPCRel(SDValue N, SDValue &Base) const
SelectAddressPCRel - Represent the specified address as pc relative to be represented as [pc+imm].
 
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
 
bool SelectAddressEVXRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressEVXRegReg - Given the specified addressed, check to see if it can be more efficiently re...
 
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate,...
 
bool isAccessedAsGotIndirect(SDValue N) const
 
Align getPrefLoopAlignment(MachineLoop *ML) const override
Return the preferred loop alignment.
 
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const override
createFastISel - This method returns a target-specific FastISel object, or null if the target does no...
 
bool shouldInlineQuadwordAtomics() const
 
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
 
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate,...
 
Common code between 32-bit and 64-bit PowerPC targets.
 
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
 
Wrapper class representing virtual and physical registers.
 
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
 
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
 
This class provides iterator support for SDUse operands that use a specific SDNode.
 
Represents one node in the SelectionDAG.
 
ArrayRef< SDUse > ops() const
 
LLVM_ABI void dump() const
Dump this node, for debugging.
 
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
 
bool hasOneUse() const
Return true if there is exactly one use of this node.
 
iterator_range< value_op_iterator > op_values() const
 
iterator_range< use_iterator > uses()
 
SDNodeFlags getFlags() const
 
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
 
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
 
unsigned getNumOperands() const
Return the number of values used by this operation.
 
const SDValue & getOperand(unsigned Num) const
 
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
 
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
 
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
 
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
 
iterator_range< user_iterator > users()
 
user_iterator user_begin() const
Provide iteration support to walk over all users of an SDNode.
 
static use_iterator use_end()
 
Represents a use of a SDNode.
 
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
 
SDNode * getNode() const
get the SDNode which holds the desired result
 
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
 
SDValue getValue(unsigned R) const
 
EVT getValueType() const
Return the ValueType of the referenced return value.
 
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
 
const SDValue & getOperand(unsigned i) const
 
uint64_t getConstantOperandVal(unsigned i) const
 
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
 
unsigned getOpcode() const
 
unsigned getNumOperands() const
 
static SectionKind getMetadata()
 
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
 
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
 
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
 
LLVM_ABI SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
 
const TargetSubtargetInfo & getSubtarget() const
 
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
 
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
 
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
 
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
 
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
 
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
 
LLVM_ABI SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
 
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
 
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
 
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
 
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
 
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
 
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
 
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
 
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
 
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
 
const TargetLowering & getTargetLoweringInfo() const
 
static constexpr unsigned MaxRecursionDepth
 
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
 
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
 
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
 
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
 
LLVM_ABI bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
 
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
 
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
 
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
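A minimal sketch combining getSetCC and getSelect to build a signed max; the helper and its calling context are assumed, not part of this file:

static SDValue emitSMax(SelectionDAG &DAG, const SDLoc &DL, SDValue A, SDValue B) {
  EVT VT = A.getValueType();
  EVT CCVT = DAG.getTargetLoweringInfo().getSetCCResultType(
      DAG.getDataLayout(), *DAG.getContext(), VT);
  SDValue IsGT = DAG.getSetCC(DL, CCVT, A, B, ISD::SETGT); // A > B (signed)
  return DAG.getSelect(DL, VT, IsGT, A, B);                // IsGT ? A : B
}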
 
const DataLayout & getDataLayout() const
 
SDValue getTargetFrameIndex(int FI, EVT VT)
 
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
 
LLVM_ABI bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base, unsigned Bytes, int Dist) const
Return true if loads are next to each other and can be merged.
 
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
 
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
 
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
 
LLVM_ABI SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
 
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
 
LLVM_ABI SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
 
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
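As a sketch of how getLoad and getStore thread the chain (helper assumed): the store must be chained on the load's output chain, which is result 1 of the load node:

static SDValue incrementI32InMemory(SelectionDAG &DAG, const SDLoc &DL,
                                    SDValue Chain, SDValue Ptr,
                                    MachinePointerInfo PtrInfo) {
  SDValue Ld = DAG.getLoad(MVT::i32, DL, Chain, Ptr, PtrInfo);
  SDValue Inc = DAG.getNode(ISD::ADD, DL, MVT::i32, Ld,
                            DAG.getConstant(1, DL, MVT::i32));
  return DAG.getStore(Ld.getValue(1), DL, Inc, Ptr, PtrInfo); // chain on load's result 1
}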
 
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
 
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node that starts a new call frame, in which InSize bytes are set up inside ...
 
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
 
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
 
LLVM_ABI SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
 
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
 
const TargetMachine & getTarget() const
 
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
 
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
 
LLVM_ABI SDValue getValueType(EVT)
 
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
 
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
 
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
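For example (a sketch, with the predicate name assumed), an i64 value can be narrowed to i32 without changing its signed value whenever at least 33 sign bits are known:

static bool fitsInSignedI32(SelectionDAG &DAG, SDValue V) {
  // The top 32 bits merely replicate the sign, so truncation is lossless.
  return V.getValueType() == MVT::i64 && DAG.ComputeNumSignBits(V) > 32;
}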
 
LLVM_ABI SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
 
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
 
LLVM_ABI bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
 
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
 
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
 
MachineFunction & getMachineFunction() const
 
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
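A one-line sketch (DAG and DL assumed in scope): splatting the constant 1 across all four lanes of a v4i32:

SDValue One = DAG.getConstant(1, DL, MVT::i32);
SDValue Ones = DAG.getSplatBuildVector(MVT::v4i32, DL, One); // <1, 1, 1, 1>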
 
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
 
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
 
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
 
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
 
LLVM_ABI SDValue getCondCode(ISD::CondCode Cond)
 
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
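A sketch of the usual pattern (helper assumed): proving the low byte of a value is zero before treating it as a multiple of 256:

static bool lowByteIsZero(SelectionDAG &DAG, SDValue Op) {
  APInt LowBits = APInt::getLowBitsSet(Op.getScalarValueSizeInBits(), 8);
  return DAG.MaskedValueIsZero(Op, LowBits);
}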
 
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
 
LLVMContext * getContext() const
 
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
 
LLVM_ABI SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
 
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
 
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
 
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
 
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
 
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
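As a sketch (helper assumed, little-endian element order), a mask that byte-reverses each 32-bit word of a v16i8:

static SDValue byteReverseWords(SelectionDAG &DAG, const SDLoc &DL, SDValue V) {
  int Mask[16] = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12};
  return DAG.getVectorShuffle(MVT::v16i8, DL, V, DAG.getUNDEF(MVT::v16i8), Mask);
}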
 
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
 
int getMaskElt(unsigned Idx) const
 
ArrayRef< int > getMask() const
 
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
 
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
 
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
 
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
 
void push_back(const T &Elt)
 
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
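These two containers pair up in the classic visited-set worklist walk; a sketch, with Root standing in for whatever SDNode starts the traversal:

SmallPtrSet<const SDNode *, 16> Visited;
SmallVector<const SDNode *, 8> Worklist;
Worklist.push_back(Root);
while (!Worklist.empty()) {
  const SDNode *N = Worklist.pop_back_val();
  if (!Visited.insert(N).second)
    continue;                               // insert().second is false if already present
  for (const SDValue &Op : N->op_values())  // each operand is enqueued at most once
    Worklist.push_back(Op.getNode());
}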
 
This class is used to represent ISD::STORE nodes.
 
const SDValue & getBasePtr() const
 
const SDValue & getValue() const
 
StringRef - Represent a constant reference to a string, i.e.
 
constexpr size_t size() const
size - Get the string size.
 
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
 
Class to represent struct types.
 
Information about stack frame layout on the target.
 
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
 
TargetInstrInfo - Interface to description of machine instruction set.
 
Provides information about what library functions are available for the current target.
 
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
 
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
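Sketch of a typical call site inside a TargetLowering subclass constructor (the specific ops and types here are illustrative, not quoted from this file):

setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);      // legalizer expands it
setOperationAction(ISD::BSWAP, MVT::i32, Legal);           // natively supported
setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); // routed to LowerOperation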
 
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
 
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
 
virtual bool shouldExpandBuildVectorWithShuffles(EVT, unsigned DefinedValues) const
 
void setMinimumBitTestCmps(unsigned Val)
Set the minimum of the largest number of comparisons to generate BitTest.
 
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
 
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
 
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
 
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
 
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
 
const TargetMachine & getTargetMachine() const
 
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
 
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
 
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
 
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
 
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
 
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
 
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
 
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
 
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
 
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?
 
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
 
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
 
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
 
virtual Align getPrefLoopAlignment(MachineLoop *ML=nullptr) const
Return the preferred loop alignment.
 
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
 
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
 
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
 
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
 
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
 
virtual bool isJumpTableRelative() const
 
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
 
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
 
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
 
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
 
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
 
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
 
@ ZeroOrOneBooleanContent
 
@ ZeroOrNegativeOneBooleanContent
 
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
 
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
 
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
 
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
 
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
 
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
 
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
 
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
 
virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
 
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
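The load-extension and truncating-store hooks usually travel in pairs; a sketch with illustrative types, in the same constructor context as the setOperationAction example above:

setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand); // no single-instruction extending load here
setTruncStoreAction(MVT::f64, MVT::f32, Expand);            // nor a single-instruction truncating store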
 
unsigned GatherAllAliasesMaxDepth
Depth that GatherAllAliases should continue looking for chain dependencies when trying to find a more...
 
NegatibleCost
Enum that specifies when a float negation is beneficial.
 
virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
 
std::vector< ArgListEntry > ArgListTy
 
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
 
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
 
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
 
virtual MCSymbol * getFunctionEntryPointSymbol(const GlobalValue *Func, const TargetMachine &TM) const
If supported, return the function entry point symbol.
 
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
 
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
 
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
 
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
 
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
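A sketch of softening one FP operation into a libcall (Op, DL, Chain, TLI, and DAG are assumed in scope; the choice of SQRT_F128 is illustrative):

TargetLowering::MakeLibCallOptions CallOptions;
SDValue Ops[] = {Op.getOperand(0)};
std::pair<SDValue, SDValue> Call =
    TLI.makeLibCall(DAG, RTLIB::SQRT_F128, MVT::f128, Ops, CallOptions, DL, Chain);
SDValue Result = Call.first;    // the call's return value
SDValue OutChain = Call.second; // the updated chain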
 
SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, unsigned Depth=0) const
This is the helper function to return the newly negated expression only when the cost is cheaper.
 
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
 
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
 
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
 
bool isPositionIndependent() const
 
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
 
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
 
virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, const DenormalMode &Mode) const
Return a target-dependent comparison result if the input operand is suitable for use with a square ro...
 
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
 
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
 
TargetLowering(const TargetLowering &)=delete
 
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
 
virtual SDValue getSqrtResultForDenormInput(SDValue Operand, SelectionDAG &DAG) const
Return a target-dependent result if the input operand is not suitable for use with a square root esti...
 
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
 
virtual unsigned combineRepeatedFPDivisors() const
Indicate whether this target prefers to combine FDIVs with the same divisor.
 
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
 
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
 
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
 
Primary interface to the complete machine description for the target machine.
 
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
 
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
 
Reloc::Model getRelocationModel() const
Returns the code generation relocation model.
 
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
 
CodeModel::Model getCodeModel() const
Returns the code model.
 
unsigned NoInfsFPMath
NoInfsFPMath - This flag is enabled when the -enable-no-infs-fp-math flag is specified on the command...
 
unsigned PPCGenScalarMASSEntries
Enables scalar MASS conversions.
 
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
 
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.
 
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
 
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
 
static constexpr TypeSize getFixed(ScalarTy ExactSize)
 
The instances of the Type class are immutable: once they are created, they are never changed.
 
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
 
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
 
bool isVectorTy() const
True if this is an instance of VectorType.
 
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
 
@ FloatTyID
32-bit floating point type
 
@ DoubleTyID
64-bit floating point type
 
@ FP128TyID
128-bit floating point type (112-bit significand)
 
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
 
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
 
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
 
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
 
bool isFunctionTy() const
True if this is an instance of FunctionType.
 
bool isIntegerTy() const
True if this is an instance of IntegerType.
 
A Use represents the edge between a Value definition and its users.
 
User * getUser() const
Returns the User that contains this Use.
 
Value * getOperand(unsigned i) const
 
unsigned getNumOperands() const
 
LLVM Value Representation.
 
Type * getType() const
All values are typed, get the type of this value.
 
bool hasOneUse() const
Return true if there is exactly one use of this value.
 
const ParentTy * getParent() const
 
self_iterator getIterator()
 
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
 
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
 
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
 
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
 
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
 
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
 
@ Fast
Attempts to make calls as fast as possible (e.g.
 
@ C
The default llvm calling convention, compatible with C.
 
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
 
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
 
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
 
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
 
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
 
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
 
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
 
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
 
@ BSWAP
Byte Swap and Counting operators.
 
@ ADD
Simple integer binary arithmetic operators.
 
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
 
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
 
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
 
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
 
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
 
@ FADD
Simple binary floating point operators.
 
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
 
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
 
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
 
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
 
@ SIGN_EXTEND
Conversion operators.
 
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
 
@ SSUBO
Same for subtraction.
 
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
 
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
 
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
 
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
 
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
 
@ TargetGlobalAddress
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
 
@ GET_ROUNDING
Returns the current rounding mode: -1 Undefined, 0 Round to 0, 1 Round to nearest (ties to even), 2 Round to ...
 
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
 
@ SHL
Shift and rotation operations.
 
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
 
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
 
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
 
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
 
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
 
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
 
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
 
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
 
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
 
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
 
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
 
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
 
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
 
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
 
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
 
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
 
@ AND
Bitwise operators - logical and, logical or, logical xor.
 
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
 
@ STRICT_FADD
Constrained versions of the binary floating point operators.
 
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
 
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
 
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
 
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
 
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
 
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
 
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
 
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
 
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
 
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
 
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
 
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
 
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
 
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
 
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
 
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
 
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
 
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
 
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
 
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
 
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
 
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
 
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
 
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
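These predicates gate combines; a sketch, with N assumed to be the node under inspection:

if (ISD::isNormalLoad(N)) {
  auto *LD = cast<LoadSDNode>(N);
  SDValue Chain = LD->getChain();  // safe: no extension, no pre/post-increment
  SDValue Ptr = LD->getBasePtr();
  // rebuild or fold the load using Chain and Ptr here
}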
 
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
 
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
 
@ Bitcast
Perform the operation on a different, but equivalently sized type.
 
@ MO_TLSLDM_FLAG
MO_TLSLDM_FLAG - on AIX the ML relocation type is only valid for a reference to a TOC symbol from the...
 
@ MO_PIC_LO_FLAG
MO_PIC_LO_FLAG = MO_PIC_FLAG | MO_LO.
 
@ MO_TPREL_PCREL_FLAG
MO_TPREL_PCREL_FLAG = MO_PCREL_FLAG | MO_TPREL_FLAG.
 
@ MO_GOT_TPREL_PCREL_FLAG
MO_GOT_TPREL_PCREL_FLAG - A combination of flags, if these bits are set they should produce the reloc...
 
@ MO_GOT_PCREL_FLAG
MO_GOT_PCREL_FLAG = MO_PCREL_FLAG | MO_GOT_FLAG.
 
@ MO_TLSGDM_FLAG
MO_TLSGDM_FLAG - If this bit is set the symbol reference is relative to the region handle of TLS Gene...
 
@ MO_PCREL_FLAG
MO_PCREL_FLAG - If this bit is set, the symbol reference is relative to the current instruction addre...
 
@ MO_TLSLD_FLAG
MO_TLSLD_FLAG - If this bit is set the symbol reference is relative to TLS Local Dynamic model.
 
@ MO_TLS_PCREL_FLAG
MO_TLS_PCREL_FLAG = MO_PCREL_FLAG | MO_TLS.
 
@ MO_PLT
On PPC, the 12 bits are not enough for all target operand flags.
 
@ MO_TLS
Symbol for VK_TLS fixup attached to an ADD instruction.
 
@ MO_TPREL_FLAG
MO_TPREL_FLAG - If this bit is set, the symbol reference is relative to the thread pointer and the sy...
 
@ MO_LO
MO_LO, MO_HA - lo16(symbol) and ha16(symbol)
 
@ MO_GOT_TLSLD_PCREL_FLAG
MO_GOT_TLSLD_PCREL_FLAG - A combination of flags, if these bits are set they should produce the reloc...
 
@ MO_PIC_HA_FLAG
MO_PIC_HA_FLAG = MO_PIC_FLAG | MO_HA.
 
@ MO_TLSGD_FLAG
MO_TLSGD_FLAG - If this bit is set the symbol reference is relative to TLS General Dynamic model for ...
 
@ MO_GOT_TLSGD_PCREL_FLAG
MO_GOT_TLSGD_PCREL_FLAG - A combination of flags, if these bits are set they should produce the reloc...
 
@ MO_PIC_FLAG
MO_PIC_FLAG - If this bit is set, the symbol reference is relative to the function's picbase,...
 
@ SEXT_LD_SPLAT
VSRC, CHAIN = SEXT_LD_SPLAT, CHAIN, Ptr - a splatting load memory that sign-extends.
 
@ FCTIDUZ
Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for unsigned integers with round ...
 
@ ADDI_TLSGD_L_ADDR
G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSGD_L and GET_TLS_ADDR unti...
 
@ FSQRT
Square root instruction.
 
@ STRICT_FCFID
Constrained integer-to-floating-point conversion instructions.
 
@ DYNALLOC
The following two target-specific nodes are used for calls through function pointers in the 64-bit SV...
 
@ COND_BRANCH
CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This corresponds to the COND_BRANCH pseudo ...
 
@ TLSLD_AIX
[GP|G8]RC = TLSLD_AIX, TOC_ENTRY(module handle) Op that requires a single input of the module handle ...
 
@ CALL_RM
The variants that implicitly define rounding mode for calls with strictfp semantics.
 
@ STORE_VEC_BE
CHAIN = STORE_VEC_BE CHAIN, VSRC, Ptr - Occurs only for little endian.
 
@ BDNZ
CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based loops.
 
@ MTVSRZ
Direct move from a GPR to a VSX register (zero)
 
@ SRL
These nodes represent PPC shifts.
 
@ VECINSERT
VECINSERT - The PPC vector insert instruction.
 
@ LXSIZX
GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an integer smaller than 64 bits into ...
 
@ FNMSUB
FNMSUB - Negated multiply-subtract instruction.
 
@ FCTIDZ
FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 operand, producing an f64 value...
 
@ GET_TLS_ADDR
x3 = GET_TLS_ADDR x3, Symbol - For the general-dynamic TLS model, produces a call to __tls_get_addr(s...
 
@ XXSPLTI32DX
XXSPLTI32DX - The PPC XXSPLTI32DX instruction.
 
@ ANDI_rec_1_EQ_BIT
i1 = ANDI_rec_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the eq or gt bit of CR0 after ex...
 
@ FRE
Reciprocal estimate instructions (unary FP ops).
 
@ ADDIS_GOT_TPREL_HA
G8RC = ADDIS_GOT_TPREL_HA x2, Symbol - Used by the initial-exec TLS model, produces an ADDIS8 instruc...
 
@ STORE_COND
CHAIN,Glue = STORE_COND CHAIN, GPR, Ptr The store conditional instruction ST[BHWD]ARX that produces a...
 
@ SINT_VEC_TO_FP
Extract a subvector from signed integer vector and convert to FP.
 
@ EXTRACT_SPE
Extract SPE register component, second argument is high or low.
 
@ XXSWAPD
VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little endian.
 
@ ADDI_TLSLD_L_ADDR
G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSLD_L and GET_TLSLD_ADDR un...
 
@ ATOMIC_CMP_SWAP_8
ATOMIC_CMP_SWAP - the exact same as the target-independent nodes except they ensure that the compare ...
 
@ ST_VSR_SCAL_INT
Store scalar integers from VSR.
 
@ VCMP
RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* instructions.
 
@ BCTRL
CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a BCTRL instruction.
 
@ BUILD_SPE64
BUILD_SPE64 and EXTRACT_SPE are analogous to BUILD_PAIR and EXTRACT_ELEMENT but take f64 arguments in...
 
@ LFIWZX
GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point load which zero-extends from a 32-bit inte...
 
@ RET_GLUE
Return with a glue operand, matched by 'blr'.
 
@ SCALAR_TO_VECTOR_PERMUTED
PowerPC instructions that have SCALAR_TO_VECTOR semantics tend to place the value into the least sign...
 
@ EXTRACT_VSX_REG
EXTRACT_VSX_REG = Extract one of the underlying vsx registers of an accumulator or pair register.
 
@ STXSIX
STXSIX - The STXSI[bh]X instruction.
 
@ MAT_PCREL_ADDR
MAT_PCREL_ADDR = Materialize a PC Relative address.
 
@ MFOCRF
R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
 
@ XXSPLT
XXSPLT - The PPC VSX splat instructions.
 
@ TOC_ENTRY
GPRC = TOC_ENTRY GA, TOC Loads the entry for GA from the TOC, where the TOC base is given by the last...
 
@ XXPERMDI
XXPERMDI - The PPC XXPERMDI instruction.
 
@ ADDIS_DTPREL_HA
G8RC = ADDIS_DTPREL_HA x3, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction t...
 
@ ADD_TLS
G8RC = ADD_TLS G8RReg, Symbol - Can be used by the initial-exec and local-exec TLS models,...
 
@ MTVSRA
Direct move from a GPR to a VSX register (algebraic)
 
@ VADD_SPLAT
VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded during instruction selection to optimi...
 
@ PPC32_GOT
GPRC = address of GLOBAL_OFFSET_TABLE.
 
@ ADDI_DTPREL_L
G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction ...
 
@ BCTRL_LOAD_TOC
CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl instruction and the TOC reload r...
 
@ PPC32_PICGOT
GPRC = address of GLOBAL_OFFSET_TABLE.
 
@ FCFID
FCFID - The FCFID instruction, taking an f64 operand and producing an f64 value containing the FP re...
 
@ CR6SET
ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
 
@ LBRX
GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a byte-swapping load instruction.
 
@ GET_TLS_MOD_AIX
x3 = GET_TLS_MOD_AIX _$TLSML - For the AIX local-dynamic TLS model, produces a call to ....
 
@ SETBC
SETBC - The ISA 3.1 (P10) SETBC instruction.
 
@ LD_VSX_LH
VSRC, CHAIN = LD_VSX_LH CHAIN, Ptr - This is a floating-point load of a v2f32 value into the lower ha...
 
@ PROBED_ALLOCA
To avoid stack clash, allocation is performed by block and each block is probed.
 
@ XXMFACC
XXMFACC = This corresponds to the xxmfacc instruction.
 
@ ADDIS_TLSGD_HA
G8RC = ADDIS_TLSGD_HA x2, Symbol - For the general-dynamic TLS model, produces an ADDIS8 instruction ...
 
@ SETBCR
SETBCR - The ISA 3.1 (P10) SETBCR instruction.
 
@ ACC_BUILD
ACC_BUILD = Build an accumulator register from 4 VSX registers.
 
@ GlobalBaseReg
The result of the mflr at function entry, used for PIC code.
 
@ LXVD2X
VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
 
@ XSMAXC
XSMAXC[DQ]P, XSMINC[DQ]P - C-type min/max instructions.
 
@ CALL
CALL - A direct function call.
 
@ MTCTR
CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a MTCTR instruction.
 
@ TC_RETURN
TC_RETURN - A tail call return.
 
@ STFIWX
STFIWX - The STFIWX instruction.
 
@ LD_SPLAT
VSRC, CHAIN = LD_SPLAT, CHAIN, Ptr - a splatting load memory instructions such as LXVDSX,...
 
@ VCMP_rec
RESVEC, OUTFLAG = VCMP_rec(LHS, RHS, OPC) - Represents one of the altivec VCMP*_rec instructions.
 
@ MFFS
F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
 
@ VSRQ
VSRQ - The ISA 3.1 (P10) Vector Shift right quadword instruction.
 
@ PADDI_DTPREL
G8RC = PADDI_DTPREL x3, Symbol - For the pc-rel based local-dynamic TLS model, produces a PADDI8 inst...
 
@ BUILD_FP128
Direct move of 2 consecutive GPR to a VSX register.
 
@ VEXTS
VEXTS, ByteWidth - takes an input in VSFRC and produces an output in VSFRC that is sign-extended from...
 
@ TLS_LOCAL_EXEC_MAT_ADDR
TLS_LOCAL_EXEC_MAT_ADDR = Materialize an address for TLS global address when using local exec access ...
 
@ VPERM
VPERM - The PPC VPERM Instruction.
 
@ ADDIS_TLSLD_HA
G8RC = ADDIS_TLSLD_HA x2, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction th...
 
@ XXSPLTI_SP_TO_DP
XXSPLTI_SP_TO_DP - The PPC VSX splat instructions for immediates for converting immediate single prec...
 
@ GET_TLSLD_ADDR
x3 = GET_TLSLD_ADDR x3, Symbol - For the local-dynamic TLS model, produces a call to __tls_get_addr(s...
 
@ ADDI_TLSGD_L
x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS model, produces an ADDI8 instruction t...
 
@ DYNAREAOFFSET
This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to compute an offset from native ...
 
@ PAIR_BUILD
PAIR_BUILD = Build a vector pair register from 2 VSX registers.
 
@ STRICT_FADDRTZ
Constrained floating point add in round-to-zero mode.
 
@ FTSQRT
Test instruction for software square root.
 
@ FP_EXTEND_HALF
FP_EXTEND_HALF(VECTOR, IDX) - Custom extend upper (IDX=0) half or lower (IDX=1) half of v4f32 to v2f6...
 
@ CMPB
The CMPB instruction (takes two operands of i32 or i64).
 
@ VECSHL
VECSHL - The PPC vector shift left instruction.
 
@ ADDI_TLSLD_L
x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction tha...
 
@ FADDRTZ
F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding towards zero.
 
@ ZEXT_LD_SPLAT
VSRC, CHAIN = ZEXT_LD_SPLAT, CHAIN, Ptr - a splatting load memory that zero-extends.
 
@ SRA_ADDZE
The combination of sra[wd]i and addze used to implement signed integer division by a power of 2.
 
@ EXTSWSLI
EXTSWSLI = The PPC extswsli instruction, which does an extend-sign word and shift left immediate.
 
@ STXVD2X
CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
 
@ ADDC
These nodes represent PPC arithmetic operations with carry.
 
@ TLSGD_AIX
GPRC = TLSGD_AIX, TOC_ENTRY, TOC_ENTRY G8RC = TLSGD_AIX, TOC_ENTRY, TOC_ENTRY Op that combines two re...
 
@ UINT_VEC_TO_FP
Extract a subvector from unsigned integer vector and convert to FP.
 
@ GET_TPOINTER
x3 = GET_TPOINTER - Used for the local- and initial-exec TLS model on 32-bit AIX, produces a call to ...
 
@ LXVRZX
LXVRZX - Load VSX Vector Rightmost and Zero Extend This node represents v1i128 BUILD_VECTOR of a zero...
 
@ FCFIDU
Newer FCFID[US] integer-to-floating-point conversion instructions for unsigned integers and single-pr...
 
@ FSEL
FSEL - Traditional three-operand fsel node.
 
@ SWAP_NO_CHAIN
An SDNode for swaps that are not associated with any loads/stores and thereby have no chain.
 
@ LOAD_VEC_BE
VSRC, CHAIN = LOAD_VEC_BE CHAIN, Ptr - Occurs only for little endian.
 
@ LFIWAX
GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point load which sign-extends from a 32-bit inte...
 
@ LD_GOT_TPREL_L
G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec TLS model, produces a LD instruction ...
 
@ MFVSR
Direct move from a VSX register to a GPR.
 
@ TLS_DYNAMIC_MAT_PCREL_ADDR
TLS_DYNAMIC_MAT_PCREL_ADDR = Materialize a PC Relative address for TLS global address when using dyna...
 
@ Hi
Hi/Lo - These represent the high and low 16-bit parts of a global address respectively.
 
Define some predicates that are used for node matching.
 
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
 
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG)
get_VSPLTI_elt - If this is a build_vector of constants which can be formed by using a vspltis[bhw] i...
 
bool isXXBRDShuffleMask(ShuffleVectorSDNode *N)
isXXBRDShuffleMask - Return true if this is a shuffle mask suitable for a XXBRD instruction.
 
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo)
 
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for a VRGH* instruction with the ...
 
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a VPKUDUM instruction.
 
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for a VMRGEW or VMRGOW instructi...
 
bool isXXBRQShuffleMask(ShuffleVectorSDNode *N)
isXXBRQShuffleMask - Return true if this is a shuffle mask suitable for a XXBRQ instruction.
 
bool isXXBRWShuffleMask(ShuffleVectorSDNode *N)
isXXBRWShuffleMask - Return true if this is a shuffle mask suitable for a XXBRW instruction.
 
bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable for a XXPERMDI instruction.
 
bool isXXBRHShuffleMask(ShuffleVectorSDNode *N)
isXXBRHShuffleMask - Return true if this is a shuffle mask suitable for a XXBRH instruction.
 
unsigned getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize, SelectionDAG &DAG)
getSplatIdxForPPCMnemonics - Return the splat index as a value that is appropriate for PPC mnemonics ...
 
bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable for a XXSLDWI instruction.
 
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift amount, otherwise return -1.
 
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for a VRGL* instruction with the ...
 
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, unsigned &InsertAtByte, bool &Swap, bool IsLE)
isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by the XXINSERTW instruction intr...
 
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize)
isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand specifies a splat of a singl...
 
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a VPKUWUM instruction.
 
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a VPKUHUM instruction.
 
@ Define
Register definition.
 
Invariant opcodes: All instruction sets have these as their low opcodes.
 
@ XTY_ER
External reference.
 
initializer< Ty > init(const Ty &Val)
 
constexpr uint64_t PointerSize
aarch64 pointer size.
 
@ User
could "use" a pointer
 
NodeAddr< UseNode * > Use
 
NodeAddr< NodeBase * > Node
 
NodeAddr< FuncNode * > Func
 
This is an optimization pass for GlobalISel generic memory operations.
 
 
static bool isIndirectCall(const MachineInstr &MI)
 
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
 
bool checkConvertToNonDenormSingle(APFloat &ArgAPFloat)
 
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
 
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
 
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
 
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
 
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
 
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
 
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
 
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
 
bool isIntS16Immediate(SDNode *N, int16_t &Imm)
isIntS16Immediate - This method tests to see if the node is either a 32-bit or 64-bit immediate,...
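A sketch of the usual call site (N assumed to be an ADD under inspection): fold the right operand into a 16-bit displacement only when it provably fits:

int16_t Imm = 0;
if (N->getOpcode() == ISD::ADD &&
    isIntS16Immediate(N->getOperand(1).getNode(), Imm)) {
  // Imm is encodable directly in a D-form displacement field
}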
 
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
 
static bool isRunOfOnes64(uint64_t Val, unsigned &MB, unsigned &ME)
 
bool isa_and_nonnull(const Y &Val)
 
bool RetCC_PPC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
 
bool CC_PPC64_ELF(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
 
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant, stopping at the first 1.
 
unsigned M1(unsigned Val)
 
bool isReleaseOrStronger(AtomicOrdering AO)
 
auto dyn_cast_or_null(const Y &Val)
 
constexpr bool has_single_bit(T Value) noexcept
 
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
 
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
 
bool convertToNonDenormSingle(APInt &ArgAPInt)
 
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
 
bool CC_PPC32_SVR4_ByVal(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
 
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
 
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
 
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
 
bool CC_PPC32_SVR4(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
 
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
 
bool RetCC_PPC_Cold(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
 
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
 
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
 
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
 
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
 
@ Success
The lock was released successfully.
 
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
 
AtomicOrdering
Atomic ordering for LLVM's memory model.
 
bool isIntS34Immediate(SDNode *N, int64_t &Imm)
isIntS34Immediate - This method tests whether the value of the given node can be accurately represented as a sign ...
 
To bit_cast(const From &from) noexcept
 
@ Mul
Product of integers.
 
@ And
Bitwise or logical AND of integers.
 
@ Sub
Subtraction of integers.
 
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
 
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
 
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
 
DWARFExpression::Operation Op
 
bool isPhysRegUsedAfter(Register Reg, MachineBasicBlock::iterator MBI)
Check if physical register Reg is used after MBI.
 
unsigned M0(unsigned Val)
 
ArrayRef(const T &OneElt) -> ArrayRef< T >
 
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
 
bool isAcquireOrStronger(AtomicOrdering AO)
 
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
 
constexpr int32_t SignExtend32(uint32_t X)
Sign-extend the number in the bottom B bits of X to a 32-bit integer.
 
constexpr unsigned BitWidth
 
bool CC_PPC32_SVR4_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
 
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
 
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
 
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
 
static bool isRunOfOnes(unsigned Val, unsigned &MB, unsigned &ME)
Returns true iff Val consists of one contiguous run of 1s with any number of 0s on either side.
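For instance, 0x0FF0 passes the check: in the MSB-0 bit numbering used by PPC rotate-and-mask instructions, its single run of 1s spans bits 20 through 27 of a 32-bit word:

unsigned MB = 0, ME = 0;
bool IsRun = isRunOfOnes(0x0FF0u, MB, ME); // IsRun == true, MB == 20, ME == 27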
 
@ Increment
Incrementally increasing token ID.
 
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
 
constexpr bool isShiftedUInt(uint64_t x)
Checks if an unsigned integer is an N bit number shifted left by S.
 
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
 
static const unsigned PerfectShuffleTable[6561+1]
 
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
 
This is used by foldLoadsRecursive() to capture a Root Load node which is of type or(load,...
 
This struct is a compact representation of a valid (non-zero power of two) alignment.
 
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
 
Represent subnormal handling kind for floating point instruction inputs and outputs.
 
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
 
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
 
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
 
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
 
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
 
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
 
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
 
uint64_t getScalarSizeInBits() const
 
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
 
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
 
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
 
LLVM_ABI std::string getEVTString() const
This function returns the value type as a string, e.g. "i32".
 
bool isVector() const
Return true if this is a vector value type.
 
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
 
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
 
EVT getVectorElementType() const
Given a vector type, return the type of each element.
 
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
 
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
 
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
 
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
 
bool isInteger() const
Return true if this is an integer or a vector integer type.
 
unsigned getByValSize() const
 
void setByValSize(unsigned S)
 
Align getNonZeroByValAlign() const
 
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
 
bool isConstant() const
Returns true if we know the value of all bits.
 
void resetAll()
Resets the known state of all bits.
 
const APInt & getConstant() const
Returns the value when all bits have a known value.
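A sketch of the standard fold (combine context assumed, inside a routine returning SDValue): when every bit of Op is known, replace it with the constant outright:

KnownBits Known = DAG.computeKnownBits(Op);
if (Known.isConstant())
  return DAG.getConstant(Known.getConstant(), DL, Op.getValueType());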
 
This class contains a discriminated union of information about pointers in memory operands,...
 
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
 
MachinePointerInfo getWithOffset(int64_t O) const
 
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
 
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
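Combined with CreateStackTemporary, this gives the standard spill-through-memory idiom; a sketch (helper assumed):

static SDValue spillToStack(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
                            SDValue Val) {
  EVT VT = Val.getValueType();
  SDValue Slot = DAG.CreateStackTemporary(VT.getStoreSize(), DAG.getEVTAlign(VT));
  int FI = cast<FrameIndexSDNode>(Slot)->getIndex();
  return DAG.getStore(Chain, DL, Val, Slot,
                      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
}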
 
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
 
Structure that collects some common arguments that get passed around between the functions for call l...
 
const CallingConv::ID CallConv
 
These are IR-level optimization flags that may be propagated to SDNodes.
 
void setNoFPExcept(bool b)
 
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
 
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
 
This contains information for each constraint that we are lowering.
 
This structure contains all information that is necessary for lowering calls.
 
CallLoweringInfo & setIsPostTypeLegalization(bool Value=true)
 
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
 
SmallVector< ISD::InputArg, 32 > Ins
 
CallLoweringInfo & setZExtResult(bool Value=true)
 
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
 
CallLoweringInfo & setTailCall(bool Value=true)
 
CallLoweringInfo & setSExtResult(bool Value=true)
 
SmallVector< ISD::OutputArg, 32 > Outs
 
SmallVector< SDValue, 32 > OutVals
 
CallLoweringInfo & setChain(SDValue InChain)
 
bool isBeforeLegalizeOps() const
 
bool isAfterLegalizeDAG() const
 
LLVM_ABI void AddToWorklist(SDNode *N)
 
bool isBeforeLegalize() const
 
LLVM_ABI SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
 
This structure is used to pass arguments to makeLibCall function.