25#include "llvm/IR/IntrinsicsS390.h"
34#define DEBUG_TYPE "systemz-lower"
40 : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
41 Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
91 if (Subtarget.hasHighWord())
97 if (Subtarget.hasVector()) {
104 if (Subtarget.hasVectorEnhancements1())
109 if (Subtarget.hasVector()) {
118 if (Subtarget.hasVector())
145 for (
unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
146 I <= MVT::LAST_FP_VALUETYPE;
172 for (
unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
173 I <= MVT::LAST_INTEGER_VALUETYPE;
204 if (Subtarget.hasPopulationCount())
223 if (!Subtarget.hasFPExtension())
229 if (Subtarget.hasFPExtension())
234 if (Subtarget.hasFPExtension())
303 {MVT::i8, MVT::i16, MVT::i32},
Legal);
305 {MVT::i8, MVT::i16},
Legal);
322 if (!Subtarget.hasFPExtension()) {
335 if (Subtarget.hasMiscellaneousExtensions3()) {
431 if (VT != MVT::v2i64)
437 if (Subtarget.hasVectorEnhancements1())
468 if (Subtarget.hasVector()) {
490 if (Subtarget.hasVectorEnhancements2()) {
511 for (
unsigned I = MVT::FIRST_FP_VALUETYPE;
512 I <= MVT::LAST_FP_VALUETYPE;
520 if (Subtarget.hasFPExtension()) {
548 if (Subtarget.hasFPExtension()) {
559 if (Subtarget.hasVector()) {
605 if (Subtarget.hasVectorEnhancements1()) {
612 if (Subtarget.hasVectorEnhancements1()) {
666 for (
auto VT : { MVT::f32, MVT::f64, MVT::f128,
667 MVT::v4f32, MVT::v2f64 }) {
676 if (!Subtarget.hasVectorEnhancements1()) {
682 if (Subtarget.hasVectorEnhancements1())
692 if (Subtarget.hasVectorEnhancements1()) {
704 if (!Subtarget.hasVector()) {
759 struct RTLibCallMapping {
763 static RTLibCallMapping RTLibCallCommon[] = {
764#define HANDLE_LIBCALL(code, name) {RTLIB::code, name},
765#include "ZOSLibcallNames.def"
767 for (
auto &E : RTLibCallCommon)
773 return Subtarget.hasSoftFloat();
795 return Subtarget.hasVectorEnhancements1();
808 if (!Subtarget.hasVector() ||
809 (isFP128 && !Subtarget.hasVectorEnhancements1()))
831 if (SplatBitSize > 64)
837 if (isInt<16>(SignedValue)) {
846 if (
TII->isRxSBGMask(
Value, SplatBitSize, Start,
End)) {
868 uint64_t Lower = SplatUndefZ & maskTrailingOnes<uint64_t>(LowerBits);
869 uint64_t Upper = SplatUndefZ & maskLeadingOnes<uint64_t>(UpperBits);
876 uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
877 return tryValue(SplatBitsZ | Middle);
892 unsigned HalfSize = Width / 2;
897 if (HighValue != LowValue || 8 > HalfSize)
900 SplatBits = HighValue;
904 SplatBitSize = Width;
912 BVN->
isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
916 BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
921 bool ForCodeSize)
const {
923 if (Imm.isZero() || Imm.isNegZero())
955 if (Subtarget.hasInterlockedAccess1() &&
969 return isInt<32>(Imm) || isUInt<32>(Imm);
974 return isUInt<32>(Imm) || isUInt<32>(-Imm);
996 LongDisplacement(LongDispl), IndexReg(IdxReg) {}
1019 switch (II->getIntrinsicID()) {
1021 case Intrinsic::memset:
1022 case Intrinsic::memmove:
1023 case Intrinsic::memcpy:
1028 if (isa<LoadInst>(
I) &&
I->hasOneUse()) {
1029 auto *SingleUser = cast<Instruction>(*
I->user_begin());
1030 if (SingleUser->getParent() ==
I->getParent()) {
1031 if (isa<ICmpInst>(SingleUser)) {
1032 if (
auto *
C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
1033 if (
C->getBitWidth() <= 64 &&
1034 (isInt<16>(
C->getSExtValue()) || isUInt<16>(
C->getZExtValue())))
1037 }
else if (isa<StoreInst>(SingleUser))
1041 }
else if (
auto *StoreI = dyn_cast<StoreInst>(
I)) {
1042 if (
auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
1043 if (LoadI->hasOneUse() && LoadI->getParent() ==
I->getParent())
1048 if (HasVector && (isa<LoadInst>(
I) || isa<StoreInst>(
I))) {
1056 Type *MemAccessTy = (isa<LoadInst>(
I) ?
I->getType() :
1057 I->getOperand(0)->getType());
1059 bool IsVectorAccess = MemAccessTy->
isVectorTy();
1063 if (!IsVectorAccess && isa<StoreInst>(
I)) {
1064 Value *DataOp =
I->getOperand(0);
1065 if (isa<ExtractElementInst>(DataOp))
1066 IsVectorAccess =
true;
1071 if (!IsVectorAccess && isa<LoadInst>(
I) &&
I->hasOneUse()) {
1072 User *LoadUser = *
I->user_begin();
1073 if (isa<InsertElementInst>(LoadUser))
1074 IsVectorAccess =
true;
1077 if (IsFPAccess || IsVectorAccess)
1106 return AM.
Scale == 0;
1113 std::vector<EVT> &MemOps,
unsigned Limit,
const MemOp &
Op,
unsigned DstAS,
1114 unsigned SrcAS,
const AttributeList &FuncAttributes)
const {
1115 const int MVCFastLen = 16;
1117 if (Limit != ~
unsigned(0)) {
1119 if (
Op.isMemcpy() &&
Op.allowOverlap() &&
Op.size() <= MVCFastLen)
1121 if (
Op.isMemset() &&
Op.size() - 1 <= MVCFastLen)
1123 if (
Op.isZeroMemset())
1128 SrcAS, FuncAttributes);
1133 return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;
1137 if (!FromType->isIntegerTy() || !ToType->
isIntegerTy())
1139 unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();
1141 return FromBits > ToBits;
1149 return FromBits > ToBits;
1158 if (Constraint.
size() == 1) {
1159 switch (Constraint[0]) {
1185 }
else if (Constraint.
size() == 2 && Constraint[0] ==
'Z') {
1186 switch (Constraint[1]) {
1202 const char *constraint)
const {
1204 Value *CallOperandVal =
info.CallOperandVal;
1207 if (!CallOperandVal)
1211 switch (*constraint) {
1229 if (Subtarget.hasVector())
1235 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
1236 if (isUInt<8>(
C->getZExtValue()))
1241 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
1242 if (isUInt<12>(
C->getZExtValue()))
1247 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
1248 if (isInt<16>(
C->getSExtValue()))
1253 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
1254 if (isInt<20>(
C->getSExtValue()))
1259 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
1260 if (
C->getZExtValue() == 0x7fffffff)
1270static std::pair<unsigned, const TargetRegisterClass *>
1272 const unsigned *Map,
unsigned Size) {
1273 assert(*(Constraint.
end()-1) ==
'}' &&
"Missing '}'");
1274 if (isdigit(Constraint[2])) {
1279 return std::make_pair(Map[
Index], RC);
1281 return std::make_pair(0U,
nullptr);
1284std::pair<unsigned, const TargetRegisterClass *>
1287 if (Constraint.
size() == 1) {
1289 switch (Constraint[0]) {
1294 return std::make_pair(0U, &SystemZ::GR64BitRegClass);
1296 return std::make_pair(0U, &SystemZ::GR128BitRegClass);
1297 return std::make_pair(0U, &SystemZ::GR32BitRegClass);
1301 return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
1302 else if (VT == MVT::i128)
1303 return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
1304 return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
1307 return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
1312 return std::make_pair(0U, &SystemZ::FP64BitRegClass);
1314 return std::make_pair(0U, &SystemZ::FP128BitRegClass);
1315 return std::make_pair(0U, &SystemZ::FP32BitRegClass);
1320 if (Subtarget.hasVector()) {
1322 return std::make_pair(0U, &SystemZ::VR32BitRegClass);
1324 return std::make_pair(0U, &SystemZ::VR64BitRegClass);
1325 return std::make_pair(0U, &SystemZ::VR128BitRegClass);
1334 auto getVTSizeInBits = [&VT]() {
1342 if (Constraint[1] ==
'r') {
1343 if (getVTSizeInBits() == 32)
1346 if (getVTSizeInBits() == 128)
1352 if (Constraint[1] ==
'f') {
1354 return std::make_pair(
1356 if (getVTSizeInBits() == 32)
1359 if (getVTSizeInBits() == 128)
1365 if (Constraint[1] ==
'v') {
1366 if (!Subtarget.hasVector())
1367 return std::make_pair(
1369 if (getVTSizeInBits() == 32)
1372 if (getVTSizeInBits() == 64)
1399 const Constant *PersonalityFn)
const {
1404 const Constant *PersonalityFn)
const {
1412 if (Constraint.
size() == 1) {
1413 switch (Constraint[0]) {
1415 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op))
1416 if (isUInt<8>(
C->getZExtValue()))
1418 Op.getValueType()));
1422 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op))
1423 if (isUInt<12>(
C->getZExtValue()))
1425 Op.getValueType()));
1429 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op))
1430 if (isInt<16>(
C->getSExtValue()))
1432 Op.getValueType()));
1436 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op))
1437 if (isInt<20>(
C->getSExtValue()))
1439 Op.getValueType()));
1443 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op))
1444 if (
C->getZExtValue() == 0x7fffffff)
1446 Op.getValueType()));
1457#include "SystemZGenCallingConv.inc"
1461 static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
1467 Type *ToType)
const {
1530 if (BitCastToType == MVT::v2i64)
1555 MVT::Untyped,
Hi,
Lo);
1579 unsigned NumParts,
MVT PartVT, std::optional<CallingConv::ID>
CC)
const {
1581 if (ValueVT.
getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
1592 MVT PartVT,
EVT ValueVT, std::optional<CallingConv::ID>
CC)
const {
1593 if (ValueVT.
getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
1620 unsigned NumFixedGPRs = 0;
1621 unsigned NumFixedFPRs = 0;
1622 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
1635 RC = &SystemZ::GR32BitRegClass;
1639 RC = &SystemZ::GR64BitRegClass;
1643 RC = &SystemZ::FP32BitRegClass;
1647 RC = &SystemZ::FP64BitRegClass;
1651 RC = &SystemZ::FP128BitRegClass;
1659 RC = &SystemZ::VR128BitRegClass;
1688 ArgValue = DAG.
getLoad(LocVT,
DL, Chain, FIN,
1699 unsigned ArgIndex = Ins[
I].OrigArgIndex;
1700 assert (Ins[
I].PartOffset == 0);
1701 while (
I + 1 != E && Ins[
I + 1].OrigArgIndex == ArgIndex) {
1703 unsigned PartOffset = Ins[
I + 1].PartOffset;
1726 int64_t VarArgOffset = CCInfo.
getStackSize() + Regs->getCallFrameSize();
1744 int64_t RegSaveOffset =
1759 &SystemZ::FP64BitRegClass);
1777 MRI.addLiveIn(Regs->getADARegister(), ADAvReg);
1789 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
1796 if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
1798 if (Outs[
I].Flags.isSwiftSelf() || Outs[
I].Flags.isSwiftError())
1805 unsigned Offset,
bool LoadAdr =
false) {
1828 bool LoadAddr =
false;
1829 const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV);
1850 unsigned ADADelta = 0;
1851 unsigned EPADelta = 8;
1856 if (
auto *
G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1857 bool IsInternal = (
G->getGlobal()->hasInternalLinkage() ||
1858 G->getGlobal()->hasPrivateLinkage());
1873 }
else if (
auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1935 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
1941 unsigned ArgIndex = Outs[
I].OrigArgIndex;
1943 if (
I + 1 != E && Outs[
I + 1].OrigArgIndex == ArgIndex) {
1945 Type *OrigArgType = CLI.
Args[Outs[
I].OrigArgIndex].Ty;
1951 SlotVT = Outs[
I].VT;
1954 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
1960 assert (Outs[
I].PartOffset == 0);
1961 while (
I + 1 != E && Outs[
I + 1].OrigArgIndex == ArgIndex) {
1962 SDValue PartValue = OutVals[
I + 1];
1963 unsigned PartOffset = Outs[
I + 1].PartOffset;
1970 SlotVT.
getStoreSize()) &&
"Not enough space for argument part!");
1973 ArgValue = SpillSlot;
1990 if (!StackPtr.getNode())
2012 RegsToPass.
push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));
2018 if (!MemOpChains.
empty())
2031 ->getAddressOfCalleeRegister();
2034 Callee = DAG.
getRegister(CalleeReg, Callee.getValueType());
2039 if (
auto *
G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2042 }
else if (
auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2045 }
else if (IsTailCall) {
2048 Callee = DAG.
getRegister(SystemZ::R1D, Callee.getValueType());
2053 for (
unsigned I = 0, E = RegsToPass.
size();
I != E; ++
I) {
2055 RegsToPass[
I].second, Glue);
2066 for (
unsigned I = 0, E = RegsToPass.
size();
I != E; ++
I)
2068 RegsToPass[
I].second.getValueType()));
2072 const uint32_t *Mask =
TRI->getCallPreservedMask(MF, CallConv);
2073 assert(Mask &&
"Missing call preserved mask for calling convention");
2097 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);
2101 for (
unsigned I = 0, E = RetLocs.
size();
I != E; ++
I) {
2123 bool DoesNotReturn,
bool IsReturnValueUsed)
const {
2125 Args.reserve(Ops.
size());
2130 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.
getContext());
2133 Args.push_back(Entry);
2159 for (
auto &Out : Outs)
2160 if (Out.ArgVT == MVT::i128)
2165 return RetCCInfo.
CheckReturn(Outs, RetCC_SystemZ);
2182 if (RetLocs.
empty())
2192 for (
unsigned I = 0, E = RetLocs.
size();
I != E; ++
I) {
2221 unsigned &CCValid) {
2222 unsigned Id =
Op.getConstantOperandVal(1);
2224 case Intrinsic::s390_tbegin:
2229 case Intrinsic::s390_tbegin_nofloat:
2234 case Intrinsic::s390_tend:
2248 unsigned Id =
Op.getConstantOperandVal(0);
2250 case Intrinsic::s390_vpkshs:
2251 case Intrinsic::s390_vpksfs:
2252 case Intrinsic::s390_vpksgs:
2257 case Intrinsic::s390_vpklshs:
2258 case Intrinsic::s390_vpklsfs:
2259 case Intrinsic::s390_vpklsgs:
2264 case Intrinsic::s390_vceqbs:
2265 case Intrinsic::s390_vceqhs:
2266 case Intrinsic::s390_vceqfs:
2267 case Intrinsic::s390_vceqgs:
2272 case Intrinsic::s390_vchbs:
2273 case Intrinsic::s390_vchhs:
2274 case Intrinsic::s390_vchfs:
2275 case Intrinsic::s390_vchgs:
2280 case Intrinsic::s390_vchlbs:
2281 case Intrinsic::s390_vchlhs:
2282 case Intrinsic::s390_vchlfs:
2283 case Intrinsic::s390_vchlgs:
2288 case Intrinsic::s390_vtm:
2293 case Intrinsic::s390_vfaebs:
2294 case Intrinsic::s390_vfaehs:
2295 case Intrinsic::s390_vfaefs:
2300 case Intrinsic::s390_vfaezbs:
2301 case Intrinsic::s390_vfaezhs:
2302 case Intrinsic::s390_vfaezfs:
2307 case Intrinsic::s390_vfeebs:
2308 case Intrinsic::s390_vfeehs:
2309 case Intrinsic::s390_vfeefs:
2314 case Intrinsic::s390_vfeezbs:
2315 case Intrinsic::s390_vfeezhs:
2316 case Intrinsic::s390_vfeezfs:
2321 case Intrinsic::s390_vfenebs:
2322 case Intrinsic::s390_vfenehs:
2323 case Intrinsic::s390_vfenefs:
2328 case Intrinsic::s390_vfenezbs:
2329 case Intrinsic::s390_vfenezhs:
2330 case Intrinsic::s390_vfenezfs:
2335 case Intrinsic::s390_vistrbs:
2336 case Intrinsic::s390_vistrhs:
2337 case Intrinsic::s390_vistrfs:
2342 case Intrinsic::s390_vstrcbs:
2343 case Intrinsic::s390_vstrchs:
2344 case Intrinsic::s390_vstrcfs:
2349 case Intrinsic::s390_vstrczbs:
2350 case Intrinsic::s390_vstrczhs:
2351 case Intrinsic::s390_vstrczfs:
2356 case Intrinsic::s390_vstrsb:
2357 case Intrinsic::s390_vstrsh:
2358 case Intrinsic::s390_vstrsf:
2363 case Intrinsic::s390_vstrszb:
2364 case Intrinsic::s390_vstrszh:
2365 case Intrinsic::s390_vstrszf:
2370 case Intrinsic::s390_vfcedbs:
2371 case Intrinsic::s390_vfcesbs:
2376 case Intrinsic::s390_vfchdbs:
2377 case Intrinsic::s390_vfchsbs:
2382 case Intrinsic::s390_vfchedbs:
2383 case Intrinsic::s390_vfchesbs:
2388 case Intrinsic::s390_vftcidb:
2389 case Intrinsic::s390_vftcisb:
2394 case Intrinsic::s390_tdc:
2412 for (
unsigned I = 2;
I < NumOps; ++
I)
2415 assert(
Op->getNumValues() == 2 &&
"Expected only CC result and chain");
2421 return Intr.getNode();
2431 for (
unsigned I = 1;
I < NumOps; ++
I)
2435 return Intr.getNode();
2445 case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
2446 case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
2447 case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
2472 auto *ConstOp1 = dyn_cast<ConstantSDNode>(
C.Op1.getNode());
2473 if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
2476 int64_t
Value = ConstOp1->getSExtValue();
2492 if (!
C.Op0.hasOneUse() ||
2498 auto *Load = cast<LoadSDNode>(
C.Op0);
2499 unsigned NumBits = Load->getMemoryVT().getSizeInBits();
2500 if ((NumBits != 8 && NumBits != 16) ||
2501 NumBits != Load->getMemoryVT().getStoreSizeInBits())
2506 auto *ConstOp1 = cast<ConstantSDNode>(
C.Op1);
2507 if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
2510 uint64_t Mask = (1 << NumBits) - 1;
2513 int64_t SignedValue = ConstOp1->getSExtValue();
2520 }
else if (NumBits == 8) {
2546 if (
C.Op0.getValueType() != MVT::i32 ||
2547 Load->getExtensionType() != ExtType) {
2549 Load->getBasePtr(), Load->getPointerInfo(),
2550 Load->getMemoryVT(), Load->getAlign(),
2551 Load->getMemOperand()->getFlags());
2557 if (
C.Op1.getValueType() != MVT::i32 ||
2558 Value != ConstOp1->getZExtValue())
2565 auto *Load = dyn_cast<LoadSDNode>(
Op.getNode());
2568 if (Load->getMemoryVT() == MVT::i8)
2571 switch (Load->getExtensionType()) {
2588 if (
C.Op0.getValueType() == MVT::i128)
2590 if (
C.Op0.getValueType() == MVT::f128)
2596 if (isa<ConstantFPSDNode>(
C.Op1))
2601 auto *ConstOp1 = dyn_cast<ConstantSDNode>(
C.Op1);
2602 if (ConstOp1 && ConstOp1->getZExtValue() == 0)
2620 isUInt<16>(ConstOp1->getZExtValue()))
2625 isInt<16>(ConstOp1->getSExtValue()))
2631 unsigned Opcode0 =
C.Op0.getOpcode();
2638 C.Op0.getConstantOperandVal(1) == 0xffffffff)
2653 ((
N->getOperand(0) ==
C.Op0 &&
N->getOperand(1) ==
C.Op1) ||
2654 (
N->getOperand(0) ==
C.Op1 &&
N->getOperand(1) ==
C.Op0))) {
2658 Flags.setNoSignedWrap(
false);
2659 Flags.setNoUnsignedWrap(
false);
2678 auto *C1 = dyn_cast<ConstantFPSDNode>(
C.Op1);
2679 if (C1 && C1->isZero()) {
2698 if (
C.Op0.getOpcode() ==
ISD::SHL &&
C.Op0.getValueType() == MVT::i64 &&
2700 auto *C1 = dyn_cast<ConstantSDNode>(
C.Op0.getOperand(1));
2701 if (C1 && C1->getZExtValue() == 32) {
2702 SDValue ShlOp0 =
C.Op0.getOperand(0);
2706 cast<VTSDNode>(
N->getOperand(1))->getVT() == MVT::i32) {
2721 C.Op0.getOperand(0).getOpcode() ==
ISD::LOAD &&
2723 cast<ConstantSDNode>(
C.Op1)->getValueSizeInBits(0) <= 64 &&
2724 C.Op1->getAsZExtVal() == 0) {
2725 auto *L = cast<LoadSDNode>(
C.Op0.getOperand(0));
2726 if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
2727 C.Op0.getValueSizeInBits().getFixedValue()) {
2728 unsigned Type = L->getExtensionType();
2731 C.Op0 =
C.Op0.getOperand(0);
2741 auto *Shift = dyn_cast<ConstantSDNode>(
N.getOperand(1));
2745 uint64_t Amount = Shift->getZExtValue();
2746 if (Amount >=
N.getValueSizeInBits())
2761 unsigned ICmpType) {
2762 assert(Mask != 0 &&
"ANDs with zero should have been removed by now");
2784 if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <=
Low) {
2790 if (EffectivelyUnsigned && CmpVal <
Low) {
2798 if (CmpVal == Mask) {
2804 if (EffectivelyUnsigned && CmpVal >= Mask -
Low && CmpVal < Mask) {
2810 if (EffectivelyUnsigned && CmpVal > Mask -
Low && CmpVal <= Mask) {
2818 if (EffectivelyUnsigned && CmpVal >= Mask -
High && CmpVal <
High) {
2824 if (EffectivelyUnsigned && CmpVal > Mask -
High && CmpVal <=
High) {
2853 if (
C.Op0.getValueType() == MVT::i128) {
2858 auto *Mask = dyn_cast<ConstantSDNode>(
C.Op1);
2859 if (Mask && Mask->getAPIntValue() == 0) {
2874 auto *ConstOp1 = dyn_cast<ConstantSDNode>(
C.Op1);
2877 uint64_t CmpVal = ConstOp1->getZExtValue();
2884 NewC.Op0 =
C.Op0.getOperand(0);
2885 NewC.Op1 =
C.Op0.getOperand(1);
2886 Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
2889 MaskVal = Mask->getZExtValue();
2894 if (NewC.Op0.getValueType() != MVT::i64 ||
2909 MaskVal = -(CmpVal & -CmpVal);
2917 unsigned BitSize = NewC.Op0.getValueSizeInBits();
2918 unsigned NewCCMask, ShiftVal;
2920 NewC.Op0.getOpcode() ==
ISD::SHL &&
2922 (MaskVal >> ShiftVal != 0) &&
2923 ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
2925 MaskVal >> ShiftVal,
2928 NewC.Op0 = NewC.Op0.getOperand(0);
2929 MaskVal >>= ShiftVal;
2931 NewC.Op0.getOpcode() ==
ISD::SRL &&
2933 (MaskVal << ShiftVal != 0) &&
2934 ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
2936 MaskVal << ShiftVal,
2939 NewC.Op0 = NewC.Op0.getOperand(0);
2940 MaskVal <<= ShiftVal;
2951 if (Mask && Mask->getZExtValue() == MaskVal)
2956 C.CCMask = NewCCMask;
2964 if (
C.Op0.getValueType() != MVT::i128)
2982 bool Swap =
false, Invert =
false;
3001 C.CCMask ^=
C.CCValid;
3011 auto *Mask = dyn_cast<ConstantSDNode>(
C.Op0.getOperand(1));
3012 if (!Mask || Mask->getValueSizeInBits(0) > 64)
3015 if ((~Known.
Zero).getZExtValue() & ~Mask->getZExtValue())
3018 C.Op0 =
C.Op0.getOperand(0);
3030 C.CCValid = CCValid;
3033 C.CCMask =
CC < 4 ? 1 << (3 -
CC) : 0;
3036 C.CCMask =
CC < 4 ? ~(1 << (3 -
CC)) : -1;
3040 C.CCMask =
CC < 4 ? ~0U << (4 -
CC) : -1;
3043 C.CCMask =
CC < 4 ? ~(~0U << (4 -
CC)) : 0;
3047 C.CCMask =
CC < 4 ? ~0U << (3 -
CC) : -1;
3050 C.CCMask =
CC < 4 ? ~(~0U << (3 -
CC)) : 0;
3053 C.CCMask &= CCValid;
3061 bool IsSignaling =
false) {
3064 unsigned Opcode, CCValid;
3076 Comparison
C(CmpOp0, CmpOp1, Chain);
3078 if (
C.Op0.getValueType().isFloatingPoint()) {
3082 else if (!IsSignaling)
3104 C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
3125 if (!
C.Op1.getNode()) {
3127 switch (
C.Op0.getOpcode()) {
3154 return DAG.
getNode(
C.Opcode,
DL, VTs,
C.Chain,
C.Op0,
C.Op1);
3156 return DAG.
getNode(
C.Opcode,
DL, MVT::i32,
C.Op0,
C.Op1);
3165 Op0 = DAG.
getNode(Extend,
DL, MVT::i64, Op0);
3166 Op1 = DAG.
getNode(Extend,
DL, MVT::i64, Op1);
3191 unsigned CCValid,
unsigned CCMask) {
3220 case CmpMode::Int:
return 0;
3240 case CmpMode::FP:
return 0;
3241 case CmpMode::StrictFP:
return 0;
3242 case CmpMode::SignalingFP:
return 0;
3274 int Mask[] = { Start, -1, Start + 1, -1 };
3294 !Subtarget.hasVectorEnhancements1()) {
3308 SDValue Ops[2] = { Res, NewChain };
3317 return DAG.
getNode(Opcode,
DL, VTs, Chain, CmpOp0, CmpOp1);
3319 return DAG.
getNode(Opcode,
DL, VT, CmpOp0, CmpOp1);
3332 bool IsSignaling)
const {
3335 assert (!IsSignaling || Chain);
3336 CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
3337 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
3338 bool Invert =
false;
3346 assert(IsFP &&
"Unexpected integer comparison");
3348 DL, VT, CmpOp1, CmpOp0, Chain);
3350 DL, VT, CmpOp0, CmpOp1, Chain);
3354 LT.getValue(1),
GE.getValue(1));
3363 assert(IsFP &&
"Unexpected integer comparison");
3365 DL, VT, CmpOp1, CmpOp0, Chain);
3367 DL, VT, CmpOp0, CmpOp1, Chain);
3371 LT.getValue(1),
GT.getValue(1));
3380 Cmp = getVectorCmp(DAG, Opcode,
DL, VT, CmpOp0, CmpOp1, Chain);
3384 Cmp = getVectorCmp(DAG, Opcode,
DL, VT, CmpOp1, CmpOp0, Chain);
3389 Chain =
Cmp.getValue(1);
3397 if (Chain && Chain.
getNode() !=
Cmp.getNode()) {
3410 EVT VT =
Op.getValueType();
3412 return lowerVectorSETCC(DAG,
DL, VT,
CC, CmpOp0, CmpOp1);
3414 Comparison
C(
getCmp(DAG, CmpOp0, CmpOp1,
CC,
DL));
3421 bool IsSignaling)
const {
3427 EVT VT =
Op.getNode()->getValueType(0);
3429 SDValue Res = lowerVectorSETCC(DAG,
DL, VT,
CC, CmpOp0, CmpOp1,
3430 Chain, IsSignaling);
3434 Comparison
C(
getCmp(DAG, CmpOp0, CmpOp1,
CC,
DL, Chain, IsSignaling));
3449 Comparison
C(
getCmp(DAG, CmpOp0, CmpOp1,
CC,
DL));
3486 Comparison
C(
getCmp(DAG, CmpOp0, CmpOp1,
CC,
DL));
3494 cast<ConstantSDNode>(
C.Op1)->getValueSizeInBits(0) <= 64 &&
3495 C.Op1->getAsZExtVal() == 0) {
3503 SDValue Ops[] = {TrueOp, FalseOp,
3577 Chain = DAG.
getCopyToReg(Chain,
DL, SystemZ::R2D, GOTOffset, Glue);
3584 Node->getValueType(0),
3596 assert(Mask &&
"Missing call preserved mask for calling convention");
3604 Chain = DAG.
getNode(Opcode,
DL, NodeTys, Ops);
3611SDValue SystemZTargetLowering::lowerThreadPointer(
const SDLoc &
DL,
3643 SDValue TP = lowerThreadPointer(
DL, DAG);
3751 if (
CP->isMachineConstantPoolEntry())
3770 unsigned Depth =
Op.getConstantOperandVal(0);
3777 int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);
3806 unsigned Depth =
Op.getConstantOperandVal(0);
3814 SDValue FrameAddr = lowerFRAMEADDR(
Op, DAG);
3816 int Offset = TFL->getReturnAddressOffset(MF);
3827 &SystemZ::GR64BitRegClass);
3835 EVT InVT =
In.getValueType();
3836 EVT ResVT =
Op.getValueType();
3841 if (
auto *LoadN = dyn_cast<LoadSDNode>(In))
3844 LoadN->getBasePtr(), LoadN->getMemOperand());
3850 if (InVT == MVT::i32 && ResVT == MVT::f32) {
3852 if (Subtarget.hasHighWord()) {
3856 MVT::i64,
SDValue(U64, 0), In);
3864 DL, MVT::f32, Out64);
3866 if (InVT == MVT::f32 && ResVT == MVT::i32) {
3869 MVT::f64,
SDValue(U64, 0), In);
3871 if (Subtarget.hasHighWord())
3885 return lowerVASTART_XPLINK(
Op, DAG);
3887 return lowerVASTART_ELF(
Op, DAG);
3902 const Value *SV = cast<SrcValueSDNode>(
Op.getOperand(2))->getValue();
3916 const Value *SV = cast<SrcValueSDNode>(
Op.getOperand(2))->getValue();
3920 const unsigned NumFields = 4;
3931 for (
unsigned I = 0;
I < NumFields; ++
I) {
3936 MemOps[
I] = DAG.
getStore(Chain,
DL, Fields[
I], FieldAddr,
3948 const Value *DstSV = cast<SrcValueSDNode>(
Op.getOperand(3))->getValue();
3949 const Value *SrcSV = cast<SrcValueSDNode>(
Op.getOperand(4))->getValue();
3955 Align(8),
false,
false,
3961SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(
SDValue Op,
3964 return lowerDYNAMIC_STACKALLOC_XPLINK(
Op, DAG);
3966 return lowerDYNAMIC_STACKALLOC_ELF(
Op, DAG);
3970SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(
SDValue Op,
3982 uint64_t AlignVal = (RealignOpt ?
Align->getAsZExtVal() : 0);
3985 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
3986 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
3992 if (ExtraAlignSpace)
3996 bool IsSigned =
false;
3997 bool DoesNotReturn =
false;
3998 bool IsReturnValueUsed =
false;
3999 EVT VT =
Op.getValueType();
4010 Register SPReg = Regs.getStackPointerRegister();
4021 if (ExtraAlignSpace) {
4033SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(
SDValue Op,
4047 uint64_t AlignVal = (RealignOpt ?
Align->getAsZExtVal() : 0);
4050 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
4051 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
4062 Backchain = DAG.
getLoad(MVT::i64,
DL, Chain, getBackchainAddress(OldSP, DAG),
4066 if (ExtraAlignSpace)
4074 DAG.
getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);
4090 if (RequiredAlign > StackAlign) {
4100 Chain = DAG.
getStore(Chain,
DL, Backchain, getBackchainAddress(NewSP, DAG),
4107SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
4116 EVT VT =
Op.getValueType();
4123 Op.getOperand(1), Ops[1], Ops[0]);
4124 else if (Subtarget.hasMiscellaneousExtensions2())
4129 Op.getOperand(0),
Op.getOperand(1), Ops[1], Ops[0]);
4153 LL, RL, Ops[1], Ops[0]);
4164 EVT VT =
Op.getValueType();
4171 Op.getOperand(1), Ops[1], Ops[0]);
4177 Op.getOperand(0),
Op.getOperand(1), Ops[1], Ops[0]);
4185 EVT VT =
Op.getValueType();
4205 EVT VT =
Op.getValueType();
4212 Op.getOperand(0),
Op.getOperand(1), Ops[1], Ops[0]);
4217 assert(
Op.getValueType() == MVT::i64 &&
"Should be 64-bit operation");
4220 SDValue Ops[] = {
Op.getOperand(0),
Op.getOperand(1)};
4229 if ((Masks[0] >> 32) == 0xffffffff &&
uint32_t(Masks[1]) == 0xffffffff)
4231 else if ((Masks[1] >> 32) == 0xffffffff &&
uint32_t(Masks[0]) == 0xffffffff)
4247 if (!isInt<16>(
Value))
4268 MVT::i64, HighOp, Low32);
4279 if (
N->getValueType(0) == MVT::i128) {
4280 unsigned BaseOp = 0;
4281 unsigned FlagOp = 0;
4282 bool IsBorrow =
false;
4283 switch (
Op.getOpcode()) {
4306 unsigned BaseOp = 0;
4307 unsigned CCValid = 0;
4308 unsigned CCMask = 0;
4310 switch (
Op.getOpcode()) {
4338 if (
N->getValueType(1) == MVT::i1)
4361 MVT VT =
N->getSimpleValueType(0);
4372 if (VT == MVT::i128) {
4373 unsigned BaseOp = 0;
4374 unsigned FlagOp = 0;
4375 bool IsBorrow =
false;
4376 switch (
Op.getOpcode()) {
4403 unsigned BaseOp = 0;
4404 unsigned CCValid = 0;
4405 unsigned CCMask = 0;
4407 switch (
Op.getOpcode()) {
4436 if (
N->getValueType(1) == MVT::i1)
4444 EVT VT =
Op.getValueType();
4446 Op =
Op.getOperand(0);
4494 if (NumSignificantBits == 0)
4500 BitSize = std::min(BitSize, OrigBitSize);
4509 for (int64_t
I = BitSize / 2;
I >= 8;
I =
I / 2) {
4511 if (BitSize != OrigBitSize)
4548 auto *
Node = cast<AtomicSDNode>(
Op.getNode());
4550 (
Node->getMemoryVT() == MVT::i128 ||
Node->getMemoryVT() == MVT::f128) &&
4551 "Only custom lowering i128 or f128.");
4563 EVT PtrVT =
Addr.getValueType();
4564 EVT WideVT = MVT::i32;
4587 unsigned Opcode)
const {
4588 auto *
Node = cast<AtomicSDNode>(
Op.getNode());
4591 EVT NarrowVT =
Node->getMemoryVT();
4592 EVT WideVT = MVT::i32;
4593 if (NarrowVT == WideVT)
4605 if (
auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
4610 SDValue AlignedAddr, BitShift, NegBitShift;
4628 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
4647 auto *
Node = cast<AtomicSDNode>(
Op.getNode());
4648 EVT MemVT =
Node->getMemoryVT();
4649 if (MemVT == MVT::i32 || MemVT == MVT::i64) {
4651 assert(
Op.getValueType() == MemVT &&
"Mismatched VTs");
4652 assert(Subtarget.hasInterlockedAccess1() &&
4653 "Should have been expanded by AtomicExpand pass.");
4659 Node->getChain(),
Node->getBasePtr(), NegSrc2,
4660 Node->getMemOperand());
4669 auto *
Node = cast<AtomicSDNode>(
Op.getNode());
4677 if (
Node->getMemoryVT() == MVT::i128) {
4686 EVT NarrowVT =
Node->getMemoryVT();
4687 EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
4688 if (NarrowVT == WideVT) {
4690 SDValue Ops[] = { ChainIn,
Addr, CmpVal, SwapVal };
4692 DL, Tys, Ops, NarrowVT, MMO);
4706 SDValue AlignedAddr, BitShift, NegBitShift;
4711 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
4714 VTList, Ops, NarrowVT, MMO);
4728SystemZTargetLowering::getTargetMMOFlags(
const Instruction &
I)
const {
4733 if (
auto *SI = dyn_cast<StoreInst>(&
I))
4736 if (
auto *LI = dyn_cast<LoadInst>(&
I))
4739 if (
auto *AI = dyn_cast<AtomicRMWInst>(&
I))
4742 if (
auto *AI = dyn_cast<AtomicCmpXchgInst>(&
I))
4754 "in GHC calling convention");
4756 Regs->getStackPointerRegister(),
Op.getValueType());
4767 "in GHC calling convention");
4774 if (StoreBackchain) {
4776 Chain,
DL, Regs->getStackPointerRegister(), MVT::i64);
4777 Backchain = DAG.
getLoad(MVT::i64,
DL, Chain, getBackchainAddress(OldSP, DAG),
4781 Chain = DAG.
getCopyToReg(Chain,
DL, Regs->getStackPointerRegister(), NewSP);
4784 Chain = DAG.
getStore(Chain,
DL, Backchain, getBackchainAddress(NewSP, DAG),
4792 bool IsData =
Op.getConstantOperandVal(4);
4795 return Op.getOperand(0);
4798 bool IsWrite =
Op.getConstantOperandVal(2);
4800 auto *
Node = cast<MemIntrinsicSDNode>(
Op.getNode());
4804 Node->getVTList(), Ops,
4805 Node->getMemoryVT(),
Node->getMemOperand());
4817SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(
SDValue Op,
4819 unsigned Opcode, CCValid;
4821 assert(
Op->getNumValues() == 2 &&
"Expected only CC result and chain");
4832SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(
SDValue Op,
4834 unsigned Opcode, CCValid;
4837 if (
Op->getNumValues() == 1)
4839 assert(
Op->getNumValues() == 2 &&
"Expected a CC and non-CC result");
4844 unsigned Id =
Op.getConstantOperandVal(0);
4846 case Intrinsic::thread_pointer:
4847 return lowerThreadPointer(
SDLoc(
Op), DAG);
4849 case Intrinsic::s390_vpdi:
4851 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4853 case Intrinsic::s390_vperm:
4855 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4857 case Intrinsic::s390_vuphb:
4858 case Intrinsic::s390_vuphh:
4859 case Intrinsic::s390_vuphf:
4863 case Intrinsic::s390_vuplhb:
4864 case Intrinsic::s390_vuplhh:
4865 case Intrinsic::s390_vuplhf:
4869 case Intrinsic::s390_vuplb:
4870 case Intrinsic::s390_vuplhw:
4871 case Intrinsic::s390_vuplf:
4875 case Intrinsic::s390_vupllb:
4876 case Intrinsic::s390_vupllh:
4877 case Intrinsic::s390_vupllf:
4881 case Intrinsic::s390_vsumb:
4882 case Intrinsic::s390_vsumh:
4883 case Intrinsic::s390_vsumgh:
4884 case Intrinsic::s390_vsumgf:
4885 case Intrinsic::s390_vsumqf:
4886 case Intrinsic::s390_vsumqg:
4888 Op.getOperand(1),
Op.getOperand(2));
4890 case Intrinsic::s390_vaq:
4892 Op.getOperand(1),
Op.getOperand(2));
4893 case Intrinsic::s390_vaccb:
4894 case Intrinsic::s390_vacch:
4895 case Intrinsic::s390_vaccf:
4896 case Intrinsic::s390_vaccg:
4897 case Intrinsic::s390_vaccq:
4899 Op.getOperand(1),
Op.getOperand(2));
4900 case Intrinsic::s390_vacq:
4902 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4903 case Intrinsic::s390_vacccq:
4905 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4907 case Intrinsic::s390_vsq:
4909 Op.getOperand(1),
Op.getOperand(2));
4910 case Intrinsic::s390_vscbib:
4911 case Intrinsic::s390_vscbih:
4912 case Intrinsic::s390_vscbif:
4913 case Intrinsic::s390_vscbig:
4914 case Intrinsic::s390_vscbiq:
4916 Op.getOperand(1),
Op.getOperand(2));
4917 case Intrinsic::s390_vsbiq:
4919 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4920 case Intrinsic::s390_vsbcbiq:
4922 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4943 { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
4946 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
4949 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
4952 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
4955 { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
4958 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
4961 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
4964 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
4967 { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
4970 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
4973 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
4976 { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
4979 { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
4993 OpNo0 = OpNo1 = OpNos[1];
4994 }
else if (OpNos[1] < 0) {
4995 OpNo0 = OpNo1 = OpNos[0];
5013 unsigned &OpNo0,
unsigned &OpNo1) {
5014 int OpNos[] = { -1, -1 };
5027 if (OpNos[ModelOpNo] == 1 - RealOpNo)
5029 OpNos[ModelOpNo] = RealOpNo;
5037 unsigned &OpNo0,
unsigned &OpNo1) {
5054 int Elt = Bytes[
From];
5057 Transform[
From] = -1;
5059 while (
P.Bytes[To] != Elt) {
5064 Transform[
From] = To;
5087 if (
auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
5088 Bytes.
resize(NumElements * BytesPerElement, -1);
5089 for (
unsigned I = 0;
I < NumElements; ++
I) {
5090 int Index = VSN->getMaskElt(
I);
5092 for (
unsigned J = 0; J < BytesPerElement; ++J)
5093 Bytes[
I * BytesPerElement + J] =
Index * BytesPerElement + J;
5098 isa<ConstantSDNode>(ShuffleOp.
getOperand(1))) {
5100 Bytes.
resize(NumElements * BytesPerElement, -1);
5101 for (
unsigned I = 0;
I < NumElements; ++
I)
5102 for (
unsigned J = 0; J < BytesPerElement; ++J)
5103 Bytes[
I * BytesPerElement + J] =
Index * BytesPerElement + J;
5114 unsigned BytesPerElement,
int &
Base) {
5116 for (
unsigned I = 0;
I < BytesPerElement; ++
I) {
5117 if (Bytes[Start +
I] >= 0) {
5118 unsigned Elem = Bytes[Start +
I];
5122 if (
unsigned(
Base) % Bytes.
size() + BytesPerElement > Bytes.
size())
5124 }
else if (
unsigned(
Base) != Elem -
I)
5137 unsigned &StartIndex,
unsigned &OpNo0,
5139 int OpNos[] = { -1, -1 };
5141 for (
unsigned I = 0;
I < 16; ++
I) {
5148 Shift = ExpectedShift;
5149 else if (Shift != ExpectedShift)
5153 if (OpNos[ModelOpNo] == 1 - RealOpNo)
5155 OpNos[ModelOpNo] = RealOpNo;
5192 N =
N->getOperand(0);
5194 if (
auto *
Op = dyn_cast<ConstantSDNode>(
N->getOperand(0)))
5195 return Op->getZExtValue() == 0;
5201 for (
unsigned I = 0;
I < Num ;
I++)
5213 for (
unsigned I = 0;
I < 2; ++
I)
5217 unsigned StartIndex, OpNo0, OpNo1;
5226 if (ZeroVecIdx != UINT32_MAX) {
5227 bool MaskFirst =
true;
5232 if (OpNo == ZeroVecIdx &&
I == 0) {
5237 if (OpNo != ZeroVecIdx && Byte == 0) {
5244 if (ZeroIdx != -1) {
5247 if (Bytes[
I] >= 0) {
5250 if (OpNo == ZeroVecIdx)
5260 SDValue Src = ZeroVecIdx == 0 ? Ops[1] : Ops[0];
5278 (!Ops[1].
isUndef() ? Ops[1] : Ops[0]), Op2);
5283struct GeneralShuffle {
5284 GeneralShuffle(
EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {}
5288 void tryPrepareForUnpack();
5289 bool unpackWasPrepared() {
return UnpackFromEltSize <= 4; }
5304 unsigned UnpackFromEltSize;
5309void GeneralShuffle::addUndef() {
5311 for (
unsigned I = 0;
I < BytesPerElement; ++
I)
5312 Bytes.push_back(-1);
5321bool GeneralShuffle::add(
SDValue Op,
unsigned Elem) {
5327 EVT FromVT =
Op.getNode() ?
Op.getValueType() : VT;
5332 if (FromBytesPerElement < BytesPerElement)
5336 (FromBytesPerElement - BytesPerElement));
5339 while (
Op.getNode()) {
5341 Op =
Op.getOperand(0);
5357 }
else if (
Op.isUndef()) {
5366 for (; OpNo < Ops.size(); ++OpNo)
5367 if (Ops[OpNo] ==
Op)
5369 if (OpNo == Ops.size())
5374 for (
unsigned I = 0;
I < BytesPerElement; ++
I)
5375 Bytes.push_back(
Base +
I);
5384 if (Ops.size() == 0)
5388 tryPrepareForUnpack();
5391 if (Ops.size() == 1)
5392 Ops.push_back(DAG.
getUNDEF(MVT::v16i8));
5403 unsigned Stride = 1;
5404 for (; Stride * 2 < Ops.size(); Stride *= 2) {
5405 for (
unsigned I = 0;
I < Ops.size() - Stride;
I += Stride * 2) {
5406 SDValue SubOps[] = { Ops[
I], Ops[
I + Stride] };
5415 else if (OpNo ==
I + Stride)
5426 if (NewBytes[J] >= 0) {
5428 "Invalid double permute");
5431 assert(NewBytesMap[J] < 0 &&
"Invalid double permute");
5437 if (NewBytes[J] >= 0)
5445 Ops[1] = Ops[Stride];
5453 unsigned OpNo0, OpNo1;
5455 if (unpackWasPrepared() && Ops[1].
isUndef())
5457 else if (
const Permute *
P =
matchPermute(Bytes, OpNo0, OpNo1))
5462 Op = insertUnpackIfPrepared(DAG,
DL,
Op);
5469 dbgs() << Msg.c_str() <<
" { ";
5470 for (
unsigned i = 0; i < Bytes.
size(); i++)
5471 dbgs() << Bytes[i] <<
" ";
5479void GeneralShuffle::tryPrepareForUnpack() {
5481 if (ZeroVecOpNo == UINT32_MAX || Ops.size() == 1)
5486 if (Ops.size() > 2 &&
5491 UnpackFromEltSize = 1;
5492 for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) {
5493 bool MatchUnpack =
true;
5496 unsigned ToEltSize = UnpackFromEltSize * 2;
5497 bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize;
5500 if (Bytes[Elt] != -1) {
5502 if (IsZextByte != (OpNo == ZeroVecOpNo)) {
5503 MatchUnpack =
false;
5509 if (Ops.size() == 2) {
5512 if (SrcBytes[i] != -1 && SrcBytes[i] % 16 !=
int(i)) {
5513 UnpackFromEltSize = UINT_MAX;
5520 if (UnpackFromEltSize > 4)
5523 LLVM_DEBUG(
dbgs() <<
"Preparing for final unpack of element size "
5524 << UnpackFromEltSize <<
". Zero vector is Op#" << ZeroVecOpNo
5526 dumpBytes(Bytes,
"Original Bytes vector:"););
5531 Elt += UnpackFromEltSize;
5532 for (
unsigned i = 0; i < UnpackFromEltSize; i++, Elt++,
B++)
5533 Bytes[
B] = Bytes[Elt];
5539 Ops.erase(&Ops[ZeroVecOpNo]);
5541 if (Bytes[
I] >= 0) {
5543 if (OpNo > ZeroVecOpNo)
5554 if (!unpackWasPrepared())
5556 unsigned InBits = UnpackFromEltSize * 8;
5560 unsigned OutBits = InBits * 2;
5569 if (!
Op.getOperand(
I).isUndef())
5585 if (
Value.isUndef())
5638 GeneralShuffle GS(VT);
5640 bool FoundOne =
false;
5641 for (
unsigned I = 0;
I < NumElements; ++
I) {
5644 Op =
Op.getOperand(0);
5647 unsigned Elem =
Op.getConstantOperandVal(1);
5648 if (!GS.add(
Op.getOperand(0), Elem))
5651 }
else if (
Op.isUndef()) {
5665 if (!ResidueOps.
empty()) {
5666 while (ResidueOps.
size() < NumElements)
5668 for (
auto &
Op : GS.Ops) {
5669 if (!
Op.getNode()) {
5675 return GS.getNode(DAG,
SDLoc(BVN));
5678bool SystemZTargetLowering::isVectorElementLoad(
SDValue Op)
const {
5679 if (
Op.getOpcode() ==
ISD::LOAD && cast<LoadSDNode>(
Op)->isUnindexed())
5681 if (
auto *AL = dyn_cast<AtomicSDNode>(
Op))
5695 unsigned int NumElements = Elems.
size();
5696 unsigned int Count = 0;
5697 for (
auto Elem : Elems) {
5698 if (!Elem.isUndef()) {
5701 else if (Elem != Single) {
5721 if (
Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))
5725 bool AllLoads =
true;
5726 for (
auto Elem : Elems)
5727 if (!isVectorElementLoad(Elem)) {
5733 if (VT == MVT::v2i64 && !AllLoads)
5737 if (VT == MVT::v2f64 && !AllLoads)
5747 if (VT == MVT::v4f32 && !AllLoads) {
5761 DL, MVT::v2i64, Op01, Op23);
5769 unsigned NumConstants = 0;
5770 for (
unsigned I = 0;
I < NumElements; ++
I) {
5784 if (NumConstants > 0) {
5785 for (
unsigned I = 0;
I < NumElements; ++
I)
5796 std::map<const SDNode*, unsigned> UseCounts;
5797 SDNode *LoadMaxUses =
nullptr;
5798 for (
unsigned I = 0;
I < NumElements; ++
I)
5799 if (isVectorElementLoad(Elems[
I])) {
5800 SDNode *Ld = Elems[
I].getNode();
5802 if (LoadMaxUses ==
nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
5805 if (LoadMaxUses !=
nullptr) {
5806 ReplicatedVal =
SDValue(LoadMaxUses, 0);
5810 unsigned I1 = NumElements / 2 - 1;
5811 unsigned I2 = NumElements - 1;
5812 bool Def1 = !Elems[
I1].isUndef();
5813 bool Def2 = !Elems[I2].isUndef();
5827 for (
unsigned I = 0;
I < NumElements; ++
I)
5828 if (!
Done[
I] && !Elems[
I].
isUndef() && Elems[
I] != ReplicatedVal)
5836 auto *BVN = cast<BuildVectorSDNode>(
Op.getNode());
5838 EVT VT =
Op.getValueType();
5840 if (BVN->isConstant()) {
5859 for (
unsigned I = 0;
I < NumElements; ++
I)
5860 Ops[
I] =
Op.getOperand(
I);
5861 return buildVector(DAG,
DL, VT, Ops);
5866 auto *VSN = cast<ShuffleVectorSDNode>(
Op.getNode());
5868 EVT VT =
Op.getValueType();
5871 if (VSN->isSplat()) {
5873 unsigned Index = VSN->getSplatIndex();
5875 "Splat index should be defined and in first operand");
5885 GeneralShuffle
GS(VT);
5886 for (
unsigned I = 0;
I < NumElements; ++
I) {
5887 int Elt = VSN->getMaskElt(
I);
5890 else if (!
GS.add(
Op.getOperand(
unsigned(Elt) / NumElements),
5891 unsigned(Elt) % NumElements))
5894 return GS.getNode(DAG,
SDLoc(VSN));
5913 EVT VT =
Op.getValueType();
5918 if (VT == MVT::v2f64 &&
5938SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(
SDValue Op,
5944 EVT VT =
Op.getValueType();
5948 if (
auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
5963SDValue SystemZTargetLowering::
5966 EVT OutVT =
Op.getValueType();
5976 }
while (FromBits != ToBits);
5981SDValue SystemZTargetLowering::
5985 EVT OutVT =
Op.getValueType();
5989 unsigned NumInPerOut = InNumElts / OutNumElts;
5995 unsigned ZeroVecElt = InNumElts;
5996 for (
unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
5997 unsigned MaskElt = PackedElt * NumInPerOut;
5998 unsigned End = MaskElt + NumInPerOut - 1;
5999 for (; MaskElt <
End; MaskElt++)
6000 Mask[MaskElt] = ZeroVecElt++;
6001 Mask[MaskElt] = PackedElt;
6008 unsigned ByScalar)
const {
6013 EVT VT =
Op.getValueType();
6017 if (
auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
6018 APInt SplatBits, SplatUndef;
6019 unsigned SplatBitSize;
6023 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
6024 ElemBitSize,
true) &&
6025 SplatBitSize == ElemBitSize) {
6028 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6037 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6043 if (
auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
6044 if (VSN->isSplat()) {
6046 unsigned Index = VSN->getSplatIndex();
6048 "Splat index should be defined and in first operand");
6055 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6067 MVT ResultVT =
Op.getSimpleValueType();
6069 unsigned Check =
Op.getConstantOperandVal(1);
6071 unsigned TDCMask = 0;
6105 int SPFI = cast<FrameIndexSDNode>(
StackPtr.getNode())->getIndex();
6116 return DAG.
getLoad(MVT::i64,
DL, Chain, StackPtr, MPI);
6121 switch (
Op.getOpcode()) {
6123 return lowerFRAMEADDR(
Op, DAG);
6125 return lowerRETURNADDR(
Op, DAG);
6127 return lowerBR_CC(
Op, DAG);
6129 return lowerSELECT_CC(
Op, DAG);
6131 return lowerSETCC(
Op, DAG);
6133 return lowerSTRICT_FSETCC(
Op, DAG,
false);
6135 return lowerSTRICT_FSETCC(
Op, DAG,
true);
6137 return lowerGlobalAddress(cast<GlobalAddressSDNode>(
Op), DAG);
6139 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(
Op), DAG);
6141 return lowerBlockAddress(cast<BlockAddressSDNode>(
Op), DAG);
6143 return lowerJumpTable(cast<JumpTableSDNode>(
Op), DAG);
6145 return lowerConstantPool(cast<ConstantPoolSDNode>(
Op), DAG);
6147 return lowerBITCAST(
Op, DAG);
6149 return lowerVASTART(
Op, DAG);
6151 return lowerVACOPY(
Op, DAG);
6153 return lowerDYNAMIC_STACKALLOC(
Op, DAG);
6155 return lowerGET_DYNAMIC_AREA_OFFSET(
Op, DAG);
6157 return lowerSMUL_LOHI(
Op, DAG);
6159 return lowerUMUL_LOHI(
Op, DAG);
6161 return lowerSDIVREM(
Op, DAG);
6163 return lowerUDIVREM(
Op, DAG);
6168 return lowerXALUO(
Op, DAG);
6171 return lowerUADDSUBO_CARRY(
Op, DAG);
6173 return lowerOR(
Op, DAG);
6175 return lowerCTPOP(
Op, DAG);
6177 return lowerVECREDUCE_ADD(
Op, DAG);
6179 return lowerATOMIC_FENCE(
Op, DAG);
6184 return lowerATOMIC_LDST_I128(
Op, DAG);
6188 return lowerATOMIC_LOAD_SUB(
Op, DAG);
6206 return lowerATOMIC_CMP_SWAP(
Op, DAG);
6208 return lowerSTACKSAVE(
Op, DAG);
6210 return lowerSTACKRESTORE(
Op, DAG);
6212 return lowerPREFETCH(
Op, DAG);
6214 return lowerINTRINSIC_W_CHAIN(
Op, DAG);
6216 return lowerINTRINSIC_WO_CHAIN(
Op, DAG);
6218 return lowerBUILD_VECTOR(
Op, DAG);
6220 return lowerVECTOR_SHUFFLE(
Op, DAG);
6222 return lowerSCALAR_TO_VECTOR(
Op, DAG);
6224 return lowerINSERT_VECTOR_ELT(
Op, DAG);
6226 return lowerEXTRACT_VECTOR_ELT(
Op, DAG);
6228 return lowerSIGN_EXTEND_VECTOR_INREG(
Op, DAG);
6230 return lowerZERO_EXTEND_VECTOR_INREG(
Op, DAG);
6240 return lowerIS_FPCLASS(
Op, DAG);
6242 return lowerGET_ROUNDING(
Op, DAG);
6244 return lowerREADCYCLECOUNTER(
Op, DAG);
6263 SystemZ::REG_SEQUENCE, SL, MVT::f128,
6270static std::pair<SDValue, SDValue>
6296 switch (
N->getOpcode()) {
6300 SDValue Ops[] = {
N->getOperand(0),
N->getOperand(1) };
6303 DL, Tys, Ops, MVT::i128, MMO);
6305 EVT VT =
N->getValueType(0);
6338 SDValue Ops[] = {
N->getOperand(0), Val,
N->getOperand(2)};
6341 DL, Tys, Ops, MVT::i128, MMO);
6344 if (cast<AtomicSDNode>(
N)->getSuccessOrdering() ==
6347 MVT::Other, Res), 0);
6354 SDValue Ops[] = {
N->getOperand(0),
N->getOperand(1),
6359 DL, Tys, Ops, MVT::i128, MMO);
6370 if (
N->getValueType(0) == MVT::i128 && Src.getValueType() == MVT::f128 &&
6374 if (getRepRegClassFor(MVT::f128) == &SystemZ::VR128BitRegClass) {
6382 assert(getRepRegClassFor(MVT::f128) == &SystemZ::FP128BitRegClass &&
6383 "Unrecognized register class for f128.");
6404#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
6515 OPCODE(ATOMIC_LOADW_ADD);
6516 OPCODE(ATOMIC_LOADW_SUB);
6517 OPCODE(ATOMIC_LOADW_AND);
6519 OPCODE(ATOMIC_LOADW_XOR);
6520 OPCODE(ATOMIC_LOADW_NAND);
6521 OPCODE(ATOMIC_LOADW_MIN);
6522 OPCODE(ATOMIC_LOADW_MAX);
6523 OPCODE(ATOMIC_LOADW_UMIN);
6524 OPCODE(ATOMIC_LOADW_UMAX);
6525 OPCODE(ATOMIC_CMP_SWAPW);
6528 OPCODE(ATOMIC_STORE_128);
6529 OPCODE(ATOMIC_CMP_SWAP_128);
6544bool SystemZTargetLowering::canTreatAsByteVector(
EVT VT)
const {
6545 if (!Subtarget.hasVector())
6559 DAGCombinerInfo &DCI,
6567 unsigned Opcode =
Op.getOpcode();
6570 Op =
Op.getOperand(0);
6572 canTreatAsByteVector(
Op.getValueType())) {
6581 BytesPerElement,
First))
6588 if (Byte % BytesPerElement != 0)
6591 Index = Byte / BytesPerElement;
6595 canTreatAsByteVector(
Op.getValueType())) {
6598 EVT OpVT =
Op.getValueType();
6600 if (OpBytesPerElement < BytesPerElement)
6604 unsigned End = (
Index + 1) * BytesPerElement;
6605 if (
End % OpBytesPerElement != 0)
6608 Op =
Op.getOperand(
End / OpBytesPerElement - 1);
6609 if (!
Op.getValueType().isInteger()) {
6612 DCI.AddToWorklist(
Op.getNode());
6617 DCI.AddToWorklist(
Op.getNode());
6624 canTreatAsByteVector(
Op.getValueType()) &&
6625 canTreatAsByteVector(
Op.getOperand(0).getValueType())) {
6627 EVT ExtVT =
Op.getValueType();
6628 EVT OpVT =
Op.getOperand(0).getValueType();
6631 unsigned Byte =
Index * BytesPerElement;
6632 unsigned SubByte =
Byte % ExtBytesPerElement;
6633 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
6634 if (SubByte < MinSubByte ||
6635 SubByte + BytesPerElement > ExtBytesPerElement)
6638 Byte =
Byte / ExtBytesPerElement * OpBytesPerElement;
6640 Byte += SubByte - MinSubByte;
6641 if (Byte % BytesPerElement != 0)
6643 Op =
Op.getOperand(0);
6650 if (
Op.getValueType() != VecVT) {
6652 DCI.AddToWorklist(
Op.getNode());
6662SDValue SystemZTargetLowering::combineTruncateExtract(
6671 if (canTreatAsByteVector(VecVT)) {
6672 if (
auto *IndexN = dyn_cast<ConstantSDNode>(
Op.getOperand(1))) {
6675 if (BytesPerElement % TruncBytes == 0) {
6681 unsigned Scale = BytesPerElement / TruncBytes;
6682 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
6688 EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
6689 return combineExtract(
DL, ResVT, VecVT, Vec, NewIndex, DCI,
true);
6697SDValue SystemZTargetLowering::combineZERO_EXTEND(
6698 SDNode *
N, DAGCombinerInfo &DCI)
const {
6702 EVT VT =
N->getValueType(0);
6704 auto *TrueOp = dyn_cast<ConstantSDNode>(N0.
getOperand(0));
6705 auto *FalseOp = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
6706 if (TrueOp && FalseOp) {
6716 DCI.CombineTo(N0.
getNode(), TruncSelect);
6746SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
6747 SDNode *
N, DAGCombinerInfo &DCI)
const {
6753 EVT VT =
N->getValueType(0);
6754 EVT EVT = cast<VTSDNode>(
N->getOperand(1))->getVT();
6767SDValue SystemZTargetLowering::combineSIGN_EXTEND(
6768 SDNode *
N, DAGCombinerInfo &DCI)
const {
6774 EVT VT =
N->getValueType(0);
6776 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
6779 if (
auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.
getOperand(1))) {
6781 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
6782 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
6798SDValue SystemZTargetLowering::combineMERGE(
6799 SDNode *
N, DAGCombinerInfo &DCI)
const {
6801 unsigned Opcode =
N->getOpcode();
6809 if (Op1 ==
N->getOperand(0))
6814 if (ElemBytes <= 4) {
6822 DCI.AddToWorklist(Op1.
getNode());
6825 DCI.AddToWorklist(
Op.getNode());
6832SDValue SystemZTargetLowering::combineLOAD(
6833 SDNode *
N, DAGCombinerInfo &DCI)
const {
6835 EVT LdVT =
N->getValueType(0);
6840 if (LdVT == MVT::i128) {
6847 int UsedElements = 0;
6849 UI != UIEnd; ++UI) {
6851 if (UI.getUse().getResNo() != 0)
6864 User->getValueType(0) != MVT::i64)
6868 if (UsedElements & (1 <<
Index))
6871 UsedElements |= 1 <<
Index;
6877 for (
auto UserAndIndex :
Users) {
6879 unsigned Offset =
User->getValueType(0).getStoreSize() * UserAndIndex.second;
6884 LD->getPointerInfo().getWithOffset(
Offset),
6885 LD->getOriginalAlign(),
LD->getMemOperand()->getFlags(),
6888 DCI.CombineTo(
User, EltLoad,
true);
6896 DCI.AddToWorklist(Chain.
getNode());
6917 else if (UI.getUse().getResNo() == 0)
6920 if (!Replicate || OtherUses.
empty())
6926 for (
SDNode *U : OtherUses) {
6935bool SystemZTargetLowering::canLoadStoreByteSwapped(
EVT VT)
const {
6936 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
6938 if (Subtarget.hasVectorEnhancements2())
6939 if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 || VT == MVT::i128)
6951 for (
unsigned i = 0; i < NumElts; ++i) {
6952 if (M[i] < 0)
continue;
6953 if ((
unsigned) M[i] != NumElts - 1 - i)
6961 for (
auto *U : StoredVal->
uses()) {
6963 EVT CurrMemVT = ST->getMemoryVT().getScalarType();
6966 }
else if (isa<BuildVectorSDNode>(U)) {
7004SDValue SystemZTargetLowering::combineSTORE(
7005 SDNode *
N, DAGCombinerInfo &DCI)
const {
7007 auto *SN = cast<StoreSDNode>(
N);
7008 auto &Op1 =
N->getOperand(1);
7009 EVT MemVT = SN->getMemoryVT();
7014 if (MemVT.
isInteger() && SN->isTruncatingStore()) {
7016 combineTruncateExtract(
SDLoc(
N), MemVT, SN->getValue(), DCI)) {
7017 DCI.AddToWorklist(
Value.getNode());
7021 SN->getBasePtr(), SN->getMemoryVT(),
7022 SN->getMemOperand());
7026 if (!SN->isTruncatingStore() &&
7037 N->getOperand(0), BSwapOp,
N->getOperand(2)
7042 Ops, MemVT, SN->getMemOperand());
7045 if (!SN->isTruncatingStore() &&
7048 Subtarget.hasVectorEnhancements2()) {
7058 Ops, MemVT, SN->getMemOperand());
7063 if (!SN->isTruncatingStore() &&
7066 N->getOperand(0).reachesChainWithoutSideEffects(
SDValue(Op1.
getNode(), 1))) {
7070 Ops, MemVT, SN->getMemOperand());
7079 DAG.
getStore(SN->getChain(),
DL, HiPart, SN->getBasePtr(),
7080 SN->getPointerInfo(), SN->getOriginalAlign(),
7081 SN->getMemOperand()->getFlags(), SN->getAAInfo());
7086 SN->getPointerInfo().getWithOffset(8),
7087 SN->getOriginalAlign(),
7088 SN->getMemOperand()->
getFlags(), SN->getAAInfo());
7108 if (
C->getAPIntValue().getBitWidth() > 64 ||
C->isAllOnes() ||
7112 if (VCI.isVectorConstantLegal(Subtarget) &&
7121 auto FindReplicatedReg = [&](
SDValue MulOp) {
7122 EVT MulVT = MulOp.getValueType();
7123 if (MulOp->getOpcode() ==
ISD::MUL &&
7124 (MulVT == MVT::i16 || MulVT == MVT::i32 || MulVT == MVT::i64)) {
7128 WordVT =
LHS->getOperand(0).getValueType();
7130 WordVT = cast<VTSDNode>(
LHS->getOperand(1))->getVT();
7134 if (
auto *
C = dyn_cast<ConstantSDNode>(MulOp->getOperand(1))) {
7136 APInt(MulVT.getSizeInBits(),
C->getZExtValue()));
7137 if (VCI.isVectorConstantLegal(Subtarget) &&
7139 WordVT == VCI.VecVT.getScalarType())
7145 if (isa<BuildVectorSDNode>(Op1) &&
7148 if (
auto *
C = dyn_cast<ConstantSDNode>(SplatVal))
7151 FindReplicatedReg(SplatVal);
7153 if (
auto *
C = dyn_cast<ConstantSDNode>(Op1))
7156 FindReplicatedReg(Op1);
7161 "Bad type handling");
7166 SN->getBasePtr(), SN->getMemOperand());
7173SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
7174 SDNode *
N, DAGCombinerInfo &DCI)
const {
7178 N->getOperand(0).hasOneUse() &&
7179 Subtarget.hasVectorEnhancements2()) {
7194 Ops,
LD->getMemoryVT(),
LD->getMemOperand());
7198 DCI.CombineTo(
N, ESLoad);
7202 DCI.CombineTo(
Load.getNode(), ESLoad, ESLoad.
getValue(1));
7212SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
7213 SDNode *
N, DAGCombinerInfo &DCI)
const {
7216 if (!Subtarget.hasVector())
7222 Op.getValueType().isVector() &&
7223 Op.getOperand(0).getValueType().isVector() &&
7224 Op.getValueType().getVectorNumElements() ==
7225 Op.getOperand(0).getValueType().getVectorNumElements())
7226 Op =
Op.getOperand(0);
7230 EVT VecVT =
Op.getValueType();
7233 Op.getOperand(0),
N->getOperand(1));
7234 DCI.AddToWorklist(
Op.getNode());
7236 if (EltVT !=
N->getValueType(0)) {
7237 DCI.AddToWorklist(
Op.getNode());
7244 if (
auto *IndexN = dyn_cast<ConstantSDNode>(
N->getOperand(1))) {
7247 return combineExtract(
SDLoc(
N),
N->getValueType(0), VecVT, Op0,
7248 IndexN->getZExtValue(), DCI,
false);
7253SDValue SystemZTargetLowering::combineJOIN_DWORDS(
7254 SDNode *
N, DAGCombinerInfo &DCI)
const {
7257 if (
N->getOperand(0) ==
N->getOperand(1))
7268 if (Chain1 == Chain2)
7276SDValue SystemZTargetLowering::combineFP_ROUND(
7277 SDNode *
N, DAGCombinerInfo &DCI)
const {
7279 if (!Subtarget.hasVector())
7288 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
7291 if (
N->getValueType(0) == MVT::f32 && Op0.
hasOneUse() &&
7297 for (
auto *U : Vec->
uses()) {
7298 if (U != Op0.
getNode() &&
U->hasOneUse() &&
7300 U->getOperand(0) == Vec &&
7302 U->getConstantOperandVal(1) == 1) {
7304 if (OtherRound.
getOpcode() ==
N->getOpcode() &&
7308 if (
N->isStrictFPOpcode()) {
7313 {MVT::v4f32, MVT::Other}, {Chain, Vec});
7318 DCI.AddToWorklist(VRound.
getNode());
7322 DCI.AddToWorklist(Extract1.
getNode());
7331 N->getVTList(), Extract0, Chain);
7340SDValue SystemZTargetLowering::combineFP_EXTEND(
7341 SDNode *
N, DAGCombinerInfo &DCI)
const {
7343 if (!Subtarget.hasVector())
7352 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
7355 if (
N->getValueType(0) == MVT::f64 && Op0.
hasOneUse() &&
7361 for (
auto *U : Vec->
uses()) {
7362 if (U != Op0.
getNode() &&
U->hasOneUse() &&
7364 U->getOperand(0) == Vec &&
7366 U->getConstantOperandVal(1) == 2) {
7368 if (OtherExtend.
getOpcode() ==
N->getOpcode() &&
7372 if (
N->isStrictFPOpcode()) {
7377 {MVT::v2f64, MVT::Other}, {Chain, Vec});
7382 DCI.AddToWorklist(VExtend.
getNode());
7386 DCI.AddToWorklist(Extract1.
getNode());
7395 N->getVTList(), Extract0, Chain);
7404SDValue SystemZTargetLowering::combineINT_TO_FP(
7405 SDNode *
N, DAGCombinerInfo &DCI)
const {
7410 unsigned Opcode =
N->getOpcode();
7411 EVT OutVT =
N->getValueType(0);
7415 unsigned InScalarBits =
Op->getValueType(0).getScalarSizeInBits();
7421 if (OutLLVMTy->
isVectorTy() && OutScalarBits > InScalarBits &&
7422 OutScalarBits <= 64) {
7423 unsigned NumElts = cast<FixedVectorType>(OutLLVMTy)->getNumElements();
7426 unsigned ExtOpcode =
7434SDValue SystemZTargetLowering::combineBSWAP(
7435 SDNode *
N, DAGCombinerInfo &DCI)
const {
7439 N->getOperand(0).hasOneUse() &&
7440 canLoadStoreByteSwapped(
N->getValueType(0))) {
7449 EVT LoadVT =
N->getValueType(0);
7450 if (LoadVT == MVT::i16)
7455 Ops,
LD->getMemoryVT(),
LD->getMemOperand());
7459 if (
N->getValueType(0) == MVT::i16)
7464 DCI.CombineTo(
N, ResVal);
7468 DCI.CombineTo(
Load.getNode(), ResVal, BSLoad.
getValue(1));
7477 Op.getValueType().isVector() &&
7478 Op.getOperand(0).getValueType().isVector() &&
7479 Op.getValueType().getVectorNumElements() ==
7480 Op.getOperand(0).getValueType().getVectorNumElements())
7481 Op =
Op.getOperand(0);
7493 (canLoadStoreByteSwapped(
N->getValueType(0)) &&
7495 EVT VecVT =
N->getValueType(0);
7496 EVT EltVT =
N->getValueType(0).getVectorElementType();
7499 DCI.AddToWorklist(Vec.
getNode());
7503 DCI.AddToWorklist(Elt.
getNode());
7506 DCI.AddToWorklist(Vec.
getNode());
7508 DCI.AddToWorklist(Elt.
getNode());
7516 if (SV &&
Op.hasOneUse()) {
7524 EVT VecVT =
N->getValueType(0);
7527 DCI.AddToWorklist(Op0.
getNode());
7531 DCI.AddToWorklist(Op1.
getNode());
7534 DCI.AddToWorklist(Op0.
getNode());
7536 DCI.AddToWorklist(Op1.
getNode());
7558 auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
7565 bool Invert =
false;
7572 auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
7575 auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
7578 if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
7580 else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
7584 auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
7585 auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
7586 if (!NewCCValid || !NewCCMask)
7588 CCValid = NewCCValid->getZExtValue();
7589 CCMask = NewCCMask->getZExtValue();
7599 if (CompareLHS->getOpcode() ==
ISD::SRA) {
7600 auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
7601 if (!SRACount || SRACount->getZExtValue() != 30)
7603 auto *SHL = CompareLHS->getOperand(0).getNode();
7606 auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
7609 auto *IPM = SHL->getOperand(0).getNode();
7614 if (!CompareLHS->hasOneUse())
7617 if (CompareRHS->getZExtValue() != 0)
7624 CCReg = IPM->getOperand(0);
7631SDValue SystemZTargetLowering::combineBR_CCMASK(
7632 SDNode *
N, DAGCombinerInfo &DCI)
const {
7636 auto *CCValid = dyn_cast<ConstantSDNode>(
N->getOperand(1));
7637 auto *CCMask = dyn_cast<ConstantSDNode>(
N->getOperand(2));
7638 if (!CCValid || !CCMask)
7641 int CCValidVal = CCValid->getZExtValue();
7642 int CCMaskVal = CCMask->getZExtValue();
7651 N->getOperand(3), CCReg);
7655SDValue SystemZTargetLowering::combineSELECT_CCMASK(
7656 SDNode *
N, DAGCombinerInfo &DCI)
const {
7660 auto *CCValid = dyn_cast<ConstantSDNode>(
N->getOperand(2));
7661 auto *CCMask = dyn_cast<ConstantSDNode>(
N->getOperand(3));
7662 if (!CCValid || !CCMask)
7665 int CCValidVal = CCValid->getZExtValue();
7666 int CCMaskVal = CCMask->getZExtValue();
7671 N->getOperand(0),
N->getOperand(1),
7679SDValue SystemZTargetLowering::combineGET_CCMASK(
7680 SDNode *
N, DAGCombinerInfo &DCI)
const {
7683 auto *CCValid = dyn_cast<ConstantSDNode>(
N->getOperand(1));
7684 auto *CCMask = dyn_cast<ConstantSDNode>(
N->getOperand(2));
7685 if (!CCValid || !CCMask)
7687 int CCValidVal = CCValid->getZExtValue();
7688 int CCMaskVal = CCMask->getZExtValue();
7696 auto *SelectCCValid = dyn_cast<ConstantSDNode>(
Select->getOperand(2));
7697 auto *SelectCCMask = dyn_cast<ConstantSDNode>(
Select->getOperand(3));
7698 if (!SelectCCValid || !SelectCCMask)
7700 int SelectCCValidVal = SelectCCValid->getZExtValue();
7701 int SelectCCMaskVal = SelectCCMask->getZExtValue();
7703 auto *
TrueVal = dyn_cast<ConstantSDNode>(
Select->getOperand(0));
7704 auto *
FalseVal = dyn_cast<ConstantSDNode>(
Select->getOperand(1));
7705 if (!TrueVal || !FalseVal)
7709 else if (
TrueVal->getZExtValue() == 0 &&
FalseVal->getZExtValue() == 1)
7710 SelectCCMaskVal ^= SelectCCValidVal;
7714 if (SelectCCValidVal & ~CCValidVal)
7716 if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
7719 return Select->getOperand(4);
7722SDValue SystemZTargetLowering::combineIntDIVREM(
7723 SDNode *
N, DAGCombinerInfo &DCI)
const {
7725 EVT VT =
N->getValueType(0);
7739SDValue SystemZTargetLowering::combineINTRINSIC(
7740 SDNode *
N, DAGCombinerInfo &DCI)
const {
7743 unsigned Id =
N->getConstantOperandVal(1);
7747 case Intrinsic::s390_vll:
7748 case Intrinsic::s390_vlrl:
7749 if (
auto *
C = dyn_cast<ConstantSDNode>(
N->getOperand(2)))
7750 if (
C->getZExtValue() >= 15)
7755 case Intrinsic::s390_vstl:
7756 case Intrinsic::s390_vstrl:
7757 if (
auto *
C = dyn_cast<ConstantSDNode>(
N->getOperand(3)))
7758 if (
C->getZExtValue() >= 15)
7769 return N->getOperand(0);
7775 switch(
N->getOpcode()) {
7800 case ISD::UREM:
return combineIntDIVREM(
N, DCI);
7812 EVT VT =
Op.getValueType();
7815 unsigned Opcode =
Op.getOpcode();
7817 unsigned Id =
Op.getConstantOperandVal(0);
7819 case Intrinsic::s390_vpksh:
7820 case Intrinsic::s390_vpksf:
7821 case Intrinsic::s390_vpksg:
7822 case Intrinsic::s390_vpkshs:
7823 case Intrinsic::s390_vpksfs:
7824 case Intrinsic::s390_vpksgs:
7825 case Intrinsic::s390_vpklsh:
7826 case Intrinsic::s390_vpklsf:
7827 case Intrinsic::s390_vpklsg:
7828 case Intrinsic::s390_vpklshs:
7829 case Intrinsic::s390_vpklsfs:
7830 case Intrinsic::s390_vpklsgs:
7832 SrcDemE = DemandedElts;
7835 SrcDemE = SrcDemE.
trunc(NumElts / 2);
7838 case Intrinsic::s390_vuphb:
7839 case Intrinsic::s390_vuphh:
7840 case Intrinsic::s390_vuphf:
7841 case Intrinsic::s390_vuplhb:
7842 case Intrinsic::s390_vuplhh:
7843 case Intrinsic::s390_vuplhf:
7844 SrcDemE =
APInt(NumElts * 2, 0);
7847 case Intrinsic::s390_vuplb:
7848 case Intrinsic::s390_vuplhw:
7849 case Intrinsic::s390_vuplf:
7850 case Intrinsic::s390_vupllb:
7851 case Intrinsic::s390_vupllh:
7852 case Intrinsic::s390_vupllf:
7853 SrcDemE =
APInt(NumElts * 2, 0);
7856 case Intrinsic::s390_vpdi: {
7858 SrcDemE =
APInt(NumElts, 0);
7859 if (!DemandedElts[OpNo - 1])
7861 unsigned Mask =
Op.getConstantOperandVal(3);
7862 unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
7864 SrcDemE.
setBit((Mask & MaskBit)? 1 : 0);
7867 case Intrinsic::s390_vsldb: {
7869 assert(VT == MVT::v16i8 &&
"Unexpected type.");
7870 unsigned FirstIdx =
Op.getConstantOperandVal(3);
7871 assert (FirstIdx > 0 && FirstIdx < 16 &&
"Unused operand.");
7872 unsigned NumSrc0Els = 16 - FirstIdx;
7873 SrcDemE =
APInt(NumElts, 0);
7875 APInt DemEls = DemandedElts.
trunc(NumSrc0Els);
7878 APInt DemEls = DemandedElts.
lshr(NumSrc0Els);
7883 case Intrinsic::s390_vperm:
7884 SrcDemE =
APInt(NumElts, -1);
7894 SrcDemE =
APInt(1, 1);
7897 SrcDemE = DemandedElts;
7908 const APInt &DemandedElts,
7923 const APInt &DemandedElts,
7925 unsigned Depth)
const {
7929 unsigned tmp0, tmp1;
7934 EVT VT =
Op.getValueType();
7935 if (
Op.getResNo() != 0 || VT == MVT::Untyped)
7938 "KnownBits does not match VT in bitwidth");
7941 "DemandedElts does not match VT number of elements");
7943 unsigned Opcode =
Op.getOpcode();
7945 bool IsLogical =
false;
7946 unsigned Id =
Op.getConstantOperandVal(0);
7948 case Intrinsic::s390_vpksh:
7949 case Intrinsic::s390_vpksf:
7950 case Intrinsic::s390_vpksg:
7951 case Intrinsic::s390_vpkshs:
7952 case Intrinsic::s390_vpksfs:
7953 case Intrinsic::s390_vpksgs:
7954 case Intrinsic::s390_vpklsh:
7955 case Intrinsic::s390_vpklsf:
7956 case Intrinsic::s390_vpklsg:
7957 case Intrinsic::s390_vpklshs:
7958 case Intrinsic::s390_vpklsfs:
7959 case Intrinsic::s390_vpklsgs:
7960 case Intrinsic::s390_vpdi:
7961 case Intrinsic::s390_vsldb:
7962 case Intrinsic::s390_vperm:
7965 case Intrinsic::s390_vuplhb:
7966 case Intrinsic::s390_vuplhh:
7967 case Intrinsic::s390_vuplhf:
7968 case Intrinsic::s390_vupllb:
7969 case Intrinsic::s390_vupllh:
7970 case Intrinsic::s390_vupllf:
7973 case Intrinsic::s390_vuphb:
7974 case Intrinsic::s390_vuphh:
7975 case Intrinsic::s390_vuphf:
7976 case Intrinsic::s390_vuplb:
7977 case Intrinsic::s390_vuplhw:
7978 case Intrinsic::s390_vuplf: {
8020 if (
LHS == 1)
return 1;
8023 if (
RHS == 1)
return 1;
8024 unsigned Common = std::min(
LHS,
RHS);
8025 unsigned SrcBitWidth =
Op.getOperand(OpNo).getScalarValueSizeInBits();
8026 EVT VT =
Op.getValueType();
8028 if (SrcBitWidth > VTBits) {
8029 unsigned SrcExtraBits = SrcBitWidth - VTBits;
8030 if (Common > SrcExtraBits)
8031 return (Common - SrcExtraBits);
8034 assert (SrcBitWidth == VTBits &&
"Expected operands of same bitwidth.");
8041 unsigned Depth)
const {
8042 if (
Op.getResNo() != 0)
8044 unsigned Opcode =
Op.getOpcode();
8046 unsigned Id =
Op.getConstantOperandVal(0);
8048 case Intrinsic::s390_vpksh:
8049 case Intrinsic::s390_vpksf:
8050 case Intrinsic::s390_vpksg:
8051 case Intrinsic::s390_vpkshs:
8052 case Intrinsic::s390_vpksfs:
8053 case Intrinsic::s390_vpksgs:
8054 case Intrinsic::s390_vpklsh:
8055 case Intrinsic::s390_vpklsf:
8056 case Intrinsic::s390_vpklsg:
8057 case Intrinsic::s390_vpklshs:
8058 case Intrinsic::s390_vpklsfs:
8059 case Intrinsic::s390_vpklsgs:
8060 case Intrinsic::s390_vpdi:
8061 case Intrinsic::s390_vsldb:
8062 case Intrinsic::s390_vperm:
8064 case Intrinsic::s390_vuphb:
8065 case Intrinsic::s390_vuphh:
8066 case Intrinsic::s390_vuphf:
8067 case Intrinsic::s390_vuplb:
8068 case Intrinsic::s390_vuplhw:
8069 case Intrinsic::s390_vuplf: {
8073 EVT VT =
Op.getValueType();
8097 switch (
Op->getOpcode()) {
8110 "Unexpected stack alignment");
8113 unsigned StackProbeSize =
8116 StackProbeSize &= ~(StackAlign - 1);
8117 return StackProbeSize ? StackProbeSize : StackAlign;
8134 Register Reg =
MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
8140 Register Reg =
MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
8166 if (Succ->isLiveIn(SystemZ::CC))
8177 switch (
MI.getOpcode()) {
8178 case SystemZ::Select32:
8179 case SystemZ::Select64:
8180 case SystemZ::Select128:
8181 case SystemZ::SelectF32:
8182 case SystemZ::SelectF64:
8183 case SystemZ::SelectF128:
8184 case SystemZ::SelectVR32:
8185 case SystemZ::SelectVR64:
8186 case SystemZ::SelectVR128:
8218 for (
auto *
MI : Selects) {
8219 Register DestReg =
MI->getOperand(0).getReg();
8220 Register TrueReg =
MI->getOperand(1).getReg();
8221 Register FalseReg =
MI->getOperand(2).getReg();
8226 if (
MI->getOperand(4).getImm() == (CCValid ^ CCMask))
8229 if (RegRewriteTable.
contains(TrueReg))
8230 TrueReg = RegRewriteTable[TrueReg].first;
8232 if (RegRewriteTable.
contains(FalseReg))
8233 FalseReg = RegRewriteTable[FalseReg].second;
8236 BuildMI(*SinkMBB, SinkInsertionPoint,
DL,
TII->get(SystemZ::PHI), DestReg)
8241 RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
8253 assert(TFL->hasReservedCallFrame(MF) &&
8254 "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
8259 uint32_t NumBytes =
MI.getOperand(0).getImm();
8264 MI.eraseFromParent();
8275 unsigned CCValid =
MI.getOperand(3).getImm();
8276 unsigned CCMask =
MI.getOperand(4).getImm();
8288 assert(NextMI.getOperand(3).getImm() == CCValid &&
8289 "Bad CCValid operands since CC was not redefined.");
8290 if (NextMI.getOperand(4).getImm() == CCMask ||
8291 NextMI.getOperand(4).getImm() == (CCValid ^ CCMask)) {
8297 if (NextMI.definesRegister(SystemZ::CC,
nullptr) ||
8298 NextMI.usesCustomInsertionHook())
8301 for (
auto *SelMI : Selects)
8302 if (NextMI.readsVirtualRegister(SelMI->getOperand(0).getReg())) {
8306 if (NextMI.isDebugInstr()) {
8308 assert(NextMI.isDebugValue() &&
"Unhandled debug opcode.");
8311 }
else if (
User || ++Count > 20)
8316 bool CCKilled = (LastMI->
killsRegister(SystemZ::CC,
nullptr) ||
8348 for (
auto *SelMI : Selects)
8349 SelMI->eraseFromParent();
8352 for (
auto *DbgMI : DbgValues)
8353 MBB->
splice(InsertPos, StartMBB, DbgMI);
8364 unsigned StoreOpcode,
8365 unsigned STOCOpcode,
8366 bool Invert)
const {
8371 int64_t Disp =
MI.getOperand(2).getImm();
8372 Register IndexReg =
MI.getOperand(3).getReg();
8373 unsigned CCValid =
MI.getOperand(4).getImm();
8374 unsigned CCMask =
MI.getOperand(5).getImm();
8377 StoreOpcode =
TII->getOpcodeForOffset(StoreOpcode, Disp);
8382 for (
auto *
I :
MI.memoperands())
8391 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
8403 MI.eraseFromParent();
8417 if (!
MI.killsRegister(SystemZ::CC,
nullptr) &&
8444 MI.eraseFromParent();
8480 int HiOpcode =
Unsigned? SystemZ::VECLG : SystemZ::VECG;
8499 Register Temp =
MRI.createVirtualRegister(&SystemZ::VR128BitRegClass);
8507 MI.eraseFromParent();
8518 bool Invert)
const {
8527 int64_t Disp =
MI.getOperand(2).getImm();
8529 Register BitShift =
MI.getOperand(4).getReg();
8530 Register NegBitShift =
MI.getOperand(5).getReg();
8531 unsigned BitSize =
MI.getOperand(6).getImm();
8535 unsigned LOpcode =
TII->getOpcodeForOffset(SystemZ::L, Disp);
8536 unsigned CSOpcode =
TII->getOpcodeForOffset(SystemZ::CS, Disp);
8537 assert(LOpcode && CSOpcode &&
"Displacement out of range");
8540 Register OrigVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8541 Register OldVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8542 Register NewVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8543 Register RotatedOldVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8544 Register RotatedNewVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8575 Register Tmp =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8580 }
else if (BinOpcode)
8603 MI.eraseFromParent();
8614 unsigned KeepOldMask)
const {
8622 int64_t Disp =
MI.getOperand(2).getImm();
8624 Register BitShift =
MI.getOperand(4).getReg();
8625 Register NegBitShift =
MI.getOperand(5).getReg();
8626 unsigned BitSize =
MI.getOperand(6).getImm();
8630 unsigned LOpcode =
TII->getOpcodeForOffset(SystemZ::L, Disp);
8631 unsigned CSOpcode =
TII->getOpcodeForOffset(SystemZ::CS, Disp);
8632 assert(LOpcode && CSOpcode &&
"Displacement out of range");
8635 Register OrigVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8636 Register OldVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8637 Register NewVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8638 Register RotatedOldVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8639 Register RotatedAltVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8640 Register RotatedNewVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8707 MI.eraseFromParent();
8723 int64_t Disp =
MI.getOperand(2).getImm();
8725 Register OrigSwapVal =
MI.getOperand(4).getReg();
8726 Register BitShift =
MI.getOperand(5).getReg();
8727 Register NegBitShift =
MI.getOperand(6).getReg();
8728 int64_t BitSize =
MI.getOperand(7).getImm();
8734 unsigned LOpcode =
TII->getOpcodeForOffset(SystemZ::L, Disp);
8735 unsigned CSOpcode =
TII->getOpcodeForOffset(SystemZ::CS, Disp);
8736 unsigned ZExtOpcode = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR;
8737 assert(LOpcode && CSOpcode &&
"Displacement out of range");
8740 Register OrigOldVal =
MRI.createVirtualRegister(RC);
8743 Register StoreVal =
MRI.createVirtualRegister(RC);
8744 Register OldValRot =
MRI.createVirtualRegister(RC);
8745 Register RetryOldVal =
MRI.createVirtualRegister(RC);
8746 Register RetrySwapVal =
MRI.createVirtualRegister(RC);
8821 if (!
MI.registerDefIsDead(SystemZ::CC,
nullptr))
8824 MI.eraseFromParent();
8840 Register Tmp1 =
MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
8841 Register Tmp2 =
MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
8849 MI.eraseFromParent();
8858 bool ClearEven)
const {
8866 Register In128 =
MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
8870 Register NewIn128 =
MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
8871 Register Zero64 =
MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
8882 MI.eraseFromParent();
8889 unsigned Opcode,
bool IsMemset)
const {
8896 uint64_t DestDisp =
MI.getOperand(1).getImm();
8902 if (!isUInt<12>(Disp)) {
8903 Register Reg =
MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
8904 unsigned Opcode =
TII->getOpcodeForOffset(SystemZ::LA, Disp);
8914 SrcDisp =
MI.getOperand(3).getImm();
8917 SrcDisp = DestDisp++;
8918 foldDisplIfNeeded(DestBase, DestDisp);
8922 bool IsImmForm = LengthMO.
isImm();
8923 bool IsRegForm = !IsImmForm;
8930 unsigned Length) ->
void {
8949 bool NeedsLoop =
false;
8951 Register LenAdjReg = SystemZ::NoRegister;
8953 ImmLength = LengthMO.
getImm();
8954 ImmLength += IsMemset ? 2 : 1;
8955 if (ImmLength == 0) {
8956 MI.eraseFromParent();
8959 if (Opcode == SystemZ::CLC) {
8960 if (ImmLength > 3 * 256)
8970 }
else if (ImmLength > 6 * 256)
8978 LenAdjReg = LengthMO.
getReg();
8984 (Opcode == SystemZ::CLC && (ImmLength > 256 || NeedsLoop)
8990 MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
8992 TII->loadImmediate(*
MBB,
MI, StartCountReg, ImmLength / 256);
9003 Register Reg =
MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
9007 if (DestBase.
isReg() && DestBase.
getReg() == SystemZ::NoRegister)
9008 DestBase = loadZeroAddress();
9009 if (SrcBase.
isReg() && SrcBase.
getReg() == SystemZ::NoRegister)
9010 SrcBase = HaveSingleBase ? DestBase : loadZeroAddress();
9020 (HaveSingleBase ? StartSrcReg :
forceReg(
MI, DestBase,
TII));
9023 Register ThisSrcReg =
MRI.createVirtualRegister(RC);
9025 (HaveSingleBase ? ThisSrcReg :
MRI.createVirtualRegister(RC));
9026 Register NextSrcReg =
MRI.createVirtualRegister(RC);
9028 (HaveSingleBase ? NextSrcReg :
MRI.createVirtualRegister(RC));
9029 RC = &SystemZ::GR64BitRegClass;
9030 Register ThisCountReg =
MRI.createVirtualRegister(RC);
9031 Register NextCountReg =
MRI.createVirtualRegister(RC);
9057 MBB = MemsetOneCheckMBB;
9100 if (EndMBB && !ImmLength)
9122 if (!HaveSingleBase)
9129 if (Opcode == SystemZ::MVC)
9156 if (!HaveSingleBase)
9178 Register RemSrcReg =
MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
9179 Register RemDestReg = HaveSingleBase ? RemSrcReg
9180 :
MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
9184 if (!HaveSingleBase)
9200 if (Opcode != SystemZ::MVC) {
9210 while (ImmLength > 0) {
9214 foldDisplIfNeeded(DestBase, DestDisp);
9215 foldDisplIfNeeded(SrcBase, SrcDisp);
9216 insertMemMemOp(
MBB,
MI, DestBase, DestDisp, SrcBase, SrcDisp, ThisLength);
9217 DestDisp += ThisLength;
9218 SrcDisp += ThisLength;
9219 ImmLength -= ThisLength;
9222 if (EndMBB && ImmLength > 0) {
9238 MI.eraseFromParent();
9251 uint64_t End1Reg =
MI.getOperand(0).getReg();
9252 uint64_t Start1Reg =
MI.getOperand(1).getReg();
9253 uint64_t Start2Reg =
MI.getOperand(2).getReg();
9254 uint64_t CharReg =
MI.getOperand(3).getReg();
9257 uint64_t This1Reg =
MRI.createVirtualRegister(RC);
9258 uint64_t This2Reg =
MRI.createVirtualRegister(RC);
9297 MI.eraseFromParent();
9304 bool NoFloat)
const {
9310 MI.setDesc(
TII->get(Opcode));
9314 uint64_t Control =
MI.getOperand(2).getImm();
9315 static const unsigned GPRControlBit[16] = {
9316 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
9317 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
9319 Control |= GPRControlBit[15];
9321 Control |= GPRControlBit[11];
9322 MI.getOperand(2).setImm(Control);
9325 for (
int I = 0;
I < 16;
I++) {
9326 if ((Control & GPRControlBit[
I]) == 0) {
9333 if (!NoFloat && (Control & 4) != 0) {
9334 if (Subtarget.hasVector()) {
9366 MI.eraseFromParent();
9379 Register SizeReg =
MI.getOperand(2).getReg();
9391 Register PHIReg =
MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
9392 Register IncReg =
MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
9457 MI.eraseFromParent();
9461SDValue SystemZTargetLowering::
9472 switch (
MI.getOpcode()) {
9473 case SystemZ::ADJCALLSTACKDOWN:
9474 case SystemZ::ADJCALLSTACKUP:
9475 return emitAdjCallStack(
MI,
MBB);
9477 case SystemZ::Select32:
9478 case SystemZ::Select64:
9479 case SystemZ::Select128:
9480 case SystemZ::SelectF32:
9481 case SystemZ::SelectF64:
9482 case SystemZ::SelectF128:
9483 case SystemZ::SelectVR32:
9484 case SystemZ::SelectVR64:
9485 case SystemZ::SelectVR128:
9486 return emitSelect(
MI,
MBB);
9488 case SystemZ::CondStore8Mux:
9489 return emitCondStore(
MI,
MBB, SystemZ::STCMux, 0,
false);
9490 case SystemZ::CondStore8MuxInv:
9491 return emitCondStore(
MI,
MBB, SystemZ::STCMux, 0,
true);
9492 case SystemZ::CondStore16Mux:
9493 return emitCondStore(
MI,
MBB, SystemZ::STHMux, 0,
false);
9494 case SystemZ::CondStore16MuxInv:
9495 return emitCondStore(
MI,
MBB, SystemZ::STHMux, 0,
true);
9496 case SystemZ::CondStore32Mux:
9497 return emitCondStore(
MI,
MBB, SystemZ::STMux, SystemZ::STOCMux,
false);
9498 case SystemZ::CondStore32MuxInv:
9499 return emitCondStore(
MI,
MBB, SystemZ::STMux, SystemZ::STOCMux,
true);
9500 case SystemZ::CondStore8:
9501 return emitCondStore(
MI,
MBB, SystemZ::STC, 0,
false);
9502 case SystemZ::CondStore8Inv:
9503 return emitCondStore(
MI,
MBB, SystemZ::STC, 0,
true);
9504 case SystemZ::CondStore16:
9505 return emitCondStore(
MI,
MBB, SystemZ::STH, 0,
false);
9506 case SystemZ::CondStore16Inv:
9507 return emitCondStore(
MI,
MBB, SystemZ::STH, 0,
true);
9508 case SystemZ::CondStore32:
9509 return emitCondStore(
MI,
MBB, SystemZ::ST, SystemZ::STOC,
false);
9510 case SystemZ::CondStore32Inv:
9511 return emitCondStore(
MI,
MBB, SystemZ::ST, SystemZ::STOC,
true);
9512 case SystemZ::CondStore64:
9513 return emitCondStore(
MI,
MBB, SystemZ::STG, SystemZ::STOCG,
false);
9514 case SystemZ::CondStore64Inv:
9515 return emitCondStore(
MI,
MBB, SystemZ::STG, SystemZ::STOCG,
true);
9516 case SystemZ::CondStoreF32:
9517 return emitCondStore(
MI,
MBB, SystemZ::STE, 0,
false);
9518 case SystemZ::CondStoreF32Inv:
9519 return emitCondStore(
MI,
MBB, SystemZ::STE, 0,
true);
9520 case SystemZ::CondStoreF64:
9521 return emitCondStore(
MI,
MBB, SystemZ::STD, 0,
false);
9522 case SystemZ::CondStoreF64Inv:
9523 return emitCondStore(
MI,
MBB, SystemZ::STD, 0,
true);
9525 case SystemZ::SCmp128Hi:
9526 return emitICmp128Hi(
MI,
MBB,
false);
9527 case SystemZ::UCmp128Hi:
9528 return emitICmp128Hi(
MI,
MBB,
true);
9530 case SystemZ::PAIR128:
9531 return emitPair128(
MI,
MBB);
9532 case SystemZ::AEXT128:
9533 return emitExt128(
MI,
MBB,
false);
9534 case SystemZ::ZEXT128:
9535 return emitExt128(
MI,
MBB,
true);
9537 case SystemZ::ATOMIC_SWAPW:
9538 return emitAtomicLoadBinary(
MI,
MBB, 0);
9540 case SystemZ::ATOMIC_LOADW_AR:
9541 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::AR);
9542 case SystemZ::ATOMIC_LOADW_AFI:
9543 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::AFI);
9545 case SystemZ::ATOMIC_LOADW_SR:
9546 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::SR);
9548 case SystemZ::ATOMIC_LOADW_NR:
9549 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NR);
9550 case SystemZ::ATOMIC_LOADW_NILH:
9551 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NILH);
9553 case SystemZ::ATOMIC_LOADW_OR:
9554 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::OR);
9555 case SystemZ::ATOMIC_LOADW_OILH:
9556 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::OILH);
9558 case SystemZ::ATOMIC_LOADW_XR:
9559 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::XR);
9560 case SystemZ::ATOMIC_LOADW_XILF:
9561 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::XILF);
9563 case SystemZ::ATOMIC_LOADW_NRi:
9564 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NR,
true);
9565 case SystemZ::ATOMIC_LOADW_NILHi:
9566 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NILH,
true);
9568 case SystemZ::ATOMIC_LOADW_MIN:
9570 case SystemZ::ATOMIC_LOADW_MAX:
9572 case SystemZ::ATOMIC_LOADW_UMIN:
9574 case SystemZ::ATOMIC_LOADW_UMAX:
9577 case SystemZ::ATOMIC_CMP_SWAPW:
9578 return emitAtomicCmpSwapW(
MI,
MBB);
9579 case SystemZ::MVCImm:
9580 case SystemZ::MVCReg:
9581 return emitMemMemWrapper(
MI,
MBB, SystemZ::MVC);
9582 case SystemZ::NCImm:
9583 return emitMemMemWrapper(
MI,
MBB, SystemZ::NC);
9584 case SystemZ::OCImm:
9585 return emitMemMemWrapper(
MI,
MBB, SystemZ::OC);
9586 case SystemZ::XCImm:
9587 case SystemZ::XCReg:
9588 return emitMemMemWrapper(
MI,
MBB, SystemZ::XC);
9589 case SystemZ::CLCImm:
9590 case SystemZ::CLCReg:
9591 return emitMemMemWrapper(
MI,
MBB, SystemZ::CLC);
9592 case SystemZ::MemsetImmImm:
9593 case SystemZ::MemsetImmReg:
9594 case SystemZ::MemsetRegImm:
9595 case SystemZ::MemsetRegReg:
9596 return emitMemMemWrapper(
MI,
MBB, SystemZ::MVC,
true);
9597 case SystemZ::CLSTLoop:
9598 return emitStringWrapper(
MI,
MBB, SystemZ::CLST);
9599 case SystemZ::MVSTLoop:
9600 return emitStringWrapper(
MI,
MBB, SystemZ::MVST);
9601 case SystemZ::SRSTLoop:
9602 return emitStringWrapper(
MI,
MBB, SystemZ::SRST);
9603 case SystemZ::TBEGIN:
9604 return emitTransactionBegin(
MI,
MBB, SystemZ::TBEGIN,
false);
9605 case SystemZ::TBEGIN_nofloat:
9606 return emitTransactionBegin(
MI,
MBB, SystemZ::TBEGIN,
true);
9607 case SystemZ::TBEGINC:
9608 return emitTransactionBegin(
MI,
MBB, SystemZ::TBEGINC,
true);
9609 case SystemZ::LTEBRCompare_Pseudo:
9610 return emitLoadAndTestCmp0(
MI,
MBB, SystemZ::LTEBR);
9611 case SystemZ::LTDBRCompare_Pseudo:
9612 return emitLoadAndTestCmp0(
MI,
MBB, SystemZ::LTDBR);
9613 case SystemZ::LTXBRCompare_Pseudo:
9614 return emitLoadAndTestCmp0(
MI,
MBB, SystemZ::LTXBR);
9616 case SystemZ::PROBED_ALLOCA:
9617 return emitProbedAlloca(
MI,
MBB);
9619 case TargetOpcode::STACKMAP:
9620 case TargetOpcode::PATCHPOINT:
9631SystemZTargetLowering::getRepRegClassFor(
MVT VT)
const {
9632 if (VT == MVT::Untyped)
9633 return &SystemZ::ADDR128BitRegClass;
9659 DAG.
getMachineNode(SystemZ::EFPC, dl, {MVT::i32, MVT::Other}, Chain), 0);
9679 EVT VT =
Op.getValueType();
9680 Op =
Op.getOperand(0);
9681 EVT OpVT =
Op.getValueType();
9683 assert(OpVT.
isVector() &&
"Operand type for VECREDUCE_ADD is not a vector.");
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu AMDGPU Register Bank Select
static bool isZeroVector(SDValue N)
Function Alias Analysis Results
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
static bool isUndef(ArrayRef< int > Mask)
iv Induction Variable Users
unsigned const TargetRegisterInfo * TRI
const char LLVMTargetMachineRef TM
static bool isSelectPseudo(MachineInstr &MI)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForLTGFR(Comparison &C)
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, SDValue Op1)
static bool isOnlyUsedByStores(SDValue StoredVal, SelectionDAG &DAG)
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, unsigned Opcode, SDValue Op0, SDValue Op1, SDValue &Even, SDValue &Odd)
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Value)
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In)
static bool isSimpleShift(SDValue N, unsigned &ShiftVal)
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1)
static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num)
static bool isVectorElementSwap(ArrayRef< int > M, EVT VT)
static void getCSAddressAndShifts(SDValue Addr, SelectionDAG &DAG, SDLoc DL, SDValue &AlignedAddr, SDValue &BitShift, SDValue &NegBitShift)
static bool isShlDoublePermute(const SmallVectorImpl< int > &Bytes, unsigned &StartIndex, unsigned &OpNo0, unsigned &OpNo1)
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, const Permute &P, SDValue Op0, SDValue Op1)
static SDNode * emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg)
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, SDValue Op0, SDValue Op1, SDValue &Hi, SDValue &Lo)
static void createPHIsForSelects(SmallVector< MachineInstr *, 8 > &Selects, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, MachineBasicBlock *SinkMBB)
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, SDValue *Ops, const SmallVectorImpl< int > &Bytes)
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode, bool &Invert)
static unsigned CCMaskForCondCode(ISD::CondCode CC)
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForFNeg(Comparison &C)
static bool isScalarToVector(SDValue Op)
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg, unsigned CCValid, unsigned CCMask)
static bool matchPermute(const SmallVectorImpl< int > &Bytes, const Permute &P, unsigned &OpNo0, unsigned &OpNo1)
static bool isAddCarryChain(SDValue Carry)
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static MachineOperand earlyUseOperand(MachineOperand Op)
static bool canUseSiblingCall(const CCState &ArgCCInfo, SmallVectorImpl< CCValAssign > &ArgLocs, SmallVectorImpl< ISD::OutputArg > &Outs)
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask)
static bool getzOSCalleeAndADA(SelectionDAG &DAG, SDValue &Callee, SDValue &ADA, SDLoc &DL, SDValue &Chain)
static bool shouldSwapCmpOperands(const Comparison &C)
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType)
static SDValue getADAEntry(SelectionDAG &DAG, SDValue Val, SDLoc DL, unsigned Offset, bool LoadAdr=false)
static SDNode * emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static bool getVPermMask(SDValue ShuffleOp, SmallVectorImpl< int > &Bytes)
static const Permute PermuteForms[]
static SDValue expandBitCastI128ToF128(SelectionDAG &DAG, SDValue Src, SDValue Chain, const SDLoc &SL)
static bool isSubBorrowChain(SDValue Carry)
static void adjustICmp128(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, unsigned OpNo)
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, bool IsNegative)
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static std::pair< SDValue, SDValue > expandBitCastF128ToI128Parts(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, BuildVectorSDNode *BVN)
static bool isMovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode)
static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In)
static SDValue MergeInputChains(SDNode *N1, SDNode *N2)
static SDValue expandBitCastF128ToI128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, uint64_t Mask, uint64_t CmpVal, unsigned ICmpType)
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, SDValue Op, SDValue Chain)
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, ISD::CondCode Cond, const SDLoc &DL, SDValue Chain=SDValue(), bool IsSignaling=false)
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB)
static Register forceReg(MachineInstr &MI, MachineOperand &Base, const SystemZInstrInfo *TII)
static bool is32Bit(EVT VT)
static std::pair< unsigned, const TargetRegisterClass * > parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, const unsigned *Map, unsigned Size)
static bool matchDoublePermute(const SmallVectorImpl< int > &Bytes, const Permute &P, SmallVectorImpl< int > &Transform)
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, SDValue Call, unsigned CCValid, uint64_t CC, ISD::CondCode Cond)
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg)
static AddressingMode getLoadStoreAddrMode(bool HasVector, Type *Ty)
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op0, SDValue Op1)
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static bool getShuffleInput(const SmallVectorImpl< int > &Bytes, unsigned Start, unsigned BytesPerElement, int &Base)
static AddressingMode supportedAddressingMode(Instruction *I, bool HasVector)
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
Class for arbitrary precision integers.
APInt zext(unsigned width) const
Zero extend to a new width.
uint64_t getZExtValue() const
Get zero extended value.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSingleWord() const
Determine if this APInt just has one word to store value.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
StringRef getValueAsString() const
Return the attribute's value as a string.
The address of a basic block.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
CCState - This class holds information needed while lowering arguments and return values.
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
const GlobalObject * getAliaseeObject() const
bool hasPrivateLinkage() const
bool hasInternalLinkage() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
static auto integer_fixedlen_vector_valuetypes()
bool isVector() const
Return true if this is a vector value type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
static MVT getVectorVT(MVT VT, unsigned NumElements)
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
void setFrameAddressIsTaken(bool T)
void setReturnAddressIsTaken(bool s)
unsigned getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
void setMaxCallFrameSize(unsigned S)
MachineFunctionProperties & reset(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
reverse_iterator rbegin()
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineFunctionProperties & getProperties() const
Get the function properties.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr kills the specified register.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr fully defines the specified register.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one use of this node.
iterator_range< use_iterator > uses()
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDNode * isConstantIntBuildVectorOrConstantInt(SDValue N) const
Test whether the given value is a constant int or similar node.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
A SystemZ-specific class detailing special use registers particular for calling conventions.
virtual int getStackPointerBias()=0
virtual int getReturnFunctionAddressRegister()=0
virtual int getCallFrameSize()=0
virtual int getStackPointerRegister()=0
A SystemZ-specific constant pool value.
static SystemZConstantPoolValue * Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier)
unsigned getVarArgsFrameIndex() const
void setVarArgsFrameIndex(unsigned FI)
void setRegSaveFrameIndex(unsigned FI)
void incNumLocalDynamicTLSAccesses()
Register getVarArgsFirstGPR() const
void setADAVirtualRegister(Register Reg)
void setVarArgsFirstGPR(Register GPR)
Register getADAVirtualRegister() const
void setSizeOfFnParams(unsigned Size)
void setVarArgsFirstFPR(Register FPR)
unsigned getRegSaveFrameIndex() const
Register getVarArgsFirstFPR() const
const SystemZInstrInfo * getInstrInfo() const override
bool isPC32DBLSymbol(const GlobalValue *GV, CodeModel::Model CM) const
const TargetFrameLowering * getFrameLowering() const override
bool isTargetXPLINK64() const
SystemZCallingConventionRegisters * getSpecialRegisters() const
const SystemZRegisterInfo * getRegisterInfo() const override
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
bool hasInlineStackProbe(const MachineFunction &MF) const override
Returns true if stack probing through inline assembly is requested.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &, EVT) const override
Return the ValueType of the result of SETCC operations.
bool allowTruncateForTailCall(Type *, Type *) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const override
Determines the optimal series of memory ops to replace the memset / memcpy.
bool useSoftFloat() const override
std::pair< SDValue, SDValue > makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName, EVT RetVT, ArrayRef< SDValue > Ops, CallingConv::ID CallConv, bool IsSigned, SDLoc DL, bool DoesNotReturn, bool IsReturnValueUsed) const
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able to emit the call instruction as a tail call.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
SystemZTargetLowering(const TargetMachine &TM, const SystemZSubtarget &STI)
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
TargetLowering::ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
TargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const override
Target-specific combining of register parts into its original value.
bool isTruncateFree(Type *, Type *) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine the number of bits in the operation that are sign bits.
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const override
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass into.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
unsigned getStackProbeSize(const MachineFunction &MF) const
XPLINK64 calling convention specific use registers Particular to z/OS when in 64 bit mode.
int getCallFrameSize() final
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
virtual bool hasFP(const MachineFunction &MF) const =0
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
unsigned getPointerSize(unsigned AS) const
Get the pointer size for this target.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
constexpr ScalarTy getFixedValue() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Define
Register definition.
@ System
Synchronized with respect to all concurrently executing threads.
@ MO_ADA_DATA_SYMBOL_ADDR
@ MO_ADA_DIRECT_FUNC_DESC
@ MO_ADA_INDIRECT_FUNC_DESC
const unsigned GR64Regs[16]
const unsigned VR128Regs[32]
const unsigned GR128Regs[16]
const unsigned FP32Regs[16]
const unsigned GR32Regs[16]
const unsigned FP64Regs[16]
const int64_t ELFCallFrameSize
const unsigned VR64Regs[32]
const unsigned FP128Regs[16]
const unsigned VR32Regs[32]
unsigned odd128(bool Is32bit)
const unsigned CCMASK_CMP_GE
static bool isImmHH(uint64_t Val)
const unsigned CCMASK_TEND
const unsigned CCMASK_CS_EQ
const unsigned CCMASK_TBEGIN
const MCPhysReg ELFArgFPRs[ELFNumArgFPRs]
MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_TM_SOME_1
const unsigned CCMASK_LOGICAL_CARRY
const unsigned TDCMASK_NORMAL_MINUS
const unsigned CCMASK_TDC
const unsigned CCMASK_FCMP
const unsigned CCMASK_TM_SOME_0
static bool isImmHL(uint64_t Val)
const unsigned TDCMASK_SUBNORMAL_MINUS
const unsigned TDCMASK_NORMAL_PLUS
const unsigned CCMASK_CMP_GT
const unsigned TDCMASK_QNAN_MINUS
const unsigned CCMASK_ANY
const unsigned CCMASK_ARITH
const unsigned CCMASK_TM_MIXED_MSB_0
const unsigned TDCMASK_SUBNORMAL_PLUS
static bool isImmLL(uint64_t Val)
const unsigned VectorBits
static bool isImmLH(uint64_t Val)
MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
const unsigned TDCMASK_INFINITY_PLUS
unsigned reverseCCMask(unsigned CCMask)
const unsigned CCMASK_TM_ALL_0
const unsigned CCMASK_CMP_LE
const unsigned CCMASK_CMP_O
const unsigned CCMASK_CMP_EQ
const unsigned VectorBytes
const unsigned TDCMASK_INFINITY_MINUS
const unsigned CCMASK_ICMP
const unsigned CCMASK_VCMP_ALL
MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_VCMP
const unsigned CCMASK_TM_MIXED_MSB_1
const unsigned CCMASK_TM_MSB_0
const unsigned CCMASK_ARITH_OVERFLOW
const unsigned CCMASK_CS_NE
const unsigned TDCMASK_SNAN_PLUS
const unsigned CCMASK_CMP_LT
const unsigned CCMASK_CMP_NE
const unsigned TDCMASK_ZERO_PLUS
const unsigned TDCMASK_QNAN_PLUS
const unsigned TDCMASK_ZERO_MINUS
unsigned even128(bool Is32bit)
const unsigned CCMASK_TM_ALL_1
const unsigned CCMASK_LOGICAL_BORROW
const unsigned ELFNumArgFPRs
const unsigned CCMASK_CMP_UO
const unsigned CCMASK_LOGICAL
const unsigned CCMASK_TM_MSB_1
const unsigned TDCMASK_SNAN_MINUS
Reg
All possible values of the reg field in the ModR/M byte.
support::ulittle32_t Word
NodeAddr< CodeNode * > Code
constexpr const char32_t SBase
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
testing::Matcher< const detail::ErrorHolder & > Failed()
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void dumpBytes(ArrayRef< uint8_t > Bytes, raw_ostream &OS)
Convert ‘Bytes’ to a hex string and output to ‘OS’.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Mul
Product of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AddressingMode(bool LongDispl, bool IdxReg)
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SystemZVectorConstantInfo(APInt IntImm)
SmallVector< unsigned, 2 > OpVals
bool isVectorConstantLegal(const SystemZSubtarget &Subtarget)
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})