#include "llvm/IR/IntrinsicsS390.h"

#define DEBUG_TYPE "systemz-lower"
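// Constructor of the file-local Comparison struct: it bundles the two
// compare operands (plus an optional chain for strict FP compares) with the
// opcode, integer-compare type, and CC valid/mask fields, all zeroed here
// and filled in later by getCmp().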
      : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
        Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
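// From the SystemZTargetLowering constructor: register classes, legal value
// types, and operation actions are configured according to the subtarget
// facilities (high-word, vector, and the vector-enhancements facilities).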
  if (Subtarget.hasHighWord())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVector()) {
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE; ++I) {
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE; ++I) {
  if (Subtarget.hasPopulationCount())
  if (!Subtarget.hasFPExtension())
  if (Subtarget.hasFPExtension())
  if (Subtarget.hasFPExtension())
  if (!Subtarget.hasFPExtension()) {
  if (Subtarget.hasMiscellaneousExtensions3()) {
    if (VT != MVT::v2i64)
    if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements2()) {
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE; ++I) {
  if (Subtarget.hasFPExtension()) {
  if (Subtarget.hasFPExtension()) {
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements1()) {
  if (Subtarget.hasVectorEnhancements1()) {
  for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                   MVT::v4f32, MVT::v2f64 }) {
  if (!Subtarget.hasVectorEnhancements1()) {
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVectorEnhancements1()) {
  if (!Subtarget.hasVector()) {
  return Subtarget.hasSoftFloat();
  return Subtarget.hasVectorEnhancements1();
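// SystemZVectorConstantInfo: checks whether a splat constant is
// representable via VECTOR REPLICATE IMMEDIATE (16-bit signed element) or
// VECTOR GENERATE MASK (a contiguous, possibly wrapping bit range), folding
// the splat's undef bits into whichever choice makes an encoding possible.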
  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))
  if (SplatBitSize > 64)
    if (isInt<16>(SignedValue)) {
    if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
  uint64_t Lower = SplatUndefZ & maskTrailingOnes<uint64_t>(LowerBits);
  uint64_t Upper = SplatUndefZ & maskLeadingOnes<uint64_t>(UpperBits);
  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);
    unsigned HalfSize = Width / 2;
    if (HighValue != LowValue || 8 > HalfSize)
    SplatBits = HighValue;
  SplatBitSize = Width;
  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
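// Immediate-legality hooks: zero and negative-zero FP immediates can be
// materialized with LZER/LZDR-style instructions, compare immediates must
// fit the CGFI/CLGFI range (signed or unsigned 32-bit), and add immediates
// must fit ALGFI/SLGFI (unsigned 32-bit in either direction).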
                                         bool ForCodeSize) const {
  if (Imm.isZero() || Imm.isNegZero())
  return isInt<32>(Imm) || isUInt<32>(Imm);
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
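// AddressingMode and supportedAddressingMode(): a load whose single user is
// a compare with a 16-bit immediate, or a load/store pair within one basic
// block, can live with the short displacement-only form; FP and vector
// accesses (including scalar accesses feeding insert/extract-element)
// instead ask for an index register.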
      LongDisplacement(LongDispl), IndexReg(IdxReg) {}
  switch (II->getIntrinsicID()) {
  case Intrinsic::memset:
  case Intrinsic::memmove:
  case Intrinsic::memcpy:
  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
      } else if (isa<StoreInst>(SingleUser))
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {
    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsVectorAccess = MemAccessTy->isVectorTy();
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    if (IsFPAccess || IsVectorAccess)
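// isLegalAddressingMode(): vector-type accesses need the 12-bit unsigned
// displacement form, and SystemZ has no scaled-index addressing, hence the
// final AM.Scale == 0 requirement.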
  bool RequireD12 = Subtarget.hasVector() && Ty->isVectorTy();
  return AM.Scale == 0;
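// findOptimalMemOpLowering(): small memcpy/memset operations are cheaper as
// a single storage-to-storage MVC/XC than as expanded loads and stores, so
// the hook declines to scalarize anything up to MVCFastLen (16) bytes and
// defers to the generic TargetLowering implementation otherwise.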
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  const int MVCFastLen = 16;

  if (Limit != ~unsigned(0)) {
    if (Op.isMemcpy() && Op.allowOverlap() && Op.size() <= MVCFastLen)
    if (Op.isMemset() && Op.size() - 1 <= MVCFastLen)
    if (Op.isZeroMemset())
                                                  SrcAS, FuncAttributes);
  return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();
  return FromBits > ToBits;
  return FromBits > ToBits;
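// Inline-assembly constraints: getConstraintType() classifies the
// single-letter and "Z..." memory constraints, and
// getSingleConstraintMatchWeight() below scores constant operands against
// the SystemZ immediate letters ('I' = unsigned 8-bit, 'J' = unsigned
// 12-bit, 'K' = signed 16-bit, 'L' = signed 20-bit displacement,
// 'M' = the constant 0x7fffffff).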
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
  } else if (Constraint.size() == 2 && Constraint[0] == 'Z') {
    switch (Constraint[1]) {
    const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  if (!CallOperandVal)
  switch (*constraint) {
    if (Subtarget.hasVector())
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
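// parseRegisterNumber() resolves explicit "{r<N>}"/"{f<N>}"/"{v<N>}"
// constraints by parsing the digits after the brace and mapping the number
// through a physical-register table for the requested register class.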
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    return std::make_pair(Map[Index], RC);
  return std::make_pair(0U, nullptr);
std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
      return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
      if (Subtarget.hasVector()) {
        return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    auto getVTSizeInBits = [&VT]() {
    if (Constraint[1] == 'r') {
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 128)
    if (Constraint[1] == 'f') {
        return std::make_pair(
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 128)
    if (Constraint[1] == 'v') {
      if (!Subtarget.hasVector())
        return std::make_pair(
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 64)
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
                                              Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
                                              Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
                                              Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
                                              Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
                                              Op.getValueType()));
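// Calling-convention lowering: SystemZGenCallingConv.inc provides the
// TableGen-generated CC_SystemZ/RetCC_SystemZ assignment functions, the
// ScratchRegs array backs getScratchRegisters(), the loops over Ins and Outs
// scan the incoming/outgoing argument lists, and the PAIR128 fragment builds
// the untyped 128-bit register pair used for i128 values.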
#include "SystemZGenCallingConv.inc"

  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
                                                  Type *ToType) const {
  for (unsigned i = 0; i < Ins.size(); ++i)
  for (unsigned i = 0; i < Outs.size(); ++i)
  if (BitCastToType == MVT::v2i64)
                                    MVT::Untyped, Hi, Lo);
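// joinRegisterPartsIntoValue()/splitValueIntoRegisterParts(): a single
// MVT::Untyped part stands for the even/odd 64-bit register pair that
// carries a 128-bit value.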
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
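// LowerFormalArguments(): each incoming value assignment picks a register
// class from its location VT, stack-passed values are reloaded from the
// frame, and split arguments are reassembled by walking consecutive Ins
// entries that share one OrigArgIndex.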
  if (Subtarget.hasVector())
  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
      RC = &SystemZ::GR32BitRegClass;
      RC = &SystemZ::GR64BitRegClass;
      RC = &SystemZ::FP32BitRegClass;
      RC = &SystemZ::FP64BitRegClass;
      RC = &SystemZ::FP128BitRegClass;
      RC = &SystemZ::VR128BitRegClass;
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert(Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        unsigned PartOffset = Ins[I + 1].PartOffset;
    int64_t RegSaveOffset =
                                 &SystemZ::FP64BitRegClass);
  MRI.addLiveIn(Regs->getADARegister(), ADAvReg);
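// canUseSiblingCall(): sibling calls are rejected if any argument lives on
// the stack, needs the callee-saved register R6, or is a Swift self/error
// argument.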
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
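// XPLINK (z/OS) helpers: the ADA (associated data area) register and the
// ADA/EPA deltas select how a callee's descriptor is addressed for internal
// versus external linkage.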
                       unsigned Offset, bool LoadAdr = false) {
  bool LoadAddr = false;
  const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV);
  unsigned ADADelta = 0;
  unsigned EPADelta = 8;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    bool IsInternal = (G->getGlobal()->hasInternalLinkage() ||
                       G->getGlobal()->hasPrivateLinkage());
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
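// LowerCall(): the XPLINK ABI keeps the outgoing stack area 64-byte aligned
// with a register save area; outgoing arguments are either copied into
// registers or stored to the argument area, and split arguments are spilled
// to one slot that must be large enough for every part.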
  if (Subtarget.hasVector()) {
  NumBytes = std::max(64U, (unsigned)alignTo(NumBytes, 64));
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    unsigned ArgIndex = Outs[I].OrigArgIndex;
    if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
      Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;
        SlotVT = Outs[I].ArgVT;
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      assert(Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
                          SlotVT.getStoreSize()) &&
               "Not enough space for argument part!");
      ArgValue = SpillSlot;
      if (!StackPtr.getNode())
    RegsToPass.push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));
  if (!MemOpChains.empty())
        ->getAddressOfCalleeRegister();
    Callee = DAG.getRegister(CalleeReg, Callee.getValueType());
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  } else if (IsTailCall) {
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
                             RegsToPass[I].second, Glue);
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
                                  RegsToPass[I].second.getValueType()));
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
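// makeExternalCall(): builds a libcall from the supplied operands; each
// operand becomes an ArgListEntry whose IR type is recovered from its EVT.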
    bool DoesNotReturn, bool IsReturnValueUsed) const {
  Args.reserve(Ops.size());
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Args.push_back(Entry);
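// CanLowerReturn()/LowerReturn(): return values are assigned with
// RetCC_SystemZ; i128 return values are rejected up front since that case
// cannot easily be detected inside the generated calling-convention code.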
  if (Subtarget.hasVector())
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
  if (Subtarget.hasVector())
  if (RetLocs.empty())
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
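// CC-producing intrinsics: isIntrinsicWithCCAndChain() matches chained
// intrinsics (operand 1 holds the intrinsic ID), while isIntrinsicWithCC()
// matches pure ones (operand 0); both report the SystemZISD opcode and the
// set of CC values the instruction can produce.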
                                   unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  case Intrinsic::s390_tbegin:
  case Intrinsic::s390_tbegin_nofloat:
  case Intrinsic::s390_tend:
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
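// The CC-setting vector intrinsics handled by isIntrinsicWithCC(): the
// pack-saturate, compare, find/search-string, and test-data-class families,
// each mapped to its SystemZISD opcode and valid CC mask.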
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:
  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:
  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:
  case Intrinsic::s390_vtm:
  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:
  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:
  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:
  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:
  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:
  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:
  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:
  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:
  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:
  case Intrinsic::s390_vstrsb:
  case Intrinsic::s390_vstrsh:
  case Intrinsic::s390_vstrsf:
  case Intrinsic::s390_vstrszb:
  case Intrinsic::s390_vstrszh:
  case Intrinsic::s390_vstrszf:
  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:
  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:
  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:
  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:
  case Intrinsic::s390_tdc:
  for (unsigned I = 2; I < NumOps; ++I)
  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
  return Intr.getNode();
  for (unsigned I = 1; I < NumOps; ++I)
  return Intr.getNode();
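// Body of the CONV(X) macro used by CCMaskForCondCode(): the plain,
// "ordered" and "unordered" variants of each ISD condition map to the same
// CCMASK_CMP value, with CCMASK_CMP_UO mixed in for the SETU form.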
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
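// Comparison-canonicalization helpers (adjustZeroCmp, adjustSubwordCmp,
// adjustForSubtraction, ...): rewrite the Comparison in place so that a
// memory operand, a narrower extending load, or an immediate in CHSI/CLHSI
// range can be consumed directly by the compare instruction.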
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  int64_t Value = ConstOp1->getSExtValue();
  if (!C.Op0.hasOneUse() ||
  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getSizeInBits();
  if ((NumBits != 8 && NumBits != 16) ||
      NumBits != Load->getMemoryVT().getStoreSizeInBits())
  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  uint64_t Mask = (1 << NumBits) - 1;
  int64_t SignedValue = ConstOp1->getSExtValue();
  } else if (NumBits == 8) {
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType) {
                               Load->getBasePtr(), Load->getPointerInfo(),
                               Load->getMemoryVT(), Load->getAlign(),
                               Load->getMemOperand()->getFlags());
  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())
  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load->getMemoryVT() == MVT::i8)
  switch (Load->getExtensionType()) {
  if (C.Op0.getValueType() == MVT::f128)
  if (isa<ConstantFPSDNode>(C.Op1))
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
      isUInt<16>(ConstOp1->getZExtValue()))
      isInt<16>(ConstOp1->getSExtValue()))
  unsigned Opcode0 = C.Op0.getOpcode();
      cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
      ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
       (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
    Flags.setNoSignedWrap(false);
    Flags.setNoUnsignedWrap(false);
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {
      C.Op0.getValueType() == MVT::i64 &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
        cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
        C.Op0.getValueSizeInBits().getFixedValue()) {
      unsigned Type = L->getExtensionType();
      C.Op0 = C.Op0.getOperand(0);
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
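// getTestUnderMaskCond() turns "(Op0 & Mask) cmp CmpVal" into a TEST UNDER
// MASK condition; Low and High are the lowest and highest bits of Mask.
// Illustrative example (not from the original source): with Mask == 0x30,
// "(X & 0x30) == 0" yields SystemZ::CCMASK_TM_ALL_0 and
// "(X & 0x30) == 0x30" yields SystemZ::CCMASK_TM_ALL_1.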
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");
  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
  if (EffectivelyUnsigned && CmpVal < Low) {
  if (CmpVal == Mask) {
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  uint64_t CmpVal = ConstOp1->getZExtValue();
    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    MaskVal = Mask->getZExtValue();
  if (NewC.Op0.getValueType() != MVT::i64 ||
    MaskVal = -(CmpVal & -CmpVal);
  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;
      NewC.Op0.getOpcode() == ISD::SHL &&
      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
                                        MaskVal >> ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
      NewC.Op0.getOpcode() == ISD::SRL &&
      (MaskVal << ShiftVal != 0) &&
      ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
                                        MaskVal << ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;
  if (Mask && Mask->getZExtValue() == MaskVal)
  C.CCMask = NewCCMask;
  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
  C.Op0 = C.Op0.getOperand(0);
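// getIntrinsicCmp(): the 4-bit CC mask is indexed MSB-first, so CC value n
// corresponds to bit (3 - n).  Illustrative example (not from the original
// source): SETEQ against CC == 1 gives C.CCMask = 1 << (3 - 1) == 0b0100
// (SystemZ::CCMASK_1), and SETNE gives its complement within CCValid.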
  C.CCValid = CCValid;
    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  C.CCMask &= CCValid;
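// getCmp()/emitCmp(): build a Comparison from the operands and condition
// code, swap or widen operands where profitable, and finally emit either a
// chained strict-FP compare node or a plain CC-producing compare.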
                      bool IsSignaling = false) {
  unsigned Opcode, CCValid;
  Comparison C(CmpOp0, CmpOp1, Chain);
  if (C.Op0.getValueType().isFloatingPoint()) {
    else if (!IsSignaling)
  C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
  if (!C.Op1.getNode()) {
    switch (C.Op0.getOpcode()) {
    return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);
  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
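// Vector comparisons: CmpMode distinguishes integer, FP, strict (quiet) FP,
// and signaling FP compares.  Conditions without a direct instruction are
// assembled from two compares (e.g. ONE from LT and GT), and LE/LT forms
// are handled by swapping the operands of GE/GT.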
                          unsigned CCValid, unsigned CCMask) {
    case CmpMode::Int:         return 0;
    case CmpMode::FP:          return 0;
    case CmpMode::StrictFP:    return 0;
    case CmpMode::SignalingFP: return 0;
  int Mask[] = { Start, -1, Start + 1, -1 };
      !Subtarget.hasVectorEnhancements1()) {
  SDValue Ops[2] = { Res, NewChain };
    return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1);
  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
                                          bool IsSignaling) const {
  assert(!IsSignaling || Chain);
  CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
                 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
  bool Invert = false;
    assert(IsFP && "Unexpected integer comparison");
                               DL, VT, CmpOp1, CmpOp0, Chain);
                               DL, VT, CmpOp0, CmpOp1, Chain);
                            LT.getValue(1), GE.getValue(1));
    assert(IsFP && "Unexpected integer comparison");
                               DL, VT, CmpOp1, CmpOp0, Chain);
                               DL, VT, CmpOp0, CmpOp1, Chain);
                            LT.getValue(1), GT.getValue(1));
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);
    Chain = Cmp.getValue(1);
  if (Chain && Chain.getNode() != Cmp.getNode()) {
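// Scalar SETCC/STRICT_FSETCC lowering: non-vector cases materialize the CC
// value through a Comparison, while BR_CC and SELECT_CC below reuse the
// same Comparison machinery before emitting branch or select nodes.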
  EVT VT = Op.getValueType();
    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);
  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
                                                  bool IsSignaling) const {
  EVT VT = Op.getNode()->getValueType(0);
    SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
                                   Chain, IsSignaling);
  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL, Chain, IsSignaling));
  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
      cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
  SDValue Ops[] = {TrueOp, FalseOp,
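// TLS lowering: for the general- and local-dynamic models the GOT offset is
// passed to __tls_get_offset in R2 over a call node carrying the
// call-preserved register mask; lowerThreadPointer() reads the thread
// pointer out of the access registers.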
  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
                     Node->getValueType(0),
  assert(Mask && "Missing call preserved mask for calling convention");
  Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);

SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
  SDValue TP = lowerThreadPointer(DL, DAG);
  if (CP->isMachineConstantPoolEntry())
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
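// lowerBITCAST(): an i32<->f32 bitcast is routed through a 64-bit register
// (using the high word when the high-word facility is available) instead of
// bouncing through memory; a bitcast of a load is simply re-typed.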
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();
  if (auto *LoadN = dyn_cast<LoadSDNode>(In))
                       LoadN->getBasePtr(), LoadN->getMemOperand());
  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    if (Subtarget.hasHighWord()) {
                                MVT::i64, SDValue(U64, 0), In);
                 DL, MVT::f32, Out64);
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
                              MVT::f64, SDValue(U64, 0), In);
    if (Subtarget.hasHighWord())
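// VASTART/VACOPY: the ELF va_list is the four-field { gpr, fpr,
// overflow_arg_area, reg_save_area } structure, initialized with one store
// per field; VACOPY duplicates the whole structure with a single 8-byte
// aligned memcpy (32 bytes on ELF).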
    return lowerVASTART_XPLINK(Op, DAG);
  return lowerVASTART_ELF(Op, DAG);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  const unsigned NumFields = 4;
  for (unsigned I = 0; I < NumFields; ++I) {
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
                       Align(8), false, false,
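// lowerDYNAMIC_STACKALLOC(): dispatches to the XPLINK or ELF variant.  Both
// reserve extra space up front for over-aligned requests; the ELF path also
// preserves the backchain slot around the stack-pointer update and realigns
// the returned address when RequiredAlign exceeds the default StackAlign.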
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op,
    return lowerDYNAMIC_STACKALLOC_XPLINK(Op, DAG);
  return lowerDYNAMIC_STACKALLOC_ELF(Op, DAG);
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,
      (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);
  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  if (ExtraAlignSpace)
  bool IsSigned = false;
  bool DoesNotReturn = false;
  bool IsReturnValueUsed = false;
  EVT VT = Op.getValueType();
  Register SPReg = Regs.getStackPointerRegister();
  if (ExtraAlignSpace) {
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,
      (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);
  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),
  if (ExtraAlignSpace)
      DAG.getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);
  if (RequiredAlign > StackAlign) {
    Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),
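// lowerGET_DYNAMIC_AREA_OFFSET() and the SMUL_LOHI/UMUL_LOHI lowerings:
// 32-bit cases use one 64-bit multiply and extract both halves, the
// miscellaneous-extensions-2 facility provides a direct 64x64->128 multiply,
// and otherwise the product is assembled from high/low parts (the LL/RL
// operands below).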
SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
  EVT VT = Op.getValueType();
                    Op.getOperand(1), Ops[1], Ops[0]);
  else if (Subtarget.hasMiscellaneousExtensions2())
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
                     LL, RL, Ops[1], Ops[0]);